Merge
commit 6ed285af0a
@@ -76,6 +76,11 @@ endif
 ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
 CFLAGS_WARN = +w -errwarn
 endif
+# When using compiler version 5.13 (Solaris Studio 12.4), calls to explicitly
+# instantiated template functions trigger this warning when +w is active.
+ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 513), 1)
+CFLAGS_WARN += -erroff=notemsource
+endif
 CFLAGS += $(CFLAGS_WARN)
 
 ifeq ("${Platform_compiler}", "sparcWorks")
@@ -3043,7 +3043,9 @@ void MacroAssembler::store_check(Register obj) {
 // register obj is destroyed afterwards.
 
 BarrierSet* bs = Universe::heap()->barrier_set();
-assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
+assert(bs->kind() == BarrierSet::CardTableForRS ||
+bs->kind() == BarrierSet::CardTableExtension,
+"Wrong barrier set kind");
 
 CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
@@ -691,7 +691,7 @@ class StubGenerator: public StubCodeGenerator {
 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
 __ pop(RegSet::range(r0, r29), sp); // integer registers except lr & sp }
 break;
-case BarrierSet::CardTableModRef:
+case BarrierSet::CardTableForRS:
 case BarrierSet::CardTableExtension:
 case BarrierSet::ModRef:
 break;
@@ -731,7 +731,7 @@ class StubGenerator: public StubCodeGenerator {
 __ pop(RegSet::range(r0, r29), sp); // integer registers except lr & sp }
 }
 break;
-case BarrierSet::CardTableModRef:
+case BarrierSet::CardTableForRS:
 case BarrierSet::CardTableExtension:
 {
 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
@@ -186,7 +186,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
 }
 break;
 #endif // INCLUDE_ALL_GCS
-case BarrierSet::CardTableModRef:
+case BarrierSet::CardTableForRS:
 case BarrierSet::CardTableExtension:
 {
 if (val == noreg) {
@@ -2614,7 +2614,7 @@ void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register t
 void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) {
 CardTableModRefBS* bs =
 barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
-assert(bs->kind() == BarrierSet::CardTableModRef ||
+assert(bs->kind() == BarrierSet::CardTableForRS ||
 bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
 #ifdef ASSERT
 cmpdi(CCR0, Rnew_val, 0);
@@ -656,7 +656,7 @@ class StubGenerator: public StubCodeGenerator {
 __ bind(filtered);
 }
 break;
-case BarrierSet::CardTableModRef:
+case BarrierSet::CardTableForRS:
 case BarrierSet::CardTableExtension:
 case BarrierSet::ModRef:
 break;
@@ -697,7 +697,7 @@ class StubGenerator: public StubCodeGenerator {
 }
 }
 break;
-case BarrierSet::CardTableModRef:
+case BarrierSet::CardTableForRS:
 case BarrierSet::CardTableExtension:
 {
 Label Lskip_loop, Lstore_loop;
@@ -105,7 +105,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
 }
 break;
 #endif // INCLUDE_ALL_GCS
-case BarrierSet::CardTableModRef:
+case BarrierSet::CardTableForRS:
 case BarrierSet::CardTableExtension:
 {
 Label Lnull, Ldone;
@@ -3958,7 +3958,7 @@ void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_v
 if (new_val == G0) return;
 CardTableModRefBS* bs =
 barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
-assert(bs->kind() == BarrierSet::CardTableModRef ||
+assert(bs->kind() == BarrierSet::CardTableForRS ||
 bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
 card_table_write(bs->byte_map_base, tmp, store_addr);
 }
@@ -981,7 +981,7 @@ class StubGenerator: public StubCodeGenerator {
 __ restore();
 }
 break;
-case BarrierSet::CardTableModRef:
+case BarrierSet::CardTableForRS:
 case BarrierSet::CardTableExtension:
 case BarrierSet::ModRef:
 break;
@@ -1014,7 +1014,7 @@ class StubGenerator: public StubCodeGenerator {
 __ restore();
 }
 break;
-case BarrierSet::CardTableModRef:
+case BarrierSet::CardTableForRS:
 case BarrierSet::CardTableExtension:
 {
 CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
@@ -91,7 +91,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
 }
 break;
 #endif // INCLUDE_ALL_GCS
-case BarrierSet::CardTableModRef:
+case BarrierSet::CardTableForRS:
 case BarrierSet::CardTableExtension:
 {
 if (index == noreg ) {
@@ -4320,7 +4320,9 @@ void MacroAssembler::store_check(Register obj) {
 // register obj is destroyed afterwards.
 
 BarrierSet* bs = Universe::heap()->barrier_set();
-assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
+assert(bs->kind() == BarrierSet::CardTableForRS ||
+bs->kind() == BarrierSet::CardTableExtension,
+"Wrong barrier set kind");
 
 CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
@@ -722,7 +722,7 @@ class StubGenerator: public StubCodeGenerator {
 __ popa();
 }
 break;
-case BarrierSet::CardTableModRef:
+case BarrierSet::CardTableForRS:
 case BarrierSet::CardTableExtension:
 case BarrierSet::ModRef:
 break;
@@ -754,7 +754,7 @@ class StubGenerator: public StubCodeGenerator {
 }
 break;
 
-case BarrierSet::CardTableModRef:
+case BarrierSet::CardTableForRS:
 case BarrierSet::CardTableExtension:
 {
 CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
@@ -367,16 +367,20 @@ class StubGenerator: public StubCodeGenerator {
 #ifdef ASSERT
 // verify that threads correspond
 {
-Label L, S;
+Label L1, L2, L3;
 __ cmpptr(r15_thread, thread);
-__ jcc(Assembler::notEqual, S);
+__ jcc(Assembler::equal, L1);
+__ stop("StubRoutines::call_stub: r15_thread is corrupted");
+__ bind(L1);
 __ get_thread(rbx);
+__ cmpptr(r15_thread, thread);
+__ jcc(Assembler::equal, L2);
+__ stop("StubRoutines::call_stub: r15_thread is modified by call");
+__ bind(L2);
 __ cmpptr(r15_thread, rbx);
-__ jcc(Assembler::equal, L);
+__ jcc(Assembler::equal, L3);
-__ bind(S);
-__ jcc(Assembler::equal, L);
 __ stop("StubRoutines::call_stub: threads must correspond");
-__ bind(L);
+__ bind(L3);
 }
 #endif
 
@@ -450,15 +454,20 @@ class StubGenerator: public StubCodeGenerator {
 #ifdef ASSERT
 // verify that threads correspond
 {
-Label L, S;
+Label L1, L2, L3;
 __ cmpptr(r15_thread, thread);
-__ jcc(Assembler::notEqual, S);
+__ jcc(Assembler::equal, L1);
+__ stop("StubRoutines::catch_exception: r15_thread is corrupted");
+__ bind(L1);
 __ get_thread(rbx);
+__ cmpptr(r15_thread, thread);
+__ jcc(Assembler::equal, L2);
+__ stop("StubRoutines::catch_exception: r15_thread is modified by call");
+__ bind(L2);
 __ cmpptr(r15_thread, rbx);
-__ jcc(Assembler::equal, L);
+__ jcc(Assembler::equal, L3);
-__ bind(S);
 __ stop("StubRoutines::catch_exception: threads must correspond");
-__ bind(L);
+__ bind(L3);
 }
 #endif
 
@@ -1244,7 +1253,7 @@ class StubGenerator: public StubCodeGenerator {
 __ popa();
 }
 break;
-case BarrierSet::CardTableModRef:
+case BarrierSet::CardTableForRS:
 case BarrierSet::CardTableExtension:
 case BarrierSet::ModRef:
 break;
@@ -1284,7 +1293,7 @@ class StubGenerator: public StubCodeGenerator {
 __ popa();
 }
 break;
-case BarrierSet::CardTableModRef:
+case BarrierSet::CardTableForRS:
 case BarrierSet::CardTableExtension:
 {
 CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
@@ -200,7 +200,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
 }
 break;
 #endif // INCLUDE_ALL_GCS
-case BarrierSet::CardTableModRef:
+case BarrierSet::CardTableForRS:
 case BarrierSet::CardTableExtension:
 {
 if (val == noreg) {
@@ -1425,7 +1425,7 @@ void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
 G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
 break;
 #endif // INCLUDE_ALL_GCS
-case BarrierSet::CardTableModRef:
+case BarrierSet::CardTableForRS:
 case BarrierSet::CardTableExtension:
 // No pre barriers
 break;
@@ -1445,7 +1445,7 @@ void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
 G1SATBCardTableModRef_post_barrier(addr, new_val);
 break;
 #endif // INCLUDE_ALL_GCS
-case BarrierSet::CardTableModRef:
+case BarrierSet::CardTableForRS:
 case BarrierSet::CardTableExtension:
 CardTableModRef_post_barrier(addr, new_val);
 break;
@@ -367,7 +367,7 @@ bool G1ArchiveAllocator::alloc_new_region() {
 _max = _bottom + HeapRegion::min_region_size_in_words();
 
 // Tell mark-sweep that objects in this region are not to be marked.
-G1MarkSweep::mark_range_archive(MemRegion(_bottom, HeapRegion::GrainWords));
+G1MarkSweep::set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), true);
 
 // Since we've modified the old set, call update_sizes.
 _g1h->g1mm()->update_sizes();
@@ -65,6 +65,7 @@
 #include "memory/iterator.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.inline.hpp"
+#include "runtime/init.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -949,6 +950,7 @@ bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
 }
 
 bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
+assert(!is_init_completed(), "Expect to be called at JVM init time");
 assert(ranges != NULL, "MemRegion array NULL");
 assert(count != 0, "No MemRegions provided");
 MutexLockerEx x(Heap_lock);
@@ -1037,12 +1039,13 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
 }
 
 // Notify mark-sweep of the archive range.
-G1MarkSweep::mark_range_archive(curr_range);
+G1MarkSweep::set_range_archive(curr_range, true);
 }
 return true;
 }
 
 void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
+assert(!is_init_completed(), "Expect to be called at JVM init time");
 assert(ranges != NULL, "MemRegion array NULL");
 assert(count != 0, "No MemRegions provided");
 MemRegion reserved = _hrm.reserved();
@@ -1125,6 +1128,81 @@ inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
 return result;
 }
 
+void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
+assert(!is_init_completed(), "Expect to be called at JVM init time");
+assert(ranges != NULL, "MemRegion array NULL");
+assert(count != 0, "No MemRegions provided");
+MemRegion reserved = _hrm.reserved();
+HeapWord* prev_last_addr = NULL;
+HeapRegion* prev_last_region = NULL;
+size_t size_used = 0;
+size_t uncommitted_regions = 0;
+
+// For each Memregion, free the G1 regions that constitute it, and
+// notify mark-sweep that the range is no longer to be considered 'archive.'
+MutexLockerEx x(Heap_lock);
+for (size_t i = 0; i < count; i++) {
+HeapWord* start_address = ranges[i].start();
+HeapWord* last_address = ranges[i].last();
+
+assert(reserved.contains(start_address) && reserved.contains(last_address),
+err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
+p2i(start_address), p2i(last_address)));
+assert(start_address > prev_last_addr,
+err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
+p2i(start_address), p2i(prev_last_addr)));
+size_used += ranges[i].byte_size();
+prev_last_addr = last_address;
+
+HeapRegion* start_region = _hrm.addr_to_region(start_address);
+HeapRegion* last_region = _hrm.addr_to_region(last_address);
+
+// Check for ranges that start in the same G1 region in which the previous
+// range ended, and adjust the start address so we don't try to free
+// the same region again. If the current range is entirely within that
+// region, skip it.
+if (start_region == prev_last_region) {
+start_address = start_region->end();
+if (start_address > last_address) {
+continue;
+}
+start_region = _hrm.addr_to_region(start_address);
+}
+prev_last_region = last_region;
+
+// After verifying that each region was marked as an archive region by
+// alloc_archive_regions, set it free and empty and uncommit it.
+HeapRegion* curr_region = start_region;
+while (curr_region != NULL) {
+guarantee(curr_region->is_archive(),
+err_msg("Expected archive region at index %u", curr_region->hrm_index()));
+uint curr_index = curr_region->hrm_index();
+_old_set.remove(curr_region);
+curr_region->set_free();
+curr_region->set_top(curr_region->bottom());
+if (curr_region != last_region) {
+curr_region = _hrm.next_region_in_heap(curr_region);
+} else {
+curr_region = NULL;
+}
+_hrm.shrink_at(curr_index, 1);
+uncommitted_regions++;
+}
+
+// Notify mark-sweep that this is no longer an archive range.
+G1MarkSweep::set_range_archive(ranges[i], false);
+}
+
+if (uncommitted_regions != 0) {
+ergo_verbose1(ErgoHeapSizing,
+"attempt heap shrinking",
+ergo_format_reason("uncommitted archive regions")
+ergo_format_byte("total size"),
+HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
+}
+decrease_used(size_used);
+}
+
 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
 uint* gc_count_before_ret,
 uint* gclocker_retry_count_ret) {
@@ -4051,7 +4129,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE
 
-g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
+g1_policy()->finalize_cset(target_pause_time_ms);
+
+evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length());
 
 register_humongous_regions_with_cset();
 
@@ -4175,7 +4255,10 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
 // investigate this in CR 7178365.
 double sample_end_time_sec = os::elapsedTime();
 double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
-g1_policy()->record_collection_pause_end(pause_time_ms, evacuation_info);
+g1_policy()->record_collection_pause_end(pause_time_ms);
+
+evacuation_info.set_collectionset_used_before(g1_policy()->collection_set_bytes_used_before());
+evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());
 
 MemoryService::track_memory_usage();
 
@@ -4501,8 +4584,7 @@ public:
 bool only_young, bool claim)
 : _oop_closure(oop_closure),
 _oop_in_klass_closure(oop_closure->g1(),
-oop_closure->pss(),
-oop_closure->rp()),
+oop_closure->pss()),
 _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
 _claim(claim) {
 
@@ -4531,18 +4613,18 @@ public:
 bool only_young = _g1h->collector_state()->gcs_are_young();
 
 // Non-IM young GC.
-G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, pss, rp);
+G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, pss);
 G1CLDClosure<G1MarkNone> scan_only_cld_cl(&scan_only_root_cl,
 only_young, // Only process dirty klasses.
 false); // No need to claim CLDs.
 // IM young GC.
 // Strong roots closures.
-G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> scan_mark_root_cl(_g1h, pss, rp);
+G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> scan_mark_root_cl(_g1h, pss);
 G1CLDClosure<G1MarkFromRoot> scan_mark_cld_cl(&scan_mark_root_cl,
 false, // Process all klasses.
 true); // Need to claim CLDs.
 // Weak roots closures.
-G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, pss, rp);
+G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, pss);
 G1CLDClosure<G1MarkPromotedFromRoot> scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
 false, // Process all klasses.
 true); // Need to claim CLDs.
@@ -5241,9 +5323,9 @@ public:
 G1ParScanThreadState* pss = _pss[worker_id];
 pss->set_ref_processor(NULL);
 
-G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, pss, NULL);
+G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, pss);
 
-G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss, NULL);
+G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss);
 
 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
 
@@ -5341,9 +5423,9 @@ public:
 pss->set_ref_processor(NULL);
 assert(pss->queue_is_empty(), "both queue and overflow should be empty");
 
-G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, pss, NULL);
+G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, pss);
 
-G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss, NULL);
+G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss);
 
 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
 
@@ -5451,9 +5533,9 @@ void G1CollectedHeap::process_discovered_references(G1ParScanThreadState** per_t
 // closures while we're actually processing the discovered
 // reference objects.
 
-G1ParScanExtRootClosure only_copy_non_heap_cl(this, pss, NULL);
+G1ParScanExtRootClosure only_copy_non_heap_cl(this, pss);
 
-G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, pss, NULL);
+G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, pss);
 
 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
 
|
@ -757,6 +757,12 @@ public:
|
|||||||
// alloc_archive_regions, and after class loading has occurred.
|
// alloc_archive_regions, and after class loading has occurred.
|
||||||
void fill_archive_regions(MemRegion* range, size_t count);
|
void fill_archive_regions(MemRegion* range, size_t count);
|
||||||
|
|
||||||
|
// For each of the specified MemRegions, uncommit the containing G1 regions
|
||||||
|
// which had been allocated by alloc_archive_regions. This should be called
|
||||||
|
// rather than fill_archive_regions at JVM init time if the archive file
|
||||||
|
// mapping failed, with the same non-overlapping and sorted MemRegion array.
|
||||||
|
void dealloc_archive_regions(MemRegion* range, size_t count);
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
|
|
||||||
// Shrink the garbage-first heap by at most the given size (in bytes!).
|
// Shrink the garbage-first heap by at most the given size (in bytes!).
|
||||||
|
@@ -932,7 +932,7 @@ bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc
 // Anything below that is considered to be zero
 #define MIN_TIMER_GRANULARITY 0.0000001
 
-void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) {
+void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
 double end_time_sec = os::elapsedTime();
 assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
 "otherwise, the subtraction below does not make sense");
@@ -964,9 +964,6 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
 _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
 end_time_sec, _g1->gc_tracer_stw()->gc_id());
 
-evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
-evacuation_info.set_bytes_copied(_bytes_copied_during_gc);
-
 if (update_stats) {
 _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
 // this is where we update the allocation rate of the application
@@ -1883,7 +1880,7 @@ uint G1CollectorPolicy::calc_max_old_cset_length() {
 }
 
 
-void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info) {
+void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
 double young_start_time_sec = os::elapsedTime();
 
 YoungList* young_list = _g1->young_list();
@@ -2093,7 +2090,6 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInf
 
 double non_young_end_time_sec = os::elapsedTime();
 phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
-evacuation_info.set_collectionset_regions(cset_region_length());
 }
 
 void TraceYoungGenTimeData::record_start_collection(double time_to_stop_the_world_ms) {
@@ -634,13 +634,11 @@ public:
 virtual HeapWord* satisfy_failed_allocation(size_t size,
 bool is_tlab);
 
-BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }
-
 bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
 
 // Record the start and end of an evacuation pause.
 void record_collection_pause_start(double start_time_sec);
-void record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info);
+void record_collection_pause_end(double pause_time_ms);
 
 // Record the start and end of a full collection.
 void record_full_collection_start();
@@ -682,6 +680,10 @@ public:
 return _bytes_copied_during_gc;
 }
 
+size_t collection_set_bytes_used_before() const {
+return _collection_set_bytes_used_before;
+}
+
 // Determine whether there are candidate regions so that the
 // next GC should be mixed. The two action strings are used
 // in the ergo output when the method returns true or false.
@@ -691,7 +693,7 @@ public:
 // Choose a new collection set. Marks the chosen regions as being
 // "in_collection_set", and links them together. The head and number of
 // the collection set are available via access methods.
-void finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info);
+void finalize_cset(double target_pause_time_ms);
 
 // The head of the list (via "next_in_collection_set()") representing the
 // current collection set.
@@ -310,9 +310,9 @@ void G1MarkSweep::enable_archive_object_check() {
 HeapRegion::GrainBytes);
 }
 
-void G1MarkSweep::mark_range_archive(MemRegion range) {
+void G1MarkSweep::set_range_archive(MemRegion range, bool is_archive) {
 assert(_archive_check_enabled, "archive range check not enabled");
-_archive_region_map.set_by_address(range, true);
+_archive_region_map.set_by_address(range, is_archive);
 }
 
 bool G1MarkSweep::in_archive_range(oop object) {
@@ -58,8 +58,8 @@ class G1MarkSweep : AllStatic {
 // Create the _archive_region_map which is used to identify archive objects.
 static void enable_archive_object_check();
 
-// Mark the regions containing the specified address range as archive regions.
-static void mark_range_archive(MemRegion range);
+// Set the regions containing the specified address range as archive/non-archive.
+static void set_range_archive(MemRegion range, bool is_archive);
 
 // Check if an object is in an archive region using the _archive_region_map.
 static bool in_archive_range(oop object);
@@ -125,8 +125,7 @@ private:
 template <class T> void do_oop_work(T* p);
 
 public:
-G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
-ReferenceProcessor* rp) :
+G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
 G1ParCopyHelper(g1, par_scan_state) {
 assert(_ref_processor == NULL, "sanity");
 }
@@ -141,7 +140,6 @@ public:
 
 G1CollectedHeap* g1() { return _g1; };
 G1ParScanThreadState* pss() { return _par_scan_state; }
-ReferenceProcessor* rp() { return _ref_processor; };
 };
 
 typedef G1ParCopyClosure<G1BarrierNone, G1MarkNone> G1ParScanExtRootClosure;
@@ -426,7 +426,7 @@ uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
 (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
 uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);
 
-uncommit_regions(idx_last_found + num_last_found - to_remove, to_remove);
+shrink_at(idx_last_found + num_last_found - to_remove, to_remove);
 
 cur = idx_last_found;
 removed += to_remove;
@@ -437,6 +437,17 @@ uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
 return removed;
 }
 
+void HeapRegionManager::shrink_at(uint index, size_t num_regions) {
+#ifdef ASSERT
+for (uint i = index; i < (index + num_regions); i++) {
+assert(is_available(i), err_msg("Expected available region at index %u", i));
+assert(at(i)->is_empty(), err_msg("Expected empty region at index %u", i));
+assert(at(i)->is_free(), err_msg("Expected free region at index %u", i));
+}
+#endif
+uncommit_regions(index, num_regions);
+}
+
 uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
 guarantee(start_idx < _allocated_heapregions_length, "checking");
 guarantee(res_idx != NULL, "checking");
@@ -241,6 +241,10 @@ public:
 // Return the actual number of uncommitted regions.
 uint shrink_by(uint num_regions_to_remove);
 
+// Uncommit a number of regions starting at the specified index, which must be available,
+// empty, and free.
+void shrink_at(uint index, size_t num_regions);
+
 void verify();
 
 // Do some sanity checking.
@@ -56,13 +56,7 @@ class CardTableExtension : public CardTableModRefBS {
 CardTableExtension(MemRegion whole_heap) :
 CardTableModRefBS(
 whole_heap,
-// Concrete tag should be BarrierSet::CardTableExtension.
-// That will presently break things in a bunch of places though.
-// The concrete tag is used as a dispatch key in many places, and
-// CardTableExtension does not correctly dispatch in some of those
-// uses. This will be addressed as part of a reorganization of the
-// BarrierSet hierarchy.
-BarrierSet::FakeRtti(BarrierSet::CardTableModRef, 0).add_tag(BarrierSet::CardTableExtension))
+BarrierSet::FakeRtti(BarrierSet::CardTableExtension))
 { }
 
 // Scavenge support
@@ -132,6 +132,9 @@ public:
 // First the pre-write versions...
 template <class T> inline void write_ref_field_pre(T* field, oop new_val);
 private:
+// Helper for write_ref_field_pre and friends, testing for specialized cases.
+bool devirtualize_reference_writes() const;
+
 // Keep this private so as to catch violations at build time.
 virtual void write_ref_field_pre_work( void* field, oop new_val) { guarantee(false, "Not needed"); };
 protected:
@@ -32,8 +32,18 @@
 // performance-critical calls when the barrier is the most common
 // card-table kind.
 
+inline bool BarrierSet::devirtualize_reference_writes() const {
+switch (kind()) {
+case CardTableForRS:
+case CardTableExtension:
+return true;
+default:
+return false;
+}
+}
+
 template <class T> void BarrierSet::write_ref_field_pre(T* field, oop new_val) {
-if (kind() == CardTableModRef) {
+if (devirtualize_reference_writes()) {
 barrier_set_cast<CardTableModRefBS>(this)->inline_write_ref_field_pre(field, new_val);
 } else {
 write_ref_field_pre_work(field, new_val);
@@ -41,7 +51,7 @@ template <class T> void BarrierSet::write_ref_field_pre(T* field, oop new_val) {
 }
 
 void BarrierSet::write_ref_field(void* field, oop new_val, bool release) {
-if (kind() == CardTableModRef) {
+if (devirtualize_reference_writes()) {
 barrier_set_cast<CardTableModRefBS>(this)->inline_write_ref_field(field, new_val, release);
 } else {
 write_ref_field_work(field, new_val, release);
@@ -77,7 +87,7 @@ void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
 
 
 inline void BarrierSet::write_region(MemRegion mr) {
-if (kind() == CardTableModRef) {
+if (devirtualize_reference_writes()) {
 barrier_set_cast<CardTableModRefBS>(this)->inline_write_region(mr);
 } else {
 write_region_work(mr);
@@ -31,13 +31,7 @@
 CardTableModRefBSForCTRS::CardTableModRefBSForCTRS(MemRegion whole_heap) :
 CardTableModRefBS(
 whole_heap,
-// Concrete tag should be BarrierSet::CardTableForRS.
-// That will presently break things in a bunch of places though.
-// The concrete tag is used as a dispatch key in many places, and
-// CardTableForRS does not correctly dispatch in some of those
-// uses. This will be addressed as part of a reorganization of the
-// BarrierSet hierarchy.
-BarrierSet::FakeRtti(BarrierSet::CardTableModRef, 0).add_tag(BarrierSet::CardTableForRS)),
+BarrierSet::FakeRtti(BarrierSet::CardTableForRS)),
 // LNC functionality
 _lowest_non_clean(NULL),
 _lowest_non_clean_chunk_size(NULL),
@@ -150,8 +150,6 @@ class CollectorPolicy : public CHeapObj<mtGC> {
 #endif // INCLUDE_ALL_GCS
 
 
-virtual BarrierSet::Name barrier_set_name() = 0;
-
 virtual GenRemSet* create_rem_set(MemRegion reserved);
 
 // This method controls how a collector satisfies a request
@@ -299,8 +297,6 @@ class GenCollectorPolicy : public CollectorPolicy {
 assert(_max_young_size == MaxNewSize, "Should be taken care of by initialize_size_info");
 }
 
-BarrierSet::Name barrier_set_name() { return BarrierSet::CardTableModRef; }
-
 virtual CollectorPolicy::Name kind() {
 return CollectorPolicy::GenCollectorPolicyKind;
 }
|
@ -707,12 +707,16 @@ bool FileMapInfo::map_string_regions() {
|
|||||||
addr, string_ranges[i].byte_size(), si->_read_only,
|
addr, string_ranges[i].byte_size(), si->_read_only,
|
||||||
si->_allow_exec);
|
si->_allow_exec);
|
||||||
if (base == NULL || base != addr) {
|
if (base == NULL || base != addr) {
|
||||||
|
// dealloc the string regions from java heap
|
||||||
|
dealloc_string_regions();
|
||||||
fail_continue("Unable to map shared string space at required address.");
|
fail_continue("Unable to map shared string space at required address.");
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!verify_string_regions()) {
|
if (!verify_string_regions()) {
|
||||||
|
// dealloc the string regions from java heap
|
||||||
|
dealloc_string_regions();
|
||||||
fail_continue("Shared string regions are corrupt");
|
fail_continue("Shared string regions are corrupt");
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
@@ -745,12 +749,14 @@ bool FileMapInfo::verify_string_regions() {
 }
 
 void FileMapInfo::fixup_string_regions() {
+#if INCLUDE_ALL_GCS
 // If any string regions were found, call the fill routine to make them parseable.
 // Note that string_ranges may be non-NULL even if no ranges were found.
 if (num_ranges != 0) {
 assert(string_ranges != NULL, "Null string_ranges array with non-zero count");
 G1CollectedHeap::heap()->fill_archive_regions(string_ranges, num_ranges);
 }
+#endif
 }
 
 bool FileMapInfo::verify_region_checksum(int i) {
@@ -793,20 +799,14 @@ void FileMapInfo::unmap_region(int i) {
 }
 }
 
-void FileMapInfo::unmap_string_regions() {
-for (int i = MetaspaceShared::first_string;
-i < MetaspaceShared::first_string + MetaspaceShared::max_strings; i++) {
-struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
-size_t used = si->_used;
-if (used > 0) {
-size_t size = align_size_up(used, os::vm_allocation_granularity());
-char* addr = (char*)((void*)oopDesc::decode_heap_oop_not_null(
-(narrowOop)si->_addr._offset));
-if (!os::unmap_memory(addr, size)) {
-fail_stop("Unable to unmap shared space.");
-}
-}
+// dealloc the archived string region from java heap
+void FileMapInfo::dealloc_string_regions() {
+#if INCLUDE_ALL_GCS
+if (num_ranges > 0) {
+assert(string_ranges != NULL, "Null string_ranges array with non-zero count");
+G1CollectedHeap::heap()->dealloc_archive_regions(string_ranges, num_ranges);
 }
+#endif
 }
 
 void FileMapInfo::assert_mark(bool check) {
@@ -967,7 +967,9 @@ void FileMapInfo::stop_sharing_and_unmap(const char* msg) {
 map_info->_header->_space[i]._addr._base = NULL;
 }
 }
-map_info->unmap_string_regions();
+// Dealloc the string regions only without unmapping. The string regions are part
+// of the java heap. Unmapping of the heap regions are managed by GC.
+map_info->dealloc_string_regions();
 } else if (DumpSharedSpaces) {
 fail_stop("%s", msg);
 }
@@ -208,7 +208,7 @@ public:
 bool verify_string_regions();
 void fixup_string_regions();
 void unmap_region(int i);
-void unmap_string_regions();
+void dealloc_string_regions();
 bool verify_region_checksum(int i);
 void close();
 bool is_open() { return _file_open; }
@@ -1522,7 +1522,7 @@ void GraphKit::pre_barrier(bool do_load,
 g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
 break;
 
-case BarrierSet::CardTableModRef:
+case BarrierSet::CardTableForRS:
 case BarrierSet::CardTableExtension:
 case BarrierSet::ModRef:
 break;
@@ -1539,7 +1539,7 @@ bool GraphKit::can_move_pre_barrier() const {
 case BarrierSet::G1SATBCTLogging:
 return true; // Can move it if no safepoint
 
-case BarrierSet::CardTableModRef:
+case BarrierSet::CardTableForRS:
 case BarrierSet::CardTableExtension:
 case BarrierSet::ModRef:
 return true; // There is no pre-barrier
@@ -1565,7 +1565,7 @@ void GraphKit::post_barrier(Node* ctl,
 g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
 break;
 
-case BarrierSet::CardTableModRef:
+case BarrierSet::CardTableForRS:
 case BarrierSet::CardTableExtension:
 write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
 break;
@@ -3791,7 +3791,7 @@ void GraphKit::write_barrier_post(Node* oop_store,
 Node* cast = __ CastPX(__ ctrl(), adr);
 
 // Divide by card size
-assert(Universe::heap()->barrier_set()->kind() == BarrierSet::CardTableModRef,
+assert(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef),
 "Only one we handle so far.");
 Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
 
|
@ -3482,7 +3482,7 @@ JvmtiEnv::SetSystemProperty(const char* property, const char* value_ptr) {
|
|||||||
|
|
||||||
for (SystemProperty* p = Arguments::system_properties(); p != NULL; p = p->next()) {
|
for (SystemProperty* p = Arguments::system_properties(); p != NULL; p = p->next()) {
|
||||||
if (strcmp(property, p->key()) == 0) {
|
if (strcmp(property, p->key()) == 0) {
|
||||||
if (p->set_value((char *)value_ptr)) {
|
if (p->set_value(value_ptr)) {
|
||||||
err = JVMTI_ERROR_NONE;
|
err = JVMTI_ERROR_NONE;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -983,53 +983,61 @@ const char* Arguments::get_property(const char* key) {
|
|||||||
|
|
||||||
bool Arguments::add_property(const char* prop) {
|
bool Arguments::add_property(const char* prop) {
|
||||||
const char* eq = strchr(prop, '=');
|
const char* eq = strchr(prop, '=');
|
||||||
char* key;
|
const char* key;
|
||||||
// ns must be static--its address may be stored in a SystemProperty object.
|
const char* value = "";
|
||||||
const static char ns[1] = {0};
|
|
||||||
char* value = (char *)ns;
|
|
||||||
|
|
||||||
size_t key_len = (eq == NULL) ? strlen(prop) : (eq - prop);
|
if (eq == NULL) {
|
||||||
key = AllocateHeap(key_len + 1, mtInternal);
|
// property doesn't have a value, thus use passed string
|
||||||
strncpy(key, prop, key_len);
|
key = prop;
|
||||||
key[key_len] = '\0';
|
} else {
|
||||||
|
// property have a value, thus extract it and save to the
|
||||||
|
// allocated string
|
||||||
|
size_t key_len = eq - prop;
|
-  if (eq != NULL) {
-    size_t value_len = strlen(prop) - key_len - 1;
-    value = AllocateHeap(value_len + 1, mtInternal);
-    strncpy(value, &prop[key_len + 1], value_len + 1);
+    char* tmp_key = AllocateHeap(key_len + 1, mtInternal);
+
+    strncpy(tmp_key, prop, key_len);
+    tmp_key[key_len] = '\0';
+    key = tmp_key;
+
+    value = &prop[key_len + 1];
   }
 
   if (strcmp(key, "java.compiler") == 0) {
     process_java_compiler_argument(value);
-    FreeHeap(key);
-    if (eq != NULL) {
-      FreeHeap(value);
-    }
-    return true;
-  } else if (strcmp(key, "sun.java.command") == 0) {
-    _java_command = value;
-
     // Record value in Arguments, but let it get passed to Java.
   } else if (strcmp(key, "sun.java.launcher.is_altjvm") == 0 ||
              strcmp(key, "sun.java.launcher.pid") == 0) {
     // sun.java.launcher.is_altjvm and sun.java.launcher.pid property are
     // private and are processed in process_sun_java_launcher_properties();
     // the sun.java.launcher property is passed on to the java application
-    FreeHeap(key);
-    if (eq != NULL) {
-      FreeHeap(value);
-    }
-    return true;
-  } else if (strcmp(key, "java.vendor.url.bug") == 0) {
-    // save it in _java_vendor_url_bug, so JVM fatal error handler can access
-    // its value without going through the property list or making a Java call.
-    _java_vendor_url_bug = value;
   } else if (strcmp(key, "sun.boot.library.path") == 0) {
     PropertyList_unique_add(&_system_properties, key, value, true);
-    return true;
+  } else {
+    if (strcmp(key, "sun.java.command") == 0) {
+      if (_java_command != NULL) {
+        os::free(_java_command);
+      }
+      _java_command = os::strdup_check_oom(value, mtInternal);
+    } else if (strcmp(key, "java.vendor.url.bug") == 0) {
+      if (_java_vendor_url_bug != DEFAULT_VENDOR_URL_BUG) {
+        assert(_java_vendor_url_bug != NULL, "_java_vendor_url_bug is NULL");
+        os::free((void *)_java_vendor_url_bug);
+      }
+      // save it in _java_vendor_url_bug, so JVM fatal error handler can access
+      // its value without going through the property list or making a Java call.
+      _java_vendor_url_bug = os::strdup_check_oom(value, mtInternal);
+    }
+
+    // Create new property and add at the end of the list
+    PropertyList_unique_add(&_system_properties, key, value);
   }
-  // Create new property and add at the end of the list
-  PropertyList_unique_add(&_system_properties, key, value);
+  if (key != prop) {
+    // SystemProperty copy passed value, thus free previously allocated
+    // memory
+    FreeHeap((void *)key);
+  }
 
   return true;
 }
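The hunk above reworks the ownership rules in Arguments::add_property: the key is copied into a temporary buffer only when the property string contains '=', the value now aliases the original string instead of being heap-copied, and the temporary key is released once PropertyList_unique_add (which stores its own copy) has run. Below is a minimal standalone sketch of that split-and-free pattern, assuming plain malloc/free/printf as stand-ins for AllocateHeap/FreeHeap and the property list; add_property_sketch is a hypothetical name for illustration, not HotSpot code.

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Sketch only: models the key/value split performed by the patched add_property.
static bool add_property_sketch(const char* prop) {
  const char* eq = strchr(prop, '=');
  const char* key;
  const char* value = "";

  if (eq == NULL) {
    // No '=': the whole string is the key and the value stays empty.
    key = prop;
  } else {
    // Copy only the key portion; the value simply points into the original string.
    size_t key_len = (size_t)(eq - prop);
    char* tmp_key = (char*)malloc(key_len + 1);
    if (tmp_key == NULL) return false;          // mirrors the out-of-memory failure path
    strncpy(tmp_key, prop, key_len);
    tmp_key[key_len] = '\0';
    key = tmp_key;
    value = &prop[key_len + 1];
  }

  printf("key=%s value=%s\n", key, value);      // stand-in for PropertyList_unique_add

  if (key != prop) {
    // The "list" has taken its own copy, so the temporary key copy can be freed.
    free((void*)key);
  }
  return true;
}

int main() {
  return add_property_sketch("sun.java.command=MyApp arg1") ? 0 : 1;
}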
@ -1046,7 +1054,7 @@ void Arguments::set_mode_flags(Mode mode) {
   // Ensure Agent_OnLoad has the correct initial values.
   // This may not be the final mode; mode may change later in onload phase.
   PropertyList_unique_add(&_system_properties, "java.vm.info",
-                          (char*)VM_Version::vm_info_string(), false);
+                          VM_Version::vm_info_string(), false);
 
   UseInterpreter = true;
   UseCompiler = true;
@ -1858,7 +1866,7 @@ void Arguments::set_bytecode_flags() {
 }
 
 // Aggressive optimization flags -XX:+AggressiveOpts
-void Arguments::set_aggressive_opts_flags() {
+jint Arguments::set_aggressive_opts_flags() {
 #ifdef COMPILER2
   if (AggressiveUnboxing) {
     if (FLAG_IS_DEFAULT(EliminateAutoBox)) {
@ -1885,7 +1893,9 @@ void Arguments::set_aggressive_opts_flags() {
     // Feed the cache size setting into the JDK
     char buffer[1024];
     sprintf(buffer, "java.lang.Integer.IntegerCache.high=" INTX_FORMAT, AutoBoxCacheMax);
-    add_property(buffer);
+    if (!add_property(buffer)) {
+      return JNI_ENOMEM;
+    }
   }
   if (AggressiveOpts && FLAG_IS_DEFAULT(BiasedLockingStartupDelay)) {
     FLAG_SET_DEFAULT(BiasedLockingStartupDelay, 500);
@ -1898,12 +1908,14 @@ void Arguments::set_aggressive_opts_flags() {
 //     FLAG_SET_DEFAULT(EliminateZeroing, true);
 //   }
   }
+
+  return JNI_OK;
 }
 
 //===========================================================================================================
 // Parsing of java.compiler property
 
-void Arguments::process_java_compiler_argument(char* arg) {
+void Arguments::process_java_compiler_argument(const char* arg) {
   // For backwards compatibility, Djava.compiler=NONE or ""
   // causes us to switch to -Xint mode UNLESS -Xdebug
   // is also specified.
@ -3870,7 +3882,10 @@ jint Arguments::apply_ergo() {
   set_bytecode_flags();
 
   // Set flags if Aggressive optimization flags (-XX:+AggressiveOpts) enabled
-  set_aggressive_opts_flags();
+  jint code = set_aggressive_opts_flags();
+  if (code != JNI_OK) {
+    return code;
+  }
 
   // Turn off biased locking for locking debug mode flags,
   // which are subtly different from each other but neither works with
@ -4036,7 +4051,7 @@ void Arguments::PropertyList_add(SystemProperty** plist, SystemProperty *new_p)
   }
 }
 
-void Arguments::PropertyList_add(SystemProperty** plist, const char* k, char* v) {
+void Arguments::PropertyList_add(SystemProperty** plist, const char* k, const char* v) {
   if (plist == NULL)
     return;
 
@ -4049,7 +4064,7 @@ void Arguments::PropertyList_add(SystemProperty *element) {
 }
 
 // This add maintains unique property key in the list.
-void Arguments::PropertyList_unique_add(SystemProperty** plist, const char* k, char* v, jboolean append) {
+void Arguments::PropertyList_unique_add(SystemProperty** plist, const char* k, const char* v, jboolean append) {
   if (plist == NULL)
     return;
 
@ -60,7 +60,7 @@ class SystemProperty: public CHeapObj<mtInternal> {
   char* value() const { return _value; }
   SystemProperty* next() const { return _next; }
   void set_next(SystemProperty* next) { _next = next; }
-  bool set_value(char *value) {
+  bool set_value(const char *value) {
     if (writeable()) {
       if (_value != NULL) {
         FreeHeap(_value);
@ -364,14 +364,14 @@ class Arguments : AllStatic {
   static bool add_property(const char* prop);
 
   // Aggressive optimization flags.
-  static void set_aggressive_opts_flags();
+  static jint set_aggressive_opts_flags();
 
   // Argument parsing
   static void do_pd_flag_adjustments();
   static bool parse_argument(const char* arg, Flag::Flags origin);
   static bool process_argument(const char* arg, jboolean ignore_unrecognized, Flag::Flags origin);
   static void process_java_launcher_argument(const char*, void*);
-  static void process_java_compiler_argument(char* arg);
+  static void process_java_compiler_argument(const char* arg);
   static jint parse_options_environment_variable(const char* name, ScopedVMInitArgs* vm_args);
   static jint parse_java_tool_options_environment_variable(ScopedVMInitArgs* vm_args);
   static jint parse_java_options_environment_variable(ScopedVMInitArgs* vm_args);
@ -561,22 +561,22 @@ class Arguments : AllStatic {
   // Property List manipulation
   static void PropertyList_add(SystemProperty *element);
   static void PropertyList_add(SystemProperty** plist, SystemProperty *element);
-  static void PropertyList_add(SystemProperty** plist, const char* k, char* v);
-  static void PropertyList_unique_add(SystemProperty** plist, const char* k, char* v) {
+  static void PropertyList_add(SystemProperty** plist, const char* k, const char* v);
+  static void PropertyList_unique_add(SystemProperty** plist, const char* k, const char* v) {
     PropertyList_unique_add(plist, k, v, false);
   }
-  static void PropertyList_unique_add(SystemProperty** plist, const char* k, char* v, jboolean append);
+  static void PropertyList_unique_add(SystemProperty** plist, const char* k, const char* v, jboolean append);
   static const char* PropertyList_get_value(SystemProperty* plist, const char* key);
   static int PropertyList_count(SystemProperty* pl);
   static const char* PropertyList_get_key_at(SystemProperty* pl,int index);
   static char* PropertyList_get_value_at(SystemProperty* pl,int index);
 
   // Miscellaneous System property value getter and setters.
-  static void set_dll_dir(char *value) { _sun_boot_library_path->set_value(value); }
-  static void set_java_home(char *value) { _java_home->set_value(value); }
-  static void set_library_path(char *value) { _java_library_path->set_value(value); }
+  static void set_dll_dir(const char *value) { _sun_boot_library_path->set_value(value); }
+  static void set_java_home(const char *value) { _java_home->set_value(value); }
+  static void set_library_path(const char *value) { _java_library_path->set_value(value); }
   static void set_ext_dirs(char *value) { _ext_dirs = os::strdup_check_oom(value); }
-  static void set_sysclasspath(char *value) { _sun_boot_class_path->set_value(value); }
+  static void set_sysclasspath(const char *value) { _sun_boot_class_path->set_value(value); }
   static void append_sysclasspath(const char *value) { _sun_boot_class_path->append_value(value); }
 
   static char* get_java_home() { return _java_home->value(); }
@ -2253,6 +2253,7 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
                                          \
   declare_constant(BarrierSet::ModRef)              \
   declare_constant(BarrierSet::CardTableModRef)     \
+  declare_constant(BarrierSet::CardTableForRS)      \
   declare_constant(BarrierSet::CardTableExtension)  \
   declare_constant(BarrierSet::G1SATBCT)            \
   declare_constant(BarrierSet::G1SATBCTLogging)     \
@ -440,8 +440,10 @@ CallInst* SharkBuilder::CreateDump(Value* value) {
 // HotSpot memory barriers
 
 void SharkBuilder::CreateUpdateBarrierSet(BarrierSet* bs, Value* field) {
-  if (bs->kind() != BarrierSet::CardTableModRef)
+  if (bs->kind() != BarrierSet::CardTableForRS &&
+      bs->kind() != BarrierSet::CardTableExtension) {
     Unimplemented();
+  }
 
   CreateStore(
     LLVMValue::jbyte_constant(CardTableModRefBS::dirty_card_val()),
@ -231,7 +231,7 @@ char* VMError::error_string(char* buf, int buflen) {
 
   if (signame) {
     jio_snprintf(buf, buflen,
-                 "%s (0x%x) at pc=" PTR_FORMAT ", pid=%d, tid=" INTPTR_FORMAT,
+                 "%s (0x%x) at pc=" PTR_FORMAT ", pid=%d, tid=" UINTX_FORMAT,
                  signame, _id, _pc,
                  os::current_process_id(), os::current_thread_id());
   } else if (_filename != NULL && _lineno > 0) {
@ -239,7 +239,7 @@ char* VMError::error_string(char* buf, int buflen) {
     char separator = os::file_separator()[0];
     const char *p = strrchr(_filename, separator);
     int n = jio_snprintf(buf, buflen,
-                         "Internal Error at %s:%d, pid=%d, tid=" INTPTR_FORMAT,
+                         "Internal Error at %s:%d, pid=%d, tid=" UINTX_FORMAT,
                          p ? p + 1 : _filename, _lineno,
                          os::current_process_id(), os::current_thread_id());
     if (n >= 0 && n < buflen && _message) {
@ -253,7 +253,7 @@ char* VMError::error_string(char* buf, int buflen) {
     }
   } else {
     jio_snprintf(buf, buflen,
-                 "Internal Error (0x%x), pid=%d, tid=" INTPTR_FORMAT,
+                 "Internal Error (0x%x), pid=%d, tid=" UINTX_FORMAT,
                  _id, os::current_process_id(), os::current_thread_id());
   }
 
@ -486,7 +486,7 @@ void VMError::report(outputStream* st) {
 
   // process id, thread id
   st->print(", pid=%d", os::current_process_id());
-  st->print(", tid=" INTPTR_FORMAT, os::current_thread_id());
+  st->print(", tid=" UINTX_FORMAT, os::current_thread_id());
   st->cr();
 
   STEP(80, "(printing error message)")
@ -75,7 +75,7 @@ public class CheckCICompilerCount {
             "intx CICompilerCount := 1 {product}"
         },
         {
-            "CICompilerCount=0 must be at least 1",
+            "CICompilerCount (0) must be at least 1",
             "Improperly specified VM option 'CICompilerCount=0'"
         },
         {
@ -130,7 +130,7 @@ public class CheckCICompilerCount {
            "intx CICompilerCount := 2 {product}"
         },
         {
-            "CICompilerCount=1 must be at least 2",
+            "CICompilerCount (1) must be at least 2",
            "Improperly specified VM option 'CICompilerCount=1'"
         },
         {
hotspot/test/gc/g1/humongousObjects/Helpers.java (new file, 58 lines)
@ -0,0 +1,58 @@
/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

package gc.g1.humongousObjects;

import sun.hotspot.WhiteBox;

public class Helpers {

    // In case of 128 byte padding
    private static final int MAX_PADDING_SIZE = 128;

    /**
     * Detects amount of extra bytes required to allocate a byte array.
     * Allocating a byte[n] array takes more then just n bytes in the heap.
     * Extra bytes are required to store object reference and the length.
     * This amount depends on bitness and other factors.
     *
     * @return byte[] memory overhead
     */
    public static int detectByteArrayAllocationOverhead() {

        WhiteBox whiteBox = WhiteBox.getWhiteBox();

        int zeroLengthByteArraySize = (int) whiteBox.getObjectSize(new byte[0]);

        // Since we do not know is there any padding in zeroLengthByteArraySize we cannot just take byte[0] size as overhead
        for (int i = 1; i < MAX_PADDING_SIZE + 1; ++i) {
            int realAllocationSize = (int) whiteBox.getObjectSize(new byte[i]);
            if (realAllocationSize != zeroLengthByteArraySize) {
                // It means we did not have any padding on previous step
                return zeroLengthByteArraySize - (i - 1);
            }
        }
        throw new Error("We cannot find byte[] memory overhead - should not reach here");
    }
}
hotspot/test/gc/g1/humongousObjects/TestHumongousThreshold.java (new file, 165 lines)
@ -0,0 +1,165 @@
/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

package gc.g1.humongousObjects;

import jdk.test.lib.Asserts;
import sun.hotspot.WhiteBox;

/**
 * @test TestHumongousThreshold
 * @summary Checks that objects larger than half a region are allocated as humongous
 * @requires vm.gc=="G1" | vm.gc=="null"
 * @library /testlibrary /../../test/lib
 * @modules java.management
 * @build sun.hotspot.WhiteBox
 *        gc.g1.humongousObjects.Helpers
 *        gc.g1.humongousObjects.TestHumongousThreshold
 * @run driver ClassFileInstaller sun.hotspot.WhiteBox
 *             sun.hotspot.WhiteBox$WhiteBoxPermission
 *
 * @run main/othervm -XX:+UseG1GC -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:.
 *                   -XX:G1HeapRegionSize=1M
 *                   gc.g1.humongousObjects.TestHumongousThreshold
 *
 * @run main/othervm -XX:+UseG1GC -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:.
 *                   -XX:G1HeapRegionSize=2M
 *                   gc.g1.humongousObjects.TestHumongousThreshold
 *
 * @run main/othervm -XX:+UseG1GC -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:.
 *                   -XX:G1HeapRegionSize=4M
 *                   gc.g1.humongousObjects.TestHumongousThreshold
 *
 * @run main/othervm -XX:+UseG1GC -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:.
 *                   -XX:G1HeapRegionSize=8M
 *                   gc.g1.humongousObjects.TestHumongousThreshold
 *
 * @run main/othervm -XX:+UseG1GC -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:.
 *                   -XX:G1HeapRegionSize=16M
 *                   gc.g1.humongousObjects.TestHumongousThreshold
 *
 * @run main/othervm -XX:+UseG1GC -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:.
 *                   -XX:G1HeapRegionSize=32M
 *                   gc.g1.humongousObjects.TestHumongousThreshold
 *
 */

public class TestHumongousThreshold {
    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
    private static final int REGION_SIZE = WHITE_BOX.g1RegionSize();
    private static final int MAX_CONTINUOUS_SIZE_CHECK = 129;
    private static final int NON_HUMONGOUS_DIVIDER = 10;

    /**
     * The method allocates byte[] with specified size and checks that:
     * 1. byte[] is allocated as we specified in expectedHumongous.
     * 2. byte[] is allocated as humongous if its size is large than a half of region and non-humongous otherwise.
     * It uses WB to obtain the size of created byte[]. Only objects larger than half of region are expected
     * to be humongous.
     *
     * @param arraySize size of allocation
     * @param expectedHumongous expected humongous/non-humongous allocation
     * @return allocated byte array
     */

    private static byte[] allocateAndCheck(int arraySize, boolean expectedHumongous) {
        byte[] storage = new byte[arraySize];
        long objectSize = WHITE_BOX.getObjectSize(storage);
        boolean shouldBeHumongous = objectSize > (REGION_SIZE / 2);

        Asserts.assertEquals(expectedHumongous, shouldBeHumongous, "Despite we expected this object to be "
                + (expectedHumongous ? "humongous" : "non-humongous") + " it appeared otherwise when we checked "
                + "object size - likely test bug; Allocation size = " + arraySize + "; Object size = " + objectSize
                + "; region size = " + REGION_SIZE);

        Asserts.assertEquals(WHITE_BOX.g1IsHumongous(storage), shouldBeHumongous,
                "Object should be allocated as " + (shouldBeHumongous ? "humongous"
                        : "non-humongous") + " but it wasn't; Allocation size = " + arraySize + "; Object size = "
                        + objectSize + "; region size = " + REGION_SIZE);
        return storage;
    }

    public static void main(String[] args) {
        int byteArrayMemoryOverhead = Helpers.detectByteArrayAllocationOverhead();

        // Largest non-humongous byte[]
        int maxByteArrayNonHumongousSize = (REGION_SIZE / 2) - byteArrayMemoryOverhead;

        // Increment for non-humongous testing
        int nonHumongousStep = maxByteArrayNonHumongousSize / NON_HUMONGOUS_DIVIDER;

        // Maximum byte[] that takes one region
        int maxByteArrayOneRegionSize = REGION_SIZE - byteArrayMemoryOverhead;

        // Sizes in regions
        // i,e, 1.0f means one region, 1.5f means one and half region etc
        float[] humongousFactors = {0.8f, 1.0f, 1.2f, 1.5f, 1.7f, 2.0f, 2.5f};

        // Some diagnostic output
        System.out.format("%s started%n", TestHumongousThreshold.class.getName());
        System.out.format("Actual G1 region size %d%n", REGION_SIZE);
        System.out.format("byte[] memory overhead %d%n", byteArrayMemoryOverhead);

        // Non-humongous allocations
        System.out.format("Doing non-humongous allocations%n");

        // Testing allocations with byte[] with length from 0 to MAX_CONTINUOUS_SIZE_CHECK
        System.out.format("Testing allocations with byte[] with length from 0 to %d%n", MAX_CONTINUOUS_SIZE_CHECK);
        for (int i = 0; i < MAX_CONTINUOUS_SIZE_CHECK; ++i) {
            allocateAndCheck(i, false);
        }

        // Testing allocations with byte[] with length from 0 to nonHumongousStep * NON_HUMONGOUS_DIVIDER
        System.out.format("Testing allocations with byte[] with length from 0 to %d with step %d%n",
                nonHumongousStep * NON_HUMONGOUS_DIVIDER, nonHumongousStep);
        for (int i = 0; i < NON_HUMONGOUS_DIVIDER; ++i) {
            allocateAndCheck(i * nonHumongousStep, false);
        }

        // Testing allocations with byte[] of maximum non-humongous length
        System.out.format("Testing allocations with byte[] of maximum non-humongous length %d%n",
                maxByteArrayNonHumongousSize);
        allocateAndCheck(maxByteArrayNonHumongousSize, false);

        // Humongous allocations
        System.out.format("Doing humongous allocations%n");
        // Testing with minimum humongous object
        System.out.format("Testing with byte[] of minimum humongous object %d%n", maxByteArrayNonHumongousSize + 1);
        allocateAndCheck(maxByteArrayNonHumongousSize + 1, true);

        // Testing allocations with byte[] with length from (maxByteArrayNonHumongousSize + 1) to
        // (maxByteArrayNonHumongousSize + 1 + MAX_CONTINUOUS_SIZE_CHECK)
        System.out.format("Testing allocations with byte[] with length from %d to %d%n",
                maxByteArrayNonHumongousSize + 1, maxByteArrayNonHumongousSize + 1 + MAX_CONTINUOUS_SIZE_CHECK);
        for (int i = 0; i < MAX_CONTINUOUS_SIZE_CHECK; ++i) {
            allocateAndCheck(maxByteArrayNonHumongousSize + 1 + i, true);
        }

        // Checking that large (more than a half of region size) objects are humongous
        System.out.format("Checking that large (more than a half of region size) objects are humongous%n");
        for (float factor : humongousFactors) {
            allocateAndCheck((int) (maxByteArrayOneRegionSize * factor), true);
        }
    }
}
@ -0,0 +1,67 @@
/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/*
 * @test SharedStringsAuto
 * @summary Test -Xshare:auto with shared strings.
 * Feature support: G1GC only, compressed oops/kptrs, 64-bit os, not on windows
 * @requires (sun.arch.data.model != "32") & (os.family != "windows")
 * @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
 * @requires (vm.gc=="G1" | vm.gc=="null")
 * @library /testlibrary
 * @modules java.base/sun.misc
 *          java.management
 * @run main SharedStringsRunAuto
 */

import jdk.test.lib.*;
import java.io.File;

public class SharedStringsRunAuto {
    public static void main(String[] args) throws Exception {
        // Dump
        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
            "-XX:+UnlockDiagnosticVMOptions",
            "-XX:SharedArchiveFile=./SharedStringsRunAuto.jsa",
            "-XX:+UseCompressedOops", "-XX:+UseG1GC",
            "-XX:+PrintSharedSpaces",
            "-Xshare:dump");

        new OutputAnalyzer(pb.start())
            .shouldContain("Loading classes to share")
            .shouldContain("Shared string table stats")
            .shouldHaveExitValue(0);

        // Run with -Xshare:auto
        pb = ProcessTools.createJavaProcessBuilder(
            "-XX:+UnlockDiagnosticVMOptions",
            "-XX:SharedArchiveFile=./SharedStringsRunAuto.jsa",
            "-XX:+UseCompressedOops", "-XX:+UseG1GC",
            "-Xshare:auto",
            "-version");

        new OutputAnalyzer(pb.start())
            .shouldMatch("(java|openjdk) version")
            .shouldHaveExitValue(0);
    }
}