Jesper Wilhelmsson 2019-01-02 16:05:13 +01:00
commit 59716b0bb3
85 changed files with 3119 additions and 241 deletions


@ -5648,12 +5648,12 @@ void MacroAssembler::encode_iso_array(Register src, Register dst,
orr(v5, T16B, Vtmp3, Vtmp4);
uzp1(Vtmp1, T16B, Vtmp1, Vtmp2);
uzp1(Vtmp3, T16B, Vtmp3, Vtmp4);
stpq(Vtmp1, Vtmp3, dst);
uzp2(v5, T16B, v4, v5); // high bytes
umov(tmp2, v5, D, 1);
fmovd(tmp1, v5);
orr(tmp1, tmp1, tmp2);
cbnz(tmp1, LOOP_8);
stpq(Vtmp1, Vtmp3, dst);
sub(len, len, 32);
add(dst, dst, 32);
add(src, src, 64);
@ -5671,7 +5671,6 @@ void MacroAssembler::encode_iso_array(Register src, Register dst,
prfm(Address(src, SoftwarePrefetchHintDistance));
uzp1(v4, T16B, Vtmp1, Vtmp2);
uzp1(v5, T16B, Vtmp3, Vtmp4);
stpq(v4, v5, dst);
orr(Vtmp1, T16B, Vtmp1, Vtmp2);
orr(Vtmp3, T16B, Vtmp3, Vtmp4);
uzp2(Vtmp1, T16B, Vtmp1, Vtmp3); // high bytes
@ -5679,6 +5678,7 @@ void MacroAssembler::encode_iso_array(Register src, Register dst,
fmovd(tmp1, Vtmp1);
orr(tmp1, tmp1, tmp2);
cbnz(tmp1, LOOP_8);
stpq(v4, v5, dst);
sub(len, len, 32);
add(dst, dst, 32);
add(src, src, 64);
@ -5693,9 +5693,9 @@ void MacroAssembler::encode_iso_array(Register src, Register dst,
ld1(Vtmp1, T8H, src);
uzp1(Vtmp2, T16B, Vtmp1, Vtmp1); // low bytes
uzp2(Vtmp3, T16B, Vtmp1, Vtmp1); // high bytes
strd(Vtmp2, dst);
fmovd(tmp1, Vtmp3);
cbnz(tmp1, NEXT_1);
strd(Vtmp2, dst);
sub(len, len, 8);
add(dst, dst, 8);
@ -5708,9 +5708,9 @@ void MacroAssembler::encode_iso_array(Register src, Register dst,
cbz(len, DONE);
BIND(NEXT_1);
ldrh(tmp1, Address(post(src, 2)));
strb(tmp1, Address(post(dst, 1)));
tst(tmp1, 0xff00);
br(NE, SET_RESULT);
strb(tmp1, Address(post(dst, 1)));
subs(len, len, 1);
br(GT, NEXT_1);
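The hunks above move the stores to dst (stpq/strd/strb) after the corresponding "high bytes" checks (cbnz/br), so a block containing a non-ISO character is detected before anything is written for it. A hedged, standalone scalar model of what the vectorized intrinsic computes (illustrative only, not HotSpot code):

#include <cstdint>
#include <cstdio>

// Copy the low byte of each UTF-16 unit into dst, stopping before the first
// unit that does not fit in ISO-8859-1. As in the patched assembly, the check
// happens before the store.
static int encode_iso_array_scalar(const uint16_t* src, uint8_t* dst, int len) {
  int i = 0;
  for (; i < len; i++) {
    if (src[i] > 0xFF) {
      break;                   // non-ISO character: caller handles the rest
    }
    dst[i] = (uint8_t)src[i];  // store only after the check succeeded
  }
  return i;                    // number of characters encoded
}

int main() {
  const uint16_t src[] = { 'a', 'b', 0x20AC /* not ISO-8859-1 */, 'c' };
  uint8_t dst[4] = { 0 };
  printf("encoded %d chars\n", encode_iso_array_scalar(src, dst, 4)); // encoded 2 chars
  return 0;
}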


@ -5058,7 +5058,7 @@ jint os::init_2(void) {
// initialize thread priority policy
prio_init();
if (!FLAG_IS_DEFAULT(AllocateHeapAt)) {
if (!FLAG_IS_DEFAULT(AllocateHeapAt) || !FLAG_IS_DEFAULT(AllocateOldGenAt)) {
set_coredump_filter(DAX_SHARED_BIT);
}


@ -370,7 +370,6 @@ JVM_handle_linux_signal(int sig,
if (thread->on_local_stack(addr)) {
// stack overflow
if (thread->in_stack_yellow_reserved_zone(addr)) {
thread->disable_stack_yellow_reserved_zone();
if (thread->thread_state() == _thread_in_Java) {
if (thread->in_stack_reserved_zone(addr)) {
frame fr;
@ -392,9 +391,11 @@ JVM_handle_linux_signal(int sig,
}
// Throw a stack overflow exception. Guard pages will be reenabled
// while unwinding the stack.
thread->disable_stack_yellow_reserved_zone();
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
} else {
// Thread was in the vm or native code. Return and try to finish.
thread->disable_stack_yellow_reserved_zone();
return 1;
}
} else if (thread->in_stack_red_zone(addr)) {


@ -97,7 +97,7 @@ inline void G1ArchiveAllocator::enable_archive_object_check() {
}
_archive_check_enabled = true;
size_t length = Universe::heap()->max_capacity();
size_t length = G1CollectedHeap::heap()->max_reserved_capacity();
_closed_archive_region_map.initialize((HeapWord*)Universe::heap()->base(),
(HeapWord*)Universe::heap()->base() + length,
HeapRegion::GrainBytes);


@ -28,6 +28,7 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1HeterogeneousCollectorPolicy.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/gcArguments.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
@ -156,5 +157,9 @@ void G1Arguments::initialize() {
}
CollectedHeap* G1Arguments::create_heap() {
return create_heap_with_policy<G1CollectedHeap, G1CollectorPolicy>();
if (AllocateOldGenAt != NULL) {
return create_heap_with_policy<G1CollectedHeap, G1HeterogeneousCollectorPolicy>();
} else {
return create_heap_with_policy<G1CollectedHeap, G1CollectorPolicy>();
}
}
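G1Arguments::create_heap now selects the collector policy from the AllocateOldGenAt flag. A minimal, self-contained sketch of that selection pattern, using illustrative stand-in types rather than HotSpot's classes:

#include <cstdio>

// Stand-in types; they only model the selection, not the real heap or policy.
struct G1CollectorPolicy {
  virtual ~G1CollectorPolicy() {}
  virtual bool is_hetero_heap() const { return false; }
};
struct G1HeterogeneousCollectorPolicy : public G1CollectorPolicy {
  virtual bool is_hetero_heap() const { return true; }
};
struct CollectedHeap {
  G1CollectorPolicy* policy;
  explicit CollectedHeap(G1CollectorPolicy* p) : policy(p) {}
};

template <class Heap, class Policy>
static CollectedHeap* create_heap_with_policy() {
  return new Heap(new Policy());
}

// Mirrors the if/else above: a non-NULL AllocateOldGenAt selects the
// heterogeneous policy.
static CollectedHeap* create_heap(const char* allocate_old_gen_at) {
  if (allocate_old_gen_at != NULL) {
    return create_heap_with_policy<CollectedHeap, G1HeterogeneousCollectorPolicy>();
  } else {
    return create_heap_with_policy<CollectedHeap, G1CollectorPolicy>();
  }
}

int main() {
  CollectedHeap* h = create_heap("/mnt/pmem0");  // e.g. -XX:AllocateOldGenAt=/mnt/pmem0
  printf("hetero policy selected: %d\n", h->policy->is_hetero_heap());
  return 0;
}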


@ -63,7 +63,7 @@ G1CardCounts::G1CardCounts(G1CollectedHeap *g1h):
}
void G1CardCounts::initialize(G1RegionToSpaceMapper* mapper) {
assert(_g1h->max_capacity() > 0, "initialization order");
assert(_g1h->max_reserved_capacity() > 0, "initialization order");
assert(_g1h->capacity() == 0, "initialization order");
if (G1ConcRSHotCardLimit > 0) {


@ -161,12 +161,12 @@ HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
// Private methods.
HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
HeapRegion* G1CollectedHeap::new_region(size_t word_size, HeapRegionType type, bool do_expand) {
assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
"the only time we use this to allocate a humongous region is "
"when we are allocating a single humongous region");
HeapRegion* res = _hrm.allocate_free_region(is_old);
HeapRegion* res = _hrm->allocate_free_region(type);
if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
// Currently, only attempts to allocate GC alloc regions set
@ -183,7 +183,7 @@ HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_e
// always expand the heap by an amount aligned to the heap
// region size, the free list should in theory not be empty.
// In either case allocate_free_region() will check for NULL.
res = _hrm.allocate_free_region(is_old);
res = _hrm->allocate_free_region(type);
} else {
_expand_heap_after_alloc_failure = false;
}
@ -330,16 +330,16 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
// Only one region to allocate, try to use a fast path by directly allocating
// from the free lists. Do not try to expand here, we will potentially do that
// later.
HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
HeapRegion* hr = new_region(word_size, HeapRegionType::Humongous, false /* do_expand */);
if (hr != NULL) {
first = hr->hrm_index();
}
} else {
// Policy: Try only empty regions (i.e. already committed first). Maybe we
// are lucky enough to find some.
first = _hrm.find_contiguous_only_empty(obj_regions);
first = _hrm->find_contiguous_only_empty(obj_regions);
if (first != G1_NO_HRM_INDEX) {
_hrm.allocate_free_regions_starting_at(first, obj_regions);
_hrm->allocate_free_regions_starting_at(first, obj_regions);
}
}
@ -347,14 +347,14 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
// Policy: We could not find enough regions for the humongous object in the
// free list. Look through the heap to find a mix of free and uncommitted regions.
// If so, try expansion.
first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
first = _hrm->find_contiguous_empty_or_unavailable(obj_regions);
if (first != G1_NO_HRM_INDEX) {
// We found something. Make sure these regions are committed, i.e. expand
// the heap. Alternatively we could do a defragmentation GC.
log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
word_size * HeapWordSize);
_hrm.expand_at(first, obj_regions, workers());
_hrm->expand_at(first, obj_regions, workers());
g1_policy()->record_new_heap_size(num_regions());
#ifdef ASSERT
@ -365,7 +365,7 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
assert(is_on_master_free_list(hr), "sanity");
}
#endif
_hrm.allocate_free_regions_starting_at(first, obj_regions);
_hrm->allocate_free_regions_starting_at(first, obj_regions);
} else {
// Policy: Potentially trigger a defragmentation GC.
}
@ -554,7 +554,7 @@ void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
MemRegion reserved = _hrm.reserved();
MemRegion reserved = _hrm->reserved();
for (size_t i = 0; i < count; i++) {
if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
return false;
@ -571,7 +571,7 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
assert(count != 0, "No MemRegions provided");
MutexLockerEx x(Heap_lock);
MemRegion reserved = _hrm.reserved();
MemRegion reserved = _hrm->reserved();
HeapWord* prev_last_addr = NULL;
HeapRegion* prev_last_region = NULL;
@ -605,7 +605,7 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
// range ended, and adjust the start address so we don't try to allocate
// the same region again. If the current range is entirely within that
// region, skip it, just adjusting the recorded top.
HeapRegion* start_region = _hrm.addr_to_region(start_address);
HeapRegion* start_region = _hrm->addr_to_region(start_address);
if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
start_address = start_region->end();
if (start_address > last_address) {
@ -615,12 +615,12 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
}
start_region->set_top(start_address);
curr_range = MemRegion(start_address, last_address + 1);
start_region = _hrm.addr_to_region(start_address);
start_region = _hrm->addr_to_region(start_address);
}
// Perform the actual region allocation, exiting if it fails.
// Then note how much new space we have allocated.
if (!_hrm.allocate_containing_regions(curr_range, &commits, workers())) {
if (!_hrm->allocate_containing_regions(curr_range, &commits, workers())) {
return false;
}
increase_used(word_size * HeapWordSize);
@ -632,8 +632,8 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
// Mark each G1 region touched by the range as archive, add it to
// the old set, and set top.
HeapRegion* curr_region = _hrm.addr_to_region(start_address);
HeapRegion* last_region = _hrm.addr_to_region(last_address);
HeapRegion* curr_region = _hrm->addr_to_region(start_address);
HeapRegion* last_region = _hrm->addr_to_region(last_address);
prev_last_region = last_region;
while (curr_region != NULL) {
@ -650,7 +650,7 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
HeapRegion* next_region;
if (curr_region != last_region) {
top = curr_region->end();
next_region = _hrm.next_region_in_heap(curr_region);
next_region = _hrm->next_region_in_heap(curr_region);
} else {
top = last_address + 1;
next_region = NULL;
@ -671,7 +671,7 @@ void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
assert(!is_init_completed(), "Expect to be called at JVM init time");
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
MemRegion reserved = _hrm.reserved();
MemRegion reserved = _hrm->reserved();
HeapWord *prev_last_addr = NULL;
HeapRegion* prev_last_region = NULL;
@ -691,8 +691,8 @@ void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
"Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
p2i(start_address), p2i(prev_last_addr));
HeapRegion* start_region = _hrm.addr_to_region(start_address);
HeapRegion* last_region = _hrm.addr_to_region(last_address);
HeapRegion* start_region = _hrm->addr_to_region(start_address);
HeapRegion* last_region = _hrm->addr_to_region(last_address);
HeapWord* bottom_address = start_region->bottom();
// Check for a range beginning in the same region in which the
@ -708,7 +708,7 @@ void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
guarantee(curr_region->is_archive(),
"Expected archive region at index %u", curr_region->hrm_index());
if (curr_region != last_region) {
curr_region = _hrm.next_region_in_heap(curr_region);
curr_region = _hrm->next_region_in_heap(curr_region);
} else {
curr_region = NULL;
}
@ -757,7 +757,7 @@ void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count, b
assert(!is_init_completed(), "Expect to be called at JVM init time");
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
MemRegion reserved = _hrm.reserved();
MemRegion reserved = _hrm->reserved();
HeapWord* prev_last_addr = NULL;
HeapRegion* prev_last_region = NULL;
size_t size_used = 0;
@ -779,8 +779,8 @@ void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count, b
size_used += ranges[i].byte_size();
prev_last_addr = last_address;
HeapRegion* start_region = _hrm.addr_to_region(start_address);
HeapRegion* last_region = _hrm.addr_to_region(last_address);
HeapRegion* start_region = _hrm->addr_to_region(start_address);
HeapRegion* last_region = _hrm->addr_to_region(last_address);
// Check for ranges that start in the same G1 region in which the previous
// range ended, and adjust the start address so we don't try to free
@ -791,7 +791,7 @@ void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count, b
if (start_address > last_address) {
continue;
}
start_region = _hrm.addr_to_region(start_address);
start_region = _hrm->addr_to_region(start_address);
}
prev_last_region = last_region;
@ -806,11 +806,11 @@ void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count, b
curr_region->set_free();
curr_region->set_top(curr_region->bottom());
if (curr_region != last_region) {
curr_region = _hrm.next_region_in_heap(curr_region);
curr_region = _hrm->next_region_in_heap(curr_region);
} else {
curr_region = NULL;
}
_hrm.shrink_at(curr_index, 1);
_hrm->shrink_at(curr_index, 1);
uncommitted_regions++;
}
@ -1024,6 +1024,8 @@ void G1CollectedHeap::prepare_heap_for_full_collection() {
abandon_collection_set(collection_set());
tear_down_region_sets(false /* free_list_only */);
hrm()->prepare_for_full_collection_start();
}
void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
@ -1035,6 +1037,8 @@ void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
}
void G1CollectedHeap::prepare_heap_for_mutators() {
hrm()->prepare_for_full_collection_end();
// Delete metaspaces for unloaded class loaders and clean up loader_data graph
ClassLoaderDataGraph::purge();
MetaspaceUtils::verify_metrics();
@ -1071,7 +1075,7 @@ void G1CollectedHeap::abort_refinement() {
}
void G1CollectedHeap::verify_after_full_collection() {
_hrm.verify_optional();
_hrm->verify_optional();
_verifier->verify_region_sets_optional();
_verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);
// Clear the previous marking bitmap, if needed for bitmap verification.
@ -1325,7 +1329,7 @@ HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
if (expand(expand_bytes, _workers)) {
_hrm.verify_optional();
_hrm->verify_optional();
_verifier->verify_region_sets_optional();
return attempt_allocation_at_safepoint(word_size,
false /* expect_null_mutator_alloc_region */);
@ -1350,7 +1354,7 @@ bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, do
uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
assert(regions_to_expand > 0, "Must expand by at least one region");
uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers);
uint expanded_by = _hrm->expand_by(regions_to_expand, pretouch_workers);
if (expand_time_ms != NULL) {
*expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
}
@ -1365,7 +1369,7 @@ bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, do
// The expansion of the virtual storage space was unsuccessful.
// Let's see if it was because we ran out of swap.
if (G1ExitOnExpansionFailure &&
_hrm.available() >= regions_to_expand) {
_hrm->available() >= regions_to_expand) {
// We had head room...
vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
}
@ -1380,7 +1384,7 @@ void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
HeapRegion::GrainBytes);
uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
uint num_regions_removed = _hrm->shrink_by(num_regions_to_remove);
size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
@ -1408,7 +1412,7 @@ void G1CollectedHeap::shrink(size_t shrink_bytes) {
shrink_helper(shrink_bytes);
rebuild_region_sets(true /* free_list_only */);
_hrm.verify_optional();
_hrm->verify_optional();
_verifier->verify_region_sets_optional();
}
@ -1486,7 +1490,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
_humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
_bot(NULL),
_listener(),
_hrm(),
_hrm(NULL),
_allocator(NULL),
_verifier(NULL),
_summary_bytes_used(0),
@ -1505,7 +1509,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
_survivor(),
_gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
_gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
_g1_policy(new G1Policy(_gc_timer_stw)),
_g1_policy(G1Policy::create_policy(collector_policy, _gc_timer_stw)),
_heap_sizing_policy(NULL),
_collection_set(this, _g1_policy),
_hot_card_cache(NULL),
@ -1632,7 +1636,7 @@ jint G1CollectedHeap::initialize() {
guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
size_t init_byte_size = collector_policy()->initial_heap_byte_size();
size_t max_byte_size = collector_policy()->max_heap_byte_size();
size_t max_byte_size = g1_collector_policy()->heap_reserved_size_bytes();
size_t heap_alignment = collector_policy()->heap_alignment();
// Ensure that the sizes are properly aligned.
@ -1692,12 +1696,17 @@ jint G1CollectedHeap::initialize() {
ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
size_t page_size = actual_reserved_page_size(heap_rs);
G1RegionToSpaceMapper* heap_storage =
G1RegionToSpaceMapper::create_mapper(g1_rs,
g1_rs.size(),
page_size,
HeapRegion::GrainBytes,
1,
mtJavaHeap);
G1RegionToSpaceMapper::create_heap_mapper(g1_rs,
g1_rs.size(),
page_size,
HeapRegion::GrainBytes,
1,
mtJavaHeap);
if(heap_storage == NULL) {
vm_shutdown_during_initialization("Could not initialize G1 heap");
return JNI_ERR;
}
os::trace_page_sizes("Heap",
collector_policy()->min_heap_byte_size(),
max_byte_size,
@ -1728,7 +1737,9 @@ jint G1CollectedHeap::initialize() {
G1RegionToSpaceMapper* next_bitmap_storage =
create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
_hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
_hrm = HeapRegionManager::create_manager(this, g1_collector_policy());
_hrm->initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
_card_table->initialize(cardtable_storage);
// Do later initialization work for concurrent refinement.
_hot_card_cache->initialize(card_counts_storage);
@ -1743,20 +1754,20 @@ jint G1CollectedHeap::initialize() {
guarantee(g1_rs.base() >= (char*)G1CardTable::card_size, "Java heap must not start within the first card.");
// Also create a G1 rem set.
_g1_rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
_g1_rem_set->initialize(max_capacity(), max_regions());
_g1_rem_set->initialize(max_reserved_capacity(), max_regions());
size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
"too many cards per region");
FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
FreeRegionList::set_unrealistically_long_length(max_expandable_regions() + 1);
_bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
{
HeapWord* start = _hrm.reserved().start();
HeapWord* end = _hrm.reserved().end();
HeapWord* start = _hrm->reserved().start();
HeapWord* end = _hrm->reserved().end();
size_t granularity = HeapRegion::GrainBytes;
_in_cset_fast_test.initialize(start, end, granularity);
@ -1807,7 +1818,7 @@ jint G1CollectedHeap::initialize() {
// Here we allocate the dummy HeapRegion that is required by the
// G1AllocRegion class.
HeapRegion* dummy_region = _hrm.get_dummy_region();
HeapRegion* dummy_region = _hrm->get_dummy_region();
// We'll re-use the same region whether the alloc region will
// require BOT updates or not and, if it doesn't, then a non-young
@ -1927,16 +1938,20 @@ CollectorPolicy* G1CollectedHeap::collector_policy() const {
return _collector_policy;
}
G1CollectorPolicy* G1CollectedHeap::g1_collector_policy() const {
return _collector_policy;
}
SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
return &_soft_ref_policy;
}
size_t G1CollectedHeap::capacity() const {
return _hrm.length() * HeapRegion::GrainBytes;
return _hrm->length() * HeapRegion::GrainBytes;
}
size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
return _hrm.total_free_bytes();
return _hrm->total_free_bytes();
}
void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
@ -2001,6 +2016,18 @@ bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
}
}
bool G1CollectedHeap::should_upgrade_to_full_gc(GCCause::Cause cause) {
if(g1_policy()->force_upgrade_to_full()) {
return true;
} else if (should_do_concurrent_full_gc(_gc_cause)) {
return false;
} else if (has_regions_left_for_allocation()) {
return false;
} else {
return true;
}
}
#ifndef PRODUCT
void G1CollectedHeap::allocate_dummy_regions() {
// Let's fill up most of the region
@ -2151,7 +2178,7 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
}
bool G1CollectedHeap::is_in(const void* p) const {
if (_hrm.reserved().contains(p)) {
if (_hrm->reserved().contains(p)) {
// Given that we know that p is in the reserved space,
// heap_region_containing() should successfully
// return the containing region.
@ -2165,7 +2192,7 @@ bool G1CollectedHeap::is_in(const void* p) const {
#ifdef ASSERT
bool G1CollectedHeap::is_in_exact(const void* p) const {
bool contains = reserved_region().contains(p);
bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
bool available = _hrm->is_available(addr_to_region((HeapWord*)p));
if (contains && available) {
return true;
} else {
@ -2196,18 +2223,18 @@ void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
}
void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
_hrm.iterate(cl);
_hrm->iterate(cl);
}
void G1CollectedHeap::heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
HeapRegionClaimer *hrclaimer,
uint worker_id) const {
_hrm.par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));
_hrm->par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));
}
void G1CollectedHeap::heap_region_par_iterate_from_start(HeapRegionClosure* cl,
HeapRegionClaimer *hrclaimer) const {
_hrm.par_iterate(cl, hrclaimer, 0);
_hrm->par_iterate(cl, hrclaimer, 0);
}
void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
@ -2256,7 +2283,11 @@ size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
}
size_t G1CollectedHeap::max_capacity() const {
return _hrm.reserved().byte_size();
return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
}
size_t G1CollectedHeap::max_reserved_capacity() const {
return _hrm->max_length() * HeapRegion::GrainBytes;
}
jlong G1CollectedHeap::millis_since_last_gc() {
@ -2346,8 +2377,8 @@ void G1CollectedHeap::print_on(outputStream* st) const {
st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
capacity()/K, used_unlocked()/K);
st->print(" [" PTR_FORMAT ", " PTR_FORMAT ")",
p2i(_hrm.reserved().start()),
p2i(_hrm.reserved().end()));
p2i(_hrm->reserved().start()),
p2i(_hrm->reserved().end()));
st->cr();
st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
uint young_regions = young_regions_count();
@ -3130,7 +3161,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// output from the concurrent mark thread interfering with this
// logging output either.
_hrm.verify_optional();
_hrm->verify_optional();
_verifier->verify_region_sets_optional();
TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
@ -3946,7 +3977,7 @@ void G1CollectedHeap::free_region(HeapRegion* hr,
bool locked) {
assert(!hr->is_free(), "the region should not be free");
assert(!hr->is_empty(), "the region should not be empty");
assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
assert(_hrm->is_available(hr->hrm_index()), "region should be committed");
assert(free_list != NULL, "pre-condition");
if (G1VerifyBitmaps) {
@ -3987,7 +4018,7 @@ void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
assert(list != NULL, "list can't be null");
if (!list->is_empty()) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
_hrm.insert_list_into_free_list(list);
_hrm->insert_list_into_free_list(list);
}
}
@ -4520,7 +4551,7 @@ void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
// this is that during a full GC string deduplication needs to know if
// a collected region was young or old when the full GC was initiated.
}
_hrm.remove_all_free_regions();
_hrm->remove_all_free_regions();
}
void G1CollectedHeap::increase_used(size_t bytes) {
@ -4595,7 +4626,7 @@ void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
_survivor.clear();
}
RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
RebuildRegionSetsClosure cl(free_list_only, &_old_set, _hrm);
heap_region_iterate(&cl);
if (!free_list_only) {
@ -4622,7 +4653,7 @@ HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
bool should_allocate = g1_policy()->should_allocate_mutator_region();
if (force || should_allocate) {
HeapRegion* new_alloc_region = new_region(word_size,
false /* is_old */,
HeapRegionType::Eden,
false /* do_expand */);
if (new_alloc_region != NULL) {
set_region_short_lived_locked(new_alloc_region);
@ -4666,13 +4697,19 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, InCSetState d
return NULL;
}
const bool is_survivor = dest.is_young();
HeapRegionType type;
if (dest.is_young()) {
type = HeapRegionType::Survivor;
} else {
type = HeapRegionType::Old;
}
HeapRegion* new_alloc_region = new_region(word_size,
!is_survivor,
type,
true /* do_expand */);
if (new_alloc_region != NULL) {
if (is_survivor) {
if (type.is_survivor()) {
new_alloc_region->set_survivor();
_survivor.add(new_alloc_region);
_verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region);
@ -4704,14 +4741,14 @@ void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
bool expanded = false;
uint index = _hrm.find_highest_free(&expanded);
uint index = _hrm->find_highest_free(&expanded);
if (index != G1_NO_HRM_INDEX) {
if (expanded) {
log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
HeapRegion::GrainWords * HeapWordSize);
}
_hrm.allocate_free_regions_starting_at(index, 1);
_hrm->allocate_free_regions_starting_at(index, 1);
return region_at(index);
}
return NULL;
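With _hrm now a pointer to a manager chosen at initialization, the heap also distinguishes max_capacity() (regions that can actually be committed) from max_reserved_capacity() (the whole reservation). A hedged, standalone arithmetic illustration, assuming -Xmx8g, 4M regions, and the 2 * MaxHeapSize reservation the heterogeneous policy requests:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t G = 1024UL * 1024 * 1024;
  size_t max_heap_byte_size = 8 * G;                    // e.g. -Xmx8g
  size_t grain_bytes        = 4 * 1024 * 1024;          // assumed 4M region size
  size_t reserved           = 2 * max_heap_byte_size;   // dram + nvdimm halves

  size_t max_regions            = reserved / grain_bytes;            // 4096
  size_t max_expandable_regions = max_heap_byte_size / grain_bytes;  // 2048

  printf("max_reserved_capacity = %zu bytes (%zu regions)\n",
         max_regions * grain_bytes, max_regions);
  printf("max_capacity          = %zu bytes (%zu regions)\n",
         max_expandable_regions * grain_bytes, max_expandable_regions);
  return 0;
}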


@ -45,6 +45,7 @@
#include "gc/g1/g1YCTypes.hpp"
#include "gc/g1/heapRegionManager.hpp"
#include "gc/g1/heapRegionSet.hpp"
#include "gc/g1/heterogeneousHeapRegionManager.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcHeapSummary.hpp"
@ -194,7 +195,7 @@ private:
G1RegionMappingChangedListener _listener;
// The sequence of all heap regions in the heap.
HeapRegionManager _hrm;
HeapRegionManager* _hrm;
// Manages all allocations with regions except humongous object allocations.
G1Allocator* _allocator;
@ -267,6 +268,9 @@ private:
// (e) cause == _wb_conc_mark
bool should_do_concurrent_full_gc(GCCause::Cause cause);
// Return true if should upgrade to full gc after an incremental one.
bool should_upgrade_to_full_gc(GCCause::Cause cause);
// indicates whether we are in young or mixed GC mode
G1CollectorState _collector_state;
@ -369,9 +373,9 @@ private:
// Try to allocate a single non-humongous HeapRegion sufficient for
// an allocation of the given word_size. If do_expand is true,
// attempt to expand the heap if necessary to satisfy the allocation
// request. If the region is to be used as an old region or for a
// humongous object, set is_old to true. If not, to false.
HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand);
// request. 'type' takes the type of region to be allocated. (Use constants
// Old, Eden, Humongous, Survivor defined in HeapRegionType.)
HeapRegion* new_region(size_t word_size, HeapRegionType type, bool do_expand);
// Initialize a contiguous set of free regions of length num_regions
// and starting at index first so that they appear as a single
@ -957,10 +961,13 @@ public:
// The current policy object for the collector.
G1Policy* g1_policy() const { return _g1_policy; }
HeapRegionManager* hrm() const { return _hrm; }
const G1CollectionSet* collection_set() const { return &_collection_set; }
G1CollectionSet* collection_set() { return &_collection_set; }
virtual CollectorPolicy* collector_policy() const;
virtual G1CollectorPolicy* g1_collector_policy() const;
virtual SoftRefPolicy* soft_ref_policy();
@ -1009,7 +1016,7 @@ public:
// But G1CollectedHeap doesn't yet support this.
virtual bool is_maximal_no_gc() const {
return _hrm.available() == 0;
return _hrm->available() == 0;
}
// Returns whether there are any regions left in the heap for allocation.
@ -1018,19 +1025,22 @@ public:
}
// The current number of regions in the heap.
uint num_regions() const { return _hrm.length(); }
uint num_regions() const { return _hrm->length(); }
// The max number of regions in the heap.
uint max_regions() const { return _hrm.max_length(); }
uint max_regions() const { return _hrm->max_length(); }
// Max number of regions that can be committed.
uint max_expandable_regions() const { return _hrm->max_expandable_length(); }
// The number of regions that are completely free.
uint num_free_regions() const { return _hrm.num_free_regions(); }
uint num_free_regions() const { return _hrm->num_free_regions(); }
// The number of regions that can be allocated into.
uint num_free_or_available_regions() const { return num_free_regions() + _hrm.available(); }
uint num_free_or_available_regions() const { return num_free_regions() + _hrm->available(); }
MemoryUsage get_auxiliary_data_memory_usage() const {
return _hrm.get_auxiliary_data_memory_usage();
return _hrm->get_auxiliary_data_memory_usage();
}
// The number of regions that are not completely free.
@ -1038,7 +1048,7 @@ public:
#ifdef ASSERT
bool is_on_master_free_list(HeapRegion* hr) {
return _hrm.is_free(hr);
return _hrm->is_free(hr);
}
#endif // ASSERT
@ -1095,13 +1105,13 @@ public:
// Return "TRUE" iff the given object address is in the reserved
// region of g1.
bool is_in_g1_reserved(const void* p) const {
return _hrm.reserved().contains(p);
return _hrm->reserved().contains(p);
}
// Returns a MemRegion that corresponds to the space that has been
// reserved for the heap
MemRegion g1_reserved() const {
return _hrm.reserved();
return _hrm->reserved();
}
virtual bool is_in_closed_subset(const void* p) const;
@ -1227,6 +1237,9 @@ public:
// Print the maximum heap capacity.
virtual size_t max_capacity() const;
// Return the size of reserved memory. Returns a different value from max_capacity() when AllocateOldGenAt is used.
virtual size_t max_reserved_capacity() const;
virtual jlong millis_since_last_gc();


@ -57,13 +57,13 @@ size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
// Inline functions for G1CollectedHeap
// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm->at(index); }
// Return the region with the given index, or NULL if unmapped. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm.at_or_null(index); }
inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm->at_or_null(index); }
inline HeapRegion* G1CollectedHeap::next_region_in_humongous(HeapRegion* hr) const {
return _hrm.next_region_in_humongous(hr);
return _hrm->next_region_in_humongous(hr);
}
inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
@ -74,7 +74,7 @@ inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
}
inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
return _hrm.reserved().start() + index * HeapRegion::GrainWords;
return _hrm->reserved().start() + index * HeapRegion::GrainWords;
}
template <class T>
@ -83,7 +83,7 @@ inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
assert(is_in_g1_reserved((const void*) addr),
"Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
return _hrm.addr_to_region((HeapWord*) addr);
return _hrm->addr_to_region((HeapWord*) addr);
}
template <class T>
@ -266,12 +266,12 @@ inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const {
}
inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
_humongous_reclaim_candidates.set_candidate(region, value);
}
inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
return _humongous_reclaim_candidates.is_candidate(region);
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,3 +55,11 @@ void G1CollectorPolicy::initialize_alignments() {
size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
_heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
}
size_t G1CollectorPolicy::heap_reserved_size_bytes() const {
return _max_heap_byte_size;
}
bool G1CollectorPolicy::is_hetero_heap() const {
return false;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,6 +38,7 @@ protected:
public:
G1CollectorPolicy();
virtual size_t heap_reserved_size_bytes() const;
virtual bool is_hetero_heap() const;
};
#endif // SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP

View File

@ -599,14 +599,14 @@ void G1HeapVerifier::verify_region_sets() {
assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
// First, check the explicit lists.
_g1h->_hrm.verify();
_g1h->_hrm->verify();
// Finally, make sure that the region accounting in the lists is
// consistent with what we see in the heap.
VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, &_g1h->_hrm);
VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, _g1h->_hrm);
_g1h->heap_region_iterate(&cl);
cl.verify_counts(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, &_g1h->_hrm);
cl.verify_counts(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, _g1h->_hrm);
}
void G1HeapVerifier::prepare_for_verify() {
@ -847,7 +847,7 @@ class G1CheckCSetFastTableClosure : public HeapRegionClosure {
bool G1HeapVerifier::check_cset_fast_test() {
G1CheckCSetFastTableClosure cl;
_g1h->_hrm.iterate(&cl);
_g1h->_hrm->iterate(&cl);
return !cl.failures();
}
#endif // PRODUCT

View File

@ -0,0 +1,103 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1HeterogeneousCollectorPolicy.hpp"
#include "logging/log.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/os.hpp"
#include "utilities/formatBuffer.hpp"
const double G1HeterogeneousCollectorPolicy::MaxRamFractionForYoung = 0.8;
size_t G1HeterogeneousCollectorPolicy::MaxMemoryForYoung;
static size_t calculate_reasonable_max_memory_for_young(FormatBuffer<100> &calc_str, double max_ram_fraction_for_young) {
julong phys_mem;
// If MaxRAM is specified, we use that as the maximum physical memory available.
if (FLAG_IS_DEFAULT(MaxRAM)) {
phys_mem = os::physical_memory();
calc_str.append("Physical_Memory");
} else {
phys_mem = (julong)MaxRAM;
calc_str.append("MaxRAM");
}
julong reasonable_max = phys_mem;
// If either MaxRAMFraction or MaxRAMPercentage is specified, we use it to calculate
// a reasonable max size for the young generation.
if (!FLAG_IS_DEFAULT(MaxRAMFraction)) {
reasonable_max = (julong)(phys_mem / MaxRAMFraction);
calc_str.append(" / MaxRAMFraction");
} else if (!FLAG_IS_DEFAULT(MaxRAMPercentage)) {
reasonable_max = (julong)((phys_mem * MaxRAMPercentage) / 100);
calc_str.append(" * MaxRAMPercentage / 100");
} else {
// We use our own fraction to calculate max size of young generation.
reasonable_max = phys_mem * max_ram_fraction_for_young;
calc_str.append(" * %0.2f", max_ram_fraction_for_young);
}
return (size_t)reasonable_max;
}
void G1HeterogeneousCollectorPolicy::initialize_flags() {
FormatBuffer<100> calc_str("");
MaxMemoryForYoung = calculate_reasonable_max_memory_for_young(calc_str, MaxRamFractionForYoung);
if (MaxNewSize > MaxMemoryForYoung) {
if (FLAG_IS_CMDLINE(MaxNewSize)) {
log_warning(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
MaxMemoryForYoung, calc_str.buffer());
} else {
log_info(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s)). "
"Dram usage can be lowered by setting MaxNewSize to a lower value", MaxMemoryForYoung, calc_str.buffer());
}
MaxNewSize = MaxMemoryForYoung;
}
if (NewSize > MaxMemoryForYoung) {
if (FLAG_IS_CMDLINE(NewSize)) {
log_warning(gc, ergo)("Setting NewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
MaxMemoryForYoung, calc_str.buffer());
}
NewSize = MaxMemoryForYoung;
}
// After setting new size flags, call base class initialize_flags()
G1CollectorPolicy::initialize_flags();
}
size_t G1HeterogeneousCollectorPolicy::reasonable_max_memory_for_young() {
return MaxMemoryForYoung;
}
size_t G1HeterogeneousCollectorPolicy::heap_reserved_size_bytes() const {
return 2 * _max_heap_byte_size;
}
bool G1HeterogeneousCollectorPolicy::is_hetero_heap() const {
return true;
}
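A standalone sketch of the young-generation cap computed above (flag plumbing and FormatBuffer logging omitted); the inputs and numbers are illustrative:

#include <cstddef>
#include <cstdio>

// phys_mem stands for os::physical_memory() or MaxRAM; a zero fraction or
// percentage means the corresponding flag was left at its default.
static size_t max_memory_for_young(size_t phys_mem,
                                   unsigned max_ram_fraction,        // MaxRAMFraction
                                   double max_ram_percentage,        // MaxRAMPercentage
                                   double max_ram_fraction_for_young /* 0.8 above */) {
  if (max_ram_fraction > 0) {
    return phys_mem / max_ram_fraction;
  }
  if (max_ram_percentage > 0) {
    return (size_t)((phys_mem * max_ram_percentage) / 100);
  }
  return (size_t)(phys_mem * max_ram_fraction_for_young);
}

int main() {
  const size_t G = 1024UL * 1024 * 1024;
  printf("%zu\n", max_memory_for_young(64 * G, 0, 0.0, 0.8));   // ~51.2 GB (0.8 * dram)
  printf("%zu\n", max_memory_for_young(64 * G, 0, 25.0, 0.8));  // 16 GB (MaxRAMPercentage=25)
  return 0;
}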


@ -0,0 +1,48 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_G1_G1HETEROGENEOUSCOLLECTORPOLICY_HPP
#define SHARE_VM_GC_G1_G1HETEROGENEOUSCOLLECTORPOLICY_HPP
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1HeterogeneousHeapYoungGenSizer.hpp"
class G1HeterogeneousCollectorPolicy : public G1CollectorPolicy {
private:
// Max fraction of dram to use for the young generation when MaxRAMFraction and
// MaxRAMPercentage are not specified on the command line.
static const double MaxRamFractionForYoung;
static size_t MaxMemoryForYoung;
protected:
virtual void initialize_flags();
public:
G1HeterogeneousCollectorPolicy() {}
virtual size_t heap_reserved_size_bytes() const;
virtual bool is_hetero_heap() const;
static size_t reasonable_max_memory_for_young();
};
#endif // SHARE_VM_GC_G1_G1HETEROGENEOUSCOLLECTORPOLICY_HPP


@ -0,0 +1,58 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1HeterogeneousHeapPolicy.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/heterogeneousHeapRegionManager.hpp"
G1HeterogeneousHeapPolicy::G1HeterogeneousHeapPolicy(G1CollectorPolicy* policy, STWGCTimer* gc_timer) :
G1Policy(policy, gc_timer), _manager(NULL) {}
// We call the super class init(), after which we provision young_list_target_length() regions in dram.
void G1HeterogeneousHeapPolicy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
G1Policy::init(g1h, collection_set);
_manager = HeterogeneousHeapRegionManager::manager();
_manager->adjust_dram_regions((uint)young_list_target_length(), G1CollectedHeap::heap()->workers());
}
// After a collection pause, young list target length is updated. So we need to make sure we have enough regions in dram for young gen.
void G1HeterogeneousHeapPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
G1Policy::record_collection_pause_end(pause_time_ms, cards_scanned, heap_used_bytes_before_gc);
_manager->adjust_dram_regions((uint)young_list_target_length(), G1CollectedHeap::heap()->workers());
}
// After a full collection, young list target length is updated. So we need to make sure we have enough regions in dram for young gen.
void G1HeterogeneousHeapPolicy::record_full_collection_end() {
G1Policy::record_full_collection_end();
_manager->adjust_dram_regions((uint)young_list_target_length(), G1CollectedHeap::heap()->workers());
}
bool G1HeterogeneousHeapPolicy::force_upgrade_to_full() {
if (_manager->has_borrowed_regions()) {
return true;
}
return false;
}
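force_upgrade_to_full() above feeds into G1CollectedHeap::should_upgrade_to_full_gc() shown earlier: borrowed dram regions force the upgrade, otherwise a planned concurrent cycle or remaining free regions avoid it. A hedged, standalone restatement of that decision, with plain bools standing in for the real queries:

#include <cstdio>

// Same ordering as the code above: borrowed regions force the upgrade, a
// planned concurrent full GC or remaining free regions avoid it, and an
// exhausted heap falls through to a full GC.
static bool should_upgrade_to_full_gc(bool has_borrowed_regions,        // force_upgrade_to_full()
                                      bool concurrent_full_gc_planned,  // should_do_concurrent_full_gc()
                                      bool regions_left_for_allocation) {
  if (has_borrowed_regions) {
    return true;
  } else if (concurrent_full_gc_planned) {
    return false;
  } else if (regions_left_for_allocation) {
    return false;
  } else {
    return true;
  }
}

int main() {
  printf("%d\n", should_upgrade_to_full_gc(true,  false, true));   // 1: dram regions were borrowed
  printf("%d\n", should_upgrade_to_full_gc(false, false, true));   // 0: still room to allocate
  printf("%d\n", should_upgrade_to_full_gc(false, false, false));  // 1: heap exhausted
  return 0;
}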


@ -0,0 +1,48 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPPOLICY_HPP
#define SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPPOLICY_HPP
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/heterogeneousHeapRegionManager.hpp"
class G1HeterogeneousHeapPolicy : public G1Policy {
// Stash a pointer to the hrm.
HeterogeneousHeapRegionManager* _manager;
public:
G1HeterogeneousHeapPolicy(G1CollectorPolicy* policy, STWGCTimer* gc_timer);
// initialize policy
virtual void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set);
// Record end of an evacuation pause.
virtual void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);
// Record the end of full collection.
virtual void record_full_collection_end();
virtual bool force_upgrade_to_full();
};
#endif // SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPPOLICY_HPP


@ -0,0 +1,51 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1HeterogeneousCollectorPolicy.hpp"
#include "gc/g1/g1HeterogeneousHeapYoungGenSizer.hpp"
#include "gc/g1/heapRegion.hpp"
G1HeterogeneousHeapYoungGenSizer::G1HeterogeneousHeapYoungGenSizer() : G1YoungGenSizer() {
// Will be used later when the min and max young sizes are calculated.
_max_young_length = (uint)(G1HeterogeneousCollectorPolicy::reasonable_max_memory_for_young() / HeapRegion::GrainBytes);
}
// Since the heap is potentially sized to a larger value to account for dram + nvdimm, we need to limit
// the max young gen size to the available dram.
// Call the parent class method first and then adjust the sizes based on the available dram.
void G1HeterogeneousHeapYoungGenSizer::adjust_max_new_size(uint number_of_heap_regions) {
G1YoungGenSizer::adjust_max_new_size(number_of_heap_regions);
adjust_lengths_based_on_dram_memory();
}
void G1HeterogeneousHeapYoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
G1YoungGenSizer::heap_size_changed(new_number_of_heap_regions);
adjust_lengths_based_on_dram_memory();
}
void G1HeterogeneousHeapYoungGenSizer::adjust_lengths_based_on_dram_memory() {
_min_desired_young_length = MIN2(_min_desired_young_length, _max_young_length);
_max_desired_young_length = MIN2(_max_desired_young_length, _max_young_length);
}
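A hedged, standalone model of the clamp applied in adjust_lengths_based_on_dram_memory(): the bounds computed by the base sizer are capped at the dram-derived region count. The values are illustrative:

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t M = 1024UL * 1024;
  size_t grain_bytes          = 4 * M;      // assumed region size
  size_t max_memory_for_young = 1600 * M;   // from the collector policy

  unsigned max_young_length = (unsigned)(max_memory_for_young / grain_bytes);  // 400 regions

  unsigned min_desired = 50, max_desired = 600;            // example values from G1YoungGenSizer
  min_desired = std::min(min_desired, max_young_length);   // unchanged: 50
  max_desired = std::min(max_desired, max_young_length);   // clamped to 400

  printf("young gen bounds: %u..%u regions\n", min_desired, max_desired);
  return 0;
}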


@ -0,0 +1,51 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPYOUNGGENSIZER_HPP
#define SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPYOUNGGENSIZER_HPP
#include "gc/g1/g1YoungGenSizer.hpp"
// This class prevents the size of the young generation of the G1 heap from exceeding
// the dram memory available. If set on the command line, MaxRAM and MaxRAMFraction/MaxRAMPercentage
// are used to determine the maximum size to which the young generation can grow.
// Otherwise the maximum size is set to 80% of the dram available in the system.
class G1HeterogeneousHeapYoungGenSizer : public G1YoungGenSizer {
private:
// Maximum number of regions that the young generation can grow to. Calculated in the constructor.
uint _max_young_length;
void adjust_lengths_based_on_dram_memory();
public:
G1HeterogeneousHeapYoungGenSizer();
// Calculate the maximum length of the young gen given the number of regions
// depending on the sizing algorithm.
virtual void adjust_max_new_size(uint number_of_heap_regions);
virtual void heap_size_changed(uint new_number_of_heap_regions);
};
#endif // SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPYOUNGGENSIZER_HPP


@ -100,6 +100,12 @@ size_t G1PageBasedVirtualSpace::uncommitted_size() const {
return reserved_size() - committed_size();
}
void G1PageBasedVirtualSpace::commit_and_set_special() {
commit_internal(addr_to_page_index(_low_boundary), addr_to_page_index(_high_boundary));
_special = true;
_dirty.initialize(reserved_size()/_page_size);
}
size_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
return (addr - _low_boundary) / _page_size;
}


@ -136,6 +136,8 @@ class G1PageBasedVirtualSpace {
// Memory left to use/expand in this virtual space.
size_t uncommitted_size() const;
void commit_and_set_special();
bool contains(const void* p) const;
MemRegion reserved() {


@ -29,6 +29,7 @@
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1HeterogeneousHeapPolicy.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1IHOPControl.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
@ -46,7 +47,7 @@
#include "utilities/growableArray.hpp"
#include "utilities/pair.hpp"
G1Policy::G1Policy(STWGCTimer* gc_timer) :
G1Policy::G1Policy(G1CollectorPolicy* policy, STWGCTimer* gc_timer) :
_predictor(G1ConfidencePercent / 100.0),
_analytics(new G1Analytics(&_predictor)),
_remset_tracker(),
@ -62,7 +63,7 @@ G1Policy::G1Policy(STWGCTimer* gc_timer) :
_survivor_surv_rate_group(new SurvRateGroup()),
_reserve_factor((double) G1ReservePercent / 100.0),
_reserve_regions(0),
_young_gen_sizer(),
_young_gen_sizer(G1YoungGenSizer::create_gen_sizer(policy)),
_free_regions_at_end_of_collection(0),
_max_rs_lengths(0),
_rs_lengths_prediction(0),
@ -83,6 +84,15 @@ G1Policy::G1Policy(STWGCTimer* gc_timer) :
G1Policy::~G1Policy() {
delete _ihop_control;
delete _young_gen_sizer;
}
G1Policy* G1Policy::create_policy(G1CollectorPolicy* policy, STWGCTimer* gc_timer_stw) {
if (policy->is_hetero_heap()) {
return new G1HeterogeneousHeapPolicy(policy, gc_timer_stw);
} else {
return new G1Policy(policy, gc_timer_stw);
}
}
G1CollectorState* G1Policy::collector_state() const { return _g1h->collector_state(); }
@ -94,9 +104,9 @@ void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
assert(Heap_lock->owned_by_self(), "Locking discipline.");
if (!adaptive_young_list_length()) {
_young_list_fixed_length = _young_gen_sizer.min_desired_young_length();
_young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
}
_young_gen_sizer.adjust_max_new_size(_g1h->max_regions());
_young_gen_sizer->adjust_max_new_size(_g1h->max_expandable_regions());
_free_regions_at_end_of_collection = _g1h->num_free_regions();
@ -176,7 +186,7 @@ void G1Policy::record_new_heap_size(uint new_number_of_regions) {
// smaller than 1.0) we'll get 1.
_reserve_regions = (uint) ceil(reserve_regions_d);
_young_gen_sizer.heap_size_changed(new_number_of_regions);
_young_gen_sizer->heap_size_changed(new_number_of_regions);
_ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
}
@ -195,14 +205,14 @@ uint G1Policy::calculate_young_list_desired_min_length(uint base_min_length) con
}
desired_min_length += base_min_length;
// make sure we don't go below any user-defined minimum bound
return MAX2(_young_gen_sizer.min_desired_young_length(), desired_min_length);
return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
}
uint G1Policy::calculate_young_list_desired_max_length() const {
// Here, we might want to also take into account any additional
// constraints (i.e., user-defined minimum bound). Currently, we
// effectively don't set this bound.
return _young_gen_sizer.max_desired_young_length();
return _young_gen_sizer->max_desired_young_length();
}
uint G1Policy::update_young_list_max_and_target_length() {
@ -218,6 +228,7 @@ uint G1Policy::update_young_list_max_and_target_length(size_t rs_lengths) {
uint G1Policy::update_young_list_target_length(size_t rs_lengths) {
YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
_young_list_target_length = young_lengths.first;
return young_lengths.second;
}
@ -900,7 +911,7 @@ bool G1Policy::can_expand_young_list() const {
}
bool G1Policy::adaptive_young_list_length() const {
return _young_gen_sizer.adaptive_young_list_length();
return _young_gen_sizer->adaptive_young_list_length();
}
size_t G1Policy::desired_survivor_size(uint max_regions) const {


@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_G1_G1POLICY_HPP
#define SHARE_VM_GC_G1_G1POLICY_HPP
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1InCSetState.hpp"
@ -91,7 +92,7 @@ class G1Policy: public CHeapObj<mtGC> {
// for the first time during initialization.
uint _reserve_regions;
G1YoungGenSizer _young_gen_sizer;
G1YoungGenSizer* _young_gen_sizer;
uint _free_regions_at_end_of_collection;
@ -282,10 +283,12 @@ private:
void abort_time_to_mixed_tracking();
public:
G1Policy(STWGCTimer* gc_timer);
G1Policy(G1CollectorPolicy* policy, STWGCTimer* gc_timer);
virtual ~G1Policy();
static G1Policy* create_policy(G1CollectorPolicy* policy, STWGCTimer* gc_timer_stw);
G1CollectorState* collector_state() const;
G1GCPhaseTimes* phase_times() const { return _phase_times; }
@ -298,7 +301,7 @@ public:
// This should be called after the heap is resized.
void record_new_heap_size(uint new_number_of_regions);
void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set);
virtual void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set);
void note_gc_start();
@ -308,11 +311,11 @@ public:
// Record the start and end of an evacuation pause.
void record_collection_pause_start(double start_time_sec);
void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);
virtual void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);
// Record the start and end of a full collection.
void record_full_collection_start();
void record_full_collection_end();
virtual void record_full_collection_end();
// Must currently be called while the world is stopped.
void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);
@ -432,6 +435,10 @@ public:
void update_max_gc_locker_expansion();
void update_survivors_policy();
virtual bool force_upgrade_to_full() {
return false;
}
};
#endif // SHARE_VM_GC_G1_G1POLICY_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,11 +25,15 @@
#include "precompiled.hpp"
#include "gc/g1/g1BiasedArray.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/java.hpp"
#include "runtime/os.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/formatBuffer.hpp"
G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
size_t used_size,
@ -170,16 +174,156 @@ void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions, b
}
}
static bool map_nvdimm_space(ReservedSpace rs) {
assert(AllocateOldGenAt != NULL, "");
int _backing_fd = os::create_file_for_heap(AllocateOldGenAt);
if (_backing_fd == -1) {
log_error(gc, init)("Could not create file for Old generation at location %s", AllocateOldGenAt);
return false;
}
// commit this memory in nv-dimm
char* ret = os::attempt_reserve_memory_at(rs.size(), rs.base(), _backing_fd);
if (ret != rs.base()) {
if (ret != NULL) {
os::unmap_memory(rs.base(), rs.size());
}
log_error(gc, init)("Error in mapping Old Gen to given AllocateOldGenAt = %s", AllocateOldGenAt);
os::close(_backing_fd);
return false;
}
os::close(_backing_fd);
return true;
}
G1RegionToHeteroSpaceMapper::G1RegionToHeteroSpaceMapper(ReservedSpace rs,
size_t actual_size,
size_t page_size,
size_t alloc_granularity,
size_t commit_factor,
MemoryType type) :
G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
_rs(rs),
_num_committed_dram(0),
_num_committed_nvdimm(0),
_page_size(page_size),
_commit_factor(commit_factor),
_type(type) {
assert(actual_size == 2 * MaxHeapSize, "For 2-way heterogeneous heap, reserved space is two times MaxHeapSize");
}
bool G1RegionToHeteroSpaceMapper::initialize() {
// Since we need to re-map the reserved space ('Xmx' to nv-dimm and another 'Xmx' to dram), we need to release the reserved memory first.
// Because on some OSes (e.g. Windows) you cannot do a file mapping on memory reserved with regular mapping.
os::release_memory(_rs.base(), _rs.size());
// First half of size Xmx is for nv-dimm.
ReservedSpace rs_nvdimm = _rs.first_part(MaxHeapSize);
assert(rs_nvdimm.base() == _rs.base(), "We should get the same base address");
// Second half of reserved memory is mapped to dram.
ReservedSpace rs_dram = _rs.last_part(MaxHeapSize);
assert(rs_dram.size() == rs_nvdimm.size() && rs_nvdimm.size() == MaxHeapSize, "They all should be same");
// Reserve dram memory
char* base = os::attempt_reserve_memory_at(rs_dram.size(), rs_dram.base());
if (base != rs_dram.base()) {
if (base != NULL) {
os::release_memory(base, rs_dram.size());
}
log_error(gc, init)("Error in re-mapping memory on dram during G1 heterogenous memory initialization");
return false;
}
// We reserve and commit this entire space to NV-DIMM.
if (!map_nvdimm_space(rs_nvdimm)) {
log_error(gc, init)("Error in re-mapping memory to nv-dimm during G1 heterogenous memory initialization");
return false;
}
if (_region_granularity >= (_page_size * _commit_factor)) {
_dram_mapper = new G1RegionsLargerThanCommitSizeMapper(rs_dram, rs_dram.size(), _page_size, _region_granularity, _commit_factor, _type);
} else {
_dram_mapper = new G1RegionsSmallerThanCommitSizeMapper(rs_dram, rs_dram.size(), _page_size, _region_granularity, _commit_factor, _type);
}
_start_index_of_nvdimm = 0;
_start_index_of_dram = (uint)(rs_nvdimm.size() / _region_granularity);
return true;
}
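For intuition, here is a minimal standalone sketch (not part of the patch, and not HotSpot code) of the address-space split performed above; the 1 GB heap and 1 MB region size are hypothetical stand-ins for MaxHeapSize and HeapRegion::GrainBytes:
// Standalone illustration of the 2 * MaxHeapSize split used by
// G1RegionToHeteroSpaceMapper::initialize(): first half file-mapped to
// nv-dimm, second half re-reserved in dram.
#include <cstdio>
#include <cstddef>
int main() {
  const size_t max_heap_size      = 1024u * 1024 * 1024; // hypothetical MaxHeapSize (1 GB)
  const size_t region_granularity = 1u * 1024 * 1024;    // hypothetical HeapRegion::GrainBytes (1 MB)
  const size_t reserved     = 2 * max_heap_size;          // total reservation
  const size_t nvdimm_bytes = max_heap_size;              // rs.first_part(MaxHeapSize)
  const size_t dram_bytes   = reserved - nvdimm_bytes;    // rs.last_part(MaxHeapSize)
  const unsigned start_index_of_nvdimm = 0;
  const unsigned start_index_of_dram   = (unsigned)(nvdimm_bytes / region_granularity); // 1024
  printf("nv-dimm: %zu bytes, region indexes [%u, %u)\n",
         nvdimm_bytes, start_index_of_nvdimm, start_index_of_dram);
  printf("dram   : %zu bytes, region indexes [%u, %u)\n",
         dram_bytes, start_index_of_dram, 2 * start_index_of_dram);
  return 0;
}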
void G1RegionToHeteroSpaceMapper::commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
uint end_idx = (start_idx + (uint)num_regions - 1);
uint num_dram = end_idx >= _start_index_of_dram ? MIN2((end_idx - _start_index_of_dram + 1), (uint)num_regions) : 0;
uint num_nvdimm = (uint)num_regions - num_dram;
if (num_nvdimm > 0) {
// We do not need to commit nv-dimm regions, since they are committed in the beginning.
_num_committed_nvdimm += num_nvdimm;
}
if (num_dram > 0) {
_dram_mapper->commit_regions(start_idx > _start_index_of_dram ? (start_idx - _start_index_of_dram) : 0, num_dram, pretouch_gang);
_num_committed_dram += num_dram;
}
}
void G1RegionToHeteroSpaceMapper::uncommit_regions(uint start_idx, size_t num_regions) {
uint end_idx = (start_idx + (uint)num_regions - 1);
uint num_dram = end_idx >= _start_index_of_dram ? MIN2((end_idx - _start_index_of_dram + 1), (uint)num_regions) : 0;
uint num_nvdimm = (uint)num_regions - num_dram;
if (num_nvdimm > 0) {
// We do not uncommit memory for nv-dimm regions.
_num_committed_nvdimm -= num_nvdimm;
}
if (num_dram > 0) {
_dram_mapper->uncommit_regions(start_idx > _start_index_of_dram ? (start_idx - _start_index_of_dram) : 0, num_dram);
_num_committed_dram -= num_dram;
}
}
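The commit and uncommit paths above split a request by region index against _start_index_of_dram. A small standalone sketch of that arithmetic (not HotSpot code), assuming a hypothetical dram start index of 8:
#include <algorithm>
#include <cstdio>
// Split a request covering [start_idx, start_idx + num_regions) into the part
// below start_index_of_dram (backed by nv-dimm) and the part at or above it
// (backed by dram), mirroring the index math in commit_regions()/uncommit_regions().
static void split_request(unsigned start_idx, unsigned num_regions,
                          unsigned start_index_of_dram) {
  unsigned end_idx  = start_idx + num_regions - 1;
  unsigned num_dram = end_idx >= start_index_of_dram
      ? std::min(end_idx - start_index_of_dram + 1, num_regions) : 0;
  unsigned num_nvdimm = num_regions - num_dram;
  printf("request [%u, %u]: %u nv-dimm region(s), %u dram region(s)\n",
         start_idx, end_idx, num_nvdimm, num_dram);
}
int main() {
  split_request(5, 10, 8);  // straddles the boundary: 3 nv-dimm, 7 dram
  split_request(0, 4, 8);   // entirely below the boundary: 4 nv-dimm, 0 dram
  return 0;
}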
uint G1RegionToHeteroSpaceMapper::num_committed_dram() const {
return _num_committed_dram;
}
uint G1RegionToHeteroSpaceMapper::num_committed_nvdimm() const {
return _num_committed_nvdimm;
}
G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_heap_mapper(ReservedSpace rs,
size_t actual_size,
size_t page_size,
size_t region_granularity,
size_t commit_factor,
MemoryType type) {
if (AllocateOldGenAt != NULL) {
G1RegionToHeteroSpaceMapper* mapper = new G1RegionToHeteroSpaceMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
if (!mapper->initialize()) {
delete mapper;
return NULL;
}
return (G1RegionToSpaceMapper*)mapper;
} else {
return create_mapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
}
}
G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
size_t actual_size,
size_t page_size,
size_t region_granularity,
size_t commit_factor,
MemoryType type) {
if (region_granularity >= (page_size * commit_factor)) {
return new G1RegionsLargerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
} else {
return new G1RegionsSmallerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
}
}
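The choice between the two mappers above is just a comparison of the region size against the amount of memory committed at a time. A standalone sketch with hypothetical sizes (not HotSpot code):
#include <cstdio>
#include <cstddef>
// Mirror the create_mapper() decision: if a region spans at least one commit
// chunk (page_size * commit_factor), regions can be committed independently;
// otherwise several regions share one chunk.
static const char* mapper_kind(size_t region_granularity, size_t page_size, size_t commit_factor) {
  return region_granularity >= page_size * commit_factor
      ? "G1RegionsLargerThanCommitSizeMapper"
      : "G1RegionsSmallerThanCommitSizeMapper";
}
int main() {
  printf("%s\n", mapper_kind(1024 * 1024, 4 * 1024, 1));        // 1 MB regions, 4 KB pages
  printf("%s\n", mapper_kind(1024 * 1024, 2 * 1024 * 1024, 1)); // 1 MB regions, 2 MB large pages
  return 0;
}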
void G1RegionToSpaceMapper::commit_and_set_special() {
_storage.commit_and_set_special();
}

View File

@ -70,6 +70,7 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
return _commit_map.at(idx);
}
void commit_and_set_special();
virtual void commit_regions(uint start_idx, size_t num_regions = 1, WorkGang* pretouch_workers = NULL) = 0;
virtual void uncommit_regions(uint start_idx, size_t num_regions = 1) = 0;
@ -87,6 +88,37 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
size_t region_granularity,
size_t byte_translation_factor,
MemoryType type);
static G1RegionToSpaceMapper* create_heap_mapper(ReservedSpace rs,
size_t actual_size,
size_t page_size,
size_t region_granularity,
size_t byte_translation_factor,
MemoryType type);
};
// G1RegionToSpaceMapper implementation where
// part of space is mapped to dram and part to nv-dimm
class G1RegionToHeteroSpaceMapper : public G1RegionToSpaceMapper {
private:
size_t _pages_per_region;
ReservedSpace _rs;
G1RegionToSpaceMapper* _dram_mapper;
uint _num_committed_dram;
uint _num_committed_nvdimm;
uint _start_index_of_nvdimm;
uint _start_index_of_dram;
size_t _page_size;
size_t _commit_factor;
MemoryType _type;
public:
G1RegionToHeteroSpaceMapper(ReservedSpace rs, size_t used_size, size_t page_size, size_t region_granularity, size_t commit_factor, MemoryType type);
bool initialize();
uint num_committed_dram() const;
uint num_committed_nvdimm() const;
virtual void commit_regions(uint start_idx, size_t num_regions = 1, WorkGang* pretouch_workers = NULL);
virtual void uncommit_regions(uint start_idx, size_t num_regions = 1);
};
#endif // SHARE_VM_GC_G1_G1REGIONTOSPACEMAPPER_HPP

View File

@ -138,8 +138,8 @@ void VM_G1CollectForAllocation::doit() {
// kind of GC.
_result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
} else {
bool should_upgrade_to_full = !g1h->should_do_concurrent_full_gc(_gc_cause) &&
!g1h->has_regions_left_for_allocation();
bool should_upgrade_to_full = g1h->should_upgrade_to_full_gc(_gc_cause);
if (should_upgrade_to_full) {
// There has been a request to perform a GC to free some space. We have no
// information on how much memory has been asked for. In case there are

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,12 +23,14 @@
*/
#include "precompiled.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1HeterogeneousHeapYoungGenSizer.hpp"
#include "gc/g1/g1YoungGenSizer.hpp"
#include "gc/g1/heapRegion.hpp"
#include "logging/log.hpp"
G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults),
_min_desired_young_length(0), _max_desired_young_length(0), _adaptive_size(true) {
_adaptive_size(true), _min_desired_young_length(0), _max_desired_young_length(0) {
if (FLAG_IS_CMDLINE(NewRatio)) {
if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
@ -127,3 +129,11 @@ void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
recalculate_min_max_young_length(new_number_of_heap_regions, &_min_desired_young_length,
&_max_desired_young_length);
}
G1YoungGenSizer* G1YoungGenSizer::create_gen_sizer(G1CollectorPolicy* policy) {
if (policy->is_hetero_heap()) {
return new G1HeterogeneousHeapYoungGenSizer();
} else {
return new G1YoungGenSizer();
}
}

View File

@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_G1_G1YOUNGGENSIZER_HPP
#define SHARE_VM_GC_G1_G1YOUNGGENSIZER_HPP
#include "gc/g1/g1CollectorPolicy.hpp"
#include "utilities/globalDefinitions.hpp"
// There are three command line options related to the young gen size:
@ -63,7 +64,7 @@
//
// NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
// combined with either NewSize or MaxNewSize. (A warning message is printed.)
class G1YoungGenSizer {
class G1YoungGenSizer : public CHeapObj<mtGC> {
private:
enum SizerKind {
SizerDefaults,
@ -73,8 +74,6 @@ private:
SizerNewRatio
};
SizerKind _sizer_kind;
uint _min_desired_young_length;
uint _max_desired_young_length;
// False when using a fixed young generation size due to command-line options,
// true otherwise.
@ -87,13 +86,17 @@ private:
// given the number of heap regions depending on the kind of sizing algorithm.
void recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length);
protected:
uint _min_desired_young_length;
uint _max_desired_young_length;
public:
G1YoungGenSizer();
// Calculate the maximum length of the young gen given the number of regions
// depending on the sizing algorithm.
void adjust_max_new_size(uint number_of_heap_regions);
virtual void adjust_max_new_size(uint number_of_heap_regions);
void heap_size_changed(uint new_number_of_heap_regions);
virtual void heap_size_changed(uint new_number_of_heap_regions);
uint min_desired_young_length() const {
return _min_desired_young_length;
}
@ -104,6 +107,8 @@ public:
bool adaptive_young_list_length() const {
return _adaptive_size;
}
static G1YoungGenSizer* create_gen_sizer(G1CollectorPolicy* policy);
};
#endif // SHARE_VM_GC_G1_G1YOUNGGENSIZER_HPP

View File

@ -317,5 +317,15 @@
"above this value cancels a given periodic GC. A value of zero " \
"disables this check.") \
range(0.0, (double)max_uintx) \
\
experimental(uintx, G1YoungExpansionBufferPercent, 10, \
"When heterogenous heap is enabled by AllocateOldGenAt " \
"option, after every GC, young gen is re-sized which " \
"involves system calls to commit/uncommit memory. To " \
"reduce these calls, we keep a buffer of extra regions to " \
"absorb small changes in young gen length. This flag takes " \
"the buffer size as an percentage of young gen length") \
range(0, 100) \
#endif // SHARE_VM_GC_G1_G1_GLOBALS_HPP

View File

@ -28,6 +28,8 @@
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/heterogeneousHeapRegionManager.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "memory/allocation.hpp"
#include "utilities/bitMap.inline.hpp"
@ -54,18 +56,25 @@ public:
};
HeapRegionManager::HeapRegionManager() :
_regions(), _heap_mapper(NULL),
_prev_bitmap_mapper(NULL),
_next_bitmap_mapper(NULL),
_bot_mapper(NULL),
_cardtable_mapper(NULL),
_card_counts_mapper(NULL),
_free_list("Free list", new MasterFreeRegionListChecker()),
_available_map(mtGC),
_num_committed(0),
_allocated_heapregions_length(0)
_allocated_heapregions_length(0),
_regions(), _heap_mapper(NULL),
_prev_bitmap_mapper(NULL),
_next_bitmap_mapper(NULL),
_free_list("Free list", new MasterFreeRegionListChecker())
{ }
HeapRegionManager* HeapRegionManager::create_manager(G1CollectedHeap* heap, G1CollectorPolicy* policy) {
if (policy->is_hetero_heap()) {
return new HeterogeneousHeapRegionManager((uint)(policy->max_heap_byte_size() / HeapRegion::GrainBytes) /*heap size as num of regions*/);
}
return new HeapRegionManager();
}
void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
G1RegionToSpaceMapper* prev_bitmap,
G1RegionToSpaceMapper* next_bitmap,
@ -514,7 +523,7 @@ void HeapRegionManager::verify_optional() {
#endif // PRODUCT
HeapRegionClaimer::HeapRegionClaimer(uint n_workers) :
_n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm._allocated_heapregions_length), _claims(NULL) {
_n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm->_allocated_heapregions_length), _claims(NULL) {
assert(n_workers > 0, "Need at least one worker.");
uint* new_claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC);
memset(new_claims, Unclaimed, sizeof(*_claims) * _n_regions);

View File

@ -26,8 +26,10 @@
#define SHARE_VM_GC_G1_HEAPREGIONMANAGER_HPP
#include "gc/g1/g1BiasedArray.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "gc/g1/heapRegionSet.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "services/memoryUsage.hpp"
class HeapRegion;
@ -71,17 +73,10 @@ class HeapRegionManager: public CHeapObj<mtGC> {
friend class VMStructs;
friend class HeapRegionClaimer;
G1HeapRegionTable _regions;
G1RegionToSpaceMapper* _heap_mapper;
G1RegionToSpaceMapper* _prev_bitmap_mapper;
G1RegionToSpaceMapper* _next_bitmap_mapper;
G1RegionToSpaceMapper* _bot_mapper;
G1RegionToSpaceMapper* _cardtable_mapper;
G1RegionToSpaceMapper* _card_counts_mapper;
FreeRegionList _free_list;
// Each bit in this bitmap indicates that the corresponding region is available
// for allocation.
CHeapBitMap _available_map;
@ -95,11 +90,8 @@ class HeapRegionManager: public CHeapObj<mtGC> {
HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
HeapWord* heap_end() const {return _regions.end_address_mapped(); }
void make_regions_available(uint index, uint num_regions = 1, WorkGang* pretouch_gang = NULL);
// Pass down commit calls to the VirtualSpace.
void commit_regions(uint index, size_t num_regions = 1, WorkGang* pretouch_gang = NULL);
void uncommit_regions(uint index, size_t num_regions = 1);
// Notify other data structures about change in the heap layout.
void update_committed_space(HeapWord* old_end, HeapWord* new_end);
@ -117,6 +109,16 @@ class HeapRegionManager: public CHeapObj<mtGC> {
// the heap. Returns the length of the sequence found. If this value is zero, no
// sequence could be found, otherwise res_idx contains the start index of this range.
uint find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const;
protected:
G1HeapRegionTable _regions;
G1RegionToSpaceMapper* _heap_mapper;
G1RegionToSpaceMapper* _prev_bitmap_mapper;
G1RegionToSpaceMapper* _next_bitmap_mapper;
FreeRegionList _free_list;
void make_regions_available(uint index, uint num_regions = 1, WorkGang* pretouch_gang = NULL);
void uncommit_regions(uint index, size_t num_regions = 1);
// Allocate a new HeapRegion for the given index.
HeapRegion* new_heap_region(uint hrm_index);
#ifdef ASSERT
@ -127,18 +129,25 @@ public:
// Empty constructor, we'll initialize it with the initialize() method.
HeapRegionManager();
void initialize(G1RegionToSpaceMapper* heap_storage,
G1RegionToSpaceMapper* prev_bitmap,
G1RegionToSpaceMapper* next_bitmap,
G1RegionToSpaceMapper* bot,
G1RegionToSpaceMapper* cardtable,
G1RegionToSpaceMapper* card_counts);
static HeapRegionManager* create_manager(G1CollectedHeap* heap, G1CollectorPolicy* policy);
virtual void initialize(G1RegionToSpaceMapper* heap_storage,
G1RegionToSpaceMapper* prev_bitmap,
G1RegionToSpaceMapper* next_bitmap,
G1RegionToSpaceMapper* bot,
G1RegionToSpaceMapper* cardtable,
G1RegionToSpaceMapper* card_counts);
// Prepare heap regions before and after full collection.
// Nothing to be done in this class.
virtual void prepare_for_full_collection_start() {}
virtual void prepare_for_full_collection_end() {}
// Return the "dummy" region used for G1AllocRegion. This is currently a hardwired
// new HeapRegion that owns HeapRegion at index 0. Since at the moment we commit
// the heap from the lowest address, this region (and its associated data
// structures) are available and we do not need to check further.
HeapRegion* get_dummy_region() { return new_heap_region(0); }
virtual HeapRegion* get_dummy_region() { return new_heap_region(0); }
// Return the HeapRegion at the given index. Assume that the index
// is valid.
@ -167,8 +176,8 @@ public:
_free_list.add_ordered(list);
}
HeapRegion* allocate_free_region(bool is_old) {
HeapRegion* hr = _free_list.remove_region(is_old);
virtual HeapRegion* allocate_free_region(HeapRegionType type) {
HeapRegion* hr = _free_list.remove_region(!type.is_young());
if (hr != NULL) {
assert(hr->next() == NULL, "Single region should not have next");
@ -202,6 +211,9 @@ public:
// Return the maximum number of regions in the heap.
uint max_length() const { return (uint)_regions.length(); }
// Return maximum number of regions that heap can expand to.
virtual uint max_expandable_length() const { return (uint)_regions.length(); }
MemoryUsage get_auxiliary_data_memory_usage() const;
MemRegion reserved() const { return MemRegion(heap_bottom(), heap_end()); }
@ -210,26 +222,26 @@ public:
// HeapRegions, or re-use existing ones. Returns the number of regions the
// sequence was expanded by. If a HeapRegion allocation fails, the resulting
// number of regions might be smaller than what's desired.
uint expand_by(uint num_regions, WorkGang* pretouch_workers);
virtual uint expand_by(uint num_regions, WorkGang* pretouch_workers);
// Makes sure that the regions from start to start+num_regions-1 are available
// for allocation. Returns the number of regions that were committed to achieve
// this.
uint expand_at(uint start, uint num_regions, WorkGang* pretouch_workers);
virtual uint expand_at(uint start, uint num_regions, WorkGang* pretouch_workers);
// Find a contiguous set of empty regions of length num. Returns the start index of
// that set, or G1_NO_HRM_INDEX.
uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); }
virtual uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); }
// Find a contiguous set of empty or unavailable regions of length num. Returns the
// start index of that set, or G1_NO_HRM_INDEX.
uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); }
virtual uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); }
HeapRegion* next_region_in_heap(const HeapRegion* r) const;
// Find the highest free or uncommitted region in the reserved heap,
// and if uncommitted, commit it. If none are available, return G1_NO_HRM_INDEX.
// Set the 'expanded' boolean true if a new region was committed.
uint find_highest_free(bool* expanded);
virtual uint find_highest_free(bool* expanded);
// Allocate the regions that contain the address range specified, committing the
// regions if necessary. Return false if any of the regions is already committed
@ -244,13 +256,13 @@ public:
// Uncommit up to num_regions_to_remove regions that are completely free.
// Return the actual number of uncommitted regions.
uint shrink_by(uint num_regions_to_remove);
virtual uint shrink_by(uint num_regions_to_remove);
// Uncommit a number of regions starting at the specified index, which must be available,
// empty, and free.
void shrink_at(uint index, size_t num_regions);
void verify();
virtual void verify();
// Do some sanity checking.
void verify_optional() PRODUCT_RETURN;

View File

@ -234,6 +234,21 @@ void FreeRegionList::remove_starting_at(HeapRegion* first, uint num_regions) {
verify_optional();
}
uint FreeRegionList::num_of_regions_in_range(uint start, uint end) const {
HeapRegion* cur = _head;
uint num = 0;
while (cur != NULL) {
uint index = cur->hrm_index();
if (index > end) {
break;
} else if (index >= start) {
num++;
}
cur = cur->next();
}
return num;
}
void FreeRegionList::verify() {
// See comment in HeapRegionSetBase::verify() about MT safety and
// verification.

View File

@ -194,6 +194,8 @@ public:
void remove_starting_at(HeapRegion* first, uint num_regions);
virtual void verify();
uint num_of_regions_in_range(uint start, uint end) const;
};
// Iterator class that provides a convenient way to iterate over the

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,6 +26,11 @@
#include "gc/g1/g1HeapRegionTraceType.hpp"
#include "gc/g1/heapRegionType.hpp"
const HeapRegionType HeapRegionType::Eden = HeapRegionType(EdenTag);
const HeapRegionType HeapRegionType::Survivor = HeapRegionType(SurvTag);
const HeapRegionType HeapRegionType::Old = HeapRegionType(OldTag);
const HeapRegionType HeapRegionType::Humongous = HeapRegionType(StartsHumongousTag);
bool HeapRegionType::is_valid(Tag tag) {
switch (tag) {
case FreeTag:

View File

@ -117,6 +117,9 @@ private:
_tag = tag;
}
// Private constructor used for static constants
HeapRegionType(Tag t) : _tag(t) { hrt_assert_is_valid(_tag); }
public:
// Queries
@ -186,6 +189,11 @@ public:
G1HeapRegionTraceType::Type get_trace_type();
HeapRegionType() : _tag(FreeTag) { hrt_assert_is_valid(_tag); }
static const HeapRegionType Eden;
static const HeapRegionType Survivor;
static const HeapRegionType Old;
static const HeapRegionType Humongous;
};
#endif // SHARE_VM_GC_G1_HEAPREGIONTYPE_HPP

View File

@ -0,0 +1,523 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/heterogeneousHeapRegionManager.hpp"
#include "memory/allocation.hpp"
HeterogeneousHeapRegionManager* HeterogeneousHeapRegionManager::manager() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
assert(g1h != NULL, "Uninitialized access to HeterogeneousHeapRegionManager::manager()");
HeapRegionManager* hrm = g1h->hrm();
assert(hrm != NULL, "Uninitialized access to HeterogeneousHeapRegionManager::manager()");
return (HeterogeneousHeapRegionManager*)hrm;
}
void HeterogeneousHeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
G1RegionToSpaceMapper* prev_bitmap,
G1RegionToSpaceMapper* next_bitmap,
G1RegionToSpaceMapper* bot,
G1RegionToSpaceMapper* cardtable,
G1RegionToSpaceMapper* card_counts) {
HeapRegionManager::initialize(heap_storage, prev_bitmap, next_bitmap, bot, cardtable, card_counts);
// We commit bitmap for all regions during initialization and mark the bitmap space as special.
// This allows regions to be un-committed while concurrent-marking threads are accessing the bitmap concurrently.
_prev_bitmap_mapper->commit_and_set_special();
_next_bitmap_mapper->commit_and_set_special();
}
// expand_by() is called to grow the heap. We grow into nvdimm now.
// Dram regions are committed later as needed during mutator region allocation or
// when young list target length is determined after gc cycle.
uint HeterogeneousHeapRegionManager::expand_by(uint num_regions, WorkGang* pretouch_workers) {
uint num_regions_possible = total_regions_committed() >= max_expandable_length() ? 0 : max_expandable_length() - total_regions_committed();
uint num_expanded = expand_nvdimm(MIN2(num_regions, num_regions_possible), pretouch_workers);
return num_expanded;
}
// Expands the heap starting from the 'start' index. The question is whether we should expand from one memory (e.g. nvdimm) to another (e.g. dram).
// Looking at the code, expand_at() is called for humongous allocation where 'start' is in nv-dimm.
// So we only allocate regions in the same kind of memory as 'start'.
uint HeterogeneousHeapRegionManager::expand_at(uint start, uint num_regions, WorkGang* pretouch_workers) {
if (num_regions == 0) {
return 0;
}
uint target_num_regions = MIN2(num_regions, max_expandable_length() - total_regions_committed());
uint end = is_in_nvdimm(start) ? end_index_of_nvdimm() : end_index_of_dram();
uint num_expanded = expand_in_range(start, end, target_num_regions, pretouch_workers);
assert(total_regions_committed() <= max_expandable_length(), "must be");
return num_expanded;
}
// This function ensures that there are 'expected_num_regions' committed regions in dram.
// If new regions are committed, it un-commits that many regions from nv-dimm.
// If there are already more regions committed in dram, extra regions are un-committed.
void HeterogeneousHeapRegionManager::adjust_dram_regions(uint expected_num_regions, WorkGang* pretouch_workers) {
// Release back the extra regions allocated in evacuation failure scenario.
if(_no_borrowed_regions > 0) {
_no_borrowed_regions -= shrink_dram(_no_borrowed_regions);
_no_borrowed_regions -= shrink_nvdimm(_no_borrowed_regions);
}
if(expected_num_regions > free_list_dram_length()) {
// If we are going to expand DRAM, we expand a little more so that we can absorb small variations in Young gen sizing.
uint targeted_dram_regions = expected_num_regions * (1 + (double)G1YoungExpansionBufferPercent / 100);
uint to_be_made_available = targeted_dram_regions - free_list_dram_length();
#ifdef ASSERT
uint total_committed_before = total_regions_committed();
#endif
uint can_be_made_available = shrink_nvdimm(to_be_made_available);
uint ret = expand_dram(can_be_made_available, pretouch_workers);
#ifdef ASSERT
assert(ret == can_be_made_available, "should be equal");
assert(total_committed_before == total_regions_committed(), "invariant not met");
#endif
} else {
uint to_be_released = free_list_dram_length() - expected_num_regions;
// if number of extra DRAM regions is small, do not shrink.
if (to_be_released < expected_num_regions * G1YoungExpansionBufferPercent / 100) {
return;
}
#ifdef ASSERT
uint total_committed_before = total_regions_committed();
#endif
uint ret = shrink_dram(to_be_released);
assert(ret == to_be_released, "Should be able to shrink by given amount");
ret = expand_nvdimm(to_be_released, pretouch_workers);
#ifdef ASSERT
assert(ret == to_be_released, "Should be able to expand by given amount");
assert(total_committed_before == total_regions_committed(), "invariant not met");
#endif
}
}
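A rough standalone illustration of this buffering policy (not HotSpot code; buffer_percent stands in for G1YoungExpansionBufferPercent, and the shrinking/expanding of the nv-dimm side that keeps the total committed count constant is omitted):
#include <cstdio>
// Decide how many dram regions to provision for a desired young-gen length:
// pad the target by buffer_percent when expanding, and skip a shrink when the
// surplus is within that same buffer.
static unsigned target_dram_regions(unsigned expected, unsigned currently_free,
                                    unsigned buffer_percent) {
  if (expected > currently_free) {
    return expected + expected * buffer_percent / 100;  // expand with head-room
  }
  unsigned surplus = currently_free - expected;
  if (surplus < expected * buffer_percent / 100) {
    return currently_free;                              // small surplus: leave as is
  }
  return expected;                                      // large surplus: shrink back
}
int main() {
  printf("%u\n", target_dram_regions(100, 80, 10));   // 110: expand to target plus buffer
  printf("%u\n", target_dram_regions(100, 105, 10));  // 105: surplus of 5 is within the buffer
  printf("%u\n", target_dram_regions(100, 130, 10));  // 100: shrink the surplus away
  return 0;
}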
uint HeterogeneousHeapRegionManager::total_regions_committed() const {
return num_committed_dram() + num_committed_nvdimm();
}
uint HeterogeneousHeapRegionManager::num_committed_dram() const {
// This class does not keep count of committed regions in dram and nv-dimm.
// G1RegionToHeteroSpaceMapper keeps this information.
return static_cast<G1RegionToHeteroSpaceMapper*>(_heap_mapper)->num_committed_dram();
}
uint HeterogeneousHeapRegionManager::num_committed_nvdimm() const {
// See comment for num_committed_dram()
return static_cast<G1RegionToHeteroSpaceMapper*>(_heap_mapper)->num_committed_nvdimm();
}
// Return maximum number of regions that heap can expand to.
uint HeterogeneousHeapRegionManager::max_expandable_length() const {
return _max_regions;
}
uint HeterogeneousHeapRegionManager::find_unavailable_in_range(uint start_idx, uint end_idx, uint* res_idx) const {
guarantee(res_idx != NULL, "checking");
guarantee(start_idx <= (max_length() + 1), "checking");
uint num_regions = 0;
uint cur = start_idx;
while (cur <= end_idx && is_available(cur)) {
cur++;
}
if (cur == end_idx + 1) {
return num_regions;
}
*res_idx = cur;
while (cur <= end_idx && !is_available(cur)) {
cur++;
}
num_regions = cur - *res_idx;
#ifdef ASSERT
for (uint i = *res_idx; i < (*res_idx + num_regions); i++) {
assert(!is_available(i), "just checking");
}
assert(cur == end_idx + 1 || num_regions == 0 || is_available(cur),
"The region at the current position %u must be available or at the end", cur);
#endif
return num_regions;
}
uint HeterogeneousHeapRegionManager::expand_dram(uint num_regions, WorkGang* pretouch_workers) {
return expand_in_range(start_index_of_dram(), end_index_of_dram(), num_regions, pretouch_workers);
}
uint HeterogeneousHeapRegionManager::expand_nvdimm(uint num_regions, WorkGang* pretouch_workers) {
return expand_in_range(start_index_of_nvdimm(), end_index_of_nvdimm(), num_regions, pretouch_workers);
}
// Follows the same logic as expand_at() from HeapRegionManager.
uint HeterogeneousHeapRegionManager::expand_in_range(uint start, uint end, uint num_regions, WorkGang* pretouch_gang) {
uint so_far = 0;
uint chunk_start = 0;
uint num_last_found = 0;
while (so_far < num_regions &&
(num_last_found = find_unavailable_in_range(start, end, &chunk_start)) > 0) {
uint to_commit = MIN2(num_regions - so_far, num_last_found);
make_regions_available(chunk_start, to_commit, pretouch_gang);
so_far += to_commit;
start = chunk_start + to_commit + 1;
}
return so_far;
}
// Shrink in the range of indexes which are reserved for dram.
uint HeterogeneousHeapRegionManager::shrink_dram(uint num_regions, bool update_free_list) {
return shrink_in_range(start_index_of_dram(), end_index_of_dram(), num_regions, update_free_list);
}
// Shrink in the range of indexes which are reserved for nv-dimm.
uint HeterogeneousHeapRegionManager::shrink_nvdimm(uint num_regions, bool update_free_list) {
return shrink_in_range(start_index_of_nvdimm(), end_index_of_nvdimm(), num_regions, update_free_list);
}
// Find empty regions in given range, un-commit them and return the count.
uint HeterogeneousHeapRegionManager::shrink_in_range(uint start, uint end, uint num_regions, bool update_free_list) {
if (num_regions == 0) {
return 0;
}
uint so_far = 0;
uint idx_last_found = 0;
uint num_last_found;
while (so_far < num_regions &&
(num_last_found = find_empty_in_range_reverse(start, end, &idx_last_found)) > 0) {
uint to_uncommit = MIN2(num_regions - so_far, num_last_found);
if(update_free_list) {
_free_list.remove_starting_at(at(idx_last_found + num_last_found - to_uncommit), to_uncommit);
}
uncommit_regions(idx_last_found + num_last_found - to_uncommit, to_uncommit);
so_far += to_uncommit;
end = idx_last_found;
}
return so_far;
}
uint HeterogeneousHeapRegionManager::find_empty_in_range_reverse(uint start_idx, uint end_idx, uint* res_idx) {
guarantee(res_idx != NULL, "checking");
guarantee(start_idx < max_length(), "checking");
guarantee(end_idx < max_length(), "checking");
if(start_idx > end_idx) {
return 0;
}
uint num_regions_found = 0;
jlong cur = end_idx;
while (cur >= start_idx && !(is_available(cur) && at(cur)->is_empty())) {
cur--;
}
if (cur == start_idx - 1) {
return num_regions_found;
}
jlong old_cur = cur;
// cur indexes the first empty region
while (cur >= start_idx && is_available(cur) && at(cur)->is_empty()) {
cur--;
}
*res_idx = cur + 1;
num_regions_found = old_cur - cur;
#ifdef ASSERT
for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) {
assert(at(i)->is_empty(), "just checking");
}
#endif
return num_regions_found;
}
HeapRegion* HeterogeneousHeapRegionManager::allocate_free_region(HeapRegionType type) {
// We want to prevent mutators from proceeding when we have borrowed regions from the last collection. This
// will force a full collection to remedy the situation.
// Free region requests from GC threads can proceed.
if(type.is_eden() || type.is_humongous()) {
if(has_borrowed_regions()) {
return NULL;
}
}
// old and humongous regions are allocated from nv-dimm; eden and survivor regions are allocated from dram
// assumption: dram regions take higher indexes
bool from_nvdimm = (type.is_old() || type.is_humongous()) ? true : false;
bool from_head = from_nvdimm;
HeapRegion* hr = _free_list.remove_region(from_head);
if (hr != NULL && ( (from_nvdimm && !is_in_nvdimm(hr->hrm_index())) || (!from_nvdimm && !is_in_dram(hr->hrm_index())) ) ) {
_free_list.add_ordered(hr);
hr = NULL;
}
#ifdef ASSERT
uint total_committed_before = total_regions_committed();
#endif
if (hr == NULL) {
if (!from_nvdimm) {
uint ret = shrink_nvdimm(1);
if (ret == 1) {
ret = expand_dram(1, NULL);
assert(ret == 1, "We should be able to commit one region");
hr = _free_list.remove_region(from_head);
}
}
else { /*is_old*/
uint ret = shrink_dram(1);
if (ret == 1) {
ret = expand_nvdimm(1, NULL);
assert(ret == 1, "We should be able to commit one region");
hr = _free_list.remove_region(from_head);
}
}
}
#ifdef ASSERT
assert(total_committed_before == total_regions_committed(), "invariant not met");
#endif
// When an old region is requested (which happens during collection pause) and we can't find any empty region
// in the set of available regions (which is an evacuation failure scenario), we borrow (or pre-allocate) an unavailable region
// from nv-dimm. This region is used to evacuate surviving objects from eden, survivor or old.
if(hr == NULL && type.is_old()) {
hr = borrow_old_region_for_gc();
}
if (hr != NULL) {
assert(hr->next() == NULL, "Single region should not have next");
assert(is_available(hr->hrm_index()), "Must be committed");
}
return hr;
}
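The routing rule applied above can be condensed into a small standalone sketch (hypothetical names, not HotSpot code): old and humongous regions are served from nv-dimm via the head of the ordered free list, young regions from dram via the tail, since dram regions take the higher indexes.
#include <cstdio>
enum class RegionKind { Eden, Survivor, Old, Humongous };
// Which backing memory serves a region request, mirroring allocate_free_region().
static const char* backing_memory(RegionKind kind) {
  bool from_nvdimm = (kind == RegionKind::Old || kind == RegionKind::Humongous);
  return from_nvdimm ? "nv-dimm (free-list head)" : "dram (free-list tail)";
}
int main() {
  printf("eden      -> %s\n", backing_memory(RegionKind::Eden));
  printf("survivor  -> %s\n", backing_memory(RegionKind::Survivor));
  printf("old       -> %s\n", backing_memory(RegionKind::Old));
  printf("humongous -> %s\n", backing_memory(RegionKind::Humongous));
  return 0;
}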
uint HeterogeneousHeapRegionManager::find_contiguous_only_empty(size_t num) {
if (has_borrowed_regions()) {
return G1_NO_HRM_INDEX;
}
return find_contiguous(start_index_of_nvdimm(), end_index_of_nvdimm(), num, true);
}
uint HeterogeneousHeapRegionManager::find_contiguous_empty_or_unavailable(size_t num) {
if (has_borrowed_regions()) {
return G1_NO_HRM_INDEX;
}
return find_contiguous(start_index_of_nvdimm(), end_index_of_nvdimm(), num, false);
}
uint HeterogeneousHeapRegionManager::find_contiguous(size_t start, size_t end, size_t num, bool empty_only) {
uint found = 0;
size_t length_found = 0;
uint cur = (uint)start;
uint length_unavailable = 0;
while (length_found < num && cur <= end) {
HeapRegion* hr = _regions.get_by_index(cur);
if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
// This region is a potential candidate for allocation into.
if (!is_available(cur)) {
if(shrink_dram(1) == 1) {
uint ret = expand_in_range(cur, cur, 1, NULL);
assert(ret == 1, "We should be able to expand at this index");
} else {
length_unavailable++;
}
}
length_found++;
}
else {
// This region is not a candidate. The next region is the next possible one.
found = cur + 1;
length_found = 0;
}
cur++;
}
if (length_found == num) {
for (uint i = found; i < (found + num); i++) {
HeapRegion* hr = _regions.get_by_index(i);
// sanity check
guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
"Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
" that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr));
}
if (!empty_only && length_unavailable > (max_expandable_length() - total_regions_committed())) {
// if 'length_unavailable' number of regions will be made available, we will exceed max regions.
return G1_NO_HRM_INDEX;
}
return found;
}
else {
return G1_NO_HRM_INDEX;
}
}
uint HeterogeneousHeapRegionManager::find_highest_free(bool* expanded) {
// Loop downwards from the highest dram region index, looking for an
// entry which is either free or not yet committed. If not yet
// committed, expand_at that index.
uint curr = end_index_of_dram();
while (true) {
HeapRegion *hr = _regions.get_by_index(curr);
if (hr == NULL && !(total_regions_committed() < _max_regions)) {
uint res = shrink_nvdimm(1);
if (res == 1) {
res = expand_in_range(curr, curr, 1, NULL);
assert(res == 1, "We should be able to expand since shrink was successful");
*expanded = true;
return curr;
}
}
else {
if (hr->is_free()) {
*expanded = false;
return curr;
}
}
if (curr == start_index_of_dram()) {
return G1_NO_HRM_INDEX;
}
curr--;
}
}
// We need to override this since region 0, which serves as the dummy region in the base class, may not be available here.
// This is a corner case when the number of regions is small. When adaptive sizing is used, the initial heap size
// could be just one region. That region is committed in dram to be used for the young generation, leaving region 0 (which is in nvdimm)
// unavailable.
HeapRegion* HeterogeneousHeapRegionManager::get_dummy_region() {
uint curr = 0;
while (curr < _regions.length()) {
if (is_available(curr)) {
return new_heap_region(curr);
}
curr++;
}
assert(false, "We should always find a region available for dummy region");
return NULL;
}
// First shrink in dram, then in nv-dimm.
uint HeterogeneousHeapRegionManager::shrink_by(uint num_regions) {
// This call is made at the end of a full collection. Before making this call the region sets are torn down (tear_down_region_sets()).
// So the shrink() calls below do not need to remove uncommitted regions from the free list.
uint ret = shrink_dram(num_regions, false /* update_free_list */);
ret += shrink_nvdimm(num_regions - ret, false /* update_free_list */);
return ret;
}
void HeterogeneousHeapRegionManager::verify() {
HeapRegionManager::verify();
}
uint HeterogeneousHeapRegionManager::free_list_dram_length() const {
return _free_list.num_of_regions_in_range(start_index_of_dram(), end_index_of_dram());
}
uint HeterogeneousHeapRegionManager::free_list_nvdimm_length() const {
return _free_list.num_of_regions_in_range(start_index_of_nvdimm(), end_index_of_nvdimm());
}
bool HeterogeneousHeapRegionManager::is_in_nvdimm(uint index) const {
return index >= start_index_of_nvdimm() && index <= end_index_of_nvdimm();
}
bool HeterogeneousHeapRegionManager::is_in_dram(uint index) const {
return index >= start_index_of_dram() && index <= end_index_of_dram();
}
// We have to make sure full collection copies all surviving objects to NV-DIMM.
// We might not have enough regions in nvdimm_set, so we need to make more regions on NV-DIMM available for full collection.
// Note: by doing this we are breaking the invariant that the total number of committed regions is equal to the current heap size.
// After the full collection ends, we re-establish this invariant by freeing DRAM regions.
void HeterogeneousHeapRegionManager::prepare_for_full_collection_start() {
_total_commited_before_full_gc = total_regions_committed() - _no_borrowed_regions;
_no_borrowed_regions = 0;
expand_nvdimm(num_committed_dram(), NULL);
remove_all_free_regions();
}
// We need to bring the total number of committed regions back to what it was before the full collection started.
// Unless we are close to OOM, all regular (not pinned) regions in DRAM should be free.
// We shrink all free regions in DRAM and, if needed, in NV-DIMM (when there are pinned DRAM regions).
// If we can't bring the committed region count back to _total_commited_before_full_gc, we keep the extra count in _no_borrowed_regions.
// When this GC finishes, new regions won't be allocated since has_borrowed_regions() is true. The VM will be forced to re-try the GC
// with soft references cleared, followed by an OOM error in the worst case.
void HeterogeneousHeapRegionManager::prepare_for_full_collection_end() {
uint shrink_size = total_regions_committed() - _total_commited_before_full_gc;
uint so_far = 0;
uint idx_last_found = 0;
uint num_last_found;
uint end = (uint)_regions.length() - 1;
while (so_far < shrink_size &&
(num_last_found = find_empty_in_range_reverse(0, end, &idx_last_found)) > 0) {
uint to_uncommit = MIN2(shrink_size - so_far, num_last_found);
uncommit_regions(idx_last_found + num_last_found - to_uncommit, to_uncommit);
so_far += to_uncommit;
end = idx_last_found;
}
// See comment above the function.
_no_borrowed_regions = shrink_size - so_far;
}
uint HeterogeneousHeapRegionManager::start_index_of_dram() const { return _max_regions;}
uint HeterogeneousHeapRegionManager::end_index_of_dram() const { return 2*_max_regions - 1; }
uint HeterogeneousHeapRegionManager::start_index_of_nvdimm() const { return 0; }
uint HeterogeneousHeapRegionManager::end_index_of_nvdimm() const { return _max_regions - 1; }
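With a hypothetical _max_regions of 4, these accessors lay out the reserved index space as follows (standalone sketch, not HotSpot code):
#include <cstdio>
int main() {
  const unsigned max_regions = 4;  // hypothetical current heap size in regions
  // nv-dimm occupies the low half of the doubled index space, dram the high half.
  printf("nv-dimm: [%u, %u]\n", 0u, max_regions - 1);              // [0, 3]
  printf("dram   : [%u, %u]\n", max_regions, 2 * max_regions - 1); // [4, 7]
  return 0;
}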
// This function is called when there are no free nv-dimm regions.
// It borrows a region from the set of unavailable regions in nv-dimm for GC purpose.
HeapRegion* HeterogeneousHeapRegionManager::borrow_old_region_for_gc() {
assert(free_list_nvdimm_length() == 0, "this function should be called only when there are no nv-dimm regions in free list");
uint ret = expand_nvdimm(1, NULL);
if(ret != 1) {
return NULL;
}
HeapRegion* hr = _free_list.remove_region(true /*from_head*/);
assert(is_in_nvdimm(hr->hrm_index()), "allocated region should be in nv-dimm");
_no_borrowed_regions++;
return hr;
}
bool HeterogeneousHeapRegionManager::has_borrowed_regions() const {
return _no_borrowed_regions > 0;
}

View File

@ -0,0 +1,150 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_G1_HETEROGENEOUSHEAPREGIONMANAGER_HPP
#define SHARE_VM_GC_G1_HETEROGENEOUSHEAPREGIONMANAGER_HPP
#include "gc/g1/heapRegionManager.hpp"
// This class manages heap regions on heterogeneous memory comprising dram and nv-dimm.
// Regions in dram (dram_set) are used for young objects and archive regions (CDS).
// Regions in nv-dimm (nvdimm_set) are used for old objects and humongous objects.
// At any point there are some regions committed on dram and some on nv-dimm with the following guarantees:
// 1. The total number of regions committed in dram and nv-dimm equals the current size of heap.
// 2. Consequently, total number of regions committed is less than or equal to Xmx.
// 3. To maintain the guarantee stated by 1., whenever one set grows (new regions committed), the other set shrinks (regions un-committed).
// 3a. If more dram regions are needed (young generation expansion), corresponding number of regions in nv-dimm are un-committed.
// 3b. When old generation or humongous set grows, and new regions need to be committed to nv-dimm, corresponding number of regions
// are un-committed in dram.
class HeterogeneousHeapRegionManager : public HeapRegionManager {
const uint _max_regions;
uint _max_dram_regions;
uint _max_nvdimm_regions;
uint _start_index_of_nvdimm;
uint _total_commited_before_full_gc;
uint _no_borrowed_regions;
uint total_regions_committed() const;
uint num_committed_dram() const;
uint num_committed_nvdimm() const;
// Similar to the find_unavailable_from_idx() function in the base class; the difference is that this function searches in the range [start, end].
uint find_unavailable_in_range(uint start_idx, uint end_idx, uint* res_idx) const;
// Expand into dram. Maintains the invariant that total number of committed regions is less than current heap size.
uint expand_dram(uint num_regions, WorkGang* pretouch_workers);
// Expand into nv-dimm.
uint expand_nvdimm(uint num_regions, WorkGang* pretouch_workers);
// Expand by finding unavailable regions in [start, end] range.
uint expand_in_range(uint start, uint end, uint num_regions, WorkGang* pretouch_workers);
// Shrink dram set of regions.
uint shrink_dram(uint num_regions, bool update_free_list = true);
// Shrink nv-dimm set of regions.
uint shrink_nvdimm(uint num_regions, bool update_free_list = true);
// Shrink regions from [start, end] range.
uint shrink_in_range(uint start, uint end, uint num_regions, bool update_free_list = true);
// Similar to find_empty_from_idx_reverse() in base class. Only here it searches in a range.
uint find_empty_in_range_reverse(uint start_idx, uint end_idx, uint* res_idx);
// Similar to find_contiguous() in base class, with [start, end] range
uint find_contiguous(size_t start, size_t end, size_t num, bool empty_only);
// This function is called when there are no free nv-dimm regions.
// It borrows a region from the set of unavailable regions in nv-dimm for GC purpose.
HeapRegion* borrow_old_region_for_gc();
uint free_list_dram_length() const;
uint free_list_nvdimm_length() const;
// is region with given index in nv-dimm?
bool is_in_nvdimm(uint index) const;
bool is_in_dram(uint index) const;
public:
// Empty constructor, we'll initialize it with the initialize() method.
HeterogeneousHeapRegionManager(uint num_regions) : _max_regions(num_regions), _max_dram_regions(0),
_max_nvdimm_regions(0), _start_index_of_nvdimm(0),
_total_commited_before_full_gc(0), _no_borrowed_regions(0)
{}
static HeterogeneousHeapRegionManager* manager();
virtual void initialize(G1RegionToSpaceMapper* heap_storage,
G1RegionToSpaceMapper* prev_bitmap,
G1RegionToSpaceMapper* next_bitmap,
G1RegionToSpaceMapper* bot,
G1RegionToSpaceMapper* cardtable,
G1RegionToSpaceMapper* card_counts);
uint start_index_of_nvdimm() const;
uint start_index_of_dram() const;
uint end_index_of_nvdimm() const;
uint end_index_of_dram() const;
// Override.
HeapRegion* get_dummy_region();
// Adjust dram_set to provision 'expected_num_regions' regions.
void adjust_dram_regions(uint expected_num_regions, WorkGang* pretouch_workers);
// Prepare heap regions before and after full collection.
void prepare_for_full_collection_start();
void prepare_for_full_collection_end();
virtual HeapRegion* allocate_free_region(HeapRegionType type);
// Return maximum number of regions that heap can expand to.
uint max_expandable_length() const;
// Override. Expand in nv-dimm.
uint expand_by(uint num_regions, WorkGang* pretouch_workers);
// Override.
uint expand_at(uint start, uint num_regions, WorkGang* pretouch_workers);
// Override. This function is called for humongous allocation, so we need to find empty regions in nv-dimm.
uint find_contiguous_only_empty(size_t num);
// Override. This function is called for humongous allocation, so we need to find empty or unavailable regions in nv-dimm.
uint find_contiguous_empty_or_unavailable(size_t num);
// Overrides base class implementation to find highest free region in dram.
uint find_highest_free(bool* expanded);
// Override. This function is called to shrink the heap; we shrink in dram first, then in nv-dimm.
uint shrink_by(uint num_regions_to_remove);
bool has_borrowed_regions() const;
void verify();
};
#endif // SHARE_VM_GC_G1_HETEROGENEOUSHEAPREGIONMANAGER_HPP

View File

@ -53,7 +53,7 @@
nonstatic_field(HeapRegionManager, _num_committed, uint) \
\
nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t) \
nonstatic_field(G1CollectedHeap, _hrm, HeapRegionManager) \
nonstatic_field(G1CollectedHeap, _hrm, HeapRegionManager*) \
nonstatic_field(G1CollectedHeap, _g1mm, G1MonitoringSupport*) \
nonstatic_field(G1CollectedHeap, _old_set, HeapRegionSetBase) \
nonstatic_field(G1CollectedHeap, _archive_set, HeapRegionSetBase) \

View File

@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc/parallel/adjoiningGenerations.hpp"
#include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
#include "gc/parallel/generationSizer.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
@ -40,8 +41,8 @@
AdjoiningGenerations::AdjoiningGenerations(ReservedSpace old_young_rs,
GenerationSizer* policy,
size_t alignment) :
_virtual_spaces(old_young_rs, policy->min_old_size(),
policy->min_young_size(), alignment) {
_virtual_spaces(new AdjoiningVirtualSpaces(old_young_rs, policy->min_old_size(),
policy->min_young_size(), alignment)) {
size_t init_low_byte_size = policy->initial_old_size();
size_t min_low_byte_size = policy->min_old_size();
size_t max_low_byte_size = policy->max_old_size();
@ -61,21 +62,21 @@ AdjoiningGenerations::AdjoiningGenerations(ReservedSpace old_young_rs,
// generation.
// Does the actual creation of the virtual spaces
_virtual_spaces.initialize(max_low_byte_size,
init_low_byte_size,
init_high_byte_size);
_virtual_spaces->initialize(max_low_byte_size,
init_low_byte_size,
init_high_byte_size);
// Place the young gen at the high end. Passes in the virtual space.
_young_gen = new ASPSYoungGen(_virtual_spaces.high(),
_virtual_spaces.high()->committed_size(),
_young_gen = new ASPSYoungGen(_virtual_spaces->high(),
_virtual_spaces->high()->committed_size(),
min_high_byte_size,
_virtual_spaces.high_byte_size_limit());
_virtual_spaces->high_byte_size_limit());
// Place the old gen at the low end. Passes in the virtual space.
_old_gen = new ASPSOldGen(_virtual_spaces.low(),
_virtual_spaces.low()->committed_size(),
_old_gen = new ASPSOldGen(_virtual_spaces->low(),
_virtual_spaces->low()->committed_size(),
min_low_byte_size,
_virtual_spaces.low_byte_size_limit(),
_virtual_spaces->low_byte_size_limit(),
"old", 1);
young_gen()->initialize_work();
@ -92,8 +93,9 @@ AdjoiningGenerations::AdjoiningGenerations(ReservedSpace old_young_rs,
} else {
// Layout the reserved space for the generations.
// If OldGen is allocated on nv-dimm, we need to split the reservation (this is required for Windows).
ReservedSpace old_rs =
virtual_spaces()->reserved_space().first_part(max_low_byte_size);
virtual_spaces()->reserved_space().first_part(max_low_byte_size, policy->is_hetero_heap() /* split */);
ReservedSpace heap_rs =
virtual_spaces()->reserved_space().last_part(max_low_byte_size);
ReservedSpace young_rs = heap_rs.first_part(max_high_byte_size);
@ -117,6 +119,8 @@ AdjoiningGenerations::AdjoiningGenerations(ReservedSpace old_young_rs,
}
}
AdjoiningGenerations::AdjoiningGenerations() { }
size_t AdjoiningGenerations::reserved_byte_size() {
return virtual_spaces()->reserved_space().size();
}
@ -279,3 +283,13 @@ void AdjoiningGenerations::adjust_boundary_for_young_gen_needs(size_t eden_size,
}
}
}
AdjoiningGenerations* AdjoiningGenerations::create_adjoining_generations(ReservedSpace old_young_rs,
GenerationSizer* policy,
size_t alignment) {
if (policy->is_hetero_heap() && UseAdaptiveGCBoundary) {
return new AdjoiningGenerationsForHeteroHeap(old_young_rs, policy, alignment);
} else {
return new AdjoiningGenerations(old_young_rs, policy, alignment);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,27 +43,29 @@
class AdjoiningGenerations : public CHeapObj<mtGC> {
friend class VMStructs;
private:
// The young generation and old generation, respectively
PSYoungGen* _young_gen;
PSOldGen* _old_gen;
// The spaces used by the two generations.
AdjoiningVirtualSpaces _virtual_spaces;
// Move boundary up to expand old gen. Checks are made to
// determine if the move can be done with specified limits.
void request_old_gen_expansion(size_t desired_change_in_bytes);
// Move boundary down to expand young gen.
bool request_young_gen_expansion(size_t desired_change_in_bytes);
protected:
// The young generation and old generation, respectively
PSYoungGen* _young_gen;
PSOldGen* _old_gen;
// The spaces used by the two generations.
AdjoiningVirtualSpaces* _virtual_spaces;
public:
AdjoiningGenerations(ReservedSpace rs, GenerationSizer* policy, size_t alignment);
AdjoiningGenerations();
// Accessors
PSYoungGen* young_gen() { return _young_gen; }
PSOldGen* old_gen() { return _old_gen; }
AdjoiningVirtualSpaces* virtual_spaces() { return &_virtual_spaces; }
AdjoiningVirtualSpaces* virtual_spaces() { return _virtual_spaces; }
// Additional space is needed in the old generation. Check
// the available space and attempt to move the boundary if more space
@ -74,7 +76,9 @@ class AdjoiningGenerations : public CHeapObj<mtGC> {
// Return the total byte size of the reserved space
// for the adjoining generations.
size_t reserved_byte_size();
};
virtual size_t reserved_byte_size();
// Return new AdjoiningGenerations instance based on collector policy (specifically - whether heap is heterogeneous).
static AdjoiningGenerations* create_adjoining_generations(ReservedSpace rs, GenerationSizer* policy, size_t alignment);
};
#endif // SHARE_VM_GC_PARALLEL_ADJOININGGENERATIONS_HPP

View File

@ -0,0 +1,260 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
#include "gc/parallel/generationSizer.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psFileBackedVirtualspace.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "utilities/align.hpp"
#include "utilities/ostream.hpp"
// Create two virtual spaces (HeteroVirtualSpaces), low() on nv-dimm memory, high() on dram.
// Create ASPSOldGen and ASPSYoungGen the same way as in the base class.
AdjoiningGenerationsForHeteroHeap::AdjoiningGenerationsForHeteroHeap(ReservedSpace old_young_rs, GenerationSizer* policy, size_t alignment) :
_total_size_limit(policy->max_heap_byte_size()) {
size_t init_old_byte_size = policy->initial_old_size();
size_t min_old_byte_size = policy->min_old_size();
size_t max_old_byte_size = policy->max_old_size();
size_t init_young_byte_size = policy->initial_young_size();
size_t min_young_byte_size = policy->min_young_size();
size_t max_young_byte_size = policy->max_young_size();
// create HeteroVirtualSpaces which is composed of non-overlapping virtual spaces.
HeteroVirtualSpaces* hetero_virtual_spaces = new HeteroVirtualSpaces(old_young_rs, min_old_byte_size,
min_young_byte_size, _total_size_limit, alignment);
assert(min_old_byte_size <= init_old_byte_size &&
init_old_byte_size <= max_old_byte_size, "Parameter check");
assert(min_young_byte_size <= init_young_byte_size &&
init_young_byte_size <= max_young_byte_size, "Parameter check");
assert(UseAdaptiveGCBoundary, "Should be used only when UseAdaptiveGCBoundary is true");
// Initialize the virtual spaces. Then pass a virtual space to each generation
// for initialization of the generation.
// Does the actual creation of the virtual spaces
hetero_virtual_spaces->initialize(max_old_byte_size, init_old_byte_size, init_young_byte_size);
_young_gen = new ASPSYoungGen(hetero_virtual_spaces->high(),
hetero_virtual_spaces->high()->committed_size() /* initial_size */,
min_young_byte_size,
hetero_virtual_spaces->max_young_size());
_old_gen = new ASPSOldGen(hetero_virtual_spaces->low(),
hetero_virtual_spaces->low()->committed_size() /* initial_size */,
min_old_byte_size,
hetero_virtual_spaces->max_old_size(), "old", 1);
young_gen()->initialize_work();
assert(young_gen()->reserved().byte_size() <= young_gen()->gen_size_limit(), "Consistency check");
assert(old_young_rs.size() >= young_gen()->gen_size_limit(), "Consistency check");
old_gen()->initialize_work("old", 1);
assert(old_gen()->reserved().byte_size() <= old_gen()->gen_size_limit(), "Consistency check");
assert(old_young_rs.size() >= old_gen()->gen_size_limit(), "Consistency check");
_virtual_spaces = hetero_virtual_spaces;
}
size_t AdjoiningGenerationsForHeteroHeap::required_reserved_memory(GenerationSizer* policy) {
// This is the size that young gen can grow to, when UseAdaptiveGCBoundary is true.
size_t max_yg_size = policy->max_heap_byte_size() - policy->min_old_size();
// This is the size that old gen can grow to, when UseAdaptiveGCBoundary is true.
size_t max_old_size = policy->max_heap_byte_size() - policy->min_young_size();
return max_yg_size + max_old_size;
}
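As an illustration with assumed values: for an 8 GB heap (-Xmx8g) with a 1 GB minimum old generation and a 1 GB minimum young generation, max_yg_size = 8 GB - 1 GB = 7 GB and max_old_size = 8 GB - 1 GB = 7 GB, so 14 GB of address space is reserved even though the committed heap never exceeds 8 GB.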
// We override this function since the size of the reserved space here is larger than the heap size and
// callers expect this function to return the heap size.
size_t AdjoiningGenerationsForHeteroHeap::reserved_byte_size() {
return total_size_limit();
}
AdjoiningGenerationsForHeteroHeap::HeteroVirtualSpaces::HeteroVirtualSpaces(ReservedSpace rs, size_t min_old_byte_size, size_t min_yg_byte_size, size_t max_total_size, size_t alignment) :
AdjoiningVirtualSpaces(rs, min_old_byte_size, min_yg_byte_size, alignment),
_max_total_size(max_total_size),
_min_old_byte_size(min_old_byte_size), _min_young_byte_size(min_yg_byte_size),
_max_old_byte_size(_max_total_size - _min_young_byte_size),
_max_young_byte_size(_max_total_size - _min_old_byte_size) {
}
void AdjoiningGenerationsForHeteroHeap::HeteroVirtualSpaces::initialize(size_t initial_old_reserved_size, size_t init_old_byte_size,
size_t init_young_byte_size) {
// This is the reserved space exclusively for old generation.
ReservedSpace low_rs = _reserved_space.first_part(_max_old_byte_size, true);
// Initially we only assign 'initial_old_reserved_size' of the reserved space to the old virtual space.
low_rs = low_rs.first_part(initial_old_reserved_size);
// This is the reserved space exclusively for young generation.
ReservedSpace high_rs = _reserved_space.last_part(_max_old_byte_size).first_part(_max_young_byte_size);
// Carve out 'initial_young_reserved_size' of reserved space.
size_t initial_young_reserved_size = _max_total_size - initial_old_reserved_size;
high_rs = high_rs.last_part(_max_young_byte_size - initial_young_reserved_size);
_low = new PSFileBackedVirtualSpace(low_rs, alignment(), AllocateOldGenAt);
if (!static_cast <PSFileBackedVirtualSpace*>(_low)->initialize()) {
vm_exit_during_initialization("Could not map space for old generation at given AllocateOldGenAt path");
}
if (!_low->expand_by(init_old_byte_size)) {
vm_exit_during_initialization("Could not reserve enough space for object heap");
}
_high = new PSVirtualSpaceHighToLow(high_rs, alignment());
if (!_high->expand_by(init_young_byte_size)) {
vm_exit_during_initialization("Could not reserve enough space for object heap");
}
}
// Since the virtual spaces are non-overlapping, there is no boundary as such.
// We replicate the same behavior and maintain the same invariants as the base class 'AdjoiningVirtualSpaces' by
// increasing the old generation size and decreasing the young generation size by the same amount.
bool AdjoiningGenerationsForHeteroHeap::HeteroVirtualSpaces::adjust_boundary_up(size_t change_in_bytes) {
assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");
DEBUG_ONLY(size_t total_size_before = young_vs()->reserved_size() + old_vs()->reserved_size());
size_t bytes_needed = change_in_bytes;
size_t uncommitted_in_old = MIN2(old_vs()->uncommitted_size(), bytes_needed);
bool old_expanded = false;
// 1. Try to expand old within its reserved space.
if (uncommitted_in_old != 0) {
if (!old_vs()->expand_by(uncommitted_in_old)) {
return false;
}
old_expanded = true;
bytes_needed -= uncommitted_in_old;
if (bytes_needed == 0) {
return true;
}
}
size_t bytes_to_add_in_old = 0;
// 2. Get uncommitted memory from the young virtual space.
size_t young_uncommitted = MIN2(young_vs()->uncommitted_size(), bytes_needed);
if (young_uncommitted > 0) {
young_vs()->set_reserved(young_vs()->reserved_low_addr() + young_uncommitted,
young_vs()->reserved_high_addr(),
young_vs()->special());
bytes_needed -= young_uncommitted;
bytes_to_add_in_old = young_uncommitted;
}
// 3. Get committed memory from the young virtual space.
if (bytes_needed > 0) {
size_t shrink_size = align_down(bytes_needed, young_vs()->alignment());
bool ret = young_vs()->shrink_by(shrink_size);
assert(ret, "We should be able to shrink young space");
young_vs()->set_reserved(young_vs()->reserved_low_addr() + shrink_size,
young_vs()->reserved_high_addr(),
young_vs()->special());
bytes_to_add_in_old += shrink_size;
}
// 4. Increase size of old space
old_vs()->set_reserved(old_vs()->reserved_low_addr(),
old_vs()->reserved_high_addr() + bytes_to_add_in_old,
old_vs()->special());
if (!old_vs()->expand_by(bytes_to_add_in_old) && !old_expanded) {
return false;
}
DEBUG_ONLY(size_t total_size_after = young_vs()->reserved_size() + old_vs()->reserved_size());
assert(total_size_after == total_size_before, "should be equal");
return true;
}
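For illustration (assumed sizes): a request to grow the old generation by 64 MB when the old virtual space has 16 MB uncommitted and the young virtual space has 32 MB of uncommitted reservation proceeds as follows: step 1 commits the 16 MB still uncommitted in old, step 2 transfers the 32 MB of uncommitted young reservation to old, step 3 shrinks committed young by the remaining 16 MB (aligned) and transfers that reservation as well, and step 4 extends the old reservation by the transferred 48 MB and commits it; the total reserved size stays unchanged.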
// Read the comment for adjust_boundary_up().
// Increase the young generation size and decrease the old generation size by the same amount.
bool AdjoiningGenerationsForHeteroHeap::HeteroVirtualSpaces::adjust_boundary_down(size_t change_in_bytes) {
assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");
DEBUG_ONLY(size_t total_size_before = young_vs()->reserved_size() + old_vs()->reserved_size());
size_t bytes_needed = change_in_bytes;
size_t uncommitted_in_young = MIN2(young_vs()->uncommitted_size(), bytes_needed);
bool young_expanded = false;
// 1. Try to expand young within its reserved space.
if (uncommitted_in_young > 0) {
if (!young_vs()->expand_by(uncommitted_in_young)) {
return false;
}
young_expanded = true;
bytes_needed -= uncommitted_in_young;
if (bytes_needed == 0) {
return true;
}
}
size_t bytes_to_add_in_young = 0;
// 2. Get uncommitted memory from the old virtual space.
size_t old_uncommitted = MIN2(old_vs()->uncommitted_size(), bytes_needed);
if (old_uncommitted > 0) {
old_vs()->set_reserved(old_vs()->reserved_low_addr(),
old_vs()->reserved_high_addr() - old_uncommitted,
old_vs()->special());
bytes_needed -= old_uncommitted;
bytes_to_add_in_young = old_uncommitted;
}
// 3. Get committed memory from the old virtual space.
if (bytes_needed > 0) {
size_t shrink_size = align_down(bytes_needed, old_vs()->alignment());
bool ret = old_vs()->shrink_by(shrink_size);
assert(ret, "We should be able to shrink young space");
old_vs()->set_reserved(old_vs()->reserved_low_addr(),
old_vs()->reserved_high_addr() - shrink_size,
old_vs()->special());
bytes_to_add_in_young += shrink_size;
}
assert(bytes_to_add_in_young <= change_in_bytes, "should not be more than requested size");
// 4. Increase size of young space
young_vs()->set_reserved(young_vs()->reserved_low_addr() - bytes_to_add_in_young,
young_vs()->reserved_high_addr(),
young_vs()->special());
if (!young_vs()->expand_by(bytes_to_add_in_young) && !young_expanded) {
return false;
}
DEBUG_ONLY(size_t total_size_after = young_vs()->reserved_size() + old_vs()->reserved_size());
assert(total_size_after == total_size_before, "should be equal");
return true;
}

View File

@ -0,0 +1,85 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_PARALLEL_ADJOININGGENERATIONSFORHETEROHEAP_HPP
#define SHARE_VM_GC_PARALLEL_ADJOININGGENERATIONSFORHETEROHEAP_HPP
#include "gc/parallel/adjoiningGenerations.hpp"
class AdjoiningGenerationsForHeteroHeap : public AdjoiningGenerations {
friend class VMStructs;
private:
// Maximum total size of the generations. This is equal to the heap size specified by user.
// When adjusting the young and old generation sizes, we need to ensure that the sum of the generation sizes does not exceed this.
size_t _total_size_limit;
size_t total_size_limit() const {
return _total_size_limit;
}
// HeteroVirtualSpaces creates non-overlapping virtual spaces. Here _low and _high do not share a reserved space, i.e. there is no boundary
// separating the two virtual spaces.
class HeteroVirtualSpaces : public AdjoiningVirtualSpaces {
size_t _max_total_size;
size_t _min_old_byte_size;
size_t _min_young_byte_size;
size_t _max_old_byte_size;
size_t _max_young_byte_size;
// Internally we access the virtual spaces using these methods. This improves readability, since we are not really
// dealing with adjoining virtual spaces separated by a boundary, as is the case in the base class.
// Externally they are accessed using low() and high() methods of base class.
PSVirtualSpace* young_vs() { return high(); }
PSVirtualSpace* old_vs() { return low(); }
public:
HeteroVirtualSpaces(ReservedSpace rs,
size_t min_old_byte_size,
size_t min_young_byte_size, size_t max_total_size,
size_t alignment);
// Increase the old generation size and decrease the young generation size by the same amount
bool adjust_boundary_up(size_t size_in_bytes);
// Increase the young generation size and decrease the old generation size by the same amount
bool adjust_boundary_down(size_t size_in_bytes);
size_t max_young_size() const { return _max_young_byte_size; }
size_t max_old_size() const { return _max_old_byte_size; }
void initialize(size_t initial_old_reserved_size, size_t init_low_byte_size,
size_t init_high_byte_size);
};
public:
AdjoiningGenerationsForHeteroHeap(ReservedSpace rs, GenerationSizer* policy, size_t alignment);
// Given the size policy, calculate the total amount of memory that needs to be reserved.
// We need to reserve more memory than Xmx, since we use non-overlapping virtual spaces for the young and old generations.
static size_t required_reserved_memory(GenerationSizer* policy);
// Return the total byte size of the reserved space
size_t reserved_byte_size();
};
#endif // SHARE_VM_GC_PARALLEL_ADJOININGGENERATIONSFORHETEROHEAP_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -59,7 +59,8 @@
// moved up consistently. AdjoiningVirtualSpaces provides the
// interfaces for moving this boundary.
class AdjoiningVirtualSpaces {
class AdjoiningVirtualSpaces : public CHeapObj<mtGC> {
protected:
// space at the high end and the low end, respectively
PSVirtualSpace* _high;
PSVirtualSpace* _low;
@ -84,17 +85,17 @@ class AdjoiningVirtualSpaces {
size_t alignment);
// accessors
PSVirtualSpace* high() { return _high; }
PSVirtualSpace* low() { return _low; }
virtual PSVirtualSpace* high() { return _high; }
virtual PSVirtualSpace* low() { return _low; }
ReservedSpace reserved_space() { return _reserved_space; }
size_t min_low_byte_size() { return _min_low_byte_size; }
size_t min_high_byte_size() { return _min_high_byte_size; }
size_t alignment() const { return _alignment; }
// move boundary between the two spaces up
bool adjust_boundary_up(size_t size_in_bytes);
virtual bool adjust_boundary_up(size_t size_in_bytes);
// and down
bool adjust_boundary_down(size_t size_in_bytes);
virtual bool adjust_boundary_down(size_t size_in_bytes);
// Maximum byte size for the high space.
size_t high_byte_size_limit() {
@ -107,9 +108,8 @@ class AdjoiningVirtualSpaces {
// Sets the boundaries for the virtual spaces and commits an
// initial size.
void initialize(size_t max_low_byte_size,
virtual void initialize(size_t max_low_byte_size,
size_t init_low_byte_size,
size_t init_high_byte_size);
};
#endif // SHARE_VM_GC_PARALLEL_ADJOININGVIRTUALSPACES_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -67,3 +67,11 @@ void GenerationSizer::initialize_size_info() {
}
GenCollectorPolicy::initialize_size_info();
}
bool GenerationSizer::is_hetero_heap() const {
return false;
}
size_t GenerationSizer::heap_reserved_size_bytes() const {
return _max_heap_byte_size;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,7 +32,6 @@
class GenerationSizer : public GenCollectorPolicy {
private:
// The alignment used for boundary between young gen and old gen
static size_t default_gen_alignment() { return 64 * K * HeapWordSize; }
@ -41,5 +40,9 @@ class GenerationSizer : public GenCollectorPolicy {
void initialize_alignments();
void initialize_flags();
void initialize_size_info();
public:
virtual size_t heap_reserved_size_bytes() const;
virtual bool is_hetero_heap() const;
};
#endif // SHARE_VM_GC_PARALLEL_GENERATIONSIZER_HPP

View File

@ -0,0 +1,106 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/parallel/heterogeneousGenerationSizer.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "logging/log.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"
const double HeterogeneousGenerationSizer::MaxRamFractionForYoung = 0.8;
// Check the available dram memory to limit NewSize and MaxNewSize before
// calling base class initialize_flags().
void HeterogeneousGenerationSizer::initialize_flags() {
FormatBuffer<100> calc_str("");
julong phys_mem;
// If MaxRAM is specified, we use it as the maximum physical memory available.
if (FLAG_IS_DEFAULT(MaxRAM)) {
phys_mem = os::physical_memory();
calc_str.append("Physical_Memory");
} else {
phys_mem = (julong)MaxRAM;
calc_str.append("MaxRAM");
}
julong reasonable_max = phys_mem;
// If either MaxRAMFraction or MaxRAMPercentage is specified, we use it to calculate
// a reasonable maximum size for the young generation.
if (!FLAG_IS_DEFAULT(MaxRAMFraction)) {
reasonable_max = (julong)(phys_mem / MaxRAMFraction);
calc_str.append(" / MaxRAMFraction");
} else if (!FLAG_IS_DEFAULT(MaxRAMPercentage)) {
reasonable_max = (julong)((phys_mem * MaxRAMPercentage) / 100);
calc_str.append(" * MaxRAMPercentage / 100");
} else {
// We use our own fraction to calculate the maximum size of the young generation.
reasonable_max = phys_mem * MaxRamFractionForYoung;
calc_str.append(" * %0.2f", MaxRamFractionForYoung);
}
reasonable_max = align_up(reasonable_max, _gen_alignment);
if (MaxNewSize > reasonable_max) {
if (FLAG_IS_CMDLINE(MaxNewSize)) {
log_warning(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
(size_t)reasonable_max, calc_str.buffer());
} else {
log_info(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s)). "
"Dram usage can be lowered by setting MaxNewSize to a lower value", (size_t)reasonable_max, calc_str.buffer());
}
MaxNewSize = reasonable_max;
}
if (NewSize > reasonable_max) {
if (FLAG_IS_CMDLINE(NewSize)) {
log_warning(gc, ergo)("Setting NewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
(size_t)reasonable_max, calc_str.buffer());
}
NewSize = reasonable_max;
}
// After setting new size flags, call base class initialize_flags()
GenerationSizer::initialize_flags();
}
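For example, on a machine with 64 GB of dram and with MaxRAM, MaxRAMFraction and MaxRAMPercentage all at their defaults, reasonable_max = 64 GB * 0.8 = 51.2 GB (aligned up to the generation alignment), and MaxNewSize and NewSize are capped at that value if they were set higher.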
bool HeterogeneousGenerationSizer::is_hetero_heap() const {
return true;
}
size_t HeterogeneousGenerationSizer::heap_reserved_size_bytes() const {
if (UseAdaptiveGCBoundary) {
// This is the size that young gen can grow to, when UseAdaptiveGCBoundary is true.
size_t max_yg_size = _max_heap_byte_size - _min_old_size;
// This is the size that old gen can grow to, when UseAdaptiveGCBoundary is true.
size_t max_old_size = _max_heap_byte_size - _min_young_size;
return max_yg_size + max_old_size;
} else {
return _max_heap_byte_size;
}
}

View File

@ -0,0 +1,46 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_PARALLEL_HETEROGENEOUSGENERATIONSIZER_HPP
#define SHARE_VM_GC_PARALLEL_HETEROGENEOUSGENERATIONSIZER_HPP
#include "gc/parallel/generationSizer.hpp"
// There is a nice batch of tested generation sizing code in
// GenCollectorPolicy. Let's reuse it!
class HeterogeneousGenerationSizer : public GenerationSizer {
private:
// Max fraction of dram to use for the young generation when MaxRAMFraction and
// MaxRAMPercentage are not specified on the command line.
static const double MaxRamFractionForYoung;
protected:
virtual void initialize_flags();
public:
virtual size_t heap_reserved_size_bytes() const;
virtual bool is_hetero_heap() const;
};
#endif // SHARE_VM_GC_PARALLEL_HETEROGENEOUSGENERATIONSIZER_HPP

View File

@ -24,6 +24,7 @@
*/
#include "precompiled.hpp"
#include "gc/parallel/heterogeneousGenerationSizer.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
@ -93,5 +94,9 @@ void ParallelArguments::initialize() {
}
CollectedHeap* ParallelArguments::create_heap() {
return create_heap_with_policy<ParallelScavengeHeap, GenerationSizer>();
if (AllocateOldGenAt != NULL) {
return create_heap_with_policy<ParallelScavengeHeap, HeterogeneousGenerationSizer>();
} else {
return create_heap_with_policy<ParallelScavengeHeap, GenerationSizer>();
}
}
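For instance (illustrative path), a command line such as java -XX:+UseParallelGC -XX:+UnlockExperimentalVMOptions -XX:AllocateOldGenAt=/mnt/pmem0 -Xmx4g selects HeterogeneousGenerationSizer here, while the same command line without AllocateOldGenAt continues to use GenerationSizer.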

View File

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/adjoiningGenerations.hpp"
#include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/generationSizer.hpp"
@ -58,7 +59,7 @@ PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;
jint ParallelScavengeHeap::initialize() {
const size_t heap_size = _collector_policy->max_heap_byte_size();
size_t heap_size = _collector_policy->heap_reserved_size_bytes();
ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());
@ -86,7 +87,7 @@ jint ParallelScavengeHeap::initialize() {
double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
_gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());
_gens = AdjoiningGenerations::create_adjoining_generations(heap_rs, _collector_policy, generation_alignment());
_old_gen = _gens->old_gen();
_young_gen = _gens->young_gen();
@ -104,7 +105,7 @@ jint ParallelScavengeHeap::initialize() {
GCTimeRatio
);
assert(!UseAdaptiveGCBoundary ||
assert(_collector_policy->is_hetero_heap() || !UseAdaptiveGCBoundary ||
(old_gen()->virtual_space()->high_boundary() ==
young_gen()->virtual_space()->low_boundary()),
"Boundaries must meet");

View File

@ -111,6 +111,8 @@ class ParallelScavengeHeap : public CollectedHeap {
virtual CollectorPolicy* collector_policy() const { return _collector_policy; }
virtual GenerationSizer* ps_collector_policy() const { return _collector_policy; }
virtual SoftRefPolicy* soft_ref_policy() { return &_soft_ref_policy; }
virtual GrowableArray<GCMemoryManager*> memory_managers();

View File

@ -0,0 +1,87 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/parallel/psFileBackedVirtualspace.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/os.inline.hpp"
PSFileBackedVirtualSpace::PSFileBackedVirtualSpace(ReservedSpace rs, size_t alignment, const char* path) : PSVirtualSpace(rs, alignment),
_file_path(path), _fd(-1), _mapping_succeeded(false) {
assert(!rs.special(), "ReservedSpace passed to PSFileBackedVirtualSpace cannot be special");
}
bool PSFileBackedVirtualSpace::initialize() {
_fd = os::create_file_for_heap(_file_path);
if (_fd == -1) {
return false;
}
// We map the reserved space to a file at initialization.
char* ret = os::replace_existing_mapping_with_file_mapping(reserved_low_addr(), reserved_size(), _fd);
if (ret != reserved_low_addr()) {
os::close(_fd);
return false;
}
// _mapping_succeeded is false if we return before this point.
// Later expand calls check the value of this flag and return an error if it is false.
_mapping_succeeded = true;
_special = true;
os::close(_fd);
return true;
}
PSFileBackedVirtualSpace::PSFileBackedVirtualSpace(ReservedSpace rs, const char* path) {
PSFileBackedVirtualSpace(rs, os::vm_page_size(), path);
}
bool PSFileBackedVirtualSpace::expand_by(size_t bytes) {
assert(special(), "Since entire space is committed at initialization, _special should always be true for PSFileBackedVirtualSpace");
// If mapping did not succeed during initialization, return false.
if (!_mapping_succeeded) {
return false;
}
return PSVirtualSpace::expand_by(bytes);
}
bool PSFileBackedVirtualSpace::shrink_by(size_t bytes) {
assert(special(), "Since entire space is committed at initialization, _special should always be true for PSFileBackedVirtualSpace");
return PSVirtualSpace::shrink_by(bytes);
}
size_t PSFileBackedVirtualSpace::expand_into(PSVirtualSpace* space, size_t bytes) {
// Not supported, since doing this would change the page mapping and lead to large TLB penalties.
assert(false, "expand_into() should not be called for PSFileBackedVirtualSpace");
return 0;
}
void PSFileBackedVirtualSpace::release() {
os::close(_fd);
_fd = -1;
_file_path = NULL;
PSVirtualSpace::release();
}

View File

@ -0,0 +1,46 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_PARALLEL_PSFILEBACKEDVIRTUALSPACE_HPP
#define SHARE_VM_GC_PARALLEL_PSFILEBACKEDVIRTUALSPACE_HPP
#include "gc/parallel/psVirtualspace.hpp"
class PSFileBackedVirtualSpace : public PSVirtualSpace {
private:
const char* _file_path;
int _fd;
bool _mapping_succeeded;
public:
PSFileBackedVirtualSpace(ReservedSpace rs, size_t alignment, const char* file_path);
PSFileBackedVirtualSpace(ReservedSpace rs, const char* file_path);
bool initialize();
bool expand_by(size_t bytes);
bool shrink_by(size_t bytes);
size_t expand_into(PSVirtualSpace* space, size_t bytes);
void release();
};
#endif // SHARE_VM_GC_PARALLEL_PSFILEBACKEDVIRTUALSPACE_HPP

View File

@ -27,6 +27,7 @@
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psCardTable.hpp"
#include "gc/parallel/psFileBackedVirtualspace.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
@ -71,7 +72,14 @@ void PSOldGen::initialize(ReservedSpace rs, size_t alignment,
void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {
_virtual_space = new PSVirtualSpace(rs, alignment);
if(ParallelScavengeHeap::heap()->ps_collector_policy()->is_hetero_heap()) {
_virtual_space = new PSFileBackedVirtualSpace(rs, alignment, AllocateOldGenAt);
if (!(static_cast <PSFileBackedVirtualSpace*>(_virtual_space))->initialize()) {
vm_exit_during_initialization("Could not map space for PSOldGen at given AllocateOldGenAt path");
}
} else {
_virtual_space = new PSVirtualSpace(rs, alignment);
}
if (!_virtual_space->expand_by(_init_gen_size)) {
vm_exit_during_initialization("Could not reserve enough space for "
"object heap");

View File

@ -1995,7 +1995,10 @@ bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_po
assert(young_gen->virtual_space()->alignment() ==
old_gen->virtual_space()->alignment(), "alignments do not match");
if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
// We also return false for a heterogeneous heap because the old generation cannot absorb data from eden
// when it is allocated on different memory (for example, nv-dimm) than the young generation.
if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary) ||
ParallelScavengeHeap::heap()->ps_collector_policy()->is_hetero_heap()) {
return false;
}

View File

@ -28,6 +28,7 @@
#include "runtime/arguments.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/macros.hpp"
void GCArguments::initialize() {
@ -53,4 +54,28 @@ void GCArguments::initialize() {
// If class unloading is disabled, also disable concurrent class unloading.
FLAG_SET_CMDLINE(bool, ClassUnloadingWithConcurrentMark, false);
}
if (!FLAG_IS_DEFAULT(AllocateOldGenAt)) {
// CompressedOops not supported when AllocateOldGenAt is set.
LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedOops, false));
LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedClassPointers, false));
// When AllocateOldGenAt is set, we cannot use large pages for the entire heap memory.
// Only the young gen, which is allocated in dram, could use large pages, but we currently don't support that.
FLAG_SET_DEFAULT(UseLargePages, false);
}
}
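In effect, a 64-bit run with, say, -XX:AllocateOldGenAt=/mnt/pmem0 (illustrative path) also has UseCompressedOops, UseCompressedClassPointers and UseLargePages forced off.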
bool GCArguments::check_args_consistency() {
bool status = true;
if (!FLAG_IS_DEFAULT(AllocateHeapAt) && !FLAG_IS_DEFAULT(AllocateOldGenAt)) {
jio_fprintf(defaultStream::error_stream(),
"AllocateHeapAt and AllocateOldGenAt cannot be used together.\n");
status = false;
}
if (!FLAG_IS_DEFAULT(AllocateOldGenAt) && (UseSerialGC || UseConcMarkSweepGC || UseEpsilonGC || UseZGC)) {
jio_fprintf(defaultStream::error_stream(),
"AllocateOldGenAt is not supported for selected GC.\n");
status = false;
}
return status;
}

View File

@ -39,6 +39,7 @@ public:
virtual void initialize();
virtual size_t conservative_max_heap_alignment() = 0;
virtual CollectedHeap* create_heap() = 0;
static bool check_args_consistency();
};
#endif // SHARE_GC_SHARED_GCARGUMENTS_HPP

View File

@ -82,6 +82,7 @@
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1ConcurrentMarkThread.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heterogeneousHeapRegionManager.hpp"
#endif // INCLUDE_G1GC
#if INCLUDE_PARALLELGC
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
@ -499,6 +500,113 @@ WB_END
#endif // INCLUDE_G1GC
#if INCLUDE_G1GC || INCLUDE_PARALLELGC
WB_ENTRY(jlong, WB_DramReservedStart(JNIEnv* env, jobject o))
#if INCLUDE_G1GC
if (UseG1GC) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
if (g1h->g1_collector_policy()->is_hetero_heap()) {
uint start_region = HeterogeneousHeapRegionManager::manager()->start_index_of_dram();
return (jlong)(g1h->base() + start_region * HeapRegion::GrainBytes);
} else {
return (jlong)g1h->base();
}
}
#endif // INCLUDE_G1GC
#if INCLUDE_PARALLELGC
if (UseParallelGC) {
ParallelScavengeHeap* ps_heap = ParallelScavengeHeap::heap();
if (AllocateOldGenAt != NULL) {
MemRegion reserved = ps_heap->young_gen()->reserved();
return (jlong)reserved.start();
} else {
return (jlong)ps_heap->base();
}
}
#endif // INCLUDE_PARALLELGC
THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_DramReservedStart: enabled only for G1 and Parallel GC");
WB_END
WB_ENTRY(jlong, WB_DramReservedEnd(JNIEnv* env, jobject o))
#if INCLUDE_G1GC
if (UseG1GC) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
if (g1h->g1_collector_policy()->is_hetero_heap()) {
uint end_region = HeterogeneousHeapRegionManager::manager()->end_index_of_dram();
return (jlong)(g1h->base() + (end_region + 1) * HeapRegion::GrainBytes - 1);
} else {
return (jlong)g1h->base() + g1h->collector_policy()->max_heap_byte_size();
}
}
#endif // INCLUDE_G1GC
#if INCLUDE_PARALLELGC
if (UseParallelGC) {
ParallelScavengeHeap* ps_heap = ParallelScavengeHeap::heap();
if (AllocateOldGenAt != NULL) {
MemRegion reserved = ps_heap->young_gen()->reserved();
return (jlong)reserved.end();
} else {
return (jlong)ps_heap->reserved_region().end();
}
}
#endif // INCLUDE_PARALLELGC
THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_DramReservedEnd: enabled only for G1 and Parallel GC");
WB_END
WB_ENTRY(jlong, WB_NvdimmReservedStart(JNIEnv* env, jobject o))
#if INCLUDE_G1GC
if (UseG1GC) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
if (g1h->g1_collector_policy()->is_hetero_heap()) {
uint start_region = HeterogeneousHeapRegionManager::manager()->start_index_of_nvdimm();
return (jlong)(g1h->base() + start_region * HeapRegion::GrainBytes);
} else {
THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_NvdimmReservedStart: Old gen is not allocated on NV-DIMM using AllocateOldGenAt flag");
}
}
#endif // INCLUDE_G1GC
#if INCLUDE_PARALLELGC
if (UseParallelGC) {
ParallelScavengeHeap* ps_heap = ParallelScavengeHeap::heap();
if (AllocateOldGenAt != NULL) {
MemRegion reserved = ps_heap->old_gen()->reserved();
return (jlong)reserved.start();
} else {
THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_NvdimmReservedStart: Old gen is not allocated on NV-DIMM using AllocateOldGenAt flag");
}
}
#endif // INCLUDE_PARALLELGC
THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_NvdimmReservedStart: enabled only for G1 and Parallel GC");
WB_END
WB_ENTRY(jlong, WB_NvdimmReservedEnd(JNIEnv* env, jobject o))
#if INCLUDE_G1GC
if (UseG1GC) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
if (g1h->g1_collector_policy()->is_hetero_heap()) {
uint end_region = HeterogeneousHeapRegionManager::manager()->start_index_of_nvdimm();
return (jlong)(g1h->base() + (end_region + 1) * HeapRegion::GrainBytes - 1);
} else {
THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_NvdimmReservedEnd: Old gen is not allocated on NV-DIMM using AllocateOldGenAt flag");
}
}
#endif // INCLUDE_G1GC
#if INCLUDE_PARALLELGC
if (UseParallelGC) {
ParallelScavengeHeap* ps_heap = ParallelScavengeHeap::heap();
if (AllocateOldGenAt != NULL) {
MemRegion reserved = ps_heap->old_gen()->reserved();
return (jlong)reserved.end();
} else {
THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_NvdimmReservedEnd: Old gen is not allocated on NV-DIMM using AllocateOldGenAt flag");
}
}
#endif // INCLUDE_PARALLELGC
THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_NvdimmReservedEnd: enabled only for G1 and Parallel GC");
WB_END
#endif // INCLUDE_G1GC || INCLUDE_PARALLELGC
#if INCLUDE_PARALLELGC
WB_ENTRY(jlong, WB_PSVirtualSpaceAlignment(JNIEnv* env, jobject o))
@ -2054,6 +2162,12 @@ static JNINativeMethod methods[] = {
(void*)&WB_G1AuxiliaryMemoryUsage },
{CC"g1GetMixedGCInfo", CC"(I)[J", (void*)&WB_G1GetMixedGCInfo },
#endif // INCLUDE_G1GC
#if INCLUDE_G1GC || INCLUDE_PARALLELGC
{CC"dramReservedStart", CC"()J", (void*)&WB_DramReservedStart },
{CC"dramReservedEnd", CC"()J", (void*)&WB_DramReservedEnd },
{CC"nvdimmReservedStart", CC"()J", (void*)&WB_NvdimmReservedStart },
{CC"nvdimmReservedEnd", CC"()J", (void*)&WB_NvdimmReservedEnd },
#endif // INCLUDE_G1GC || INCLUDE_PARALLELGC
#if INCLUDE_PARALLELGC
{CC"psVirtualSpaceAlignment",CC"()J", (void*)&WB_PSVirtualSpaceAlignment},
{CC"psHeapGenerationAlignment",CC"()J", (void*)&WB_PSHeapGenerationAlignment},

View File

@ -2061,6 +2061,9 @@ bool Arguments::check_vm_args_consistency() {
log_warning(arguments) ("NUMA support for Heap depends on the file system when AllocateHeapAt option is used.\n");
}
}
status = status && GCArguments::check_args_consistency();
return status;
}
@ -2952,6 +2955,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m
}
#endif // LINUX
fix_appclasspath();
return JNI_OK;
}

View File

@ -2575,6 +2575,12 @@ define_pd_global(uint64_t,MaxRAM, 1ULL*G);
"Path to the directoy where a temporary file will be created " \
"to use as the backing store for Java Heap.") \
\
experimental(ccstr, AllocateOldGenAt, NULL, \
"Path to the directoy where a temporary file will be " \
"created to use as the backing store for old generation." \
"File of size Xmx is pre-allocated for performance reason, so" \
"we need that much space available") \
\
develop(bool, VerifyMetaspace, false, \
"Verify metaspace on chunk movements.") \
\

View File

@ -2665,8 +2665,7 @@ void JavaThread::remove_stack_guard_pages() {
}
void JavaThread::enable_stack_reserved_zone() {
assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
assert(_stack_guard_state != stack_guard_enabled, "already enabled");
assert(_stack_guard_state == stack_guard_reserved_disabled, "inconsistent state");
// The base notation is from the stack's point of view, growing downward.
// We need to adjust it to work correctly with guard_memory()
@ -2684,11 +2683,10 @@ void JavaThread::enable_stack_reserved_zone() {
}
void JavaThread::disable_stack_reserved_zone() {
assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
assert(_stack_guard_state != stack_guard_reserved_disabled, "already disabled");
assert(_stack_guard_state == stack_guard_enabled, "inconsistent state");
// Simply return if called for a thread that does not use guard pages.
if (_stack_guard_state == stack_guard_unused) return;
if (_stack_guard_state != stack_guard_enabled) return;
// The base notation is from the stack's point of view, growing downward.
// We need to adjust it to work correctly with guard_memory()

View File

@ -1864,12 +1864,12 @@ public abstract class ClassLoader {
* <p> The default system class loader is an implementation-dependent
* instance of this class.
*
* <p> If the system property "{@code java.system.class.loader}" is defined
* when this method is first invoked then the value of that property is
* taken to be the name of a class that will be returned as the system
* class loader. The class is loaded using the default system class loader
* and must define a public constructor that takes a single parameter of
* type {@code ClassLoader} which is used as the delegation parent. An
* <p> If the system property "{@systemProperty java.system.class.loader}"
* is defined when this method is first invoked then the value of that
* property is taken to be the name of a class that will be returned as the
* system class loader. The class is loaded using the default system class
* loader and must define a public constructor that takes a single parameter
* of type {@code ClassLoader} which is used as the delegation parent. An
* instance is then created using this constructor with the default system
* class loader as the parameter. The resulting class loader is defined
* to be the system class loader. During construction, the class loader
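A minimal sketch of a class satisfying the documented contract (the class name is hypothetical), usable via -Djava.system.class.loader=MySystemClassLoader:
public class MySystemClassLoader extends ClassLoader {
    // The single ClassLoader parameter is the default system class loader,
    // which this loader uses as its delegation parent.
    public MySystemClassLoader(ClassLoader parent) {
        super(parent);
    }
}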

View File

@ -304,7 +304,7 @@ public final class URL implements java.io.Serializable {
* or all providers have been exhausted.
* <li>If the previous step fails to find a protocol handler, the
* constructor reads the value of the system property:
* <blockquote>{@code
* <blockquote>{@systemProperty
* java.protocol.handler.pkgs
* }</blockquote>
* If the value of that system property is not {@code null},

View File

@ -99,7 +99,7 @@ import java.util.Collections;
* <p>
* The Java virtual machine has a default provider that provides zone rules
* for the time-zones defined by IANA Time Zone Database (TZDB). If the system
* property {@code java.time.zone.DefaultZoneRulesProvider} is defined then
* property {@systemProperty java.time.zone.DefaultZoneRulesProvider} is defined then
* it is taken to be the fully-qualified name of a concrete ZoneRulesProvider
* class to be loaded as the default provider, using the system class loader.
* If this system property is not defined, a system-default provider will be

View File

@ -60,7 +60,7 @@ import sun.util.logging.PlatformLogger;
* the <code>getInstance</code> methods.
* <p>
* Users can supersede the Java runtime currency data by means of the system
* property {@code java.util.currency.data}. If this system property is
* property {@systemProperty java.util.currency.data}. If this system property is
* defined then its value is the location of a properties file, the contents of
* which are key/value pairs of the ISO 3166 country codes and the ISO 4217
* currency data respectively. The value part consists of three ISO 4217 values

View File

@ -115,7 +115,7 @@ import sun.util.ResourceBundleEnumeration;
* input stream, then the {@code PropertyResourceBundle} instance resets to the state
* before the exception, re-reads the input stream in {@code ISO-8859-1}, and
* continues reading. If the system property
* {@code java.util.PropertyResourceBundle.encoding} is set to either
* {@systemProperty java.util.PropertyResourceBundle.encoding} is set to either
* "ISO-8859-1" or "UTF-8", the input stream is solely read in that encoding,
* and throws the exception if it encounters an invalid sequence.
* If "ISO-8859-1" is specified, characters that cannot be represented in

View File

@ -112,7 +112,7 @@ public abstract class Pack200 {
/**
* Obtain new instance of a class that implements Packer.
* <ul>
* <li><p>If the system property {@code java.util.jar.Pack200.Packer}
* <li><p>If the system property {@systemProperty java.util.jar.Pack200.Packer}
* is defined, then the value is taken to be the fully-qualified name
* of a concrete implementation class, which must implement Packer.
* This class is loaded and instantiated. If this process fails
@ -138,7 +138,7 @@ public abstract class Pack200 {
/**
* Obtain new instance of a class that implements Unpacker.
* <ul>
* <li><p>If the system property {@code java.util.jar.Pack200.Unpacker}
* <li><p>If the system property {@systemProperty java.util.jar.Pack200.Unpacker}
* is defined, then the value is taken to be the fully-qualified
* name of a concrete implementation class, which must implement Unpacker.
* The class is loaded and instantiated. If this process fails

View File

@ -113,7 +113,7 @@ import java.util.Locale;
* described above as if the locale was not supported.
* <p>
* The search order of locale sensitive services can
* be configured by using the "java.locale.providers" system property.
* be configured by using the {@systemProperty java.locale.providers} system property.
* This system property declares the user's preferred order for looking up
* the locale sensitive services separated by a comma. It is only read at
* the Java runtime startup, so the later call to System.setProperty() won't

View File

@ -150,6 +150,10 @@ JNIEXPORT jint JNICALL
DEF_JNI_OnLoad(JavaVM *vm, void *reserved)
{
jvm = vm;
// Set the GTK backend to X11 on all systems.
putenv("GDK_BACKEND=x11");
return JNI_VERSION_1_2;
}

View File

@ -76,8 +76,8 @@ import static jdk.internal.logger.DefaultLoggerFinder.isSystem;
* the initial configuration, as specified in the {@link #readConfiguration()}
* method:
* <ul>
* <li>"java.util.logging.config.class"
* <li>"java.util.logging.config.file"
* <li>{@systemProperty java.util.logging.config.class}
* <li>{@systemProperty java.util.logging.config.file}
* </ul>
* <p>
* These two system properties may be specified on the command line to the "java"

View File

@ -39,7 +39,7 @@ import jdk.internal.logger.SurrogateLogger;
* <a id="formatting">
* <b>Configuration:</b></a>
* The {@code SimpleFormatter} is initialized with the format string
* specified in the {@code java.util.logging.SimpleFormatter.format}
* specified in the {@systemProperty java.util.logging.SimpleFormatter.format}
* property to {@linkplain #format(LogRecord) format} the log messages.
* This property can be defined
* in the {@linkplain LogManager#getProperty logging properties}

View File

@ -57,7 +57,7 @@ import java.util.concurrent.atomic.AtomicLong;
* equivalent to one returned by invoking the {@link UID#UID(short)}
* constructor with the value zero.
*
* <p>If the system property <code>java.rmi.server.randomIDs</code>
* <p>If the system property {@systemProperty java.rmi.server.randomIDs}
* is defined to equal the string <code>"true"</code> (case insensitive),
* then the {@link #ObjID()} constructor will use a cryptographically
* strong random number generator to choose the object number of the

View File

@ -68,7 +68,7 @@ import java.util.ServiceLoader;
* <ul>
*
* <li>If the system property
* <code>java.rmi.server.RMIClassLoaderSpi</code> is defined, then if
* {@systemProperty java.rmi.server.RMIClassLoaderSpi} is defined, then if
* its value equals the string <code>"default"</code>, the provider
* instance will be the value returned by an invocation of the {@link
* #getDefaultProviderInstance()} method, and for any other value, if
@ -429,7 +429,7 @@ public class RMIClassLoader {
* system class loader such as the loader used for installed
* extensions, or the bootstrap class loader (which may be
* represented by <code>null</code>), then the value of the
* <code>java.rmi.server.codebase</code> property (or possibly an
* {@systemProperty java.rmi.server.codebase} property (or possibly an
* earlier cached value) is returned, or
* <code>null</code> is returned if that property is not set.
*

View File

@ -66,7 +66,7 @@ import java.net.*;
* RMISocketFactory.setSocketFactory(new LoopbackSocketFactory());
* }</pre>
*
* Set the {@code java.rmi.server.hostname} system property
* Set the {@systemProperty java.rmi.server.hostname} system property
* to {@code 127.0.0.1} to ensure that the generated stubs connect to the right
* network interface.
*

View File

@ -53,14 +53,14 @@ import javax.net.ssl.SSLSocketFactory;
* #hashCode() hashCode} may also need to be overridden.</p>
*
* <p>If the system property
* <code>javax.rmi.ssl.client.enabledCipherSuites</code> is specified,
* {@systemProperty javax.rmi.ssl.client.enabledCipherSuites} is specified,
* the {@link #createSocket(String,int)} method will call {@link
* SSLSocket#setEnabledCipherSuites(String[])} before returning the
* socket. The value of this system property is a string that is a
* comma-separated list of SSL/TLS cipher suites to enable.</p>
*
* <p>If the system property
* <code>javax.rmi.ssl.client.enabledProtocols</code> is specified,
* {@systemProperty javax.rmi.ssl.client.enabledProtocols} is specified,
* the {@link #createSocket(String,int)} method will call {@link
* SSLSocket#setEnabledProtocols(String[])} before returning the
* socket. The value of this system property is a string that is a
@ -96,7 +96,7 @@ public class SslRMIClientSocketFactory
* <p>Creates an SSL socket.</p>
*
* <p>If the system property
* <code>javax.rmi.ssl.client.enabledCipherSuites</code> is
* {@systemProperty javax.rmi.ssl.client.enabledCipherSuites} is
* specified, this method will call {@link
* SSLSocket#setEnabledCipherSuites(String[])} before returning
* the socket. The value of this system property is a string that
@ -104,7 +104,7 @@ public class SslRMIClientSocketFactory
* enable.</p>
*
* <p>If the system property
* <code>javax.rmi.ssl.client.enabledProtocols</code> is
* {@systemProperty javax.rmi.ssl.client.enabledProtocols} is
* specified, this method will call {@link
* SSLSocket#setEnabledProtocols(String[])} before returning the
* socket. The value of this system property is a string that is a

View File

@ -47,7 +47,7 @@ import sun.jvm.hotspot.tools.HeapSummary;
public class G1CollectedHeap extends CollectedHeap {
// HeapRegionManager _hrm;
static private long hrmFieldOffset;
static private AddressField hrmField;
// MemRegion _g1_reserved;
static private long g1ReservedFieldOffset;
// size_t _summary_bytes_used;
@ -72,7 +72,7 @@ public class G1CollectedHeap extends CollectedHeap {
static private synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("G1CollectedHeap");
hrmFieldOffset = type.getField("_hrm").getOffset();
hrmField = type.getAddressField("_hrm");
summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
g1mmField = type.getAddressField("_g1mm");
oldSetFieldOffset = type.getField("_old_set").getOffset();
@ -93,7 +93,7 @@ public class G1CollectedHeap extends CollectedHeap {
}
public HeapRegionManager hrm() {
Address hrmAddr = addr.addOffsetTo(hrmFieldOffset);
Address hrmAddr = hrmField.getValue(addr);
return (HeapRegionManager) VMObjectFactory.newObject(HeapRegionManager.class,
hrmAddr);
}

View File

@ -55,7 +55,7 @@ function getURLPrefix(ui) {
return ui.item.m + slash;
} else if ((ui.item.category === catTypes && ui.item.p) || ui.item.category === catMembers) {
$.each(packageSearchIndex, function(index, item) {
if (ui.item.p == item.l) {
if (item.m && ui.item.p == item.l) {
urlPrefix = item.m + slash;
}
});

View File

@ -40,7 +40,8 @@ hotspot_compiler_all_gcs = \
-:tier1_compiler_not_cms
hotspot_gc = \
gc
gc \
-gc/nvdimm
hotspot_runtime = \
runtime
@ -190,7 +191,8 @@ tier1_gc_2 = \
-gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java \
-gc/cms/TestMBeanCMS.java \
-gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java \
-gc/shenandoah
-gc/shenandoah \
-gc/nvdimm
gc_epsilon = \
gc/epsilon/ \

View File

@ -0,0 +1,79 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/* @test TestAllocateOldGenAt.java
* @key gc
* @summary Test to check allocation of Java Heap with AllocateOldGenAt option
* @requires vm.gc=="null"
* @library /test/lib
* @modules java.base/jdk.internal.misc
*/
import jdk.test.lib.JDKToolFinder;
import jdk.test.lib.process.ProcessTools;
import jdk.test.lib.process.OutputAnalyzer;
import java.util.ArrayList;
import java.util.Collections;
public class TestAllocateOldGenAt {
private static ArrayList<String> commonOpts;
public static void main(String args[]) throws Exception {
commonOpts = new ArrayList();
String testVmOptsStr = System.getProperty("test.java.opts");
if (!testVmOptsStr.isEmpty()) {
String[] testVmOpts = testVmOptsStr.split(" ");
Collections.addAll(commonOpts, testVmOpts);
}
String test_dir = System.getProperty("test.dir", ".");
Collections.addAll(commonOpts, new String[] {"-XX:+UnlockExperimentalVMOptions",
"-XX:AllocateOldGenAt=" + test_dir,
"-Xmx32m",
"-Xms32m",
"-version"});
runTest("-XX:+UseG1GC");
runTest("-XX:+UseParallelOldGC -XX:-UseAdaptiveGCBoundary");
runTest("-XX:+UseParallelOldGC -XX:+UseAdaptiveGCBoundary");
}
private static void runTest(String... extraFlags) throws Exception {
ArrayList<String> testOpts = new ArrayList();
Collections.addAll(testOpts, commonOpts.toArray(new String[commonOpts.size()]));
Collections.addAll(testOpts, extraFlags);
System.out.print("Testing:\n" + JDKToolFinder.getJDKTool("java"));
for (int i = 0; i < testOpts.size(); i += 1) {
System.out.print(" " + testOpts.get(i));
}
System.out.println();
ProcessBuilder pb =
ProcessTools.createJavaProcessBuilder(testOpts.toArray(new String[testOpts.size()]));
OutputAnalyzer output = new OutputAnalyzer(pb.start());
output.shouldHaveExitValue(0);
}
}

View File

@ -0,0 +1,108 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/* @test TestAllocateOldGenAtError.java
* @key gc
* @summary Test to check correct handling of non-existent directory passed to AllocateOldGenAt option
* @requires vm.gc=="null"
* @library /test/lib
* @modules java.base/jdk.internal.misc
*/
import java.io.File;
import jdk.test.lib.JDKToolFinder;
import jdk.test.lib.process.ProcessTools;
import jdk.test.lib.process.OutputAnalyzer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.UUID;
public class TestAllocateOldGenAtError {
private static ArrayList<String> commonOpts;
public static void main(String args[]) throws Exception {
commonOpts = new ArrayList();
String testVmOptsStr = System.getProperty("test.java.opts");
if (!testVmOptsStr.isEmpty()) {
String[] testVmOpts = testVmOptsStr.split(" ");
Collections.addAll(commonOpts, testVmOpts);
}
String test_dir = System.getProperty("test.dir", ".");
File f = null;
do {
f = new File(test_dir, UUID.randomUUID().toString());
} while(f.exists());
Collections.addAll(commonOpts, new String[] {"-XX:+UnlockExperimentalVMOptions",
"-XX:AllocateOldGenAt=" + f.getName(),
"-Xlog:gc+heap=info",
"-Xmx32m",
"-Xms32m",
"-version"});
testG1();
testParallelOld();
}
private static void testG1() throws Exception {
System.out.println("Testing G1 GC");
OutputAnalyzer output = runTest("-XX:+UseG1GC");
output.shouldContain("Could not initialize G1 heap");
output.shouldContain("Error occurred during initialization of VM");
output.shouldNotHaveExitValue(0);
}
private static void testParallelOld() throws Exception {
System.out.println("Testing ParallelOld GC with UseAdaptiveGCBoundary disabled");
OutputAnalyzer output = runTest("-XX:+UseParallelOldGC -XX:-UseAdaptiveGCBoundary");
output.shouldContain("Error occurred during initialization of VM");
output.shouldNotHaveExitValue(0);
System.out.println("Testing ParallelOld GC with UseAdaptiveGCBoundary enabled");
output = runTest("-XX:+UseParallelOldGC -XX:+UseAdaptiveGCBoundary");
output.shouldContain("Error occurred during initialization of VM");
output.shouldNotHaveExitValue(0);
}
private static OutputAnalyzer runTest(String... extraFlags) throws Exception {
ArrayList<String> testOpts = new ArrayList<>();
testOpts.addAll(commonOpts);
Collections.addAll(testOpts, extraFlags);
System.out.print("Testing:\n" + JDKToolFinder.getJDKTool("java"));
for (int i = 0; i < testOpts.size(); i += 1) {
System.out.print(" " + testOpts.get(i));
}
System.out.println();
ProcessBuilder pb =
ProcessTools.createJavaProcessBuilder(testOpts.toArray(new String[testOpts.size()]));
OutputAnalyzer output = new OutputAnalyzer(pb.start());
return output;
}
}

View File

@ -0,0 +1,92 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/* @test TestAllocateOldGenAtMultiple.java
* @key gc
* @summary Test to check allocation of Java Heap with AllocateOldGenAt option. Has multiple sub-tests to cover different code paths.
* @requires vm.gc=="null"
* @library /test/lib
* @modules java.base/jdk.internal.misc
* @requires vm.bits == "64"
* @run main TestAllocateOldGenAtMultiple -XX:+UseG1GC
*/
import jdk.test.lib.JDKToolFinder;
import jdk.test.lib.process.ProcessTools;
import jdk.test.lib.process.OutputAnalyzer;
import java.util.ArrayList;
import java.util.Collections;
public class TestAllocateOldGenAtMultiple {
public static void main(String args[]) throws Exception {
ArrayList<String> vmOpts = new ArrayList<>();
String[] testVmOpts = null;
String test_dir = System.getProperty("test.dir", ".");
String testVmOptsStr = System.getProperty("test.java.opts");
if (!testVmOptsStr.isEmpty()) {
testVmOpts = testVmOptsStr.split(" ");
}
// Extra options for each of the sub-tests
String[] extraOptsList = new String[] {
"-Xmx32m -Xms32m -XX:+UseCompressedOops", // 1. With compressedoops enabled.
"-Xmx32m -Xms32m -XX:-UseCompressedOops", // 2. With compressedoops disabled.
"-Xmx32m -Xms32m -XX:HeapBaseMinAddress=3g", // 3. With user specified HeapBaseMinAddress.
"-Xmx4g -Xms4g", // 4. With larger heap size (UnscaledNarrowOop not possible).
"-Xmx4g -Xms4g -XX:+UseLargePages", // 5. Set UseLargePages.
"-Xmx4g -Xms4g -XX:+UseNUMA" // 6. Set UseNUMA.
};
for(String extraOpts : extraOptsList) {
vmOpts.clear();
if(testVmOpts != null) {
Collections.addAll(vmOpts, testVmOpts);
}
// Add extra options specific to the sub-test.
String[] extraOptsArray = extraOpts.split(" ");
if(extraOptsArray != null) {
Collections.addAll(vmOpts, extraOptsArray);
}
// Add common options
Collections.addAll(vmOpts, new String[] {"-XX:+UnlockExperimentalVMOptions",
"-XX:AllocateOldGenAt=" + test_dir,
"-version"});
System.out.print("Testing:\n" + JDKToolFinder.getJDKTool("java"));
for (int i = 0; i < vmOpts.size(); i += 1) {
System.out.print(" " + vmOpts.get(i));
}
System.out.println();
ProcessBuilder pb =
ProcessTools.createJavaProcessBuilder(vmOpts.toArray(new String[vmOpts.size()]));
OutputAnalyzer output = new OutputAnalyzer(pb.start());
System.out.println("Output:\n" + output.getOutput());
output.shouldHaveExitValue(0);
}
}
}

View File

@ -0,0 +1,117 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestHumongousObjectsOnNvdimm
* @summary Check that humongous objects reside in NVDIMM
* @library /test/lib /
* @requires vm.gc=="null"
* @build sun.hotspot.WhiteBox
* @run driver ClassFileInstaller sun.hotspot.WhiteBox
* @run main TestHumongousObjectsOnNvdimm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI
*/
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.process.ProcessTools;
import jdk.test.lib.Asserts;
import sun.hotspot.WhiteBox;
import java.util.ArrayList;
import java.util.List;
import java.util.Collections;
import gc.testlibrary.Helpers;
/**
* Test spawns HumongousObjectTest in a separate VM and expects that it
* completes without a RuntimeException.
*/
public class TestHumongousObjectsOnNvdimm {
private static ArrayList<String> testOpts;
public static void main(String args[]) throws Exception {
testOpts = new ArrayList<>();
String[] common_options = new String[] {
"-Xbootclasspath/a:.",
"-XX:+UnlockExperimentalVMOptions",
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+WhiteBoxAPI",
"-XX:AllocateOldGenAt="+System.getProperty("test.dir", "."),
"-Xms10M", "-Xmx10M",
"-XX:G1HeapRegionSize=1m"
};
String testVmOptsStr = System.getProperty("test.java.opts");
if (!testVmOptsStr.isEmpty()) {
String[] testVmOpts = testVmOptsStr.split(" ");
Collections.addAll(testOpts, testVmOpts);
}
Collections.addAll(testOpts, common_options);
// Test with G1 GC
runTest("-XX:+UseG1GC");
}
private static void runTest(String... extraFlags) throws Exception {
Collections.addAll(testOpts, extraFlags);
testOpts.add(HumongousObjectTest.class.getName());
System.out.println(testOpts);
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(false,
testOpts.toArray(new String[testOpts.size()]));
OutputAnalyzer output = new OutputAnalyzer(pb.start());
output.shouldHaveExitValue(0);
}
}
/**
* This class tests that a humongous object resides in NVDIMM.
*/
class HumongousObjectTest {
private static final WhiteBox WB = WhiteBox.getWhiteBox();
private static void validateObject(Object o) {
Asserts.assertTrue(WB.isObjectInOldGen(o),
"Object is supposed to be in OldGen");
long obj_addr = WB.getObjectAddress(o);
long nvdimm_heap_start = WB.nvdimmReservedStart();
long nvdimm_heap_end = WB.nvdimmReservedEnd();
Asserts.assertTrue(WB.g1BelongsToHumongousRegion(obj_addr), "Object address should be in Humongous set");
Asserts.assertTrue(obj_addr >= nvdimm_heap_start && obj_addr < nvdimm_heap_end,
"Humongous object does not reside in NVDIMM");
}
public static void main(String args[]) throws Exception {
// allocate a humongous object
int byteArrayMemoryOverhead = Helpers.detectByteArrayAllocationOverhead();
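// In G1 an object is humongous once it exceeds half a region, so size the array just past that threshold.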
int MinByteArrayHumongousSize = (WB.g1RegionSize() / 2) - byteArrayMemoryOverhead + 1;
byte[] obj = new byte[MinByteArrayHumongousSize];
validateObject(obj);
}
}

View File

@ -0,0 +1,121 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestOldObjectsOnNvdimm
* @summary Check that objects in the old generation reside in NVDIMM.
* @requires vm.gc=="null"
* @library /test/lib
* @build sun.hotspot.WhiteBox
* @run driver ClassFileInstaller sun.hotspot.WhiteBox
* @run main TestOldObjectsOnNvdimm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI
*/
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.process.ProcessTools;
import jdk.test.lib.Asserts;
import sun.hotspot.WhiteBox;
import java.util.ArrayList;
import java.util.List;
import java.util.Collections;
/*
* Test spawns OldObjectTest in a separate VM and expects that it
* completes without a RuntimeException.
*/
public class TestOldObjectsOnNvdimm {
public static final int ALLOCATION_SIZE = 100;
private static ArrayList<String> testOpts;
public static void main(String args[]) throws Exception {
testOpts = new ArrayList<>();
String[] common_options = new String[] {
"-Xbootclasspath/a:.",
"-XX:+UnlockExperimentalVMOptions",
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+WhiteBoxAPI",
"-XX:AllocateOldGenAt="+System.getProperty("test.dir", "."),
"-Xms10M", "-Xmx10M",
"-XX:MaxTenuringThreshold=1" // Promote objects to Old Gen
};
String testVmOptsStr = System.getProperty("test.java.opts");
if (!testVmOptsStr.isEmpty()) {
String[] testVmOpts = testVmOptsStr.split(" ");
Collections.addAll(testOpts, testVmOpts);
}
Collections.addAll(testOpts, common_options);
// Test with G1 GC
runTest("-XX:+UseG1GC");
// Test with ParallelOld GC
runTest("-XX:+UseParallelOldGC -XX:-UseAdaptiveGCBoundary");
runTest("-XX:+UseParallelOldGC -XX:+UseAdaptiveGCBoundary");
}
private static void runTest(String... extraFlags) throws Exception {
Collections.addAll(testOpts, extraFlags);
testOpts.add(OldObjectTest.class.getName());
System.out.println(testOpts);
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(false,
testOpts.toArray(new String[testOpts.size()]));
OutputAnalyzer output = new OutputAnalyzer(pb.start());
System.out.println(output.getStdout());
output.shouldHaveExitValue(0);
}
}
/*
* This class tests that an object is in the Old generation after tenuring and resides in NVDIMM.
* The necessary condition for this test is running in VM with the following flags:
* -XX:AllocateOldGenAt=, -XX:MaxTenuringThreshold=1
*/
class OldObjectTest {
private static final WhiteBox WB = WhiteBox.getWhiteBox();
private static void validateOldObject(Object o) {
Asserts.assertTrue(WB.isObjectInOldGen(o),
"Object is supposed to be in OldGen");
long oldObj_addr = WB.getObjectAddress(o);
long nvdimm_heap_start = WB.nvdimmReservedStart();
long nvdimm_heap_end = WB.nvdimmReservedEnd();
Asserts.assertTrue(oldObj_addr >= nvdimm_heap_start && oldObj_addr <= nvdimm_heap_end,
"Old object does not reside in NVDIMM");
}
public static void main(String args[]) throws Exception {
// allocate an object and perform Young GCs to promote it to Old
byte[] oldObj = new byte[TestOldObjectsOnNvdimm.ALLOCATION_SIZE];
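// With -XX:MaxTenuringThreshold=1 the object survives the first young GC and is promoted by the second.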
WB.youngGC();
WB.youngGC();
validateOldObject(oldObj);
}
}

View File

@ -0,0 +1,125 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestYoungObjectsOnDram
* @summary Check that objects in the young generation reside in DRAM.
* @requires vm.gc=="null"
* @library /test/lib
* @build sun.hotspot.WhiteBox
* @run driver ClassFileInstaller sun.hotspot.WhiteBox
* @run main TestYoungObjectsOnDram -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI
*/
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.process.ProcessTools;
import jdk.test.lib.Asserts;
import sun.hotspot.WhiteBox;
import java.util.ArrayList;
import java.util.List;
import java.util.Collections;
/**
* Test spawns YoungObjectTest in a separate VM and expects that it
* completes without a RuntimeException.
*/
public class TestYoungObjectsOnDram {
public static final int ALLOCATION_SIZE = 100;
private static ArrayList<String> testOpts;
public static void main(String args[]) throws Exception {
testOpts = new ArrayList<>();
String[] common_options = new String[] {
"-Xbootclasspath/a:.",
"-XX:+UnlockExperimentalVMOptions",
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+WhiteBoxAPI",
"-XX:AllocateOldGenAt="+System.getProperty("test.dir", "."),
"-XX:SurvivorRatio=1", // Survivor-to-eden ratio is 1:1
"-Xms10M", "-Xmx10M",
"-XX:InitialTenuringThreshold=15" // avoid promotion of objects to Old Gen
};
String testVmOptsStr = System.getProperty("test.java.opts");
if (!testVmOptsStr.isEmpty()) {
String[] testVmOpts = testVmOptsStr.split(" ");
Collections.addAll(testOpts, testVmOpts);
}
Collections.addAll(testOpts, common_options);
// Test with G1 GC
runTest("-XX:+UseG1GC");
// Test with ParallelOld GC
runTest("-XX:+UseParallelOldGC -XX:-UseAdaptiveGCBoundary");
runTest("-XX:+UseParallelOldGC -XX:+UseAdaptiveGCBoundary");
}
private static void runTest(String... extraFlags) throws Exception {
Collections.addAll(testOpts, extraFlags);
testOpts.add(YoungObjectTest.class.getName());
System.out.println(testOpts);
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(false,
testOpts.toArray(new String[testOpts.size()]));
OutputAnalyzer output = new OutputAnalyzer(pb.start());
System.out.println(output.getStdout());
output.shouldHaveExitValue(0);
}
}
/**
* This class tests that a newly created object is in the Young generation and resides in DRAM.
* The necessary condition for this test is running in VM with the following flags:
* -XX:AllocateOldGenAt=, -XX:InitialTenuringThreshold=15, -XX:SurvivorRatio=1
*/
class YoungObjectTest {
private static final WhiteBox WB = WhiteBox.getWhiteBox();
private static void validateYoungObject(Object o) {
Asserts.assertTrue(!WB.isObjectInOldGen(o),
"Object is supposed to be in YoungGen");
long youngObj_addr = WB.getObjectAddress(o);
long dram_heap_start = WB.dramReservedStart();
long dram_heap_end = WB.dramReservedEnd();
Asserts.assertTrue(youngObj_addr >= dram_heap_start && youngObj_addr <= dram_heap_end,
"Young object does not reside in DRAM");
}
public static void main(String args[]) throws Exception {
// allocate an object
byte[] youngObj = new byte[TestYoungObjectsOnDram.ALLOCATION_SIZE];
validateYoungObject(youngObj);
// Start a Young GC and check that the object is still in DRAM.
// -XX:InitialTenuringThreshold=15 keeps it from being promoted to the old generation.
WB.youngGC();
validateYoungObject(youngObj);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -180,7 +180,7 @@ public class FindEncoderBugs {
private final long failed0 = failed;
// legend: r=regular d=direct In=Input Ou=Output
static final int maxBufSize = 20;
static final int maxBufSize = 40;
static final CharBuffer[] rInBuffers = new CharBuffer[maxBufSize];
static final CharBuffer[] dInBuffers = new CharBuffer[maxBufSize];
@ -444,6 +444,28 @@ public class FindEncoderBugs {
}
}
void testISO88591InvalidChar() {
// Several architectures implement the ISO-8859-1 encoder as an
// intrinsic where the vectorised assembly has separate cases
// for different input sizes, so exhaustively test all sizes
// from 0 to maxBufSize to ensure we get coverage
for (int i = 0; i < CharsetTester.maxBufSize; i++) {
char[] ia = new char[i];
for (int j = 0; j < i; j++)
ia[j] = randomChar();
test(ia);
// Test break on unrepresentable character
for (int j = 0; j < i; j++) {
char[] iaInvalid = ia.clone();
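// Setting bit 0x100 forces a code point above U+00FF, which ISO-8859-1 cannot represent.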
iaInvalid[j] = (char)(randomChar() | 0x100);
test(iaInvalid);
}
}
}
void testPrefix(char[] prefix) {
if (prefix.length > 0)
System.out.printf("Testing prefix %s%n", string(prefix));
@ -492,6 +514,9 @@ public class FindEncoderBugs {
System.out.println("More ISCII testing...");
new CharsetTester(cs).testPrefix(new char[]{'\u094d'}); // Halant
new CharsetTester(cs).testPrefix(new char[]{'\u093c'}); // Nukta
} else if (csn.equals("ISO-8859-1")) {
System.out.println("More ISO-8859-1 testing...");
new CharsetTester(cs).testISO88591InvalidChar();
}
}

View File

@ -688,7 +688,7 @@ public class TestSearch extends JavadocTester {
+ " return ui.item.m + slash;\n"
+ " } else if ((ui.item.category === catTypes && ui.item.p) || ui.item.category === catMembers) {\n"
+ " $.each(packageSearchIndex, function(index, item) {\n"
+ " if (ui.item.p == item.l) {\n"
+ " if (item.m && ui.item.p == item.l) {\n"
+ " urlPrefix = item.m + slash;\n"
+ " }\n"
+ " });\n"

View File

@ -182,6 +182,10 @@ public class WhiteBox {
public native long g1NumMaxRegions();
public native long g1NumFreeRegions();
public native int g1RegionSize();
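// Start/end addresses of the DRAM-backed and NVDIMM-backed ranges of the reserved heap,
// used by the AllocateOldGenAt tests to verify object placement.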
public native long dramReservedStart();
public native long dramReservedEnd();
public native long nvdimmReservedStart();
public native long nvdimmReservedEnd();
public native MemoryUsage g1AuxiliaryMemoryUsage();
private native Object[] parseCommandLine0(String commandline, char delim, DiagnosticCommand[] args);
public Object[] parseCommandLine(String commandline, char delim, DiagnosticCommand[] args) {