8292296: Use multiple threads to process ParallelGC deferred updates
Reviewed-by: tschatzl, ayang
commit 3fa6778ab2
parent 800e68d690
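
In outline: a deferred object is one whose destination copy starts in one region and ends in the next, so its interior references cannot be updated while those regions are still being filled. Before this change, a single serial "Deferred Updates" pass after parallel compaction walked every region of every space (from the dense prefix to the new top) looking for a recorded deferred-object address. After it, each GC worker records the objects it defers in a GrowableArray on its own ParCompactionManager and drains that array itself once all regions are compacted, spreading the work across the ParallelGCThreads workers. A minimal sketch of the restructuring, in portable C++ with standard containers (names are illustrative, not the HotSpot API):

#include <vector>

// Before: one thread scans every region for a recorded deferred object.
struct Region { void* deferred_obj_addr = nullptr; };

void update_deferred_objects_serial(std::vector<Region>& regions) {
  for (Region& r : regions) {
    if (r.deferred_obj_addr != nullptr) {
      // update_deferred_object(r.deferred_obj_addr);
    }
  }
}

// After: each worker keeps its own list while compacting and drains it
// itself afterwards; the per-region field disappears entirely.
struct WorkerLocal { std::vector<void*> deferred; };

void drain_deferred(WorkerLocal& w) {
  for (void* addr : w.deferred) {
    (void)addr;  // update_deferred_object(addr);
  }
  w.deferred.clear();
}
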
--- a/src/hotspot/share/gc/parallel/psCompactionManager.cpp
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -59,6 +59,8 @@ ParCompactionManager::ParCompactionManager() {
   _start_array = old_gen()->start_array();
 
   reset_bitmap_query_cache();
+
+  _deferred_obj_array = new (ResourceObj::C_HEAP, mtGC) GrowableArray<HeapWord*>(10, mtGC);
 }
 
 void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
@@ -165,6 +167,15 @@ void ParCompactionManager::drain_region_stacks() {
   } while (!region_stack()->is_empty());
 }
 
+void ParCompactionManager::drain_deferred_objects() {
+  while (!_deferred_obj_array->is_empty()) {
+    HeapWord* addr = _deferred_obj_array->pop();
+    assert(addr != NULL, "expected a deferred object");
+    PSParallelCompact::update_deferred_object(this, addr);
+  }
+  _deferred_obj_array->clear_and_deallocate();
+}
+
 size_t ParCompactionManager::pop_shadow_region_mt_safe(PSParallelCompact::RegionData* region_ptr) {
   MonitorLocker ml(_shadow_region_monitor, Mutex::_no_safepoint_check_flag);
   while (true) {
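
Two details of the new function are worth noting: the array is created C-heap backed and tagged mtGC (the manager outlives any single GC cycle), and after draining, clear_and_deallocate() is used rather than a plain clear(), which by its name also releases the backing storage, so a large collection does not leave the per-worker arrays holding memory. Roughly the same shape with a standard container (an analogy, not the GrowableArray implementation):

#include <vector>

void drain(std::vector<void*>& deferred) {
  while (!deferred.empty()) {
    void* addr = deferred.back();  // GrowableArray::pop() is LIFO like this
    deferred.pop_back();
    (void)addr;  // PSParallelCompact::update_deferred_object(cm, addr)
  }
  deferred.shrink_to_fit();        // ~ clear_and_deallocate(): free capacity
}
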
@@ -195,6 +206,10 @@ void ParCompactionManager::remove_all_shadow_regions() {
   _shadow_region_array->clear();
 }
 
+void ParCompactionManager::push_deferred_object(HeapWord* addr) {
+  _deferred_obj_array->push(addr);
+}
+
 #ifdef ASSERT
 void ParCompactionManager::verify_all_marking_stack_empty() {
   uint parallel_gc_threads = ParallelGCThreads;

--- a/src/hotspot/share/gc/parallel/psCompactionManager.hpp
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,6 +75,8 @@ class ParCompactionManager : public CHeapObj<mtGC> {
   // type of TaskQueue.
   RegionTaskQueue _region_stack;
 
+  GrowableArray<HeapWord*>* _deferred_obj_array;
+
   static ParMarkBitMap* _mark_bitmap;
 
   // Contains currently free shadow regions. We use it in
@@ -128,6 +130,8 @@ class ParCompactionManager : public CHeapObj<mtGC> {
     return next_shadow_region();
   }
 
+  void push_deferred_object(HeapWord* addr);
+
   void reset_bitmap_query_cache() {
     _last_query_beg = NULL;
     _last_query_obj = NULL;
@@ -195,6 +199,7 @@ class ParCompactionManager : public CHeapObj<mtGC> {
 
   // Process tasks remaining on any stack
   void drain_region_stacks();
+  void drain_deferred_objects();
 
   void follow_contents(oop obj);
   void follow_array(objArrayOop array, int index);

--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp
@@ -2427,6 +2427,10 @@
     // Once a thread has drained it's stack, it should try to steal regions from
     // other threads.
     compaction_with_stealing_work(&_terminator, worker_id);
+
+    // At this point all regions have been compacted, so it's now safe
+    // to update the deferred objects that cross region boundaries.
+    cm->drain_deferred_objects();
   }
 };
 
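
The added comment carries the correctness argument: a deferred object's interior references may point into any region, so no worker may start draining until every region is compacted. That ordering comes for free here, since a worker only returns from compaction_with_stealing_work once all compaction work is globally finished; and because each worker drains only its own _deferred_obj_array, the new phase needs no locking. A sketch of the same ordering using C++20's std::latch as a stand-in for the steal/terminate protocol (illustrative only, not the JDK mechanism):

#include <latch>
#include <thread>
#include <vector>

int main() {
  const int nworkers = 4;                              // ~ ParallelGCThreads
  std::vector<std::vector<void*>> deferred(nworkers);  // one list per worker
  std::latch all_compacted(nworkers);

  std::vector<std::thread> pool;
  for (int id = 0; id < nworkers; id++) {
    pool.emplace_back([&deferred, &all_compacted, id] {
      // ... compact regions, appending to deferred[id] as needed ...
      all_compacted.arrive_and_wait();  // no drain before all compaction ends
      for (void* addr : deferred[id]) { // own list only, so no locking
        (void)addr;                     // update_deferred_object(addr)
      }
      deferred[id].clear();
    });
  }
  for (std::thread& t : pool) {
    t.join();
  }
  return 0;
}
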
@@ -2456,22 +2460,13 @@ void PSParallelCompact::compact() {
     ParallelScavengeHeap::heap()->workers().run_task(&task);
 
 #ifdef ASSERT
-    // Verify that all regions have been processed before the deferred updates.
+    // Verify that all regions have been processed.
     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
       verify_complete(SpaceId(id));
     }
 #endif
   }
 
-  {
-    GCTraceTime(Trace, gc, phases) tm("Deferred Updates", &_gc_timer);
-    // Update the deferred objects, if any.
-    ParCompactionManager* cm = ParCompactionManager::get_vmthread_cm();
-    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
-      update_deferred_objects(cm, SpaceId(id));
-    }
-  }
-
   DEBUG_ONLY(write_block_fill_histogram());
 }
 
@@ -2598,32 +2593,22 @@ PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
   return last_space_id;
 }
 
-void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm,
-                                                SpaceId id) {
-  assert(id < last_space_id, "bad space id");
-
+void PSParallelCompact::update_deferred_object(ParCompactionManager* cm, HeapWord *addr) {
+#ifdef ASSERT
   ParallelCompactData& sd = summary_data();
-  const SpaceInfo* const space_info = _space_info + id;
+  size_t region_idx = sd.addr_to_region_idx(addr);
+  assert(sd.region(region_idx)->completed(), "first region must be completed before deferred updates");
+  assert(sd.region(region_idx + 1)->completed(), "second region must be completed before deferred updates");
+#endif
+
+  const SpaceInfo* const space_info = _space_info + space_id(addr);
   ObjectStartArray* const start_array = space_info->start_array();
-
-  const MutableSpace* const space = space_info->space();
-  assert(space_info->dense_prefix() >= space->bottom(), "dense_prefix not set");
-  HeapWord* const beg_addr = space_info->dense_prefix();
-  HeapWord* const end_addr = sd.region_align_up(space_info->new_top());
-
-  const RegionData* const beg_region = sd.addr_to_region_ptr(beg_addr);
-  const RegionData* const end_region = sd.addr_to_region_ptr(end_addr);
-  const RegionData* cur_region;
-  for (cur_region = beg_region; cur_region < end_region; ++cur_region) {
-    HeapWord* const addr = cur_region->deferred_obj_addr();
-    if (addr != NULL) {
-      if (start_array != NULL) {
-        start_array->allocate_block(addr);
-      }
-      cm->update_contents(cast_to_oop(addr));
-      assert(oopDesc::is_oop_or_null(cast_to_oop(addr)), "Expected an oop or NULL at " PTR_FORMAT, p2i(cast_to_oop(addr)));
-    }
+  if (start_array != NULL) {
+    start_array->allocate_block(addr);
   }
+
+  cm->update_contents(cast_to_oop(addr));
+  assert(oopDesc::is_oop(cast_to_oop(addr)), "Expected an oop at " PTR_FORMAT, p2i(cast_to_oop(addr)));
 }
 
 // Skip over count live words starting from beg, and return the address of the
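
The two new asserts encode the defining property of a deferred object: its destination starts in one region and ends in the next, so exactly region_idx and region_idx + 1 must be completed() before its interior references are touched. A worked example of the index arithmetic, with made-up geometry (HotSpot's addr_to_region_idx is effectively a divide/shift by the region size; the constants below are illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t region_bytes = 64 * 1024;  // illustrative region size
  const uintptr_t heap_base    = 0x10000000; // illustrative heap start

  auto region_idx = [&](uintptr_t a) { return (a - heap_base) / region_bytes; };

  // A 48-byte object whose destination begins 16 bytes before the end of
  // region 5 necessarily ends in region 6: it crosses the boundary.
  uintptr_t obj_beg = heap_base + 6 * region_bytes - 16;
  uintptr_t obj_end = obj_beg + 48;

  assert(region_idx(obj_beg)     == 5);  // "first region" in the assert above
  assert(region_idx(obj_end - 1) == 6);  // "second region" (region_idx + 1)
  return 0;
}
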
@@ -2870,7 +2855,6 @@ void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosu
   if (closure.is_full()) {
     decrement_destination_counts(cm, src_space_id, src_region_idx,
                                  closure.source());
-    region_ptr->set_deferred_obj_addr(NULL);
     closure.complete_region(cm, dest_addr, region_ptr);
     return;
   }
@@ -2915,7 +2899,7 @@ void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosu
   if (status == ParMarkBitMap::would_overflow) {
     // The last object did not fit. Note that interior oop updates were
     // deferred, then copy enough of the object to fill the region.
-    region_ptr->set_deferred_obj_addr(closure.destination());
+    cm->push_deferred_object(closure.destination());
     status = closure.copy_until_full(); // copies from closure.source()
 
     decrement_destination_counts(cm, src_space_id, src_region_idx,
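
This hunk is where deferral originates: would_overflow means the last live object headed for this destination region does not fit, so only enough of it is copied to fill the region (copy_until_full) and its reference updates are postponed. The patch records the destination on the worker's own list instead of in the region's now-deleted deferred_obj_addr field. A toy model of that decision (illustrative only, not the HotSpot copying code):

#include <cstddef>
#include <cstring>
#include <vector>

struct Region { char buf[4096]; size_t used = 0; };

// Copy one live object into the region; defer it if it does not fit.
bool copy_object(Region& r, const char* obj, size_t size,
                 std::vector<const char*>& deferred) {
  size_t room = sizeof(r.buf) - r.used;
  if (size <= room) {
    std::memcpy(r.buf + r.used, obj, size);
    r.used += size;
    return true;                          // fits: references updatable now
  }
  deferred.push_back(r.buf + r.used);     // ~ cm->push_deferred_object(dest)
  std::memcpy(r.buf + r.used, obj, room); // ~ closure.copy_until_full()
  r.used = sizeof(r.buf);                 // region is now full
  return false;                           // remainder continues next region
}
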
@@ -2927,7 +2911,6 @@ void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosu
   if (status == ParMarkBitMap::full) {
     decrement_destination_counts(cm, src_space_id, src_region_idx,
                                  closure.source());
-    region_ptr->set_deferred_obj_addr(NULL);
     closure.complete_region(cm, dest_addr, region_ptr);
     return;
   }

--- a/src/hotspot/share/gc/parallel/psParallelCompact.hpp
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.hpp
@@ -243,13 +243,6 @@ public:
   // Reuse _source_region to store the corresponding shadow region index
   size_t shadow_region() const { return _source_region; }
 
-  // The object (if any) starting in this region and ending in a different
-  // region that could not be updated during the main (parallel) compaction
-  // phase. This is different from _partial_obj_addr, which is an object that
-  // extends onto a source region. However, the two uses do not overlap in
-  // time, so the same field is used to save space.
-  HeapWord* deferred_obj_addr() const { return _partial_obj_addr; }
-
   // The starting address of the partial object extending onto the region.
   HeapWord* partial_obj_addr() const { return _partial_obj_addr; }
 
@@ -312,7 +305,6 @@ public:
   void set_destination(HeapWord* addr) { _destination = addr; }
   void set_source_region(size_t region) { _source_region = region; }
   void set_shadow_region(size_t region) { _source_region = region; }
-  void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
   void set_partial_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
   void set_partial_obj_size(size_t words) {
     _partial_obj_size = (region_sz_t) words;
@@ -948,8 +940,8 @@ inline void ParMarkBitMapClosure::decrement_words_remaining(size_t words) {
 // but do not have their references updated. References are not updated because
 // it cannot easily be determined if the klass pointer KKK for the object AAA
 // has been updated. KKK likely resides in a region to the left of the region
-// containing AAA. These AAA's have there references updated at the end in a
-// clean up phase. See the method PSParallelCompact::update_deferred_objects().
+// containing AAA. These AAA's have their references updated at the end in a
+// clean up phase. See the method PSParallelCompact::update_deferred_object().
 //
 // Compaction is done on a region basis. A region that is ready to be filled is
 // put on a ready list and GC threads take region off the list and fill them. A
@@ -1248,8 +1240,8 @@ class PSParallelCompact : AllStatic {
   // Fill in the block table for the specified region.
   static void fill_blocks(size_t region_idx);
 
-  // Update the deferred objects in the space.
-  static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);
+  // Update a single deferred object.
+  static void update_deferred_object(ParCompactionManager* cm, HeapWord* addr);
 
   static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
   static ParallelCompactData& summary_data() { return _summary_data; }