8236176: Parallel GC SplitInfo comment should be updated for shadow regions

Reviewed-by: tschatzl, ayang
This commit is contained in:
Ivan Walulya 2021-08-25 10:18:54 +00:00
parent c5a271259d
commit 63e062fb78
2 changed files with 20 additions and 12 deletions

View File

@ -1792,7 +1792,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
ref_processor()->start_discovery(maximum_heap_compaction);
marking_start.update();
marking_phase(vmthread_cm, maximum_heap_compaction, &_gc_tracer);
marking_phase(vmthread_cm, &_gc_tracer);
bool max_on_system_gc = UseMaximumCompactionOnSystemGC
&& GCCause::is_user_requested_gc(gc_cause);
@ -2077,7 +2077,6 @@ public:
};
void PSParallelCompact::marking_phase(ParCompactionManager* cm,
bool maximum_heap_compaction,
ParallelOldTracer *gc_tracer) {
// Recursively traverse all live objects and mark them
GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);
@ -2282,7 +2281,6 @@ void PSParallelCompact::prepare_region_draining_tasks(uint parallel_gc_threads)
FillableRegionLogger region_logger;
for (unsigned int id = to_space_id; id + 1 > old_space_id; --id) {
SpaceInfo* const space_info = _space_info + id;
MutableSpace* const space = space_info->space();
HeapWord* const new_top = space_info->new_top();
const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
@ -3092,12 +3090,6 @@ bool PSParallelCompact::steal_unavailable_region(ParCompactionManager* cm, size_
// the shadow region by copying live objects from source regions of the unavailable one. Once
// the unavailable region becomes available, the data in the shadow region will be copied back.
// Shadow regions are empty regions in the to-space and regions between top and end of other spaces.
//
// For more details, please refer to §4.2 of the VEE'19 paper:
// Haoyu Li, Mingyu Wu, Binyu Zang, and Haibo Chen. 2019. ScissorGC: scalable and efficient
// compaction for Java full garbage collection. In Proceedings of the 15th ACM SIGPLAN/SIGOPS
// International Conference on Virtual Execution Environments (VEE 2019). ACM, New York, NY, USA,
// 108-121. DOI: https://doi.org/10.1145/3313808.3313820
void PSParallelCompact::initialize_shadow_regions(uint parallel_gc_threads)
{
const ParallelCompactData& sd = PSParallelCompact::summary_data();

View File

@ -950,7 +950,6 @@ inline void ParMarkBitMapClosure::decrement_words_remaining(size_t words) {
// has been updated. KKK likely resides in a region to the left of the region
// containing AAA. These AAA's have their references updated at the end in a
// clean up phase. See the method PSParallelCompact::update_deferred_objects().
// An alternate strategy is being investigated for this deferral of updating.
//
// Compaction is done on a region basis. A region that is ready to be filled is
// put on a ready list and GC threads take regions off the list and fill them. A
@ -961,6 +960,25 @@ inline void ParMarkBitMapClosure::decrement_words_remaining(size_t words) {
// regions and regions compacting into themselves. There is always at least 1
// region that can be put on the ready list. The regions are atomically added
// and removed from the ready list.
//
// During compaction, there is a natural task dependency among regions because
// destination regions may also be source regions themselves. Consequently, the
// destination regions are not available for processing until all live objects
// within them are evacuated to their destinations. These dependencies lead to
// limited thread utilization as threads spin waiting on regions to be ready.
// Shadow regions are utilized to address these region dependencies. The basic
// idea is that, if a region is unavailable because it still contains live
// objects and thus cannot serve as a destination momentarily, the GC thread
// may allocate a shadow region as a substitute destination and directly copy
// live objects into this shadow region. Live objects in the shadow region will
// be copied into the target destination region when it becomes available.
//
// For more details on shadow regions, please refer to §4.2 of the VEE'19 paper:
// Haoyu Li, Mingyu Wu, Binyu Zang, and Haibo Chen. 2019. ScissorGC: scalable
// and efficient compaction for Java full garbage collection. In Proceedings of
// the 15th ACM SIGPLAN/SIGOPS International Conference on Virtual Execution
// Environments (VEE 2019). ACM, New York, NY, USA, 108-121. DOI:
// https://doi.org/10.1145/3313808.3313820
class TaskQueue;
@ -1045,7 +1063,6 @@ class PSParallelCompact : AllStatic {
// Mark live objects
static void marking_phase(ParCompactionManager* cm,
bool maximum_heap_compaction,
ParallelOldTracer *gc_tracer);
// Compute the dense prefix for the designated space. This is an experimental
@ -1114,7 +1131,6 @@ class PSParallelCompact : AllStatic {
DEBUG_ONLY(static void write_block_fill_histogram();)
// Move objects to new locations.
static void compact_perm(ParCompactionManager* cm);
static void compact();
// Add available regions to the stack and draining tasks to the task queue.