8210708: Use single mark bitmap in G1
Co-authored-by: Stefan Johansson <sjohanss@openjdk.org>
Co-authored-by: Ivan Walulya <iwalulya@openjdk.org>
Reviewed-by: iwalulya, ayang
parent 8e7b45b820
commit 95e3190d96

Changed files:

src/hotspot/share/gc/g1:
g1BlockOffsetTable.cpp, g1BlockOffsetTable.hpp, g1BlockOffsetTable.inline.hpp,
g1CodeBlobClosure.cpp, g1CollectedHeap.cpp, g1CollectedHeap.hpp, g1CollectedHeap.inline.hpp,
g1CollectionSet.cpp, g1CollectorState.hpp, g1ConcurrentMark.cpp, g1ConcurrentMark.hpp,
g1ConcurrentMark.inline.hpp, g1ConcurrentMarkBitMap.cpp, g1ConcurrentMarkBitMap.hpp,
g1ConcurrentMarkBitMap.inline.hpp, g1ConcurrentMarkThread.cpp, g1ConcurrentMarkThread.hpp,
g1ConcurrentRebuildAndScrub.cpp, g1ConcurrentRebuildAndScrub.hpp, g1EvacFailure.cpp,
g1FullCollector.cpp, g1FullGCCompactTask.cpp, g1FullGCCompactTask.hpp, g1FullGCPrepareTask.cpp,
g1FullGCPrepareTask.hpp, g1HeapVerifier.cpp, g1HeapVerifier.hpp, g1OopClosures.inline.hpp,
g1ParScanThreadState.cpp, g1Policy.cpp, g1RegionMarkStatsCache.hpp, g1RemSet.cpp, g1RemSet.hpp,
g1RemSetTrackingPolicy.cpp, g1SATBMarkQueueSet.cpp, g1YoungCollector.cpp,
g1YoungGCPostEvacuateTasks.cpp, heapRegion.cpp, heapRegion.hpp, heapRegion.inline.hpp,
heapRegionManager.cpp, heapRegionManager.hpp

src/hotspot/share/gc/shared
test/hotspot/gtest
test/hotspot/jtreg/gc/g1
src/hotspot/share/gc/g1/g1BlockOffsetTable.cpp:

@@ -79,19 +79,6 @@ G1BlockOffsetTablePart::G1BlockOffsetTablePart(G1BlockOffsetTable* array, HeapRe
{
}

void G1BlockOffsetTablePart::update() {
  HeapWord* next_addr = _hr->bottom();
  HeapWord* const limit = _hr->top();

  HeapWord* prev_addr;
  while (next_addr < limit) {
    prev_addr = next_addr;
    next_addr = prev_addr + block_size(prev_addr);
    update_for_block(prev_addr, next_addr);
  }
  assert(next_addr == limit, "Should stop the scan at the limit.");
}

// Write the backskip value for each region.
//
// offset
src/hotspot/share/gc/g1/g1BlockOffsetTable.hpp:

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -122,6 +122,7 @@ private:
  void set_remainder_to_point_to_start_incl(size_t start, size_t end);

  inline size_t block_size(const HeapWord* p) const;
  inline size_t block_size(const HeapWord* p, HeapWord* pb) const;

  // Returns the address of a block whose start is at most "addr".
  inline HeapWord* block_at_or_preceding(const void* addr) const;
@@ -129,8 +130,10 @@ private:
  // Return the address of the beginning of the block that contains "addr".
  // "q" is a block boundary that is <= "addr"; "n" is the address of the
  // next block (or the end of the space.)
  // "pb" is the current value of the region's parsable_bottom.
  inline HeapWord* forward_to_block_containing_addr(HeapWord* q, HeapWord* n,
                                                    const void* addr) const;
                                                    const void* addr,
                                                    HeapWord* pb) const;

  // Update BOT entries corresponding to the mem range [blk_start, blk_end).
  void update_for_block_work(HeapWord* blk_start, HeapWord* blk_end);
@@ -152,8 +155,6 @@ public:
  // The elements of the array are initialized to zero.
  G1BlockOffsetTablePart(G1BlockOffsetTable* array, HeapRegion* hr);

  void update();

  void verify() const;

  // Returns the address of the start of the block containing "addr", or
@@ -161,7 +162,8 @@ public:
  // namely updating of shared array entries that "point" too far
  // backwards. This can occur, for example, when lab allocation is used
  // in a space covered by the table.)
  inline HeapWord* block_start(const void* addr);
  // "pb" is the current value of the region's parsable_bottom.
  inline HeapWord* block_start(const void* addr, HeapWord* pb);

  void update_for_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (is_crossing_card_boundary(blk_start, blk_end)) {
src/hotspot/share/gc/g1/g1BlockOffsetTable.inline.hpp:

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -32,11 +32,11 @@
#include "runtime/atomic.hpp"
#include "oops/oop.inline.hpp"

inline HeapWord* G1BlockOffsetTablePart::block_start(const void* addr) {
inline HeapWord* G1BlockOffsetTablePart::block_start(const void* addr, HeapWord* const pb) {
  assert(addr >= _hr->bottom() && addr < _hr->top(), "invalid address");
  HeapWord* q = block_at_or_preceding(addr);
  HeapWord* n = q + block_size(q);
  return forward_to_block_containing_addr(q, n, addr);
  HeapWord* n = q + block_size(q, pb);
  return forward_to_block_containing_addr(q, n, addr, pb);
}

u_char G1BlockOffsetTable::offset_array(size_t index) const {
@@ -99,6 +99,10 @@ inline size_t G1BlockOffsetTablePart::block_size(const HeapWord* p) const {
  return _hr->block_size(p);
}

inline size_t G1BlockOffsetTablePart::block_size(const HeapWord* p, HeapWord* const pb) const {
  return _hr->block_size(p, pb);
}

inline HeapWord* G1BlockOffsetTablePart::block_at_or_preceding(const void* addr) const {
#ifdef ASSERT
  if (!_hr->is_continues_humongous()) {
@@ -126,7 +130,8 @@ inline HeapWord* G1BlockOffsetTablePart::block_at_or_preceding(const void* addr)
}

inline HeapWord* G1BlockOffsetTablePart::forward_to_block_containing_addr(HeapWord* q, HeapWord* n,
                                                                          const void* addr) const {
                                                                          const void* addr,
                                                                          HeapWord* const pb) const {
  while (n <= addr) {
    // When addr is not covered by the block starting at q we need to
    // step forward until we find the correct block. With the BOT
@@ -138,7 +143,7 @@ inline HeapWord* G1BlockOffsetTablePart::forward_to_block_containing_addr(HeapWo
    q = n;
    assert(cast_to_oop(q)->klass_or_null() != nullptr,
           "start of block must be an initialized object");
    n += block_size(q);
    n += block_size(q, pb);
  }
  assert(q <= addr, "wrong order for q and addr");
  assert(addr < n, "wrong order for addr and n");
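The hunks above thread the region's parsable_bottom ("pb") through the BOT lookup so that block_size can be computed for the part of the region that is not yet parsable. As a minimal standalone sketch of the forwarding idea only (not HotSpot code; the block_size_at callback and the toy block layout are invented for illustration), the lookup starts from a BOT-provided block boundary at or before the query address and walks forward block by block:

#include <cassert>
#include <cstddef>
#include <functional>

// Walk forward from a known block boundary q (q <= addr) until the block
// containing addr is found, by analogy with forward_to_block_containing_addr.
size_t forward_to_block_containing(size_t q, size_t addr,
                                   const std::function<size_t(size_t)>& block_size_at) {
  size_t n = q + block_size_at(q);   // end of the block starting at q
  while (n <= addr) {                // addr not yet covered: step to the next block
    q = n;
    n += block_size_at(q);
  }
  assert(q <= addr && addr < n);     // q now starts the block containing addr
  return q;
}

int main() {
  // Toy "region": blocks of sizes 4, 2, 6 and 3 words starting at offset 0.
  const size_t sizes[] = {4, 2, 6, 3};
  auto block_size_at = [&](size_t p) -> size_t {
    size_t start = 0;
    for (size_t s : sizes) {
      if (p == start) return s;
      start += s;
    }
    return 1;
  };
  assert(forward_to_block_containing(0, 7, block_size_at) == 6);  // 7 lies in [6, 12)
  return 0;
}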
src/hotspot/share/gc/g1/g1CodeBlobClosure.cpp:

@@ -59,7 +59,7 @@ void G1CodeBlobClosure::MarkingOopClosure::do_oop_work(T* p) {
  T oop_or_narrowoop = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(oop_or_narrowoop)) {
    oop o = CompressedOops::decode_not_null(oop_or_narrowoop);
    _cm->mark_in_next_bitmap(_worker_id, o);
    _cm->mark_in_bitmap(_worker_id, o);
  }
}

src/hotspot/share/gc/g1/g1CollectedHeap.cpp:

@@ -285,8 +285,6 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(HeapRegion* first_hr,
  assert(hr->bottom() < obj_top && obj_top <= hr->end(),
         "obj_top should be in last region");

  _verifier->check_bitmaps("Humongous Region Allocation", first_hr);

  assert(words_not_fillable == 0 ||
         first_hr->bottom() + word_size_sum - words_not_fillable == hr->top(),
         "Miscalculation in humongous allocation");
@@ -436,7 +434,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {

  if (should_try_gc) {
    GCCause::Cause gc_cause = preventive_collection_required ? GCCause::_g1_preventive_collection
                                                             : GCCause::_g1_inc_collection_pause;
                                                             : GCCause::_g1_inc_collection_pause;
    bool succeeded;
    result = do_collection_pause(word_size, gc_count_before, &succeeded, gc_cause);
    if (result != NULL) {
@@ -985,7 +983,7 @@ void G1CollectedHeap::print_heap_after_full_collection() {
  }
}

void G1CollectedHeap::abort_concurrent_cycle() {
bool G1CollectedHeap::abort_concurrent_cycle() {
  // If we start the compaction before the CM threads finish
  // scanning the root regions we might trip them over as we'll
  // be moving objects / updating references. So let's wait until
@@ -1002,7 +1000,7 @@ void G1CollectedHeap::abort_concurrent_cycle() {

  // Abandon current iterations of concurrent marking and concurrent
  // refinement, if any are in progress.
  concurrent_mark()->concurrent_cycle_abort();
  return concurrent_mark()->concurrent_cycle_abort();
}

void G1CollectedHeap::prepare_heap_for_full_collection() {
@@ -1027,7 +1025,7 @@ void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
  }
  _verifier->verify_region_sets_optional();
  _verifier->verify_before_gc(G1HeapVerifier::G1VerifyFull);
  _verifier->check_bitmaps("Full GC Start");
  _verifier->verify_bitmap_clear(true /* above_tams_only */);
}

void G1CollectedHeap::prepare_heap_for_mutators() {
@@ -1076,9 +1074,7 @@ void G1CollectedHeap::verify_after_full_collection() {
  _hrm.verify_optional();
  _verifier->verify_region_sets_optional();
  _verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);

  // This call implicitly verifies that the next bitmap is clear after Full GC.
  _verifier->check_bitmaps("Full GC End");
  _verifier->verify_bitmap_clear(false /* above_tams_only */);

  // At this point there should be no regions in the
  // entire heap tagged as young.
@@ -1627,7 +1623,7 @@ jint G1CollectedHeap::initialize() {
                                         heap_rs.size());
  heap_storage->set_mapping_changed_listener(&_listener);

  // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
  // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmap.
  G1RegionToSpaceMapper* bot_storage =
    create_aux_memory_mapper("Block Offset Table",
                             G1BlockOffsetTable::compute_size(heap_rs.size() / HeapWordSize),
@@ -1644,12 +1640,10 @@ jint G1CollectedHeap::initialize() {
                             G1CardCounts::heap_map_factor());

  size_t bitmap_size = G1CMBitMap::compute_size(heap_rs.size());
  G1RegionToSpaceMapper* prev_bitmap_storage =
    create_aux_memory_mapper("Prev Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
  G1RegionToSpaceMapper* next_bitmap_storage =
    create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
  G1RegionToSpaceMapper* bitmap_storage =
    create_aux_memory_mapper("Mark Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());

  _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
  _hrm.initialize(heap_storage, bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
  _card_table->initialize(cardtable_storage);

  // Do later initialization work for concurrent refinement.
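One practical effect of the single "Mark Bitmap" mapper above is footprint: the space reserved for marking bitmaps is halved. A rough worked example (assuming, as on 64-bit HotSpot, one mark bit per 8-byte heap word, i.e. 1/64 of the heap per bitmap; the exact factor comes from G1CMBitMap::heap_map_factor() and is not stated in the patch):

#include <cstdio>

int main() {
  const double heap_gb = 32.0;           // example heap size, not from the patch
  const double heap_map_factor = 64.0;   // assumed: one mark bit per 8-byte word
  const double bitmap_mb = heap_gb * 1024.0 / heap_map_factor;
  std::printf("single mark bitmap:     %.0f MB\n", bitmap_mb);      // 512 MB
  std::printf("prev + next previously: %.0f MB\n", 2 * bitmap_mb);  // 1024 MB
  return 0;
}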
@@ -1695,7 +1689,7 @@ jint G1CollectedHeap::initialize() {

  // Create the G1ConcurrentMark data structure and thread.
  // (Must do this late, so that "max_[reserved_]regions" is defined.)
  _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
  _cm = new G1ConcurrentMark(this, bitmap_storage);
  _cm_thread = _cm->cm_thread();

  // Now expand into the initial heap size.
@@ -2352,12 +2346,12 @@ HeapWord* G1CollectedHeap::block_start(const void* addr) const {
  if (addr >= hr->top()) {
    return nullptr;
  }
  return hr->block_start(addr);
  return hr->block_start(addr, hr->parsable_bottom_acquire());
}

bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
  HeapRegion* hr = heap_region_containing(addr);
  return hr->block_is_obj(addr);
  return hr->block_is_obj(addr, hr->parsable_bottom_acquire());
}

size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
@@ -2412,9 +2406,9 @@ bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
                                       const HeapRegion* hr,
                                       const VerifyOption vo) const {
  switch (vo) {
  case VerifyOption::G1UsePrevMarking: return is_obj_dead(obj, hr);
  case VerifyOption::G1UseConcMarking: return is_obj_dead(obj, hr);
  case VerifyOption::G1UseFullMarking: return is_obj_dead_full(obj, hr);
  default: ShouldNotReachHere();
  default: ShouldNotReachHere();
  }
  return false; // keep some compilers happy
}
@@ -2422,9 +2416,9 @@ bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
                                       const VerifyOption vo) const {
  switch (vo) {
  case VerifyOption::G1UsePrevMarking: return is_obj_dead(obj);
  case VerifyOption::G1UseConcMarking: return is_obj_dead(obj);
  case VerifyOption::G1UseFullMarking: return is_obj_dead_full(obj);
  default: ShouldNotReachHere();
  default: ShouldNotReachHere();
  }
  return false; // keep some compilers happy
}
@@ -2472,7 +2466,8 @@ void G1CollectedHeap::print_regions_on(outputStream* st) const {
               "HS=humongous(starts), HC=humongous(continues), "
               "CS=collection set, F=free, "
               "OA=open archive, CA=closed archive, "
               "TAMS=top-at-mark-start (previous, next)");
               "TAMS=top-at-mark-start, "
               "PB=parsable bottom");
  PrintRegionClosure blk(st);
  heap_region_iterate(&blk);
}
@@ -2756,7 +2751,6 @@ void G1CollectedHeap::verify_before_young_collection(G1HeapVerifier::G1VerifyTyp
    heap_region_iterate(&v_cl);
  }
  _verifier->verify_before_gc(type);
  _verifier->check_bitmaps("GC Start");
  verify_numa_regions("GC Start");
  phase_times()->record_verify_before_time_ms((Ticks::now() - start).seconds() * MILLIUNITS);
}
@@ -2772,7 +2766,6 @@ void G1CollectedHeap::verify_after_young_collection(G1HeapVerifier::G1VerifyType
    heap_region_iterate(&v_cl);
  }
  _verifier->verify_after_gc(type);
  _verifier->check_bitmaps("GC End");
  verify_numa_regions("GC End");
  _verifier->verify_region_sets_optional();
  phase_times()->record_verify_after_time_ms((Ticks::now() - start).seconds() * MILLIUNITS);
@@ -2885,6 +2878,7 @@ void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_paus
  // without its logging output interfering with the logging output
  // that came from the pause.
  if (should_start_concurrent_mark_operation) {
    verifier()->verify_bitmap_clear(true /* above_tams_only */);
    // CAUTION: after the start_concurrent_cycle() call below, the concurrent marking
    // thread(s) could be running concurrently with us. Make sure that anything
    // after this point does not assume that we are the only GC thread running.
@@ -2916,7 +2910,7 @@ void G1CollectedHeap::make_pending_list_reachable() {
    oop pll_head = Universe::reference_pending_list();
    if (pll_head != NULL) {
      // Any valid worker id is fine here as we are in the VM thread and single-threaded.
      _cm->mark_in_next_bitmap(0 /* worker_id */, pll_head);
      _cm->mark_in_bitmap(0 /* worker_id */, pll_head);
    }
  }
}
@@ -2947,9 +2941,8 @@ void G1CollectedHeap::record_obj_copy_mem_stats() {
                                               create_g1_evac_summary(&_old_evac_stats));
}

void G1CollectedHeap::clear_prev_bitmap_for_region(HeapRegion* hr) {
  MemRegion mr(hr->bottom(), hr->end());
  concurrent_mark()->clear_range_in_prev_bitmap(mr);
void G1CollectedHeap::clear_bitmap_for_region(HeapRegion* hr) {
  concurrent_mark()->clear_bitmap_for_region(hr);
}

void G1CollectedHeap::free_region(HeapRegion* hr, FreeRegionList* free_list) {
@@ -2957,10 +2950,6 @@ void G1CollectedHeap::free_region(HeapRegion* hr, FreeRegionList* free_list) {
  assert(!hr->is_empty(), "the region should not be empty");
  assert(_hrm.is_available(hr->hrm_index()), "region should be committed");

  if (G1VerifyBitmaps) {
    clear_prev_bitmap_for_region(hr);
  }

  // Clear the card counts for this region.
  // Note: we only need to do this if the region is not young
  // (since we don't refine cards in young regions).
@@ -3208,7 +3197,6 @@ HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
  if (new_alloc_region != NULL) {
    set_region_short_lived_locked(new_alloc_region);
    _hr_printer.alloc(new_alloc_region, !should_allocate);
    _verifier->check_bitmaps("Mutator Region Allocation", new_alloc_region);
    _policy->remset_tracker()->update_at_allocate(new_alloc_region);
    return new_alloc_region;
  }
@@ -3265,11 +3253,9 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegionA
    if (type.is_survivor()) {
      new_alloc_region->set_survivor();
      _survivor.add(new_alloc_region);
      _verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region);
      register_new_survivor_region_with_region_attr(new_alloc_region);
    } else {
      new_alloc_region->set_old();
      _verifier->check_bitmaps("Old Region Allocation", new_alloc_region);
    }
    _policy->remset_tracker()->update_at_allocate(new_alloc_region);
    register_region_with_region_attr(new_alloc_region);
@@ -3292,7 +3278,7 @@ void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,

  bool const during_im = collector_state()->in_concurrent_start_gc();
  if (during_im && allocated_bytes > 0) {
    _cm->root_regions()->add(alloc_region->next_top_at_mark_start(), alloc_region->top());
    _cm->root_regions()->add(alloc_region->top_at_mark_start(), alloc_region->top());
  }
  _hr_printer.retire(alloc_region);
}
@@ -3313,13 +3299,12 @@ HeapRegion* G1CollectedHeap::alloc_highest_free_region() {

void G1CollectedHeap::mark_evac_failure_object(const oop obj) const {
  // All objects failing evacuation are live. What we'll do is
  // that we'll update the prev marking info so that they are
  // all under PTAMS and explicitly marked.
  _cm->par_mark_in_prev_bitmap(obj);
  // that we'll update the marking info so that they are
  // all below TAMS and explicitly marked.
  _cm->raw_mark_in_bitmap(obj);
}

// Optimized nmethod scanning

class RegisterNMethodOopClosure: public OopClosure {
  G1CollectedHeap* _g1h;
  nmethod* _nm;
src/hotspot/share/gc/g1/g1CollectedHeap.hpp:

@@ -487,7 +487,7 @@ private:
                                    bool* succeeded);
  // Internal helpers used during full GC to split it up to
  // increase readability.
  void abort_concurrent_cycle();
  bool abort_concurrent_cycle();
  void verify_before_full_collection(bool explicit_gc);
  void prepare_heap_for_full_collection();
  void prepare_heap_for_mutators();
@@ -599,7 +599,7 @@ public:
  // for all regions.
  void verify_region_attr_remset_is_tracked() PRODUCT_RETURN;

  void clear_prev_bitmap_for_region(HeapRegion* hr);
  void clear_bitmap_for_region(HeapRegion* hr);

  bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);

@@ -1210,7 +1210,7 @@ public:
  bool check_young_list_empty();
#endif

  bool is_marked_next(oop obj) const;
  bool is_marked(oop obj) const;

  // Determine if an object is dead, given the object and also
  // the region to which the object belongs.
@@ -1219,9 +1219,7 @@ public:
  // Determine if an object is dead, given only the object itself.
  // This will find the region to which the object belongs and
  // then call the region version of the same function.

  // Added if it is NULL it isn't dead.

  // If obj is NULL it is not dead.
  inline bool is_obj_dead(const oop obj) const;

  inline bool is_obj_dead_full(const oop obj, const HeapRegion* hr) const;
src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp:

@@ -29,6 +29,7 @@

#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1EvacFailureRegions.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
@@ -159,8 +160,8 @@ inline G1ScannerTasksQueue* G1CollectedHeap::task_queue(uint i) const {
  return _task_queues->queue(i);
}

inline bool G1CollectedHeap::is_marked_next(oop obj) const {
  return _cm->next_mark_bitmap()->is_marked(obj);
inline bool G1CollectedHeap::is_marked(oop obj) const {
  return _cm->mark_bitmap()->is_marked(obj);
}

inline bool G1CollectedHeap::is_in_cset(oop obj) const {
@@ -221,7 +222,7 @@ inline bool G1CollectedHeap::requires_barriers(stackChunkOop obj) const {
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj, const HeapRegion* hr) const {
  return hr->is_obj_dead(obj, _cm->prev_mark_bitmap());
  return hr->is_obj_dead(obj, hr->parsable_bottom());
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
@@ -232,7 +233,7 @@ inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
}

inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const HeapRegion* hr) const {
  return !is_marked_next(obj) && !hr->is_closed_archive();
  return !is_marked(obj) && !hr->is_closed_archive();
}

inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const {
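With only one bitmap, is_obj_dead can no longer consult a separate "prev" bitmap; the region's parsable_bottom takes over the role that the previous TAMS used to play. A simplified toy model of that predicate (conceptual only; the real HeapRegion::is_obj_dead also has to care about humongous and archive regions, which is ignored here):

#include <cassert>
#include <cstdint>
#include <unordered_set>

// Toy model: addresses are integers, the mark bitmap is a set of marked
// object addresses, parsable_bottom splits the region in two.
struct ToyRegion {
  uint64_t parsable_bottom;              // below this, unmarked objects may be dead
  std::unordered_set<uint64_t> marked;   // stands in for the single mark bitmap

  bool is_obj_dead(uint64_t obj) const {
    if (obj >= parsable_bottom) {
      return false;                      // allocated after marking started: live
    }
    return marked.count(obj) == 0;       // below parsable_bottom: dead unless marked
  }
};

int main() {
  ToyRegion r{/*parsable_bottom=*/100, {16, 48}};
  assert(!r.is_obj_dead(16));   // below pb but marked   -> live
  assert(r.is_obj_dead(40));    // below pb and unmarked -> dead
  assert(!r.is_obj_dead(120));  // at/above pb           -> live by definition
  return 0;
}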
src/hotspot/share/gc/g1/g1CollectionSet.cpp:

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -375,10 +375,10 @@ public:

  virtual bool do_heap_region(HeapRegion* r) {
    assert(r->in_collection_set(), "Region %u should be in collection set", r->hrm_index());
    _st->print_cr(" " HR_FORMAT ", P: " PTR_FORMAT "N: " PTR_FORMAT ", age: %4d",
    _st->print_cr(" " HR_FORMAT ", TAMS: " PTR_FORMAT " PB: " PTR_FORMAT ", age: %4d",
                  HR_FORMAT_PARAMS(r),
                  p2i(r->prev_top_at_mark_start()),
                  p2i(r->next_top_at_mark_start()),
                  p2i(r->top_at_mark_start()),
                  p2i(r->parsable_bottom()),
                  r->has_surv_rate_group() ? r->age_in_surv_rate_group() : -1);
    return false;
  }
src/hotspot/share/gc/g1/g1CollectorState.hpp:

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -42,8 +42,8 @@ class G1CollectorState {
  // pause, it is a suggestion that the pause should start a marking
  // cycle by doing the concurrent start work. However, it is possible
  // that the concurrent marking thread is still finishing up the
  // previous marking cycle (e.g., clearing the next marking
  // bitmap). If that is the case we cannot start a new cycle and
  // previous marking cycle (e.g., clearing the marking bitmap).
  // If that is the case we cannot start a new cycle and
  // we'll have to wait for the concurrent marking thread to finish
  // what it is doing. In this case we will postpone the marking cycle
  // initiation decision for the next pause. When we eventually decide
@@ -64,9 +64,8 @@ class G1CollectorState {
  // of the concurrent start pause to the end of the Cleanup pause.
  bool _mark_or_rebuild_in_progress;

  // The next bitmap is currently being cleared or about to be cleared. TAMS and bitmap
  // may be out of sync.
  bool _clearing_next_bitmap;
  // The marking bitmap is currently being cleared or about to be cleared.
  bool _clearing_bitmap;

  // Set during a full gc pause.
  bool _in_full_gc;
@@ -80,7 +79,7 @@ public:
    _initiate_conc_mark_if_possible(false),

    _mark_or_rebuild_in_progress(false),
    _clearing_next_bitmap(false),
    _clearing_bitmap(false),
    _in_full_gc(false) { }

  // Phase setters
@@ -94,7 +93,7 @@ public:
  void set_initiate_conc_mark_if_possible(bool v) { _initiate_conc_mark_if_possible = v; }

  void set_mark_or_rebuild_in_progress(bool v) { _mark_or_rebuild_in_progress = v; }
  void set_clearing_next_bitmap(bool v) { _clearing_next_bitmap = v; }
  void set_clearing_bitmap(bool v) { _clearing_bitmap = v; }

  // Phase getters
  bool in_young_only_phase() const { return _in_young_only_phase && !_in_full_gc; }
@@ -108,7 +107,7 @@ public:
  bool initiate_conc_mark_if_possible() const { return _initiate_conc_mark_if_possible; }

  bool mark_or_rebuild_in_progress() const { return _mark_or_rebuild_in_progress; }
  bool clearing_next_bitmap() const { return _clearing_next_bitmap; }
  bool clearing_bitmap() const { return _clearing_bitmap; }

  // Calculate GC Pause Type from internal state.
  G1GCPauseType young_gc_pause_type(bool concurrent_operation_is_full_mark) const;
src/hotspot/share/gc/g1/g1ConcurrentMark.cpp:

@@ -33,6 +33,7 @@
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1ConcurrentRebuildAndScrub.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
@@ -362,15 +363,11 @@ bool G1CMRootMemRegions::wait_until_scan_finished() {
}

G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
                                   G1RegionToSpaceMapper* prev_bitmap_storage,
                                   G1RegionToSpaceMapper* next_bitmap_storage) :
                                   G1RegionToSpaceMapper* bitmap_storage) :
  // _cm_thread set inside the constructor
  _g1h(g1h),

  _mark_bitmap_1(),
  _mark_bitmap_2(),
  _prev_mark_bitmap(&_mark_bitmap_1),
  _next_mark_bitmap(&_mark_bitmap_2),
  _mark_bitmap(),

  _heap(_g1h->reserved()),

@@ -419,8 +416,7 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
{
  assert(CGC_lock != NULL, "CGC_lock must be initialized");

  _mark_bitmap_1.initialize(g1h->reserved(), prev_bitmap_storage);
  _mark_bitmap_2.initialize(g1h->reserved(), next_bitmap_storage);
  _mark_bitmap.initialize(g1h->reserved(), bitmap_storage);

  // Create & start ConcurrentMark thread.
  _cm_thread = new G1ConcurrentMarkThread(this);
@@ -467,7 +463,7 @@ void G1ConcurrentMark::reset() {
  // Reset all tasks, since different phases will use different number of active
  // threads. So, it's easiest to have all of them ready.
  for (uint i = 0; i < _max_num_tasks; ++i) {
    _tasks[i]->reset(_next_mark_bitmap);
    _tasks[i]->reset(mark_bitmap());
  }

  uint max_reserved_regions = _g1h->max_reserved_regions();
@@ -500,18 +496,11 @@ void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
  }
}

static void clear_mark_if_set(G1CMBitMap* bitmap, HeapWord* addr) {
  if (bitmap->is_marked(addr)) {
    bitmap->clear(addr);
  }
}

void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
  assert_at_safepoint();

  // Need to clear all mark bits of the humongous object.
  clear_mark_if_set(_prev_mark_bitmap, r->bottom());
  clear_mark_if_set(_next_mark_bitmap, r->bottom());
  // Need to clear mark bit of the humongous object. Doing this unconditionally is fine.
  mark_bitmap()->clear(r->bottom());

  if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
    return;
@@ -587,7 +576,7 @@ public:
  static size_t chunk_size() { return M; }

private:
  // Heap region closure used for clearing the _next_mark_bitmap.
  // Heap region closure used for clearing the _mark_bitmap.
  class G1ClearBitmapHRClosure : public HeapRegionClosure {
  private:
    G1ConcurrentMark* _cm;
@@ -611,18 +600,16 @@ private:
    }

    HeapWord* region_clear_limit(HeapRegion* r) {
      // During a Concurrent Undo Mark cycle, the _next_mark_bitmap is cleared
      // without swapping with the _prev_mark_bitmap. Therefore, the per region
      // next_top_at_mark_start and live_words data are current wrt
      // _next_mark_bitmap. We use this information to only clear ranges of the
      // bitmap that require clearing.
      // During a Concurrent Undo Mark cycle, the per region top_at_mark_start and
      // live_words data are current wrt to the _mark_bitmap. We use this information
      // to only clear ranges of the bitmap that require clearing.
      if (is_clear_concurrent_undo()) {
        // No need to clear bitmaps for empty regions.
        if (_cm->live_words(r->hrm_index()) == 0) {
          assert(_bitmap->get_next_marked_addr(r->bottom(), r->end()) == r->end(), "Should not have marked bits");
          return r->bottom();
        }
        assert(_bitmap->get_next_marked_addr(r->next_top_at_mark_start(), r->end()) == r->end(), "Should not have marked bits above ntams");
        assert(_bitmap->get_next_marked_addr(r->top_at_mark_start(), r->end()) == r->end(), "Should not have marked bits above tams");
      }
      return r->end();
    }
@@ -631,7 +618,7 @@ private:
    G1ClearBitmapHRClosure(G1ConcurrentMark* cm, bool suspendible) :
      HeapRegionClosure(),
      _cm(cm),
      _bitmap(cm->next_mark_bitmap()),
      _bitmap(cm->mark_bitmap()),
      _suspendible(suspendible)
    { }

@@ -666,6 +653,8 @@ private:
      }
      assert(cur >= end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

      r->note_end_of_clearing();

      return false;
    }
  };
@@ -692,7 +681,7 @@ public:
  }
};

void G1ConcurrentMark::clear_next_bitmap(WorkerThreads* workers, bool may_yield) {
void G1ConcurrentMark::clear_bitmap(WorkerThreads* workers, bool may_yield) {
  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

  size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
@@ -718,21 +707,21 @@ void G1ConcurrentMark::cleanup_for_next_mark() {
  // is the case.
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");

  clear_next_bitmap(_concurrent_workers, true);
  clear_bitmap(_concurrent_workers, true);

  // Repeat the asserts from above.
  guarantee(cm_thread()->in_progress(), "invariant");
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
}

void G1ConcurrentMark::clear_next_bitmap(WorkerThreads* workers) {
void G1ConcurrentMark::clear_bitmap(WorkerThreads* workers) {
  assert_at_safepoint_on_vm_thread();
  // To avoid fragmentation the full collection requesting to clear the bitmap
  // might use fewer workers than available. To ensure the bitmap is cleared
  // as efficiently as possible the number of active workers are temporarily
  // increased to include all currently created workers.
  WithActiveWorkers update(workers, workers->created_workers());
  clear_next_bitmap(workers, false);
  clear_bitmap(workers, false);
}

class G1PreConcurrentStartTask : public G1BatchedTask {
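clear_bitmap processes the bitmap in chunks so that the concurrent case (may_yield true) can stop between chunks, for example at a safepoint request. A minimal standalone sketch of that chunk-and-maybe-yield structure (the chunk size and the should_yield callback are placeholders, not the real G1 values):

#include <algorithm>
#include <cstddef>
#include <functional>
#include <vector>

// Clear `bits` chunk by chunk; between chunks optionally poll should_yield()
// and stop early. Returns true if the whole bitmap was cleared.
bool clear_in_chunks(std::vector<bool>& bits, size_t chunk_size, bool may_yield,
                     const std::function<bool()>& should_yield) {
  for (size_t start = 0; start < bits.size(); start += chunk_size) {
    size_t end = std::min(start + chunk_size, bits.size());
    for (size_t i = start; i < end; ++i) {
      bits[i] = false;
    }
    if (may_yield && should_yield()) {
      return false;  // caller resumes or redoes the clearing later
    }
  }
  return true;
}

int main() {
  std::vector<bool> bitmap(1 << 20, true);
  bool done = clear_in_chunks(bitmap, 64 * 1024, /*may_yield=*/true,
                              [] { return false; });  // never asked to yield here
  return done ? 0 : 1;
}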
@@ -955,10 +944,10 @@ void G1ConcurrentMark::scan_root_region(const MemRegion* region, uint worker_id)
#ifdef ASSERT
  HeapWord* last = region->last();
  HeapRegion* hr = _g1h->heap_region_containing(last);
  assert(hr->is_old() || hr->next_top_at_mark_start() == hr->bottom(),
  assert(hr->is_old() || hr->top_at_mark_start() == hr->bottom(),
         "Root regions must be old or survivor/eden but region %u is %s", hr->hrm_index(), hr->get_type_str());
  assert(hr->next_top_at_mark_start() == region->start(),
         "MemRegion start should be equal to nTAMS");
  assert(hr->top_at_mark_start() == region->start(),
         "MemRegion start should be equal to TAMS");
#endif

  G1RootRegionScanClosure cl(_g1h, this, worker_id);
@@ -1026,7 +1015,7 @@ void G1ConcurrentMark::concurrent_cycle_start() {
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->collector_state()->set_clearing_next_bitmap(false);
  _g1h->collector_state()->set_clearing_bitmap(false);

  _g1h->trace_heap_after_gc(_gc_tracer_cm);

@@ -1061,11 +1050,23 @@ void G1ConcurrentMark::mark_from_roots() {
  print_stats();
}

void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
const char* G1ConcurrentMark::verify_location_string(VerifyLocation location) {
  static const char* location_strings[] = { "Remark Before",
                                            "Remark After",
                                            "Remark Overflow",
                                            "Cleanup Before",
                                            "Cleanup After" };
  return location_strings[static_cast<std::underlying_type_t<VerifyLocation>>(location)];
}

void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type,
                                           VerifyLocation location) {
  G1HeapVerifier* verifier = _g1h->verifier();

  verifier->verify_region_sets_optional();

  const char* caller = verify_location_string(location);

  if (VerifyDuringGC) {
    GCTraceTime(Debug, gc, phases) debug(caller, _gc_timer_cm);

@@ -1073,10 +1074,14 @@ void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, Ve
    char buffer[BufLen];

    jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
    verifier->verify(type, vo, buffer);
  }
    verifier->verify(type, VerifyOption::G1UseConcMarking, buffer);

  verifier->check_bitmaps(caller);
    // Only check bitmap in Remark, and not at After-Verification because the regions
    // already have their TAMS'es reset.
    if (location != VerifyLocation::RemarkAfter) {
      verifier->verify_bitmap_clear(true /* above_tams_only */);
    }
  }
}

class G1UpdateRemSetTrackingBeforeRebuildTask : public WorkerTask {
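The new VerifyLocation enum plus verify_location_string replace the free-form caller strings that used to be passed around. In isolation the pattern is an enum class indexing a parallel string table through std::underlying_type_t; a tiny self-contained example with a made-up enum for illustration:

#include <cassert>
#include <cstring>
#include <type_traits>

enum class Phase { Before, After, Overflow };

const char* phase_string(Phase p) {
  // Order must match the enumerators above.
  static const char* strings[] = { "Before", "After", "Overflow" };
  return strings[static_cast<std::underlying_type_t<Phase>>(p)];
}

int main() {
  assert(std::strcmp(phase_string(Phase::After), "After") == 0);
  return 0;
}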
@@ -1116,7 +1121,7 @@ class G1UpdateRemSetTrackingBeforeRebuildTask : public WorkerTask {
  // note end of marking.
  void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
    uint const region_idx = hr->hrm_index();
    size_t const obj_size_in_words = (size_t)cast_to_oop(hr->bottom())->size();
    size_t const obj_size_in_words = cast_to_oop(hr->bottom())->size();
    uint const num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words);

    // "Distributing" zero words means that we only note end of marking for these
@@ -1159,9 +1164,8 @@ class G1UpdateRemSetTrackingBeforeRebuildTask : public WorkerTask {
  }

  void add_marked_bytes_and_note_end(HeapRegion* hr, size_t marked_bytes) {
    hr->add_to_marked_bytes(marked_bytes);
    hr->note_end_of_marking(marked_bytes);
    _cl->do_heap_region(hr);
    hr->note_end_of_marking();
  }

public:
@@ -1195,12 +1199,17 @@ public:
  static const uint RegionsPerThread = 384;
};

class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
class G1UpdateRegionsAfterRebuild : public HeapRegionClosure {
  G1CollectedHeap* _g1h;

public:
  G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }
  G1UpdateRegionsAfterRebuild(G1CollectedHeap* g1h) :
    _g1h(g1h) {
  }

  virtual bool do_heap_region(HeapRegion* r) {
    // Update the remset tracking state from updating to complete
    // if remembered sets have been rebuilt.
    _g1h->policy()->remset_tracker()->update_after_rebuild(r);
    return false;
  }
@@ -1220,7 +1229,7 @@ void G1ConcurrentMark::remark() {

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption::G1UsePrevMarking, "Remark before");
  verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyLocation::RemarkBefore);

  {
    GCTraceTime(Debug, gc, phases) debug("Finalize Marking", _gc_timer_cm);
@@ -1245,10 +1254,10 @@ void G1ConcurrentMark::remark() {
      flush_all_task_caches();
    }

    // Install newly created mark bitmap as "prev".
    swap_mark_bitmaps();
    // All marking completed. Check bitmap now as we will start to reset TAMSes
    // in parallel below so that we can not do this in the After-Remark verification.
    _g1h->verifier()->verify_bitmap_clear(true /* above_tams_only */);

    _g1h->collector_state()->set_clearing_next_bitmap(true);
    {
      GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);

@@ -1281,16 +1290,16 @@ void G1ConcurrentMark::remark() {

    compute_new_sizes();

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption::G1UsePrevMarking, "Remark after");
    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyLocation::RemarkAfter);

    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    // Completely reset the marking state (except bitmaps) since marking completed.
    reset_at_marking_complete();
  } else {
    // We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption::G1UsePrevMarking, "Remark overflow");
    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyLocation::RemarkOverflow);

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
@@ -1340,7 +1349,7 @@ class G1ReclaimEmptyRegionsTask : public WorkerTask {
  const uint humongous_regions_removed() { return _humongous_regions_removed; }

  bool do_heap_region(HeapRegion *hr) {
    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_closed_archive()) {
    if (hr->used() > 0 && hr->live_bytes() == 0 && !hr->is_young() && !hr->is_closed_archive()) {
      log_trace(gc)("Reclaimed empty old gen region %u (%s) bot " PTR_FORMAT,
                    hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
      _freed_bytes += hr->used();
@@ -1436,17 +1445,19 @@ void G1ConcurrentMark::cleanup() {

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption::G1UsePrevMarking, "Cleanup before");
  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyLocation::CleanupBefore);

  if (needs_remembered_set_rebuild()) {
    // Update the remset tracking information as well as marking all regions
    // as fully parsable.
    GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
    G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
    G1UpdateRegionsAfterRebuild cl(_g1h);
    _g1h->heap_region_iterate(&cl);
  } else {
    log_debug(gc, phases)("No Remembered Sets to update after rebuild");
  }

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption::G1UsePrevMarking, "Cleanup after");
  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyLocation::CleanupAfter);

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for Cleanup to finish.
@@ -1711,8 +1722,6 @@ void G1ConcurrentMark::preclean() {
                                      _gc_timer_cm);
}

// When sampling object counts, we already swapped the mark bitmaps, so we need to use
// the prev bitmap determining liveness.
class G1ObjectCountIsAliveClosure: public BoolObjectClosure {
  G1CollectedHeap* _g1h;
public:
@@ -1726,7 +1735,7 @@ public:

void G1ConcurrentMark::report_object_count(bool mark_completed) {
  // Depending on the completion of the marking liveness needs to be determined
  // using either the next or prev bitmap.
  // using either the bitmap or after the cycle using the scrubbing information.
  if (mark_completed) {
    G1ObjectCountIsAliveClosure is_alive(_g1h);
    _gc_tracer_cm->report_object_count_after_gc(&is_alive);
@@ -1736,13 +1745,6 @@ void G1ConcurrentMark::report_object_count(bool mark_completed) {
  }
}


void G1ConcurrentMark::swap_mark_bitmaps() {
  G1CMBitMap* temp = _prev_mark_bitmap;
  _prev_mark_bitmap = _next_mark_bitmap;
  _next_mark_bitmap = temp;
}

// Closure for marking entries in SATB buffers.
class G1CMSATBBufferClosure : public SATBBufferClosure {
private:
@@ -1874,12 +1876,13 @@ void G1ConcurrentMark::flush_all_task_caches() {
                       hits, misses, percent_of(hits, sum));
}

void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) {
  _prev_mark_bitmap->clear_range(mr);
void G1ConcurrentMark::clear_bitmap_for_region(HeapRegion* hr) {
  assert_at_safepoint();
  _mark_bitmap.clear_range(MemRegion(hr->bottom(), hr->end()));
  hr->note_end_of_clearing();
}

HeapRegion*
G1ConcurrentMark::claim_region(uint worker_id) {
HeapRegion* G1ConcurrentMark::claim_region(uint worker_id) {
  // "checkpoint" the finger
  HeapWord* finger = _finger;

@@ -1897,8 +1900,8 @@ G1ConcurrentMark::claim_region(uint worker_id) {
    HeapWord* res = Atomic::cmpxchg(&_finger, finger, end);
    if (res == finger && curr_region != NULL) {
      // we succeeded
      HeapWord* bottom = curr_region->bottom();
      HeapWord* limit = curr_region->next_top_at_mark_start();
      HeapWord* bottom = curr_region->bottom();
      HeapWord* limit = curr_region->top_at_mark_start();

      // notice that _finger == end cannot be guaranteed here since,
      // someone else might have moved the finger even further
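claim_region hands out regions to marking workers by advancing a shared "finger" with a compare-and-swap; a worker owns a region only if its CAS on the finger succeeds. A compact standalone sketch of that claiming protocol (using std::atomic and plain region indices rather than HotSpot's Atomic::cmpxchg on heap addresses):

#include <atomic>
#include <cstdio>

std::atomic<int> finger{0};   // next unclaimed region index
const int num_regions = 8;

// Returns a claimed region index, or -1 when all regions are taken.
int claim_region() {
  int cur = finger.load();
  while (cur < num_regions) {
    // Only the worker whose CAS succeeds owns region `cur`; on failure,
    // compare_exchange_weak reloads `cur` and we retry.
    if (finger.compare_exchange_weak(cur, cur + 1)) {
      return cur;
    }
  }
  return -1;
}

int main() {
  for (int r = claim_region(); r != -1; r = claim_region()) {
    std::printf("claimed region %d\n", r);
  }
  return 0;
}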
@@ -1995,14 +1998,12 @@ void G1ConcurrentMark::verify_no_collection_set_oops() {
}
#endif // PRODUCT

void G1ConcurrentMark::rebuild_rem_set_concurrently() {
  // If Remark did not select any regions for RemSet rebuild,
  // skip the rebuild remembered set phase
void G1ConcurrentMark::rebuild_and_scrub() {
  if (!needs_remembered_set_rebuild()) {
    log_debug(gc, marking)("Skipping Remembered Set Rebuild. No regions selected for rebuild");
    return;
    log_debug(gc, marking)("Skipping Remembered Set Rebuild. No regions selected for rebuild, will only scrub");
  }
  _g1h->rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset);

  G1ConcurrentRebuildAndScrub::rebuild_and_scrub(this, needs_remembered_set_rebuild(), _concurrent_workers);
}

void G1ConcurrentMark::print_stats() {
@@ -2016,7 +2017,7 @@ void G1ConcurrentMark::print_stats() {
  }
}

void G1ConcurrentMark::concurrent_cycle_abort() {
bool G1ConcurrentMark::concurrent_cycle_abort() {
  // We haven't started a concurrent cycle no need to do anything; we might have
  // aborted the marking because of shutting down though. In this case the marking
  // might have already completed the abort (leading to in_progress() below to
@@ -2027,19 +2028,9 @@ void G1ConcurrentMark::concurrent_cycle_abort() {
  // has been signalled is already rare), and this work should be negligible compared
  // to actual full gc work.
  if (!cm_thread()->in_progress() && !_g1h->concurrent_mark_is_terminating()) {
    return;
    return false;
  }

  // Clear all marks in the next bitmap for this full gc as it has been used by the
  // marking that is interrupted by this full gc.
  {
    GCTraceTime(Debug, gc) debug("Clear Next Bitmap");
    clear_next_bitmap(_g1h->workers());
  }
  // Note we cannot clear the previous marking bitmap here
  // since VerifyDuringGC verifies the objects marked during
  // a full GC against the previous bitmap.

  // Empty mark stack
  reset_marking_for_restart();
  for (uint i = 0; i < _max_num_tasks; ++i) {
@@ -2052,9 +2043,9 @@ void G1ConcurrentMark::concurrent_cycle_abort() {
  satb_mq_set.abandon_partial_marking();
  // This can be called either during or outside marking, we'll read
  // the expected_active value from the SATB queue set.
  satb_mq_set.set_active_all_threads(
    false, /* new active value */
    satb_mq_set.is_active() /* expected_active */);
  satb_mq_set.set_active_all_threads(false, /* new active value */
                                     satb_mq_set.is_active() /* expected_active */);
  return true;
}

void G1ConcurrentMark::abort_marking_threads() {
@@ -2102,10 +2093,8 @@ void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
}

void G1ConcurrentMark::print_on_error(outputStream* st) const {
  st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
               p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap));
  _prev_mark_bitmap->print_on_error(st, " Prev Bits: ");
  _next_mark_bitmap->print_on_error(st, " Next Bits: ");
  st->print_cr("Marking Bits: (CMBitMap*) " PTR_FORMAT, p2i(mark_bitmap()));
  _mark_bitmap.print_on_error(st, " Bits: ");
}

static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
@@ -2129,9 +2118,9 @@ void G1CMTask::setup_for_region(HeapRegion* hr) {
}

void G1CMTask::update_region_limit() {
  HeapRegion* hr = _curr_region;
  HeapWord* bottom = hr->bottom();
  HeapWord* limit = hr->next_top_at_mark_start();
  HeapRegion* hr = _curr_region;
  HeapWord* bottom = hr->bottom();
  HeapWord* limit = hr->top_at_mark_start();

  if (limit == bottom) {
    // The region was collected underneath our feet.
@@ -2145,10 +2134,10 @@ void G1CMTask::update_region_limit() {
  } else {
    assert(limit < _region_limit, "only way to get here");
    // This can happen under some pretty unusual circumstances. An
    // evacuation pause empties the region underneath our feet (NTAMS
    // at bottom). We then do some allocation in the region (NTAMS
    // evacuation pause empties the region underneath our feet (TAMS
    // at bottom). We then do some allocation in the region (TAMS
    // stays at bottom), followed by the region being used as a GC
    // alloc region (NTAMS will move to top() and the objects
    // alloc region (TAMS will move to top() and the objects
    // originally below it will be grayed). All objects now marked in
    // the region are explicitly grayed, if below the global finger,
    // and we do not need in fact to scan anything else. So, we simply
@@ -2182,9 +2171,9 @@ void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
  _cm_oop_closure = cm_oop_closure;
}

void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
  guarantee(next_mark_bitmap != NULL, "invariant");
  _next_mark_bitmap = next_mark_bitmap;
void G1CMTask::reset(G1CMBitMap* mark_bitmap) {
  guarantee(mark_bitmap != NULL, "invariant");
  _mark_bitmap = mark_bitmap;
  clear_region_fields();

  _calls = 0;
@@ -2668,7 +2657,7 @@ void G1CMTask::do_marking_step(double time_target_ms,
        giveup_current_region();
        abort_marking_if_regular_check_fail();
      } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
        if (_next_mark_bitmap->is_marked(mr.start())) {
        if (_mark_bitmap->is_marked(mr.start())) {
          // The object is marked - apply the closure
          bitmap_closure.do_addr(mr.start());
        }
@@ -2676,7 +2665,7 @@ void G1CMTask::do_marking_step(double time_target_ms,
        // we can (and should) give up the current region.
        giveup_current_region();
        abort_marking_if_regular_check_fail();
      } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) {
      } else if (_mark_bitmap->iterate(&bitmap_closure, mr)) {
        giveup_current_region();
        abort_marking_if_regular_check_fail();
      } else {
@@ -2894,7 +2883,7 @@ G1CMTask::G1CMTask(uint worker_id,
  _worker_id(worker_id),
  _g1h(G1CollectedHeap::heap()),
  _cm(cm),
  _next_mark_bitmap(NULL),
  _mark_bitmap(NULL),
  _task_queue(task_queue),
  _mark_stats_cache(mark_stats, G1RegionMarkStatsCache::RegionMarkStatsCacheSize),
  _calls(0),
@@ -2960,9 +2949,11 @@ G1CMTask::G1CMTask(uint worker_id,
#define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"

G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) :
  _total_used_bytes(0), _total_capacity_bytes(0),
  _total_prev_live_bytes(0), _total_next_live_bytes(0),
  _total_remset_bytes(0), _total_code_roots_bytes(0)
  _total_used_bytes(0),
  _total_capacity_bytes(0),
  _total_live_bytes(0),
  _total_remset_bytes(0),
  _total_code_roots_bytes(0)
{
  if (!log_is_enabled(Trace, gc, liveness)) {
    return;
@@ -2985,26 +2976,24 @@ G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* p
                          G1PPRL_ADDR_BASE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_GCEFF_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_STATE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT,
                          "type", "address-range",
                          "used", "prev-live", "next-live", "gc-eff",
                          "used", "live", "gc-eff",
                          "remset", "state", "code-roots");
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_H_FORMAT
                          G1PPRL_ADDR_BASE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_GCEFF_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_STATE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT,
                          "", "",
                          "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
                          "(bytes)", "(bytes)", "(bytes/ms)",
                          "(bytes)", "", "(bytes)");
}

@@ -3018,8 +3007,7 @@ bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) {
  HeapWord* end = r->end();
  size_t capacity_bytes = r->capacity();
  size_t used_bytes = r->used();
  size_t prev_live_bytes = r->live_bytes();
  size_t next_live_bytes = r->next_live_bytes();
  size_t live_bytes = r->live_bytes();
  double gc_eff = r->gc_efficiency();
  size_t remset_bytes = r->rem_set()->mem_size();
  size_t code_roots_bytes = r->rem_set()->code_roots_mem_size();
@@ -3028,8 +3016,7 @@ bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) {

  _total_used_bytes += used_bytes;
  _total_capacity_bytes += capacity_bytes;
  _total_prev_live_bytes += prev_live_bytes;
  _total_next_live_bytes += next_live_bytes;
  _total_live_bytes += live_bytes;
  _total_remset_bytes += remset_bytes;
  _total_code_roots_bytes += code_roots_bytes;

@@ -3045,13 +3032,12 @@ bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) {
                        G1PPRL_ADDR_BASE_FORMAT
                        G1PPRL_BYTE_FORMAT
                        G1PPRL_BYTE_FORMAT
                        G1PPRL_BYTE_FORMAT
                        G1PPRL_GCEFF_FORMAT
                        G1PPRL_BYTE_FORMAT
                        G1PPRL_STATE_FORMAT
                        G1PPRL_BYTE_FORMAT,
                        type, p2i(bottom), p2i(end),
                        used_bytes, prev_live_bytes, next_live_bytes, gc_efficiency.buffer(),
                        used_bytes, live_bytes, gc_efficiency.buffer(),
                        remset_bytes, remset_type, code_roots_bytes);

  return false;
@@ -3070,17 +3056,14 @@ G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
                         " SUMMARY"
                         G1PPRL_SUM_MB_FORMAT("capacity")
                         G1PPRL_SUM_MB_PERC_FORMAT("used")
                         G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
                         G1PPRL_SUM_MB_PERC_FORMAT("next-live")
                         G1PPRL_SUM_MB_PERC_FORMAT("live")
                         G1PPRL_SUM_MB_FORMAT("remset")
                         G1PPRL_SUM_MB_FORMAT("code-roots"),
                         bytes_to_mb(_total_capacity_bytes),
                         bytes_to_mb(_total_used_bytes),
                         percent_of(_total_used_bytes, _total_capacity_bytes),
                         bytes_to_mb(_total_prev_live_bytes),
                         percent_of(_total_prev_live_bytes, _total_capacity_bytes),
                         bytes_to_mb(_total_next_live_bytes),
                         percent_of(_total_next_live_bytes, _total_capacity_bytes),
                         bytes_to_mb(_total_live_bytes),
                         percent_of(_total_live_bytes, _total_capacity_bytes),
                         bytes_to_mb(_total_remset_bytes),
                         bytes_to_mb(_total_code_roots_bytes));
}
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -216,7 +216,7 @@ private:
|
||||
// Root MemRegions are memory areas that contain objects which references are
|
||||
// roots wrt to the marking. They must be scanned before marking to maintain the
|
||||
// SATB invariant.
|
||||
// Typically they contain the areas from nTAMS to top of the regions.
|
||||
// Typically they contain the areas from TAMS to top of the regions.
|
||||
// We could scan and mark through these objects during the concurrent start pause,
|
||||
// but for pause time reasons we move this work to the concurrent phase.
|
||||
// We need to complete this procedure before the next GC because it might determine
|
||||
@ -292,10 +292,7 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
|
||||
G1CollectedHeap* _g1h; // The heap
|
||||
|
||||
// Concurrent marking support structures
|
||||
G1CMBitMap _mark_bitmap_1;
|
||||
G1CMBitMap _mark_bitmap_2;
|
||||
G1CMBitMap* _prev_mark_bitmap; // Completed mark bitmap
|
||||
G1CMBitMap* _next_mark_bitmap; // Under-construction mark bitmap
|
||||
G1CMBitMap _mark_bitmap;
|
||||
|
||||
// Heap bounds
|
||||
MemRegion const _heap;
|
||||
@ -359,7 +356,16 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
|
||||
uint _num_concurrent_workers; // The number of marking worker threads we're using
|
||||
uint _max_concurrent_workers; // Maximum number of marking worker threads
|
||||
|
||||
void verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller);
|
||||
enum class VerifyLocation {
|
||||
RemarkBefore,
|
||||
RemarkAfter,
|
||||
RemarkOverflow,
|
||||
CleanupBefore,
|
||||
CleanupAfter
|
||||
};
|
||||
static const char* verify_location_string(VerifyLocation location);
|
||||
void verify_during_pause(G1HeapVerifier::G1VerifyType type,
|
||||
VerifyLocation location);
|
||||
|
||||
void finalize_marking();
|
||||
|
||||
@ -443,7 +449,7 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
|
||||
|
||||
// Clear the next marking bitmap in parallel using the given WorkerThreads. If may_yield is
|
||||
// true, periodically insert checks to see if this method should exit prematurely.
|
||||
void clear_next_bitmap(WorkerThreads* workers, bool may_yield);
|
||||
void clear_bitmap(WorkerThreads* workers, bool may_yield);
|
||||
|
||||
// Region statistics gathered during marking.
|
||||
G1RegionMarkStats* _region_mark_stats;
|
||||
@ -455,9 +461,11 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
|
||||
// True when Remark pause selected regions for rebuilding.
|
||||
bool _needs_remembered_set_rebuild;
|
||||
public:
|
||||
// To be called when an object is marked the first time, e.g. after a successful
|
||||
// mark_in_bitmap call. Updates various statistics data.
|
||||
void add_to_liveness(uint worker_id, oop const obj, size_t size);
|
||||
// Live words in the given region as determined by concurrent marking, i.e. the amount of
|
||||
// live words between bottom and nTAMS.
|
||||
// live words between bottom and TAMS.
|
||||
size_t live_words(uint region) const { return _region_mark_stats[region]._live_words; }
|
||||
// Returns the liveness value in bytes.
|
||||
size_t live_bytes(uint region) const { return live_words(region) * HeapWordSize; }
|
||||
@ -493,7 +501,7 @@ public:
|
||||
|
||||
void concurrent_cycle_start();
|
||||
// Abandon current marking iteration due to a Full GC.
|
||||
void concurrent_cycle_abort();
|
||||
bool concurrent_cycle_abort();
|
||||
void concurrent_cycle_end();
|
||||
|
||||
// Notifies marking threads to abort. This is a best-effort notification. Does not
|
||||
@ -516,14 +524,12 @@ public:
bool try_stealing(uint worker_id, G1TaskQueueEntry& task_entry);

G1ConcurrentMark(G1CollectedHeap* g1h,
G1RegionToSpaceMapper* prev_bitmap_storage,
G1RegionToSpaceMapper* next_bitmap_storage);
G1RegionToSpaceMapper* bitmap_storage);
~G1ConcurrentMark();

G1ConcurrentMarkThread* cm_thread() { return _cm_thread; }

const G1CMBitMap* const prev_mark_bitmap() const { return _prev_mark_bitmap; }
G1CMBitMap* next_mark_bitmap() const { return _next_mark_bitmap; }
G1CMBitMap* mark_bitmap() const { return (G1CMBitMap*)&_mark_bitmap; }

// Calculates the number of concurrent GC threads to be used in the marking phase.
uint calc_active_marking_workers();
@ -540,7 +546,7 @@ public:
|
||||
void cleanup_for_next_mark();
|
||||
|
||||
// Clear the next marking bitmap during safepoint.
|
||||
void clear_next_bitmap(WorkerThreads* workers);
|
||||
void clear_bitmap(WorkerThreads* workers);
|
||||
|
||||
// These two methods do the work that needs to be done at the start and end of the
|
||||
// concurrent start pause.
|
||||
@ -563,19 +569,16 @@ public:
|
||||
|
||||
void remark();
|
||||
|
||||
void swap_mark_bitmaps();
|
||||
|
||||
void cleanup();
|
||||
// Mark in the previous bitmap. Caution: the prev bitmap is usually read-only, so use
|
||||
// this carefully.
|
||||
inline void par_mark_in_prev_bitmap(oop p);
|
||||
|
||||
// Clears marks for all objects in the given range, for the prev or
|
||||
// next bitmaps. Caution: the previous bitmap is usually
|
||||
// read-only, so use this carefully!
|
||||
void clear_range_in_prev_bitmap(MemRegion mr);
|
||||
// Mark in the marking bitmap. Used during evacuation failure to
|
||||
// remember what objects need handling. Not for use during marking.
|
||||
inline void raw_mark_in_bitmap(oop p);
|
||||
|
||||
inline bool is_marked_in_prev_bitmap(oop p) const;
|
||||
// Clears marks for all objects in the given region in the marking
|
||||
// bitmap. This should only be used clean the bitmap during a
|
||||
// safepoint.
|
||||
void clear_bitmap_for_region(HeapRegion* hr);
|
||||
|
||||
// Verify that there are no collection set oops on the stacks (taskqueues /
|
||||
// global mark stack) and fingers (global / per-task).
|
||||
@ -592,17 +595,18 @@ public:

void print_on_error(outputStream* st) const;

// Mark the given object on the next bitmap if it is below nTAMS.
inline bool mark_in_next_bitmap(uint worker_id, HeapRegion* const hr, oop const obj);
inline bool mark_in_next_bitmap(uint worker_id, oop const obj);
// Mark the given object on the marking bitmap if it is below TAMS.
inline bool mark_in_bitmap(uint worker_id, HeapRegion* const hr, oop const obj);
inline bool mark_in_bitmap(uint worker_id, oop const obj);

inline bool is_marked_in_next_bitmap(oop p) const;
inline bool is_marked_in_bitmap(oop p) const;

ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }

private:
// Rebuilds the remembered sets for chosen regions in parallel and concurrently to the application.
void rebuild_rem_set_concurrently();
// Rebuilds the remembered sets for chosen regions in parallel and concurrently
// to the application. Also scrubs dead objects to ensure region is parsable.
void rebuild_and_scrub();

uint needs_remembered_set_rebuild() const { return _needs_remembered_set_rebuild; }
@ -627,7 +631,7 @@ private:
|
||||
uint _worker_id;
|
||||
G1CollectedHeap* _g1h;
|
||||
G1ConcurrentMark* _cm;
|
||||
G1CMBitMap* _next_mark_bitmap;
|
||||
G1CMBitMap* _mark_bitmap;
|
||||
// the task queue of this task
|
||||
G1CMTaskQueue* _task_queue;
|
||||
|
||||
@ -733,7 +737,7 @@ public:
|
||||
// scanned.
|
||||
inline size_t scan_objArray(objArrayOop obj, MemRegion mr);
|
||||
// Resets the task; should be called right at the beginning of a marking phase.
|
||||
void reset(G1CMBitMap* next_mark_bitmap);
|
||||
void reset(G1CMBitMap* mark_bitmap);
|
||||
// Clears all the fields that correspond to a claimed region.
|
||||
void clear_region_fields();
|
||||
|
||||
@ -777,14 +781,14 @@ public:
|
||||
void increment_refs_reached() { ++_refs_reached; }
|
||||
|
||||
// Grey the object by marking it. If not already marked, push it on
|
||||
// the local queue if below the finger. obj is required to be below its region's NTAMS.
|
||||
// the local queue if below the finger. obj is required to be below its region's TAMS.
|
||||
// Returns whether there has been a mark to the bitmap.
|
||||
inline bool make_reference_grey(oop obj);
|
||||
|
||||
// Grey the object (by calling make_grey_reference) if required,
|
||||
// e.g. obj is below its containing region's NTAMS.
|
||||
// e.g. obj is below its containing region's TAMS.
|
||||
// Precondition: obj is a valid heap object.
|
||||
// Returns true if the reference caused a mark to be set in the next bitmap.
|
||||
// Returns true if the reference caused a mark to be set in the marking bitmap.
|
||||
template <class T>
|
||||
inline bool deal_with_reference(T* p);
|
||||
|
||||
@ -841,8 +845,7 @@ class G1PrintRegionLivenessInfoClosure : public HeapRegionClosure {
|
||||
// Accumulators for these values.
|
||||
size_t _total_used_bytes;
|
||||
size_t _total_capacity_bytes;
|
||||
size_t _total_prev_live_bytes;
|
||||
size_t _total_next_live_bytes;
|
||||
size_t _total_live_bytes;
|
||||
|
||||
// Accumulator for the remembered set size
|
||||
size_t _total_remset_bytes;
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -50,7 +50,7 @@ inline bool G1CMIsAliveClosure::do_object_b(oop obj) {
|
||||
|
||||
HeapRegion* hr = _g1h->heap_region_containing(cast_from_oop<HeapWord*>(obj));
|
||||
// All objects allocated since the start of marking are considered live.
|
||||
if (hr->obj_allocated_since_next_marking(obj)) {
|
||||
if (hr->obj_allocated_since_marking_start(obj)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -60,7 +60,7 @@ inline bool G1CMIsAliveClosure::do_object_b(oop obj) {
|
||||
}
|
||||
|
||||
// All objects that are marked are live.
|
||||
return _g1h->is_marked_next(obj);
|
||||
return _g1h->is_marked(obj);
|
||||
}
|
||||
|
||||
inline bool G1CMSubjectToDiscoveryClosure::do_object_b(oop obj) {
@ -75,24 +75,24 @@ inline bool G1CMSubjectToDiscoveryClosure::do_object_b(oop obj) {
return _g1h->heap_region_containing(obj)->is_old_or_humongous_or_archive();
}

inline bool G1ConcurrentMark::mark_in_next_bitmap(uint const worker_id, oop const obj) {
inline bool G1ConcurrentMark::mark_in_bitmap(uint const worker_id, oop const obj) {
HeapRegion* const hr = _g1h->heap_region_containing(obj);
return mark_in_next_bitmap(worker_id, hr, obj);
return mark_in_bitmap(worker_id, hr, obj);
}

inline bool G1ConcurrentMark::mark_in_next_bitmap(uint const worker_id, HeapRegion* const hr, oop const obj) {
inline bool G1ConcurrentMark::mark_in_bitmap(uint const worker_id, HeapRegion* const hr, oop const obj) {
assert(hr != NULL, "just checking");
assert(hr->is_in_reserved(obj), "Attempting to mark object at " PTR_FORMAT " that is not contained in the given region %u", p2i(obj), hr->hrm_index());

if (hr->obj_allocated_since_next_marking(obj)) {
if (hr->obj_allocated_since_marking_start(obj)) {
return false;
}

// Some callers may have stale objects to mark above nTAMS after humongous reclaim.
// Some callers may have stale objects to mark above TAMS after humongous reclaim.
// Can't assert that this is a valid object at this point, since it might be in the process of being copied by another thread.
assert(!hr->is_continues_humongous(), "Should not try to mark object " PTR_FORMAT " in Humongous continues region %u above nTAMS " PTR_FORMAT, p2i(obj), hr->hrm_index(), p2i(hr->next_top_at_mark_start()));
assert(!hr->is_continues_humongous(), "Should not try to mark object " PTR_FORMAT " in Humongous continues region %u above TAMS " PTR_FORMAT, p2i(obj), hr->hrm_index(), p2i(hr->top_at_mark_start()));

bool success = _next_mark_bitmap->par_mark(obj);
bool success = _mark_bitmap.par_mark(obj);
if (success) {
add_to_liveness(worker_id, obj, obj->size());
}
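
A compact standalone sketch of the rule this hunk implements, assuming word-indexed addresses and a plain std::vector<bool> standing in for the real G1CMBitMap (all names here are illustrative, not HotSpot APIs): objects above TAMS are implicitly live and never get a bit, while objects below TAMS are marked in the single bitmap, with the liveness statistics updated only by the first marker.

#include <cstddef>
#include <vector>

// One bitmap for the whole heap, one bit per word (simplified; the real
// G1CMBitMap is coarser and marks atomically).
struct MarkBitmapSketch {
  std::vector<bool> bits;
  explicit MarkBitmapSketch(std::size_t words) : bits(words, false) {}
  bool par_mark(std::size_t addr) {   // true only for the first marker (not atomic here)
    if (bits[addr]) return false;
    bits[addr] = true;
    return true;
  }
  bool is_marked(std::size_t addr) const { return bits[addr]; }
};

struct RegionSketch {
  std::size_t bottom;
  std::size_t top_at_mark_start;      // TAMS
  bool obj_allocated_since_marking_start(std::size_t addr) const {
    return addr >= top_at_mark_start;
  }
};

// Objects above TAMS are implicitly live and never get a bit; objects below
// TAMS are marked, and liveness is accumulated exactly once per object.
bool mark_in_bitmap(MarkBitmapSketch& bm, const RegionSketch& r,
                    std::size_t obj_addr, std::size_t obj_words,
                    std::size_t& live_words) {
  if (r.obj_allocated_since_marking_start(obj_addr)) {
    return false;                     // implicitly live, no bit needed
  }
  bool first = bm.par_mark(obj_addr);
  if (first) {
    live_words += obj_words;          // the add_to_liveness() step
  }
  return first;
}
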
@ -129,7 +129,7 @@ inline void G1CMTask::push(G1TaskQueueEntry task_entry) {
|
||||
assert(task_entry.is_array_slice() || _g1h->is_in_reserved(task_entry.obj()), "invariant");
|
||||
assert(task_entry.is_array_slice() || !_g1h->is_on_master_free_list(
|
||||
_g1h->heap_region_containing(task_entry.obj())), "invariant");
|
||||
assert(task_entry.is_array_slice() || _next_mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.obj())), "invariant");
|
||||
assert(task_entry.is_array_slice() || _mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.obj())), "invariant");
|
||||
|
||||
if (!_task_queue->push(task_entry)) {
|
||||
// The local task queue looks full. We need to push some entries
|
||||
@ -177,7 +177,7 @@ inline bool G1CMTask::is_below_finger(oop obj, HeapWord* global_finger) const {
|
||||
template<bool scan>
|
||||
inline void G1CMTask::process_grey_task_entry(G1TaskQueueEntry task_entry) {
|
||||
assert(scan || (task_entry.is_oop() && task_entry.obj()->is_typeArray()), "Skipping scan of grey non-typeArray");
|
||||
assert(task_entry.is_array_slice() || _next_mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.obj())),
|
||||
assert(task_entry.is_array_slice() || _mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.obj())),
|
||||
"Any stolen object should be a slice or marked");
|
||||
|
||||
if (scan) {
|
||||
@ -234,7 +234,7 @@ inline void G1CMTask::abort_marking_if_regular_check_fail() {
|
||||
}
|
||||
|
||||
inline bool G1CMTask::make_reference_grey(oop obj) {
|
||||
if (!_cm->mark_in_next_bitmap(_worker_id, obj)) {
|
||||
if (!_cm->mark_in_bitmap(_worker_id, obj)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -286,18 +286,13 @@ inline bool G1CMTask::deal_with_reference(T* p) {
|
||||
return make_reference_grey(obj);
|
||||
}
|
||||
|
||||
inline void G1ConcurrentMark::par_mark_in_prev_bitmap(oop p) {
|
||||
_prev_mark_bitmap->par_mark(p);
|
||||
inline void G1ConcurrentMark::raw_mark_in_bitmap(oop p) {
|
||||
_mark_bitmap.par_mark(p);
|
||||
}
|
||||
|
||||
bool G1ConcurrentMark::is_marked_in_prev_bitmap(oop p) const {
|
||||
bool G1ConcurrentMark::is_marked_in_bitmap(oop p) const {
|
||||
assert(p != NULL && oopDesc::is_oop(p), "expected an oop");
|
||||
return _prev_mark_bitmap->is_marked(cast_from_oop<HeapWord*>(p));
|
||||
}
|
||||
|
||||
bool G1ConcurrentMark::is_marked_in_next_bitmap(oop p) const {
|
||||
assert(p != NULL && oopDesc::is_oop(p), "expected an oop");
|
||||
return _next_mark_bitmap->is_marked(cast_from_oop<HeapWord*>(p));
|
||||
return _mark_bitmap.is_marked(cast_from_oop<HeapWord*>(p));
|
||||
}
|
||||
|
||||
inline bool G1ConcurrentMark::do_yield_check() {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -28,6 +28,10 @@
|
||||
#include "gc/g1/heapRegion.hpp"
|
||||
#include "memory/virtualspace.hpp"
|
||||
|
||||
G1CMBitMap::G1CMBitMap() : MarkBitMap(), _listener() {
|
||||
_listener.set_bitmap(this);
|
||||
}
|
||||
|
||||
void G1CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
|
||||
MarkBitMap::initialize(heap, storage->reserved());
|
||||
storage->set_mapping_changed_listener(&_listener);
|
||||
@ -41,18 +45,3 @@ void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_r
|
||||
MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
|
||||
_bm->clear_range(mr);
|
||||
}
|
||||
|
||||
void G1CMBitMap::clear_region(HeapRegion* region) {
|
||||
if (!region->is_empty()) {
|
||||
MemRegion mr(region->bottom(), region->top());
|
||||
clear_range(mr);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
void G1CMBitMap::check_mark(HeapWord* addr) {
|
||||
assert(G1CollectedHeap::heap()->is_in(addr),
|
||||
"Trying to access bitmap " PTR_FORMAT " for address " PTR_FORMAT " not in the heap.",
|
||||
p2i(this), p2i(addr));
|
||||
}
|
||||
#endif
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -61,24 +61,16 @@ public:
|
||||
// A generic mark bitmap for concurrent marking. This is essentially a wrapper
|
||||
// around the BitMap class that is based on HeapWords, with one bit per (1 << _shifter) HeapWords.
|
||||
class G1CMBitMap : public MarkBitMap {
|
||||
|
||||
G1CMBitMapMappingChangedListener _listener;
|
||||
|
||||
protected:
|
||||
|
||||
virtual void check_mark(HeapWord* addr) NOT_DEBUG_RETURN;
|
||||
|
||||
public:
|
||||
|
||||
G1CMBitMap() : MarkBitMap(), _listener() { _listener.set_bitmap(this); }
|
||||
G1CMBitMap();
|
||||
|
||||
// Initializes the underlying BitMap to cover the given area.
|
||||
void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);
|
||||
|
||||
// Apply the closure to the addresses that correspond to marked bits in the bitmap.
|
||||
inline bool iterate(G1CMBitMapClosure* cl, MemRegion mr);
|
||||
|
||||
void clear_region(HeapRegion* hr);
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_G1_G1CONCURRENTMARKBITMAP_HPP
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -233,10 +233,10 @@ bool G1ConcurrentMarkThread::subphase_remark() {
|
||||
return _cm->has_aborted();
|
||||
}
|
||||
|
||||
bool G1ConcurrentMarkThread::phase_rebuild_remembered_sets() {
|
||||
bool G1ConcurrentMarkThread::phase_rebuild_and_scrub() {
|
||||
ConcurrentGCBreakpoints::at("AFTER REBUILD STARTED");
|
||||
G1ConcPhaseTimer p(_cm, "Concurrent Rebuild Remembered Sets");
|
||||
_cm->rebuild_rem_set_concurrently();
|
||||
G1ConcPhaseTimer p(_cm, "Concurrent Rebuild Remembered Sets and Scrub Regions");
|
||||
_cm->rebuild_and_scrub();
|
||||
return _cm->has_aborted();
|
||||
}
|
||||
|
||||
@ -293,8 +293,8 @@ void G1ConcurrentMarkThread::concurrent_mark_cycle_do() {
|
||||
// Phase 3: Actual mark loop.
|
||||
if (phase_mark_loop()) return;
|
||||
|
||||
// Phase 4: Rebuild remembered sets.
|
||||
if (phase_rebuild_remembered_sets()) return;
|
||||
// Phase 4: Rebuild remembered sets and scrub dead objects.
|
||||
if (phase_rebuild_and_scrub()) return;
|
||||
|
||||
// Phase 5: Wait for Cleanup.
|
||||
if (phase_delay_to_keep_mmu_before_cleanup()) return;
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -67,7 +67,7 @@ class G1ConcurrentMarkThread: public ConcurrentGCThread {
|
||||
bool subphase_delay_to_keep_mmu_before_remark();
|
||||
bool subphase_remark();
|
||||
|
||||
bool phase_rebuild_remembered_sets();
|
||||
bool phase_rebuild_and_scrub();
|
||||
bool phase_delay_to_keep_mmu_before_cleanup();
|
||||
bool phase_cleanup();
|
||||
bool phase_clear_bitmap_for_next_mark();
|
||||
|
src/hotspot/share/gc/g1/g1ConcurrentRebuildAndScrub.cpp (new file, 358 lines)
@ -0,0 +1,358 @@
|
||||
/*
|
||||
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
|
||||
#include "gc/g1/g1ConcurrentRebuildAndScrub.hpp"
|
||||
|
||||
#include "gc/g1/g1ConcurrentMark.inline.hpp"
|
||||
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
|
||||
#include "gc/g1/g1_globals.hpp"
|
||||
#include "gc/g1/heapRegion.inline.hpp"
|
||||
#include "gc/g1/heapRegionManager.inline.hpp"
|
||||
#include "gc/shared/suspendibleThreadSet.hpp"
|
||||
#include "gc/shared/workerThread.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "memory/memRegion.hpp"
|
||||
#include "oops/oopsHierarchy.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
// Worker task that scans the objects in the old generation to rebuild the remembered
// set and at the same time scrubs dead objects by replacing them with filler objects
// to make them completely parsable.
//
// The remark pause recorded two pointers within the regions:
//
// parsable_bottom (pb): this is the TAMS of the recent marking for that region. Objects
// below that may or may not be dead (as per mark bitmap).
// This task needs to remove the dead objects, replacing them
// with filler objects so that they can be walked through later.
//
// top_at_rebuild_start (tars): at rebuild phase start we record the current top: up to
// this address (live) objects need to be scanned for references
// that might need to be added to the remembered sets.
//
// Note that bottom <= parsable_bottom <= tars; if there is no tars (i.e. NULL),
// obviously there cannot be a parsable_bottom.
//
// We need to scrub and scan objects to rebuild remembered sets until parsable_bottom;
// we need to scan objects to rebuild remembered sets until tars.
class G1RebuildRSAndScrubTask : public WorkerTask {
|
||||
G1ConcurrentMark* _cm;
|
||||
HeapRegionClaimer _hr_claimer;
|
||||
|
||||
const bool _should_rebuild_remset;
|
||||
|
||||
class G1RebuildRSAndScrubRegionClosure : public HeapRegionClosure {
|
||||
G1ConcurrentMark* _cm;
|
||||
const G1CMBitMap* _bitmap;
|
||||
|
||||
G1RebuildRemSetClosure _rebuild_closure;
|
||||
|
||||
const bool _should_rebuild_remset;
|
||||
|
||||
size_t _marked_words;
|
||||
size_t _processed_words;
|
||||
|
||||
const size_t ProcessingYieldLimitInWords = G1RebuildRemSetChunkSize / HeapWordSize;
|
||||
|
||||
void reset_marked_words() {
|
||||
_marked_words = 0;
|
||||
}
|
||||
|
||||
void reset_processed_words() {
|
||||
_processed_words = 0;
|
||||
}
|
||||
|
||||
void assert_marked_words(HeapRegion* hr) {
|
||||
assert((_marked_words * HeapWordSize) == hr->marked_bytes(),
|
||||
"Mismatch between marking and re-calculation for region %u, %zu != %zu",
|
||||
hr->hrm_index(), (_marked_words * HeapWordSize), hr->marked_bytes());
|
||||
}
|
||||
|
||||
void add_processed_words(size_t processed) {
|
||||
_processed_words += processed;
|
||||
_marked_words += processed;
|
||||
}
|
||||
|
||||
// Yield if enough has been processed; returns if the concurrent marking cycle
|
||||
// has been aborted for any reason.
|
||||
bool yield_if_necessary() {
|
||||
if (_processed_words >= ProcessingYieldLimitInWords) {
|
||||
reset_processed_words();
|
||||
_cm->do_yield_check();
|
||||
}
|
||||
return _cm->has_aborted();
|
||||
}
|
||||
|
||||
// Returns whether the top at rebuild start value for the given region indicates
|
||||
// that there is some rebuild or scrubbing work.
|
||||
//
|
||||
// Based on the results of G1RemSetTrackingPolicy::needs_scan_for_rebuild(),
|
||||
// the value may be changed to nullptr during rebuilding if the region has either:
|
||||
// - been allocated after rebuild start, or
|
||||
// - been eagerly reclaimed by a young collection (only humongous)
|
||||
bool should_rebuild_or_scrub(HeapRegion* hr) const {
|
||||
return _cm->top_at_rebuild_start(hr->hrm_index()) != nullptr;
|
||||
}
|
||||
|
||||
// Helper used by both humongous objects and when chunking an object larger than the
|
||||
// G1RebuildRemSetChunkSize. The heap region is needed to ensure a humongous object
|
||||
// is not eagerly reclaimed during yielding.
|
||||
// Returns whether marking has been aborted.
|
||||
bool scan_large_object(HeapRegion* hr, const oop obj, MemRegion scan_range) {
|
||||
HeapWord* start = scan_range.start();
|
||||
HeapWord* limit = scan_range.end();
|
||||
do {
|
||||
MemRegion mr(start, MIN2(start + ProcessingYieldLimitInWords, limit));
|
||||
obj->oop_iterate(&_rebuild_closure, mr);
|
||||
|
||||
// Update processed words and yield, for humongous objects we will yield
|
||||
// after each chunk.
|
||||
add_processed_words(mr.word_size());
|
||||
bool mark_aborted = yield_if_necessary();
|
||||
if (mark_aborted) {
|
||||
return true;
|
||||
} else if (!should_rebuild_or_scrub(hr)) {
|
||||
// We need to check should_rebuild_or_scrub() again (for humongous objects)
|
||||
// because the region might have been eagerly reclaimed during the yield.
|
||||
log_trace(gc, marking)("Rebuild aborted for eagerly reclaimed humongous region: %u", hr->hrm_index());
|
||||
return false;
|
||||
}
|
||||
|
||||
// Step to next chunk of the humongous object
|
||||
start = mr.end();
|
||||
} while (start < limit);
|
||||
return false;
|
||||
}
|
||||
|
||||
// Scan for references into regions that need remembered set update for the given
|
||||
// live object. Returns the offset to the next object.
|
||||
size_t scan_object(HeapRegion* hr, HeapWord* current) {
|
||||
oop obj = cast_to_oop(current);
|
||||
size_t obj_size = obj->size();
|
||||
|
||||
if (!_should_rebuild_remset) {
|
||||
// Not rebuilding, just step to next object.
|
||||
add_processed_words(obj_size);
|
||||
} else if (obj_size > ProcessingYieldLimitInWords) {
|
||||
// Large object, needs to be chunked to avoid stalling safepoints.
|
||||
MemRegion mr(current, obj_size);
|
||||
scan_large_object(hr, obj, mr);
|
||||
// No need to add to _processed_words, this is all handled by the above call;
|
||||
// we also ignore the marking abort result of scan_large_object - we will check
|
||||
// again right afterwards.
|
||||
} else {
|
||||
// Object smaller than yield limit, process it fully.
|
||||
obj->oop_iterate(&_rebuild_closure);
|
||||
// Update how much we have processed. Yield check in main loop
|
||||
// will handle this case.
|
||||
add_processed_words(obj_size);
|
||||
}
|
||||
|
||||
return obj_size;
|
||||
}
|
||||
|
||||
// Scrub a range of dead objects starting at scrub_start. Will never scrub past limit.
|
||||
HeapWord* scrub_to_next_live(HeapRegion* hr, HeapWord* scrub_start, HeapWord* limit) {
|
||||
assert(!_bitmap->is_marked(scrub_start), "Should not scrub live object");
|
||||
|
||||
HeapWord* scrub_end = _bitmap->get_next_marked_addr(scrub_start, limit);
|
||||
hr->fill_range_with_dead_objects(scrub_start, scrub_end);
|
||||
|
||||
// Return the next object to handle.
|
||||
return scrub_end;
|
||||
}
|
||||
|
||||
// Scan the given region from bottom to parsable_bottom. Returns whether marking has
|
||||
// been aborted.
|
||||
bool scan_and_scrub_to_pb(HeapRegion* hr, HeapWord* start, HeapWord* const limit) {
|
||||
|
||||
while (start < limit) {
|
||||
if (_bitmap->is_marked(start)) {
|
||||
// Live object, need to scan to rebuild remembered sets for this object.
|
||||
start += scan_object(hr, start);
|
||||
} else {
|
||||
// Found dead object (which klass has potentially been unloaded). Scrub to next
|
||||
// marked object and continue.
|
||||
start = scrub_to_next_live(hr, start, limit);
|
||||
}
|
||||
|
||||
bool mark_aborted = yield_if_necessary();
|
||||
if (mark_aborted) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Scan the given region from parsable_bottom to tars. Returns whether marking has
|
||||
// been aborted.
|
||||
bool scan_from_pb_to_tars(HeapRegion* hr, HeapWord* start, HeapWord* const limit) {
|
||||
|
||||
while (start < limit) {
|
||||
start += scan_object(hr, start);
|
||||
// Avoid stalling safepoints and stop iteration if mark cycle has been aborted.
|
||||
bool mark_aborted = yield_if_necessary();
|
||||
if (mark_aborted) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Scan and scrub the given region to tars. Returns whether marking has
|
||||
// been aborted.
|
||||
bool scan_and_scrub_region(HeapRegion* hr, HeapWord* const pb) {
|
||||
assert(should_rebuild_or_scrub(hr), "must be");
|
||||
|
||||
reset_marked_words();
|
||||
log_trace(gc, marking)("Scrub and rebuild region: " HR_FORMAT " pb: " PTR_FORMAT " TARS: " PTR_FORMAT,
|
||||
HR_FORMAT_PARAMS(hr), p2i(pb), p2i(_cm->top_at_rebuild_start(hr->hrm_index())));
|
||||
|
||||
if (scan_and_scrub_to_pb(hr, hr->bottom(), pb)) {
|
||||
log_trace(gc, marking)("Scan and scrub aborted for region: %u", hr->hrm_index());
|
||||
return true;
|
||||
}
|
||||
|
||||
// Scrubbing completed for this region - notify that we are done with it, resetting
|
||||
// pb to bottom.
|
||||
hr->note_end_of_scrubbing();
|
||||
// Assert that the size of marked objects from the marking matches
|
||||
// the size of the objects which we scanned to rebuild remembered sets.
|
||||
assert_marked_words(hr);
|
||||
|
||||
// Rebuild from TAMS (= parsable_bottom) to TARS.
|
||||
if (scan_from_pb_to_tars(hr, pb, _cm->top_at_rebuild_start(hr->hrm_index()))) {
|
||||
log_trace(gc, marking)("Rebuild aborted for region: %u (%s)", hr->hrm_index(), hr->get_short_type_str());
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Scan a humongous region for remembered set updates. Scans in chunks to avoid
|
||||
// stalling safepoints. Returns whether the concurrent marking phase has been aborted.
|
||||
bool scan_humongous_region(HeapRegion* hr, HeapWord* const pb) {
|
||||
assert(should_rebuild_or_scrub(hr), "must be");
|
||||
|
||||
if (!_should_rebuild_remset) {
|
||||
// When not rebuilding there is nothing to do for humongous objects.
|
||||
return false;
|
||||
}
|
||||
|
||||
// At this point we should only have live humongous objects, that
|
||||
// means it must either be:
|
||||
// - marked
|
||||
// - or seen as fully parsable, i.e. allocated after the marking started
|
||||
oop humongous = cast_to_oop(hr->humongous_start_region()->bottom());
|
||||
assert(_bitmap->is_marked(humongous) || pb == hr->bottom(),
|
||||
"Humongous object not live");
|
||||
|
||||
reset_marked_words();
|
||||
log_trace(gc, marking)("Rebuild for humongous region: " HR_FORMAT " pb: " PTR_FORMAT " TARS: " PTR_FORMAT,
|
||||
HR_FORMAT_PARAMS(hr), p2i(pb), p2i(_cm->top_at_rebuild_start(hr->hrm_index())));
|
||||
|
||||
// Scan the humongous object in chunks from bottom to top to rebuild remembered sets.
|
||||
HeapWord* humongous_end = hr->humongous_start_region()->bottom() + humongous->size();
|
||||
MemRegion mr(hr->bottom(), MIN2(hr->top(), humongous_end));
|
||||
|
||||
bool mark_aborted = scan_large_object(hr, humongous, mr);
|
||||
if (mark_aborted) {
|
||||
log_trace(gc, marking)("Rebuild aborted for region: %u (%s)", hr->hrm_index(), hr->get_short_type_str());
|
||||
return true;
|
||||
} else if (_bitmap->is_marked(humongous) && should_rebuild_or_scrub(hr)) {
|
||||
// Only verify that the marked size matches the rebuilt size if this object was marked
|
||||
// and the object should still be handled. The should_rebuild_or_scrub() state can
|
||||
// change during rebuild for humongous objects that are eagerly reclaimed so we need to
|
||||
// check this.
|
||||
// If the object has not been marked the size from marking will be 0.
|
||||
assert_marked_words(hr);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
public:
|
||||
G1RebuildRSAndScrubRegionClosure(G1ConcurrentMark* cm, bool should_rebuild_remset, uint worker_id) :
|
||||
_cm(cm),
|
||||
_bitmap(_cm->mark_bitmap()),
|
||||
_rebuild_closure(G1CollectedHeap::heap(), worker_id),
|
||||
_should_rebuild_remset(should_rebuild_remset),
|
||||
_marked_words(0),
|
||||
_processed_words(0) { }
|
||||
|
||||
bool do_heap_region(HeapRegion* hr) {
|
||||
// Avoid stalling safepoints and stop iteration if mark cycle has been aborted.
|
||||
_cm->do_yield_check();
|
||||
if (_cm->has_aborted()) {
|
||||
return true;
|
||||
}
|
||||
|
||||
HeapWord* const pb = hr->parsable_bottom_acquire();
|
||||
|
||||
if (!should_rebuild_or_scrub(hr)) {
|
||||
// Region has been allocated during this phase, no need to either scrub or
|
||||
// scan to rebuild remembered sets.
|
||||
log_trace(gc, marking)("Scrub and rebuild region skipped for " HR_FORMAT " pb: " PTR_FORMAT,
|
||||
HR_FORMAT_PARAMS(hr), p2i(pb));
|
||||
assert(hr->bottom() == pb, "Region must be fully parsable");
|
||||
return false;
|
||||
}
|
||||
|
||||
bool mark_aborted;
|
||||
if (hr->needs_scrubbing()) {
|
||||
// This is a region with potentially unparsable (dead) objects.
|
||||
mark_aborted = scan_and_scrub_region(hr, pb);
|
||||
} else {
|
||||
assert(hr->is_humongous(), "must be, but %u is %s", hr->hrm_index(), hr->get_short_type_str());
|
||||
// No need to scrub humongous, but we should scan it to rebuild remsets.
|
||||
mark_aborted = scan_humongous_region(hr, pb);
|
||||
}
|
||||
|
||||
return mark_aborted;
|
||||
}
|
||||
};
|
||||
|
||||
public:
|
||||
G1RebuildRSAndScrubTask(G1ConcurrentMark* cm, bool should_rebuild_remset, uint num_workers) :
|
||||
WorkerTask("Scrub dead objects"),
|
||||
_cm(cm),
|
||||
_hr_claimer(num_workers),
|
||||
_should_rebuild_remset(should_rebuild_remset) { }
|
||||
|
||||
void work(uint worker_id) {
|
||||
SuspendibleThreadSetJoiner sts_join;
|
||||
|
||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||
G1RebuildRSAndScrubRegionClosure cl(_cm, _should_rebuild_remset, worker_id);
|
||||
g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
|
||||
}
|
||||
};
|
||||
|
||||
void G1ConcurrentRebuildAndScrub::rebuild_and_scrub(G1ConcurrentMark* cm, bool should_rebuild_remset, WorkerThreads* workers) {
|
||||
uint num_workers = workers->active_workers();
|
||||
|
||||
G1RebuildRSAndScrubTask task(cm, should_rebuild_remset, num_workers);
|
||||
workers->run_task(&task, num_workers);
|
||||
}
|
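
Summarizing the new file above, a self-contained model of the per-region walk it performs (simplified types, not the HotSpot classes): below parsable_bottom live objects are scanned for remembered-set rebuilding and dead runs are replaced by filler so the region becomes parsable again; between parsable_bottom and top_at_rebuild_start everything is parsable and only the remembered-set scan is needed.

#include <cstddef>
#include <vector>

// Per-region model (illustrative): mark bits exist only for live object starts
// below parsable_bottom; size records object sizes in words at object starts.
struct RegionModel {
  std::vector<bool> mark;
  std::vector<std::size_t> size;
  std::size_t bottom = 0;
  std::size_t parsable_bottom = 0;       // pb == TAMS of the finished marking
  std::size_t top_at_rebuild_start = 0;  // tars
};

static std::size_t next_marked(const RegionModel& r, std::size_t from, std::size_t limit) {
  while (from < limit && !r.mark[from]) from++;
  return from;
}

// scan_obj stands in for the remembered-set rebuild scan; fill_dead stands in
// for fill_range_with_dead_objects().
void rebuild_and_scrub_region(RegionModel& r,
                              void (*scan_obj)(std::size_t addr, std::size_t words),
                              void (*fill_dead)(std::size_t start, std::size_t end)) {
  std::size_t cur = r.bottom;
  // Below pb: live and dead objects are mixed. Scan the live ones and replace
  // every dead run with filler so the region becomes parsable again.
  while (cur < r.parsable_bottom) {
    if (r.mark[cur]) {
      scan_obj(cur, r.size[cur]);
      cur += r.size[cur];
    } else {
      std::size_t dead_end = next_marked(r, cur, r.parsable_bottom);
      fill_dead(cur, dead_end);
      cur = dead_end;
    }
  }
  // Between pb and tars everything is parsable and considered live; only the
  // remembered-set scan is needed, no scrubbing.
  while (cur < r.top_at_rebuild_start) {
    scan_obj(cur, r.size[cur]);
    cur += r.size[cur];
  }
}
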
src/hotspot/share/gc/g1/g1ConcurrentRebuildAndScrub.hpp (new file, 42 lines)
@ -0,0 +1,42 @@
|
||||
/*
|
||||
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_G1_G1CONCURRENTREBUILDANDSCRUB_HPP
|
||||
#define SHARE_GC_G1_G1CONCURRENTREBUILDANDSCRUB_HPP
|
||||
|
||||
#include "memory/allStatic.hpp"
|
||||
|
||||
class G1ConcurrentMark;
|
||||
class WorkerThreads;
|
||||
|
||||
// Rebuild and scrubbing helper class.
|
||||
class G1ConcurrentRebuildAndScrub : AllStatic {
|
||||
public:
|
||||
|
||||
static void rebuild_and_scrub(G1ConcurrentMark* cm, bool should_rebuild_remset, WorkerThreads* workers);
|
||||
};
|
||||
|
||||
|
||||
#endif /* SHARE_GC_G1_G1CONCURRENTREBUILDANDSCRUB_HPP */
|
||||
|
@ -68,6 +68,7 @@ public:
|
||||
// dead too) already.
|
||||
size_t apply(oop obj) {
|
||||
HeapWord* obj_addr = cast_from_oop<HeapWord*>(obj);
|
||||
size_t obj_size = obj->size();
|
||||
assert(_last_forwarded_object_end <= obj_addr, "should iterate in ascending address order");
|
||||
assert(_hr->is_in(obj_addr), "sanity");
|
||||
|
||||
@ -76,21 +77,12 @@ public:
|
||||
|
||||
zap_dead_objects(_last_forwarded_object_end, obj_addr);
|
||||
|
||||
assert(_cm->is_marked_in_prev_bitmap(obj), "should be correctly marked");
|
||||
assert(_cm->is_marked_in_bitmap(obj), "should be correctly marked");
|
||||
if (_during_concurrent_start) {
|
||||
// For the next marking info we'll only mark the
|
||||
// self-forwarded objects explicitly if we are during
|
||||
// concurrent start (since, normally, we only mark objects pointed
|
||||
// to by roots if we succeed in copying them). By marking all
|
||||
// self-forwarded objects we ensure that we mark any that are
|
||||
// still pointed to be roots. During concurrent marking, and
|
||||
// after concurrent start, we don't need to mark any objects
|
||||
// explicitly and all objects in the CSet are considered
|
||||
// (implicitly) live. So, we won't mark them explicitly and
|
||||
// we'll leave them over NTAMS.
|
||||
_cm->mark_in_next_bitmap(_worker_id, obj);
|
||||
// If the evacuation failure occurs during concurrent start we should do
|
||||
// any additional necessary per-object actions.
|
||||
_cm->add_to_liveness(_worker_id, obj, obj_size);
|
||||
}
|
||||
size_t obj_size = obj->size();
|
||||
|
||||
_marked_words += obj_size;
|
||||
// Reset the markWord
|
||||
@ -103,37 +95,13 @@ public:
|
||||
}
|
||||
|
||||
// Fill the memory area from start to end with filler objects, and update the BOT
|
||||
// accordingly. Since we clear and use the prev bitmap for marking objects that
|
||||
// failed evacuation, there is no work to be done there.
|
||||
// accordingly.
|
||||
void zap_dead_objects(HeapWord* start, HeapWord* end) {
|
||||
if (start == end) {
|
||||
return;
|
||||
}
|
||||
|
||||
size_t gap_size = pointer_delta(end, start);
|
||||
MemRegion mr(start, gap_size);
|
||||
if (gap_size >= CollectedHeap::min_fill_size()) {
|
||||
CollectedHeap::fill_with_objects(start, gap_size);
|
||||
|
||||
HeapWord* end_first_obj = start + cast_to_oop(start)->size();
|
||||
_hr->update_bot_for_block(start, end_first_obj);
|
||||
// Fill_with_objects() may have created multiple (i.e. two)
|
||||
// objects, as the max_fill_size() is half a region.
|
||||
// After updating the BOT for the first object, also update the
|
||||
// BOT for the second object to make the BOT complete.
|
||||
if (end_first_obj != end) {
|
||||
_hr->update_bot_for_block(end_first_obj, end);
|
||||
#ifdef ASSERT
|
||||
size_t size_second_obj = cast_to_oop(end_first_obj)->size();
|
||||
HeapWord* end_of_second_obj = end_first_obj + size_second_obj;
|
||||
assert(end == end_of_second_obj,
|
||||
"More than two objects were used to fill the area from " PTR_FORMAT " to " PTR_FORMAT ", "
|
||||
"second objects size " SIZE_FORMAT " ends at " PTR_FORMAT,
|
||||
p2i(start), p2i(end), size_second_obj, p2i(end_of_second_obj));
|
||||
#endif
|
||||
}
|
||||
}
|
||||
assert(!_cm->is_marked_in_prev_bitmap(cast_to_oop(start)), "should not be marked in prev bitmap");
|
||||
_hr->fill_range_with_dead_objects(start, end);
|
||||
}
|
||||
|
||||
void zap_remainder() {
|
||||
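
The rewritten zap_dead_objects() in the hunk below delegates the gap filling to fill_range_with_dead_objects(); as a rough standalone model of what such filling has to guarantee (sizes and the BOT are simplified assumptions, not the real HotSpot behavior): every dead gap is covered by one filler object, split when it exceeds the maximum filler size, never leaving a tail smaller than the minimum filler, and the block offset table gets one entry per filler.

#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

// Assumed limits, in words; the real values come from the collected heap.
constexpr std::size_t kMinFillWords = 2;
constexpr std::size_t kMaxFillWords = 1u << 20;

// Covers the dead gap [start, end) with filler objects and records one BOT
// entry per filler, mimicking update_bot_for_block(). Returns the fillers laid down.
std::vector<std::pair<std::size_t, std::size_t>>
fill_range_with_dead_objects(std::size_t start, std::size_t end,
                             std::vector<std::pair<std::size_t, std::size_t>>& bot) {
  std::vector<std::pair<std::size_t, std::size_t>> fillers;
  std::size_t gap = end - start;
  if (gap == 0) {
    return fillers;
  }
  assert(gap >= kMinFillWords && "a dead gap must be describable as an object");
  while (gap > 0) {
    std::size_t sz = gap > kMaxFillWords ? kMaxFillWords : gap;
    // Never leave a tail smaller than the minimum filler object.
    if (gap - sz != 0 && gap - sz < kMinFillWords) {
      sz = gap - kMinFillWords;
    }
    fillers.push_back({start, sz});
    bot.push_back({start, start + sz});   // one BOT update per filler block
    start += sz;
    gap -= sz;
  }
  return fillers;
}
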
@ -164,12 +132,24 @@ public:
during_concurrent_start,
_worker_id);

// All objects that failed evacuation has been marked in the prev bitmap.
// All objects that failed evacuation have been marked in the bitmap.
// Use the bitmap to apply the above closure to all failing objects.
G1CMBitMap* bitmap = const_cast<G1CMBitMap*>(_g1h->concurrent_mark()->prev_mark_bitmap());
G1CMBitMap* bitmap = _g1h->concurrent_mark()->mark_bitmap();
hr->apply_to_marked_objects(bitmap, &rspc);
// Need to zap the remainder area of the processed region.
rspc.zap_remainder();
// Now clear all the marks to be ready for a new marking cycle.
if (!during_concurrent_start) {
assert(hr->top_at_mark_start() == hr->bottom(), "TAMS must be bottom to make all objects look live");
_g1h->clear_bitmap_for_region(hr);
} else {
assert(hr->top_at_mark_start() == hr->top(), "TAMS must be top for bitmap to have any value");
// Keep the bits.
}
// We never evacuate Old (non-humongous, non-archive) regions during scrubbing
// (only afterwards); other regions (young, humongous, archive) never need
// scrubbing, so the following must hold.
assert(hr->parsable_bottom() == hr->bottom(), "PB must be bottom to make the whole area parsable");

return rspc.marked_bytes();
}
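
A short standalone restatement of the branch above (field names are simplified assumptions): the marks made to remember evacuation-failed objects are kept only when the pause was a concurrent start, where TAMS equals top and the bits are genuine marking data; otherwise TAMS is bottom, the bits carry no information for the next cycle, and the region's part of the bitmap is cleared immediately.

#include <cstddef>

// Simplified region state after evacuation failure (illustrative field names).
struct FailedRegionSketch {
  std::size_t bottom;
  std::size_t top;
  std::size_t top_at_mark_start;
  bool bitmap_has_bits;
};

void settle_bitmap_after_evac_failure(FailedRegionSketch& r, bool during_concurrent_start) {
  if (during_concurrent_start) {
    // TAMS == top: the bits double as marking data for the cycle that just
    // started, so they are kept.
  } else {
    // TAMS == bottom: nothing will consume these bits, so clear them now and
    // let the next marking cycle start from a clean bitmap.
    r.bitmap_has_bits = false;
  }
}
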
@ -198,7 +178,6 @@ public:
|
||||
hr->rem_set()->clear_locked(true);
|
||||
|
||||
hr->note_self_forwarding_removal_end(live_bytes);
|
||||
_g1h->verifier()->check_bitmaps("Self-Forwarding Ptr Removal", hr);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
@ -67,7 +67,7 @@ static void update_derived_pointers() {
|
||||
}
|
||||
|
||||
G1CMBitMap* G1FullCollector::mark_bitmap() {
|
||||
return _heap->concurrent_mark()->next_mark_bitmap();
|
||||
return _heap->concurrent_mark()->mark_bitmap();
|
||||
}
|
||||
|
||||
ReferenceProcessor* G1FullCollector::reference_processor() {
|
||||
@ -120,7 +120,7 @@ G1FullCollector::G1FullCollector(G1CollectedHeap* heap,
|
||||
_array_queue_set(_num_workers),
|
||||
_preserved_marks_set(true),
|
||||
_serial_compaction_point(),
|
||||
_is_alive(this, heap->concurrent_mark()->next_mark_bitmap()),
|
||||
_is_alive(this, heap->concurrent_mark()->mark_bitmap()),
|
||||
_is_alive_mutator(heap->ref_processor_stw(), &_is_alive),
|
||||
_always_subject_to_discovery(),
|
||||
_is_subject_mutator(heap->ref_processor_stw(), &_always_subject_to_discovery),
|
||||
@ -171,8 +171,13 @@ public:
void G1FullCollector::prepare_collection() {
_heap->policy()->record_full_collection_start();

_heap->abort_concurrent_cycle();
// Verification needs the bitmap, so we should clear the bitmap only later.
bool in_concurrent_cycle = _heap->abort_concurrent_cycle();
_heap->verify_before_full_collection(scope()->is_explicit_gc());
if (in_concurrent_cycle) {
GCTraceTime(Debug, gc) debug("Clear Bitmap");
_heap->concurrent_mark()->clear_bitmap(_heap->workers());
}

_heap->gc_prologue(true);
_heap->retire_tlabs();
@ -214,9 +219,8 @@ void G1FullCollector::complete_collection() {
|
||||
// update the derived pointer table.
|
||||
update_derived_pointers();
|
||||
|
||||
_heap->concurrent_mark()->swap_mark_bitmaps();
|
||||
// Prepare the bitmap for the next (potentially concurrent) marking.
|
||||
_heap->concurrent_mark()->clear_next_bitmap(_heap->workers());
|
||||
_heap->concurrent_mark()->clear_bitmap(_heap->workers());
|
||||
|
||||
_heap->prepare_heap_for_mutators();
|
||||
|
||||
|
@ -68,33 +68,29 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
void G1FullGCCompactTask::G1CompactRegionClosure::clear_in_prev_bitmap(oop obj) {
|
||||
void G1FullGCCompactTask::G1CompactRegionClosure::clear_in_bitmap(oop obj) {
|
||||
assert(_bitmap->is_marked(obj), "Should only compact marked objects");
|
||||
_bitmap->clear(obj);
|
||||
}
|
||||
|
||||
size_t G1FullGCCompactTask::G1CompactRegionClosure::apply(oop obj) {
|
||||
size_t size = obj->size();
|
||||
if (!obj->is_forwarded()) {
|
||||
// Object not moving, but clear the mark to allow reuse of the bitmap.
|
||||
clear_in_prev_bitmap(obj);
|
||||
return size;
|
||||
if (obj->is_forwarded()) {
|
||||
HeapWord* destination = cast_from_oop<HeapWord*>(obj->forwardee());
|
||||
|
||||
// copy object and reinit its mark
|
||||
HeapWord* obj_addr = cast_from_oop<HeapWord*>(obj);
|
||||
assert(obj_addr != destination, "everything in this pass should be moving");
|
||||
Copy::aligned_conjoint_words(obj_addr, destination, size);
|
||||
|
||||
// There is no need to transform stack chunks - marking already did that.
|
||||
cast_to_oop(destination)->init_mark();
|
||||
assert(cast_to_oop(destination)->klass() != NULL, "should have a class");
|
||||
}
|
||||
|
||||
HeapWord* destination = cast_from_oop<HeapWord*>(obj->forwardee());
|
||||
|
||||
// copy object and reinit its mark
|
||||
HeapWord* obj_addr = cast_from_oop<HeapWord*>(obj);
|
||||
assert(obj_addr != destination, "everything in this pass should be moving");
|
||||
Copy::aligned_conjoint_words(obj_addr, destination, size);
|
||||
|
||||
// There is no need to transform stack chunks - marking already did that.
|
||||
cast_to_oop(destination)->init_mark();
|
||||
assert(cast_to_oop(destination)->klass() != NULL, "should have a class");
|
||||
|
||||
// Clear the mark for the compacted object to allow reuse of the
|
||||
// bitmap without an additional clearing step.
|
||||
clear_in_prev_bitmap(obj);
|
||||
clear_in_bitmap(obj);
|
||||
return size;
|
||||
}
|
||||
|
||||
@ -105,7 +101,7 @@ void G1FullGCCompactTask::compact_region(HeapRegion* hr) {
|
||||
if (!collector()->is_free(hr->hrm_index())) {
|
||||
// The compaction closure not only copies the object to the new
|
||||
// location, but also clears the bitmap for it. This is needed
|
||||
// for bitmap verification and to be able to use the prev_bitmap
|
||||
// for bitmap verification and to be able to use the bitmap
|
||||
// for evacuation failures in the next young collection. Testing
|
||||
// showed that it was better overall to clear bit by bit, compared
|
||||
// to clearing the whole region at the end. This difference was
|
||||
|
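
A standalone sketch of the compaction step discussed in the hunk above (simplified memory model, not the HotSpot types): every marked object is either copied to its forwarding destination or left in place, and in both cases its bit is cleared immediately, so the single bitmap is already clean when compaction finishes and no separate clearing pass is needed.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Simplified compaction model (illustrative fields only).
struct CompactModel {
  std::vector<unsigned>    heap;       // one slot per word
  std::vector<bool>        mark;       // mark bit at object starts
  std::vector<std::size_t> forwardee;  // destination index, or SIZE_MAX if not moving
  std::vector<std::size_t> size;       // object size in words at each object start
};

// Copy a forwarded object to its destination (or leave it in place), then clear
// its mark bit immediately, so the bitmap needs no extra clearing pass.
// Returns the offset to the next object.
std::size_t compact_object(CompactModel& m, std::size_t obj) {
  std::size_t words = m.size[obj];
  if (m.forwardee[obj] != SIZE_MAX) {
    std::memmove(&m.heap[m.forwardee[obj]], &m.heap[obj], words * sizeof(unsigned));
  }
  m.mark[obj] = false;   // clear bit by bit instead of wiping the region afterwards
  return words;
}
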
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -50,7 +50,7 @@ public:
|
||||
|
||||
class G1CompactRegionClosure : public StackObj {
|
||||
G1CMBitMap* _bitmap;
|
||||
void clear_in_prev_bitmap(oop object);
|
||||
void clear_in_bitmap(oop object);
|
||||
public:
|
||||
G1CompactRegionClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) { }
|
||||
size_t apply(oop object);
|
||||
|
@ -131,12 +131,8 @@ bool G1FullGCPrepareTask::G1ResetMetadataClosure::do_heap_region(HeapRegion* hr)
|
||||
if (!_collector->is_compaction_target(region_idx)) {
|
||||
assert(!hr->is_free(), "all free regions should be compaction targets");
|
||||
assert(_collector->is_skip_compacting(region_idx) || hr->is_closed_archive(), "must be");
|
||||
if (hr->is_young()) {
|
||||
// G1 updates the BOT for old region contents incrementally, but young regions
|
||||
// lack BOT information for performance reasons.
|
||||
// Recreate BOT information of high live ratio young regions here to keep expected
|
||||
// performance during scanning their card tables in the collection pauses later.
|
||||
hr->update_bot();
|
||||
if (hr->needs_scrubbing_during_full_gc()) {
|
||||
scrub_skip_compacting_region(hr, hr->is_young());
|
||||
}
|
||||
}
|
||||
|
||||
@ -161,3 +157,31 @@ void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction(Hea
|
||||
hr->apply_to_marked_objects(_bitmap, &prepare_compact);
|
||||
}
|
||||
}
|
||||
|
||||
void G1FullGCPrepareTask::G1ResetMetadataClosure::scrub_skip_compacting_region(HeapRegion* hr, bool update_bot_for_live) {
|
||||
assert(hr->needs_scrubbing_during_full_gc(), "must be");
|
||||
|
||||
HeapWord* limit = hr->top();
|
||||
HeapWord* current_obj = hr->bottom();
|
||||
G1CMBitMap* bitmap = _collector->mark_bitmap();
|
||||
|
||||
while (current_obj < limit) {
|
||||
if (bitmap->is_marked(current_obj)) {
|
||||
oop current = cast_to_oop(current_obj);
|
||||
size_t size = current->size();
|
||||
if (update_bot_for_live) {
|
||||
hr->update_bot_for_block(current_obj, current_obj + size);
|
||||
}
|
||||
current_obj += size;
|
||||
continue;
|
||||
}
|
||||
// Found dead object, which is potentially unloaded, scrub to next
|
||||
// marked object.
|
||||
HeapWord* scrub_start = current_obj;
|
||||
HeapWord* scrub_end = bitmap->get_next_marked_addr(scrub_start, limit);
|
||||
assert(scrub_start != scrub_end, "must advance");
|
||||
hr->fill_range_with_dead_objects(scrub_start, scrub_end);
|
||||
|
||||
current_obj = scrub_end;
|
||||
}
|
||||
}
|
||||
|
@ -95,6 +95,10 @@ private:
|
||||
G1FullCollector* _collector;
|
||||
|
||||
void reset_region_metadata(HeapRegion* hr);
|
||||
// Scrub all runs of dead objects within the given region by putting filler
|
||||
// objects and updating the corresponding BOT. If update_bot_for_live is true,
|
||||
// also update the BOT for live objects.
|
||||
void scrub_skip_compacting_region(HeapRegion* hr, bool update_bot_for_live);
|
||||
|
||||
public:
|
||||
G1ResetMetadataClosure(G1FullCollector* collector);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -224,8 +224,8 @@ public:
|
||||
}
|
||||
|
||||
o->oop_iterate(&isLive);
|
||||
if (!_hr->obj_allocated_since_prev_marking(o)) {
|
||||
size_t obj_size = o->size(); // Make sure we don't overflow
|
||||
if (_hr->obj_in_unparsable_area(o, _hr->parsable_bottom())) {
|
||||
size_t obj_size = o->size();
|
||||
_live_bytes += (obj_size * HeapWordSize);
|
||||
}
|
||||
}
|
||||
@ -396,9 +396,9 @@ public:
|
||||
} else if (!r->is_starts_humongous()) {
|
||||
VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
|
||||
r->object_iterate(¬_dead_yet_cl);
|
||||
if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
|
||||
log_error(gc, verify)("[" PTR_FORMAT "," PTR_FORMAT "] max_live_bytes " SIZE_FORMAT " < calculated " SIZE_FORMAT,
|
||||
p2i(r->bottom()), p2i(r->end()), r->max_live_bytes(), not_dead_yet_cl.live_bytes());
|
||||
if (r->live_bytes() < not_dead_yet_cl.live_bytes()) {
|
||||
log_error(gc, verify)(HR_FORMAT " max_live_bytes %zu < calculated %zu",
|
||||
HR_FORMAT_PARAMS(r), r->live_bytes(), not_dead_yet_cl.live_bytes());
|
||||
_failures = true;
|
||||
}
|
||||
}
|
||||
@ -583,13 +583,37 @@ void G1HeapVerifier::verify(G1VerifyType type, VerifyOption vo, const char* msg)
}

void G1HeapVerifier::verify_before_gc(G1VerifyType type) {
verify(type, VerifyOption::G1UsePrevMarking, "Before GC");
verify(type, VerifyOption::G1UseConcMarking, "Before GC");
}

void G1HeapVerifier::verify_after_gc(G1VerifyType type) {
verify(type, VerifyOption::G1UsePrevMarking, "After GC");
verify(type, VerifyOption::G1UseConcMarking, "After GC");
}

void G1HeapVerifier::verify_bitmap_clear(bool from_tams) {
if (!G1VerifyBitmaps) {
return;
}

class G1VerifyBitmapClear : public HeapRegionClosure {
bool _from_tams;

public:
G1VerifyBitmapClear(bool from_tams) : _from_tams(from_tams) { }

virtual bool do_heap_region(HeapRegion* r) {
G1CMBitMap* bitmap = G1CollectedHeap::heap()->concurrent_mark()->mark_bitmap();

HeapWord* start = _from_tams ? r->top_at_mark_start() : r->bottom();

HeapWord* mark = bitmap->get_next_marked_addr(start, r->end());
guarantee(mark == r->end(), "Found mark at " PTR_FORMAT " in region %u from start " PTR_FORMAT, p2i(mark), r->hrm_index(), p2i(start));
return false;
}
} cl(from_tams);

G1CollectedHeap::heap()->heap_region_iterate(&cl);
}
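
The new verify_bitmap_clear() above reduces to a simple scan; a self-contained version of the check, with the bitmap and region bounds as simplified assumptions: starting from TAMS (or bottom for a full check), no set bit may be found before the end of the region.

#include <cstddef>
#include <vector>

// Simplified bounds for one region (illustrative assumption).
struct VerifyRegionSketch {
  std::size_t bottom;
  std::size_t end;
  std::size_t top_at_mark_start;
};

// Returns the first set bit at or after 'from', or 'limit' if there is none;
// the shape of get_next_marked_addr() in the listing above.
std::size_t get_next_marked_addr(const std::vector<bool>& bm, std::size_t from, std::size_t limit) {
  while (from < limit && !bm[from]) from++;
  return from;
}

// The bitmap counts as clear for this region if no mark exists between the
// chosen start (TAMS, or bottom) and the end of the region.
bool bitmap_clear_in(const std::vector<bool>& bm, const VerifyRegionSketch& r, bool from_tams) {
  std::size_t start = from_tams ? r.top_at_mark_start : r.bottom;
  return get_next_marked_addr(bm, start, r.end) == r.end;
}
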
|
||||
#ifndef PRODUCT
|
||||
class G1VerifyCardTableCleanup: public HeapRegionClosure {
|
||||
@ -654,81 +678,6 @@ void G1HeapVerifier::verify_dirty_young_regions() {
|
||||
_g1h->collection_set()->iterate(&cl);
|
||||
}
|
||||
|
||||
bool G1HeapVerifier::verify_no_bits_over_tams(const char* bitmap_name, const G1CMBitMap* const bitmap,
|
||||
HeapWord* tams, HeapWord* end) {
|
||||
guarantee(tams <= end,
|
||||
"tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
|
||||
HeapWord* result = bitmap->get_next_marked_addr(tams, end);
|
||||
if (result < end) {
|
||||
log_error(gc, verify)("## wrong marked address on %s bitmap: " PTR_FORMAT, bitmap_name, p2i(result));
|
||||
log_error(gc, verify)("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT, bitmap_name, p2i(tams), p2i(end));
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool G1HeapVerifier::verify_bitmaps(const char* caller, HeapRegion* hr) {
|
||||
const G1CMBitMap* const prev_bitmap = _g1h->concurrent_mark()->prev_mark_bitmap();
|
||||
const G1CMBitMap* const next_bitmap = _g1h->concurrent_mark()->next_mark_bitmap();
|
||||
|
||||
HeapWord* ptams = hr->prev_top_at_mark_start();
|
||||
HeapWord* ntams = hr->next_top_at_mark_start();
|
||||
HeapWord* end = hr->end();
|
||||
|
||||
bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
|
||||
|
||||
bool res_n = true;
|
||||
// We cannot verify the next bitmap while we are about to clear it.
|
||||
if (!_g1h->collector_state()->clearing_next_bitmap()) {
|
||||
res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
|
||||
}
|
||||
if (!res_p || !res_n) {
|
||||
log_error(gc, verify)("#### Bitmap verification failed for " HR_FORMAT, HR_FORMAT_PARAMS(hr));
|
||||
log_error(gc, verify)("#### Caller: %s", caller);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void G1HeapVerifier::check_bitmaps(const char* caller, HeapRegion* hr) {
|
||||
if (!G1VerifyBitmaps) {
|
||||
return;
|
||||
}
|
||||
|
||||
guarantee(verify_bitmaps(caller, hr), "bitmap verification");
|
||||
}
|
||||
|
||||
class G1VerifyBitmapClosure : public HeapRegionClosure {
|
||||
private:
|
||||
const char* _caller;
|
||||
G1HeapVerifier* _verifier;
|
||||
bool _failures;
|
||||
|
||||
public:
|
||||
G1VerifyBitmapClosure(const char* caller, G1HeapVerifier* verifier) :
|
||||
_caller(caller), _verifier(verifier), _failures(false) { }
|
||||
|
||||
bool failures() { return _failures; }
|
||||
|
||||
virtual bool do_heap_region(HeapRegion* hr) {
|
||||
bool result = _verifier->verify_bitmaps(_caller, hr);
|
||||
if (!result) {
|
||||
_failures = true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
void G1HeapVerifier::check_bitmaps(const char* caller) {
|
||||
if (!G1VerifyBitmaps) {
|
||||
return;
|
||||
}
|
||||
|
||||
G1VerifyBitmapClosure cl(caller, this);
|
||||
_g1h->heap_region_iterate(&cl);
|
||||
guarantee(!cl.failures(), "bitmap verification");
|
||||
}
|
||||
|
||||
class G1CheckRegionAttrTableClosure : public HeapRegionClosure {
|
||||
private:
|
||||
bool _failures;
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -70,29 +70,7 @@ public:
|
||||
void verify_before_gc(G1VerifyType type);
|
||||
void verify_after_gc(G1VerifyType type);
|
||||
|
||||
#ifndef PRODUCT
|
||||
// Make sure that the given bitmap has no marked objects in the
|
||||
// range [from,limit). If it does, print an error message and return
|
||||
// false. Otherwise, just return true. bitmap_name should be "prev"
|
||||
// or "next".
|
||||
bool verify_no_bits_over_tams(const char* bitmap_name, const G1CMBitMap* const bitmap,
|
||||
HeapWord* from, HeapWord* limit);
|
||||
|
||||
// Verify that the prev / next bitmap range [tams,end) for the given
|
||||
// region has no marks. Return true if all is well, false if errors
|
||||
// are detected.
|
||||
bool verify_bitmaps(const char* caller, HeapRegion* hr);
|
||||
#endif // PRODUCT
|
||||
|
||||
// If G1VerifyBitmaps is set, verify that the marking bitmaps for
|
||||
// the given region do not have any spurious marks. If errors are
|
||||
// detected, print appropriate error messages and crash.
|
||||
void check_bitmaps(const char* caller, HeapRegion* hr) PRODUCT_RETURN;
|
||||
|
||||
// If G1VerifyBitmaps is set, verify that the marking bitmaps do not
|
||||
// have any spurious marks. If errors are detected, print
|
||||
// appropriate error messages and crash.
|
||||
void check_bitmaps(const char* caller) PRODUCT_RETURN;
|
||||
void verify_bitmap_clear(bool above_tams_only);
|
||||
|
||||
// Do sanity check on the contents of the in-cset fast test table.
|
||||
bool check_region_attr_table() PRODUCT_RETURN_( return true; );
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -108,7 +108,7 @@ inline void G1RootRegionScanClosure::do_oop_work(T* p) {
|
||||
return;
|
||||
}
|
||||
oop obj = CompressedOops::decode_not_null(heap_oop);
|
||||
_cm->mark_in_next_bitmap(_worker_id, obj);
|
||||
_cm->mark_in_bitmap(_worker_id, obj);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
@ -210,7 +210,7 @@ void G1ParCopyHelper::mark_object(oop obj) {
|
||||
assert(!_g1h->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
|
||||
|
||||
// We know that the object is not moving so it's safe to read its size.
|
||||
_cm->mark_in_next_bitmap(_worker_id, obj);
|
||||
_cm->mark_in_bitmap(_worker_id, obj);
|
||||
}
|
||||
|
||||
void G1ParCopyHelper::trim_queue_partially() {
|
||||
|
@ -624,8 +624,8 @@ oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, siz
|
||||
HeapRegion* r = _g1h->heap_region_containing(old);
|
||||
|
||||
// Objects failing evacuation will turn into old objects since the regions
|
||||
// are relabeled as such. We mark the failing objects in the prev bitmap and
|
||||
// later use it to handle all failed objects.
|
||||
// are relabeled as such. We mark the failing objects in the marking bitmap
|
||||
// and later use it to handle all failed objects.
|
||||
_g1h->mark_evac_failure_object(old);
|
||||
|
||||
if (_evac_failure_regions->record(r->hrm_index())) {
|
||||
|
@ -448,7 +448,7 @@ void G1Policy::record_full_collection_end() {
|
||||
collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC"));
|
||||
collector_state()->set_in_concurrent_start_gc(false);
|
||||
collector_state()->set_mark_or_rebuild_in_progress(false);
|
||||
collector_state()->set_clearing_next_bitmap(false);
|
||||
collector_state()->set_clearing_bitmap(false);
|
||||
|
||||
_eden_surv_rate_group->start_adding_regions();
|
||||
// also call this on any additional surv rate groups
|
||||
@ -915,7 +915,7 @@ double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards) const {
|
||||
size_t G1Policy::predict_bytes_to_copy(HeapRegion* hr) const {
|
||||
size_t bytes_to_copy;
|
||||
if (!hr->is_young()) {
|
||||
bytes_to_copy = hr->max_live_bytes();
|
||||
bytes_to_copy = hr->live_bytes();
|
||||
} else {
|
||||
bytes_to_copy = (size_t) (hr->used() * hr->surv_rate_prediction(_predictor));
|
||||
}
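The change above swaps max_live_bytes() for live_bytes() in the copy-cost prediction. A small model of that prediction, with made-up numbers and a hypothetical survivor-rate value standing in for surv_rate_prediction():

#include <cstddef>
#include <cstdio>

// Rough model of predict_bytes_to_copy(): old regions are expected to
// evacuate their (marking-derived) live bytes, young regions a fraction of
// used bytes given by the predicted survivor rate.
size_t predict_bytes_to_copy(bool is_young, size_t used_bytes,
                             size_t live_bytes, double surv_rate) {
  return is_young ? (size_t)(used_bytes * surv_rate) : live_bytes;
}

int main() {
  // Hypothetical 32 MB old region, 20 MB live according to the last marking.
  std::printf("old:   %zu bytes\n", predict_bytes_to_copy(false, 32u << 20, 20u << 20, 0.0));
  // Hypothetical young region, 30% of used bytes predicted to survive.
  std::printf("young: %zu bytes\n", predict_bytes_to_copy(true, 32u << 20, 0, 0.30));
}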
@ -1138,6 +1138,7 @@ void G1Policy::record_concurrent_mark_cleanup_end(bool has_rebuilt_remembered_se
|
||||
}
|
||||
collector_state()->set_in_young_gc_before_mixed(mixed_gc_pending);
|
||||
collector_state()->set_mark_or_rebuild_in_progress(false);
|
||||
collector_state()->set_clearing_bitmap(true);
|
||||
|
||||
double end_sec = os::elapsedTime();
|
||||
double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -35,8 +35,8 @@
|
||||
//
|
||||
// This includes
|
||||
// * the number of live words gathered during marking for the area from bottom
|
||||
// to ntams. This is an exact measure.
|
||||
// The code corrects later for the live data between ntams and top.
|
||||
// to tams. This is an exact measure.
|
||||
// The code corrects later for the live data between tams and top.
|
||||
struct G1RegionMarkStats {
|
||||
size_t _live_words;
|
||||
|
||||
|
@ -1260,25 +1260,35 @@ class G1MergeHeapRootsTask : public WorkerTask {
|
||||
G1MergeCardSetStats stats() const { return _stats; }
|
||||
};
|
||||
|
||||
// Closure to clear the prev bitmap for any old region in the collection set.
|
||||
// Closure to make sure that the marking bitmap is clear for any old region in
|
||||
// the collection set.
|
||||
// This is needed to be able to use the bitmap for evacuation failure handling.
|
||||
class G1ClearBitmapClosure : public HeapRegionClosure {
|
||||
G1CollectedHeap* _g1h;
|
||||
|
||||
void assert_bitmap_clear(HeapRegion* hr, const G1CMBitMap* bitmap) {
|
||||
assert(bitmap->get_next_marked_addr(hr->bottom(), hr->end()) == hr->end(),
|
||||
"Bitmap should have no mark for young regions");
|
||||
"Bitmap should have no mark for region %u", hr->hrm_index());
|
||||
}
|
||||
|
||||
public:
|
||||
G1ClearBitmapClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
|
||||
|
||||
bool do_heap_region(HeapRegion* hr) {
|
||||
assert(_g1h->is_in_cset(hr), "Should only be used iterating the collection set");
|
||||
// Young regions should always have cleared bitmaps, so only clear old.
|
||||
if (hr->is_old()) {
|
||||
_g1h->clear_prev_bitmap_for_region(hr);
|
||||
|
||||
// Evacuation failure uses the bitmap to record evacuation failed objects,
|
||||
// so the bitmap for the regions in the collection set must be cleared if not already.
|
||||
//
|
||||
// A clear bitmap is obvious for young regions as we never mark through them;
|
||||
// old regions are only in the collection set after the concurrent cycle completed,
|
||||
// so their bitmaps must also be clear except when the pause occurs during the
|
||||
// concurrent bitmap clear. At that point the region's bitmap may contain marks
|
||||
// while being in the collection set at the same time.
|
||||
if (_g1h->collector_state()->clearing_bitmap() && hr->is_old()) {
|
||||
_g1h->clear_bitmap_for_region(hr);
|
||||
} else {
|
||||
assert(hr->is_young(), "Should only be young and old regions in collection set");
|
||||
assert_bitmap_clear(hr, _g1h->concurrent_mark()->prev_mark_bitmap());
|
||||
assert_bitmap_clear(hr, _g1h->concurrent_mark()->mark_bitmap());
|
||||
}
|
||||
return false;
|
||||
}
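The decision the closure makes can be summarized independently of HotSpot: old regions in the collection set may still carry marks while the concurrent bitmap clear is running, so only then do they need an explicit clear; in every other case the bitmap is expected to be empty already. A simplified sketch (region and bitmap types are toy stand-ins, not the real classes):

#include <cassert>
#include <set>

struct ToyRegion {
  bool is_old;                 // otherwise young, the only other cset type here
  std::set<unsigned> marks;    // stand-in for the region's slice of the bitmap
};

// Clear old regions while the concurrent clearing phase is still in progress,
// otherwise just assert that the bitmap is already clear.
void prepare_region_for_evac_failure(ToyRegion& r, bool clearing_bitmap_in_progress) {
  if (clearing_bitmap_in_progress && r.is_old) {
    r.marks.clear();           // the pause does the clearing itself
  } else {
    assert(r.marks.empty() && "bitmap must already be clear");
  }
}

int main() {
  ToyRegion old_region{true, {42, 77}};
  prepare_region_for_evac_failure(old_region, /*clearing_bitmap_in_progress=*/true);
  ToyRegion young_region{false, {}};
  prepare_region_for_evac_failure(young_region, /*clearing_bitmap_in_progress=*/false);
}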
@ -1790,266 +1800,3 @@ void G1RemSet::print_summary_info() {
|
||||
current.print_on(&ls, true /* show_thread_times*/);
|
||||
}
|
||||
}
|
||||
|
||||
class G1RebuildRemSetTask: public WorkerTask {
|
||||
// Aggregate the counting data that was constructed concurrently
|
||||
// with marking.
|
||||
class G1RebuildRemSetHeapRegionClosure : public HeapRegionClosure {
|
||||
G1ConcurrentMark* _cm;
|
||||
G1RebuildRemSetClosure _update_cl;
|
||||
|
||||
// Applies _update_cl to the references of the given object, limiting objArrays
|
||||
// to the given MemRegion. Returns the amount of words actually scanned.
|
||||
size_t scan_for_references(oop const obj, MemRegion mr) {
|
||||
size_t const obj_size = obj->size();
|
||||
// All non-objArrays and objArrays completely within the mr
|
||||
// can be scanned without passing the mr.
|
||||
if (!obj->is_objArray() || mr.contains(MemRegion(cast_from_oop<HeapWord*>(obj), obj_size))) {
|
||||
obj->oop_iterate(&_update_cl);
|
||||
return obj_size;
|
||||
}
|
||||
// This path is for objArrays crossing the given MemRegion. Only scan the
|
||||
// area within the MemRegion.
|
||||
obj->oop_iterate(&_update_cl, mr);
|
||||
return mr.intersection(MemRegion(cast_from_oop<HeapWord*>(obj), obj_size)).word_size();
|
||||
}
|
||||
|
||||
// A humongous object is live (with respect to the scanning) either
|
||||
// a) it is marked on the bitmap as such
|
||||
// b) its TARS is larger than TAMS, i.e. has been allocated during marking.
|
||||
bool is_humongous_live(oop const humongous_obj, const G1CMBitMap* const bitmap, HeapWord* tams, HeapWord* tars) const {
|
||||
return bitmap->is_marked(humongous_obj) || (tars > tams);
|
||||
}
|
||||
|
||||
// Iterator over the live objects within the given MemRegion.
|
||||
class LiveObjIterator : public StackObj {
|
||||
const G1CMBitMap* const _bitmap;
|
||||
const HeapWord* _tams;
|
||||
const MemRegion _mr;
|
||||
HeapWord* _current;
|
||||
|
||||
bool is_below_tams() const {
|
||||
return _current < _tams;
|
||||
}
|
||||
|
||||
bool is_live(HeapWord* obj) const {
|
||||
return !is_below_tams() || _bitmap->is_marked(obj);
|
||||
}
|
||||
|
||||
HeapWord* bitmap_limit() const {
|
||||
return MIN2(const_cast<HeapWord*>(_tams), _mr.end());
|
||||
}
|
||||
|
||||
void move_if_below_tams() {
|
||||
if (is_below_tams() && has_next()) {
|
||||
_current = _bitmap->get_next_marked_addr(_current, bitmap_limit());
|
||||
}
|
||||
}
|
||||
public:
|
||||
LiveObjIterator(const G1CMBitMap* const bitmap, const HeapWord* tams, const MemRegion mr, HeapWord* first_oop_into_mr) :
|
||||
_bitmap(bitmap),
|
||||
_tams(tams),
|
||||
_mr(mr),
|
||||
_current(first_oop_into_mr) {
|
||||
|
||||
assert(_current <= _mr.start(),
|
||||
"First oop " PTR_FORMAT " should extend into mr [" PTR_FORMAT ", " PTR_FORMAT ")",
|
||||
p2i(first_oop_into_mr), p2i(mr.start()), p2i(mr.end()));
|
||||
|
||||
// Step to the next live object within the MemRegion if needed.
|
||||
if (is_live(_current)) {
|
||||
// Non-objArrays were scanned by the previous part of that region.
|
||||
if (_current < mr.start() && !cast_to_oop(_current)->is_objArray()) {
|
||||
_current += cast_to_oop(_current)->size();
|
||||
// We might have positioned _current on a non-live object. Reposition to the next
|
||||
// live one if needed.
|
||||
move_if_below_tams();
|
||||
}
|
||||
} else {
|
||||
// The object at _current can only be dead if below TAMS, so we can use the bitmap immediately.
|
||||
_current = _bitmap->get_next_marked_addr(_current, bitmap_limit());
|
||||
assert(_current == _mr.end() || is_live(_current),
|
||||
"Current " PTR_FORMAT " should be live (%s) or beyond the end of the MemRegion (" PTR_FORMAT ")",
|
||||
p2i(_current), BOOL_TO_STR(is_live(_current)), p2i(_mr.end()));
|
||||
}
|
||||
}
|
||||
|
||||
void move_to_next() {
|
||||
_current += next()->size();
|
||||
move_if_below_tams();
|
||||
}
|
||||
|
||||
oop next() const {
|
||||
oop result = cast_to_oop(_current);
|
||||
assert(is_live(_current),
|
||||
"Object " PTR_FORMAT " must be live TAMS " PTR_FORMAT " below %d mr " PTR_FORMAT " " PTR_FORMAT " outside %d",
|
||||
p2i(_current), p2i(_tams), _tams > _current, p2i(_mr.start()), p2i(_mr.end()), _mr.contains(result));
|
||||
return result;
|
||||
}
|
||||
|
||||
bool has_next() const {
|
||||
return _current < _mr.end();
|
||||
}
|
||||
};
|
||||
|
||||
// Rebuild remembered sets in the part of the region specified by mr and hr.
|
||||
// Objects between the bottom of the region and the TAMS are checked for liveness
|
||||
// using the given bitmap. Objects between TAMS and TARS are assumed to be live.
|
||||
// Returns the number of live words between bottom and TAMS.
|
||||
size_t rebuild_rem_set_in_region(const G1CMBitMap* const bitmap,
|
||||
HeapWord* const top_at_mark_start,
|
||||
HeapWord* const top_at_rebuild_start,
|
||||
HeapRegion* hr,
|
||||
MemRegion mr) {
|
||||
size_t marked_words = 0;
|
||||
|
||||
if (hr->is_humongous()) {
|
||||
oop const humongous_obj = cast_to_oop(hr->humongous_start_region()->bottom());
|
||||
if (is_humongous_live(humongous_obj, bitmap, top_at_mark_start, top_at_rebuild_start)) {
|
||||
// We need to scan both [bottom, TAMS) and [TAMS, top_at_rebuild_start);
|
||||
// however in case of humongous objects it is sufficient to scan the encompassing
|
||||
// area (top_at_rebuild_start is always larger or equal to TAMS) as one of the
|
||||
// two areas will be zero sized. I.e. TAMS is either
|
||||
// the same as bottom or top(_at_rebuild_start). There is no way TAMS has a different
|
||||
// value: this would mean that TAMS points somewhere into the object.
|
||||
assert(hr->top() == top_at_mark_start || hr->top() == top_at_rebuild_start,
|
||||
"More than one object in the humongous region?");
|
||||
humongous_obj->oop_iterate(&_update_cl, mr);
|
||||
return top_at_mark_start != hr->bottom() ? mr.intersection(MemRegion(cast_from_oop<HeapWord*>(humongous_obj), humongous_obj->size())).byte_size() : 0;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
for (LiveObjIterator it(bitmap, top_at_mark_start, mr, hr->block_start(mr.start())); it.has_next(); it.move_to_next()) {
|
||||
oop obj = it.next();
|
||||
size_t scanned_size = scan_for_references(obj, mr);
|
||||
if (cast_from_oop<HeapWord*>(obj) < top_at_mark_start) {
|
||||
marked_words += scanned_size;
|
||||
}
|
||||
}
|
||||
|
||||
return marked_words * HeapWordSize;
|
||||
}
|
||||
public:
|
||||
G1RebuildRemSetHeapRegionClosure(G1CollectedHeap* g1h,
|
||||
G1ConcurrentMark* cm,
|
||||
uint worker_id) :
|
||||
HeapRegionClosure(),
|
||||
_cm(cm),
|
||||
_update_cl(g1h, worker_id) { }
|
||||
|
||||
bool do_heap_region(HeapRegion* hr) {
|
||||
if (_cm->has_aborted()) {
|
||||
return true;
|
||||
}
|
||||
|
||||
uint const region_idx = hr->hrm_index();
|
||||
DEBUG_ONLY(HeapWord* const top_at_rebuild_start_check = _cm->top_at_rebuild_start(region_idx);)
|
||||
assert(top_at_rebuild_start_check == NULL ||
|
||||
top_at_rebuild_start_check > hr->bottom(),
|
||||
"A TARS (" PTR_FORMAT ") == bottom() (" PTR_FORMAT ") indicates the old region %u is empty (%s)",
|
||||
p2i(top_at_rebuild_start_check), p2i(hr->bottom()), region_idx, hr->get_type_str());
|
||||
|
||||
size_t total_marked_bytes = 0;
|
||||
size_t const chunk_size_in_words = G1RebuildRemSetChunkSize / HeapWordSize;
|
||||
|
||||
HeapWord* const top_at_mark_start = hr->prev_top_at_mark_start();
|
||||
|
||||
HeapWord* cur = hr->bottom();
|
||||
while (true) {
|
||||
// After every iteration (yield point) we need to check whether the region's
|
||||
// TARS changed due to e.g. eager reclaim.
|
||||
HeapWord* const top_at_rebuild_start = _cm->top_at_rebuild_start(region_idx);
|
||||
if (top_at_rebuild_start == NULL) {
|
||||
return false;
|
||||
}
|
||||
|
||||
MemRegion next_chunk = MemRegion(hr->bottom(), top_at_rebuild_start).intersection(MemRegion(cur, chunk_size_in_words));
|
||||
if (next_chunk.is_empty()) {
|
||||
break;
|
||||
}
|
||||
|
||||
const Ticks start = Ticks::now();
|
||||
size_t marked_bytes = rebuild_rem_set_in_region(_cm->prev_mark_bitmap(),
|
||||
top_at_mark_start,
|
||||
top_at_rebuild_start,
|
||||
hr,
|
||||
next_chunk);
|
||||
Tickspan time = Ticks::now() - start;
|
||||
|
||||
log_trace(gc, remset, tracking)("Rebuilt region %u "
|
||||
"live " SIZE_FORMAT " "
|
||||
"time %.3fms "
|
||||
"marked bytes " SIZE_FORMAT " "
|
||||
"bot " PTR_FORMAT " "
|
||||
"TAMS " PTR_FORMAT " "
|
||||
"TARS " PTR_FORMAT,
|
||||
region_idx,
|
||||
_cm->live_bytes(region_idx),
|
||||
time.seconds() * 1000.0,
|
||||
marked_bytes,
|
||||
p2i(hr->bottom()),
|
||||
p2i(top_at_mark_start),
|
||||
p2i(top_at_rebuild_start));
|
||||
|
||||
if (marked_bytes > 0) {
|
||||
total_marked_bytes += marked_bytes;
|
||||
}
|
||||
cur += chunk_size_in_words;
|
||||
|
||||
_cm->do_yield_check();
|
||||
if (_cm->has_aborted()) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
// In the final iteration of the loop the region might have been eagerly reclaimed.
|
||||
// Simply filter out those regions. We can not just use region type because there
|
||||
// might have already been new allocations into these regions.
|
||||
DEBUG_ONLY(HeapWord* const top_at_rebuild_start = _cm->top_at_rebuild_start(region_idx);)
|
||||
assert(top_at_rebuild_start == NULL ||
|
||||
total_marked_bytes == hr->marked_bytes(),
|
||||
"Marked bytes " SIZE_FORMAT " for region %u (%s) in [bottom, TAMS) do not match calculated marked bytes " SIZE_FORMAT " "
|
||||
"(" PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT ")",
|
||||
total_marked_bytes, hr->hrm_index(), hr->get_type_str(), hr->marked_bytes(),
|
||||
p2i(hr->bottom()), p2i(top_at_mark_start), p2i(top_at_rebuild_start));
|
||||
// Abort state may have changed after the yield check.
|
||||
return _cm->has_aborted();
|
||||
}
|
||||
};
|
||||
|
||||
HeapRegionClaimer _hr_claimer;
|
||||
G1ConcurrentMark* _cm;
|
||||
|
||||
uint _worker_id_offset;
|
||||
public:
|
||||
G1RebuildRemSetTask(G1ConcurrentMark* cm,
|
||||
uint n_workers,
|
||||
uint worker_id_offset) :
|
||||
WorkerTask("G1 Rebuild Remembered Set"),
|
||||
_hr_claimer(n_workers),
|
||||
_cm(cm),
|
||||
_worker_id_offset(worker_id_offset) {
|
||||
}
|
||||
|
||||
void work(uint worker_id) {
|
||||
SuspendibleThreadSetJoiner sts_join;
|
||||
|
||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||
|
||||
G1RebuildRemSetHeapRegionClosure cl(g1h, _cm, _worker_id_offset + worker_id);
|
||||
g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
|
||||
}
|
||||
};
|
||||
|
||||
void G1RemSet::rebuild_rem_set(G1ConcurrentMark* cm,
|
||||
WorkerThreads* workers,
|
||||
uint worker_id_offset) {
|
||||
uint num_workers = workers->active_workers();
|
||||
|
||||
G1RebuildRemSetTask cl(cm,
|
||||
num_workers,
|
||||
worker_id_offset);
|
||||
workers->run_task(&cl, num_workers);
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -147,10 +147,6 @@ public:
|
||||
|
||||
// Print accumulated summary info from the last time called.
|
||||
void print_periodic_summary_info(const char* header, uint period_count, bool show_thread_times);
|
||||
|
||||
// Rebuilds the remembered set by scanning from bottom to TARS for all regions
|
||||
// using the given workers.
|
||||
void rebuild_rem_set(G1ConcurrentMark* cm, WorkerThreads* workers, uint worker_id_offset);
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_G1_G1REMSET_HPP
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -62,19 +62,17 @@ void G1RemSetTrackingPolicy::update_at_free(HeapRegion* r) {
|
||||
|
||||
static void print_before_rebuild(HeapRegion* r, bool selected_for_rebuild, size_t total_live_bytes, size_t live_bytes) {
|
||||
log_trace(gc, remset, tracking)("Before rebuild region %u "
|
||||
"(ntams: " PTR_FORMAT ") "
|
||||
"total_live_bytes " SIZE_FORMAT " "
|
||||
"(tams: " PTR_FORMAT ") "
|
||||
"total_live_bytes %zu "
|
||||
"selected %s "
|
||||
"(live_bytes " SIZE_FORMAT " "
|
||||
"next_marked " SIZE_FORMAT " "
|
||||
"marked " SIZE_FORMAT " "
|
||||
"(live_bytes %zu "
|
||||
"marked %zu "
|
||||
"type %s)",
|
||||
r->hrm_index(),
|
||||
p2i(r->next_top_at_mark_start()),
|
||||
p2i(r->top_at_mark_start()),
|
||||
total_live_bytes,
|
||||
BOOL_TO_STR(selected_for_rebuild),
|
||||
live_bytes,
|
||||
r->next_marked_bytes(),
|
||||
r->marked_bytes(),
|
||||
r->get_type_str());
|
||||
}
|
||||
@ -116,8 +114,8 @@ bool G1RemSetTrackingPolicy::update_before_rebuild(HeapRegion* r, size_t live_by
|
||||
|
||||
assert(!r->rem_set()->is_updating(), "Remembered set of region %u is updating before rebuild", r->hrm_index());
|
||||
|
||||
size_t between_ntams_and_top = (r->top() - r->next_top_at_mark_start()) * HeapWordSize;
|
||||
size_t total_live_bytes = live_bytes + between_ntams_and_top;
|
||||
size_t between_tams_and_top = (r->top() - r->top_at_mark_start()) * HeapWordSize;
|
||||
size_t total_live_bytes = live_bytes + between_tams_and_top;
|
||||
|
||||
bool selected_for_rebuild = false;
|
||||
// For old regions, to be of interest for rebuilding the remembered set the following must apply:
|
||||
@ -163,15 +161,13 @@ void G1RemSetTrackingPolicy::update_after_rebuild(HeapRegion* r) {
|
||||
}
|
||||
G1ConcurrentMark* cm = G1CollectedHeap::heap()->concurrent_mark();
|
||||
log_trace(gc, remset, tracking)("After rebuild region %u "
|
||||
"(ntams " PTR_FORMAT " "
|
||||
"liveness " SIZE_FORMAT " "
|
||||
"next_marked_bytes " SIZE_FORMAT " "
|
||||
"remset occ " SIZE_FORMAT " "
|
||||
"size " SIZE_FORMAT ")",
|
||||
"(tams " PTR_FORMAT " "
|
||||
"liveness %zu "
|
||||
"remset occ %zu "
|
||||
"size %zu)",
|
||||
r->hrm_index(),
|
||||
p2i(r->next_top_at_mark_start()),
|
||||
p2i(r->top_at_mark_start()),
|
||||
cm->live_bytes(r->hrm_index()),
|
||||
r->next_marked_bytes(),
|
||||
r->rem_set()->occupied(),
|
||||
r->rem_set()->mem_size());
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -53,10 +53,10 @@ SATBMarkQueue& G1SATBMarkQueueSet::satb_queue_for_thread(Thread* const t) const
|
||||
// be a NULL pointer. NULL pointers are pre-filtered and never
|
||||
// inserted into a SATB buffer.
|
||||
//
|
||||
// An entry that is below the NTAMS pointer for the containing heap
|
||||
// An entry that is below the TAMS pointer for the containing heap
|
||||
// region requires marking. Such an entry must point to a valid object.
|
||||
//
|
||||
// An entry that is at least the NTAMS pointer for the containing heap
|
||||
// An entry that is at least the TAMS pointer for the containing heap
|
||||
// region might be any of the following, none of which should be marked.
|
||||
//
|
||||
// * A reference to an object allocated since marking started.
|
||||
@ -75,7 +75,7 @@ SATBMarkQueue& G1SATBMarkQueueSet::satb_queue_for_thread(Thread* const t) const
|
||||
// humongous object is recorded and then reclaimed, the reference
|
||||
// becomes stale.
|
||||
//
|
||||
// The stale reference cases are implicitly handled by the NTAMS
|
||||
// The stale reference cases are implicitly handled by the TAMS
|
||||
// comparison. Because of the possibility of stale references, buffer
|
||||
// processing must be somewhat circumspect and not assume entries
|
||||
// in an unfiltered buffer refer to valid objects.
|
||||
@ -87,7 +87,7 @@ static inline bool requires_marking(const void* entry, G1CollectedHeap* g1h) {
|
||||
|
||||
HeapRegion* region = g1h->heap_region_containing(entry);
|
||||
assert(region != NULL, "No region for " PTR_FORMAT, p2i(entry));
|
||||
if (entry >= region->next_top_at_mark_start()) {
|
||||
if (entry >= region->top_at_mark_start()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -98,7 +98,7 @@ static inline bool requires_marking(const void* entry, G1CollectedHeap* g1h) {
|
||||
}
|
||||
|
||||
static inline bool discard_entry(const void* entry, G1CollectedHeap* g1h) {
|
||||
return !requires_marking(entry, g1h) || g1h->is_marked_next(cast_to_oop(entry));
|
||||
return !requires_marking(entry, g1h) || g1h->is_marked(cast_to_oop(entry));
|
||||
}
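With a single bitmap the SATB filtering rule stays the same, only the name changes: an entry is interesting only if it lies below the containing region's TAMS and is not yet marked. A self-contained model of discard_entry(), with a toy region lookup instead of heap_region_containing() (all names here are illustrative):

#include <cstdint>
#include <cstdio>
#include <set>

struct ToyRegion {
  uintptr_t bottom;
  uintptr_t tams;   // objects at or above TAMS were allocated after marking started
};

using MarkBitmap = std::set<uintptr_t>;

// An entry needs marking only if it is below TAMS (so marking is responsible
// for it) and it has not been marked yet; everything else can be discarded.
bool discard_satb_entry(uintptr_t entry, const ToyRegion& r, const MarkBitmap& bitmap) {
  bool requires_marking = entry < r.tams;
  bool already_marked   = bitmap.count(entry) != 0;
  return !requires_marking || already_marked;
}

int main() {
  ToyRegion r{0x1000, 0x3000};
  MarkBitmap bitmap{0x1800};
  std::printf("%d\n", discard_satb_entry(0x1800, r, bitmap)); // 1: already marked
  std::printf("%d\n", discard_satb_entry(0x2000, r, bitmap)); // 0: below TAMS, unmarked
  std::printf("%d\n", discard_satb_entry(0x3800, r, bitmap)); // 1: allocated since marking started
}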
// Workaround for not yet having std::bind.
|
||||
|
@ -389,13 +389,13 @@ class G1PrepareEvacuationTask : public WorkerTask {
|
||||
} else {
|
||||
_g1h->register_region_with_region_attr(hr);
|
||||
}
|
||||
log_debug(gc, humongous)("Humongous region %u (object size " SIZE_FORMAT " @ " PTR_FORMAT ") remset " SIZE_FORMAT " code roots " SIZE_FORMAT " marked %d reclaim candidate %d type array %d",
|
||||
log_debug(gc, humongous)("Humongous region %u (object size %zu @ " PTR_FORMAT ") remset %zu code roots %zu marked %d reclaim candidate %d type array %d",
|
||||
index,
|
||||
cast_to_oop(hr->bottom())->size() * HeapWordSize,
|
||||
p2i(hr->bottom()),
|
||||
hr->rem_set()->occupied(),
|
||||
hr->rem_set()->code_roots_list_length(),
|
||||
_g1h->concurrent_mark()->next_mark_bitmap()->is_marked(hr->bottom()),
|
||||
_g1h->concurrent_mark()->mark_bitmap()->is_marked(hr->bottom()),
|
||||
_g1h->is_humongous_reclaim_candidate(index),
|
||||
cast_to_oop(hr->bottom())->is_typeArray()
|
||||
);
|
||||
|
@ -205,11 +205,10 @@ public:
|
||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||
G1ConcurrentMark* const cm = g1h->concurrent_mark();
|
||||
cm->humongous_object_eagerly_reclaimed(r);
|
||||
assert(!cm->is_marked_in_prev_bitmap(obj) && !cm->is_marked_in_next_bitmap(obj),
|
||||
"Eagerly reclaimed humongous region %u should not be marked at all but is in prev %s next %s",
|
||||
assert(!cm->is_marked_in_bitmap(obj),
|
||||
"Eagerly reclaimed humongous region %u should not be marked at all but is in bitmap %s",
|
||||
region_idx,
|
||||
BOOL_TO_STR(cm->is_marked_in_prev_bitmap(obj)),
|
||||
BOOL_TO_STR(cm->is_marked_in_next_bitmap(obj)));
|
||||
BOOL_TO_STR(cm->is_marked_in_bitmap(obj)));
|
||||
_humongous_objects_reclaimed++;
|
||||
do {
|
||||
HeapRegion* next = g1h->next_region_in_humongous(r);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -104,7 +104,6 @@ void HeapRegion::handle_evacuation_failure() {
|
||||
uninstall_surv_rate_group();
|
||||
clear_young_index_in_cset();
|
||||
set_old();
|
||||
_next_marked_bytes = 0;
|
||||
}
|
||||
|
||||
void HeapRegion::unlink_from_list() {
|
||||
@ -125,8 +124,6 @@ void HeapRegion::hr_clear(bool clear_space) {
|
||||
|
||||
rem_set()->clear_locked();
|
||||
|
||||
zero_marked_bytes();
|
||||
|
||||
init_top_at_mark_start();
|
||||
if (clear_space) clear(SpaceDecorator::Mangle);
|
||||
|
||||
@ -238,8 +235,10 @@ HeapRegion::HeapRegion(uint hrm_index,
|
||||
#ifdef ASSERT
|
||||
_containing_set(NULL),
|
||||
#endif
|
||||
_prev_top_at_mark_start(NULL), _next_top_at_mark_start(NULL),
|
||||
_prev_marked_bytes(0), _next_marked_bytes(0),
|
||||
_top_at_mark_start(NULL),
|
||||
_parsable_bottom(NULL),
|
||||
_garbage_bytes(0),
|
||||
_marked_bytes(0),
|
||||
_young_index_in_cset(-1),
|
||||
_surv_rate_group(NULL), _age_index(G1SurvRateGroup::InvalidAgeIndex), _gc_efficiency(-1.0),
|
||||
_node_index(G1NUMA::UnknownNodeIndex)
|
||||
@ -274,31 +273,29 @@ void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
|
||||
|
||||
void HeapRegion::note_self_forwarding_removal_start(bool during_concurrent_start,
|
||||
bool during_conc_mark) {
|
||||
// We always recreate the prev marking info and we'll explicitly
|
||||
// mark all objects we find to be self-forwarded on the prev
|
||||
// bitmap. So all objects need to be below PTAMS.
|
||||
_prev_marked_bytes = 0;
|
||||
// We always scrub the region to make sure the entire region is
|
||||
// parsable after the self-forwarding pointer removal, and update _marked_bytes
|
||||
// at the end.
|
||||
_marked_bytes = 0;
|
||||
_garbage_bytes = 0;
|
||||
|
||||
if (during_concurrent_start) {
|
||||
// During concurrent start, we'll also explicitly mark all objects
|
||||
// we find to be self-forwarded on the next bitmap. So all
|
||||
// objects need to be below NTAMS.
|
||||
_next_top_at_mark_start = top();
|
||||
_next_marked_bytes = 0;
|
||||
} else if (during_conc_mark) {
|
||||
// During concurrent mark, all objects in the CSet (including
|
||||
// the ones we find to be self-forwarded) are implicitly live.
|
||||
// So all objects need to be above NTAMS.
|
||||
_next_top_at_mark_start = bottom();
|
||||
_next_marked_bytes = 0;
|
||||
// Self-forwarding marks all objects. Adjust TAMS so that these marks are
|
||||
// below it.
|
||||
set_top_at_mark_start(top());
|
||||
} else {
|
||||
// Outside of the mixed phase all regions that had an evacuation failure must
|
||||
// be young regions, and their TAMS is always bottom. Similarly, before the
|
||||
// start of the mixed phase, we scrubbed and reset TAMS to bottom.
|
||||
assert(top_at_mark_start() == bottom(), "must be");
|
||||
}
|
||||
}
|
||||
|
||||
void HeapRegion::note_self_forwarding_removal_end(size_t marked_bytes) {
|
||||
assert(marked_bytes <= used(),
|
||||
"marked: " SIZE_FORMAT " used: " SIZE_FORMAT, marked_bytes, used());
|
||||
_prev_top_at_mark_start = top();
|
||||
_prev_marked_bytes = marked_bytes;
|
||||
_marked_bytes = marked_bytes;
|
||||
_garbage_bytes = used() - marked_bytes;
|
||||
}
|
||||
|
||||
// Code roots support
|
||||
@ -456,8 +453,8 @@ void HeapRegion::print_on(outputStream* st) const {
|
||||
} else {
|
||||
st->print("| ");
|
||||
}
|
||||
st->print("|TAMS " PTR_FORMAT ", " PTR_FORMAT "| %s ",
|
||||
p2i(prev_top_at_mark_start()), p2i(next_top_at_mark_start()), rem_set()->get_state_str());
|
||||
st->print("|TAMS " PTR_FORMAT "| PB " PTR_FORMAT "| %s ",
|
||||
p2i(top_at_mark_start()), p2i(parsable_bottom_acquire()), rem_set()->get_state_str());
|
||||
if (UseNUMA) {
|
||||
G1NUMA* numa = G1NUMA::numa();
|
||||
if (node_index() < numa->num_active_nodes()) {
|
||||
@ -479,6 +476,7 @@ protected:
|
||||
VerifyOption _vo;
|
||||
|
||||
public:
|
||||
|
||||
G1VerificationClosure(G1CollectedHeap* g1h, VerifyOption vo) :
|
||||
_g1h(g1h), _ct(g1h->card_table()),
|
||||
_containing_obj(NULL), _failures(false), _n_failures(0), _vo(vo) {
|
||||
@ -523,14 +521,15 @@ public:
|
||||
if (!CompressedOops::is_null(heap_oop)) {
|
||||
oop obj = CompressedOops::decode_not_null(heap_oop);
|
||||
bool failed = false;
|
||||
if (!_g1h->is_in(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
|
||||
bool is_in_heap = _g1h->is_in(obj);
|
||||
if (!is_in_heap || _g1h->is_obj_dead_cond(obj, _vo)) {
|
||||
MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
|
||||
|
||||
if (!_failures) {
|
||||
log.error("----------");
|
||||
}
|
||||
ResourceMark rm;
|
||||
if (!_g1h->is_in(obj)) {
|
||||
if (!is_in_heap) {
|
||||
HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
|
||||
log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region " HR_FORMAT,
|
||||
p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
|
||||
@ -764,7 +763,7 @@ void HeapRegion::verify_rem_set(VerifyOption vo, bool* failures) const {
|
||||
|
||||
void HeapRegion::verify_rem_set() const {
|
||||
bool failures = false;
|
||||
verify_rem_set(VerifyOption::G1UsePrevMarking, &failures);
|
||||
verify_rem_set(VerifyOption::G1UseConcMarking, &failures);
|
||||
guarantee(!failures, "HeapRegion RemSet verification failed");
|
||||
}
|
||||
|
||||
@ -790,7 +789,7 @@ void HeapRegion::update_bot_for_block(HeapWord* start, HeapWord* end) {
|
||||
void HeapRegion::object_iterate(ObjectClosure* blk) {
|
||||
HeapWord* p = bottom();
|
||||
while (p < top()) {
|
||||
if (block_is_obj(p)) {
|
||||
if (block_is_obj(p, parsable_bottom())) {
|
||||
blk->do_object(cast_to_oop(p));
|
||||
}
|
||||
p += block_size(p);
|
||||
@ -805,3 +804,21 @@ void HeapRegion::fill_with_dummy_object(HeapWord* address, size_t word_size, boo
|
||||
// Fill in the object.
|
||||
CollectedHeap::fill_with_object(address, word_size, zap);
|
||||
}
|
||||
|
||||
void HeapRegion::fill_range_with_dead_objects(HeapWord* start, HeapWord* end) {
|
||||
size_t range_size = pointer_delta(end, start);
|
||||
|
||||
// Fill the dead range with objects. G1 might need to create two objects if
|
||||
// the range is larger than half a region, which is the max_fill_size().
|
||||
CollectedHeap::fill_with_objects(start, range_size);
|
||||
HeapWord* current = start;
|
||||
do {
|
||||
// Update the BOT if a threshold is crossed.
|
||||
size_t obj_size = cast_to_oop(current)->size();
|
||||
update_bot_for_block(current, current + obj_size);
|
||||
|
||||
// Advance to the next object.
|
||||
current += obj_size;
|
||||
guarantee(current <= end, "Should never go past end");
|
||||
} while (current != end);
|
||||
}
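The function above has two jobs: make the dead range walkable by covering it with filler objects, and keep the BOT consistent for each filler. A toy version that works on word offsets instead of real heap memory; the chunk limit and filler sizes are illustrative assumptions, not the real fill_with_objects() policy:

#include <cstddef>
#include <cstdio>
#include <vector>

// Each filler is described only by its start offset and size in words.
struct Filler { size_t start; size_t words; };

// Cover [start, end) with fillers no larger than max_fill_words, reporting
// every filler so a BOT-like structure could be updated per block, in the
// spirit of the update_bot_for_block() calls above.
std::vector<Filler> fill_range(size_t start, size_t end, size_t max_fill_words) {
  std::vector<Filler> fillers;
  size_t cur = start;
  while (cur < end) {
    size_t words = end - cur;
    if (words > max_fill_words) {
      words = max_fill_words;
    }
    fillers.push_back({cur, words});   // "update_bot_for_block(cur, cur + words)" would go here
    cur += words;
  }
  return fillers;
}

int main() {
  for (const Filler& f : fill_range(/*start=*/0, /*end=*/700, /*max_fill_words=*/512)) {
    std::printf("filler at %zu, %zu words\n", f.start, f.words);
  }
}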
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -101,7 +101,7 @@ public:
|
||||
assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
|
||||
_pre_dummy_top = pre_dummy_top;
|
||||
}
|
||||
HeapWord* pre_dummy_top() { return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top; }
|
||||
HeapWord* pre_dummy_top() const { return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top; }
|
||||
void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
|
||||
|
||||
// Returns true iff the given heap region contains the
|
||||
@ -144,8 +144,10 @@ private:
|
||||
// This version synchronizes with other calls to par_allocate_impl().
|
||||
inline HeapWord* par_allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
|
||||
|
||||
static bool obj_is_filler(oop obj);
|
||||
|
||||
public:
|
||||
HeapWord* block_start(const void* p);
|
||||
HeapWord* block_start(const void* addr, HeapWord* const pb);
|
||||
|
||||
void object_iterate(ObjectClosure* blk);
|
||||
|
||||
@ -153,6 +155,11 @@ public:
|
||||
// is old the BOT will be updated if the object spans a threshold.
|
||||
void fill_with_dummy_object(HeapWord* address, size_t word_size, bool zap = true);
|
||||
|
||||
// Create objects in the given range. The BOT will be updated if needed and
|
||||
// the created objects will have their header marked to show that they are
|
||||
// dead.
|
||||
void fill_range_with_dead_objects(HeapWord* start, HeapWord* end);
|
||||
|
||||
// All allocations are done without updating the BOT. The BOT
|
||||
// needs to be kept in sync for old generation regions and
|
||||
// this is done by explicit updates when crossing thresholds.
|
||||
@ -172,26 +179,27 @@ public:
|
||||
// Update skip-compacting heap region to be consistent after Full GC.
|
||||
void reset_skip_compacting_after_full_gc();
|
||||
|
||||
// All allocated blocks are occupied by objects in a HeapRegion
|
||||
bool block_is_obj(const HeapWord* p) const;
|
||||
// All allocated blocks are occupied by objects in a HeapRegion.
|
||||
bool block_is_obj(const HeapWord* p, HeapWord* pb) const;
|
||||
|
||||
// Returns whether the given object is dead based on TAMS and bitmap.
|
||||
// An object is dead iff a) it was not allocated since the last mark (>TAMS), b) it
|
||||
// is not marked (bitmap).
|
||||
bool is_obj_dead(const oop obj, const G1CMBitMap* const prev_bitmap) const;
|
||||
// Returns whether the given object is dead based on the given parsable_bottom (pb).
|
||||
// For an object to be considered dead it must be below pb and scrubbed.
|
||||
bool is_obj_dead(oop obj, HeapWord* pb) const;
|
||||
|
||||
// Returns the object size for all valid block starts
|
||||
// and the amount of unallocated words if called on top()
|
||||
// Returns the object size for all valid block starts. If parsable_bottom (pb)
|
||||
// is given, calculates the block size based on that parsable_bottom, not the
|
||||
// current value of this HeapRegion.
|
||||
size_t block_size(const HeapWord* p) const;
|
||||
size_t block_size(const HeapWord* p, HeapWord* pb) const;
|
||||
|
||||
// Scans through the region using the bitmap to determine what
|
||||
// objects to call size_t ApplyToMarkedClosure::apply(oop) for.
|
||||
template<typename ApplyToMarkedClosure>
|
||||
inline void apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarkedClosure* closure);
|
||||
|
||||
void update_bot() {
|
||||
_bot_part.update();
|
||||
}
|
||||
// Update the BOT for the entire region - assumes that all objects are parsable
|
||||
// and contiguous for this region.
|
||||
void update_bot();
|
||||
|
||||
private:
|
||||
// The remembered set for this region.
|
||||
@ -222,21 +230,27 @@ private:
|
||||
// word until the top and/or end of the region, and is the part
|
||||
// of the region for which no marking was done, i.e. objects may
|
||||
// have been allocated in this part since the last mark phase.
|
||||
// "prev" is the top at the start of the last completed marking.
|
||||
// "next" is the top at the start of the in-progress marking (if any.)
|
||||
HeapWord* _prev_top_at_mark_start;
|
||||
HeapWord* _next_top_at_mark_start;
|
||||
HeapWord* volatile _top_at_mark_start;
|
||||
|
||||
// The area above this limit is fully parsable. This limit
|
||||
// is equal to bottom except from Remark and until the region has been
|
||||
// scrubbed concurrently. The scrubbing ensures that all dead objects (with
|
||||
// possibly unloaded classes) have beenreplaced with filler objects that
|
||||
// are parsable. Below this limit the marking bitmap must be used to
|
||||
// determine size and liveness.
|
||||
HeapWord* volatile _parsable_bottom;
|
||||
|
||||
// Amount of dead data in the region.
|
||||
size_t _garbage_bytes;
|
||||
// We use concurrent marking to determine the amount of live data
|
||||
// in each heap region.
|
||||
size_t _prev_marked_bytes; // Bytes known to be live via last completed marking.
|
||||
size_t _next_marked_bytes; // Bytes known to be live via in-progress marking.
|
||||
size_t _marked_bytes; // Bytes known to be live via last completed marking.
|
||||
|
||||
void init_top_at_mark_start() {
|
||||
assert(_prev_marked_bytes == 0 &&
|
||||
_next_marked_bytes == 0,
|
||||
"Must be called after zero_marked_bytes.");
|
||||
_prev_top_at_mark_start = _next_top_at_mark_start = bottom();
|
||||
set_top_at_mark_start(bottom());
|
||||
_parsable_bottom = bottom();
|
||||
_garbage_bytes = 0;
|
||||
_marked_bytes = 0;
|
||||
}
|
||||
|
||||
// Data for young region survivor prediction.
|
||||
@ -253,12 +267,11 @@ private:
|
||||
|
||||
void report_region_type_change(G1HeapRegionTraceType::Type to);
|
||||
|
||||
// Returns whether the given object address refers to a dead object, and either the
|
||||
// size of the object (if live) or the size of the block (if dead) in size.
|
||||
// May
|
||||
// - only called with obj < top()
|
||||
// - not called on humongous objects or archive regions
|
||||
inline bool is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const;
|
||||
template <class Closure, bool in_gc_pause>
|
||||
inline HeapWord* oops_on_memregion_iterate(MemRegion mr, Closure* cl);
|
||||
|
||||
template <class Closure>
|
||||
inline HeapWord* oops_on_memregion_iterate_in_unparsable(MemRegion mr, HeapWord* pb, Closure* cl);
|
||||
|
||||
// Iterate over the references covered by the given MemRegion in a humongous
|
||||
// object and apply the given closure to them.
|
||||
@ -267,14 +280,15 @@ private:
|
||||
// Returns the address after the last actually scanned or NULL if the area could
|
||||
// not be scanned (That should only happen when invoked concurrently with the
|
||||
// mutator).
|
||||
template <class Closure, bool is_gc_active>
|
||||
template <class Closure, bool in_gc_pause>
|
||||
inline HeapWord* do_oops_on_memregion_in_humongous(MemRegion mr,
|
||||
Closure* cl,
|
||||
G1CollectedHeap* g1h);
|
||||
Closure* cl);
|
||||
|
||||
inline bool is_marked_in_bitmap(oop obj) const;
|
||||
|
||||
inline HeapWord* next_live_in_unparsable(G1CMBitMap* bitmap, const HeapWord* p, HeapWord* limit) const;
|
||||
inline HeapWord* next_live_in_unparsable(const HeapWord* p, HeapWord* limit) const;
|
||||
|
||||
// Returns the block size of the given (dead, potentially having its class unloaded) object
|
||||
// starting at p extending to at most the prev TAMS using the given mark bitmap.
|
||||
inline size_t block_size_using_bitmap(const HeapWord* p, const G1CMBitMap* const prev_bitmap) const;
|
||||
public:
|
||||
HeapRegion(uint hrm_index,
|
||||
G1BlockOffsetTable* bot,
|
||||
@ -322,25 +336,14 @@ public:
|
||||
static void setup_heap_region_size(size_t max_heap_size);
|
||||
|
||||
// The number of bytes marked live in the region in the last marking phase.
|
||||
size_t marked_bytes() { return _prev_marked_bytes; }
|
||||
size_t live_bytes() {
|
||||
return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
|
||||
}
|
||||
|
||||
// The number of bytes counted in the next marking.
|
||||
size_t next_marked_bytes() { return _next_marked_bytes; }
|
||||
// The number of bytes live wrt the next marking.
|
||||
size_t next_live_bytes() {
|
||||
return
|
||||
(top() - next_top_at_mark_start()) * HeapWordSize + next_marked_bytes();
|
||||
size_t marked_bytes() const { return _marked_bytes; }
|
||||
// An upper bound on the number of live bytes in the region.
|
||||
size_t live_bytes() const {
|
||||
return used() - garbage_bytes();
|
||||
}
|
||||
|
||||
// A lower bound on the amount of garbage bytes in the region.
|
||||
size_t garbage_bytes() {
|
||||
size_t used_at_mark_start_bytes =
|
||||
(prev_top_at_mark_start() - bottom()) * HeapWordSize;
|
||||
return used_at_mark_start_bytes - marked_bytes();
|
||||
}
|
||||
size_t garbage_bytes() const { return _garbage_bytes; }
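With a single marking, the accounting collapses to two numbers per region: _marked_bytes from the last completed marking and _garbage_bytes derived from it. The relation used above, spelled out with concrete (made-up) numbers:

#include <cstddef>
#include <cstdio>

// HeapWordSize on 64-bit HotSpot; purely for the example arithmetic.
const size_t kHeapWordSize = 8;

int main() {
  // Hypothetical region: 4096 words used, TAMS at word 3000 from bottom,
  // and marking found 2000 live words below TAMS.
  size_t used_words   = 4096;
  size_t tams_words   = 3000;                       // words between bottom and TAMS
  size_t marked_bytes = 2000 * kHeapWordSize;

  // note_end_of_marking(): garbage is everything below TAMS that was not marked.
  size_t garbage_bytes = tams_words * kHeapWordSize - marked_bytes;

  // live_bytes(): an upper bound, since objects above TAMS are assumed live.
  size_t live_bytes = used_words * kHeapWordSize - garbage_bytes;

  std::printf("garbage %zu bytes, live (upper bound) %zu bytes\n",
              garbage_bytes, live_bytes);
  return 0;
}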
// Return the amount of bytes we'll reclaim if we collect this
|
||||
// region. This includes not only the known garbage bytes in the
|
||||
@ -352,19 +355,15 @@ public:
|
||||
return capacity() - known_live_bytes;
|
||||
}
|
||||
|
||||
// An upper bound on the number of live bytes in the region.
|
||||
size_t max_live_bytes() { return used() - garbage_bytes(); }
|
||||
|
||||
void add_to_marked_bytes(size_t incr_bytes) {
|
||||
_next_marked_bytes = _next_marked_bytes + incr_bytes;
|
||||
}
|
||||
|
||||
void zero_marked_bytes() {
|
||||
_prev_marked_bytes = _next_marked_bytes = 0;
|
||||
}
|
||||
// Get the start of the unmarked area in this region.
|
||||
HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
|
||||
HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
|
||||
HeapWord* top_at_mark_start() const;
|
||||
void set_top_at_mark_start(HeapWord* value);
|
||||
|
||||
// Retrieve parsable bottom; since it may be modified concurrently, outside a
|
||||
// safepoint the _acquire method must be used.
|
||||
HeapWord* parsable_bottom() const;
|
||||
HeapWord* parsable_bottom_acquire() const;
|
||||
void reset_parsable_bottom();
|
||||
|
||||
// Note the start or end of marking. This tells the heap region
|
||||
// that the collector is about to start or has finished (concurrently)
|
||||
@ -374,10 +373,27 @@ public:
|
||||
// all fields related to the next marking info.
|
||||
inline void note_start_of_marking();
|
||||
|
||||
// Notify the region that concurrent marking has finished. Copy the
|
||||
// (now finalized) next marking info fields into the prev marking
|
||||
// info fields.
|
||||
inline void note_end_of_marking();
|
||||
// Notify the region that concurrent marking has finished. Passes the number of
|
||||
// bytes between bottom and TAMS.
|
||||
inline void note_end_of_marking(size_t marked_bytes);
|
||||
|
||||
// Notify the region that scrubbing has completed.
|
||||
inline void note_end_of_scrubbing();
|
||||
|
||||
// Notify the region that the (corresponding) bitmap has been cleared.
|
||||
inline void note_end_of_clearing();
|
||||
|
||||
// During the concurrent scrubbing phase, can there be any areas with unloaded
|
||||
// classes or dead objects in this region?
|
||||
// This set only includes old and open archive regions - humongous regions only
|
||||
// contain a single object which is either dead or live, contents of closed archive
|
||||
// regions never die (so is always contiguous), and young regions are never even
|
||||
// considered during concurrent scrub.
|
||||
bool needs_scrubbing() const { return is_old() || is_open_archive(); }
|
||||
// Same question as above, during full gc. Full gc needs to scrub any region that
|
||||
// might be skipped for compaction. This includes young generation regions as the
|
||||
// region relabeling to old happens later than scrubbing.
|
||||
bool needs_scrubbing_during_full_gc() const { return is_young() || needs_scrubbing(); }
|
||||
|
||||
const char* get_type_str() const { return _type.get_str(); }
|
||||
const char* get_short_type_str() const { return _type.get_short_str(); }
|
||||
@ -535,14 +551,12 @@ public:
|
||||
|
||||
void record_surv_words_in_group(size_t words_survived);
|
||||
|
||||
// Determine if an object has been allocated since the last
|
||||
// mark performed by the collector. This returns true iff the object
|
||||
// is within the unmarked area of the region.
|
||||
bool obj_allocated_since_prev_marking(oop obj) const {
|
||||
return cast_from_oop<HeapWord*>(obj) >= prev_top_at_mark_start();
|
||||
}
|
||||
bool obj_allocated_since_next_marking(oop obj) const {
|
||||
return cast_from_oop<HeapWord*>(obj) >= next_top_at_mark_start();
|
||||
// Determine if an object is in the parsable or the to-be-scrubbed area.
|
||||
inline static bool obj_in_parsable_area(const HeapWord* addr, HeapWord* pb);
|
||||
inline static bool obj_in_unparsable_area(oop obj, HeapWord* pb);
|
||||
|
||||
bool obj_allocated_since_marking_start(oop obj) const {
|
||||
return cast_from_oop<HeapWord*>(obj) >= top_at_mark_start();
|
||||
}
|
||||
|
||||
// Update the region state after a failed evacuation.
|
||||
@ -556,7 +570,7 @@ public:
|
||||
// Returns the next unscanned address if the designated objects were successfully
|
||||
// processed, NULL if an unparseable part of the heap was encountered (That should
|
||||
// only happen when invoked concurrently with the mutator).
|
||||
template <bool is_gc_active, class Closure>
|
||||
template <bool in_gc_pause, class Closure>
|
||||
inline HeapWord* oops_on_memregion_seq_iterate_careful(MemRegion mr, Closure* cl);
|
||||
|
||||
// Routines for managing a list of code roots (attached to the
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -27,6 +27,7 @@
|
||||
|
||||
#include "gc/g1/heapRegion.hpp"
|
||||
|
||||
#include "classfile/vmClasses.hpp"
|
||||
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
|
||||
#include "gc/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
|
||||
@ -34,7 +35,9 @@
|
||||
#include "gc/g1/g1SegmentedArray.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "runtime/init.hpp"
|
||||
#include "runtime/prefetch.inline.hpp"
|
||||
#include "runtime/safepoint.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
@ -79,75 +82,85 @@ inline HeapWord* HeapRegion::par_allocate_impl(size_t min_word_size,
|
||||
} while (true);
|
||||
}
|
||||
|
||||
inline HeapWord* HeapRegion::block_start(const void* p) {
|
||||
return _bot_part.block_start(p);
|
||||
inline HeapWord* HeapRegion::block_start(const void* addr, HeapWord* const pb) {
|
||||
return _bot_part.block_start(addr, pb);
|
||||
}
|
||||
|
||||
inline bool HeapRegion::is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const {
|
||||
HeapWord* addr = cast_from_oop<HeapWord*>(obj);
|
||||
|
||||
assert(addr < top(), "must be");
|
||||
assert(!is_closed_archive(),
|
||||
"Closed archive regions should not have references into other regions");
|
||||
assert(!is_humongous(), "Humongous objects not handled here");
|
||||
bool obj_is_dead = is_obj_dead(obj, prev_bitmap);
|
||||
|
||||
if (ClassUnloading && obj_is_dead) {
|
||||
assert(!block_is_obj(addr), "must be");
|
||||
*size = block_size_using_bitmap(addr, prev_bitmap);
|
||||
} else {
|
||||
assert(block_is_obj(addr), "must be");
|
||||
*size = obj->size();
|
||||
}
|
||||
return obj_is_dead;
|
||||
inline bool HeapRegion::obj_in_unparsable_area(oop obj, HeapWord* const pb) {
|
||||
return !HeapRegion::obj_in_parsable_area(cast_from_oop<HeapWord*>(obj), pb);
|
||||
}
|
||||
|
||||
inline bool HeapRegion::block_is_obj(const HeapWord* p) const {
|
||||
inline bool HeapRegion::obj_in_parsable_area(const HeapWord* addr, HeapWord* const pb) {
|
||||
return addr >= pb;
|
||||
}
|
||||
|
||||
inline bool HeapRegion::is_marked_in_bitmap(oop obj) const {
|
||||
return G1CollectedHeap::heap()->concurrent_mark()->mark_bitmap()->is_marked(obj);
|
||||
}
|
||||
|
||||
inline bool HeapRegion::block_is_obj(const HeapWord* const p, HeapWord* const pb) const {
|
||||
assert(p >= bottom() && p < top(), "precondition");
|
||||
assert(!is_continues_humongous(), "p must point to block-start");
|
||||
|
||||
if (obj_in_parsable_area(p, pb)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// When class unloading is enabled it is not safe to only consider top() to conclude if the
|
||||
// given pointer is a valid object. The situation can occur both for class unloading in a
|
||||
// Full GC and during a concurrent cycle.
|
||||
// During a Full GC regions can be excluded from compaction due to high live ratio, and
|
||||
// because of this there can be stale objects for unloaded classes left in these regions.
|
||||
// During a concurrent cycle class unloading is done after marking is complete and objects
|
||||
// for the unloaded classes will be stale until the regions are collected.
|
||||
if (ClassUnloading) {
|
||||
return !G1CollectedHeap::heap()->is_obj_dead(cast_to_oop(p), this);
|
||||
}
|
||||
return true;
|
||||
// To make sure dead objects can be handled without always keeping an additional bitmap, we
|
||||
// scrub dead objects and create filler objects that are considered dead. We do this even if
|
||||
// class unloading is disabled to avoid special code.
|
||||
// From Remark until the region has been completely scrubbed, obj_in_parsable_area will return false
|
||||
// and we have to use the bitmap to know if a block is a valid object.
|
||||
return is_marked_in_bitmap(cast_to_oop(p));
|
||||
}
|
||||
|
||||
inline size_t HeapRegion::block_size_using_bitmap(const HeapWord* addr, const G1CMBitMap* const prev_bitmap) const {
|
||||
assert(ClassUnloading,
|
||||
"All blocks should be objects if class unloading isn't used, so this method should not be called. "
|
||||
"HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") "
|
||||
"addr: " PTR_FORMAT,
|
||||
p2i(bottom()), p2i(top()), p2i(end()), p2i(addr));
|
||||
|
||||
// Old regions' dead objects may have dead classes
|
||||
// We need to find the next live object using the bitmap
|
||||
HeapWord* next = prev_bitmap->get_next_marked_addr(addr, prev_top_at_mark_start());
|
||||
|
||||
assert(next > addr, "must get the next live object");
|
||||
return pointer_delta(next, addr);
|
||||
inline bool HeapRegion::obj_is_filler(const oop obj) {
|
||||
Klass* k = obj->klass();
|
||||
return k == Universe::fillerArrayKlassObj() || k == vmClasses::FillerObject_klass();
|
||||
}
|
||||
|
||||
inline bool HeapRegion::is_obj_dead(const oop obj, const G1CMBitMap* const prev_bitmap) const {
|
||||
inline bool HeapRegion::is_obj_dead(const oop obj, HeapWord* const pb) const {
|
||||
assert(is_in_reserved(obj), "Object " PTR_FORMAT " must be in region", p2i(obj));
|
||||
return !obj_allocated_since_prev_marking(obj) &&
|
||||
!prev_bitmap->is_marked(obj) &&
|
||||
!is_closed_archive();
|
||||
}
|
||||
|
||||
inline size_t HeapRegion::block_size(const HeapWord* addr) const {
|
||||
assert(addr < top(), "precondition");
|
||||
|
||||
if (block_is_obj(addr)) {
|
||||
return cast_to_oop(addr)->size();
|
||||
// Objects in closed archive regions are always live.
|
||||
if (is_closed_archive()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return block_size_using_bitmap(addr, G1CollectedHeap::heap()->concurrent_mark()->prev_mark_bitmap());
|
||||
// From Remark until a region has been concurrently scrubbed, parts of the
|
||||
// region are not guaranteed to be parsable. Use the bitmap for liveness.
|
||||
if (obj_in_unparsable_area(obj, pb)) {
|
||||
return !is_marked_in_bitmap(obj);
|
||||
}
|
||||
|
||||
// This object is in the parsable part of the heap, live unless scrubbed.
|
||||
return obj_is_filler(obj);
|
||||
}
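Taken together, block_is_obj() and is_obj_dead() reduce liveness to two zones split by parsable_bottom: below it only the bitmap can be trusted, above it everything is a live object unless it is one of the filler objects planted by scrubbing. A compact stand-alone model (addresses, the bitmap and the filler test are all toy stand-ins):

#include <cstdint>
#include <cstdio>
#include <set>

struct ToyObject {
  uintptr_t addr;
  bool is_filler;      // stand-in for obj_is_filler()
};

using MarkBitmap = std::set<uintptr_t>;

// Below parsable_bottom (pb) the region may still contain unscrubbed dead
// objects, so only a mark makes an object live; at or above pb every object
// is live except the fillers that scrubbing left behind.
bool is_obj_dead(const ToyObject& obj, uintptr_t pb, const MarkBitmap& bitmap) {
  if (obj.addr < pb) {
    return bitmap.count(obj.addr) == 0;
  }
  return obj.is_filler;
}

int main() {
  MarkBitmap bitmap{0x1000};
  uintptr_t pb = 0x2000;
  std::printf("%d\n", is_obj_dead({0x1000, false}, pb, bitmap)); // 0: marked, below pb
  std::printf("%d\n", is_obj_dead({0x1800, false}, pb, bitmap)); // 1: unmarked, below pb
  std::printf("%d\n", is_obj_dead({0x2800, true},  pb, bitmap)); // 1: filler above pb
}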
inline HeapWord* HeapRegion::next_live_in_unparsable(G1CMBitMap* const bitmap, const HeapWord* p, HeapWord* const limit) const {
|
||||
return bitmap->get_next_marked_addr(p, limit);
|
||||
}
|
||||
|
||||
inline HeapWord* HeapRegion::next_live_in_unparsable(const HeapWord* p, HeapWord* const limit) const {
|
||||
G1CMBitMap* bitmap = G1CollectedHeap::heap()->concurrent_mark()->mark_bitmap();
|
||||
return next_live_in_unparsable(bitmap, p, limit);
|
||||
}
|
||||
|
||||
inline size_t HeapRegion::block_size(const HeapWord* p) const {
|
||||
return block_size(p, parsable_bottom());
|
||||
}
|
||||
|
||||
inline size_t HeapRegion::block_size(const HeapWord* p, HeapWord* const pb) const {
|
||||
assert(p < top(), "precondition");
|
||||
|
||||
if (!block_is_obj(p, pb)) {
|
||||
return pointer_delta(next_live_in_unparsable(p, pb), p);
|
||||
}
|
||||
|
||||
return cast_to_oop(p)->size();
|
||||
}
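For a dead block below parsable_bottom the size cannot be read from the (possibly class-unloaded) object header, so it is derived as the distance to the next marked address. The same idea with an ordered set of marked addresses standing in for the bitmap (a hypothetical simplification, not the real G1CMBitMap API):

#include <cstdint>
#include <cstdio>
#include <set>

using MarkBitmap = std::set<uintptr_t>;  // word offsets of marked objects

// Rough equivalent of next_live_in_unparsable(): first marked address after p
// and below limit, or limit if there is none.
uintptr_t next_live(const MarkBitmap& bitmap, uintptr_t p, uintptr_t limit) {
  auto it = bitmap.upper_bound(p);
  return (it != bitmap.end() && *it < limit) ? *it : limit;
}

// Size (in words) of the dead block starting at p: it extends up to the next
// live object or to parsable_bottom, whichever comes first.
size_t dead_block_size(const MarkBitmap& bitmap, uintptr_t p, uintptr_t pb) {
  return next_live(bitmap, p, pb) - p;
}

int main() {
  MarkBitmap bitmap{160, 300};      // live objects at word offsets 160 and 300
  uintptr_t pb = 200;               // parsable_bottom as a word offset
  std::printf("%zu words\n", dead_block_size(bitmap, 120, pb));  // 40: up to the mark at 160
  std::printf("%zu words\n", dead_block_size(bitmap, 170, pb));  // 30: up to parsable_bottom
}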
inline void HeapRegion::reset_compaction_top_after_compaction() {
|
||||
@ -160,8 +173,7 @@ inline void HeapRegion::reset_compacted_after_full_gc() {
|
||||
|
||||
reset_compaction_top_after_compaction();
|
||||
// After a compaction the mark bitmap in a non-pinned region is invalid.
|
||||
// We treat all objects as being above PTAMS.
|
||||
zero_marked_bytes();
|
||||
// But all objects are live, we get this by setting TAMS to bottom.
|
||||
init_top_at_mark_start();
|
||||
|
||||
reset_after_full_gc_common();
|
||||
@ -174,15 +186,18 @@ inline void HeapRegion::reset_skip_compacting_after_full_gc() {
|
||||
"region %u compaction_top " PTR_FORMAT " must not be different from bottom " PTR_FORMAT,
|
||||
hrm_index(), p2i(compaction_top()), p2i(bottom()));
|
||||
|
||||
_prev_top_at_mark_start = top(); // Keep existing top and usage.
|
||||
_prev_marked_bytes = used();
|
||||
_next_top_at_mark_start = bottom();
|
||||
_next_marked_bytes = 0;
|
||||
_marked_bytes = used();
|
||||
_garbage_bytes = 0;
|
||||
|
||||
set_top_at_mark_start(bottom());
|
||||
|
||||
reset_after_full_gc_common();
|
||||
}
|
||||
|
||||
inline void HeapRegion::reset_after_full_gc_common() {
|
||||
// Everything above bottom() is parsable and live.
|
||||
_parsable_bottom = bottom();
|
||||
|
||||
// Clear unused heap memory in debug builds.
|
||||
if (ZapUnusedHeapArea) {
|
||||
mangle_unused_area();
|
||||
@ -227,6 +242,18 @@ inline HeapWord* HeapRegion::allocate(size_t min_word_size,
|
||||
return allocate_impl(min_word_size, desired_word_size, actual_word_size);
|
||||
}
|
||||
|
||||
inline void HeapRegion::update_bot() {
|
||||
HeapWord* next_addr = bottom();
|
||||
|
||||
HeapWord* prev_addr;
|
||||
while (next_addr < top()) {
|
||||
prev_addr = next_addr;
|
||||
next_addr = prev_addr + cast_to_oop(prev_addr)->size();
|
||||
update_bot_for_block(prev_addr, next_addr);
|
||||
}
|
||||
assert(next_addr == top(), "Should stop the scan at the limit.");
|
||||
}
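update_bot() is just a linear walk: start at bottom, step from block start to block start by object size, and feed every [start, end) pair to the offset table. A generic sketch of that walk over an array of object sizes, with a vector of block starts standing in for the BOT (illustrative only):

#include <cstddef>
#include <cstdio>
#include <vector>

// Walk a region laid out as consecutive objects whose sizes (in words) are
// given, recording each block start the way update_bot_for_block() would.
std::vector<size_t> rebuild_block_starts(const std::vector<size_t>& obj_sizes) {
  std::vector<size_t> block_starts;
  size_t next_addr = 0;                       // "bottom" of the toy region
  for (size_t size : obj_sizes) {
    size_t prev_addr = next_addr;
    next_addr = prev_addr + size;             // next object starts right after this one
    block_starts.push_back(prev_addr);        // stand-in for update_bot_for_block(prev, next)
  }
  return block_starts;
}

int main() {
  for (size_t start : rebuild_block_starts({16, 8, 128, 24})) {
    std::printf("block starts at word %zu\n", start);
  }
}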
inline void HeapRegion::update_bot_for_obj(HeapWord* obj_start, size_t obj_size) {
|
||||
assert(is_old(), "should only do BOT updates for old regions");
|
||||
|
||||
@ -240,30 +267,68 @@ inline void HeapRegion::update_bot_for_obj(HeapWord* obj_start, size_t obj_size)
|
||||
_bot_part.update_for_block(obj_start, obj_end);
|
||||
}
|
||||
|
||||
inline HeapWord* HeapRegion::top_at_mark_start() const {
|
||||
return Atomic::load(&_top_at_mark_start);
|
||||
}
|
||||
|
||||
inline void HeapRegion::set_top_at_mark_start(HeapWord* value) {
|
||||
Atomic::store(&_top_at_mark_start, value);
|
||||
}
|
||||
|
||||
inline HeapWord* HeapRegion::parsable_bottom() const {
|
||||
assert(!is_init_completed() || SafepointSynchronize::is_at_safepoint(), "only during initialization or safepoint");
|
||||
return _parsable_bottom;
|
||||
}
|
||||
|
||||
inline HeapWord* HeapRegion::parsable_bottom_acquire() const {
|
||||
return Atomic::load_acquire(&_parsable_bottom);
|
||||
}
|
||||
|
||||
inline void HeapRegion::reset_parsable_bottom() {
|
||||
Atomic::release_store(&_parsable_bottom, bottom());
|
||||
}
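The split between parsable_bottom() and parsable_bottom_acquire() is a publication pattern: the scrubber lowers the value with a release store, and concurrent readers pair that with an acquire load so they never treat a still-unscrubbed area as parsable. A minimal std::atomic sketch of the same pairing (the pointer values are arbitrary):

#include <atomic>
#include <cstdint>
#include <cstdio>

// Stand-in for the field: word offsets instead of HeapWord*.
std::atomic<uintptr_t> parsable_bottom{0x3000};   // set at Remark to TAMS

// Concurrent readers (refinement, marking) must use acquire so that the data
// made parsable by the scrubber is visible before the lowered boundary is.
uintptr_t parsable_bottom_acquire() {
  return parsable_bottom.load(std::memory_order_acquire);
}

// The scrubber publishes the newly parsable area with a release store, in the
// spirit of Atomic::release_store() in reset_parsable_bottom().
void reset_parsable_bottom(uintptr_t bottom) {
  parsable_bottom.store(bottom, std::memory_order_release);
}

int main() {
  std::printf("before scrub: %#zx\n", (size_t)parsable_bottom_acquire());
  reset_parsable_bottom(0x1000);                  // region fully scrubbed
  std::printf("after scrub:  %#zx\n", (size_t)parsable_bottom_acquire());
}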
inline void HeapRegion::note_start_of_marking() {
|
||||
_next_marked_bytes = 0;
|
||||
assert(!is_closed_archive() || top_at_mark_start() == bottom(), "CA region's TAMS must always be at bottom");
|
||||
if (!is_closed_archive()) {
|
||||
_next_top_at_mark_start = top();
|
||||
set_top_at_mark_start(top());
|
||||
}
|
||||
assert(!is_closed_archive() || next_top_at_mark_start() == bottom(), "CA region's nTAMS must always be at bottom");
|
||||
_gc_efficiency = -1.0;
|
||||
}
|
||||
|
||||
inline void HeapRegion::note_end_of_marking() {
|
||||
_prev_top_at_mark_start = _next_top_at_mark_start;
|
||||
_next_top_at_mark_start = bottom();
|
||||
_prev_marked_bytes = _next_marked_bytes;
|
||||
_next_marked_bytes = 0;
|
||||
inline void HeapRegion::note_end_of_marking(size_t marked_bytes) {
|
||||
assert_at_safepoint();
|
||||
|
||||
_marked_bytes = marked_bytes;
|
||||
_garbage_bytes = byte_size(bottom(), top_at_mark_start()) - _marked_bytes;
|
||||
|
||||
if (needs_scrubbing()) {
|
||||
_parsable_bottom = top_at_mark_start();
|
||||
}
|
||||
}
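
note_end_of_marking() derives garbage from the marking result: every byte between bottom() and TAMS that was not reported as marked is garbage. A tiny standalone sketch of that arithmetic, with made-up sizes:

#include <cstddef>
#include <cstdio>

int main() {
  // byte_size(bottom(), top_at_mark_start()) and the marked bytes reported by
  // marking; both values are made up for this example.
  const std::size_t bytes_below_tams = 4u * 1024 * 1024;
  const std::size_t marked_bytes     = 3u * 1024 * 1024;

  const std::size_t garbage_bytes = bytes_below_tams - marked_bytes;
  std::printf("garbage below TAMS: %zu bytes\n", garbage_bytes);  // 1 MiB in this example
  return 0;
}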

inline void HeapRegion::note_end_of_scrubbing() {
reset_parsable_bottom();
}

inline void HeapRegion::note_end_of_clearing() {
// We do not need a release store here because
//
// - if this method is called during concurrent bitmap clearing, we do not read
// the bitmap any more for live/dead information (we do not read the bitmap at
// all at that point).
// - otherwise we reclaim regions only during GC and we do not read tams and the
// bitmap concurrently.
set_top_at_mark_start(bottom());
}

inline bool HeapRegion::in_collection_set() const {
return G1CollectedHeap::heap()->is_in_cset(this);
}

template <class Closure, bool is_gc_active>
template <class Closure, bool in_gc_pause>
HeapWord* HeapRegion::do_oops_on_memregion_in_humongous(MemRegion mr,
Closure* cl,
G1CollectedHeap* g1h) {
Closure* cl) {
assert(is_humongous(), "precondition");
HeapRegion* sr = humongous_start_region();
oop obj = cast_to_oop(sr->bottom());
@ -274,7 +339,7 @@ HeapWord* HeapRegion::do_oops_on_memregion_in_humongous(MemRegion mr,
// we've already set the card clean, so we must return failure,
// since the allocating thread could have performed a write to the
// card that might be missed otherwise.
if (!is_gc_active && (obj->klass_or_null_acquire() == NULL)) {
if (!in_gc_pause && (obj->klass_or_null_acquire() == NULL)) {
return NULL;
}

@ -282,7 +347,8 @@ HeapWord* HeapRegion::do_oops_on_memregion_in_humongous(MemRegion mr,
// Only filler objects follow a humongous object in the containing
// regions, and we can ignore those. So only process the one
// humongous object.
if (g1h->is_obj_dead(obj, sr)) {
HeapWord* const pb = in_gc_pause ? sr->parsable_bottom() : sr->parsable_bottom_acquire();
if (sr->is_obj_dead(obj, pb)) {
// The object is dead. There can be no other object in this region, so return
// the end of that region.
return end();
@ -308,55 +374,114 @@ HeapWord* HeapRegion::do_oops_on_memregion_in_humongous(MemRegion mr,
}
}

template <bool is_gc_active, class Closure>
HeapWord* HeapRegion::oops_on_memregion_seq_iterate_careful(MemRegion mr,
Closure* cl) {
assert(MemRegion(bottom(), end()).contains(mr), "Card region not in heap region");
G1CollectedHeap* g1h = G1CollectedHeap::heap();
template <class Closure>
inline HeapWord* HeapRegion::oops_on_memregion_iterate_in_unparsable(MemRegion mr, HeapWord* const pb, Closure* cl) {
// Cache the boundaries of the area to scan in some locals.
HeapWord* const start = mr.start();
// Only scan until parsable_bottom.
HeapWord* const end = MIN2(mr.end(), pb);

// Special handling for humongous regions.
if (is_humongous()) {
return do_oops_on_memregion_in_humongous<Closure, is_gc_active>(mr, cl, g1h);
G1CMBitMap* bitmap = G1CollectedHeap::heap()->concurrent_mark()->mark_bitmap();
// Find the obj that extends onto mr.start().
//
// The BOT itself is stable enough to be read at any time as
//
// * during refinement the individual elements of the BOT are read and written
// atomically and any visible mix of new and old BOT entries will eventually lead
// to some (possibly outdated) object start.
// The result of block_start() during concurrent refinement may be outdated - the
// scrubbing may have written a (partial) filler object header exactly crossing
// that perceived object start. So we have to advance to the next live object
// (using the bitmap) to be able to start the following iteration.
//
// * during GC the BOT does not change while reading, and the objects corresponding
// to these block starts are valid as "holes" are filled atomically wrt to
// safepoints.
//
HeapWord* cur = block_start(start, pb);

if (!bitmap->is_marked(cur)) {
cur = bitmap->get_next_marked_addr(cur, end);
}
assert(is_old() || is_archive(), "Wrongly trying to iterate over region %u type %s", _hrm_index, get_type_str());

// Because mr has been trimmed to what's been allocated in this
// region, the parts of the heap that are examined here are always
// parsable; there's no need to use klass_or_null to detect
// in-progress allocation.
while (cur != end) {
assert(bitmap->is_marked(cur), "must be");

oop obj = cast_to_oop(cur);
assert(oopDesc::is_oop(obj, true), "Not an oop at " PTR_FORMAT, p2i(cur));

cur += obj->size();
bool is_precise = false;

if (!obj->is_objArray() || (cast_from_oop<HeapWord*>(obj) >= start && cur <= end)) {
obj->oop_iterate(cl);
} else {
obj->oop_iterate(cl, mr);
is_precise = true;
}

if (cur >= end) {
return is_precise ? end : cur;
}

cur = bitmap->get_next_marked_addr(cur, end);
}
return end;
}
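
Below parsable_bottom the region may contain dead objects whose headers are not parsable yet, so oops_on_memregion_iterate_in_unparsable() above takes object starts from the mark bitmap rather than from object sizes. A compact standalone sketch of that iteration pattern (not the HotSpot G1CMBitMap API); for example, iterate_marked(bits, 0, bits.size(), visit) calls visit once per set bit:

#include <cstddef>
#include <vector>

// Next set bit at or after 'from', or 'limit' if there is none.
static std::size_t next_marked(const std::vector<bool>& bits, std::size_t from, std::size_t limit) {
  while (from < limit && !bits[from]) {
    ++from;
  }
  return from;
}

// Visit every marked index in [start, limit); 'visit' stands in for applying
// the oop closure to the live object found at that index.
template <typename Visitor>
void iterate_marked(const std::vector<bool>& bits, std::size_t start, std::size_t limit, Visitor visit) {
  for (std::size_t cur = next_marked(bits, start, limit);
       cur < limit;
       cur = next_marked(bits, cur + 1, limit)) {
    visit(cur);
  }
}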

// Applies cl to all reference fields of live objects in mr in non-humongous regions.
//
// For performance, the strategy here is to divide the work into two parts: areas
// below parsable_bottom (unparsable) and above parsable_bottom. The unparsable parts
// use the bitmap to locate live objects.
// Otherwise we would need to check for every object what the current location is;
// we expect that the amount of GCs executed during scrubbing is very low so such
// tests would be unnecessary almost all the time.
template <class Closure, bool in_gc_pause>
inline HeapWord* HeapRegion::oops_on_memregion_iterate(MemRegion mr, Closure* cl) {
// Cache the boundaries of the memory region in some const locals
HeapWord* const start = mr.start();
HeapWord* const end = mr.end();

// Find the obj that extends onto mr.start().
HeapWord* cur = block_start(start);
// Snapshot the region's parsable_bottom.
HeapWord* const pb = in_gc_pause ? parsable_bottom() : parsable_bottom_acquire();

const G1CMBitMap* const bitmap = g1h->concurrent_mark()->prev_mark_bitmap();
// Find the obj that extends onto mr.start()
HeapWord* cur;
if (obj_in_parsable_area(start, pb)) {
cur = block_start(start, pb);
} else {
cur = oops_on_memregion_iterate_in_unparsable<Closure>(mr, pb, cl);
// We might have scanned beyond end at this point because of imprecise iteration.
if (cur >= end) {
return cur;
}
// Parsable_bottom is always the start of a valid parsable object, so we must either
// have stopped at parsable_bottom, or already iterated beyond end. The
// latter case is handled above.
assert(cur == pb, "must be cur " PTR_FORMAT " pb " PTR_FORMAT, p2i(cur), p2i(pb));
}
assert(cur < top(), "must be cur " PTR_FORMAT " top " PTR_FORMAT, p2i(cur), p2i(top()));

// All objects >= pb are parsable. So we can just take object sizes directly.
while (true) {
oop obj = cast_to_oop(cur);
assert(oopDesc::is_oop(obj, true), "Not an oop at " PTR_FORMAT, p2i(cur));
assert(obj->klass_or_null() != NULL,
"Unparsable heap at " PTR_FORMAT, p2i(cur));

size_t size;
bool is_dead = is_obj_dead_with_size(obj, bitmap, &size);
bool is_precise = false;

cur += size;
if (!is_dead) {
// Process live object's references.
cur += obj->size();
// Process live object's references.

// Non-objArrays are usually marked imprecise at the object
// start, in which case we need to iterate over them in full.
// objArrays are precisely marked, but can still be iterated
// over in full if completely covered.
if (!obj->is_objArray() || (cast_from_oop<HeapWord*>(obj) >= start && cur <= end)) {
obj->oop_iterate(cl);
} else {
obj->oop_iterate(cl, mr);
is_precise = true;
}
// Non-objArrays are usually marked imprecise at the object
// start, in which case we need to iterate over them in full.
// objArrays are precisely marked, but can still be iterated
// over in full if completely covered.
if (!obj->is_objArray() || (cast_from_oop<HeapWord*>(obj) >= start && cur <= end)) {
obj->oop_iterate(cl);
} else {
obj->oop_iterate(cl, mr);
is_precise = true;
}
if (cur >= end) {
return is_precise ? end : cur;
@ -364,6 +489,28 @@ HeapWord* HeapRegion::oops_on_memregion_seq_iterate_careful(MemRegion mr,
}
}

template <bool in_gc_pause, class Closure>
HeapWord* HeapRegion::oops_on_memregion_seq_iterate_careful(MemRegion mr,
Closure* cl) {
assert(MemRegion(bottom(), top()).contains(mr), "Card region not in heap region");

// Special handling for humongous regions.
if (is_humongous()) {
return do_oops_on_memregion_in_humongous<Closure, in_gc_pause>(mr, cl);
}
assert(is_old() || is_archive(), "Wrongly trying to iterate over region %u type %s", _hrm_index, get_type_str());

// Because mr has been trimmed to what's been allocated in this
// region, the objects in these parts of the heap have non-NULL
// klass pointers. There's no need to use klass_or_null to detect
// in-progress allocation.
// We might be in the process of scrubbing this region and in this
// case there might be objects that have their classes unloaded and
// therefore need to be scanned using the bitmap.

return oops_on_memregion_iterate<Closure, in_gc_pause>(mr, cl);
}
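
With a single bitmap, liveness during this iteration follows one rule: below TAMS an object is live only if its start is marked; at or above TAMS everything allocated is implicitly live. A standalone sketch of that rule with illustrative types and names, not HotSpot code:

#include <cstddef>
#include <vector>

struct RegionStandin {
  std::size_t bottom;              // first word index of the region
  std::size_t tams;                // top-at-mark-start, as a word index
  std::vector<bool> mark_bits;     // one bit per word, indexed from 'bottom'

  bool is_marked(std::size_t addr) const {
    return mark_bits[addr - bottom];
  }

  // Live if allocated since marking started (>= TAMS) or marked below TAMS.
  bool is_obj_live(std::size_t obj_start) const {
    return obj_start >= tams || is_marked(obj_start);
  }
};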

inline int HeapRegion::age_in_surv_rate_group() const {
assert(has_surv_rate_group(), "pre-condition");
assert(has_valid_age_in_surv_rate(), "pre-condition");

@ -68,14 +68,12 @@ HeapRegionManager::HeapRegionManager() :
_committed_map(),
_allocated_heapregions_length(0),
_regions(), _heap_mapper(NULL),
_prev_bitmap_mapper(NULL),
_next_bitmap_mapper(NULL),
_bitmap_mapper(NULL),
_free_list("Free list", new MasterFreeRegionListChecker())
{ }

void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
G1RegionToSpaceMapper* prev_bitmap,
G1RegionToSpaceMapper* next_bitmap,
G1RegionToSpaceMapper* bitmap,
G1RegionToSpaceMapper* bot,
G1RegionToSpaceMapper* cardtable,
G1RegionToSpaceMapper* card_counts) {
@ -83,8 +81,7 @@ void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,

_heap_mapper = heap_storage;

_prev_bitmap_mapper = prev_bitmap;
_next_bitmap_mapper = next_bitmap;
_bitmap_mapper = bitmap;

_bot_mapper = bot;
_cardtable_mapper = cardtable;
@ -190,8 +187,7 @@ void HeapRegionManager::commit_regions(uint index, size_t num_regions, WorkerThr
_heap_mapper->commit_regions(index, num_regions, pretouch_workers);

// Also commit auxiliary data
_prev_bitmap_mapper->commit_regions(index, num_regions, pretouch_workers);
_next_bitmap_mapper->commit_regions(index, num_regions, pretouch_workers);
_bitmap_mapper->commit_regions(index, num_regions, pretouch_workers);

_bot_mapper->commit_regions(index, num_regions, pretouch_workers);
_cardtable_mapper->commit_regions(index, num_regions, pretouch_workers);
@ -217,8 +213,7 @@ void HeapRegionManager::uncommit_regions(uint start, uint num_regions) {
_heap_mapper->uncommit_regions(start, num_regions);

// Also uncommit auxiliary data
_prev_bitmap_mapper->uncommit_regions(start, num_regions);
_next_bitmap_mapper->uncommit_regions(start, num_regions);
_bitmap_mapper->uncommit_regions(start, num_regions);

_bot_mapper->uncommit_regions(start, num_regions);
_cardtable_mapper->uncommit_regions(start, num_regions);
@ -271,8 +266,7 @@ void HeapRegionManager::deactivate_regions(uint start, uint num_regions) {

void HeapRegionManager::clear_auxiliary_data_structures(uint start, uint num_regions) {
// Signal marking bitmaps to clear the given regions.
_prev_bitmap_mapper->signal_mapping_changed(start, num_regions);
_next_bitmap_mapper->signal_mapping_changed(start, num_regions);
_bitmap_mapper->signal_mapping_changed(start, num_regions);
// Signal G1BlockOffsetTable to clear the given regions.
_bot_mapper->signal_mapping_changed(start, num_regions);
// Signal G1CardTable to clear the given regions.
@ -283,15 +277,13 @@ void HeapRegionManager::clear_auxiliary_data_structures(uint start, uint num_reg

MemoryUsage HeapRegionManager::get_auxiliary_data_memory_usage() const {
size_t used_sz =
_prev_bitmap_mapper->committed_size() +
_next_bitmap_mapper->committed_size() +
_bitmap_mapper->committed_size() +
_bot_mapper->committed_size() +
_cardtable_mapper->committed_size() +
_card_counts_mapper->committed_size();

size_t committed_sz =
_prev_bitmap_mapper->reserved_size() +
_next_bitmap_mapper->reserved_size() +
_bitmap_mapper->reserved_size() +
_bot_mapper->reserved_size() +
_cardtable_mapper->reserved_size() +
_card_counts_mapper->reserved_size();

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -123,8 +123,7 @@ class HeapRegionManager: public CHeapObj<mtGC> {

G1HeapRegionTable _regions;
G1RegionToSpaceMapper* _heap_mapper;
G1RegionToSpaceMapper* _prev_bitmap_mapper;
G1RegionToSpaceMapper* _next_bitmap_mapper;
G1RegionToSpaceMapper* _bitmap_mapper;
FreeRegionList _free_list;

void expand(uint index, uint num_regions, WorkerThreads* pretouch_workers = NULL);
@ -162,8 +161,7 @@ public:
HeapRegionManager();

void initialize(G1RegionToSpaceMapper* heap_storage,
G1RegionToSpaceMapper* prev_bitmap,
G1RegionToSpaceMapper* next_bitmap,
G1RegionToSpaceMapper* bitmap,
G1RegionToSpaceMapper* bot,
G1RegionToSpaceMapper* cardtable,
G1RegionToSpaceMapper* card_counts);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -53,10 +53,11 @@ protected:
// Clear bitmap range
void do_clear(MemRegion mr, bool large);

public:
static size_t compute_size(size_t heap_size);
// Returns the amount of bytes on the heap between two marks in the bitmap.
static size_t mark_distance();

public:
static size_t compute_size(size_t heap_size);
// Returns how many bytes (or bits) of the heap a single byte (or bit) of the
// mark bitmap corresponds to. This is the same as the mark distance above.
static size_t heap_map_factor() {
@ -81,7 +82,7 @@ public:
// "addr", and before "limit", if "limit" is non-NULL. If there is no
// such bit, returns "limit" if that is non-NULL, or else "endWord()".
inline HeapWord* get_next_marked_addr(const HeapWord* addr,
const HeapWord* limit) const;
HeapWord* limit) const;

void print_on_error(outputStream* st, const char* prefix) const;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,11 +29,12 @@

#include "gc/shared/collectedHeap.hpp"
#include "memory/memRegion.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"

inline HeapWord* MarkBitMap::get_next_marked_addr(const HeapWord* addr,
const HeapWord* limit) const {
inline HeapWord* MarkBitMap::get_next_marked_addr(const HeapWord* const addr,
HeapWord* const limit) const {
assert(limit != NULL, "limit must not be NULL");
// Round addr up to a possible object boundary to be safe.
size_t const addr_offset = addr_to_offset(align_up(addr, HeapWordSize << _shifter));

@ -31,12 +31,11 @@ enum class VerifyOption : uint {
Default = 0,

// G1

// Use "prev" mark bitmap information using pTAMS.
G1UsePrevMarking = Default,
// Use "next" mark bitmap information from full gc marking. This does not
// Use mark bitmap information (from concurrent marking) using TAMS.
G1UseConcMarking = Default,
// Use mark bitmap information from full gc marking. This does not
// use (or need) TAMS.
G1UseFullMarking = G1UsePrevMarking + 1
G1UseFullMarking = G1UseConcMarking + 1,
};

#endif // SHARE_GC_SHARED_VERIFYOPTION_HPP

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -75,12 +75,12 @@ void VM_HeapRegionApplyToMarkedObjectsTest::doit() {
HeapRegion* region = heap->heap_region_containing(heap->bottom_addr_for_region(0));

// Mark some "oops" in the bitmap.
G1CMBitMap* bitmap = heap->concurrent_mark()->next_mark_bitmap();
bitmap->mark(region->bottom());
bitmap->mark(region->bottom() + MARK_OFFSET_1);
bitmap->mark(region->bottom() + MARK_OFFSET_2);
bitmap->mark(region->bottom() + MARK_OFFSET_3);
bitmap->mark(region->end());
G1CMBitMap* bitmap = heap->concurrent_mark()->mark_bitmap();
bitmap->par_mark(region->bottom());
bitmap->par_mark(region->bottom() + MARK_OFFSET_1);
bitmap->par_mark(region->bottom() + MARK_OFFSET_2);
bitmap->par_mark(region->bottom() + MARK_OFFSET_3);
bitmap->par_mark(region->end());

VerifyAndCountMarkClosure cl(bitmap);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -92,9 +92,9 @@ bool TestIteratorFn::do_bit(size_t offset) {
}

static idx_t compute_expected(idx_t search_start,
idx_t search_end,
idx_t left_bit,
idx_t right_bit) {
idx_t search_end,
idx_t left_bit,
idx_t right_bit) {
idx_t expected = search_end;
if (search_start <= left_bit) {
if (left_bit < search_end) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -61,7 +61,7 @@ public class TestLargePageUseForAuxMemory {
// being checked. In case of a large page allocation failure the output will
// include logs like this for the affected data structure:
// [0.048s][debug][gc,heap,coops] Reserve regular memory without large pages
// [0.048s][info ][pagesize ] Next Bitmap: ... page_size=4K ...
// [0.048s][info ][pagesize ] Mark Bitmap: ... page_size=4K ...
//
// The pattern passed in should match the second line.
String failureMatch = output.firstMatch("Reserve regular memory without large pages\\n.*" + pattern, 1);
@ -101,9 +101,8 @@ public class TestLargePageUseForAuxMemory {
checkSize(output, expectedPageSize, "Card Counts Table: .*page_size=([^ ]+)");
}

static void checkBitmaps(OutputAnalyzer output, long expectedPageSize) throws Exception {
checkSize(output, expectedPageSize, "Prev Bitmap: .*page_size=([^ ]+)");
checkSize(output, expectedPageSize, "Next Bitmap: .*page_size=([^ ]+)");
static void checkBitmap(OutputAnalyzer output, long expectedPageSize) throws Exception {
checkSize(output, expectedPageSize, "Mark Bitmap: .*page_size=([^ ]+)");
}

static void testVM(String what, long heapsize, boolean cardsShouldUseLargePages, boolean bitmapShouldUseLargePages) throws Exception {
@ -124,10 +123,10 @@ public class TestLargePageUseForAuxMemory {
// Only expect large page size if large pages are enabled.
if (largePagesEnabled(output)) {
checkSmallTables(output, (cardsShouldUseLargePages ? largePageSize : smallPageSize));
checkBitmaps(output, (bitmapShouldUseLargePages ? largePageSize : smallPageSize));
checkBitmap(output, (bitmapShouldUseLargePages ? largePageSize : smallPageSize));
} else {
checkSmallTables(output, smallPageSize);
checkBitmaps(output, smallPageSize);
checkBitmap(output, smallPageSize);
}
output.shouldHaveExitValue(0);

@ -143,7 +142,7 @@ public class TestLargePageUseForAuxMemory {

output = new OutputAnalyzer(pb.start());
checkSmallTables(output, smallPageSize);
checkBitmaps(output, smallPageSize);
checkBitmap(output, smallPageSize);
output.shouldHaveExitValue(0);
}