8331385: G1: Prefix HeapRegion helper classes with G1

Reviewed-by: ayang, dholmes
This commit is contained in:
Thomas Schatzl 2024-07-05 07:18:34 +00:00
parent b9d8056d5c
commit 4ec1ae1097
66 changed files with 576 additions and 578 deletions

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -125,15 +125,13 @@ void G1Arguments::initialize_mark_stack_size() {
MAX2(MarkStackSize, (size_t)ConcGCThreads * TASKQUEUE_SIZE));
FLAG_SET_ERGO(MarkStackSize, mark_stack_size);
}
}
void G1Arguments::initialize_card_set_configuration() {
assert(G1HeapRegion::LogOfHRGrainBytes != 0, "not initialized");
// Array of Cards card set container globals.
const uint LOG_M = 20;
assert(log2i_exact(HeapRegionBounds::min_size()) == LOG_M, "inv");
assert(log2i_exact(G1HeapRegionBounds::min_size()) == LOG_M, "inv");
assert(G1HeapRegion::LogOfHRGrainBytes >= LOG_M, "from the above");
uint region_size_log_mb = G1HeapRegion::LogOfHRGrainBytes - LOG_M;

View File

@ -129,7 +129,7 @@ size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
// is done by clients of this interface.)
void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
G1HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
}
void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
@ -162,7 +162,7 @@ G1HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
// Private methods.
G1HeapRegion* G1CollectedHeap::new_region(size_t word_size,
HeapRegionType type,
G1HeapRegionType type,
bool do_expand,
uint node_index) {
assert(!is_humongous(word_size) || word_size <= G1HeapRegion::GrainWords,
@ -710,7 +710,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
ShouldNotReachHere();
}
class PostCompactionPrinterClosure: public HeapRegionClosure {
class PostCompactionPrinterClosure: public G1HeapRegionClosure {
public:
bool do_heap_region(G1HeapRegion* hr) {
assert(!hr->is_young(), "not expecting to find young regions");
@ -1070,7 +1070,7 @@ void G1CollectedHeap::shrink(size_t shrink_bytes) {
_verifier->verify_region_sets_optional();
}
class OldRegionSetChecker : public HeapRegionSetChecker {
class OldRegionSetChecker : public G1HeapRegionSetChecker {
public:
void check_mt_safety() {
// Master Old Set MT safety protocol:
@ -1098,7 +1098,7 @@ public:
const char* get_description() { return "Old Regions"; }
};
class HumongousRegionSetChecker : public HeapRegionSetChecker {
class HumongousRegionSetChecker : public G1HeapRegionSetChecker {
public:
void check_mt_safety() {
// Humongous Set MT safety protocol:
@ -1352,9 +1352,9 @@ jint G1CollectedHeap::initialize() {
guarantee(G1HeapRegion::CardsPerRegion < max_cards_per_region,
"too many cards per region");
HeapRegionRemSet::initialize(_reserved);
G1HeapRegionRemSet::initialize(_reserved);
FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
G1FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
_bot = new G1BlockOffsetTable(reserved(), bot_storage);
@ -1536,7 +1536,7 @@ size_t G1CollectedHeap::used_unlocked() const {
return _summary_bytes_used;
}
class SumUsedClosure: public HeapRegionClosure {
class SumUsedClosure: public G1HeapRegionClosure {
size_t _used;
public:
SumUsedClosure() : _used(0) {}
@ -1887,7 +1887,7 @@ bool G1CollectedHeap::is_in(const void* p) const {
// Iterates an ObjectClosure over all objects within a G1HeapRegion.
class IterateObjectClosureRegionClosure: public HeapRegionClosure {
class IterateObjectClosureRegionClosure: public G1HeapRegionClosure {
ObjectClosure* _cl;
public:
IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
@ -1907,7 +1907,7 @@ void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
class G1ParallelObjectIterator : public ParallelObjectIteratorImpl {
private:
G1CollectedHeap* _heap;
HeapRegionClaimer _claimer;
G1HeapRegionClaimer _claimer;
public:
G1ParallelObjectIterator(uint thread_num) :
@ -1923,7 +1923,7 @@ ParallelObjectIteratorImpl* G1CollectedHeap::parallel_object_iterator(uint threa
return new G1ParallelObjectIterator(thread_num);
}
void G1CollectedHeap::object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer) {
void G1CollectedHeap::object_iterate_parallel(ObjectClosure* cl, uint worker_id, G1HeapRegionClaimer* claimer) {
IterateObjectClosureRegionClosure blk(cl);
heap_region_par_iterate_from_worker_offset(&blk, claimer, worker_id);
}
@ -1932,43 +1932,43 @@ void G1CollectedHeap::keep_alive(oop obj) {
G1BarrierSet::enqueue_preloaded(obj);
}
void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
void G1CollectedHeap::heap_region_iterate(G1HeapRegionClosure* cl) const {
_hrm.iterate(cl);
}
void G1CollectedHeap::heap_region_iterate(HeapRegionIndexClosure* cl) const {
void G1CollectedHeap::heap_region_iterate(G1HeapRegionIndexClosure* cl) const {
_hrm.iterate(cl);
}
void G1CollectedHeap::heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
HeapRegionClaimer *hrclaimer,
void G1CollectedHeap::heap_region_par_iterate_from_worker_offset(G1HeapRegionClosure* cl,
G1HeapRegionClaimer *hrclaimer,
uint worker_id) const {
_hrm.par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));
}
void G1CollectedHeap::heap_region_par_iterate_from_start(HeapRegionClosure* cl,
HeapRegionClaimer *hrclaimer) const {
void G1CollectedHeap::heap_region_par_iterate_from_start(G1HeapRegionClosure* cl,
G1HeapRegionClaimer *hrclaimer) const {
_hrm.par_iterate(cl, hrclaimer, 0);
}
void G1CollectedHeap::collection_set_iterate_all(HeapRegionClosure* cl) {
void G1CollectedHeap::collection_set_iterate_all(G1HeapRegionClosure* cl) {
_collection_set.iterate(cl);
}
void G1CollectedHeap::collection_set_par_iterate_all(HeapRegionClosure* cl,
HeapRegionClaimer* hr_claimer,
void G1CollectedHeap::collection_set_par_iterate_all(G1HeapRegionClosure* cl,
G1HeapRegionClaimer* hr_claimer,
uint worker_id) {
_collection_set.par_iterate(cl, hr_claimer, worker_id);
}
void G1CollectedHeap::collection_set_iterate_increment_from(HeapRegionClosure *cl,
HeapRegionClaimer* hr_claimer,
void G1CollectedHeap::collection_set_iterate_increment_from(G1HeapRegionClosure *cl,
G1HeapRegionClaimer* hr_claimer,
uint worker_id) {
_collection_set.iterate_incremental_part_from(cl, hr_claimer, worker_id);
}
void G1CollectedHeap::par_iterate_regions_array(HeapRegionClosure* cl,
HeapRegionClaimer* hr_claimer,
void G1CollectedHeap::par_iterate_regions_array(G1HeapRegionClosure* cl,
G1HeapRegionClaimer* hr_claimer,
const uint regions[],
size_t length,
uint worker_id) const {
@ -2046,10 +2046,10 @@ bool G1CollectedHeap::supports_concurrent_gc_breakpoints() const {
return true;
}
class PrintRegionClosure: public HeapRegionClosure {
class G1PrintRegionClosure: public G1HeapRegionClosure {
outputStream* _st;
public:
PrintRegionClosure(outputStream* st) : _st(st) {}
G1PrintRegionClosure(outputStream* st) : _st(st) {}
bool do_heap_region(G1HeapRegion* r) {
r->print_on(_st);
return false;
@ -2121,7 +2121,7 @@ void G1CollectedHeap::print_regions_on(outputStream* st) const {
"CS=collection set, F=free, "
"TAMS=top-at-mark-start, "
"PB=parsable bottom");
PrintRegionClosure blk(st);
G1PrintRegionClosure blk(st);
heap_region_iterate(&blk);
}
@ -2281,14 +2281,14 @@ void G1CollectedHeap::start_concurrent_cycle(bool concurrent_operation_is_full_m
bool G1CollectedHeap::is_potential_eager_reclaim_candidate(G1HeapRegion* r) const {
// We don't nominate objects with many remembered set entries, on
// the assumption that such objects are likely still live.
HeapRegionRemSet* rem_set = r->rem_set();
G1HeapRegionRemSet* rem_set = r->rem_set();
return rem_set->occupancy_less_or_equal_than(G1EagerReclaimRemSetThreshold);
}
#ifndef PRODUCT
void G1CollectedHeap::verify_region_attr_remset_is_tracked() {
class VerifyRegionAttrRemSet : public HeapRegionClosure {
class VerifyRegionAttrRemSet : public G1HeapRegionClosure {
public:
virtual bool do_heap_region(G1HeapRegion* r) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
@ -2538,9 +2538,9 @@ void G1CollectedHeap::unload_classes_and_code(const char* description, BoolObjec
}
class G1BulkUnregisterNMethodTask : public WorkerTask {
HeapRegionClaimer _hrclaimer;
G1HeapRegionClaimer _hrclaimer;
class UnregisterNMethodsHeapRegionClosure : public HeapRegionClosure {
class UnregisterNMethodsHeapRegionClosure : public G1HeapRegionClosure {
public:
bool do_heap_region(G1HeapRegion* hr) {
@ -2614,7 +2614,7 @@ void G1CollectedHeap::clear_bitmap_for_region(G1HeapRegion* hr) {
concurrent_mark()->clear_bitmap_for_region(hr);
}
void G1CollectedHeap::free_region(G1HeapRegion* hr, FreeRegionList* free_list) {
void G1CollectedHeap::free_region(G1HeapRegion* hr, G1FreeRegionList* free_list) {
assert(!hr->is_free(), "the region should not be free");
assert(!hr->is_empty(), "the region should not be empty");
assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
@ -2636,7 +2636,7 @@ void G1CollectedHeap::retain_region(G1HeapRegion* hr) {
}
void G1CollectedHeap::free_humongous_region(G1HeapRegion* hr,
FreeRegionList* free_list) {
G1FreeRegionList* free_list) {
assert(hr->is_humongous(), "this is only for humongous regions");
hr->clear_humongous();
free_region(hr, free_list);
@ -2652,7 +2652,7 @@ void G1CollectedHeap::remove_from_old_gen_sets(const uint old_regions_removed,
}
void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
void G1CollectedHeap::prepend_to_freelist(G1FreeRegionList* list) {
assert(list != nullptr, "list can't be null");
if (!list->is_empty()) {
MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag);
@ -2678,7 +2678,7 @@ void G1CollectedHeap::rebuild_free_region_list() {
phase_times()->record_total_rebuild_freelist_time_ms((Ticks::now() - start).seconds() * 1000.0);
}
class G1AbandonCollectionSetClosure : public HeapRegionClosure {
class G1AbandonCollectionSetClosure : public G1HeapRegionClosure {
public:
virtual bool do_heap_region(G1HeapRegion* r) {
assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index());
@ -2707,7 +2707,7 @@ void G1CollectedHeap::set_region_short_lived_locked(G1HeapRegion* hr) {
#ifdef ASSERT
class NoYoungRegionsClosure: public HeapRegionClosure {
class NoYoungRegionsClosure: public G1HeapRegionClosure {
private:
bool _success;
public:
@ -2768,22 +2768,22 @@ void G1CollectedHeap::set_used(size_t bytes) {
_summary_bytes_used = bytes;
}
class RebuildRegionSetsClosure : public HeapRegionClosure {
class RebuildRegionSetsClosure : public G1HeapRegionClosure {
private:
bool _free_list_only;
HeapRegionSet* _old_set;
HeapRegionSet* _humongous_set;
G1HeapRegionSet* _old_set;
G1HeapRegionSet* _humongous_set;
HeapRegionManager* _hrm;
G1HeapRegionManager* _hrm;
size_t _total_used;
public:
RebuildRegionSetsClosure(bool free_list_only,
HeapRegionSet* old_set,
HeapRegionSet* humongous_set,
HeapRegionManager* hrm) :
G1HeapRegionSet* old_set,
G1HeapRegionSet* humongous_set,
G1HeapRegionManager* hrm) :
_free_list_only(free_list_only), _old_set(old_set),
_humongous_set(humongous_set), _hrm(hrm), _total_used(0) {
assert(_hrm->num_free_regions() == 0, "pre-condition");
@ -2849,7 +2849,7 @@ G1HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
bool should_allocate = policy()->should_allocate_mutator_region();
if (should_allocate) {
G1HeapRegion* new_alloc_region = new_region(word_size,
HeapRegionType::Eden,
G1HeapRegionType::Eden,
false /* do_expand */,
node_index);
if (new_alloc_region != nullptr) {
@ -2895,11 +2895,11 @@ G1HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegio
return nullptr;
}
HeapRegionType type;
G1HeapRegionType type;
if (dest.is_young()) {
type = HeapRegionType::Survivor;
type = G1HeapRegionType::Survivor;
} else {
type = HeapRegionType::Old;
type = G1HeapRegionType::Old;
}
G1HeapRegion* new_alloc_region = new_region(word_size,

View File

@ -162,7 +162,7 @@ class G1CollectedHeap : public CollectedHeap {
// Other related classes.
friend class G1HeapPrinterMark;
friend class HeapRegionClaimer;
friend class G1HeapRegionClaimer;
// Testing classes.
friend class G1CheckRegionAttrTableClosure;
@ -180,8 +180,8 @@ private:
static size_t _humongous_object_threshold_in_words;
// These sets keep track of old and humongous regions respectively.
HeapRegionSet _old_set;
HeapRegionSet _humongous_set;
G1HeapRegionSet _old_set;
G1HeapRegionSet _humongous_set;
// Young gen memory statistics before GC.
G1MonotonicArenaMemoryStats _young_gen_card_set_stats;
@ -212,7 +212,7 @@ private:
G1NUMA* _numa;
// The sequence of all heap regions in the heap.
HeapRegionManager _hrm;
G1HeapRegionManager _hrm;
// Manages all allocations with regions except humongous object allocations.
G1Allocator* _allocator;
@ -386,9 +386,9 @@ private:
// an allocation of the given word_size. If do_expand is true,
// attempt to expand the heap if necessary to satisfy the allocation
// request. 'type' takes the type of region to be allocated. (Use constants
// Old, Eden, Humongous, Survivor defined in HeapRegionType.)
// Old, Eden, Humongous, Survivor defined in G1HeapRegionType.)
G1HeapRegion* new_region(size_t word_size,
HeapRegionType type,
G1HeapRegionType type,
bool do_expand,
uint node_index = G1NUMA::AnyNodeIndex);
@ -679,7 +679,7 @@ public:
// in another way).
// Callers must ensure they are the only one calling free on the given region
// at the same time.
void free_region(G1HeapRegion* hr, FreeRegionList* free_list);
void free_region(G1HeapRegion* hr, G1FreeRegionList* free_list);
// Add the given region to the retained regions collection set candidates.
void retain_region(G1HeapRegion* hr);
@ -697,7 +697,7 @@ public:
// The method assumes that only a single thread is ever calling
// this for a particular region at once.
void free_humongous_region(G1HeapRegion* hr,
FreeRegionList* free_list);
G1FreeRegionList* free_list);
// Execute func(G1HeapRegion* r, bool is_last) on every region covered by the
// given range.
@ -1022,7 +1022,7 @@ public:
void remove_from_old_gen_sets(const uint old_regions_removed,
const uint humongous_regions_removed);
void prepend_to_freelist(FreeRegionList* list);
void prepend_to_freelist(G1FreeRegionList* list);
void decrement_summary_bytes(size_t bytes);
bool is_in(const void* p) const override;
@ -1060,7 +1060,7 @@ public:
// Iteration functions.
void object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer);
void object_iterate_parallel(ObjectClosure* cl, uint worker_id, G1HeapRegionClaimer* claimer);
// Iterate over all objects, calling "cl.do_object" on each.
void object_iterate(ObjectClosure* cl) override;
@ -1072,8 +1072,8 @@ public:
// Iterate over heap regions, in address order, terminating the
// iteration early if the "do_heap_region" method returns "true".
void heap_region_iterate(HeapRegionClosure* blk) const;
void heap_region_iterate(HeapRegionIndexClosure* blk) const;
void heap_region_iterate(G1HeapRegionClosure* blk) const;
void heap_region_iterate(G1HeapRegionIndexClosure* blk) const;
// Return the region with the given index. It assumes the index is valid.
inline G1HeapRegion* region_at(uint index) const;
@ -1091,41 +1091,41 @@ public:
inline HeapWord* bottom_addr_for_region(uint index) const;
// Two functions to iterate over the heap regions in parallel. Threads
// compete using the HeapRegionClaimer to claim the regions before
// compete using the G1HeapRegionClaimer to claim the regions before
// applying the closure on them.
// The _from_worker_offset version uses the HeapRegionClaimer and
// The _from_worker_offset version uses the G1HeapRegionClaimer and
// the worker id to calculate a start offset to prevent all workers to
// start from the point.
void heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
HeapRegionClaimer* hrclaimer,
void heap_region_par_iterate_from_worker_offset(G1HeapRegionClosure* cl,
G1HeapRegionClaimer* hrclaimer,
uint worker_id) const;
void heap_region_par_iterate_from_start(HeapRegionClosure* cl,
HeapRegionClaimer* hrclaimer) const;
void heap_region_par_iterate_from_start(G1HeapRegionClosure* cl,
G1HeapRegionClaimer* hrclaimer) const;
// Iterate over all regions in the collection set in parallel.
void collection_set_par_iterate_all(HeapRegionClosure* cl,
HeapRegionClaimer* hr_claimer,
void collection_set_par_iterate_all(G1HeapRegionClosure* cl,
G1HeapRegionClaimer* hr_claimer,
uint worker_id);
// Iterate over all regions currently in the current collection set.
void collection_set_iterate_all(HeapRegionClosure* blk);
void collection_set_iterate_all(G1HeapRegionClosure* blk);
// Iterate over the regions in the current increment of the collection set.
// Starts the iteration so that the start regions of a given worker id over the
// set active_workers are evenly spread across the set of collection set regions
// to be iterated.
// The variant with the HeapRegionClaimer guarantees that the closure will be
// The variant with the G1HeapRegionClaimer guarantees that the closure will be
// applied to a particular region exactly once.
void collection_set_iterate_increment_from(HeapRegionClosure *blk, uint worker_id) {
void collection_set_iterate_increment_from(G1HeapRegionClosure *blk, uint worker_id) {
collection_set_iterate_increment_from(blk, nullptr, worker_id);
}
void collection_set_iterate_increment_from(HeapRegionClosure *blk, HeapRegionClaimer* hr_claimer, uint worker_id);
void collection_set_iterate_increment_from(G1HeapRegionClosure *blk, G1HeapRegionClaimer* hr_claimer, uint worker_id);
// Iterate over the array of region indexes, uint regions[length], applying
// the given HeapRegionClosure on each region. The worker_id will determine where
// the given G1HeapRegionClosure on each region. The worker_id will determine where
// to start the iteration to allow for more efficient parallel iteration.
void par_iterate_regions_array(HeapRegionClosure* cl,
HeapRegionClaimer* hr_claimer,
void par_iterate_regions_array(G1HeapRegionClosure* cl,
G1HeapRegionClaimer* hr_claimer,
const uint regions[],
size_t length,
uint worker_id) const;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -129,7 +129,7 @@ void G1CollectionSet::clear() {
_collection_set_cur_length = 0;
}
void G1CollectionSet::iterate(HeapRegionClosure* cl) const {
void G1CollectionSet::iterate(G1HeapRegionClosure* cl) const {
size_t len = _collection_set_cur_length;
OrderAccess::loadload();
@ -143,13 +143,13 @@ void G1CollectionSet::iterate(HeapRegionClosure* cl) const {
}
}
void G1CollectionSet::par_iterate(HeapRegionClosure* cl,
HeapRegionClaimer* hr_claimer,
void G1CollectionSet::par_iterate(G1HeapRegionClosure* cl,
G1HeapRegionClaimer* hr_claimer,
uint worker_id) const {
iterate_part_from(cl, hr_claimer, 0, cur_length(), worker_id);
}
void G1CollectionSet::iterate_optional(HeapRegionClosure* cl) const {
void G1CollectionSet::iterate_optional(G1HeapRegionClosure* cl) const {
assert_at_safepoint();
for (G1HeapRegion* r : _optional_old_regions) {
@ -158,14 +158,14 @@ void G1CollectionSet::iterate_optional(HeapRegionClosure* cl) const {
}
}
void G1CollectionSet::iterate_incremental_part_from(HeapRegionClosure* cl,
HeapRegionClaimer* hr_claimer,
void G1CollectionSet::iterate_incremental_part_from(G1HeapRegionClosure* cl,
G1HeapRegionClaimer* hr_claimer,
uint worker_id) const {
iterate_part_from(cl, hr_claimer, _inc_part_start, increment_length(), worker_id);
}
void G1CollectionSet::iterate_part_from(HeapRegionClosure* cl,
HeapRegionClaimer* hr_claimer,
void G1CollectionSet::iterate_part_from(G1HeapRegionClosure* cl,
G1HeapRegionClaimer* hr_claimer,
size_t offset,
size_t length,
uint worker_id) const {
@ -207,11 +207,11 @@ void G1CollectionSet::add_eden_region(G1HeapRegion* hr) {
}
#ifndef PRODUCT
class G1VerifyYoungAgesClosure : public HeapRegionClosure {
class G1VerifyYoungAgesClosure : public G1HeapRegionClosure {
public:
bool _valid;
G1VerifyYoungAgesClosure() : HeapRegionClosure(), _valid(true) { }
G1VerifyYoungAgesClosure() : G1HeapRegionClosure(), _valid(true) { }
virtual bool do_heap_region(G1HeapRegion* r) {
guarantee(r->is_young(), "Region must be young but is %s", r->get_type_str());
@ -246,10 +246,10 @@ bool G1CollectionSet::verify_young_ages() {
return cl.valid();
}
class G1PrintCollectionSetDetailClosure : public HeapRegionClosure {
class G1PrintCollectionSetDetailClosure : public G1HeapRegionClosure {
outputStream* _st;
public:
G1PrintCollectionSetDetailClosure(outputStream* st) : HeapRegionClosure(), _st(st) { }
G1PrintCollectionSetDetailClosure(outputStream* st) : G1HeapRegionClosure(), _st(st) { }
virtual bool do_heap_region(G1HeapRegion* r) {
assert(r->in_collection_set(), "Region %u should be in collection set", r->hrm_index());
@ -471,12 +471,12 @@ void G1CollectionSet::abandon_optional_collection_set(G1ParScanThreadStateSet* p
}
#ifdef ASSERT
class G1VerifyYoungCSetIndicesClosure : public HeapRegionClosure {
class G1VerifyYoungCSetIndicesClosure : public G1HeapRegionClosure {
private:
size_t _young_length;
uint* _heap_region_indices;
public:
G1VerifyYoungCSetIndicesClosure(size_t young_length) : HeapRegionClosure(), _young_length(young_length) {
G1VerifyYoungCSetIndicesClosure(size_t young_length) : G1HeapRegionClosure(), _young_length(young_length) {
_heap_region_indices = NEW_C_HEAP_ARRAY(uint, young_length + 1, mtGC);
for (size_t i = 0; i < young_length + 1; i++) {
_heap_region_indices[i] = UINT_MAX;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,8 +36,8 @@ class G1ParScanThreadStateSet;
class G1Policy;
class G1SurvivorRegions;
class G1HeapRegion;
class HeapRegionClaimer;
class HeapRegionClosure;
class G1HeapRegionClaimer;
class G1HeapRegionClosure;
// The collection set.
//
@ -197,10 +197,10 @@ class G1CollectionSet {
void finalize_old_part(double time_remaining_ms);
// Iterate the part of the collection set given by the offset and length applying the given
// HeapRegionClosure. The worker_id will determine where in the part to start the iteration
// G1HeapRegionClosure. The worker_id will determine where in the part to start the iteration
// to allow for more efficient parallel iteration.
void iterate_part_from(HeapRegionClosure* cl,
HeapRegionClaimer* hr_claimer,
void iterate_part_from(G1HeapRegionClosure* cl,
G1HeapRegionClaimer* hr_claimer,
size_t offset,
size_t length,
uint worker_id) const;
@ -243,9 +243,9 @@ public:
// Stop adding regions to the current collection set increment.
void stop_incremental_building() { _inc_build_state = Inactive; }
// Iterate over the current collection set increment applying the given HeapRegionClosure
// Iterate over the current collection set increment applying the given G1HeapRegionClosure
// from a starting position determined by the given worker id.
void iterate_incremental_part_from(HeapRegionClosure* cl, HeapRegionClaimer* hr_claimer, uint worker_id) const;
void iterate_incremental_part_from(G1HeapRegionClosure* cl, G1HeapRegionClaimer* hr_claimer, uint worker_id) const;
// Returns the length of the current increment in number of regions.
size_t increment_length() const { return _collection_set_cur_length - _inc_part_start; }
@ -253,13 +253,13 @@ public:
size_t cur_length() const { return _collection_set_cur_length; }
// Iterate over the entire collection set (all increments calculated so far), applying
// the given HeapRegionClosure on all of them.
void iterate(HeapRegionClosure* cl) const;
void par_iterate(HeapRegionClosure* cl,
HeapRegionClaimer* hr_claimer,
// the given G1HeapRegionClosure on all of them.
void iterate(G1HeapRegionClosure* cl) const;
void par_iterate(G1HeapRegionClosure* cl,
G1HeapRegionClaimer* hr_claimer,
uint worker_id) const;
void iterate_optional(HeapRegionClosure* cl) const;
void iterate_optional(G1HeapRegionClosure* cl) const;
// Finalize the initial collection set consisting of all young regions potentially a
// few old gen regions.

View File

@ -36,7 +36,7 @@
class G1CollectionCandidateList;
class G1CollectionSetCandidates;
class G1HeapRegion;
class HeapRegionClosure;
class G1HeapRegionClosure;
using G1CollectionCandidateRegionListIterator = GrowableArrayIterator<G1HeapRegion*>;
@ -110,7 +110,7 @@ public:
// Restore sorting order by decreasing gc efficiency, using the existing efficiency
// values.
void sort_by_efficiency();
// Removes any HeapRegions stored in this list also in the other list. The other
// Removes any heap regions stored in this list also in the other list. The other
// list may only contain regions in this list, sorted by gc efficiency. It need
// not be a prefix of this list. Returns the number of regions removed.
// E.g. if this list is "A B G H", the other list may be "A G H", but not "F" (not in

View File

@ -116,7 +116,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
// Per-region closure. In addition to determining whether a region should be
// added to the candidates, and calculating those regions' gc efficiencies, also
// gather additional statistics.
class G1BuildCandidateRegionsClosure : public HeapRegionClosure {
class G1BuildCandidateRegionsClosure : public G1HeapRegionClosure {
G1BuildCandidateArray* _array;
uint _cur_chunk_idx;
@ -177,7 +177,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
};
G1CollectedHeap* _g1h;
HeapRegionClaimer _hrclaimer;
G1HeapRegionClaimer _hrclaimer;
uint volatile _num_regions_added;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,7 @@
#include "runtime/safepoint.hpp"
#include "utilities/debug.hpp"
HeapRegionRange::HeapRegionRange(uint start, uint end) :
G1HeapRegionRange::G1HeapRegionRange(uint start, uint end) :
_start(start),
_end(end) {
assert(start <= end, "Invariant");
@ -97,21 +97,21 @@ void G1CommittedRegionMap::uncommit(uint start, uint end) {
inactive_clear_range(start, end);
}
HeapRegionRange G1CommittedRegionMap::next_active_range(uint offset) const {
G1HeapRegionRange G1CommittedRegionMap::next_active_range(uint offset) const {
// Find first active index from offset.
uint start = (uint) _active.find_first_set_bit(offset);
if (start == max_length()) {
// Early out when no active regions are found.
return HeapRegionRange(max_length(), max_length());
return G1HeapRegionRange(max_length(), max_length());
}
uint end = (uint) _active.find_first_clear_bit(start);
verify_active_range(start, end);
return HeapRegionRange(start, end);
return G1HeapRegionRange(start, end);
}
HeapRegionRange G1CommittedRegionMap::next_committable_range(uint offset) const {
G1HeapRegionRange G1CommittedRegionMap::next_committable_range(uint offset) const {
// We should only call this function when there are no inactive regions.
verify_no_inactive_regons();
@ -119,28 +119,28 @@ HeapRegionRange G1CommittedRegionMap::next_committable_range(uint offset) const
uint start = (uint) _active.find_first_clear_bit(offset);
if (start == max_length()) {
// Early out when no free regions are found.
return HeapRegionRange(max_length(), max_length());
return G1HeapRegionRange(max_length(), max_length());
}
uint end = (uint) _active.find_first_set_bit(start);
verify_free_range(start, end);
return HeapRegionRange(start, end);
return G1HeapRegionRange(start, end);
}
HeapRegionRange G1CommittedRegionMap::next_inactive_range(uint offset) const {
G1HeapRegionRange G1CommittedRegionMap::next_inactive_range(uint offset) const {
// Find first inactive region from offset.
uint start = (uint) _inactive.find_first_set_bit(offset);
if (start == max_length()) {
// Early when no inactive regions are found.
return HeapRegionRange(max_length(), max_length());
return G1HeapRegionRange(max_length(), max_length());
}
uint end = (uint) _inactive.find_first_clear_bit(start);
verify_inactive_range(start, end);
return HeapRegionRange(start, end);
return G1HeapRegionRange(start, end);
}
void G1CommittedRegionMap::active_set_range(uint start, uint end) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,13 +30,13 @@
#include "utilities/macros.hpp"
// Helper class to define a range [start, end) of regions.
class HeapRegionRange : public StackObj {
class G1HeapRegionRange : public StackObj {
// Inclusive start of the range.
uint _start;
// Exclusive end of the range.
uint _end;
public:
HeapRegionRange(uint start, uint end);
G1HeapRegionRange(uint start, uint end);
uint start() const { return _start; }
uint end() const { return _end; }
@ -101,13 +101,13 @@ public:
void uncommit(uint start, uint end);
// Finds the next range of active regions starting at offset.
HeapRegionRange next_active_range(uint offset) const;
G1HeapRegionRange next_active_range(uint offset) const;
// Finds the next range of inactive regions starting at offset.
HeapRegionRange next_inactive_range(uint offset) const;
G1HeapRegionRange next_inactive_range(uint offset) const;
// Finds the next range of committable regions starting at offset.
// This function must only be called when no inactive regions are
// present and can be used to activate more regions.
HeapRegionRange next_committable_range(uint offset) const;
G1HeapRegionRange next_committable_range(uint offset) const;
protected:
virtual void guarantee_mt_safety_active() const;

View File

@ -675,7 +675,7 @@ public:
private:
// Heap region closure used for clearing the _mark_bitmap.
class G1ClearBitmapHRClosure : public HeapRegionClosure {
class G1ClearBitmapHRClosure : public G1HeapRegionClosure {
private:
G1ConcurrentMark* _cm;
G1CMBitMap* _bitmap;
@ -715,7 +715,7 @@ private:
public:
G1ClearBitmapHRClosure(G1ConcurrentMark* cm, bool suspendible) :
HeapRegionClosure(),
G1HeapRegionClosure(),
_cm(cm),
_bitmap(cm->mark_bitmap()),
_suspendible(suspendible)
@ -759,7 +759,7 @@ private:
};
G1ClearBitmapHRClosure _cl;
HeapRegionClaimer _hr_claimer;
G1HeapRegionClaimer _hr_claimer;
bool _suspendible; // If the task is suspendible, workers must join the STS.
public:
@ -843,7 +843,7 @@ public:
};
class G1PreConcurrentStartTask::NoteStartOfMarkTask : public G1AbstractSubTask {
HeapRegionClaimer _claimer;
G1HeapRegionClaimer _claimer;
public:
NoteStartOfMarkTask() : G1AbstractSubTask(G1GCPhaseTimes::NoteStartOfMark), _claimer(0) { }
@ -863,11 +863,11 @@ void G1PreConcurrentStartTask::ResetMarkingStateTask::do_work(uint worker_id) {
_cm->reset();
}
class NoteStartOfMarkHRClosure : public HeapRegionClosure {
class NoteStartOfMarkHRClosure : public G1HeapRegionClosure {
G1ConcurrentMark* _cm;
public:
NoteStartOfMarkHRClosure() : HeapRegionClosure(), _cm(G1CollectedHeap::heap()->concurrent_mark()) { }
NoteStartOfMarkHRClosure() : G1HeapRegionClosure(), _cm(G1CollectedHeap::heap()->concurrent_mark()) { }
bool do_heap_region(G1HeapRegion* r) override {
if (r->is_old_or_humongous() && !r->is_collection_set_candidate() && !r->in_collection_set()) {
@ -1204,14 +1204,14 @@ void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type,
class G1UpdateRegionLivenessAndSelectForRebuildTask : public WorkerTask {
G1CollectedHeap* _g1h;
G1ConcurrentMark* _cm;
HeapRegionClaimer _hrclaimer;
G1HeapRegionClaimer _hrclaimer;
uint volatile _total_selected_for_rebuild;
// Reclaimed empty regions
FreeRegionList _cleanup_list;
G1FreeRegionList _cleanup_list;
struct G1OnRegionClosure : public HeapRegionClosure {
struct G1OnRegionClosure : public G1HeapRegionClosure {
G1CollectedHeap* _g1h;
G1ConcurrentMark* _cm;
// The number of regions actually selected for rebuild.
@ -1220,11 +1220,11 @@ class G1UpdateRegionLivenessAndSelectForRebuildTask : public WorkerTask {
size_t _freed_bytes;
uint _num_old_regions_removed;
uint _num_humongous_regions_removed;
FreeRegionList* _local_cleanup_list;
G1FreeRegionList* _local_cleanup_list;
G1OnRegionClosure(G1CollectedHeap* g1h,
G1ConcurrentMark* cm,
FreeRegionList* local_cleanup_list) :
G1FreeRegionList* local_cleanup_list) :
_g1h(g1h),
_cm(cm),
_num_selected_for_rebuild(0),
@ -1325,7 +1325,7 @@ public:
}
void work(uint worker_id) override {
FreeRegionList local_cleanup_list("Local Cleanup List");
G1FreeRegionList local_cleanup_list("Local Cleanup List");
G1OnRegionClosure on_region_cl(_g1h, _cm, &local_cleanup_list);
_g1h->heap_region_par_iterate_from_worker_offset(&on_region_cl, &_hrclaimer, worker_id);
@ -1352,7 +1352,7 @@ public:
}
};
class G1UpdateRegionsAfterRebuild : public HeapRegionClosure {
class G1UpdateRegionsAfterRebuild : public G1HeapRegionClosure {
G1CollectedHeap* _g1h;
public:
@ -3078,7 +3078,7 @@ G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
_total_remset_bytes += g1h->card_set_freelist_pool()->mem_size();
// add static memory usages to remembered set sizes
_total_remset_bytes += HeapRegionRemSet::static_mem_size();
_total_remset_bytes += G1HeapRegionRemSet::static_mem_size();
// Print the footer of the output.
log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
log_trace(gc, liveness)(G1PPRL_LINE_PREFIX

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -949,7 +949,7 @@ public:
// Class that's used to to print out per-region liveness
// information. It's currently used at the end of marking and also
// after we sort the old regions at the end of the cleanup operation.
class G1PrintRegionLivenessInfoClosure : public HeapRegionClosure {
class G1PrintRegionLivenessInfoClosure : public G1HeapRegionClosure {
// Accumulators for these values.
size_t _total_used_bytes;
size_t _total_capacity_bytes;

View File

@ -63,11 +63,11 @@
// a pause.
class G1RebuildRSAndScrubTask : public WorkerTask {
G1ConcurrentMark* _cm;
HeapRegionClaimer _hr_claimer;
G1HeapRegionClaimer _hr_claimer;
const bool _should_rebuild_remset;
class G1RebuildRSAndScrubRegionClosure : public HeapRegionClosure {
class G1RebuildRSAndScrubRegionClosure : public G1HeapRegionClosure {
G1ConcurrentMark* _cm;
const G1CMBitMap* _bitmap;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -253,7 +253,7 @@ uint64_t G1ConcurrentRefine::adjust_threads_wait_ms() const {
}
}
class G1ConcurrentRefine::RemSetSamplingClosure : public HeapRegionClosure {
class G1ConcurrentRefine::RemSetSamplingClosure : public G1HeapRegionClosure {
G1CollectionSet* _cset;
size_t _sampled_card_rs_length;
size_t _sampled_code_root_rs_length;
@ -263,7 +263,7 @@ public:
_cset(cset), _sampled_card_rs_length(0), _sampled_code_root_rs_length(0) {}
bool do_heap_region(G1HeapRegion* r) override {
HeapRegionRemSet* rem_set = r->rem_set();
G1HeapRegionRemSet* rem_set = r->rem_set();
_sampled_card_rs_length += rem_set->occupied();
_sampled_code_root_rs_length += rem_set->code_roots_list_length();
return false;

View File

@ -64,8 +64,8 @@ bool G1EvacFailureRegions::contains(uint region_idx) const {
return _regions_evac_failed.par_at(region_idx, memory_order_relaxed);
}
void G1EvacFailureRegions::par_iterate(HeapRegionClosure* closure,
HeapRegionClaimer* hrclaimer,
void G1EvacFailureRegions::par_iterate(G1HeapRegionClosure* closure,
G1HeapRegionClaimer* hrclaimer,
uint worker_id) const {
G1CollectedHeap::heap()->par_iterate_regions_array(closure,
hrclaimer,

View File

@ -28,8 +28,8 @@
#include "utilities/bitMap.hpp"
class G1AbstractSubTask;
class HeapRegionClosure;
class HeapRegionClaimer;
class G1HeapRegionClaimer;
class G1HeapRegionClosure;
// This class records for every region on the heap whether it had experienced an
// evacuation failure.
@ -70,8 +70,8 @@ public:
void post_collection();
bool contains(uint region_idx) const;
void par_iterate(HeapRegionClosure* closure,
HeapRegionClaimer* hrclaimer,
void par_iterate(G1HeapRegionClosure* closure,
G1HeapRegionClaimer* hrclaimer,
uint worker_id) const;
// Return a G1AbstractSubTask which does necessary preparation for evacuation failed regions

View File

@ -164,7 +164,7 @@ G1FullCollector::~G1FullCollector() {
FREE_C_HEAP_ARRAY(G1RegionMarkStats, _live_stats);
}
class PrepareRegionsClosure : public HeapRegionClosure {
class PrepareRegionsClosure : public G1HeapRegionClosure {
G1FullCollector* _collector;
public:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -51,7 +51,7 @@ public:
}
};
class G1AdjustRegionClosure : public HeapRegionClosure {
class G1AdjustRegionClosure : public G1HeapRegionClosure {
G1FullCollector* _collector;
G1CMBitMap* _bitmap;
uint _worker_id;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,7 +37,7 @@ class G1CollectedHeap;
class G1FullGCAdjustTask : public G1FullGCTask {
G1RootProcessor _root_processor;
WeakProcessor::Task _weak_proc_task;
HeapRegionClaimer _hrclaimer;
G1HeapRegionClaimer _hrclaimer;
G1AdjustClosure _adjust;
public:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,7 +37,7 @@ class G1FullCollector;
class G1FullGCCompactTask : public G1FullGCTask {
G1FullCollector* _collector;
HeapRegionClaimer _claimer;
G1HeapRegionClaimer _claimer;
G1CollectedHeap* _g1h;
void compact_region(G1HeapRegion* hr);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,7 +27,7 @@
#include "gc/g1/g1BiasedArray.hpp"
// This table is used to store attribute values of all HeapRegions that need
// This table is used to store attribute values of all heap regions that need
// fast access during the full collection. In particular some parts of the
// region type information is encoded in these per-region bytes. Value encoding
// has been specifically chosen to make required accesses fast. In particular,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,7 +37,7 @@ class G1HeapRegion;
// Determines the regions in the heap that should be part of the compaction and
// distributes them among the compaction queues in round-robin fashion.
class G1DetermineCompactionQueueClosure : public HeapRegionClosure {
class G1DetermineCompactionQueueClosure : public G1HeapRegionClosure {
G1CollectedHeap* _g1h;
G1FullCollector* _collector;
uint _cur_worker;
@ -62,7 +62,7 @@ public:
class G1FullGCPrepareTask : public G1FullGCTask {
volatile bool _has_free_compaction_targets;
HeapRegionClaimer _hrclaimer;
G1HeapRegionClaimer _hrclaimer;
void set_has_free_compaction_targets();
@ -74,7 +74,7 @@ public:
bool has_free_compaction_targets();
private:
class G1CalculatePointersClosure : public HeapRegionClosure {
class G1CalculatePointersClosure : public G1HeapRegionClosure {
G1CollectedHeap* _g1h;
G1FullCollector* _collector;
G1CMBitMap* _bitmap;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,9 +29,9 @@
class G1FullGCResetMetadataTask : public G1FullGCTask {
G1FullCollector* _collector;
HeapRegionClaimer _claimer;
G1HeapRegionClaimer _claimer;
class G1ResetMetadataClosure : public HeapRegionClosure {
class G1ResetMetadataClosure : public G1HeapRegionClosure {
G1CollectedHeap* _g1h;
G1FullCollector* _collector;

View File

@ -55,20 +55,20 @@ size_t G1HeapRegion::GrainWords = 0;
size_t G1HeapRegion::CardsPerRegion = 0;
size_t G1HeapRegion::max_region_size() {
return HeapRegionBounds::max_size();
return G1HeapRegionBounds::max_size();
}
size_t G1HeapRegion::min_region_size_in_words() {
return HeapRegionBounds::min_size() >> LogHeapWordSize;
return G1HeapRegionBounds::min_size() >> LogHeapWordSize;
}
void G1HeapRegion::setup_heap_region_size(size_t max_heap_size) {
size_t region_size = G1HeapRegionSize;
// G1HeapRegionSize = 0 means decide ergonomically.
if (region_size == 0) {
region_size = clamp(max_heap_size / HeapRegionBounds::target_number(),
HeapRegionBounds::min_size(),
HeapRegionBounds::max_ergonomics_size());
region_size = clamp(max_heap_size / G1HeapRegionBounds::target_number(),
G1HeapRegionBounds::min_size(),
G1HeapRegionBounds::max_ergonomics_size());
}
// Make sure region size is a power of 2. Rounding up since this
@ -76,7 +76,7 @@ void G1HeapRegion::setup_heap_region_size(size_t max_heap_size) {
region_size = round_up_power_of_2(region_size);
// Now make sure that we don't go over or under our limits.
region_size = clamp(region_size, HeapRegionBounds::min_size(), HeapRegionBounds::max_size());
region_size = clamp(region_size, G1HeapRegionBounds::min_size(), G1HeapRegionBounds::max_size());
// Now, set up the globals.
guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
@ -247,7 +247,7 @@ G1HeapRegion::G1HeapRegion(uint hrm_index,
assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
"invalid space boundaries");
_rem_set = new HeapRegionRemSet(this, config);
_rem_set = new G1HeapRegionRemSet(this, config);
initialize();
}
@ -264,11 +264,11 @@ void G1HeapRegion::initialize(bool clear_space, bool mangle_space) {
}
void G1HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
HeapRegionTracer::send_region_type_change(_hrm_index,
get_trace_type(),
to,
(uintptr_t)bottom(),
used());
G1HeapRegionTracer::send_region_type_change(_hrm_index,
get_trace_type(),
to,
(uintptr_t)bottom(),
used());
}
void G1HeapRegion::note_evacuation_failure() {
@ -377,7 +377,7 @@ bool G1HeapRegion::verify_code_roots(VerifyOption vo) const {
return false;
}
HeapRegionRemSet* hrrs = rem_set();
G1HeapRegionRemSet* hrrs = rem_set();
size_t code_roots_length = hrrs->code_roots_list_length();
// if this region is empty then there should be no entries

View File

@ -40,9 +40,9 @@ class G1CardSetConfiguration;
class G1CollectedHeap;
class G1CMBitMap;
class G1Predictions;
class HeapRegionRemSet;
class G1HeapRegion;
class HeapRegionSetBase;
class G1HeapRegionRemSet;
class G1HeapRegionSetBase;
class nmethod;
#define HR_FORMAT "%u:(%s)[" PTR_FORMAT "," PTR_FORMAT "," PTR_FORMAT "]"
@ -195,12 +195,12 @@ public:
private:
// The remembered set for this region.
HeapRegionRemSet* _rem_set;
G1HeapRegionRemSet* _rem_set;
// Cached index of this region in the heap region sequence.
const uint _hrm_index;
HeapRegionType _type;
G1HeapRegionType _type;
// For a humongous region, region in which it starts.
G1HeapRegion* _humongous_start_region;
@ -211,11 +211,11 @@ private:
// is considered optional during a mixed collections.
uint _index_in_opt_cset;
// Fields used by the HeapRegionSetBase class and subclasses.
// Fields used by the G1HeapRegionSetBase class and subclasses.
G1HeapRegion* _next;
G1HeapRegion* _prev;
#ifdef ASSERT
HeapRegionSetBase* _containing_set;
G1HeapRegionSetBase* _containing_set;
#endif // ASSERT
// The area above this limit is fully parsable. This limit
@ -276,7 +276,7 @@ public:
MemRegion mr,
G1CardSetConfiguration* config);
// If this region is a member of a HeapRegionManager, the index in that
// If this region is a member of a G1HeapRegionManager, the index in that
// sequence, otherwise -1.
uint hrm_index() const { return _hrm_index; }
@ -418,9 +418,9 @@ public:
// Unsets the humongous-related fields on the region.
void clear_humongous();
void set_rem_set(HeapRegionRemSet* rem_set) { _rem_set = rem_set; }
void set_rem_set(G1HeapRegionRemSet* rem_set) { _rem_set = rem_set; }
// If the region has a remembered set, return a pointer to it.
HeapRegionRemSet* rem_set() const {
G1HeapRegionRemSet* rem_set() const {
return _rem_set;
}
@ -428,7 +428,7 @@ public:
void prepare_remset_for_scan();
// Methods used by the HeapRegionSetBase class and subclasses.
// Methods used by the G1HeapRegionSetBase class and subclasses.
// Getter and setter for the next and prev fields used to link regions into
// linked lists.
@ -445,7 +445,7 @@ public:
// the contents of a set are as they should be and it's only
// available in non-product builds.
#ifdef ASSERT
void set_containing_set(HeapRegionSetBase* containing_set) {
void set_containing_set(G1HeapRegionSetBase* containing_set) {
assert((containing_set != nullptr && _containing_set == nullptr) ||
containing_set == nullptr,
"containing_set: " PTR_FORMAT " "
@ -455,9 +455,9 @@ public:
_containing_set = containing_set;
}
HeapRegionSetBase* containing_set() { return _containing_set; }
G1HeapRegionSetBase* containing_set() { return _containing_set; }
#else // ASSERT
void set_containing_set(HeapRegionSetBase* containing_set) { }
void set_containing_set(G1HeapRegionSetBase* containing_set) { }
// containing_set() is only used in asserts so there's no reason
// to provide a dummy version of it.
@ -552,10 +552,10 @@ public:
bool verify(VerifyOption vo) const;
};
// HeapRegionClosure is used for iterating over regions.
// G1HeapRegionClosure is used for iterating over regions.
// Terminates the iteration when the "do_heap_region" method returns "true".
class HeapRegionClosure : public StackObj {
friend class HeapRegionManager;
class G1HeapRegionClosure : public StackObj {
friend class G1HeapRegionManager;
friend class G1CollectionSet;
friend class G1CollectionSetCandidates;
@ -563,7 +563,7 @@ class HeapRegionClosure : public StackObj {
void set_incomplete() { _is_complete = false; }
public:
HeapRegionClosure(): _is_complete(true) {}
G1HeapRegionClosure(): _is_complete(true) {}
// Typically called on each region until it returns true.
virtual bool do_heap_region(G1HeapRegion* r) = 0;
@ -573,8 +573,8 @@ public:
bool is_complete() { return _is_complete; }
};
class HeapRegionIndexClosure : public StackObj {
friend class HeapRegionManager;
class G1HeapRegionIndexClosure : public StackObj {
friend class G1HeapRegionManager;
friend class G1CollectionSet;
friend class G1CollectionSetCandidates;
@ -582,7 +582,7 @@ class HeapRegionIndexClosure : public StackObj {
void set_incomplete() { _is_complete = false; }
public:
HeapRegionIndexClosure(): _is_complete(true) {}
G1HeapRegionIndexClosure(): _is_complete(true) {}
// Typically called on each region until it returns true.
virtual bool do_heap_region_index(uint region_index) = 0;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,7 @@
#include "memory/allStatic.hpp"
#include "utilities/globalDefinitions.hpp"
class HeapRegionBounds : public AllStatic {
class G1HeapRegionBounds : public AllStatic {
private:
// Minimum region size; we won't go lower than that.
// We might want to decrease this in the future, to deal with small

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,19 +27,19 @@
#include "gc/g1/g1HeapRegionBounds.hpp"
size_t HeapRegionBounds::min_size() {
size_t G1HeapRegionBounds::min_size() {
return MIN_REGION_SIZE;
}
size_t HeapRegionBounds::max_ergonomics_size() {
size_t G1HeapRegionBounds::max_ergonomics_size() {
return MAX_ERGONOMICS_SIZE;
}
size_t HeapRegionBounds::max_size() {
size_t G1HeapRegionBounds::max_size() {
return MAX_REGION_SIZE;
}
size_t HeapRegionBounds::target_number() {
size_t G1HeapRegionBounds::target_number() {
return TARGET_REGION_NUMBER;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,7 @@
#include "jfr/jfrEvents.hpp"
#include "runtime/vmThread.hpp"
class DumpEventInfoClosure : public HeapRegionClosure {
class DumpEventInfoClosure : public G1HeapRegionClosure {
public:
bool do_heap_region(G1HeapRegion* r) {
EventG1HeapRegionInformation evt;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,7 +40,7 @@
#include "runtime/orderAccess.hpp"
#include "utilities/bitMap.inline.hpp"
class MasterFreeRegionListChecker : public HeapRegionSetChecker {
class G1MasterFreeRegionListChecker : public G1HeapRegionSetChecker {
public:
void check_mt_safety() {
// Master Free List MT safety protocol:
@ -62,20 +62,20 @@ public:
const char* get_description() { return "Free Regions"; }
};
HeapRegionManager::HeapRegionManager() :
G1HeapRegionManager::G1HeapRegionManager() :
_bot_mapper(nullptr),
_cardtable_mapper(nullptr),
_committed_map(),
_allocated_heapregions_length(0),
_regions(), _heap_mapper(nullptr),
_bitmap_mapper(nullptr),
_free_list("Free list", new MasterFreeRegionListChecker())
_free_list("Free list", new G1MasterFreeRegionListChecker())
{ }
void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
G1RegionToSpaceMapper* bitmap,
G1RegionToSpaceMapper* bot,
G1RegionToSpaceMapper* cardtable) {
void G1HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
G1RegionToSpaceMapper* bitmap,
G1RegionToSpaceMapper* bot,
G1RegionToSpaceMapper* cardtable) {
_allocated_heapregions_length = 0;
_heap_mapper = heap_storage;
@ -90,7 +90,7 @@ void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
_committed_map.initialize(reserved_length());
}
G1HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint requested_node_index) {
G1HeapRegion* G1HeapRegionManager::allocate_free_region(G1HeapRegionType type, uint requested_node_index) {
G1HeapRegion* hr = nullptr;
bool from_head = !type.is_young();
G1NUMA* numa = G1NUMA::numa();
@ -118,7 +118,7 @@ G1HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint
return hr;
}
G1HeapRegion* HeapRegionManager::allocate_humongous_from_free_list(uint num_regions) {
G1HeapRegion* G1HeapRegionManager::allocate_humongous_from_free_list(uint num_regions) {
uint candidate = find_contiguous_in_free_list(num_regions);
if (candidate == G1_NO_HRM_INDEX) {
return nullptr;
@ -126,7 +126,7 @@ G1HeapRegion* HeapRegionManager::allocate_humongous_from_free_list(uint num_regi
return allocate_free_regions_starting_at(candidate, num_regions);
}
G1HeapRegion* HeapRegionManager::allocate_humongous_allow_expand(uint num_regions) {
G1HeapRegion* G1HeapRegionManager::allocate_humongous_allow_expand(uint num_regions) {
uint candidate = find_contiguous_allow_expand(num_regions);
if (candidate == G1_NO_HRM_INDEX) {
return nullptr;
@ -135,25 +135,25 @@ G1HeapRegion* HeapRegionManager::allocate_humongous_allow_expand(uint num_region
return allocate_free_regions_starting_at(candidate, num_regions);
}
G1HeapRegion* HeapRegionManager::allocate_humongous(uint num_regions) {
G1HeapRegion* G1HeapRegionManager::allocate_humongous(uint num_regions) {
// Special case a single region to avoid expensive search.
if (num_regions == 1) {
return allocate_free_region(HeapRegionType::Humongous, G1NUMA::AnyNodeIndex);
return allocate_free_region(G1HeapRegionType::Humongous, G1NUMA::AnyNodeIndex);
}
return allocate_humongous_from_free_list(num_regions);
}
G1HeapRegion* HeapRegionManager::expand_and_allocate_humongous(uint num_regions) {
G1HeapRegion* G1HeapRegionManager::expand_and_allocate_humongous(uint num_regions) {
return allocate_humongous_allow_expand(num_regions);
}
#ifdef ASSERT
bool HeapRegionManager::is_free(G1HeapRegion* hr) const {
bool G1HeapRegionManager::is_free(G1HeapRegion* hr) const {
return _free_list.contains(hr);
}
#endif
G1HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
G1HeapRegion* G1HeapRegionManager::new_heap_region(uint hrm_index) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
MemRegion mr(bottom, bottom + G1HeapRegion::GrainWords);
@ -161,7 +161,7 @@ G1HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
return g1h->new_heap_region(hrm_index, mr);
}
void HeapRegionManager::expand(uint start, uint num_regions, WorkerThreads* pretouch_workers) {
void G1HeapRegionManager::expand(uint start, uint num_regions, WorkerThreads* pretouch_workers) {
commit_regions(start, num_regions, pretouch_workers);
for (uint i = start; i < start + num_regions; i++) {
G1HeapRegion* hr = _regions.get_by_index(i);
@ -176,7 +176,7 @@ void HeapRegionManager::expand(uint start, uint num_regions, WorkerThreads* pret
activate_regions(start, num_regions);
}
void HeapRegionManager::commit_regions(uint index, size_t num_regions, WorkerThreads* pretouch_workers) {
void G1HeapRegionManager::commit_regions(uint index, size_t num_regions, WorkerThreads* pretouch_workers) {
guarantee(num_regions > 0, "Must commit more than zero regions");
guarantee(num_regions <= available(),
"Cannot commit more than the maximum amount of regions");
@ -190,7 +190,7 @@ void HeapRegionManager::commit_regions(uint index, size_t num_regions, WorkerThr
_cardtable_mapper->commit_regions(index, num_regions, pretouch_workers);
}
void HeapRegionManager::uncommit_regions(uint start, uint num_regions) {
void G1HeapRegionManager::uncommit_regions(uint start, uint num_regions) {
guarantee(num_regions > 0, "No point in calling this for zero regions");
uint end = start + num_regions;
@ -215,7 +215,7 @@ void HeapRegionManager::uncommit_regions(uint start, uint num_regions) {
_committed_map.uncommit(start, end);
}
void HeapRegionManager::initialize_regions(uint start, uint num_regions) {
void G1HeapRegionManager::initialize_regions(uint start, uint num_regions) {
for (uint i = start; i < start + num_regions; i++) {
assert(is_available(i), "Just made region %u available but is apparently not.", i);
G1HeapRegion* hr = at(i);
@ -227,12 +227,12 @@ void HeapRegionManager::initialize_regions(uint start, uint num_regions) {
}
}
void HeapRegionManager::activate_regions(uint start, uint num_regions) {
void G1HeapRegionManager::activate_regions(uint start, uint num_regions) {
_committed_map.activate(start, start + num_regions);
initialize_regions(start, num_regions);
}
void HeapRegionManager::reactivate_regions(uint start, uint num_regions) {
void G1HeapRegionManager::reactivate_regions(uint start, uint num_regions) {
assert(num_regions > 0, "No point in calling this for zero regions");
clear_auxiliary_data_structures(start, num_regions);
@ -241,7 +241,7 @@ void HeapRegionManager::reactivate_regions(uint start, uint num_regions) {
initialize_regions(start, num_regions);
}
void HeapRegionManager::deactivate_regions(uint start, uint num_regions) {
void G1HeapRegionManager::deactivate_regions(uint start, uint num_regions) {
assert(num_regions > 0, "Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start);
assert(length() >= num_regions, "pre-condition");
@ -256,7 +256,7 @@ void HeapRegionManager::deactivate_regions(uint start, uint num_regions) {
_committed_map.deactivate(start, end);
}
void HeapRegionManager::clear_auxiliary_data_structures(uint start, uint num_regions) {
void G1HeapRegionManager::clear_auxiliary_data_structures(uint start, uint num_regions) {
// Signal marking bitmaps to clear the given regions.
_bitmap_mapper->signal_mapping_changed(start, num_regions);
// Signal G1BlockOffsetTable to clear the given regions.
@ -265,7 +265,7 @@ void HeapRegionManager::clear_auxiliary_data_structures(uint start, uint num_reg
_cardtable_mapper->signal_mapping_changed(start, num_regions);
}
MemoryUsage HeapRegionManager::get_auxiliary_data_memory_usage() const {
MemoryUsage G1HeapRegionManager::get_auxiliary_data_memory_usage() const {
size_t used_sz =
_bitmap_mapper->committed_size() +
_bot_mapper->committed_size() +
@ -279,18 +279,18 @@ MemoryUsage HeapRegionManager::get_auxiliary_data_memory_usage() const {
return MemoryUsage(0, used_sz, committed_sz, committed_sz);
}
bool HeapRegionManager::has_inactive_regions() const {
bool G1HeapRegionManager::has_inactive_regions() const {
return _committed_map.num_inactive() > 0;
}
uint HeapRegionManager::uncommit_inactive_regions(uint limit) {
uint G1HeapRegionManager::uncommit_inactive_regions(uint limit) {
assert(limit > 0, "Need to specify at least one region to uncommit");
uint uncommitted = 0;
uint offset = 0;
do {
MutexLocker uc(Uncommit_lock, Mutex::_no_safepoint_check_flag);
HeapRegionRange range = _committed_map.next_inactive_range(offset);
G1HeapRegionRange range = _committed_map.next_inactive_range(offset);
// No more regions available for uncommit. Return the number of regions
// already uncommitted or 0 if there were no longer any inactive regions.
if (range.length() == 0) {
@ -307,12 +307,12 @@ uint HeapRegionManager::uncommit_inactive_regions(uint limit) {
return uncommitted;
}
uint HeapRegionManager::expand_inactive(uint num_regions) {
uint G1HeapRegionManager::expand_inactive(uint num_regions) {
uint offset = 0;
uint expanded = 0;
do {
HeapRegionRange regions = _committed_map.next_inactive_range(offset);
G1HeapRegionRange regions = _committed_map.next_inactive_range(offset);
if (regions.length() == 0) {
// No more unavailable regions.
break;
@ -327,14 +327,14 @@ uint HeapRegionManager::expand_inactive(uint num_regions) {
return expanded;
}
uint HeapRegionManager::expand_any(uint num_regions, WorkerThreads* pretouch_workers) {
uint G1HeapRegionManager::expand_any(uint num_regions, WorkerThreads* pretouch_workers) {
assert(num_regions > 0, "Must expand at least 1 region");
uint offset = 0;
uint expanded = 0;
do {
HeapRegionRange regions = _committed_map.next_committable_range(offset);
G1HeapRegionRange regions = _committed_map.next_committable_range(offset);
if (regions.length() == 0) {
// No more unavailable regions.
break;
@ -349,7 +349,7 @@ uint HeapRegionManager::expand_any(uint num_regions, WorkerThreads* pretouch_wor
return expanded;
}
uint HeapRegionManager::expand_by(uint num_regions, WorkerThreads* pretouch_workers) {
uint G1HeapRegionManager::expand_by(uint num_regions, WorkerThreads* pretouch_workers) {
assert(num_regions > 0, "Must expand at least 1 region");
// First "undo" any requests to uncommit memory concurrently by
@ -365,7 +365,7 @@ uint HeapRegionManager::expand_by(uint num_regions, WorkerThreads* pretouch_work
return expanded;
}
void HeapRegionManager::expand_exact(uint start, uint num_regions, WorkerThreads* pretouch_workers) {
void G1HeapRegionManager::expand_exact(uint start, uint num_regions, WorkerThreads* pretouch_workers) {
assert(num_regions != 0, "Need to request at least one region");
uint end = start + num_regions;
@ -393,7 +393,7 @@ void HeapRegionManager::expand_exact(uint start, uint num_regions, WorkerThreads
verify_optional();
}
uint HeapRegionManager::expand_on_preferred_node(uint preferred_index) {
uint G1HeapRegionManager::expand_on_preferred_node(uint preferred_index) {
uint expand_candidate = UINT_MAX;
if (available() >= 1) {
@ -420,13 +420,13 @@ uint HeapRegionManager::expand_on_preferred_node(uint preferred_index) {
return 1;
}
bool HeapRegionManager::is_on_preferred_index(uint region_index, uint preferred_node_index) {
bool G1HeapRegionManager::is_on_preferred_index(uint region_index, uint preferred_node_index) {
uint region_node_index = G1NUMA::numa()->preferred_node_index_for_index(region_index);
return region_node_index == preferred_node_index;
}
#ifdef ASSERT
void HeapRegionManager::assert_contiguous_range(uint start, uint num_regions) {
void G1HeapRegionManager::assert_contiguous_range(uint start, uint num_regions) {
// General sanity check, regions found should either be available and empty
// or not available so that we can make them available and use them.
for (uint i = start; i < (start + num_regions); i++) {
@ -439,7 +439,7 @@ void HeapRegionManager::assert_contiguous_range(uint start, uint num_regions) {
}
#endif
uint HeapRegionManager::find_contiguous_in_range(uint start, uint end, uint num_regions) {
uint G1HeapRegionManager::find_contiguous_in_range(uint start, uint end, uint num_regions) {
assert(start <= end, "precondition");
assert(num_regions >= 1, "precondition");
uint candidate = start; // First region in candidate sequence.
@ -465,9 +465,9 @@ uint HeapRegionManager::find_contiguous_in_range(uint start, uint end, uint num_
return G1_NO_HRM_INDEX;
}
uint HeapRegionManager::find_contiguous_in_free_list(uint num_regions) {
uint G1HeapRegionManager::find_contiguous_in_free_list(uint num_regions) {
uint candidate = G1_NO_HRM_INDEX;
HeapRegionRange range(0,0);
G1HeapRegionRange range(0,0);
do {
range = _committed_map.next_active_range(range.end());
@ -477,7 +477,7 @@ uint HeapRegionManager::find_contiguous_in_free_list(uint num_regions) {
return candidate;
}
uint HeapRegionManager::find_contiguous_allow_expand(uint num_regions) {
uint G1HeapRegionManager::find_contiguous_allow_expand(uint num_regions) {
// Check if we can actually satisfy the allocation.
if (num_regions > available()) {
return G1_NO_HRM_INDEX;
@ -486,7 +486,7 @@ uint HeapRegionManager::find_contiguous_allow_expand(uint num_regions) {
return find_contiguous_in_range(0, reserved_length(), num_regions);
}
G1HeapRegion* HeapRegionManager::next_region_in_heap(const G1HeapRegion* r) const {
G1HeapRegion* G1HeapRegionManager::next_region_in_heap(const G1HeapRegion* r) const {
guarantee(r != nullptr, "Start region must be a valid region");
guarantee(is_available(r->hrm_index()), "Trying to iterate starting from region %u which is not in the heap", r->hrm_index());
for (uint i = r->hrm_index() + 1; i < _allocated_heapregions_length; i++) {
@ -498,7 +498,7 @@ G1HeapRegion* HeapRegionManager::next_region_in_heap(const G1HeapRegion* r) cons
return nullptr;
}
void HeapRegionManager::iterate(HeapRegionClosure* blk) const {
void G1HeapRegionManager::iterate(G1HeapRegionClosure* blk) const {
uint len = reserved_length();
for (uint i = 0; i < len; i++) {
@ -514,7 +514,7 @@ void HeapRegionManager::iterate(HeapRegionClosure* blk) const {
}
}
void HeapRegionManager::iterate(HeapRegionIndexClosure* blk) const {
void G1HeapRegionManager::iterate(G1HeapRegionIndexClosure* blk) const {
uint len = reserved_length();
for (uint i = 0; i < len; i++) {
@ -529,7 +529,7 @@ void HeapRegionManager::iterate(HeapRegionIndexClosure* blk) const {
}
}
uint HeapRegionManager::find_highest_free(bool* expanded) {
uint G1HeapRegionManager::find_highest_free(bool* expanded) {
// Loop downwards from the highest region index, looking for an
// entry which is either free or not yet committed. If not yet
// committed, expand at that index.
@ -551,7 +551,7 @@ uint HeapRegionManager::find_highest_free(bool* expanded) {
return G1_NO_HRM_INDEX;
}
bool HeapRegionManager::allocate_containing_regions(MemRegion range, size_t* commit_count, WorkerThreads* pretouch_workers) {
bool G1HeapRegionManager::allocate_containing_regions(MemRegion range, size_t* commit_count, WorkerThreads* pretouch_workers) {
size_t commits = 0;
uint start_index = (uint)_regions.get_index_by_address(range.start());
uint last_index = (uint)_regions.get_index_by_address(range.last());
@ -574,7 +574,7 @@ bool HeapRegionManager::allocate_containing_regions(MemRegion range, size_t* com
return true;
}
void HeapRegionManager::par_iterate(HeapRegionClosure* blk, HeapRegionClaimer* hrclaimer, const uint start_index) const {
void G1HeapRegionManager::par_iterate(G1HeapRegionClosure* blk, G1HeapRegionClaimer* hrclaimer, const uint start_index) const {
// Every worker will actually look at all regions, skipping over regions that
// are currently not committed.
// This also (potentially) iterates over regions newly allocated during GC. This
@ -603,7 +603,7 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, HeapRegionClaimer* h
}
}
uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
uint G1HeapRegionManager::shrink_by(uint num_regions_to_remove) {
assert(length() > 0, "the region sequence should not be empty");
assert(length() <= _allocated_heapregions_length, "invariant");
assert(_allocated_heapregions_length > 0, "we should have at least one region committed");
@ -633,7 +633,7 @@ uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
return removed;
}
void HeapRegionManager::shrink_at(uint index, size_t num_regions) {
void G1HeapRegionManager::shrink_at(uint index, size_t num_regions) {
#ifdef ASSERT
for (uint i = index; i < (index + num_regions); i++) {
assert(is_available(i), "Expected available region at index %u", i);
@ -645,7 +645,7 @@ void HeapRegionManager::shrink_at(uint index, size_t num_regions) {
deactivate_regions(index, (uint) num_regions);
}
uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
uint G1HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
guarantee(start_idx <= _allocated_heapregions_length, "checking");
guarantee(res_idx != nullptr, "checking");
@ -679,7 +679,7 @@ uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_id
return num_regions_found;
}
void HeapRegionManager::verify() {
void G1HeapRegionManager::verify() {
guarantee(length() <= _allocated_heapregions_length,
"invariant: _length: %u _allocated_length: %u",
length(), _allocated_heapregions_length);
@ -724,65 +724,65 @@ void HeapRegionManager::verify() {
}
#ifndef PRODUCT
void HeapRegionManager::verify_optional() {
void G1HeapRegionManager::verify_optional() {
verify();
}
#endif // PRODUCT
HeapRegionClaimer::HeapRegionClaimer(uint n_workers) :
G1HeapRegionClaimer::G1HeapRegionClaimer(uint n_workers) :
_n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm._allocated_heapregions_length), _claims(nullptr) {
uint* new_claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC);
memset(new_claims, Unclaimed, sizeof(*_claims) * _n_regions);
_claims = new_claims;
}
HeapRegionClaimer::~HeapRegionClaimer() {
G1HeapRegionClaimer::~G1HeapRegionClaimer() {
FREE_C_HEAP_ARRAY(uint, _claims);
}
uint HeapRegionClaimer::offset_for_worker(uint worker_id) const {
uint G1HeapRegionClaimer::offset_for_worker(uint worker_id) const {
assert(_n_workers > 0, "must be set");
assert(worker_id < _n_workers, "Invalid worker_id.");
return _n_regions * worker_id / _n_workers;
}
bool HeapRegionClaimer::is_region_claimed(uint region_index) const {
bool G1HeapRegionClaimer::is_region_claimed(uint region_index) const {
assert(region_index < _n_regions, "Invalid index.");
return _claims[region_index] == Claimed;
}
bool HeapRegionClaimer::claim_region(uint region_index) {
bool G1HeapRegionClaimer::claim_region(uint region_index) {
assert(region_index < _n_regions, "Invalid index.");
uint old_val = Atomic::cmpxchg(&_claims[region_index], Unclaimed, Claimed);
return old_val == Unclaimed;
}
class G1RebuildFreeListTask : public WorkerTask {
HeapRegionManager* _hrm;
FreeRegionList* _worker_freelists;
uint _worker_chunk_size;
uint _num_workers;
G1HeapRegionManager* _hrm;
G1FreeRegionList* _worker_freelists;
uint _worker_chunk_size;
uint _num_workers;
public:
G1RebuildFreeListTask(HeapRegionManager* hrm, uint num_workers) :
G1RebuildFreeListTask(G1HeapRegionManager* hrm, uint num_workers) :
WorkerTask("G1 Rebuild Free List Task"),
_hrm(hrm),
_worker_freelists(NEW_C_HEAP_ARRAY(FreeRegionList, num_workers, mtGC)),
_worker_freelists(NEW_C_HEAP_ARRAY(G1FreeRegionList, num_workers, mtGC)),
_worker_chunk_size((_hrm->reserved_length() + num_workers - 1) / num_workers),
_num_workers(num_workers) {
for (uint worker = 0; worker < _num_workers; worker++) {
::new (&_worker_freelists[worker]) FreeRegionList("Appendable Worker Free List");
::new (&_worker_freelists[worker]) G1FreeRegionList("Appendable Worker Free List");
}
}
~G1RebuildFreeListTask() {
for (uint worker = 0; worker < _num_workers; worker++) {
_worker_freelists[worker].~FreeRegionList();
_worker_freelists[worker].~G1FreeRegionList();
}
FREE_C_HEAP_ARRAY(FreeRegionList, _worker_freelists);
FREE_C_HEAP_ARRAY(G1FreeRegionList, _worker_freelists);
}
FreeRegionList* worker_freelist(uint worker) {
G1FreeRegionList* worker_freelist(uint worker) {
return &_worker_freelists[worker];
}
@ -800,7 +800,7 @@ public:
return;
}
FreeRegionList* free_list = worker_freelist(worker_id);
G1FreeRegionList* free_list = worker_freelist(worker_id);
for (uint i = start; i < end; i++) {
G1HeapRegion* region = _hrm->at_or_null(i);
if (region != nullptr && region->is_free()) {
@ -815,7 +815,7 @@ public:
}
};
void HeapRegionManager::rebuild_free_list(WorkerThreads* workers) {
void G1HeapRegionManager::rebuild_free_list(WorkerThreads* workers) {
// Abandon current free list to allow a rebuild.
_free_list.abandon();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,9 +33,9 @@
#include "services/memoryUsage.hpp"
class G1HeapRegion;
class HeapRegionClosure;
class HeapRegionClaimer;
class FreeRegionList;
class G1HeapRegionClaimer;
class G1HeapRegionClosure;
class G1FreeRegionList;
class WorkerThreads;
class G1HeapRegionTable : public G1BiasedMappedArray<G1HeapRegion*> {
@ -49,7 +49,7 @@ class G1HeapRegionTable : public G1BiasedMappedArray<G1HeapRegion*> {
// This allows maximum flexibility for deciding what to commit or uncommit given
// a request from outside.
//
// HeapRegions are kept in the _regions array in address order. A region's
// G1HeapRegions are kept in the _regions array in address order. A region's
// index in the array corresponds to its index in the heap (i.e., 0 is the
// region at the bottom of the heap, 1 is the one after it, etc.). Two
// regions that are consecutive in the array should also be adjacent in the
@ -65,14 +65,14 @@ class G1HeapRegionTable : public G1BiasedMappedArray<G1HeapRegion*> {
// * _num_committed (returned by length()) is the number of currently
// committed regions. These may not be contiguous.
// * _allocated_heapregions_length (not exposed outside this class) is the
// number of regions+1 for which we have HeapRegions.
// number of regions+1 for which we have G1HeapRegions.
// * max_length() returns the maximum number of regions the heap may commit.
// * reserved_length() returns the maximum number of regions the heap has reserved.
//
class HeapRegionManager: public CHeapObj<mtGC> {
class G1HeapRegionManager: public CHeapObj<mtGC> {
friend class VMStructs;
friend class HeapRegionClaimer;
friend class G1HeapRegionClaimer;
G1RegionToSpaceMapper* _bot_mapper;
G1RegionToSpaceMapper* _cardtable_mapper;
@ -90,7 +90,7 @@ class HeapRegionManager: public CHeapObj<mtGC> {
// Pass down commit calls to the VirtualSpace.
void commit_regions(uint index, size_t num_regions = 1, WorkerThreads* pretouch_workers = nullptr);
// Initialize the HeapRegions in the range and put them on the free list.
// Initialize the G1HeapRegions in the range and put them on the free list.
void initialize_regions(uint start, uint num_regions);
// Find a contiguous set of empty or uncommitted regions of length num_regions and return
@ -123,7 +123,7 @@ class HeapRegionManager: public CHeapObj<mtGC> {
G1HeapRegionTable _regions;
G1RegionToSpaceMapper* _heap_mapper;
G1RegionToSpaceMapper* _bitmap_mapper;
FreeRegionList _free_list;
G1FreeRegionList _free_list;
void expand(uint index, uint num_regions, WorkerThreads* pretouch_workers = nullptr);
@ -157,7 +157,7 @@ public:
#endif
public:
// Empty constructor, we'll initialize it with the initialize() method.
HeapRegionManager();
G1HeapRegionManager();
void initialize(G1RegionToSpaceMapper* heap_storage,
G1RegionToSpaceMapper* bitmap,
@ -196,12 +196,12 @@ public:
void rebuild_free_list(WorkerThreads* workers);
// Insert the given region list into the global free region list.
void insert_list_into_free_list(FreeRegionList* list) {
void insert_list_into_free_list(G1FreeRegionList* list) {
_free_list.add_ordered(list);
}
// Allocate a free region with specific node index. If fails allocate with next node index.
G1HeapRegion* allocate_free_region(HeapRegionType type, uint requested_node_index);
G1HeapRegion* allocate_free_region(G1HeapRegionType type, uint requested_node_index);
// Allocate a humongous object from the free list
G1HeapRegion* allocate_humongous(uint num_regions);
@ -246,7 +246,7 @@ public:
MemRegion reserved() const { return MemRegion(heap_bottom(), heap_end()); }
// Expand the sequence to reflect that the heap has grown. Either create new
// HeapRegions, or re-use existing ones. Returns the number of regions the
// G1HeapRegions, or re-use existing ones. Returns the number of regions the
// sequence was expanded by. If a G1HeapRegion allocation fails, the resulting
// number of regions might be smaller than what's desired.
uint expand_by(uint num_regions, WorkerThreads* pretouch_workers);
@ -268,10 +268,10 @@ public:
// Apply blk->do_heap_region() on all committed regions in address order,
// terminating the iteration early if do_heap_region() returns true.
void iterate(HeapRegionClosure* blk) const;
void iterate(HeapRegionIndexClosure* blk) const;
void iterate(G1HeapRegionClosure* blk) const;
void iterate(G1HeapRegionIndexClosure* blk) const;
void par_iterate(HeapRegionClosure* blk, HeapRegionClaimer* hrclaimer, const uint start_index) const;
void par_iterate(G1HeapRegionClosure* blk, G1HeapRegionClaimer* hrclaimer, const uint start_index) const;
// Uncommit up to num_regions_to_remove regions that are completely free.
// Return the actual number of uncommitted regions.
@ -294,9 +294,9 @@ public:
void verify_optional() PRODUCT_RETURN;
};
// The HeapRegionClaimer is used during parallel iteration over heap regions,
// The G1HeapRegionClaimer is used during parallel iteration over heap regions,
// allowing workers to claim heap regions, gaining exclusive rights to these regions.
class HeapRegionClaimer : public StackObj {
class G1HeapRegionClaimer : public StackObj {
uint _n_workers;
uint _n_regions;
volatile uint* _claims;
@ -305,8 +305,8 @@ class HeapRegionClaimer : public StackObj {
static const uint Claimed = 1;
public:
HeapRegionClaimer(uint n_workers);
~HeapRegionClaimer();
G1HeapRegionClaimer(uint n_workers);
~G1HeapRegionClaimer();
inline uint n_regions() const {
return _n_regions;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,11 +31,11 @@
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1HeapRegionSet.inline.hpp"
inline bool HeapRegionManager::is_available(uint region) const {
inline bool G1HeapRegionManager::is_available(uint region) const {
return _committed_map.active(region);
}
inline G1HeapRegion* HeapRegionManager::addr_to_region(HeapWord* addr) const {
inline G1HeapRegion* G1HeapRegionManager::addr_to_region(HeapWord* addr) const {
assert(addr < heap_end(),
"addr: " PTR_FORMAT " end: " PTR_FORMAT, p2i(addr), p2i(heap_end()));
assert(addr >= heap_bottom(),
@ -43,7 +43,7 @@ inline G1HeapRegion* HeapRegionManager::addr_to_region(HeapWord* addr) const {
return _regions.get_by_address(addr);
}
inline G1HeapRegion* HeapRegionManager::at(uint index) const {
inline G1HeapRegion* G1HeapRegionManager::at(uint index) const {
assert(is_available(index), "pre-condition");
G1HeapRegion* hr = _regions.get_by_index(index);
assert(hr != nullptr, "sanity");
@ -51,7 +51,7 @@ inline G1HeapRegion* HeapRegionManager::at(uint index) const {
return hr;
}
inline G1HeapRegion* HeapRegionManager::at_or_null(uint index) const {
inline G1HeapRegion* G1HeapRegionManager::at_or_null(uint index) const {
if (!is_available(index)) {
return nullptr;
}
@ -61,7 +61,7 @@ inline G1HeapRegion* HeapRegionManager::at_or_null(uint index) const {
return hr;
}
inline G1HeapRegion* HeapRegionManager::next_region_in_humongous(G1HeapRegion* hr) const {
inline G1HeapRegion* G1HeapRegionManager::next_region_in_humongous(G1HeapRegion* hr) const {
uint index = hr->hrm_index();
assert(is_available(index), "pre-condition");
assert(hr->is_humongous(), "next_region_in_humongous should only be called for a humongous region.");
@ -73,11 +73,11 @@ inline G1HeapRegion* HeapRegionManager::next_region_in_humongous(G1HeapRegion* h
}
}
inline void HeapRegionManager::insert_into_free_list(G1HeapRegion* hr) {
inline void G1HeapRegionManager::insert_into_free_list(G1HeapRegion* hr) {
_free_list.add_ordered(hr);
}
inline G1HeapRegion* HeapRegionManager::allocate_free_regions_starting_at(uint first, uint num_regions) {
inline G1HeapRegion* G1HeapRegionManager::allocate_free_regions_starting_at(uint first, uint num_regions) {
G1HeapRegion* start = at(first);
_free_list.remove_starting_at(start, num_regions);
return start;

View File

@ -29,7 +29,7 @@
#include "logging/log.hpp"
#include "memory/allStatic.hpp"
class FreeRegionList;
class G1FreeRegionList;
class G1HeapRegionPrinter : public AllStatic {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -45,17 +45,17 @@
#include "utilities/growableArray.hpp"
#include "utilities/powerOfTwo.hpp"
HeapWord* HeapRegionRemSet::_heap_base_address = nullptr;
HeapWord* G1HeapRegionRemSet::_heap_base_address = nullptr;
const char* HeapRegionRemSet::_state_strings[] = {"Untracked", "Updating", "Complete"};
const char* HeapRegionRemSet::_short_state_strings[] = {"UNTRA", "UPDAT", "CMPLT"};
const char* G1HeapRegionRemSet::_state_strings[] = {"Untracked", "Updating", "Complete"};
const char* G1HeapRegionRemSet::_short_state_strings[] = {"UNTRA", "UPDAT", "CMPLT"};
void HeapRegionRemSet::initialize(MemRegion reserved) {
void G1HeapRegionRemSet::initialize(MemRegion reserved) {
G1CardSet::initialize(reserved);
_heap_base_address = reserved.start();
}
HeapRegionRemSet::HeapRegionRemSet(G1HeapRegion* hr,
G1HeapRegionRemSet::G1HeapRegionRemSet(G1HeapRegion* hr,
G1CardSetConfiguration* config) :
_code_roots(),
_card_set_mm(config, G1CollectedHeap::heap()->card_set_freelist_pool()),
@ -63,11 +63,11 @@ HeapRegionRemSet::HeapRegionRemSet(G1HeapRegion* hr,
_hr(hr),
_state(Untracked) { }
void HeapRegionRemSet::clear_fcc() {
void G1HeapRegionRemSet::clear_fcc() {
G1FromCardCache::clear(_hr->hrm_index());
}
void HeapRegionRemSet::clear(bool only_cardset, bool keep_tracked) {
void G1HeapRegionRemSet::clear(bool only_cardset, bool keep_tracked) {
if (!only_cardset) {
_code_roots.clear();
}
@ -81,17 +81,17 @@ void HeapRegionRemSet::clear(bool only_cardset, bool keep_tracked) {
assert(occupied() == 0, "Should be clear.");
}
void HeapRegionRemSet::reset_table_scanner() {
void G1HeapRegionRemSet::reset_table_scanner() {
_code_roots.reset_table_scanner();
_card_set.reset_table_scanner();
}
G1MonotonicArenaMemoryStats HeapRegionRemSet::card_set_memory_stats() const {
G1MonotonicArenaMemoryStats G1HeapRegionRemSet::card_set_memory_stats() const {
return _card_set_mm.memory_stats();
}
void HeapRegionRemSet::print_static_mem_size(outputStream* out) {
out->print_cr(" Static structures = " SIZE_FORMAT, HeapRegionRemSet::static_mem_size());
void G1HeapRegionRemSet::print_static_mem_size(outputStream* out) {
out->print_cr(" Static structures = " SIZE_FORMAT, G1HeapRegionRemSet::static_mem_size());
}
// Code roots support
@ -101,12 +101,12 @@ void HeapRegionRemSet::print_static_mem_size(outputStream* out) {
// except when doing a full gc.
// When not at safepoint the CodeCache_lock must be held during modifications.
void HeapRegionRemSet::add_code_root(nmethod* nm) {
void G1HeapRegionRemSet::add_code_root(nmethod* nm) {
assert(nm != nullptr, "sanity");
_code_roots.add(nm);
}
void HeapRegionRemSet::remove_code_root(nmethod* nm) {
void G1HeapRegionRemSet::remove_code_root(nmethod* nm) {
assert(nm != nullptr, "sanity");
_code_roots.remove(nm);
@ -115,18 +115,18 @@ void HeapRegionRemSet::remove_code_root(nmethod* nm) {
guarantee(!_code_roots.contains(nm), "duplicate entry found");
}
void HeapRegionRemSet::bulk_remove_code_roots() {
void G1HeapRegionRemSet::bulk_remove_code_roots() {
_code_roots.bulk_remove();
}
void HeapRegionRemSet::code_roots_do(NMethodClosure* blk) const {
void G1HeapRegionRemSet::code_roots_do(NMethodClosure* blk) const {
_code_roots.nmethods_do(blk);
}
void HeapRegionRemSet::clean_code_roots(G1HeapRegion* hr) {
void G1HeapRegionRemSet::clean_code_roots(G1HeapRegion* hr) {
_code_roots.clean(hr);
}
size_t HeapRegionRemSet::code_roots_mem_size() {
size_t G1HeapRegionRemSet::code_roots_mem_size() {
return _code_roots.mem_size();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,7 +37,7 @@
class G1CardSetMemoryManager;
class outputStream;
class HeapRegionRemSet : public CHeapObj<mtGC> {
class G1HeapRegionRemSet : public CHeapObj<mtGC> {
friend class VMStructs;
// A set of nmethods whose code contains pointers into
@ -57,7 +57,7 @@ class HeapRegionRemSet : public CHeapObj<mtGC> {
void clear_fcc();
public:
HeapRegionRemSet(G1HeapRegion* hr, G1CardSetConfiguration* config);
G1HeapRegionRemSet(G1HeapRegion* hr, G1CardSetConfiguration* config);
bool cardset_is_empty() const {
return _card_set.is_empty();
@ -126,7 +126,7 @@ public:
// root set.
size_t mem_size() {
return _card_set.mem_size()
+ (sizeof(HeapRegionRemSet) - sizeof(G1CardSet)) // Avoid double-counting G1CardSet.
+ (sizeof(G1HeapRegionRemSet) - sizeof(G1CardSet)) // Avoid double-counting G1CardSet.
+ code_roots_mem_size();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,7 +33,7 @@
#include "runtime/atomic.hpp"
#include "utilities/bitMap.inline.hpp"
void HeapRegionRemSet::set_state_untracked() {
void G1HeapRegionRemSet::set_state_untracked() {
guarantee(SafepointSynchronize::is_at_safepoint() || !is_tracked(),
"Should only set to Untracked during safepoint but is %s.", get_state_str());
if (_state == Untracked) {
@ -43,14 +43,14 @@ void HeapRegionRemSet::set_state_untracked() {
_state = Untracked;
}
void HeapRegionRemSet::set_state_updating() {
void G1HeapRegionRemSet::set_state_updating() {
guarantee(SafepointSynchronize::is_at_safepoint() && !is_tracked(),
"Should only set to Updating from Untracked during safepoint but is %s", get_state_str());
clear_fcc();
_state = Updating;
}
void HeapRegionRemSet::set_state_complete() {
void G1HeapRegionRemSet::set_state_complete() {
clear_fcc();
_state = Complete;
}
@ -107,7 +107,7 @@ public:
};
template <class CardOrRangeVisitor>
inline void HeapRegionRemSet::iterate_for_merge(CardOrRangeVisitor& cl) {
inline void G1HeapRegionRemSet::iterate_for_merge(CardOrRangeVisitor& cl) {
G1HeapRegionRemSetMergeCardClosure<CardOrRangeVisitor, G1ContainerCardsOrRanges> cl2(&_card_set,
cl,
_card_set.config()->log2_card_regions_per_heap_region(),
@ -116,11 +116,11 @@ inline void HeapRegionRemSet::iterate_for_merge(CardOrRangeVisitor& cl) {
}
uintptr_t HeapRegionRemSet::to_card(OopOrNarrowOopStar from) const {
uintptr_t G1HeapRegionRemSet::to_card(OopOrNarrowOopStar from) const {
return pointer_delta(from, _heap_base_address, 1) >> CardTable::card_shift();
}
void HeapRegionRemSet::add_reference(OopOrNarrowOopStar from, uint tid) {
void G1HeapRegionRemSet::add_reference(OopOrNarrowOopStar from, uint tid) {
assert(_state != Untracked, "must be");
uint cur_idx = _hr->hrm_index();
@ -136,11 +136,11 @@ void HeapRegionRemSet::add_reference(OopOrNarrowOopStar from, uint tid) {
_card_set.add_card(to_card(from));
}
bool HeapRegionRemSet::contains_reference(OopOrNarrowOopStar from) {
bool G1HeapRegionRemSet::contains_reference(OopOrNarrowOopStar from) {
return _card_set.contains_card(to_card(from));
}
void HeapRegionRemSet::print_info(outputStream* st, OopOrNarrowOopStar from) {
void G1HeapRegionRemSet::print_info(outputStream* st, OopOrNarrowOopStar from) {
_card_set.print_info(st, to_card(from));
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,10 +28,10 @@
#include "gc/g1/g1HeapRegionSet.inline.hpp"
#include "gc/g1/g1NUMA.hpp"
uint FreeRegionList::_unrealistically_long_length = 0;
uint G1FreeRegionList::_unrealistically_long_length = 0;
#ifndef PRODUCT
void HeapRegionSetBase::verify_region(G1HeapRegion* hr) {
void G1HeapRegionSetBase::verify_region(G1HeapRegion* hr) {
assert(hr->containing_set() == this, "Inconsistent containing set for %u", hr->hrm_index());
assert(!hr->is_young(), "Adding young region %u", hr->hrm_index()); // currently we don't use these sets for young regions
assert(_checker == nullptr || _checker->is_correct_type(hr), "Wrong type of region %u (%s) and set %s",
@ -41,7 +41,7 @@ void HeapRegionSetBase::verify_region(G1HeapRegion* hr) {
}
#endif
void HeapRegionSetBase::verify() {
void G1HeapRegionSetBase::verify() {
// It's important that we also observe the MT safety protocol even
// for the verification calls. If we do verification without the
// appropriate locks and the set changes underneath our feet
@ -53,18 +53,18 @@ void HeapRegionSetBase::verify() {
"invariant");
}
void HeapRegionSetBase::verify_start() {
void G1HeapRegionSetBase::verify_start() {
// See comment in verify() about MT safety and verification.
check_mt_safety();
assert_heap_region_set(!_verify_in_progress, "verification should not be in progress");
// Do the basic verification first before we do the checks over the regions.
HeapRegionSetBase::verify();
G1HeapRegionSetBase::verify();
_verify_in_progress = true;
}
void HeapRegionSetBase::verify_end() {
void G1HeapRegionSetBase::verify_end() {
// See comment in verify() about MT safety and verification.
check_mt_safety();
assert_heap_region_set(_verify_in_progress, "verification should be in progress");
@ -72,30 +72,30 @@ void HeapRegionSetBase::verify_end() {
_verify_in_progress = false;
}
void HeapRegionSetBase::print_on(outputStream* out, bool print_contents) {
void G1HeapRegionSetBase::print_on(outputStream* out, bool print_contents) {
out->cr();
out->print_cr("Set: %s (" PTR_FORMAT ")", name(), p2i(this));
out->print_cr(" Region Type : %s", _checker->get_description());
out->print_cr(" Length : %14u", length());
}
HeapRegionSetBase::HeapRegionSetBase(const char* name, HeapRegionSetChecker* checker)
G1HeapRegionSetBase::G1HeapRegionSetBase(const char* name, G1HeapRegionSetChecker* checker)
: _checker(checker), _length(0), _name(name), _verify_in_progress(false)
{
}
void FreeRegionList::set_unrealistically_long_length(uint len) {
void G1FreeRegionList::set_unrealistically_long_length(uint len) {
guarantee(_unrealistically_long_length == 0, "should only be set once");
_unrealistically_long_length = len;
}
void FreeRegionList::abandon() {
void G1FreeRegionList::abandon() {
check_mt_safety();
clear();
verify_optional();
}
void FreeRegionList::remove_all() {
void G1FreeRegionList::remove_all() {
check_mt_safety();
verify_optional();
@ -117,7 +117,7 @@ void FreeRegionList::remove_all() {
verify_optional();
}
void FreeRegionList::add_list_common_start(FreeRegionList* from_list) {
void G1FreeRegionList::add_list_common_start(G1FreeRegionList* from_list) {
check_mt_safety();
from_list->check_mt_safety();
verify_optional();
@ -132,7 +132,7 @@ void FreeRegionList::add_list_common_start(FreeRegionList* from_list) {
}
#ifdef ASSERT
FreeRegionListIterator iter(from_list);
G1FreeRegionListIterator iter(from_list);
while (iter.more_available()) {
G1HeapRegion* hr = iter.get_next();
// In set_containing_set() we check that we either set the value
@ -144,7 +144,7 @@ void FreeRegionList::add_list_common_start(FreeRegionList* from_list) {
#endif // ASSERT
}
void FreeRegionList::add_list_common_end(FreeRegionList* from_list) {
void G1FreeRegionList::add_list_common_end(G1FreeRegionList* from_list) {
_length += from_list->length();
from_list->clear();
@ -152,7 +152,7 @@ void FreeRegionList::add_list_common_end(FreeRegionList* from_list) {
from_list->verify_optional();
}
void FreeRegionList::append_ordered(FreeRegionList* from_list) {
void G1FreeRegionList::append_ordered(G1FreeRegionList* from_list) {
add_list_common_start(from_list);
if (from_list->is_empty()) {
@ -177,7 +177,7 @@ void FreeRegionList::append_ordered(FreeRegionList* from_list) {
add_list_common_end(from_list);
}
void FreeRegionList::add_ordered(FreeRegionList* from_list) {
void G1FreeRegionList::add_ordered(G1FreeRegionList* from_list) {
add_list_common_start(from_list);
if (from_list->is_empty()) {
@ -227,7 +227,7 @@ void FreeRegionList::add_ordered(FreeRegionList* from_list) {
}
#ifdef ASSERT
void FreeRegionList::verify_region_to_remove(G1HeapRegion* curr, G1HeapRegion* next) {
void G1FreeRegionList::verify_region_to_remove(G1HeapRegion* curr, G1HeapRegion* next) {
assert_free_region_list(_head != next, "invariant");
if (next != nullptr) {
assert_free_region_list(next->prev() == curr, "invariant");
@ -244,7 +244,7 @@ void FreeRegionList::verify_region_to_remove(G1HeapRegion* curr, G1HeapRegion* n
}
#endif
void FreeRegionList::remove_starting_at(G1HeapRegion* first, uint num_regions) {
void G1FreeRegionList::remove_starting_at(G1HeapRegion* first, uint num_regions) {
check_mt_safety();
assert_free_region_list(num_regions >= 1, "pre-condition");
assert_free_region_list(!is_empty(), "pre-condition");
@ -304,8 +304,8 @@ void FreeRegionList::remove_starting_at(G1HeapRegion* first, uint num_regions) {
verify_optional();
}
void FreeRegionList::verify() {
// See comment in HeapRegionSetBase::verify() about MT safety and
void G1FreeRegionList::verify() {
// See comment in G1HeapRegionSetBase::verify() about MT safety and
// verification.
check_mt_safety();
@ -317,7 +317,7 @@ void FreeRegionList::verify() {
verify_end();
}
void FreeRegionList::clear() {
void G1FreeRegionList::clear() {
_length = 0;
_head = nullptr;
_tail = nullptr;
@ -328,7 +328,7 @@ void FreeRegionList::clear() {
}
}
void FreeRegionList::verify_list() {
void G1FreeRegionList::verify_list() {
G1HeapRegion* curr = _head;
G1HeapRegion* prev1 = nullptr;
G1HeapRegion* prev0 = nullptr;
@ -364,37 +364,37 @@ void FreeRegionList::verify_list() {
}
FreeRegionList::FreeRegionList(const char* name, HeapRegionSetChecker* checker):
HeapRegionSetBase(name, checker),
G1FreeRegionList::G1FreeRegionList(const char* name, G1HeapRegionSetChecker* checker):
G1HeapRegionSetBase(name, checker),
_node_info(G1NUMA::numa()->is_enabled() ? new NodeInfo() : nullptr) {
clear();
}
FreeRegionList::~FreeRegionList() {
G1FreeRegionList::~G1FreeRegionList() {
if (_node_info != nullptr) {
delete _node_info;
}
}
FreeRegionList::NodeInfo::NodeInfo() : _numa(G1NUMA::numa()), _length_of_node(nullptr),
_num_nodes(_numa->num_active_nodes()) {
G1FreeRegionList::NodeInfo::NodeInfo() : _numa(G1NUMA::numa()), _length_of_node(nullptr),
_num_nodes(_numa->num_active_nodes()) {
assert(UseNUMA, "Invariant");
_length_of_node = NEW_C_HEAP_ARRAY(uint, _num_nodes, mtGC);
}
FreeRegionList::NodeInfo::~NodeInfo() {
G1FreeRegionList::NodeInfo::~NodeInfo() {
FREE_C_HEAP_ARRAY(uint, _length_of_node);
}
void FreeRegionList::NodeInfo::clear() {
void G1FreeRegionList::NodeInfo::clear() {
for (uint i = 0; i < _num_nodes; ++i) {
_length_of_node[i] = 0;
}
}
void FreeRegionList::NodeInfo::add(NodeInfo* info) {
void G1FreeRegionList::NodeInfo::add(NodeInfo* info) {
for (uint i = 0; i < _num_nodes; ++i) {
_length_of_node[i] += info->_length_of_node[i];
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,14 +48,14 @@
// Interface collecting various instance specific verification methods of
// HeapRegionSets.
class HeapRegionSetChecker : public CHeapObj<mtGC> {
// G1HeapRegionSets.
class G1HeapRegionSetChecker : public CHeapObj<mtGC> {
public:
// Verify MT safety for this HeapRegionSet.
// Verify MT safety for this G1HeapRegionSet.
virtual void check_mt_safety() = 0;
// Returns true if the given G1HeapRegion is of the correct type for this HeapRegionSet.
// Returns true if the given G1HeapRegion is of the correct type for this G1HeapRegionSet.
virtual bool is_correct_type(G1HeapRegion* hr) = 0;
// Return a description of the type of regions this HeapRegionSet contains.
// Return a description of the type of regions this G1HeapRegionSet contains.
virtual const char* get_description() = 0;
};
@ -64,10 +64,10 @@ public:
// (e.g., length, region num, used bytes sum) plus any shared
// functionality (e.g., verification).
class HeapRegionSetBase {
class G1HeapRegionSetBase {
friend class VMStructs;
HeapRegionSetChecker* _checker;
G1HeapRegionSetChecker* _checker;
protected:
// The number of regions in the set.
@ -87,7 +87,7 @@ protected:
}
}
HeapRegionSetBase(const char* name, HeapRegionSetChecker* verifier);
G1HeapRegionSetBase(const char* name, G1HeapRegionSetChecker* verifier);
public:
const char* name() { return _name; }
@ -117,12 +117,12 @@ public:
// This class represents heap region sets whose members are not
// explicitly tracked. It's helpful to group regions using such sets
// so that we can reason about all the region groups in the heap using
// the same interface (namely, the HeapRegionSetBase API).
// the same interface (namely, the G1HeapRegionSetBase API).
class HeapRegionSet : public HeapRegionSetBase {
class G1HeapRegionSet : public G1HeapRegionSetBase {
public:
HeapRegionSet(const char* name, HeapRegionSetChecker* checker):
HeapRegionSetBase(name, checker) {
G1HeapRegionSet(const char* name, G1HeapRegionSetChecker* checker):
G1HeapRegionSetBase(name, checker) {
}
void bulk_remove(const uint removed) {
@ -135,11 +135,11 @@ public:
// such lists in performance critical paths. Typically we should
// add / remove one region at a time or concatenate two lists.
class FreeRegionListIterator;
class G1FreeRegionListIterator;
class G1NUMA;
class FreeRegionList : public HeapRegionSetBase {
friend class FreeRegionListIterator;
class G1FreeRegionList : public G1HeapRegionSetBase {
friend class G1FreeRegionListIterator;
private:
@ -181,17 +181,17 @@ private:
inline void decrease_length(uint node_index);
// Common checks for adding a list.
void add_list_common_start(FreeRegionList* from_list);
void add_list_common_end(FreeRegionList* from_list);
void add_list_common_start(G1FreeRegionList* from_list);
void add_list_common_end(G1FreeRegionList* from_list);
void verify_region_to_remove(G1HeapRegion* curr, G1HeapRegion* next) NOT_DEBUG_RETURN;
protected:
// See the comment for HeapRegionSetBase::clear()
// See the comment for G1HeapRegionSetBase::clear()
virtual void clear();
public:
FreeRegionList(const char* name, HeapRegionSetChecker* checker = nullptr);
~FreeRegionList();
G1FreeRegionList(const char* name, G1HeapRegionSetChecker* checker = nullptr);
~G1FreeRegionList();
void verify_list();
@ -218,8 +218,8 @@ public:
// Merge two ordered lists. The result is also ordered. The order is
// determined by hrm_index.
void add_ordered(FreeRegionList* from_list);
void append_ordered(FreeRegionList* from_list);
void add_ordered(G1FreeRegionList* from_list);
void append_ordered(G1FreeRegionList* from_list);
// It empties the list by removing all regions from it.
void remove_all();
@ -235,16 +235,16 @@ public:
virtual void verify();
using HeapRegionSetBase::length;
using G1HeapRegionSetBase::length;
uint length(uint node_index) const;
};
// Iterator class that provides a convenient way to iterate over the
// regions of a FreeRegionList.
class FreeRegionListIterator : public StackObj {
class G1FreeRegionListIterator : public StackObj {
private:
FreeRegionList* _list;
G1FreeRegionList* _list;
G1HeapRegion* _curr;
public:
@ -265,7 +265,7 @@ public:
return hr;
}
FreeRegionListIterator(FreeRegionList* list)
G1FreeRegionListIterator(G1FreeRegionList* list)
: _list(list),
_curr(list->_head) {
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,7 +29,7 @@
#include "gc/g1/g1NUMA.hpp"
inline void HeapRegionSetBase::add(G1HeapRegion* hr) {
inline void G1HeapRegionSetBase::add(G1HeapRegion* hr) {
check_mt_safety();
assert_heap_region_set(hr->containing_set() == nullptr, "should not already have a containing set");
assert_heap_region_set(hr->next() == nullptr, "should not already be linked");
@ -40,7 +40,7 @@ inline void HeapRegionSetBase::add(G1HeapRegion* hr) {
verify_region(hr);
}
inline void HeapRegionSetBase::remove(G1HeapRegion* hr) {
inline void G1HeapRegionSetBase::remove(G1HeapRegion* hr) {
check_mt_safety();
verify_region(hr);
assert_heap_region_set(hr->next() == nullptr, "should already be unlinked");
@ -51,7 +51,7 @@ inline void HeapRegionSetBase::remove(G1HeapRegion* hr) {
_length--;
}
inline void FreeRegionList::add_to_tail(G1HeapRegion* region_to_add) {
inline void G1FreeRegionList::add_to_tail(G1HeapRegion* region_to_add) {
assert_free_region_list((length() == 0 && _head == nullptr && _tail == nullptr && _last == nullptr) ||
(length() > 0 && _head != nullptr && _tail != nullptr && _tail->hrm_index() < region_to_add->hrm_index()),
"invariant");
@ -71,7 +71,7 @@ inline void FreeRegionList::add_to_tail(G1HeapRegion* region_to_add) {
increase_length(region_to_add->node_index());
}
inline void FreeRegionList::add_ordered(G1HeapRegion* hr) {
inline void G1FreeRegionList::add_ordered(G1HeapRegion* hr) {
assert_free_region_list((length() == 0 && _head == nullptr && _tail == nullptr && _last == nullptr) ||
(length() > 0 && _head != nullptr && _tail != nullptr),
"invariant");
@ -120,7 +120,7 @@ inline void FreeRegionList::add_ordered(G1HeapRegion* hr) {
increase_length(hr->node_index());
}
inline G1HeapRegion* FreeRegionList::remove_from_head_impl() {
inline G1HeapRegion* G1FreeRegionList::remove_from_head_impl() {
G1HeapRegion* result = _head;
_head = result->next();
if (_head == nullptr) {
@ -132,7 +132,7 @@ inline G1HeapRegion* FreeRegionList::remove_from_head_impl() {
return result;
}
inline G1HeapRegion* FreeRegionList::remove_from_tail_impl() {
inline G1HeapRegion* G1FreeRegionList::remove_from_tail_impl() {
G1HeapRegion* result = _tail;
_tail = result->prev();
@ -145,7 +145,7 @@ inline G1HeapRegion* FreeRegionList::remove_from_tail_impl() {
return result;
}
inline G1HeapRegion* FreeRegionList::remove_region(bool from_head) {
inline G1HeapRegion* G1FreeRegionList::remove_region(bool from_head) {
check_mt_safety();
verify_optional();
@ -174,7 +174,7 @@ inline G1HeapRegion* FreeRegionList::remove_region(bool from_head) {
return hr;
}
inline G1HeapRegion* FreeRegionList::remove_region_with_node_index(bool from_head,
inline G1HeapRegion* G1FreeRegionList::remove_region_with_node_index(bool from_head,
uint requested_node_index) {
assert(UseNUMA, "Invariant");
@ -232,13 +232,13 @@ inline G1HeapRegion* FreeRegionList::remove_region_with_node_index(bool from_hea
return cur;
}
inline void FreeRegionList::NodeInfo::increase_length(uint node_index) {
inline void G1FreeRegionList::NodeInfo::increase_length(uint node_index) {
if (node_index < _num_nodes) {
_length_of_node[node_index] += 1;
}
}
inline void FreeRegionList::NodeInfo::decrease_length(uint node_index) {
inline void G1FreeRegionList::NodeInfo::decrease_length(uint node_index) {
if (node_index < _num_nodes) {
assert(_length_of_node[node_index] > 0,
"Current length %u should be greater than zero for node %u",
@ -247,23 +247,23 @@ inline void FreeRegionList::NodeInfo::decrease_length(uint node_index) {
}
}
inline uint FreeRegionList::NodeInfo::length(uint node_index) const {
inline uint G1FreeRegionList::NodeInfo::length(uint node_index) const {
return _length_of_node[node_index];
}
inline void FreeRegionList::increase_length(uint node_index) {
inline void G1FreeRegionList::increase_length(uint node_index) {
if (_node_info != nullptr) {
return _node_info->increase_length(node_index);
}
}
inline void FreeRegionList::decrease_length(uint node_index) {
inline void G1FreeRegionList::decrease_length(uint node_index) {
if (_node_info != nullptr) {
return _node_info->decrease_length(node_index);
}
}
inline uint FreeRegionList::length(uint node_index) const {
inline uint G1FreeRegionList::length(uint node_index) const {
if (_node_info != nullptr) {
return _node_info->length(node_index);
} else {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,7 @@
#include "gc/g1/g1HeapRegionTracer.hpp"
#include "jfr/jfrEvents.hpp"
void HeapRegionTracer::send_region_type_change(uint index,
void G1HeapRegionTracer::send_region_type_change(uint index,
G1HeapRegionTraceType::Type from,
G1HeapRegionTraceType::Type to,
uintptr_t start,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,7 +29,7 @@
#include "memory/allStatic.hpp"
#include "utilities/globalDefinitions.hpp"
class HeapRegionTracer : AllStatic {
class G1HeapRegionTracer : AllStatic {
public:
static void send_region_type_change(uint index,
G1HeapRegionTraceType::Type from,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,12 +26,12 @@
#include "gc/g1/g1HeapRegionTraceType.hpp"
#include "gc/g1/g1HeapRegionType.hpp"
const HeapRegionType HeapRegionType::Eden = HeapRegionType(EdenTag);
const HeapRegionType HeapRegionType::Survivor = HeapRegionType(SurvTag);
const HeapRegionType HeapRegionType::Old = HeapRegionType(OldTag);
const HeapRegionType HeapRegionType::Humongous = HeapRegionType(StartsHumongousTag);
const G1HeapRegionType G1HeapRegionType::Eden = G1HeapRegionType(EdenTag);
const G1HeapRegionType G1HeapRegionType::Survivor = G1HeapRegionType(SurvTag);
const G1HeapRegionType G1HeapRegionType::Old = G1HeapRegionType(OldTag);
const G1HeapRegionType G1HeapRegionType::Humongous = G1HeapRegionType(StartsHumongousTag);
bool HeapRegionType::is_valid(Tag tag) {
bool G1HeapRegionType::is_valid(Tag tag) {
switch (tag) {
case FreeTag:
case EdenTag:
@ -45,7 +45,7 @@ bool HeapRegionType::is_valid(Tag tag) {
}
}
const char* HeapRegionType::get_str() const {
const char* G1HeapRegionType::get_str() const {
hrt_assert_is_valid(_tag);
switch (_tag) {
case FreeTag: return "FREE";
@ -60,7 +60,7 @@ const char* HeapRegionType::get_str() const {
}
}
const char* HeapRegionType::get_short_str() const {
const char* G1HeapRegionType::get_short_str() const {
hrt_assert_is_valid(_tag);
switch (_tag) {
case FreeTag: return "F";
@ -75,7 +75,7 @@ const char* HeapRegionType::get_short_str() const {
}
}
G1HeapRegionTraceType::Type HeapRegionType::get_trace_type() {
G1HeapRegionTraceType::Type G1HeapRegionType::get_trace_type() {
hrt_assert_is_valid(_tag);
switch (_tag) {
case FreeTag: return G1HeapRegionTraceType::Free;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,7 +31,7 @@
#define hrt_assert_is_valid(tag) \
assert(is_valid((tag)), "invalid HR type: %u", (uint) (tag))
class HeapRegionType {
class G1HeapRegionType {
friend class VMStructs;
private:
@ -101,7 +101,7 @@ private:
}
// Private constructor used for static constants
HeapRegionType(Tag t) : _tag(t) { hrt_assert_is_valid(_tag); }
G1HeapRegionType(Tag t) : _tag(t) { hrt_assert_is_valid(_tag); }
public:
// Queries
@ -159,12 +159,12 @@ public:
const char* get_short_str() const;
G1HeapRegionTraceType::Type get_trace_type();
HeapRegionType() : _tag(FreeTag) { hrt_assert_is_valid(_tag); }
G1HeapRegionType() : _tag(FreeTag) { hrt_assert_is_valid(_tag); }
static const HeapRegionType Eden;
static const HeapRegionType Survivor;
static const HeapRegionType Old;
static const HeapRegionType Humongous;
static const G1HeapRegionType Eden;
static const G1HeapRegionType Survivor;
static const G1HeapRegionType Old;
static const G1HeapRegionType Humongous;
};
#endif // SHARE_GC_G1_G1HEAPREGIONTYPE_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -79,7 +79,7 @@ struct G1HeapTransition::DetailedUsage : public StackObj {
_humongous_region_count(0) {}
};
class G1HeapTransition::DetailedUsageClosure: public HeapRegionClosure {
class G1HeapTransition::DetailedUsageClosure: public G1HeapRegionClosure {
public:
DetailedUsage _usage;
bool do_heap_region(G1HeapRegion* r) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -111,7 +111,7 @@ class G1VerifyCodeRootOopClosure: public OopClosure {
// Now fetch the region containing the object
G1HeapRegion* hr = _g1h->heap_region_containing(obj);
HeapRegionRemSet* hrrs = hr->rem_set();
G1HeapRegionRemSet* hrrs = hr->rem_set();
// Verify that the code root list for this region
// contains the nmethod
if (!hrrs->code_roots_list_contains(_nm)) {
@ -231,7 +231,7 @@ public:
size_t live_bytes() { return _live_bytes; }
};
class VerifyRegionClosure: public HeapRegionClosure {
class VerifyRegionClosure: public G1HeapRegionClosure {
private:
VerifyOption _vo;
bool _failures;
@ -287,10 +287,10 @@ public:
class G1VerifyTask: public WorkerTask {
private:
G1CollectedHeap* _g1h;
VerifyOption _vo;
bool _failures;
HeapRegionClaimer _hrclaimer;
G1CollectedHeap* _g1h;
VerifyOption _vo;
bool _failures;
G1HeapRegionClaimer _hrclaimer;
public:
G1VerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
@ -377,20 +377,20 @@ void G1HeapVerifier::verify(VerifyOption vo) {
// Heap region set verification
class VerifyRegionListsClosure : public HeapRegionClosure {
class VerifyRegionListsClosure : public G1HeapRegionClosure {
private:
HeapRegionSet* _old_set;
HeapRegionSet* _humongous_set;
HeapRegionManager* _hrm;
G1HeapRegionSet* _old_set;
G1HeapRegionSet* _humongous_set;
G1HeapRegionManager* _hrm;
public:
uint _old_count;
uint _humongous_count;
uint _free_count;
VerifyRegionListsClosure(HeapRegionSet* old_set,
HeapRegionSet* humongous_set,
HeapRegionManager* hrm) :
VerifyRegionListsClosure(G1HeapRegionSet* old_set,
G1HeapRegionSet* humongous_set,
G1HeapRegionManager* hrm) :
_old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
_old_count(), _humongous_count(), _free_count(){ }
@ -412,7 +412,7 @@ public:
return false;
}
void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
void verify_counts(G1HeapRegionSet* old_set, G1HeapRegionSet* humongous_set, G1HeapRegionManager* free_list) {
guarantee(old_set->length() == _old_count, "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count);
guarantee(humongous_set->length() == _humongous_count, "Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count);
guarantee(free_list->num_free_regions() == _free_count, "Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count);
@ -435,7 +435,7 @@ void G1HeapVerifier::verify_region_sets() {
_g1h->collection_set()->candidates()->verify();
}
class G1VerifyRegionMarkingStateClosure : public HeapRegionClosure {
class G1VerifyRegionMarkingStateClosure : public G1HeapRegionClosure {
class MarkedBytesClosure {
size_t _marked_words;
@ -535,7 +535,7 @@ void G1HeapVerifier::verify_bitmap_clear(bool from_tams) {
return;
}
class G1VerifyBitmapClear : public HeapRegionClosure {
class G1VerifyBitmapClear : public G1HeapRegionClosure {
bool _from_tams;
public:
@ -557,7 +557,7 @@ void G1HeapVerifier::verify_bitmap_clear(bool from_tams) {
}
#ifndef PRODUCT
class G1VerifyCardTableCleanup: public HeapRegionClosure {
class G1VerifyCardTableCleanup: public G1HeapRegionClosure {
G1HeapVerifier* _verifier;
public:
G1VerifyCardTableCleanup(G1HeapVerifier* verifier)
@ -603,11 +603,11 @@ void G1HeapVerifier::verify_dirty_region(G1HeapRegion* hr) {
}
}
class G1VerifyDirtyYoungListClosure : public HeapRegionClosure {
class G1VerifyDirtyYoungListClosure : public G1HeapRegionClosure {
private:
G1HeapVerifier* _verifier;
public:
G1VerifyDirtyYoungListClosure(G1HeapVerifier* verifier) : HeapRegionClosure(), _verifier(verifier) { }
G1VerifyDirtyYoungListClosure(G1HeapVerifier* verifier) : G1HeapRegionClosure(), _verifier(verifier) { }
virtual bool do_heap_region(G1HeapRegion* r) {
_verifier->verify_dirty_region(r);
return false;
@ -619,12 +619,12 @@ void G1HeapVerifier::verify_dirty_young_regions() {
_g1h->collection_set()->iterate(&cl);
}
class G1CheckRegionAttrTableClosure : public HeapRegionClosure {
class G1CheckRegionAttrTableClosure : public G1HeapRegionClosure {
private:
bool _failures;
public:
G1CheckRegionAttrTableClosure() : HeapRegionClosure(), _failures(false) { }
G1CheckRegionAttrTableClosure() : G1HeapRegionClosure(), _failures(false) { }
virtual bool do_heap_region(G1HeapRegion* hr) {
uint i = hr->hrm_index();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -224,7 +224,7 @@ void G1NUMA::request_memory_on_node(void* aligned_address, size_t size_in_bytes,
uint G1NUMA::max_search_depth() const {
// Multiple of 3 is just random number to limit iterations.
// There would be some cases that 1 page may be consisted of multiple HeapRegions.
// There may be cases where 1 page consists of multiple heap regions.
return 3 * MAX2((uint)(page_size() / region_size()), (uint)1) * num_active_nodes();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -95,7 +95,7 @@ public:
uint index_of_current_thread() const;
// Returns the preferred index for the given G1HeapRegion index.
// This assumes that HeapRegions are evenly spit, so we can decide preferred index
// This assumes that heap regions are evenly split, so we can decide preferred index
// with the given G1HeapRegion index.
// Result is less than num_active_nodes().
uint preferred_node_index_for_index(uint region_index) const;
@ -127,7 +127,7 @@ public:
void print_statistics() const;
};
class G1NodeIndexCheckClosure : public HeapRegionClosure {
class G1NodeIndexCheckClosure : public G1HeapRegionClosure {
const char* _desc;
G1NUMA* _numa;
// Records matched count of each node.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -146,7 +146,7 @@ inline void G1ConcurrentRefineOopClosure::do_oop_work(T* p) {
return;
}
HeapRegionRemSet* to_rem_set = _g1h->heap_region_containing(obj)->rem_set();
G1HeapRegionRemSet* to_rem_set = _g1h->heap_region_containing(obj)->rem_set();
assert(to_rem_set != nullptr, "Need per-region 'into' remsets.");
if (to_rem_set->is_tracked()) {
@ -266,7 +266,7 @@ template <class T> void G1RebuildRemSetClosure::do_oop_work(T* p) {
}
G1HeapRegion* to = _g1h->heap_region_containing(obj);
HeapRegionRemSet* rem_set = to->rem_set();
G1HeapRegionRemSet* rem_set = to->rem_set();
if (rem_set->is_tracked()) {
rem_set->add_reference(p, _worker_id);
}

View File

@ -365,7 +365,7 @@ public:
_next_dirty_regions = nullptr;
}
void iterate_dirty_regions_from(HeapRegionClosure* cl, uint worker_id) {
void iterate_dirty_regions_from(G1HeapRegionClosure* cl, uint worker_id) {
uint num_regions = _next_dirty_regions->size();
if (num_regions == 0) {
@ -481,7 +481,7 @@ public:
};
// Scans a heap region for dirty cards.
class G1ScanHRForRegionClosure : public HeapRegionClosure {
class G1ScanHRForRegionClosure : public G1HeapRegionClosure {
using CardValue = CardTable::CardValue;
G1CollectedHeap* _g1h;
@ -755,7 +755,7 @@ public:
// Heap region closure to be applied to all regions in the current collection set
// increment to fix up non-card related roots.
class G1ScanCollectionSetRegionClosure : public HeapRegionClosure {
class G1ScanCollectionSetRegionClosure : public G1HeapRegionClosure {
G1ParScanThreadState* _pss;
G1RemSetScanState* _scan_state;
@ -972,13 +972,13 @@ class G1MergeHeapRootsTask : public WorkerTask {
// Visitor for remembered sets. Several methods of it are called by a region's
// card set iterator to drop card set remembered set entries onto the card.
// table. This is in addition to being the HeapRegionClosure to iterate over
// table. This is in addition to being the G1HeapRegionClosure to iterate over
// all region's remembered sets.
//
// We add a small prefetching cache in front of the actual work as dropping
// onto the card table is basically random memory access. This improves
// performance of this operation significantly.
class G1MergeCardSetClosure : public HeapRegionClosure {
class G1MergeCardSetClosure : public G1HeapRegionClosure {
friend class G1MergeCardSetCache;
G1RemSetScanState* _scan_state;
@ -1074,7 +1074,7 @@ class G1MergeHeapRootsTask : public WorkerTask {
void merge_card_set_for_region(G1HeapRegion* r) {
assert(r->in_collection_set() || r->is_starts_humongous(), "must be");
HeapRegionRemSet* rem_set = r->rem_set();
G1HeapRegionRemSet* rem_set = r->rem_set();
if (!rem_set->is_empty()) {
rem_set->iterate_for_merge(*this);
}
@ -1098,7 +1098,7 @@ class G1MergeHeapRootsTask : public WorkerTask {
// Closure to make sure that the marking bitmap is clear for any old region in
// the collection set.
// This is needed to be able to use the bitmap for evacuation failure handling.
class G1ClearBitmapClosure : public HeapRegionClosure {
class G1ClearBitmapClosure : public G1HeapRegionClosure {
G1CollectedHeap* _g1h;
void assert_bitmap_clear(G1HeapRegion* hr, const G1CMBitMap* bitmap) {
@ -1144,11 +1144,11 @@ class G1MergeHeapRootsTask : public WorkerTask {
// Helper to allow two closure to be applied when
// iterating through the collection set.
class G1CombinedClosure : public HeapRegionClosure {
HeapRegionClosure* _closure1;
HeapRegionClosure* _closure2;
class G1CombinedClosure : public G1HeapRegionClosure {
G1HeapRegionClosure* _closure1;
G1HeapRegionClosure* _closure2;
public:
G1CombinedClosure(HeapRegionClosure* cl1, HeapRegionClosure* cl2) :
G1CombinedClosure(G1HeapRegionClosure* cl1, G1HeapRegionClosure* cl2) :
_closure1(cl1),
_closure2(cl2) { }
@ -1160,7 +1160,7 @@ class G1MergeHeapRootsTask : public WorkerTask {
// Visitor for the remembered sets of humongous candidate regions to merge their
// remembered set into the card table.
class G1FlushHumongousCandidateRemSets : public HeapRegionIndexClosure {
class G1FlushHumongousCandidateRemSets : public G1HeapRegionIndexClosure {
G1MergeCardSetClosure _cl;
public:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,6 +42,7 @@ class CardTableBarrierSet;
class G1AbstractSubTask;
class G1CollectedHeap;
class G1CMBitMap;
class G1HeapRegionClaimer;
class G1RemSetScanState;
class G1ParScanThreadState;
class G1ParScanThreadStateSet;
@ -49,7 +50,6 @@ class G1Policy;
class G1RemSetSamplingTask;
class G1ScanCardClosure;
class G1ServiceThread;
class HeapRegionClaimer;
// A G1RemSet in which each heap region has a rem set that records the
// external heap references into it. Uses a mod ref bs to track updates,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -181,7 +181,7 @@ public:
};
class HRRSStatsIter: public HeapRegionClosure {
class HRRSStatsIter: public G1HeapRegionClosure {
private:
RegionTypeCounter _young;
RegionTypeCounter _humongous;
@ -216,9 +216,9 @@ public:
{}
bool do_heap_region(G1HeapRegion* r) {
HeapRegionRemSet* hrrs = r->rem_set();
G1HeapRegionRemSet* hrrs = r->rem_set();
// HeapRegionRemSet::mem_size() includes the
// G1HeapRegionRemSet::mem_size() includes the
// size of the code roots
size_t rs_unused_mem_sz = hrrs->unused_mem_size();
size_t rs_mem_sz = hrrs->mem_size();
@ -274,19 +274,19 @@ public:
}
// Largest sized rem set region statistics
HeapRegionRemSet* rem_set = max_rs_mem_sz_region()->rem_set();
G1HeapRegionRemSet* rem_set = max_rs_mem_sz_region()->rem_set();
out->print_cr(" Region with largest rem set = " HR_FORMAT ", "
"size = " SIZE_FORMAT " occupied = " SIZE_FORMAT,
HR_FORMAT_PARAMS(max_rs_mem_sz_region()),
rem_set->mem_size(),
rem_set->occupied());
HeapRegionRemSet::print_static_mem_size(out);
G1HeapRegionRemSet::print_static_mem_size(out);
G1CollectedHeap* g1h = G1CollectedHeap::heap();
g1h->card_set_freelist_pool()->print_on(out);
// Code root statistics
HeapRegionRemSet* max_code_root_rem_set = max_code_root_mem_sz_region()->rem_set();
G1HeapRegionRemSet* max_code_root_rem_set = max_code_root_mem_sz_region()->rem_set();
out->print_cr(" Total heap region code root sets sizes = " SIZE_FORMAT "%s."
" Max = " SIZE_FORMAT "%s.",
byte_size_in_proper_unit(total_code_root_mem_sz()),

View File

@ -259,7 +259,7 @@ void G1YoungCollector::wait_for_root_region_scanning() {
phase_times()->record_root_region_scan_wait_time(wait_time.seconds() * MILLIUNITS);
}
class G1PrintCollectionSetClosure : public HeapRegionClosure {
class G1PrintCollectionSetClosure : public G1HeapRegionClosure {
public:
virtual bool do_heap_region(G1HeapRegion* r) {
G1HeapRegionPrinter::cset(r);
@ -286,7 +286,7 @@ void G1YoungCollector::calculate_collection_set(G1EvacInfo* evacuation_info, dou
}
class G1PrepareEvacuationTask : public WorkerTask {
class G1PrepareRegionsClosure : public HeapRegionClosure {
class G1PrepareRegionsClosure : public G1HeapRegionClosure {
G1CollectedHeap* _g1h;
G1PrepareEvacuationTask* _parent_task;
uint _worker_humongous_total;
@ -418,7 +418,7 @@ class G1PrepareEvacuationTask : public WorkerTask {
};
G1CollectedHeap* _g1h;
HeapRegionClaimer _claimer;
G1HeapRegionClaimer _claimer;
volatile uint _humongous_total;
volatile uint _humongous_candidates;

View File

@ -30,7 +30,7 @@
#if ALLOCATION_FAILURE_INJECTOR
class SelectAllocationFailureRegionClosure : public HeapRegionClosure {
class SelectAllocationFailureRegionClosure : public G1HeapRegionClosure {
CHeapBitMap& _allocation_failure_regions;
size_t _allocation_failure_regions_num;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -333,7 +333,7 @@ G1PostEvacuateCollectionSetCleanupTask1::G1PostEvacuateCollectionSetCleanupTask1
}
}
class G1FreeHumongousRegionClosure : public HeapRegionIndexClosure {
class G1FreeHumongousRegionClosure : public G1HeapRegionIndexClosure {
uint _humongous_objects_reclaimed;
uint _humongous_regions_reclaimed;
size_t _freed_bytes;
@ -537,9 +537,9 @@ public:
class G1PostEvacuateCollectionSetCleanupTask2::ProcessEvacuationFailedRegionsTask : public G1AbstractSubTask {
G1EvacFailureRegions* _evac_failure_regions;
HeapRegionClaimer _claimer;
G1HeapRegionClaimer _claimer;
class ProcessEvacuationFailedRegionsClosure : public HeapRegionClosure {
class ProcessEvacuationFailedRegionsClosure : public G1HeapRegionClosure {
public:
bool do_heap_region(G1HeapRegion* r) override {
@ -706,7 +706,7 @@ public:
};
// Closure applied to all regions in the collection set.
class FreeCSetClosure : public HeapRegionClosure {
class FreeCSetClosure : public G1HeapRegionClosure {
// Helper to send JFR events for regions.
class JFREventForRegion {
EventGCPhaseParallel _event;
@ -807,7 +807,7 @@ public:
uint worker_id,
FreeCSetStats* stats,
G1EvacFailureRegions* evac_failure_regions) :
HeapRegionClosure(),
G1HeapRegionClosure(),
_g1h(G1CollectedHeap::heap()),
_surviving_young_words(surviving_young_words),
_worker_id(worker_id),
@ -853,14 +853,14 @@ public:
};
class G1PostEvacuateCollectionSetCleanupTask2::FreeCollectionSetTask : public G1AbstractSubTask {
G1CollectedHeap* _g1h;
G1EvacInfo* _evacuation_info;
FreeCSetStats* _worker_stats;
HeapRegionClaimer _claimer;
const size_t* _surviving_young_words;
uint _active_workers;
G1CollectedHeap* _g1h;
G1EvacInfo* _evacuation_info;
FreeCSetStats* _worker_stats;
G1HeapRegionClaimer _claimer;
const size_t* _surviving_young_words;
uint _active_workers;
G1EvacFailureRegions* _evac_failure_regions;
volatile uint _num_retained_regions;
volatile uint _num_retained_regions;
FreeCSetStats* worker_stats(uint worker) {
return &_worker_stats[worker];

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -81,7 +81,7 @@ JVMFlag::Error G1HeapRegionSizeConstraintFunc(size_t value, bool verbose) {
if (!UseG1GC) return JVMFlag::SUCCESS;
// Default value of G1HeapRegionSize=0 means will be set ergonomically.
if (FLAG_IS_CMDLINE(G1HeapRegionSize) && (value < HeapRegionBounds::min_size())) {
if (FLAG_IS_CMDLINE(G1HeapRegionSize) && (value < G1HeapRegionBounds::min_size())) {
JVMFlag::printError(verbose,
"G1HeapRegionSize (" SIZE_FORMAT ") must be "
"greater than or equal to ergonomic heap region minimum size\n",
@ -180,7 +180,7 @@ JVMFlag::Error NewSizeConstraintFuncG1(size_t value, bool verbose) {
}
size_t MaxSizeForHeapAlignmentG1() {
return HeapRegionBounds::max_size();
return G1HeapRegionBounds::max_size();
}
static JVMFlag::Error buffer_size_constraint_helper(JVMFlagsEnum flagid,

View File

@ -37,13 +37,13 @@
static_field(G1HeapRegion, GrainBytes, size_t) \
static_field(G1HeapRegion, LogOfHRGrainBytes, uint) \
\
nonstatic_field(G1HeapRegion, _type, HeapRegionType) \
nonstatic_field(G1HeapRegion, _type, G1HeapRegionType) \
nonstatic_field(G1HeapRegion, _bottom, HeapWord* const) \
nonstatic_field(G1HeapRegion, _top, HeapWord* volatile) \
nonstatic_field(G1HeapRegion, _end, HeapWord* const) \
volatile_nonstatic_field(G1HeapRegion, _pinned_object_count, size_t) \
\
nonstatic_field(HeapRegionType, _tag, HeapRegionType::Tag volatile) \
nonstatic_field(G1HeapRegionType, _tag, G1HeapRegionType::Tag volatile) \
\
\
nonstatic_field(G1HeapRegionTable, _base, address) \
@ -52,13 +52,13 @@
nonstatic_field(G1HeapRegionTable, _bias, size_t) \
nonstatic_field(G1HeapRegionTable, _shift_by, uint) \
\
nonstatic_field(HeapRegionManager, _regions, G1HeapRegionTable) \
nonstatic_field(G1HeapRegionManager, _regions, G1HeapRegionTable) \
\
volatile_nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t) \
nonstatic_field(G1CollectedHeap, _hrm, HeapRegionManager) \
nonstatic_field(G1CollectedHeap, _hrm, G1HeapRegionManager) \
nonstatic_field(G1CollectedHeap, _monitoring_support, G1MonitoringSupport*) \
nonstatic_field(G1CollectedHeap, _old_set, HeapRegionSetBase) \
nonstatic_field(G1CollectedHeap, _humongous_set, HeapRegionSetBase) \
nonstatic_field(G1CollectedHeap, _old_set, G1HeapRegionSetBase) \
nonstatic_field(G1CollectedHeap, _humongous_set, G1HeapRegionSetBase) \
\
nonstatic_field(G1MonitoringSupport, _eden_space_committed, size_t) \
nonstatic_field(G1MonitoringSupport, _eden_space_used, size_t) \
@ -67,21 +67,21 @@
nonstatic_field(G1MonitoringSupport, _old_gen_committed, size_t) \
nonstatic_field(G1MonitoringSupport, _old_gen_used, size_t) \
\
nonstatic_field(HeapRegionSetBase, _length, uint) \
nonstatic_field(G1HeapRegionSetBase, _length, uint) \
\
nonstatic_field(SATBMarkQueue, _active, bool) \
nonstatic_field(PtrQueue, _buf, void**) \
nonstatic_field(PtrQueue, _index, size_t)
#define VM_INT_CONSTANTS_G1GC(declare_constant, declare_constant_with_value) \
declare_constant(HeapRegionType::FreeTag) \
declare_constant(HeapRegionType::YoungMask) \
declare_constant(HeapRegionType::EdenTag) \
declare_constant(HeapRegionType::SurvTag) \
declare_constant(HeapRegionType::HumongousMask) \
declare_constant(HeapRegionType::StartsHumongousTag) \
declare_constant(HeapRegionType::ContinuesHumongousTag) \
declare_constant(HeapRegionType::OldMask) \
declare_constant(G1HeapRegionType::FreeTag) \
declare_constant(G1HeapRegionType::YoungMask) \
declare_constant(G1HeapRegionType::EdenTag) \
declare_constant(G1HeapRegionType::SurvTag) \
declare_constant(G1HeapRegionType::HumongousMask) \
declare_constant(G1HeapRegionType::StartsHumongousTag) \
declare_constant(G1HeapRegionType::ContinuesHumongousTag) \
declare_constant(G1HeapRegionType::OldMask) \
declare_constant(BarrierSet::G1BarrierSet) \
declare_constant(G1CardTable::g1_young_gen)
@ -94,11 +94,11 @@
declare_type(G1CollectedHeap, CollectedHeap) \
\
declare_toplevel_type(G1HeapRegion) \
declare_toplevel_type(HeapRegionManager) \
declare_toplevel_type(HeapRegionSetBase) \
declare_toplevel_type(G1HeapRegionManager) \
declare_toplevel_type(G1HeapRegionSetBase) \
declare_toplevel_type(G1MonitoringSupport) \
declare_toplevel_type(PtrQueue) \
declare_toplevel_type(HeapRegionType) \
declare_toplevel_type(G1HeapRegionType) \
declare_toplevel_type(SATBMarkQueue) \
declare_toplevel_type(G1DirtyCardQueue) \
\
@ -106,6 +106,6 @@
declare_toplevel_type(G1HeapRegion*) \
declare_toplevel_type(G1MonitoringSupport*) \
\
declare_integer_type(HeapRegionType::Tag volatile)
declare_integer_type(G1HeapRegionType::Tag volatile)
#endif // SHARE_GC_G1_VMSTRUCTS_G1_HPP

View File

@ -608,7 +608,7 @@ WB_ENTRY(jintArray, WB_G1MemoryNodeIds(JNIEnv* env, jobject o))
THROW_MSG_NULL(vmSymbols::java_lang_UnsupportedOperationException(), "WB_G1MemoryNodeIds: G1 GC is not enabled");
WB_END
class OldRegionsLivenessClosure: public HeapRegionClosure {
class OldRegionsLivenessClosure: public G1HeapRegionClosure {
private:
const int _liveness;

View File

@ -30,8 +30,8 @@ import sun.jvm.hotspot.utilities.Observable;
import sun.jvm.hotspot.utilities.Observer;
import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.gc.g1.HeapRegionClosure;
import sun.jvm.hotspot.gc.g1.PrintRegionClosure;
import sun.jvm.hotspot.gc.g1.G1HeapRegionClosure;
import sun.jvm.hotspot.gc.g1.G1PrintRegionClosure;
import sun.jvm.hotspot.gc.shared.CollectedHeap;
import sun.jvm.hotspot.gc.shared.CollectedHeapName;
import sun.jvm.hotspot.gc.shared.LiveRegionsClosure;
@ -47,7 +47,7 @@ import sun.jvm.hotspot.tools.HeapSummary;
// Mirror class for G1CollectedHeap.
public class G1CollectedHeap extends CollectedHeap {
// HeapRegionManager _hrm;
// G1HeapRegionManager _hrm;
private static long hrmFieldOffset;
// MemRegion _g1_reserved;
private static long g1ReservedFieldOffset;
@ -55,9 +55,9 @@ public class G1CollectedHeap extends CollectedHeap {
private static CIntegerField summaryBytesUsedField;
// G1MonitoringSupport* _monitoring_support;
private static AddressField monitoringSupportField;
// HeapRegionSet _old_set;
// G1HeapRegionSet _old_set;
private static long oldSetFieldOffset;
// HeapRegionSet _humongous_set;
// G1HeapRegionSet _humongous_set;
private static long humongousSetFieldOffset;
static {
@ -90,9 +90,9 @@ public class G1CollectedHeap extends CollectedHeap {
return hrm().length();
}
public HeapRegionManager hrm() {
public G1HeapRegionManager hrm() {
Address hrmAddr = addr.addOffsetTo(hrmFieldOffset);
return VMObjectFactory.newObject(HeapRegionManager.class, hrmAddr);
return VMObjectFactory.newObject(G1HeapRegionManager.class, hrmAddr);
}
public G1MonitoringSupport monitoringSupport() {
@ -100,21 +100,21 @@ public class G1CollectedHeap extends CollectedHeap {
return VMObjectFactory.newObject(G1MonitoringSupport.class, monitoringSupportAddr);
}
public HeapRegionSetBase oldSet() {
public G1HeapRegionSetBase oldSet() {
Address oldSetAddr = addr.addOffsetTo(oldSetFieldOffset);
return VMObjectFactory.newObject(HeapRegionSetBase.class, oldSetAddr);
return VMObjectFactory.newObject(G1HeapRegionSetBase.class, oldSetAddr);
}
public HeapRegionSetBase humongousSet() {
public G1HeapRegionSetBase humongousSet() {
Address humongousSetAddr = addr.addOffsetTo(humongousSetFieldOffset);
return VMObjectFactory.newObject(HeapRegionSetBase.class, humongousSetAddr);
return VMObjectFactory.newObject(G1HeapRegionSetBase.class, humongousSetAddr);
}
private Iterator<G1HeapRegion> heapRegionIterator() {
return hrm().heapRegionIterator();
}
public void heapRegionIterate(HeapRegionClosure hrcl) {
public void heapRegionIterate(G1HeapRegionClosure hrcl) {
Iterator<G1HeapRegion> iter = heapRegionIterator();
while (iter.hasNext()) {
G1HeapRegion hr = iter.next();
@ -159,7 +159,7 @@ public class G1CollectedHeap extends CollectedHeap {
}
public void printRegionDetails(PrintStream tty) {
PrintRegionClosure prc = new PrintRegionClosure(tty);
G1PrintRegionClosure prc = new G1PrintRegionClosure(tty);
heapRegionIterate(prc);
}

View File

@ -55,7 +55,7 @@ public class G1HeapRegion extends ContiguousSpace implements LiveRegionsProvider
private static long typeFieldOffset;
private static long pointerSize;
private HeapRegionType type;
private G1HeapRegionType type;
static {
VM.registerVMInitializedObserver(new Observer() {
@ -88,7 +88,7 @@ public class G1HeapRegion extends ContiguousSpace implements LiveRegionsProvider
super(addr);
Address typeAddr = (addr instanceof OopHandle) ? addr.addOffsetToAsOopHandle(typeFieldOffset)
: addr.addOffsetTo(typeFieldOffset);
type = VMObjectFactory.newObject(HeapRegionType.class, typeAddr);
type = VMObjectFactory.newObject(G1HeapRegionType.class, typeAddr);
}
public Address bottom() { return bottomField.getValue(addr); }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,6 +24,6 @@
package sun.jvm.hotspot.gc.g1;
public interface HeapRegionClosure {
public interface G1HeapRegionClosure {
public void doHeapRegion(G1HeapRegion hr);
}

View File

@ -37,9 +37,9 @@ import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
// Mirror class for HeapRegionManager.
// Mirror class for G1HeapRegionManager.
public class HeapRegionManager extends VMObject {
public class G1HeapRegionManager extends VMObject {
// G1HeapRegionTable _regions
private static long regionsFieldOffset;
@ -52,7 +52,7 @@ public class HeapRegionManager extends VMObject {
}
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("HeapRegionManager");
Type type = db.lookupType("G1HeapRegionManager");
regionsFieldOffset = type.getField("_regions").getOffset();
}
@ -74,7 +74,7 @@ public class HeapRegionManager extends VMObject {
return regions().heapRegionIterator(length());
}
public HeapRegionManager(Address addr) {
public G1HeapRegionManager(Address addr) {
super(addr);
}

View File

@ -37,9 +37,9 @@ import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
// Mirror class for HeapRegionSetBase. Represents a group of regions.
// Mirror class for G1HeapRegionSetBase. Represents a group of regions.
public class HeapRegionSetBase extends VMObject {
public class G1HeapRegionSetBase extends VMObject {
// uint _length
private static CIntegerField lengthField;
@ -53,7 +53,7 @@ public class HeapRegionSetBase extends VMObject {
}
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("HeapRegionSetBase");
Type type = db.lookupType("G1HeapRegionSetBase");
lengthField = type.getCIntegerField("_length");
}
@ -62,7 +62,7 @@ public class HeapRegionSetBase extends VMObject {
return lengthField.getValue(addr);
}
public HeapRegionSetBase(Address addr) {
public G1HeapRegionSetBase(Address addr) {
super(addr);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,10 +33,10 @@ import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
// Mirror class for HeapRegionType. Currently we don't actually include
// Mirror class for G1HeapRegionType. Currently we don't actually include
// any of its fields but only iterate over it.
public class HeapRegionType extends VMObject {
public class G1HeapRegionType extends VMObject {
private static int freeTag;
private static int youngMask;
@ -58,18 +58,18 @@ public class HeapRegionType extends VMObject {
}
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("HeapRegionType");
Type type = db.lookupType("G1HeapRegionType");
tagField = type.getCIntegerField("_tag");
freeTag = db.lookupIntConstant("HeapRegionType::FreeTag");
youngMask = db.lookupIntConstant("HeapRegionType::YoungMask");
edenTag = db.lookupIntConstant("HeapRegionType::EdenTag");
survTag = db.lookupIntConstant("HeapRegionType::SurvTag");
startsHumongousTag = db.lookupIntConstant("HeapRegionType::StartsHumongousTag");
continuesHumongousTag = db.lookupIntConstant("HeapRegionType::ContinuesHumongousTag");
humongousMask = db.lookupIntConstant("HeapRegionType::HumongousMask");
oldMask = db.lookupIntConstant("HeapRegionType::OldMask");
freeTag = db.lookupIntConstant("G1HeapRegionType::FreeTag");
youngMask = db.lookupIntConstant("G1HeapRegionType::YoungMask");
edenTag = db.lookupIntConstant("G1HeapRegionType::EdenTag");
survTag = db.lookupIntConstant("G1HeapRegionType::SurvTag");
startsHumongousTag = db.lookupIntConstant("G1HeapRegionType::StartsHumongousTag");
continuesHumongousTag = db.lookupIntConstant("G1HeapRegionType::ContinuesHumongousTag");
humongousMask = db.lookupIntConstant("G1HeapRegionType::HumongousMask");
oldMask = db.lookupIntConstant("G1HeapRegionType::OldMask");
}
public boolean isFree() {
@ -104,7 +104,7 @@ public class HeapRegionType extends VMObject {
return (tagField.getValue(addr) & oldMask) != 0;
}
public HeapRegionType(Address addr) {
public G1HeapRegionType(Address addr) {
super(addr);
}

View File

@ -27,10 +27,10 @@ package sun.jvm.hotspot.gc.g1;
import java.io.PrintStream;
import sun.jvm.hotspot.gc.g1.G1HeapRegion;
public class PrintRegionClosure implements HeapRegionClosure {
public class G1PrintRegionClosure implements G1HeapRegionClosure {
private PrintStream tty;
public PrintRegionClosure(PrintStream tty) {
public G1PrintRegionClosure(PrintStream tty) {
this.tty = tty;
}

View File

@ -252,8 +252,8 @@ public class HeapSummary extends Tool {
G1MonitoringSupport monitoringSupport = g1h.monitoringSupport();
long edenSpaceRegionNum = monitoringSupport.edenSpaceRegionNum();
long survivorSpaceRegionNum = monitoringSupport.survivorSpaceRegionNum();
HeapRegionSetBase oldSet = g1h.oldSet();
HeapRegionSetBase humongousSet = g1h.humongousSet();
G1HeapRegionSetBase oldSet = g1h.oldSet();
G1HeapRegionSetBase humongousSet = g1h.humongousSet();
long oldGenRegionNum = oldSet.length() + humongousSet.length();
printG1Space(tty, "G1 Heap:", g1h.n_regions(),
g1h.used(), g1h.capacity());

View File

@ -34,12 +34,12 @@
#include "unittest.hpp"
// @requires UseG1GC
TEST_OTHER_VM(FreeRegionList, length) {
TEST_OTHER_VM(G1FreeRegionList, length) {
if (!UseG1GC) {
return;
}
FreeRegionList l("test");
G1FreeRegionList l("test");
const uint num_regions_in_test = 5;
// Create a fake heap. It does not need to be valid, as the G1HeapRegion constructor

View File

@ -236,8 +236,8 @@ void G1CardSetContainersTest::cardset_bitmap_test(uint threshold, uint size_in_b
}
TEST_VM_F(G1CardSetContainersTest, basic_cardset_inptr_test) {
uint const min = (uint)log2i(HeapRegionBounds::min_size());
uint const max = (uint)log2i(HeapRegionBounds::max_size());
uint const min = (uint)log2i(G1HeapRegionBounds::min_size());
uint const max = (uint)log2i(G1HeapRegionBounds::max_size());
for (uint i = min; i <= max; i++) {
G1CardSetContainersTest::cardset_inlineptr_test(i - CardTable::card_shift());

View File

@ -62,7 +62,7 @@ static void generate_random_map(G1CommittedRegionMap* map) {
static void random_deactivate(G1CommittedRegionMap* map) {
uint current_offset = 0;
do {
HeapRegionRange current = map->next_active_range(current_offset);
G1HeapRegionRange current = map->next_active_range(current_offset);
if (mutate()) {
if (current.length() < 5) {
// For short ranges, deactivate whole.
@ -79,7 +79,7 @@ static void random_deactivate(G1CommittedRegionMap* map) {
static void random_uncommit_or_reactive(G1CommittedRegionMap* map) {
uint current_offset = 0;
do {
HeapRegionRange current = map->next_inactive_range(current_offset);
G1HeapRegionRange current = map->next_inactive_range(current_offset);
// Randomly either reactivate or uncommit
if (mutate()) {
map->reactivate(current.start(), current.end());
@ -94,7 +94,7 @@ static void random_uncommit_or_reactive(G1CommittedRegionMap* map) {
static void random_activate_free(G1CommittedRegionMap* map) {
uint current_offset = 0;
do {
HeapRegionRange current = map->next_committable_range(current_offset);
G1HeapRegionRange current = map->next_committable_range(current_offset);
// Randomly either reactivate or uncommit
if (mutate()) {
if (current.length() < 5) {