8281879: Serial: Merge CardGeneration into TenuredGeneration

Reviewed-by: tschatzl, iwalulya
Albert Mingkun Yang 2022-03-21 16:20:20 +00:00
parent 999da9bfc5
commit 19d34bdf99
14 changed files with 367 additions and 543 deletions

@@ -26,8 +26,8 @@
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/tenuredGeneration.inline.hpp"
#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/cardGeneration.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/genCollectedHeap.hpp"
@@ -40,13 +40,286 @@
#include "runtime/java.hpp"
#include "utilities/macros.hpp"
bool TenuredGeneration::grow_by(size_t bytes) {
assert_correct_size_change_locking();
bool result = _virtual_space.expand_by(bytes);
if (result) {
size_t new_word_size =
heap_word_size(_virtual_space.committed_size());
MemRegion mr(space()->bottom(), new_word_size);
// Expand card table
GenCollectedHeap::heap()->rem_set()->resize_covered_region(mr);
// Expand shared block offset array
_bts->resize(new_word_size);
// Fix for bug #4668531
if (ZapUnusedHeapArea) {
MemRegion mangle_region(space()->end(),
(HeapWord*)_virtual_space.high());
SpaceMangler::mangle_region(mangle_region);
}
// Expand space -- also expands space's BOT
// (which uses (part of) shared array above)
space()->set_end((HeapWord*)_virtual_space.high());
// update the space and generation capacity counters
update_counters();
size_t new_mem_size = _virtual_space.committed_size();
size_t old_mem_size = new_mem_size - bytes;
log_trace(gc, heap)("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
name(), old_mem_size/K, bytes/K, new_mem_size/K);
}
return result;
}
bool TenuredGeneration::expand(size_t bytes, size_t expand_bytes) {
assert_locked_or_safepoint(Heap_lock);
if (bytes == 0) {
return true; // That's what grow_by(0) would return
}
size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes);
if (aligned_bytes == 0) {
// The alignment caused the number of bytes to wrap. An expand_by(0) will
// return true with the implication that an expansion was done when it
// was not. A call to expand implies a best effort to expand by "bytes"
// but not a guarantee. Align down to give a best effort. This is likely
// the most that the generation can expand since it has some capacity to
// start with.
aligned_bytes = ReservedSpace::page_align_size_down(bytes);
}
size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
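// Illustrative example, assuming 4K pages (values not from the source):
// expand(10*K, 16*K) first attempts grow_by(16K); failing that,
// grow_by(12K), i.e. 10K aligned up; failing that, grow_to_reserved().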
bool success = false;
if (aligned_expand_bytes > aligned_bytes) {
success = grow_by(aligned_expand_bytes);
}
if (!success) {
success = grow_by(aligned_bytes);
}
if (!success) {
success = grow_to_reserved();
}
if (success && GCLocker::is_active_and_needs_gc()) {
log_trace(gc, heap)("Garbage collection disabled, expanded heap instead");
}
return success;
}
bool TenuredGeneration::grow_to_reserved() {
assert_correct_size_change_locking();
bool success = true;
const size_t remaining_bytes = _virtual_space.uncommitted_size();
if (remaining_bytes > 0) {
success = grow_by(remaining_bytes);
DEBUG_ONLY(if (!success) log_warning(gc)("grow to reserved failed");)
}
return success;
}
void TenuredGeneration::shrink(size_t bytes) {
assert_correct_size_change_locking();
size_t size = ReservedSpace::page_align_size_down(bytes);
if (size == 0) {
return;
}
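// Illustrative example, assuming 4K pages: shrink(10*K) aligns down
// to 8K, so only 8K of committed space is released.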
// Shrink committed space
_virtual_space.shrink_by(size);
// Shrink space; this also shrinks the space's BOT
space()->set_end((HeapWord*) _virtual_space.high());
size_t new_word_size = heap_word_size(space()->capacity());
// Shrink the shared block offset array
_bts->resize(new_word_size);
MemRegion mr(space()->bottom(), new_word_size);
// Shrink the card table
GenCollectedHeap::heap()->rem_set()->resize_covered_region(mr);
size_t new_mem_size = _virtual_space.committed_size();
size_t old_mem_size = new_mem_size + size;
log_trace(gc, heap)("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
name(), old_mem_size/K, new_mem_size/K);
}
// Objects in this generation may have moved; invalidate this
// generation's cards.
void TenuredGeneration::invalidate_remembered_set() {
_rs->invalidate(used_region());
}
void TenuredGeneration::compute_new_size_inner() {
assert(_shrink_factor <= 100, "invalid shrink factor");
size_t current_shrink_factor = _shrink_factor;
if (ShrinkHeapInSteps) {
// Always reset '_shrink_factor' if the heap is shrunk in steps.
// If we shrink the heap in this iteration, '_shrink_factor' will
// be recomputed based on the old value further down in this function.
_shrink_factor = 0;
}
// We don't have floating point command-line arguments
// Note: argument processing ensures that MinHeapFreeRatio < 100.
const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
const double maximum_used_percentage = 1.0 - minimum_free_percentage;
// Compute some numbers about the state of the heap.
const size_t used_after_gc = used();
const size_t capacity_after_gc = capacity();
const double min_tmp = used_after_gc / maximum_used_percentage;
size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
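// Illustrative example: with MinHeapFreeRatio=40 and used_after_gc=60K,
// minimum_desired_capacity = 60K / (1 - 0.40) = 100K.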
// Don't shrink less than the initial generation size
minimum_desired_capacity = MAX2(minimum_desired_capacity, initial_size());
assert(used_after_gc <= minimum_desired_capacity, "sanity check");
const size_t free_after_gc = free();
const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
log_trace(gc, heap)("TenuredGeneration::compute_new_size:");
log_trace(gc, heap)(" minimum_free_percentage: %6.2f maximum_used_percentage: %6.2f",
minimum_free_percentage,
maximum_used_percentage);
log_trace(gc, heap)(" free_after_gc : %6.1fK used_after_gc : %6.1fK capacity_after_gc : %6.1fK",
free_after_gc / (double) K,
used_after_gc / (double) K,
capacity_after_gc / (double) K);
log_trace(gc, heap)(" free_percentage: %6.2f", free_percentage);
if (capacity_after_gc < minimum_desired_capacity) {
// If we have less free space than we want then expand
size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
// Don't expand unless it's significant
if (expand_bytes >= _min_heap_delta_bytes) {
expand(expand_bytes, 0); // safe if expansion fails
}
log_trace(gc, heap)(" expanding: minimum_desired_capacity: %6.1fK expand_bytes: %6.1fK _min_heap_delta_bytes: %6.1fK",
minimum_desired_capacity / (double) K,
expand_bytes / (double) K,
_min_heap_delta_bytes / (double) K);
return;
}
// No expansion, now see if we want to shrink
size_t shrink_bytes = 0;
// We would never want to shrink more than this
size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;
if (MaxHeapFreeRatio < 100) {
const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
const double minimum_used_percentage = 1.0 - maximum_free_percentage;
const double max_tmp = used_after_gc / minimum_used_percentage;
size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
maximum_desired_capacity = MAX2(maximum_desired_capacity, initial_size());
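// Illustrative example: with MaxHeapFreeRatio=70 and used_after_gc=60K,
// maximum_desired_capacity = 60K / (1 - 0.70) = 200K.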
log_trace(gc, heap)(" maximum_free_percentage: %6.2f minimum_used_percentage: %6.2f",
maximum_free_percentage, minimum_used_percentage);
log_trace(gc, heap)(" _capacity_at_prologue: %6.1fK minimum_desired_capacity: %6.1fK maximum_desired_capacity: %6.1fK",
_capacity_at_prologue / (double) K,
minimum_desired_capacity / (double) K,
maximum_desired_capacity / (double) K);
assert(minimum_desired_capacity <= maximum_desired_capacity,
"sanity check");
if (capacity_after_gc > maximum_desired_capacity) {
// Capacity too large, compute shrinking size
shrink_bytes = capacity_after_gc - maximum_desired_capacity;
if (ShrinkHeapInSteps) {
// If ShrinkHeapInSteps is true (the default),
// we don't want to shrink all the way back to initSize if people call
// System.gc(), because some programs do that between "phases" and then
// we'd just have to grow the heap up again for the next phase. So we
// damp the shrinking: 0% on the first call, 10% on the second call, 40%
// on the third call, and 100% by the fourth call. But if we recompute
// size without shrinking, it goes back to 0%.
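// Illustrative example: with a 1000K excess, four successive calls
// shrink by 0K, 100K, 400K and 1000K respectively.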
shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
if (current_shrink_factor == 0) {
_shrink_factor = 10;
} else {
_shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
}
}
assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
log_trace(gc, heap)(" shrinking: initSize: %.1fK maximum_desired_capacity: %.1fK",
initial_size() / (double) K, maximum_desired_capacity / (double) K);
log_trace(gc, heap)(" shrink_bytes: %.1fK current_shrink_factor: " SIZE_FORMAT " new shrink factor: " SIZE_FORMAT " _min_heap_delta_bytes: %.1fK",
shrink_bytes / (double) K,
current_shrink_factor,
_shrink_factor,
_min_heap_delta_bytes / (double) K);
}
}
if (capacity_after_gc > _capacity_at_prologue) {
// We might have expanded for promotions, in which case we might want to
// take back that expansion if there's room after GC. That keeps us from
// stretching the heap with promotions when there's plenty of room.
size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
// We have two shrinking computations, take the largest
shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
log_trace(gc, heap)(" aggressive shrinking: _capacity_at_prologue: %.1fK capacity_after_gc: %.1fK expansion_for_promotion: %.1fK shrink_bytes: %.1fK",
capacity_after_gc / (double) K,
_capacity_at_prologue / (double) K,
expansion_for_promotion / (double) K,
shrink_bytes / (double) K);
}
// Don't shrink unless it's significant
if (shrink_bytes >= _min_heap_delta_bytes) {
shrink(shrink_bytes);
}
}
void TenuredGeneration::space_iterate(SpaceClosure* blk,
bool usedOnly) {
blk->do_space(space());
}
void TenuredGeneration::younger_refs_iterate(OopIterateClosure* blk) {
// Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
// "sp" that point into the young generation.
// The iteration is only over objects allocated at the start of the
// iterations; objects allocated as a result of applying the closure are
// not included.
HeapWord* gen_boundary = reserved().start();
_rs->younger_refs_in_space_iterate(space(), gen_boundary, blk);
}
TenuredGeneration::TenuredGeneration(ReservedSpace rs,
size_t initial_byte_size,
size_t min_byte_size,
size_t max_byte_size,
CardTableRS* remset) :
CardGeneration(rs, initial_byte_size, remset)
Generation(rs, initial_byte_size), _rs(remset),
_min_heap_delta_bytes(), _capacity_at_prologue(),
_used_at_prologue()
{
// If we don't shrink the heap in steps, '_shrink_factor' is always 100%.
_shrink_factor = ShrinkHeapInSteps ? 0 : 100;
HeapWord* start = (HeapWord*)rs.base();
size_t reserved_byte_size = rs.size();
assert((uintptr_t(start) & 3) == 0, "bad alignment");
assert((reserved_byte_size & 3) == 0, "bad alignment");
MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
_bts = new BlockOffsetSharedArray(reserved_mr,
heap_word_size(initial_byte_size));
MemRegion committed_mr(start, heap_word_size(initial_byte_size));
_rs->resize_covered_region(committed_mr);
// Verify that the start and end of this generation is the start of a card.
// If this wasn't true, a single card could span more than one generation,
// which would cause problems when we commit/uncommit memory, and when we
// clear and dirty cards.
guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
if (reserved_mr.end() != GenCollectedHeap::heap()->reserved_region().end()) {
// Don't check at the very end of the heap as we'll assert that we're probing off
// the end if we try.
guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
}
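// Illustrative example: with the default 512-byte card size, both ends
// of the generation must fall on 512-byte boundaries, so that no card
// spans two generations.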
_min_heap_delta_bytes = MinHeapDeltaBytes;
_capacity_at_prologue = initial_byte_size;
_used_at_prologue = 0;
HeapWord* bottom = (HeapWord*) _virtual_space.low();
HeapWord* end = (HeapWord*) _virtual_space.high();
_the_space = new TenuredSpace(_bts, MemRegion(bottom, end));
@@ -114,7 +387,7 @@ void TenuredGeneration::compute_new_size() {
const size_t used_after_gc = used();
const size_t capacity_after_gc = capacity();
CardGeneration::compute_new_size();
compute_new_size_inner();
assert(used() == used_after_gc && used_after_gc <= capacity(),
"used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
@@ -195,10 +468,6 @@ TenuredGeneration::expand_and_allocate(size_t word_size, bool is_tlab) {
return _the_space->allocate(word_size);
}
bool TenuredGeneration::expand(size_t bytes, size_t expand_bytes) {
return CardGeneration::expand(bytes, expand_bytes);
}
size_t TenuredGeneration::unsafe_max_alloc_nogc() const {
return _the_space->free();
}

@@ -26,36 +26,85 @@
#define SHARE_GC_SERIAL_TENUREDGENERATION_HPP
#include "gc/serial/cSpaceCounters.hpp"
#include "gc/shared/cardGeneration.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/gcStats.hpp"
#include "gc/shared/generationCounters.hpp"
#include "utilities/macros.hpp"
class BlockOffsetSharedArray;
class CardTableRS;
class CompactibleSpace;
// TenuredGeneration models the heap containing old (promoted/tenured) objects
// contained in a single contiguous space.
//
// contained in a single contiguous space. This generation is covered by a card
// table, and uses a card-size block-offset array to implement block_start.
// Garbage collection is performed using mark-compact.
class TenuredGeneration: public CardGeneration {
class TenuredGeneration: public Generation {
friend class VMStructs;
// Abstractly, this is a subtype that gets access to protected fields.
friend class VM_PopulateDumpSharedSpace;
protected:
// This is shared with other generations.
CardTableRS* _rs;
// This is local to this generation.
BlockOffsetSharedArray* _bts;
// Current shrinking effect: this damps shrinking when the heap gets empty.
size_t _shrink_factor;
size_t _min_heap_delta_bytes; // Minimum amount to expand.
// Some statistics from before gc started.
// These are gathered in the gc_prologue (and should_collect)
// to control growing/shrinking policy in spite of promotions.
size_t _capacity_at_prologue;
size_t _used_at_prologue;
void assert_correct_size_change_locking();
ContiguousSpace* _the_space; // Actual space holding objects
GenerationCounters* _gen_counters;
CSpaceCounters* _space_counters;
// Allocation failure
virtual bool expand(size_t bytes, size_t expand_bytes);
// Accessing spaces
ContiguousSpace* space() const { return _the_space; }
void assert_correct_size_change_locking();
// Attempt to expand the generation by "bytes". Expand by at least
// "expand_bytes". Return true if some amount (not necessarily the
// full "bytes") was expanded.
bool expand(size_t bytes, size_t expand_bytes);
// Shrink generation with specified size
void shrink(size_t bytes);
void compute_new_size_inner();
public:
virtual void compute_new_size();
virtual void invalidate_remembered_set();
// Grow generation with specified size (returns false if unable to grow)
bool grow_by(size_t bytes);
// Grow generation to reserved size.
bool grow_to_reserved();
size_t capacity() const;
size_t used() const;
size_t free() const;
MemRegion used_region() const;
void space_iterate(SpaceClosure* blk, bool usedOnly = false);
void younger_refs_iterate(OopIterateClosure* blk);
bool is_in(const void* p) const;
CompactibleSpace* first_compaction_space() const;
TenuredGeneration(ReservedSpace rs,
size_t initial_byte_size,
size_t min_byte_size,
@@ -104,8 +153,6 @@ class TenuredGeneration: public CardGeneration {
size_t word_size,
bool is_tlab);
virtual void compute_new_size();
// Performance Counter support
void update_counters();

@@ -29,6 +29,30 @@
#include "gc/shared/space.inline.hpp"
inline size_t TenuredGeneration::capacity() const {
return space()->capacity();
}
inline size_t TenuredGeneration::used() const {
return space()->used();
}
inline size_t TenuredGeneration::free() const {
return space()->free();
}
inline MemRegion TenuredGeneration::used_region() const {
return space()->used_region();
}
inline bool TenuredGeneration::is_in(const void* p) const {
return space()->is_in(p);
}
inline CompactibleSpace* TenuredGeneration::first_compaction_space() const {
return space();
}
HeapWord* TenuredGeneration::allocate(size_t word_size,
bool is_tlab) {
assert(!is_tlab, "TenuredGeneration does not support TLAB allocation");

@@ -45,7 +45,7 @@
declare_toplevel_type, \
declare_integer_type) \
declare_type(SerialHeap, GenCollectedHeap) \
declare_type(TenuredGeneration, CardGeneration) \
declare_type(TenuredGeneration, Generation) \
declare_type(TenuredSpace, OffsetTableContigSpace) \
\
declare_type(DefNewGeneration, Generation) \

@@ -1,318 +0,0 @@
/*
* Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/cardGeneration.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "logging/log.hpp"
#include "runtime/java.hpp"
CardGeneration::CardGeneration(ReservedSpace rs,
size_t initial_byte_size,
CardTableRS* remset) :
Generation(rs, initial_byte_size), _rs(remset),
_min_heap_delta_bytes(), _capacity_at_prologue(),
_used_at_prologue()
{
// If we don't shrink the heap in steps, '_shrink_factor' is always 100%.
_shrink_factor = ShrinkHeapInSteps ? 0 : 100;
HeapWord* start = (HeapWord*)rs.base();
size_t reserved_byte_size = rs.size();
assert((uintptr_t(start) & 3) == 0, "bad alignment");
assert((reserved_byte_size & 3) == 0, "bad alignment");
MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
_bts = new BlockOffsetSharedArray(reserved_mr,
heap_word_size(initial_byte_size));
MemRegion committed_mr(start, heap_word_size(initial_byte_size));
_rs->resize_covered_region(committed_mr);
// Verify that the start and end of this generation is the start of a card.
// If this wasn't true, a single card could span more than one generation,
// which would cause problems when we commit/uncommit memory, and when we
// clear and dirty cards.
guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
if (reserved_mr.end() != GenCollectedHeap::heap()->reserved_region().end()) {
// Don't check at the very end of the heap as we'll assert that we're probing off
// the end if we try.
guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
}
_min_heap_delta_bytes = MinHeapDeltaBytes;
_capacity_at_prologue = initial_byte_size;
_used_at_prologue = 0;
}
bool CardGeneration::grow_by(size_t bytes) {
assert_correct_size_change_locking();
bool result = _virtual_space.expand_by(bytes);
if (result) {
size_t new_word_size =
heap_word_size(_virtual_space.committed_size());
MemRegion mr(space()->bottom(), new_word_size);
// Expand card table
GenCollectedHeap::heap()->rem_set()->resize_covered_region(mr);
// Expand shared block offset array
_bts->resize(new_word_size);
// Fix for bug #4668531
if (ZapUnusedHeapArea) {
MemRegion mangle_region(space()->end(),
(HeapWord*)_virtual_space.high());
SpaceMangler::mangle_region(mangle_region);
}
// Expand space -- also expands space's BOT
// (which uses (part of) shared array above)
space()->set_end((HeapWord*)_virtual_space.high());
// update the space and generation capacity counters
update_counters();
size_t new_mem_size = _virtual_space.committed_size();
size_t old_mem_size = new_mem_size - bytes;
log_trace(gc, heap)("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
name(), old_mem_size/K, bytes/K, new_mem_size/K);
}
return result;
}
bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
assert_locked_or_safepoint(Heap_lock);
if (bytes == 0) {
return true; // That's what grow_by(0) would return
}
size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes);
if (aligned_bytes == 0) {
// The alignment caused the number of bytes to wrap. An expand_by(0) will
// return true with the implication that an expansion was done when it
// was not. A call to expand implies a best effort to expand by "bytes"
// but not a guarantee. Align down to give a best effort. This is likely
// the most that the generation can expand since it has some capacity to
// start with.
aligned_bytes = ReservedSpace::page_align_size_down(bytes);
}
size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
bool success = false;
if (aligned_expand_bytes > aligned_bytes) {
success = grow_by(aligned_expand_bytes);
}
if (!success) {
success = grow_by(aligned_bytes);
}
if (!success) {
success = grow_to_reserved();
}
if (success && GCLocker::is_active_and_needs_gc()) {
log_trace(gc, heap)("Garbage collection disabled, expanded heap instead");
}
return success;
}
bool CardGeneration::grow_to_reserved() {
assert_correct_size_change_locking();
bool success = true;
const size_t remaining_bytes = _virtual_space.uncommitted_size();
if (remaining_bytes > 0) {
success = grow_by(remaining_bytes);
DEBUG_ONLY(if (!success) log_warning(gc)("grow to reserved failed");)
}
return success;
}
void CardGeneration::shrink(size_t bytes) {
assert_correct_size_change_locking();
size_t size = ReservedSpace::page_align_size_down(bytes);
if (size == 0) {
return;
}
// Shrink committed space
_virtual_space.shrink_by(size);
// Shrink space; this also shrinks the space's BOT
space()->set_end((HeapWord*) _virtual_space.high());
size_t new_word_size = heap_word_size(space()->capacity());
// Shrink the shared block offset array
_bts->resize(new_word_size);
MemRegion mr(space()->bottom(), new_word_size);
// Shrink the card table
GenCollectedHeap::heap()->rem_set()->resize_covered_region(mr);
size_t new_mem_size = _virtual_space.committed_size();
size_t old_mem_size = new_mem_size + size;
log_trace(gc, heap)("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
name(), old_mem_size/K, new_mem_size/K);
}
// Objects in this generation may have moved; invalidate this
// generation's cards.
void CardGeneration::invalidate_remembered_set() {
_rs->invalidate(used_region());
}
void CardGeneration::compute_new_size() {
assert(_shrink_factor <= 100, "invalid shrink factor");
size_t current_shrink_factor = _shrink_factor;
if (ShrinkHeapInSteps) {
// Always reset '_shrink_factor' if the heap is shrunk in steps.
// If we shrink the heap in this iteration, '_shrink_factor' will
// be recomputed based on the old value further down in this function.
_shrink_factor = 0;
}
// We don't have floating point command-line arguments
// Note: argument processing ensures that MinHeapFreeRatio < 100.
const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
const double maximum_used_percentage = 1.0 - minimum_free_percentage;
// Compute some numbers about the state of the heap.
const size_t used_after_gc = used();
const size_t capacity_after_gc = capacity();
const double min_tmp = used_after_gc / maximum_used_percentage;
size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
// Don't shrink less than the initial generation size
minimum_desired_capacity = MAX2(minimum_desired_capacity, initial_size());
assert(used_after_gc <= minimum_desired_capacity, "sanity check");
const size_t free_after_gc = free();
const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
log_trace(gc, heap)("CardGeneration::compute_new_size:");
log_trace(gc, heap)(" minimum_free_percentage: %6.2f maximum_used_percentage: %6.2f",
minimum_free_percentage,
maximum_used_percentage);
log_trace(gc, heap)(" free_after_gc : %6.1fK used_after_gc : %6.1fK capacity_after_gc : %6.1fK",
free_after_gc / (double) K,
used_after_gc / (double) K,
capacity_after_gc / (double) K);
log_trace(gc, heap)(" free_percentage: %6.2f", free_percentage);
if (capacity_after_gc < minimum_desired_capacity) {
// If we have less free space than we want then expand
size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
// Don't expand unless it's significant
if (expand_bytes >= _min_heap_delta_bytes) {
expand(expand_bytes, 0); // safe if expansion fails
}
log_trace(gc, heap)(" expanding: minimum_desired_capacity: %6.1fK expand_bytes: %6.1fK _min_heap_delta_bytes: %6.1fK",
minimum_desired_capacity / (double) K,
expand_bytes / (double) K,
_min_heap_delta_bytes / (double) K);
return;
}
// No expansion, now see if we want to shrink
size_t shrink_bytes = 0;
// We would never want to shrink more than this
size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;
if (MaxHeapFreeRatio < 100) {
const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
const double minimum_used_percentage = 1.0 - maximum_free_percentage;
const double max_tmp = used_after_gc / minimum_used_percentage;
size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
maximum_desired_capacity = MAX2(maximum_desired_capacity, initial_size());
log_trace(gc, heap)(" maximum_free_percentage: %6.2f minimum_used_percentage: %6.2f",
maximum_free_percentage, minimum_used_percentage);
log_trace(gc, heap)(" _capacity_at_prologue: %6.1fK minimum_desired_capacity: %6.1fK maximum_desired_capacity: %6.1fK",
_capacity_at_prologue / (double) K,
minimum_desired_capacity / (double) K,
maximum_desired_capacity / (double) K);
assert(minimum_desired_capacity <= maximum_desired_capacity,
"sanity check");
if (capacity_after_gc > maximum_desired_capacity) {
// Capacity too large, compute shrinking size
shrink_bytes = capacity_after_gc - maximum_desired_capacity;
if (ShrinkHeapInSteps) {
// If ShrinkHeapInSteps is true (the default),
// we don't want to shrink all the way back to initSize if people call
// System.gc(), because some programs do that between "phases" and then
// we'd just have to grow the heap up again for the next phase. So we
// damp the shrinking: 0% on the first call, 10% on the second call, 40%
// on the third call, and 100% by the fourth call. But if we recompute
// size without shrinking, it goes back to 0%.
shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
if (current_shrink_factor == 0) {
_shrink_factor = 10;
} else {
_shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
}
}
assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
log_trace(gc, heap)(" shrinking: initSize: %.1fK maximum_desired_capacity: %.1fK",
initial_size() / (double) K, maximum_desired_capacity / (double) K);
log_trace(gc, heap)(" shrink_bytes: %.1fK current_shrink_factor: " SIZE_FORMAT " new shrink factor: " SIZE_FORMAT " _min_heap_delta_bytes: %.1fK",
shrink_bytes / (double) K,
current_shrink_factor,
_shrink_factor,
_min_heap_delta_bytes / (double) K);
}
}
if (capacity_after_gc > _capacity_at_prologue) {
// We might have expanded for promotions, in which case we might want to
// take back that expansion if there's room after GC. That keeps us from
// stretching the heap with promotions when there's plenty of room.
size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
// We have two shrinking computations, take the largest
shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
log_trace(gc, heap)(" aggressive shrinking: _capacity_at_prologue: %.1fK capacity_after_gc: %.1fK expansion_for_promotion: %.1fK shrink_bytes: %.1fK",
capacity_after_gc / (double) K,
_capacity_at_prologue / (double) K,
expansion_for_promotion / (double) K,
shrink_bytes / (double) K);
}
// Don't shrink unless it's significant
if (shrink_bytes >= _min_heap_delta_bytes) {
shrink(shrink_bytes);
}
}
void CardGeneration::space_iterate(SpaceClosure* blk,
bool usedOnly) {
blk->do_space(space());
}
void CardGeneration::younger_refs_iterate(OopIterateClosure* blk) {
// Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
// "sp" that point into the young generation.
// The iteration is only over objects allocated at the start of the
// iterations; objects allocated as a result of applying the closure are
// not included.
HeapWord* gen_boundary = reserved().start();
_rs->younger_refs_in_space_iterate(space(), gen_boundary, blk);
}

@@ -1,95 +0,0 @@
/*
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_SHARED_CARDGENERATION_HPP
#define SHARE_GC_SHARED_CARDGENERATION_HPP
// Class CardGeneration is a generation that is covered by a card table,
// and uses a card-size block-offset array to implement block_start.
#include "gc/shared/generation.hpp"
class BlockOffsetSharedArray;
class CardTableRS;
class CompactibleSpace;
class CardGeneration: public Generation {
friend class VMStructs;
protected:
// This is shared with other generations.
CardTableRS* _rs;
// This is local to this generation.
BlockOffsetSharedArray* _bts;
// Current shrinking effect: this damps shrinking when the heap gets empty.
size_t _shrink_factor;
size_t _min_heap_delta_bytes; // Minimum amount to expand.
// Some statistics from before gc started.
// These are gathered in the gc_prologue (and should_collect)
// to control growing/shrinking policy in spite of promotions.
size_t _capacity_at_prologue;
size_t _used_at_prologue;
CardGeneration(ReservedSpace rs, size_t initial_byte_size, CardTableRS* remset);
virtual void assert_correct_size_change_locking() = 0;
virtual CompactibleSpace* space() const = 0;
public:
// Attempt to expand the generation by "bytes". Expand by at least
// "expand_bytes". Return true if some amount (not necessarily the
// full "bytes") was expanded.
virtual bool expand(size_t bytes, size_t expand_bytes);
// Shrink generation with specified size
virtual void shrink(size_t bytes);
virtual void compute_new_size();
virtual void invalidate_remembered_set();
// Grow generation with specified size (returns false if unable to grow)
bool grow_by(size_t bytes);
// Grow generation to reserved size.
bool grow_to_reserved();
size_t capacity() const;
size_t used() const;
size_t free() const;
MemRegion used_region() const;
void space_iterate(SpaceClosure* blk, bool usedOnly = false);
void younger_refs_iterate(OopIterateClosure* blk);
bool is_in(const void* p) const;
CompactibleSpace* first_compaction_space() const;
};
#endif // SHARE_GC_SHARED_CARDGENERATION_HPP

@@ -1,56 +0,0 @@
/*
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_SHARED_CARDGENERATION_INLINE_HPP
#define SHARE_GC_SHARED_CARDGENERATION_INLINE_HPP
#include "gc/shared/cardGeneration.hpp"
#include "gc/shared/space.hpp"
inline size_t CardGeneration::capacity() const {
return space()->capacity();
}
inline size_t CardGeneration::used() const {
return space()->used();
}
inline size_t CardGeneration::free() const {
return space()->free();
}
inline MemRegion CardGeneration::used_region() const {
return space()->used_region();
}
inline bool CardGeneration::is_in(const void* p) const {
return space()->is_in(p);
}
inline CompactibleSpace* CardGeneration::first_compaction_space() const {
return space();
}
#endif // SHARE_GC_SHARED_CARDGENERATION_INLINE_HPP

@@ -41,8 +41,7 @@
//
// Generation - abstract base class
// - DefNewGeneration - allocation area (copy collected)
// - CardGeneration - abstract class adding offset array behavior
// - TenuredGeneration - tenured (old object) space (markSweepCompact)
// - TenuredGeneration - tenured (old object) space (markSweepCompact)
//
// The system configuration currently allowed is:
//

@@ -26,7 +26,6 @@
#define SHARE_GC_SHARED_VMSTRUCTS_GC_HPP
#include "gc/shared/ageTable.hpp"
#include "gc/shared/cardGeneration.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.hpp"
@@ -101,11 +100,11 @@
nonstatic_field(BlockOffsetArrayContigSpace, _next_offset_threshold, HeapWord*) \
nonstatic_field(BlockOffsetArrayContigSpace, _next_offset_index, size_t) \
\
nonstatic_field(CardGeneration, _rs, CardTableRS*) \
nonstatic_field(CardGeneration, _bts, BlockOffsetSharedArray*) \
nonstatic_field(CardGeneration, _shrink_factor, size_t) \
nonstatic_field(CardGeneration, _capacity_at_prologue, size_t) \
nonstatic_field(CardGeneration, _used_at_prologue, size_t) \
nonstatic_field(TenuredGeneration, _rs, CardTableRS*) \
nonstatic_field(TenuredGeneration, _bts, BlockOffsetSharedArray*) \
nonstatic_field(TenuredGeneration, _shrink_factor, size_t) \
nonstatic_field(TenuredGeneration, _capacity_at_prologue, size_t) \
nonstatic_field(TenuredGeneration, _used_at_prologue, size_t) \
\
nonstatic_field(CardTable, _whole_heap, const MemRegion) \
nonstatic_field(CardTable, _guard_index, const size_t) \
@@ -186,7 +185,6 @@
declare_toplevel_type(CollectedHeap) \
declare_type(GenCollectedHeap, CollectedHeap) \
declare_toplevel_type(Generation) \
declare_type(CardGeneration, Generation) \
declare_toplevel_type(Space) \
declare_type(CompactibleSpace, Space) \
declare_type(ContiguousSpace, CompactibleSpace) \

@@ -39,7 +39,7 @@ import sun.jvm.hotspot.utilities.Observer;
<P> Garbage collection is performed using mark-compact. </P> */
public class TenuredGeneration extends CardGeneration {
public class TenuredGeneration extends Generation {
private static AddressField theSpaceField;
static {

@@ -1,40 +0,0 @@
/*
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.gc.shared;
import sun.jvm.hotspot.debugger.*;
/** Class CardGeneration is a generation that is covered by a card
table, and uses a card-size block-offset array to implement
block_start. */
public abstract class CardGeneration extends Generation {
public CardGeneration(Address addr) {
super(addr);
}
// FIXME: not sure what I need to expose from here in order to have
// verification similar to that of the old RememberedSet
}

@@ -38,10 +38,7 @@ import sun.jvm.hotspot.utilities.Observer;
<ul>
<li> Generation
<ul>
<li> CardGeneration
<ul>
<li> TenuredGeneration
</ul>
<li> TenuredGeneration
<li> DefNewGeneration
</ul>
</ul>

@@ -57,7 +57,7 @@ public class ClhsdbVmStructsDump {
"field Klass _name Symbol*",
"type ClassLoaderData* null",
"field JavaThread _osthread OSThread*",
"type TenuredGeneration CardGeneration",
"type TenuredGeneration Generation",
"type Universe null",
"type ConstantPoolCache MetaspaceObj"));
test.run(theApp.getPid(), cmds, expStrMap, null);

@@ -62,7 +62,6 @@ public class TestType {
"type ConstantPoolCache MetaspaceObj",
"type ConstantPool Metadata",
"type CompilerThread JavaThread",
"type CardGeneration Generation",
"type ArrayKlass Klass",
"type InstanceKlass Klass"));
// String to check for in the output of "type InstanceKlass"