8324981: Shenandoah: Move commit and soft max heap changed methods into heap
Reviewed-by: shade
commit 2cd1ba6a52
parent 1733d2ea24
src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp

@@ -84,11 +84,11 @@ void ShenandoahControlThread::run_service() {
     bool explicit_gc_requested = is_gc_requested && is_explicit_gc(requested_gc_cause);
     bool implicit_gc_requested = is_gc_requested && !is_explicit_gc(requested_gc_cause);
 
-    // This control loop iteration have seen this much allocations.
+    // This control loop iteration has seen this much allocation.
     size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed);
 
     // Check if we have seen a new target for soft max heap size.
-    bool soft_max_changed = check_soft_max_changed();
+    bool soft_max_changed = heap->check_soft_max_changed();
 
     // Choose which GC mode to run in. The block below should select a single mode.
     GCMode mode = none;
@@ -287,7 +287,7 @@ void ShenandoahControlThread::run_service() {
                              heap->soft_max_capacity() :
                              heap->min_capacity();
 
-      service_uncommit(shrink_before, shrink_until);
+      heap->maybe_uncommit(shrink_before, shrink_until);
       heap->phase_timings()->flush_cycle_to_global();
       last_shrink_time = current;
     }
@@ -310,25 +310,6 @@ void ShenandoahControlThread::run_service() {
   }
 }
 
-bool ShenandoahControlThread::check_soft_max_changed() const {
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-  size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
-  size_t old_soft_max = heap->soft_max_capacity();
-  if (new_soft_max != old_soft_max) {
-    new_soft_max = MAX2(heap->min_capacity(), new_soft_max);
-    new_soft_max = MIN2(heap->max_capacity(), new_soft_max);
-    if (new_soft_max != old_soft_max) {
-      log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
-                   byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
-                   byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
-      );
-      heap->set_soft_max_capacity(new_soft_max);
-      return true;
-    }
-  }
-  return false;
-}
-
 void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
   // Normal cycle goes via all concurrent phases. If allocation failure (af) happens during
   // any of the concurrent phases, it first degrades to Degenerated GC and completes GC there.
@@ -420,29 +401,6 @@ void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause
   gc.collect(cause);
 }
 
-void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) {
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-
-  // Determine if there is work to do. This avoids taking heap lock if there is
-  // no work available, avoids spamming logs with superfluous logging messages,
-  // and minimises the amount of work while locks are taken.
-
-  if (heap->committed() <= shrink_until) return;
-
-  bool has_work = false;
-  for (size_t i = 0; i < heap->num_regions(); i++) {
-    ShenandoahHeapRegion *r = heap->get_region(i);
-    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
-      has_work = true;
-      break;
-    }
-  }
-
-  if (has_work) {
-    heap->entry_uncommit(shrink_before, shrink_until);
-  }
-}
-
 bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
   return GCCause::is_user_requested_gc(cause) ||
          GCCause::is_serviceability_requested_gc(cause);
src/hotspot/share/gc/shenandoah/shenandoahControlThread.hpp

@@ -72,7 +72,6 @@ private:
   void service_concurrent_normal_cycle(GCCause::Cause cause);
   void service_stw_full_cycle(GCCause::Cause cause);
   void service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point);
-  void service_uncommit(double shrink_before, size_t shrink_until);
 
   bool try_set_alloc_failure_gc();
   void notify_alloc_failure_waiters();
@@ -90,8 +89,6 @@ private:
 
   bool is_explicit_gc(GCCause::Cause cause) const;
 
-  bool check_soft_max_changed() const;
-
 public:
   // Constructor
   ShenandoahControlThread();
src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp

@@ -754,6 +754,33 @@ bool ShenandoahHeap::is_in(const void* p) const {
   return p >= heap_base && p < last_region_end;
 }
 
+void ShenandoahHeap::maybe_uncommit(double shrink_before, size_t shrink_until) {
+  assert (ShenandoahUncommit, "should be enabled");
+
+  // Determine if there is work to do. This avoids taking heap lock if there is
+  // no work available, avoids spamming logs with superfluous logging messages,
+  // and minimises the amount of work while locks are taken.
+
+  if (committed() <= shrink_until) return;
+
+  bool has_work = false;
+  for (size_t i = 0; i < num_regions(); i++) {
+    ShenandoahHeapRegion* r = get_region(i);
+    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
+      has_work = true;
+      break;
+    }
+  }
+
+  if (has_work) {
+    static const char* msg = "Concurrent uncommit";
+    ShenandoahConcurrentPhase gcPhase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
+    EventMark em("%s", msg);
+
+    op_uncommit(shrink_before, shrink_until);
+  }
+}
+
 void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
   assert (ShenandoahUncommit, "should be enabled");
 
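Note: the new ShenandoahHeap::maybe_uncommit keeps the cheap pre-check from the old control-thread code: bail out when committed memory is already at or below the shrink target, then scan for at least one stale empty region before starting the heavier "Concurrent uncommit" phase, so the heap lock is never taken when there is nothing to do. Below is a minimal standalone C++ sketch of that shape; Region, the free function maybe_uncommit, and the sample values in main are hypothetical stand-ins, not the real ShenandoahHeapRegion API.

#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for a heap region's uncommit-related state.
struct Region {
  bool   empty_committed; // committed but holds no live objects
  double empty_time;      // timestamp when the region became empty
};

// Two-stage check: return early if the heap is already at or below the shrink
// target, then look for at least one region that has been empty long enough
// before doing any real (lock-taking) uncommit work.
void maybe_uncommit(const std::vector<Region>& regions,
                    std::size_t committed, std::size_t shrink_until, double shrink_before) {
  if (committed <= shrink_until) return;

  bool has_work = false;
  for (const Region& r : regions) {
    if (r.empty_committed && r.empty_time < shrink_before) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    // The real code starts the "Concurrent uncommit" phase here and calls op_uncommit().
    std::printf("would uncommit regions emptied before %.3f\n", shrink_before);
  }
}

int main() {
  std::vector<Region> regions = {{true, 1.0}, {false, 0.0}, {true, 9.0}};
  maybe_uncommit(regions, /*committed=*/64u << 20, /*shrink_until=*/32u << 20,
                 /*shrink_before=*/5.0);
}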
@@ -784,6 +811,24 @@ void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
   }
 }
 
+bool ShenandoahHeap::check_soft_max_changed() {
+  size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
+  size_t old_soft_max = soft_max_capacity();
+  if (new_soft_max != old_soft_max) {
+    new_soft_max = MAX2(min_capacity(), new_soft_max);
+    new_soft_max = MIN2(max_capacity(), new_soft_max);
+    if (new_soft_max != old_soft_max) {
+      log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
+                   byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
+                   byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
+      );
+      set_soft_max_capacity(new_soft_max);
+      return true;
+    }
+  }
+  return false;
+}
+
 void ShenandoahHeap::notify_heap_changed() {
   // Update monitoring counters when we took a new region. This amortizes the
   // update costs on slow path.
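Note: check_soft_max_changed clamps the manageable SoftMaxHeapSize flag into [min_capacity, max_capacity] and only stores and logs the new value when the clamped result still differs from the current soft max. A small standalone illustration of that clamping follows; std::max/std::min stand in for HotSpot's MAX2/MIN2, and the plain variables (all capacities and sizes below are made-up) stand in for the heap fields and the flag.

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  // Hypothetical stand-ins for the heap's bounds and current soft max.
  const std::size_t min_capacity = 64u << 20;    // 64 MB
  const std::size_t max_capacity = 1024u << 20;  // 1 GB
  std::size_t soft_max_capacity  = 512u << 20;   // current soft max, 512 MB

  // New target, as it would arrive via the manageable SoftMaxHeapSize flag.
  std::size_t new_soft_max = 2048u << 20;        // deliberately above max_capacity

  if (new_soft_max != soft_max_capacity) {
    // Same clamping the heap method performs with MAX2/MIN2.
    new_soft_max = std::max(min_capacity, new_soft_max);
    new_soft_max = std::min(max_capacity, new_soft_max);
    if (new_soft_max != soft_max_capacity) {
      std::printf("Soft Max Heap Size: %zuM -> %zuM\n",
                  soft_max_capacity >> 20, new_soft_max >> 20);
      soft_max_capacity = new_soft_max;  // the heap stores this via set_soft_max_capacity()
    }
  }
  // Prints "Soft Max Heap Size: 512M -> 1024M": the request was clamped to max_capacity.
}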
@@ -2259,14 +2304,6 @@ void ShenandoahHeap::safepoint_synchronize_end() {
   SuspendibleThreadSet::desynchronize();
 }
 
-void ShenandoahHeap::entry_uncommit(double shrink_before, size_t shrink_until) {
-  static const char *msg = "Concurrent uncommit";
-  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
-  EventMark em("%s", msg);
-
-  op_uncommit(shrink_before, shrink_until);
-}
-
 void ShenandoahHeap::try_inject_alloc_failure() {
   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
     _inject_alloc_failure.set();
src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp

@@ -364,10 +364,14 @@ public:
   void cancel_gc(GCCause::Cause cause);
 
 public:
-  // Elastic heap support
-  void entry_uncommit(double shrink_before, size_t shrink_until);
+  // These will uncommit empty regions if heap::committed > shrink_until
+  // and there exists at least one region which was made empty before shrink_before.
+  void maybe_uncommit(double shrink_before, size_t shrink_until);
   void op_uncommit(double shrink_before, size_t shrink_until);
 
+  // Returns true if the soft maximum heap has been changed using management APIs.
+  bool check_soft_max_changed();
+
 private:
   // GC support
   // Reset bitmap, prepare regions for new GC cycle
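Note: after this change the control thread keeps only the policy (what capacity to shrink to, and how old an empty region must be), while the mechanism lives behind the two heap methods declared above. A rough standalone sketch of that calling pattern follows; MockHeap, control_loop_iteration, and uncommit_delay_s are hypothetical stand-ins, and the shrink_before/shrink_until choices paraphrase the surrounding run_service() code rather than anything added by this diff.

#include <cstddef>

// Hypothetical mock of the two entry points ShenandoahHeap now owns; the
// bodies here are placeholders, not the real implementations.
class MockHeap {
public:
  bool check_soft_max_changed() { return false; }  // placeholder
  void maybe_uncommit(double /*shrink_before*/, std::size_t /*shrink_until*/) {}
  std::size_t soft_max_capacity() const { return 512u << 20; }
  std::size_t min_capacity() const      { return 64u << 20; }
};

// Sketch of the delegation left in the control loop: pick the shrink target
// and the age cutoff, then hand both to the heap.
void control_loop_iteration(MockHeap* heap, double now, double uncommit_delay_s,
                            bool explicit_gc_requested) {
  bool soft_max_changed = heap->check_soft_max_changed();

  // Explicit GC or a soft-max change shrinks immediately; periodic uncommit
  // only touches regions that have been empty for at least the delay.
  double shrink_before = (explicit_gc_requested || soft_max_changed)
                             ? now
                             : now - uncommit_delay_s;
  std::size_t shrink_until = soft_max_changed ? heap->soft_max_capacity()
                                              : heap->min_capacity();

  heap->maybe_uncommit(shrink_before, shrink_until);
}

int main() {
  MockHeap heap;
  control_loop_iteration(&heap, /*now=*/10.0, /*uncommit_delay_s=*/5.0, /*explicit_gc_requested=*/false);
}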