8228991: Obsolete -XX:UseAdaptiveGCBoundary
Make option obsolete and removed supporting code. Reviewed-by: tschatzl, stefank
This commit is contained in:
parent
05d6a66330
commit
4d487927ad
src/hotspot/share
gc
parallel
adjoiningGenerations.cppadjoiningGenerations.hppadjoiningGenerationsForHeteroHeap.cppadjoiningGenerationsForHeteroHeap.hppadjoiningVirtualSpaces.cppadjoiningVirtualSpaces.hppasPSOldGen.cppasPSOldGen.hppasPSYoungGen.cppasPSYoungGen.hppparallelArguments.cppparallelScavengeHeap.cpppsAdaptiveSizePolicy.cpppsAdaptiveSizePolicy.hpppsGCAdaptivePolicyCounters.cpppsGCAdaptivePolicyCounters.hpppsParallelCompact.cpppsParallelCompact.hpppsScavenge.cpppsScavenge.hpppsYoungGen.hppvmStructs_parallelgc.hpp
shared
runtime
test/hotspot/jtreg/gc
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -24,7 +24,6 @@
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/parallel/adjoiningGenerations.hpp"
|
||||
#include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
|
||||
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
|
||||
#include "gc/parallel/parallelScavengeHeap.hpp"
|
||||
#include "gc/parallel/parallelArguments.hpp"
|
||||
@ -35,10 +34,6 @@
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/ostream.hpp"
|
||||
|
||||
// If boundary moving is being used, create the young gen and old
|
||||
// gen with ASPSYoungGen and ASPSOldGen, respectively. Revert to
|
||||
// the old behavior otherwise (with PSYoungGen and PSOldGen).
|
||||
|
||||
AdjoiningGenerations::AdjoiningGenerations(ReservedSpace old_young_rs) :
|
||||
_virtual_spaces(new AdjoiningVirtualSpaces(old_young_rs, MinOldSize,
|
||||
MinNewSize, GenAlignment)) {
|
||||
@ -53,69 +48,31 @@ AdjoiningGenerations::AdjoiningGenerations(ReservedSpace old_young_rs) :
|
||||
init_low_byte_size <= max_low_byte_size, "Parameter check");
|
||||
assert(min_high_byte_size <= init_high_byte_size &&
|
||||
init_high_byte_size <= max_high_byte_size, "Parameter check");
|
||||
// Create the generations differently based on the option to
|
||||
// move the boundary.
|
||||
if (UseAdaptiveGCBoundary) {
|
||||
// Initialize the adjoining virtual spaces. Then pass the
|
||||
// a virtual to each generation for initialization of the
|
||||
// generation.
|
||||
|
||||
// Does the actual creation of the virtual spaces
|
||||
_virtual_spaces->initialize(max_low_byte_size,
|
||||
init_low_byte_size,
|
||||
init_high_byte_size);
|
||||
// Layout the reserved space for the generations.
|
||||
// If OldGen is allocated on nv-dimm, we need to split the reservation (this is required for windows).
|
||||
ReservedSpace old_rs =
|
||||
virtual_spaces()->reserved_space().first_part(max_low_byte_size, ParallelArguments::is_heterogeneous_heap() /* split */);
|
||||
ReservedSpace heap_rs =
|
||||
virtual_spaces()->reserved_space().last_part(max_low_byte_size);
|
||||
ReservedSpace young_rs = heap_rs.first_part(max_high_byte_size);
|
||||
assert(young_rs.size() == heap_rs.size(), "Didn't reserve all of the heap");
|
||||
|
||||
// Place the young gen at the high end. Passes in the virtual space.
|
||||
_young_gen = new ASPSYoungGen(_virtual_spaces->high(),
|
||||
_virtual_spaces->high()->committed_size(),
|
||||
min_high_byte_size,
|
||||
_virtual_spaces->high_byte_size_limit());
|
||||
// Create the generations. Virtual spaces are not passed in.
|
||||
_young_gen = new PSYoungGen(init_high_byte_size,
|
||||
min_high_byte_size,
|
||||
max_high_byte_size);
|
||||
_old_gen = new PSOldGen(init_low_byte_size,
|
||||
min_low_byte_size,
|
||||
max_low_byte_size,
|
||||
"old", 1);
|
||||
|
||||
// Place the old gen at the low end. Passes in the virtual space.
|
||||
_old_gen = new ASPSOldGen(_virtual_spaces->low(),
|
||||
_virtual_spaces->low()->committed_size(),
|
||||
min_low_byte_size,
|
||||
_virtual_spaces->low_byte_size_limit(),
|
||||
"old", 1);
|
||||
|
||||
young_gen()->initialize_work();
|
||||
assert(young_gen()->reserved().byte_size() <= young_gen()->gen_size_limit(),
|
||||
"Consistency check");
|
||||
assert(old_young_rs.size() >= young_gen()->gen_size_limit(),
|
||||
"Consistency check");
|
||||
|
||||
old_gen()->initialize_work("old", 1);
|
||||
assert(old_gen()->reserved().byte_size() <= old_gen()->gen_size_limit(),
|
||||
"Consistency check");
|
||||
assert(old_young_rs.size() >= old_gen()->gen_size_limit(),
|
||||
"Consistency check");
|
||||
} else {
|
||||
|
||||
// Layout the reserved space for the generations.
|
||||
// If OldGen is allocated on nv-dimm, we need to split the reservation (this is required for windows).
|
||||
ReservedSpace old_rs =
|
||||
virtual_spaces()->reserved_space().first_part(max_low_byte_size, ParallelArguments::is_heterogeneous_heap() /* split */);
|
||||
ReservedSpace heap_rs =
|
||||
virtual_spaces()->reserved_space().last_part(max_low_byte_size);
|
||||
ReservedSpace young_rs = heap_rs.first_part(max_high_byte_size);
|
||||
assert(young_rs.size() == heap_rs.size(), "Didn't reserve all of the heap");
|
||||
|
||||
// Create the generations. Virtual spaces are not passed in.
|
||||
_young_gen = new PSYoungGen(init_high_byte_size,
|
||||
min_high_byte_size,
|
||||
max_high_byte_size);
|
||||
_old_gen = new PSOldGen(init_low_byte_size,
|
||||
min_low_byte_size,
|
||||
max_low_byte_size,
|
||||
"old", 1);
|
||||
|
||||
// The virtual spaces are created by the initialization of the gens.
|
||||
_young_gen->initialize(young_rs, GenAlignment);
|
||||
assert(young_gen()->gen_size_limit() == young_rs.size(),
|
||||
"Consistency check");
|
||||
_old_gen->initialize(old_rs, GenAlignment, "old", 1);
|
||||
assert(old_gen()->gen_size_limit() == old_rs.size(), "Consistency check");
|
||||
}
|
||||
// The virtual spaces are created by the initialization of the gens.
|
||||
_young_gen->initialize(young_rs, GenAlignment);
|
||||
assert(young_gen()->gen_size_limit() == young_rs.size(),
|
||||
"Consistency check");
|
||||
_old_gen->initialize(old_rs, GenAlignment, "old", 1);
|
||||
assert(old_gen()->gen_size_limit() == old_rs.size(), "Consistency check");
|
||||
}
|
||||
|
||||
AdjoiningGenerations::AdjoiningGenerations(): _young_gen(NULL), _old_gen(NULL), _virtual_spaces(NULL) { }
|
||||
@ -124,169 +81,6 @@ size_t AdjoiningGenerations::reserved_byte_size() {
|
||||
return virtual_spaces()->reserved_space().size();
|
||||
}
|
||||
|
||||
void log_before_expansion(bool old, size_t expand_in_bytes, size_t change_in_bytes, size_t max_size) {
|
||||
Log(gc, ergo, heap) log;
|
||||
if (!log.is_debug()) {
|
||||
return;
|
||||
}
|
||||
log.debug("Before expansion of %s gen with boundary move", old ? "old" : "young");
|
||||
log.debug(" Requested change: " SIZE_FORMAT_HEX " Attempted change: " SIZE_FORMAT_HEX,
|
||||
expand_in_bytes, change_in_bytes);
|
||||
ResourceMark rm;
|
||||
LogStream ls(log.debug());
|
||||
ParallelScavengeHeap::heap()->print_on(&ls);
|
||||
log.debug(" PS%sGen max size: " SIZE_FORMAT "K", old ? "Old" : "Young", max_size/K);
|
||||
}
|
||||
|
||||
void log_after_expansion(bool old, size_t max_size) {
|
||||
Log(gc, ergo, heap) log;
|
||||
if (!log.is_debug()) {
|
||||
return;
|
||||
}
|
||||
log.debug("After expansion of %s gen with boundary move", old ? "old" : "young");
|
||||
ResourceMark rm;
|
||||
LogStream ls(log.debug());
|
||||
ParallelScavengeHeap::heap()->print_on(&ls);
|
||||
log.debug(" PS%sGen max size: " SIZE_FORMAT "K", old ? "Old" : "Young", max_size/K);
|
||||
}
|
||||
|
||||
// Make checks on the current sizes of the generations and
|
||||
// the constraints on the sizes of the generations. Push
|
||||
// up the boundary within the constraints. A partial
|
||||
// push can occur.
|
||||
void AdjoiningGenerations::request_old_gen_expansion(size_t expand_in_bytes) {
|
||||
assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");
|
||||
|
||||
assert_lock_strong(ExpandHeap_lock);
|
||||
assert_locked_or_safepoint(Heap_lock);
|
||||
|
||||
// These sizes limit the amount the boundaries can move. Effectively,
|
||||
// the generation says how much it is willing to yield to the other
|
||||
// generation.
|
||||
const size_t young_gen_available = young_gen()->available_for_contraction();
|
||||
const size_t old_gen_available = old_gen()->available_for_expansion();
|
||||
const size_t alignment = virtual_spaces()->alignment();
|
||||
size_t change_in_bytes = MIN3(young_gen_available,
|
||||
old_gen_available,
|
||||
align_up(expand_in_bytes, alignment));
|
||||
|
||||
if (change_in_bytes == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
log_before_expansion(true, expand_in_bytes, change_in_bytes, old_gen()->max_gen_size());
|
||||
|
||||
// Move the boundary between the generations up (smaller young gen).
|
||||
if (virtual_spaces()->adjust_boundary_up(change_in_bytes)) {
|
||||
young_gen()->reset_after_change();
|
||||
old_gen()->reset_after_change();
|
||||
}
|
||||
|
||||
// The total reserved for the generations should match the sum
|
||||
// of the two even if the boundary is moving.
|
||||
assert(reserved_byte_size() ==
|
||||
old_gen()->max_gen_size() + young_gen()->max_size(),
|
||||
"Space is missing");
|
||||
young_gen()->space_invariants();
|
||||
old_gen()->space_invariants();
|
||||
|
||||
log_after_expansion(true, old_gen()->max_gen_size());
|
||||
}
|
||||
|
||||
// See comments on request_old_gen_expansion()
|
||||
bool AdjoiningGenerations::request_young_gen_expansion(size_t expand_in_bytes) {
|
||||
assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");
|
||||
|
||||
// If eden is not empty, the boundary can be moved but no advantage
|
||||
// can be made of the move since eden cannot be moved.
|
||||
if (!young_gen()->eden_space()->is_empty()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
bool result = false;
|
||||
const size_t young_gen_available = young_gen()->available_for_expansion();
|
||||
const size_t old_gen_available = old_gen()->available_for_contraction();
|
||||
const size_t alignment = virtual_spaces()->alignment();
|
||||
size_t change_in_bytes = MIN3(young_gen_available,
|
||||
old_gen_available,
|
||||
align_up(expand_in_bytes, alignment));
|
||||
|
||||
if (change_in_bytes == 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
log_before_expansion(false, expand_in_bytes, change_in_bytes, young_gen()->max_size());
|
||||
|
||||
// Move the boundary between the generations down (smaller old gen).
|
||||
MutexLocker x(ExpandHeap_lock);
|
||||
if (virtual_spaces()->adjust_boundary_down(change_in_bytes)) {
|
||||
young_gen()->reset_after_change();
|
||||
old_gen()->reset_after_change();
|
||||
result = true;
|
||||
}
|
||||
|
||||
// The total reserved for the generations should match the sum
|
||||
// of the two even if the boundary is moving.
|
||||
assert(reserved_byte_size() ==
|
||||
old_gen()->max_gen_size() + young_gen()->max_size(),
|
||||
"Space is missing");
|
||||
young_gen()->space_invariants();
|
||||
old_gen()->space_invariants();
|
||||
|
||||
log_after_expansion(false, young_gen()->max_size());
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
// Additional space is needed in the old generation. Try to move the boundary
|
||||
// up to meet the need. Moves boundary up only
|
||||
void AdjoiningGenerations::adjust_boundary_for_old_gen_needs(
|
||||
size_t desired_free_space) {
|
||||
assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");
|
||||
|
||||
// Stress testing.
|
||||
if (PSAdaptiveSizePolicyResizeVirtualSpaceAlot == 1) {
|
||||
MutexLocker x(ExpandHeap_lock);
|
||||
request_old_gen_expansion(virtual_spaces()->alignment() * 3 / 2);
|
||||
}
|
||||
|
||||
// Expand only if the entire generation is already committed.
|
||||
if (old_gen()->virtual_space()->uncommitted_size() == 0) {
|
||||
if (old_gen()->free_in_bytes() < desired_free_space) {
|
||||
MutexLocker x(ExpandHeap_lock);
|
||||
request_old_gen_expansion(desired_free_space);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// See comment on adjust_boundary_for_old_gen_needss().
|
||||
// Adjust boundary down only.
|
||||
void AdjoiningGenerations::adjust_boundary_for_young_gen_needs(size_t eden_size,
|
||||
size_t survivor_size) {
|
||||
|
||||
assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");
|
||||
|
||||
// Stress testing.
|
||||
if (PSAdaptiveSizePolicyResizeVirtualSpaceAlot == 0) {
|
||||
request_young_gen_expansion(virtual_spaces()->alignment() * 3 / 2);
|
||||
eden_size = young_gen()->eden_space()->capacity_in_bytes();
|
||||
}
|
||||
|
||||
// Expand only if the entire generation is already committed.
|
||||
if (young_gen()->virtual_space()->uncommitted_size() == 0) {
|
||||
size_t desired_size = eden_size + 2 * survivor_size;
|
||||
const size_t committed = young_gen()->virtual_space()->committed_size();
|
||||
if (desired_size > committed) {
|
||||
request_young_gen_expansion(desired_size - committed);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
AdjoiningGenerations* AdjoiningGenerations::create_adjoining_generations(ReservedSpace old_young_rs) {
|
||||
if (ParallelArguments::is_heterogeneous_heap() && UseAdaptiveGCBoundary) {
|
||||
return new AdjoiningGenerationsForHeteroHeap(old_young_rs);
|
||||
} else {
|
||||
return new AdjoiningGenerations(old_young_rs);
|
||||
}
|
||||
return new AdjoiningGenerations(old_young_rs);
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -26,9 +26,9 @@
|
||||
#define SHARE_GC_PARALLEL_ADJOININGGENERATIONS_HPP
|
||||
|
||||
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
|
||||
#include "gc/parallel/asPSOldGen.hpp"
|
||||
#include "gc/parallel/asPSYoungGen.hpp"
|
||||
|
||||
class PSOldGen;
|
||||
class PSYoungGen;
|
||||
|
||||
// Contains two generations that both use an AdjoiningVirtualSpaces.
|
||||
// The two generations are adjacent in the reserved space for the
|
||||
@ -41,12 +41,6 @@
|
||||
|
||||
class AdjoiningGenerations : public CHeapObj<mtGC> {
|
||||
friend class VMStructs;
|
||||
private:
|
||||
// Move boundary up to expand old gen. Checks are made to
|
||||
// determine if the move can be done with specified limits.
|
||||
void request_old_gen_expansion(size_t desired_change_in_bytes);
|
||||
// Move boundary down to expand young gen.
|
||||
bool request_young_gen_expansion(size_t desired_change_in_bytes);
|
||||
|
||||
protected:
|
||||
AdjoiningGenerations();
|
||||
@ -66,18 +60,11 @@ class AdjoiningGenerations : public CHeapObj<mtGC> {
|
||||
|
||||
AdjoiningVirtualSpaces* virtual_spaces() { return _virtual_spaces; }
|
||||
|
||||
// Additional space is needed in the old generation. Check
|
||||
// the available space and attempt to move the boundary if more space
|
||||
// is needed. The growth is not guaranteed to occur.
|
||||
void adjust_boundary_for_old_gen_needs(size_t desired_change_in_bytes);
|
||||
// Similarly for a growth of the young generation.
|
||||
void adjust_boundary_for_young_gen_needs(size_t eden_size, size_t survivor_size);
|
||||
|
||||
// Return the total byte size of the reserved space
|
||||
// for the adjoining generations.
|
||||
virtual size_t reserved_byte_size();
|
||||
size_t reserved_byte_size();
|
||||
|
||||
// Return new AdjoiningGenerations instance based on arguments (specifically - whether heap is heterogeneous).
|
||||
// Return new AdjoiningGenerations instance.
|
||||
static AdjoiningGenerations* create_adjoining_generations(ReservedSpace rs);
|
||||
};
|
||||
#endif // SHARE_GC_PARALLEL_ADJOININGGENERATIONS_HPP
|
||||
|
@ -1,261 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
|
||||
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
|
||||
#include "gc/parallel/parallelArguments.hpp"
|
||||
#include "gc/parallel/parallelScavengeHeap.hpp"
|
||||
#include "gc/parallel/psFileBackedVirtualspace.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "logging/logStream.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/ostream.hpp"
|
||||
|
||||
// Create two virtual spaces (HeteroVirtualSpaces), low() on nv-dimm memory, high() on dram.
|
||||
// create ASPSOldGen and ASPSYoungGen the same way as in base class
|
||||
|
||||
AdjoiningGenerationsForHeteroHeap::AdjoiningGenerationsForHeteroHeap(ReservedSpace old_young_rs) :
|
||||
_total_size_limit(ParallelArguments::heap_max_size_bytes()) {
|
||||
size_t init_old_byte_size = OldSize;
|
||||
size_t min_old_byte_size = MinOldSize;
|
||||
size_t max_old_byte_size = MaxOldSize;
|
||||
size_t init_young_byte_size = NewSize;
|
||||
size_t min_young_byte_size = MinNewSize;
|
||||
size_t max_young_byte_size = MaxNewSize;
|
||||
// create HeteroVirtualSpaces which is composed of non-overlapping virtual spaces.
|
||||
HeteroVirtualSpaces* hetero_virtual_spaces = new HeteroVirtualSpaces(old_young_rs, min_old_byte_size,
|
||||
min_young_byte_size, _total_size_limit);
|
||||
|
||||
assert(min_old_byte_size <= init_old_byte_size &&
|
||||
init_old_byte_size <= max_old_byte_size, "Parameter check");
|
||||
assert(min_young_byte_size <= init_young_byte_size &&
|
||||
init_young_byte_size <= max_young_byte_size, "Parameter check");
|
||||
|
||||
assert(UseAdaptiveGCBoundary, "Should be used only when UseAdaptiveGCBoundary is true");
|
||||
|
||||
// Initialize the virtual spaces. Then pass a virtual space to each generation
|
||||
// for initialization of the generation.
|
||||
|
||||
// Does the actual creation of the virtual spaces
|
||||
hetero_virtual_spaces->initialize(max_old_byte_size, init_old_byte_size, init_young_byte_size);
|
||||
|
||||
_young_gen = new ASPSYoungGen(hetero_virtual_spaces->high(),
|
||||
hetero_virtual_spaces->high()->committed_size() /* intial_size */,
|
||||
min_young_byte_size,
|
||||
hetero_virtual_spaces->max_young_size());
|
||||
|
||||
_old_gen = new ASPSOldGen(hetero_virtual_spaces->low(),
|
||||
hetero_virtual_spaces->low()->committed_size() /* intial_size */,
|
||||
min_old_byte_size,
|
||||
hetero_virtual_spaces->max_old_size(), "old", 1);
|
||||
|
||||
young_gen()->initialize_work();
|
||||
assert(young_gen()->reserved().byte_size() <= young_gen()->gen_size_limit(), "Consistency check");
|
||||
assert(old_young_rs.size() >= young_gen()->gen_size_limit(), "Consistency check");
|
||||
|
||||
old_gen()->initialize_work("old", 1);
|
||||
assert(old_gen()->reserved().byte_size() <= old_gen()->gen_size_limit(), "Consistency check");
|
||||
assert(old_young_rs.size() >= old_gen()->gen_size_limit(), "Consistency check");
|
||||
|
||||
_virtual_spaces = hetero_virtual_spaces;
|
||||
}
|
||||
|
||||
size_t AdjoiningGenerationsForHeteroHeap::required_reserved_memory() {
|
||||
// This is the size that young gen can grow to, when AdaptiveGCBoundary is true.
|
||||
size_t max_yg_size = ParallelArguments::heap_max_size_bytes() - MinOldSize;
|
||||
// This is the size that old gen can grow to, when AdaptiveGCBoundary is true.
|
||||
size_t max_old_size = ParallelArguments::heap_max_size_bytes() - MinNewSize;
|
||||
|
||||
return max_yg_size + max_old_size;
|
||||
}
|
||||
|
||||
// We override this function since size of reservedspace here is more than heap size and
|
||||
// callers expect this function to return heap size.
|
||||
size_t AdjoiningGenerationsForHeteroHeap::reserved_byte_size() {
|
||||
return total_size_limit();
|
||||
}
|
||||
|
||||
AdjoiningGenerationsForHeteroHeap::HeteroVirtualSpaces::HeteroVirtualSpaces(ReservedSpace rs, size_t min_old_byte_size, size_t min_yg_byte_size, size_t max_total_size) :
|
||||
AdjoiningVirtualSpaces(rs, min_old_byte_size, min_yg_byte_size, GenAlignment),
|
||||
_max_total_size(max_total_size),
|
||||
_min_old_byte_size(min_old_byte_size),
|
||||
_min_young_byte_size(min_yg_byte_size),
|
||||
_max_old_byte_size(_max_total_size - _min_young_byte_size),
|
||||
_max_young_byte_size(_max_total_size - _min_old_byte_size) {
|
||||
}
|
||||
|
||||
void AdjoiningGenerationsForHeteroHeap::HeteroVirtualSpaces::initialize(size_t initial_old_reserved_size, size_t init_old_byte_size,
|
||||
size_t init_young_byte_size) {
|
||||
|
||||
// This is the reserved space exclusively for old generation.
|
||||
ReservedSpace low_rs = _reserved_space.first_part(_max_old_byte_size, true);
|
||||
// Intially we only assign 'initial_old_reserved_size' of the reserved space to old virtual space.
|
||||
low_rs = low_rs.first_part(initial_old_reserved_size);
|
||||
|
||||
// This is the reserved space exclusively for young generation.
|
||||
ReservedSpace high_rs = _reserved_space.last_part(_max_old_byte_size).first_part(_max_young_byte_size);
|
||||
|
||||
// Carve out 'initial_young_reserved_size' of reserved space.
|
||||
size_t initial_young_reserved_size = _max_total_size - initial_old_reserved_size;
|
||||
high_rs = high_rs.last_part(_max_young_byte_size - initial_young_reserved_size);
|
||||
|
||||
_low = new PSFileBackedVirtualSpace(low_rs, alignment(), AllocateOldGenAt);
|
||||
if (!static_cast <PSFileBackedVirtualSpace*>(_low)->initialize()) {
|
||||
vm_exit_during_initialization("Could not map space for old generation at given AllocateOldGenAt path");
|
||||
}
|
||||
|
||||
if (!_low->expand_by(init_old_byte_size)) {
|
||||
vm_exit_during_initialization("Could not reserve enough space for object heap");
|
||||
}
|
||||
|
||||
_high = new PSVirtualSpaceHighToLow(high_rs, alignment());
|
||||
if (!_high->expand_by(init_young_byte_size)) {
|
||||
vm_exit_during_initialization("Could not reserve enough space for object heap");
|
||||
}
|
||||
}
|
||||
|
||||
// Since the virtual spaces are non-overlapping, there is no boundary as such.
|
||||
// We replicate the same behavior and maintain the same invariants as base class 'AdjoiningVirtualSpaces' by
|
||||
// increasing old generation size and decreasing young generation size by same amount.
|
||||
bool AdjoiningGenerationsForHeteroHeap::HeteroVirtualSpaces::adjust_boundary_up(size_t change_in_bytes) {
|
||||
assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");
|
||||
DEBUG_ONLY(size_t total_size_before = young_vs()->reserved_size() + old_vs()->reserved_size());
|
||||
|
||||
size_t bytes_needed = change_in_bytes;
|
||||
size_t uncommitted_in_old = MIN2(old_vs()->uncommitted_size(), bytes_needed);
|
||||
bool old_expanded = false;
|
||||
|
||||
// 1. Try to expand old within its reserved space.
|
||||
if (uncommitted_in_old != 0) {
|
||||
if (!old_vs()->expand_by(uncommitted_in_old)) {
|
||||
return false;
|
||||
}
|
||||
old_expanded = true;
|
||||
bytes_needed -= uncommitted_in_old;
|
||||
if (bytes_needed == 0) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
size_t bytes_to_add_in_old = 0;
|
||||
|
||||
// 2. Get uncommitted memory from Young virtualspace.
|
||||
size_t young_uncommitted = MIN2(young_vs()->uncommitted_size(), bytes_needed);
|
||||
if (young_uncommitted > 0) {
|
||||
young_vs()->set_reserved(young_vs()->reserved_low_addr() + young_uncommitted,
|
||||
young_vs()->reserved_high_addr(),
|
||||
young_vs()->special());
|
||||
bytes_needed -= young_uncommitted;
|
||||
bytes_to_add_in_old = young_uncommitted;
|
||||
}
|
||||
|
||||
// 3. Get committed memory from Young virtualspace
|
||||
if (bytes_needed > 0) {
|
||||
size_t shrink_size = align_down(bytes_needed, young_vs()->alignment());
|
||||
bool ret = young_vs()->shrink_by(shrink_size);
|
||||
assert(ret, "We should be able to shrink young space");
|
||||
young_vs()->set_reserved(young_vs()->reserved_low_addr() + shrink_size,
|
||||
young_vs()->reserved_high_addr(),
|
||||
young_vs()->special());
|
||||
|
||||
bytes_to_add_in_old += shrink_size;
|
||||
}
|
||||
|
||||
// 4. Increase size of old space
|
||||
old_vs()->set_reserved(old_vs()->reserved_low_addr(),
|
||||
old_vs()->reserved_high_addr() + bytes_to_add_in_old,
|
||||
old_vs()->special());
|
||||
if (!old_vs()->expand_by(bytes_to_add_in_old) && !old_expanded) {
|
||||
return false;
|
||||
}
|
||||
|
||||
DEBUG_ONLY(size_t total_size_after = young_vs()->reserved_size() + old_vs()->reserved_size());
|
||||
assert(total_size_after == total_size_before, "should be equal");
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// Read comment for adjust_boundary_up()
|
||||
// Increase young generation size and decrease old generation size by same amount.
|
||||
bool AdjoiningGenerationsForHeteroHeap::HeteroVirtualSpaces::adjust_boundary_down(size_t change_in_bytes) {
|
||||
assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");
|
||||
DEBUG_ONLY(size_t total_size_before = young_vs()->reserved_size() + old_vs()->reserved_size());
|
||||
|
||||
size_t bytes_needed = change_in_bytes;
|
||||
size_t uncommitted_in_young = MIN2(young_vs()->uncommitted_size(), bytes_needed);
|
||||
bool young_expanded = false;
|
||||
|
||||
// 1. Try to expand old within its reserved space.
|
||||
if (uncommitted_in_young > 0) {
|
||||
if (!young_vs()->expand_by(uncommitted_in_young)) {
|
||||
return false;
|
||||
}
|
||||
young_expanded = true;
|
||||
bytes_needed -= uncommitted_in_young;
|
||||
if (bytes_needed == 0) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
size_t bytes_to_add_in_young = 0;
|
||||
|
||||
// 2. Get uncommitted memory from Old virtualspace.
|
||||
size_t old_uncommitted = MIN2(old_vs()->uncommitted_size(), bytes_needed);
|
||||
if (old_uncommitted > 0) {
|
||||
old_vs()->set_reserved(old_vs()->reserved_low_addr(),
|
||||
old_vs()->reserved_high_addr() - old_uncommitted,
|
||||
old_vs()->special());
|
||||
bytes_needed -= old_uncommitted;
|
||||
bytes_to_add_in_young = old_uncommitted;
|
||||
}
|
||||
|
||||
// 3. Get committed memory from Old virtualspace
|
||||
if (bytes_needed > 0) {
|
||||
size_t shrink_size = align_down(bytes_needed, old_vs()->alignment());
|
||||
bool ret = old_vs()->shrink_by(shrink_size);
|
||||
assert(ret, "We should be able to shrink young space");
|
||||
old_vs()->set_reserved(old_vs()->reserved_low_addr(),
|
||||
old_vs()->reserved_high_addr() - shrink_size,
|
||||
old_vs()->special());
|
||||
|
||||
bytes_to_add_in_young += shrink_size;
|
||||
}
|
||||
|
||||
assert(bytes_to_add_in_young <= change_in_bytes, "should not be more than requested size");
|
||||
// 4. Increase size of young space
|
||||
young_vs()->set_reserved(young_vs()->reserved_low_addr() - bytes_to_add_in_young,
|
||||
young_vs()->reserved_high_addr(),
|
||||
young_vs()->special());
|
||||
if (!young_vs()->expand_by(bytes_to_add_in_young) && !young_expanded) {
|
||||
return false;
|
||||
}
|
||||
|
||||
DEBUG_ONLY(size_t total_size_after = young_vs()->reserved_size() + old_vs()->reserved_size());
|
||||
assert(total_size_after == total_size_before, "should be equal");
|
||||
|
||||
return true;
|
||||
}
|
||||
|
@ -1,84 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_PARALLEL_ADJOININGGENERATIONSFORHETEROHEAP_HPP
|
||||
#define SHARE_GC_PARALLEL_ADJOININGGENERATIONSFORHETEROHEAP_HPP
|
||||
|
||||
#include "gc/parallel/adjoiningGenerations.hpp"
|
||||
|
||||
class AdjoiningGenerationsForHeteroHeap : public AdjoiningGenerations {
|
||||
friend class VMStructs;
|
||||
private:
|
||||
// Maximum total size of the generations. This is equal to the heap size specified by user.
|
||||
// When adjusting young and old generation sizes, we need ensure that sum of the generation sizes does not exceed this.
|
||||
size_t _total_size_limit;
|
||||
|
||||
size_t total_size_limit() const {
|
||||
return _total_size_limit;
|
||||
}
|
||||
|
||||
// HeteroVirtualSpaces creates non-overlapping virtual spaces. Here _low and _high do not share a reserved space, i.e. there is no boundary
|
||||
// separating the two virtual spaces.
|
||||
class HeteroVirtualSpaces : public AdjoiningVirtualSpaces {
|
||||
size_t _max_total_size;
|
||||
size_t _min_old_byte_size;
|
||||
size_t _min_young_byte_size;
|
||||
size_t _max_old_byte_size;
|
||||
size_t _max_young_byte_size;
|
||||
|
||||
// Internally we access the virtual spaces using these methods. It increases readability, since we were not really
|
||||
// dealing with adjoining virtual spaces separated by a boundary as is the case in base class.
|
||||
// Externally they are accessed using low() and high() methods of base class.
|
||||
PSVirtualSpace* young_vs() { return high(); }
|
||||
PSVirtualSpace* old_vs() { return low(); }
|
||||
|
||||
public:
|
||||
HeteroVirtualSpaces(ReservedSpace rs,
|
||||
size_t min_old_byte_size,
|
||||
size_t min_young_byte_size,
|
||||
size_t max_total_size);
|
||||
|
||||
// Increase old generation size and decrease young generation size by same amount
|
||||
bool adjust_boundary_up(size_t size_in_bytes);
|
||||
// Increase young generation size and decrease old generation size by same amount
|
||||
bool adjust_boundary_down(size_t size_in_bytes);
|
||||
|
||||
size_t max_young_size() const { return _max_young_byte_size; }
|
||||
size_t max_old_size() const { return _max_old_byte_size; }
|
||||
|
||||
void initialize(size_t initial_old_reserved_size, size_t init_low_byte_size,
|
||||
size_t init_high_byte_size);
|
||||
};
|
||||
|
||||
public:
|
||||
AdjoiningGenerationsForHeteroHeap(ReservedSpace rs);
|
||||
|
||||
// Given the size policy, calculate the total amount of memory that needs to be reserved.
|
||||
// We need to reserve more memory than Xmx, since we use non-overlapping virtual spaces for the young and old generations.
|
||||
static size_t required_reserved_memory();
|
||||
|
||||
// Return the total byte size of the reserved space
|
||||
size_t reserved_byte_size();
|
||||
};
|
||||
#endif // SHARE_GC_PARALLEL_ADJOININGGENERATIONSFORHETEROHEAP_HPP
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -58,15 +58,3 @@ void AdjoiningVirtualSpaces::initialize(size_t max_low_byte_size,
|
||||
"object heap");
|
||||
}
|
||||
}
|
||||
|
||||
bool AdjoiningVirtualSpaces::adjust_boundary_up(size_t change_in_bytes) {
|
||||
assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");
|
||||
size_t actual_change = low()->expand_into(high(), change_in_bytes);
|
||||
return actual_change != 0;
|
||||
}
|
||||
|
||||
bool AdjoiningVirtualSpaces::adjust_boundary_down(size_t change_in_bytes) {
|
||||
assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");
|
||||
size_t actual_change = high()->expand_into(low(), change_in_bytes);
|
||||
return actual_change != 0;
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -85,18 +85,13 @@ protected:
|
||||
size_t alignment);
|
||||
|
||||
// accessors
|
||||
virtual PSVirtualSpace* high() { return _high; }
|
||||
virtual PSVirtualSpace* low() { return _low; }
|
||||
PSVirtualSpace* high() { return _high; }
|
||||
PSVirtualSpace* low() { return _low; }
|
||||
ReservedSpace reserved_space() { return _reserved_space; }
|
||||
size_t min_low_byte_size() { return _min_low_byte_size; }
|
||||
size_t min_high_byte_size() { return _min_high_byte_size; }
|
||||
size_t alignment() const { return _alignment; }
|
||||
|
||||
// move boundary between the two spaces up
|
||||
virtual bool adjust_boundary_up(size_t size_in_bytes);
|
||||
// and down
|
||||
virtual bool adjust_boundary_down(size_t size_in_bytes);
|
||||
|
||||
// Maximum byte size for the high space.
|
||||
size_t high_byte_size_limit() {
|
||||
return _reserved_space.size() - _min_low_byte_size;
|
||||
@ -108,7 +103,7 @@ protected:
|
||||
|
||||
// Sets the boundaries for the virtual spaces and commits and
|
||||
// initial size;
|
||||
virtual void initialize(size_t max_low_byte_size,
|
||||
void initialize(size_t max_low_byte_size,
|
||||
size_t init_low_byte_size,
|
||||
size_t init_high_byte_size);
|
||||
};
|
||||
|
@ -1,144 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/parallel/asPSOldGen.hpp"
|
||||
#include "gc/parallel/parallelScavengeHeap.hpp"
|
||||
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
|
||||
#include "gc/shared/cardTableBarrierSet.hpp"
|
||||
#include "gc/shared/genArguments.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/java.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
|
||||
// Whereas PSOldGen takes the maximum size of the generation
|
||||
// (which doesn't change in the case of PSOldGen) as a parameter,
|
||||
// ASPSOldGen takes the upper limit on the size of
|
||||
// the generation as a parameter. In ASPSOldGen the
|
||||
// maximum size of the generation can change as the boundary
|
||||
// moves. The "maximum size of the generation" is still a valid
|
||||
// concept since the generation can grow and shrink within that
|
||||
// maximum. There are lots of useful checks that use that
|
||||
// maximum. In PSOldGen the method max_gen_size() returns
|
||||
// _max_gen_size (as set by the PSOldGen constructor). This
|
||||
// is how it always worked. In ASPSOldGen max_gen_size()
|
||||
// returned the size of the reserved space for the generation.
|
||||
// That can change as the boundary moves. Below the limit of
|
||||
// the size of the generation is passed to the PSOldGen constructor
|
||||
// for "_max_gen_size" (have to pass something) but it is not used later.
|
||||
//
|
||||
ASPSOldGen::ASPSOldGen(size_t initial_size,
|
||||
size_t min_size,
|
||||
size_t size_limit,
|
||||
const char* gen_name,
|
||||
int level) :
|
||||
PSOldGen(initial_size, min_size, size_limit, gen_name, level),
|
||||
_gen_size_limit(size_limit)
|
||||
{}
|
||||
|
||||
ASPSOldGen::ASPSOldGen(PSVirtualSpace* vs,
|
||||
size_t initial_size,
|
||||
size_t min_size,
|
||||
size_t size_limit,
|
||||
const char* gen_name,
|
||||
int level) :
|
||||
PSOldGen(initial_size, min_size, size_limit, gen_name, level),
|
||||
_gen_size_limit(size_limit)
|
||||
{
|
||||
_virtual_space = vs;
|
||||
}
|
||||
|
||||
void ASPSOldGen::initialize_work(const char* perf_data_name, int level) {
|
||||
PSOldGen::initialize_work(perf_data_name, level);
|
||||
|
||||
// The old gen can grow to gen_size_limit(). _reserve reflects only
|
||||
// the current maximum that can be committed.
|
||||
assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");
|
||||
|
||||
initialize_performance_counters(perf_data_name, level);
|
||||
}
|
||||
|
||||
void ASPSOldGen::reset_after_change() {
|
||||
_reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
|
||||
(HeapWord*)virtual_space()->high_boundary());
|
||||
post_resize();
|
||||
}
|
||||
|
||||
|
||||
size_t ASPSOldGen::available_for_expansion() {
|
||||
assert(virtual_space()->is_aligned(gen_size_limit()), "not aligned");
|
||||
assert(gen_size_limit() >= virtual_space()->committed_size(), "bad gen size");
|
||||
|
||||
size_t result = gen_size_limit() - virtual_space()->committed_size();
|
||||
size_t result_aligned = align_down(result, GenAlignment);
|
||||
return result_aligned;
|
||||
}
|
||||
|
||||
size_t ASPSOldGen::available_for_contraction() {
|
||||
size_t uncommitted_bytes = virtual_space()->uncommitted_size();
|
||||
if (uncommitted_bytes != 0) {
|
||||
return uncommitted_bytes;
|
||||
}
|
||||
|
||||
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
|
||||
PSAdaptiveSizePolicy* policy = heap->size_policy();
|
||||
const size_t working_size =
|
||||
used_in_bytes() + (size_t) policy->avg_promoted()->padded_average();
|
||||
const size_t working_aligned = align_up(working_size, GenAlignment);
|
||||
const size_t working_or_min = MAX2(working_aligned, min_gen_size());
|
||||
if (working_or_min > reserved().byte_size()) {
|
||||
// If the used or minimum gen size (aligned up) is greater
|
||||
// than the total reserved size, then the space available
|
||||
// for contraction should (after proper alignment) be 0
|
||||
return 0;
|
||||
}
|
||||
const size_t max_contraction =
|
||||
reserved().byte_size() - working_or_min;
|
||||
|
||||
// Use the "increment" fraction instead of the "decrement" fraction
|
||||
// to allow the other gen to expand more aggressively. The
|
||||
// "decrement" fraction is conservative because its intent is to
|
||||
// only reduce the footprint.
|
||||
|
||||
size_t result = policy->promo_increment_aligned_down(max_contraction);
|
||||
// Also adjust for inter-generational alignment
|
||||
size_t result_aligned = align_down(result, GenAlignment);
|
||||
|
||||
Log(gc, ergo) log;
|
||||
if (log.is_trace()) {
|
||||
size_t working_promoted = (size_t) policy->avg_promoted()->padded_average();
|
||||
size_t promo_increment = policy->promo_increment(max_contraction);
|
||||
log.trace("ASPSOldGen::available_for_contraction: " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, result_aligned/K, result_aligned);
|
||||
log.trace(" reserved().byte_size() " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, reserved().byte_size()/K, reserved().byte_size());
|
||||
log.trace(" padded promoted " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, working_promoted/K, working_promoted);
|
||||
log.trace(" used " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, used_in_bytes()/K, used_in_bytes());
|
||||
log.trace(" min_gen_size() " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, min_gen_size()/K, min_gen_size());
|
||||
log.trace(" max_contraction " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, max_contraction/K, max_contraction);
|
||||
log.trace(" without alignment " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, promo_increment/K, promo_increment);
|
||||
log.trace(" alignment " SIZE_FORMAT_HEX, GenAlignment);
|
||||
}
|
||||
|
||||
assert(result_aligned <= max_contraction, "arithmetic is wrong");
|
||||
return result_aligned;
|
||||
}
|
@ -1,71 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_PARALLEL_ASPSOLDGEN_HPP
|
||||
#define SHARE_GC_PARALLEL_ASPSOLDGEN_HPP
|
||||
|
||||
#include "gc/parallel/mutableSpace.hpp"
|
||||
#include "gc/parallel/objectStartArray.hpp"
|
||||
#include "gc/parallel/psOldGen.hpp"
|
||||
#include "gc/parallel/psVirtualspace.hpp"
|
||||
#include "gc/parallel/spaceCounters.hpp"
|
||||
#include "gc/shared/generationCounters.hpp"
|
||||
|
||||
class ASPSOldGen : public PSOldGen {
|
||||
friend class VMStructs;
|
||||
size_t _gen_size_limit; // Largest size the generation's reserved size
|
||||
// can grow.
|
||||
public:
|
||||
ASPSOldGen(size_t initial_byte_size,
|
||||
size_t minimum_byte_size,
|
||||
size_t byte_size_limit,
|
||||
const char* gen_name, int level);
|
||||
ASPSOldGen(PSVirtualSpace* vs,
|
||||
size_t initial_byte_size,
|
||||
size_t minimum_byte_size,
|
||||
size_t byte_size_limit,
|
||||
const char* gen_name, int level);
|
||||
size_t gen_size_limit() { return _gen_size_limit; }
|
||||
size_t max_gen_size() { return _reserved.byte_size(); }
|
||||
void set_gen_size_limit(size_t v) { _gen_size_limit = v; }
|
||||
|
||||
virtual void initialize_work(const char* perf_data_name, int level);
|
||||
|
||||
// After a shrink or expand reset the generation
|
||||
void reset_after_change();
|
||||
|
||||
// Return number of bytes that the virtual space in the generation is willing
|
||||
// to expand or contract. The results from these methods should feed into the
|
||||
// decisions about adjusting the virtual space.
|
||||
size_t available_for_expansion();
|
||||
size_t available_for_contraction();
|
||||
|
||||
// Accessors
|
||||
void set_reserved(MemRegion v) { _reserved = v; }
|
||||
|
||||
// Debugging support
|
||||
virtual const char* short_name() const { return "ASPSOldGen"; }
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_PARALLEL_ASPSOLDGEN_HPP
|
@ -1,510 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/parallel/asPSYoungGen.hpp"
|
||||
#include "gc/parallel/parallelScavengeHeap.hpp"
|
||||
#include "gc/parallel/psScavenge.inline.hpp"
|
||||
#include "gc/parallel/psYoungGen.hpp"
|
||||
#include "gc/shared/gcUtil.hpp"
|
||||
#include "gc/shared/genArguments.hpp"
|
||||
#include "gc/shared/spaceDecorator.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/java.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
|
||||
ASPSYoungGen::ASPSYoungGen(size_t init_byte_size,
|
||||
size_t minimum_byte_size,
|
||||
size_t byte_size_limit) :
|
||||
PSYoungGen(init_byte_size, minimum_byte_size, byte_size_limit),
|
||||
_gen_size_limit(byte_size_limit) {
|
||||
}
|
||||
|
||||
|
||||
ASPSYoungGen::ASPSYoungGen(PSVirtualSpace* vs,
|
||||
size_t init_byte_size,
|
||||
size_t minimum_byte_size,
|
||||
size_t byte_size_limit) :
|
||||
//PSYoungGen(init_byte_size, minimum_byte_size, byte_size_limit),
|
||||
PSYoungGen(vs->committed_size(), minimum_byte_size, byte_size_limit),
|
||||
_gen_size_limit(byte_size_limit) {
|
||||
|
||||
assert(vs->committed_size() == init_byte_size, "Cannot replace with");
|
||||
|
||||
_virtual_space = vs;
|
||||
}
|
||||
|
||||
void ASPSYoungGen::initialize_virtual_space(ReservedSpace rs,
|
||||
size_t alignment) {
|
||||
assert(_init_gen_size != 0, "Should have a finite size");
|
||||
_virtual_space = new PSVirtualSpaceHighToLow(rs, alignment);
|
||||
if (!_virtual_space->expand_by(_init_gen_size)) {
|
||||
vm_exit_during_initialization("Could not reserve enough space for object heap");
|
||||
}
|
||||
}
|
||||
|
||||
void ASPSYoungGen::initialize(ReservedSpace rs, size_t alignment) {
|
||||
initialize_virtual_space(rs, alignment);
|
||||
initialize_work();
|
||||
}
|
||||
|
||||
size_t ASPSYoungGen::available_for_expansion() {
|
||||
size_t current_committed_size = virtual_space()->committed_size();
|
||||
assert((gen_size_limit() >= current_committed_size),
|
||||
"generation size limit is wrong");
|
||||
|
||||
size_t result = gen_size_limit() - current_committed_size;
|
||||
size_t result_aligned = align_down(result, GenAlignment);
|
||||
return result_aligned;
|
||||
}
|
||||
|
||||
// Return the number of bytes the young gen is willing give up.
|
||||
//
|
||||
// Future implementations could check the survivors and if to_space is in the
|
||||
// right place (below from_space), take a chunk from to_space.
|
||||
size_t ASPSYoungGen::available_for_contraction() {
|
||||
size_t uncommitted_bytes = virtual_space()->uncommitted_size();
|
||||
if (uncommitted_bytes != 0) {
|
||||
return uncommitted_bytes;
|
||||
}
|
||||
|
||||
if (eden_space()->is_empty()) {
|
||||
// Respect the minimum size for eden and for the young gen as a whole.
|
||||
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
|
||||
const size_t eden_alignment = SpaceAlignment;
|
||||
|
||||
assert(eden_space()->capacity_in_bytes() >= eden_alignment,
|
||||
"Alignment is wrong");
|
||||
size_t eden_avail = eden_space()->capacity_in_bytes() - eden_alignment;
|
||||
eden_avail = align_down(eden_avail, GenAlignment);
|
||||
|
||||
assert(virtual_space()->committed_size() >= min_gen_size(),
|
||||
"minimum gen size is wrong");
|
||||
size_t gen_avail = virtual_space()->committed_size() - min_gen_size();
|
||||
assert(virtual_space()->is_aligned(gen_avail), "not aligned");
|
||||
|
||||
const size_t max_contraction = MIN2(eden_avail, gen_avail);
|
||||
// See comment for ASPSOldGen::available_for_contraction()
|
||||
// for reasons the "increment" fraction is used.
|
||||
PSAdaptiveSizePolicy* policy = heap->size_policy();
|
||||
size_t result = policy->eden_increment_aligned_down(max_contraction);
|
||||
size_t result_aligned = align_down(result, GenAlignment);
|
||||
|
||||
log_trace(gc, ergo)("ASPSYoungGen::available_for_contraction: " SIZE_FORMAT " K", result_aligned/K);
|
||||
log_trace(gc, ergo)(" max_contraction " SIZE_FORMAT " K", max_contraction/K);
|
||||
log_trace(gc, ergo)(" eden_avail " SIZE_FORMAT " K", eden_avail/K);
|
||||
log_trace(gc, ergo)(" gen_avail " SIZE_FORMAT " K", gen_avail/K);
|
||||
|
||||
return result_aligned;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// The current implementation only considers to the end of eden.
|
||||
// If to_space is below from_space, to_space is not considered.
|
||||
// to_space can be.
|
||||
size_t ASPSYoungGen::available_to_live() {
|
||||
const size_t alignment = SpaceAlignment;
|
||||
|
||||
// Include any space that is committed but is not in eden.
|
||||
size_t available = pointer_delta(eden_space()->bottom(),
|
||||
virtual_space()->low(),
|
||||
sizeof(char));
|
||||
|
||||
const size_t eden_capacity = eden_space()->capacity_in_bytes();
|
||||
if (eden_space()->is_empty() && eden_capacity > alignment) {
|
||||
available += eden_capacity - alignment;
|
||||
}
|
||||
return available;
|
||||
}
|
||||
|
||||
// Similar to PSYoungGen::resize_generation() but
|
||||
// allows sum of eden_size and 2 * survivor_size to exceed _max_gen_size
|
||||
// expands at the low end of the virtual space
|
||||
// moves the boundary between the generations in order to expand
|
||||
// some additional diagnostics
|
||||
// If no additional changes are required, this can be deleted
|
||||
// and the changes factored back into PSYoungGen::resize_generation().
|
||||
bool ASPSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
|
||||
const size_t alignment = virtual_space()->alignment();
|
||||
size_t orig_size = virtual_space()->committed_size();
|
||||
bool size_changed = false;
|
||||
|
||||
// There used to be a guarantee here that
|
||||
// (eden_size + 2*survivor_size) <= _max_gen_size
|
||||
// This requirement is enforced by the calculation of desired_size
|
||||
// below. It may not be true on entry since the size of the
|
||||
// eden_size is no bounded by the generation size.
|
||||
|
||||
assert(max_size() == reserved().byte_size(), "max gen size problem?");
|
||||
assert(min_gen_size() <= orig_size && orig_size <= max_size(),
|
||||
"just checking");
|
||||
|
||||
// Adjust new generation size
|
||||
const size_t eden_plus_survivors =
|
||||
align_up(eden_size + 2 * survivor_size, alignment);
|
||||
size_t desired_size = clamp(eden_plus_survivors, min_gen_size(), gen_size_limit());
|
||||
assert(desired_size <= gen_size_limit(), "just checking");
|
||||
|
||||
if (desired_size > orig_size) {
|
||||
// Grow the generation
|
||||
size_t change = desired_size - orig_size;
|
||||
HeapWord* prev_low = (HeapWord*) virtual_space()->low();
|
||||
if (!virtual_space()->expand_by(change)) {
|
||||
return false;
|
||||
}
|
||||
if (ZapUnusedHeapArea) {
|
||||
// Mangle newly committed space immediately because it
|
||||
// can be done here more simply that after the new
|
||||
// spaces have been computed.
|
||||
HeapWord* new_low = (HeapWord*) virtual_space()->low();
|
||||
assert(new_low < prev_low, "Did not grow");
|
||||
|
||||
MemRegion mangle_region(new_low, prev_low);
|
||||
SpaceMangler::mangle_region(mangle_region);
|
||||
}
|
||||
size_changed = true;
|
||||
} else if (desired_size < orig_size) {
|
||||
size_t desired_change = orig_size - desired_size;
|
||||
|
||||
// How much is available for shrinking.
|
||||
size_t available_bytes = limit_gen_shrink(desired_change);
|
||||
size_t change = MIN2(desired_change, available_bytes);
|
||||
virtual_space()->shrink_by(change);
|
||||
size_changed = true;
|
||||
} else {
|
||||
if (orig_size == gen_size_limit()) {
|
||||
log_trace(gc)("ASPSYoung generation size at maximum: " SIZE_FORMAT "K", orig_size/K);
|
||||
} else if (orig_size == min_gen_size()) {
|
||||
log_trace(gc)("ASPSYoung generation size at minium: " SIZE_FORMAT "K", orig_size/K);
|
||||
}
|
||||
}
|
||||
|
||||
if (size_changed) {
|
||||
reset_after_change();
|
||||
log_trace(gc)("ASPSYoung generation size changed: " SIZE_FORMAT "K->" SIZE_FORMAT "K",
|
||||
orig_size/K, virtual_space()->committed_size()/K);
|
||||
}
|
||||
|
||||
guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
|
||||
virtual_space()->committed_size() == max_size(), "Sanity");
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// Similar to PSYoungGen::resize_spaces() but
|
||||
// eden always starts at the low end of the committed virtual space
|
||||
// current implementation does not allow holes between the spaces
|
||||
// _young_generation_boundary has to be reset because it changes.
|
||||
// so additional verification
|
||||
|
||||
void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
|
||||
size_t requested_survivor_size) {
|
||||
assert(UseAdaptiveSizePolicy, "sanity check");
|
||||
assert(requested_eden_size > 0 && requested_survivor_size > 0,
|
||||
"just checking");
|
||||
|
||||
space_invariants();
|
||||
|
||||
// We require eden and to space to be empty
|
||||
if ((!eden_space()->is_empty()) || (!to_space()->is_empty())) {
|
||||
return;
|
||||
}
|
||||
|
||||
log_trace(gc, ergo)("PSYoungGen::resize_spaces(requested_eden_size: "
|
||||
SIZE_FORMAT
|
||||
", requested_survivor_size: " SIZE_FORMAT ")",
|
||||
requested_eden_size, requested_survivor_size);
|
||||
log_trace(gc, ergo)(" eden: [" PTR_FORMAT ".." PTR_FORMAT ") "
|
||||
SIZE_FORMAT,
|
||||
p2i(eden_space()->bottom()),
|
||||
p2i(eden_space()->end()),
|
||||
pointer_delta(eden_space()->end(), eden_space()->bottom(), sizeof(char)));
|
||||
log_trace(gc, ergo)(" from: [" PTR_FORMAT ".." PTR_FORMAT ") "
|
||||
SIZE_FORMAT,
|
||||
p2i(from_space()->bottom()),
|
||||
p2i(from_space()->end()),
|
||||
pointer_delta(from_space()->end(), from_space()->bottom(), sizeof(char)));
|
||||
log_trace(gc, ergo)(" to: [" PTR_FORMAT ".." PTR_FORMAT ") "
|
||||
SIZE_FORMAT,
|
||||
p2i(to_space()->bottom()),
|
||||
p2i(to_space()->end()),
|
||||
pointer_delta( to_space()->end(), to_space()->bottom(), sizeof(char)));
|
||||
|
||||
  // There's nothing to do if the new sizes are the same as the current
  if (requested_survivor_size == to_space()->capacity_in_bytes() &&
      requested_survivor_size == from_space()->capacity_in_bytes() &&
      requested_eden_size == eden_space()->capacity_in_bytes()) {
    log_trace(gc, ergo)(" capacities are the right sizes, returning");
    return;
  }

  char* eden_start = (char*)virtual_space()->low();
  char* eden_end = (char*)eden_space()->end();
  char* from_start = (char*)from_space()->bottom();
  char* from_end = (char*)from_space()->end();
  char* to_start = (char*)to_space()->bottom();
  char* to_end = (char*)to_space()->end();

  assert(eden_start < from_start, "Cannot push into from_space");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  const bool maintain_minimum =
    (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();

  bool eden_from_to_order = from_start < to_start;
  // Check whether from space is below to space
  if (eden_from_to_order) {
    // Eden, from, to

    log_trace(gc, ergo)(" Eden, from, to:");

    // Set eden
    // "requested_eden_size" is a goal for the size of eden
    // and may not be attainable. "eden_size" below is
    // calculated based on the location of from-space and
    // the goal for the size of eden. from-space is
    // fixed in place because it contains live data.
    // The calculation is done this way to avoid 32bit
    // overflow (i.e., eden_start + requested_eden_size
    // may be too large for representation in 32bits).
    size_t eden_size;
    if (maintain_minimum) {
      // Only make eden larger than the requested size if
      // the minimum size of the generation has to be maintained.
      // This could be done in general but policy at a higher
      // level is determining a requested size for eden and that
      // should be honored unless there is a fundamental reason.
      eden_size = pointer_delta(from_start,
                                eden_start,
                                sizeof(char));
    } else {
      eden_size = MIN2(requested_eden_size,
                       pointer_delta(from_start, eden_start, sizeof(char)));
    }

    eden_end = eden_start + eden_size;
    assert(eden_end >= eden_start, "addition overflowed");

    // To may resize into from space as long as it is clear of live data.
    // From space must remain page aligned, though, so we need to do some
    // extra calculations.

    // First calculate an optimal to-space
    to_end = (char*)virtual_space()->high();
    to_start = (char*)pointer_delta(to_end,
                                    (char*)requested_survivor_size,
                                    sizeof(char));

    // Does the optimal to-space overlap from-space?
    if (to_start < (char*)from_space()->end()) {
      // Calculate the minimum offset possible for from_end
      size_t from_size =
        pointer_delta(from_space()->top(), from_start, sizeof(char));

      // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
      if (from_size == 0) {
        from_size = SpaceAlignment;
      } else {
        from_size = align_up(from_size, SpaceAlignment);
      }

      from_end = from_start + from_size;
      assert(from_end > from_start, "addition overflow or from_size problem");

      guarantee(from_end <= (char*)from_space()->end(),
                "from_end moved to the right");

      // Now update to_start with the new from_end
      to_start = MAX2(from_end, to_start);
    }

    guarantee(to_start != to_end, "to space is zero sized");

    log_trace(gc, ergo)(" [eden_start .. eden_end): "
                        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(eden_start),
                        p2i(eden_end),
                        pointer_delta(eden_end, eden_start, sizeof(char)));
    log_trace(gc, ergo)(" [from_start .. from_end): "
                        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(from_start),
                        p2i(from_end),
                        pointer_delta(from_end, from_start, sizeof(char)));
    log_trace(gc, ergo)(" [ to_start .. to_end): "
                        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(to_start),
                        p2i(to_end),
                        pointer_delta( to_end, to_start, sizeof(char)));
  } else {
    // Eden, to, from
    log_trace(gc, ergo)(" Eden, to, from:");

    // To space gets priority over eden resizing. Note that we position
    // to space as if we were able to resize from space, even though from
    // space is not modified.
    // Giving eden priority was tried and gave poorer performance.
    to_end = (char*)pointer_delta(virtual_space()->high(),
                                  (char*)requested_survivor_size,
                                  sizeof(char));
    to_end = MIN2(to_end, from_start);
    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
                                    sizeof(char));
    // if the space sizes are to be increased by several times then
    // 'to_start' will point beyond the young generation. In this case
    // 'to_start' should be adjusted.
    to_start = MAX2(to_start, eden_start + SpaceAlignment);

    // Compute how big eden can be, then adjust end.
    // See comments above on calculating eden_end.
    size_t eden_size;
    if (maintain_minimum) {
      eden_size = pointer_delta(to_start, eden_start, sizeof(char));
    } else {
      eden_size = MIN2(requested_eden_size,
                       pointer_delta(to_start, eden_start, sizeof(char)));
    }
    eden_end = eden_start + eden_size;
    assert(eden_end >= eden_start, "addition overflowed");

    // Don't let eden shrink down to 0 or less.
    eden_end = MAX2(eden_end, eden_start + SpaceAlignment);
    to_start = MAX2(to_start, eden_end);

    log_trace(gc, ergo)(" [eden_start .. eden_end): "
                        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(eden_start),
                        p2i(eden_end),
                        pointer_delta(eden_end, eden_start, sizeof(char)));
    log_trace(gc, ergo)(" [ to_start .. to_end): "
                        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(to_start),
                        p2i(to_end),
                        pointer_delta( to_end, to_start, sizeof(char)));
    log_trace(gc, ergo)(" [from_start .. from_end): "
                        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(from_start),
                        p2i(from_end),
                        pointer_delta(from_end, from_start, sizeof(char)));
  }

  guarantee((HeapWord*)from_start <= from_space()->bottom(),
            "from start moved to the right");
  guarantee((HeapWord*)from_end >= from_space()->top(),
            "from end moved into live data");
  assert(is_object_aligned(eden_start), "checking alignment");
  assert(is_object_aligned(from_start), "checking alignment");
  assert(is_object_aligned(to_start), "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
  MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);

  // Let's make sure the call to initialize doesn't reset "top"!
  DEBUG_ONLY(HeapWord* old_from_top = from_space()->top();)

  // For logging block below
  size_t old_from = from_space()->capacity_in_bytes();
  size_t old_to = to_space()->capacity_in_bytes();

  if (ZapUnusedHeapArea) {
    // NUMA is a special case because a numa space is not mangled
    // in order to not prematurely bind its address to memory to
    // the wrong memory (i.e., don't want the GC thread to first
    // touch the memory). The survivor spaces are not numa
    // spaces and are mangled.
    if (UseNUMA) {
      if (eden_from_to_order) {
        mangle_survivors(from_space(), fromMR, to_space(), toMR);
      } else {
        mangle_survivors(to_space(), toMR, from_space(), fromMR);
      }
    }

    // If not mangling the spaces, do some checking to verify that
    // the spaces are already mangled.
    // The spaces should be correctly mangled at this point so
    // do some checking here. Note that they are not being mangled
    // in the calls to initialize().
    // Must check mangling before the spaces are reshaped. Otherwise,
    // the bottom or end of one space may have moved into an area
    // covered by another space and a failure of the check may
    // not correctly indicate which space is not properly mangled.

    HeapWord* limit = (HeapWord*) virtual_space()->high();
    eden_space()->check_mangled_unused_area(limit);
    from_space()->check_mangled_unused_area(limit);
    to_space()->check_mangled_unused_area(limit);
  }
  // When an existing space is being initialized, it is not
  // mangled because the space has been previously mangled.
  eden_space()->initialize(edenMR,
                           SpaceDecorator::Clear,
                           SpaceDecorator::DontMangle);
  to_space()->initialize(toMR,
                         SpaceDecorator::Clear,
                         SpaceDecorator::DontMangle);
  from_space()->initialize(fromMR,
                           SpaceDecorator::DontClear,
                           SpaceDecorator::DontMangle);

  PSScavenge::set_young_generation_boundary(eden_space()->bottom());

  assert(from_space()->top() == old_from_top, "from top changed!");

  log_trace(gc, ergo)("AdaptiveSizePolicy::survivor space sizes: "
                      "collection: %d "
                      "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
                      "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
                      ParallelScavengeHeap::heap()->total_collections(),
                      old_from, old_to,
                      from_space()->capacity_in_bytes(),
                      to_space()->capacity_in_bytes());

  space_invariants();
}

void ASPSYoungGen::reset_after_change() {
  assert_locked_or_safepoint(Heap_lock);

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());
  PSScavenge::set_subject_to_discovery_span(_reserved);

  HeapWord* new_eden_bottom = (HeapWord*)virtual_space()->low();
  HeapWord* eden_bottom = eden_space()->bottom();
  if (new_eden_bottom != eden_bottom) {
    MemRegion eden_mr(new_eden_bottom, eden_space()->end());
    eden_space()->initialize(eden_mr,
                             SpaceDecorator::Clear,
                             SpaceDecorator::Mangle);
    PSScavenge::set_young_generation_boundary(eden_space()->bottom());
  }
  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  ParallelScavengeHeap::heap()->barrier_set()->card_table()->resize_covered_region(cmr);

  space_invariants();
}
@ -1,77 +0,0 @@
/*
 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_PARALLEL_ASPSYOUNGGEN_HPP
#define SHARE_GC_PARALLEL_ASPSYOUNGGEN_HPP

#include "gc/parallel/mutableSpace.hpp"
#include "gc/parallel/objectStartArray.hpp"
#include "gc/parallel/psVirtualspace.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/parallel/spaceCounters.hpp"
#include "gc/shared/generationCounters.hpp"
#include "gc/shared/spaceDecorator.hpp"

class ASPSYoungGen : public PSYoungGen {
  friend class VMStructs;
 private:
  size_t _gen_size_limit;
 protected:
  virtual size_t available_to_live();

 public:
  ASPSYoungGen(size_t initial_byte_size,
               size_t minimum_byte_size,
               size_t byte_size_limit);

  ASPSYoungGen(PSVirtualSpace* vs,
               size_t initial_byte_size,
               size_t minimum_byte_size,
               size_t byte_size_limit);

  void initialize(ReservedSpace rs, size_t alignment);
  void initialize_virtual_space(ReservedSpace rs, size_t alignment);

  size_t gen_size_limit() { return _gen_size_limit; }
  void set_gen_size_limit(size_t v) { _gen_size_limit = v; }

  bool resize_generation(size_t eden_size, size_t survivor_size);
  void resize_spaces(size_t eden_size, size_t survivor_size);

  // Adjust eden to be consistent with the virtual space.
  void reset_after_change();

  // Adaptive size policy support
  // Return number of bytes that the generation can expand/contract.
  size_t available_for_expansion();
  size_t available_for_contraction();

  // Accessors
  void set_reserved(MemRegion v) { _reserved = v; }

  // Printing support
  virtual const char* short_name() const { return "ASPSYoungGen"; }
};

#endif // SHARE_GC_PARALLEL_ASPSYOUNGGEN_HPP
@ -194,18 +194,7 @@ bool ParallelArguments::is_heterogeneous_heap() {
}

size_t ParallelArguments::heap_reserved_size_bytes() {
  if (!is_heterogeneous_heap() || !UseAdaptiveGCBoundary) {
    return MaxHeapSize;
  }

  // Heterogeneous heap and adaptive size gc boundary

  // This is the size that young gen can grow to, when UseAdaptiveGCBoundary is true.
  size_t max_yg_size = MaxHeapSize - MinOldSize;
  // This is the size that old gen can grow to, when UseAdaptiveGCBoundary is true.
  size_t max_old_size = MaxHeapSize - MinNewSize;

  return max_yg_size + max_old_size;
  return MaxHeapSize;
}

size_t ParallelArguments::heap_max_size_bytes() {
@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/adjoiningGenerations.hpp"
#include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
@ -107,10 +106,10 @@ jint ParallelScavengeHeap::initialize() {
                             GCTimeRatio
                             );

  assert(ParallelArguments::is_heterogeneous_heap() || !UseAdaptiveGCBoundary ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  assert(ParallelArguments::is_heterogeneous_heap() ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 2 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);
@ -689,35 +688,13 @@ PSCardTable* ParallelScavengeHeap::card_table() {
  return static_cast<PSCardTable*>(barrier_set()->card_table());
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return; // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

                                            size_t survivor_size) {
  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return; // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2002, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -62,8 +62,7 @@ PSAdaptiveSizePolicy::PSAdaptiveSizePolicy(size_t init_eden_size,
     _change_young_gen_for_maj_pauses(0),
     _old_gen_policy_is_ready(false),
     _young_gen_size_increment_supplement(YoungGenerationSizeSupplement),
     _old_gen_size_increment_supplement(TenuredGenerationSizeSupplement),
     _bytes_absorbed_from_eden(0)
     _old_gen_size_increment_supplement(TenuredGenerationSizeSupplement)
{
  // Start the timers
  _major_timer.start();
@ -915,16 +914,6 @@ size_t PSAdaptiveSizePolicy::eden_increment(size_t cur_eden) {
  return eden_increment(cur_eden, YoungGenerationSizeIncrement);
}

size_t PSAdaptiveSizePolicy::eden_increment_aligned_up(size_t cur_eden) {
  size_t result = eden_increment(cur_eden, YoungGenerationSizeIncrement);
  return align_up(result, _space_alignment);
}

size_t PSAdaptiveSizePolicy::eden_increment_aligned_down(size_t cur_eden) {
  size_t result = eden_increment(cur_eden);
  return align_down(result, _space_alignment);
}

size_t PSAdaptiveSizePolicy::eden_increment_with_supplement_aligned_up(
  size_t cur_eden) {
  size_t result = eden_increment(cur_eden,
@ -954,16 +943,6 @@ size_t PSAdaptiveSizePolicy::promo_increment(size_t cur_promo) {
  return promo_increment(cur_promo, TenuredGenerationSizeIncrement);
}

size_t PSAdaptiveSizePolicy::promo_increment_aligned_up(size_t cur_promo) {
  size_t result = promo_increment(cur_promo, TenuredGenerationSizeIncrement);
  return align_up(result, _space_alignment);
}

size_t PSAdaptiveSizePolicy::promo_increment_aligned_down(size_t cur_promo) {
  size_t result = promo_increment(cur_promo, TenuredGenerationSizeIncrement);
  return align_down(result, _space_alignment);
}

size_t PSAdaptiveSizePolicy::promo_increment_with_supplement_aligned_up(
  size_t cur_promo) {
  size_t result = promo_increment(cur_promo,
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2002, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -117,10 +117,6 @@ class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
  uint _young_gen_size_increment_supplement;
  uint _old_gen_size_increment_supplement;

  // The number of bytes absorbed from eden into the old gen by moving the
  // boundary over live data.
  size_t _bytes_absorbed_from_eden;

 private:

  // Accessors
@ -193,12 +189,6 @@ class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
  virtual GCPolicyKind kind() const { return _gc_ps_adaptive_size_policy; }

 public:
  // Use by ASPSYoungGen and ASPSOldGen to limit boundary moving.
  size_t eden_increment_aligned_up(size_t cur_eden);
  size_t eden_increment_aligned_down(size_t cur_eden);
  size_t promo_increment_aligned_up(size_t cur_promo);
  size_t promo_increment_aligned_down(size_t cur_promo);

  virtual size_t eden_increment(size_t cur_eden);
  virtual size_t promo_increment(size_t cur_promo);

@ -374,13 +364,6 @@ class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
    return _live_at_last_full_gc;
  }

  size_t bytes_absorbed_from_eden() const { return _bytes_absorbed_from_eden; }
  void reset_bytes_absorbed_from_eden() { _bytes_absorbed_from_eden = 0; }

  void set_bytes_absorbed_from_eden(size_t val) {
    _bytes_absorbed_from_eden = val;
  }

  // Update averages that are always used (even
  // if adaptive sizing is turned off).
  void update_averages(bool is_survivor_overflow,
@ -55,10 +55,6 @@ PSGCAdaptivePolicyCounters::PSGCAdaptivePolicyCounters(const char* name_arg,
  _old_capacity = PerfDataManager::create_variable(SUN_GC, cname,
    PerfData::U_Bytes, (jlong) InitialHeapSize, CHECK);

  cname = PerfDataManager::counter_name(name_space(), "boundaryMoved");
  _boundary_moved = PerfDataManager::create_variable(SUN_GC, cname,
    PerfData::U_Bytes, (jlong) 0, CHECK);

  cname = PerfDataManager::counter_name(name_space(), "avgPromotedAvg");
  _avg_promoted_avg_counter =
    PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
@ -53,7 +53,6 @@ class PSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
  PerfVariable* _avg_base_footprint;
  PerfVariable* _live_at_last_full_gc_counter;
  PerfVariable* _old_capacity;
  PerfVariable* _boundary_moved;

  PerfVariable* _change_old_gen_for_min_pauses;
  PerfVariable* _change_young_gen_for_maj_pauses_counter;
@ -85,9 +84,6 @@ class PSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
  inline void update_old_promo_size(size_t old_size) {
    _old_promo_size->set_value(old_size);
  }
  inline void update_boundary_moved(int size_in_bytes) {
    _boundary_moved->set_value(size_in_bytes);
  }
  inline void update_avg_promoted_avg() {
    _avg_promoted_avg_counter->set_value(
      (jlong)(ps_size_policy()->avg_promoted()->average())
@ -1039,10 +1039,6 @@ void PSParallelCompact::post_compact()

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  bool eden_empty = eden_space->is_empty();
  if (!eden_empty) {
    eden_empty = absorb_live_data_from_eden(heap->size_policy(),
      heap->young_gen(), heap->old_gen());
  }

  // Update heap occupancy information which is used as input to the soft ref
  // clearing policy at the next gc.
@ -1982,95 +1978,6 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
  return true;
}

bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                                   PSYoungGen* young_gen,
                                                   PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  // We also return false when it's a heterogeneous heap because old generation cannot absorb data from eden
  // when it is allocated on different memory (example, nv-dimm) than young.
  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary) ||
      ParallelArguments::is_heterogeneous_heap()) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden. Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false; // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false; // Respect young gen minimum size.
  }

  log_trace(gc, ergo, heap)(" absorbing " SIZE_FORMAT "K: "
                            "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                            "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                            "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                            absorb_size / K,
                            eden_capacity / K, (eden_capacity - absorb_size) / K,
                            young_gen->from_space()->used_in_bytes() / K,
                            young_gen->to_space()->used_in_bytes() / K,
                            young_gen->capacity_in_bytes() / K, new_young_size / K);

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false; // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top. (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear. Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

class PCAddThreadRootsMarkingTaskClosure : public ThreadClosure {
private:
  uint _worker_id;
@ -1123,12 +1123,6 @@ class PSParallelCompact : AllStatic {
  static void enqueue_dense_prefix_tasks(TaskQueue& task_queue,
                                         uint parallel_gc_threads);

  // If objects are left in eden after a collection, try to move the boundary
  // and absorb them into the old gen. Returns true if eden was emptied.
  static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                         PSYoungGen* young_gen,
                                         PSOldGen* old_gen);

  // Reset time since last full gc
  static void reset_millis_since_last_gc();

@ -803,8 +803,7 @@ bool PSScavenge::should_attempt_scavenge() {
  return result;
}

// Adaptive size policy support. When the young generation/old generation
// boundary moves, _young_generation_boundary must be reset
// Adaptive size policy support.
void PSScavenge::set_young_generation_boundary(HeapWord* v) {
  _young_generation_boundary = v;
  if (UseCompressedOops) {
@ -113,8 +113,7 @@ class PSScavenge: AllStatic {
  static void set_survivor_overflow(bool state) {
    _survivor_overflow = state;
  }
  // Adaptive size policy support. When the young generation/old generation
  // boundary moves, _young_generation_boundary must be reset
  // Adaptive size policy support.
  static void set_young_generation_boundary(HeapWord* v);

  // Called by parallelScavengeHeap to init the tenuring threshold
@ -131,8 +131,7 @@ class PSYoungGen : public CHeapObj<mtGC> {
  // The max this generation can grow to
  size_t max_size() const { return _reserved.byte_size(); }

  // The max this generation can grow to if the boundary between
  // the generations are allowed to move.
  // The max this generation can grow to
  size_t gen_size_limit() const { return _max_gen_size; }

  bool is_maximal_no_gc() const {
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2007, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -25,8 +25,6 @@
#ifndef SHARE_GC_PARALLEL_VMSTRUCTS_PARALLELGC_HPP
#define SHARE_GC_PARALLEL_VMSTRUCTS_PARALLELGC_HPP

#include "gc/parallel/asPSOldGen.hpp"
#include "gc/parallel/asPSYoungGen.hpp"
#include "gc/parallel/immutableSpace.hpp"
#include "gc/parallel/mutableSpace.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
@ -88,9 +86,7 @@
  declare_toplevel_type(ImmutableSpace) \
  declare_type(MutableSpace, ImmutableSpace) \
  declare_toplevel_type(PSYoungGen) \
  declare_type(ASPSYoungGen, PSYoungGen) \
  declare_toplevel_type(PSOldGen) \
  declare_type(ASPSOldGen, PSOldGen) \
  \
  /*****************************/ \
  /* Parallel GC pointer types */ \
@ -100,9 +96,7 @@
  declare_toplevel_type(ImmutableSpace*) \
  declare_toplevel_type(MutableSpace*) \
  declare_toplevel_type(PSYoungGen*) \
  declare_toplevel_type(ASPSYoungGen*) \
  declare_toplevel_type(PSOldGen*) \
  declare_toplevel_type(ASPSOldGen*) \
  declare_toplevel_type(ParallelScavengeHeap*)

#define VM_INT_CONSTANTS_PARALLELGC(declare_constant, \
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -253,19 +253,12 @@ void CardTable::resize_covered_region(MemRegion new_region) {
      committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                              cur_committed.end()));
    if (!uncommit_region.is_empty()) {
      // It is not safe to uncommit cards if the boundary between
      // the generations is moving. A shrink can uncommit cards
      // owned by generation A but being used by generation B.
      if (!UseAdaptiveGCBoundary) {
        if (!os::uncommit_memory((char*)uncommit_region.start(),
                                 uncommit_region.byte_size())) {
          assert(false, "Card table contraction failed");
          // The call failed so don't change the end of the
          // committed region. This is better than taking the
          // VM down.
          new_end_aligned = _committed[ind].end();
        }
      } else {
      if (!os::uncommit_memory((char*)uncommit_region.start(),
                               uncommit_region.byte_size())) {
        assert(false, "Card table contraction failed");
        // The call failed so don't change the end of the
        // committed region. This is better than taking the
        // VM down.
        new_end_aligned = _committed[ind].end();
      }
    }
@ -420,9 +420,6 @@
  product(bool, UseAdaptiveSizePolicyWithSystemGC, false, \
          "Include statistics from System.gc() for adaptive size policy") \
  \
  product(bool, UseAdaptiveGCBoundary, false, \
          "Allow young-old boundary to move") \
  \
  develop(intx, PSAdaptiveSizePolicyResizeVirtualSpaceAlot, -1, \
          "Resize the virtual spaces of the young or old generations") \
          range(-1, 1) \
@ -548,6 +548,7 @@ static SpecialFlag const special_jvm_flags[] = {
#ifndef X86
  { "UseSSE", JDK_Version::undefined(), JDK_Version::jdk(15), JDK_Version::jdk(16) },
#endif // !X86
  { "UseAdaptiveGCBoundary", JDK_Version::undefined(), JDK_Version::jdk(15), JDK_Version::jdk(16) },

#ifdef TEST_VERIFY_SPECIAL_JVM_FLAGS
  // These entries will generate build errors. Their purpose is to test the macros.
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -58,8 +58,7 @@ public class TestAllocateOldGenAt {
                   "-version"});

    runTest("-XX:+UseG1GC");
    runTest("-XX:+UseParallelGC -XX:-UseAdaptiveGCBoundary");
    runTest("-XX:+UseParallelGC -XX:+UseAdaptiveGCBoundary");
    runTest("-XX:+UseParallelGC");
  }

  private static void runTest(String... extraFlags) throws Exception {
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -82,13 +82,8 @@ public class TestAllocateOldGenAtError {
  }

  private static void testParallelOld() throws Exception {
    System.out.println("Testing Parallel GC with UseAdaptiveGCBoundary disabled");
    OutputAnalyzer output = runTest("-XX:+UseParallelGC -XX:-UseAdaptiveGCBoundary");
    output.shouldContain("Error occurred during initialization of VM");
    output.shouldNotHaveExitValue(0);

    System.out.println("Testing Parallel GC with UseAdaptiveGCBoundary enabled");
    output = runTest("-XX:+UseParallelGC -XX:+UseAdaptiveGCBoundary");
    System.out.println("Testing Parallel GC");
    OutputAnalyzer output = runTest("-XX:+UseParallelGC");
    output.shouldContain("Error occurred during initialization of VM");
    output.shouldNotHaveExitValue(0);
  }
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -72,11 +72,8 @@ public class TestOldObjectsOnNvdimm {
    }
    Collections.addAll(testOpts, common_options);

    // Test with G1 GC
    runTest("-XX:+UseG1GC");
    // Test with ParallelOld GC
    runTest("-XX:+UseParallelGC -XX:-UseAdaptiveGCBoundary");
    runTest("-XX:+UseParallelGC -XX:+UseAdaptiveGCBoundary");
    runTest("-XX:+UseParallelGC");
  }

  private static void runTest(String... extraFlags) throws Exception {
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -73,11 +73,8 @@ public class TestYoungObjectsOnDram {
    }
    Collections.addAll(testOpts, common_options);

    // Test with G1 GC
    runTest("-XX:+UseG1GC");
    // Test with ParallelOld GC
    runTest("-XX:+UseParallelGC -XX:-UseAdaptiveGCBoundary");
    runTest("-XX:+UseParallelGC -XX:+UseAdaptiveGCBoundary");
    runTest("-XX:+UseParallelGC");
  }

  private static void runTest(String... extraFlags) throws Exception {
@ -1,66 +0,0 @@
/*
 * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package gc.parallel;

/**
 * @test AdaptiveGCBoundary
 * @key gc regression
 * @requires vm.gc.Parallel
 * @summary UseAdaptiveGCBoundary is broken
 * @bug 8014546
 * @library /test/lib
 * @modules java.base/jdk.internal.misc
 *          java.management
 * @run main/othervm gc.parallel.AdaptiveGCBoundary
 * @author jon.masamitsu@oracle.com
 */

import jdk.test.lib.process.ProcessTools;
import jdk.test.lib.process.OutputAnalyzer;

public class AdaptiveGCBoundary {
  public static void main(String args[]) throws Exception {

    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
      "-showversion",
      "-XX:+UseParallelGC",
      "-XX:+UseAdaptiveGCBoundary",
      "-XX:+PrintCommandLineFlags",
      SystemGCCaller.class.getName()
      );

    OutputAnalyzer output = new OutputAnalyzer(pb.start());

    output.shouldContain("+UseAdaptiveGCBoundary");

    output.shouldNotContain("error");

    output.shouldHaveExitValue(0);
  }
  static class SystemGCCaller {
    public static void main(String [] args) {
      System.gc();
    }
  }
}