Merge
commit 24d624d6cb
@@ -311,8 +311,7 @@ void CMSCollector::ref_processor_init() {
                              _cmsGen->refs_discovery_is_mt(),     // mt discovery
                              (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
                              _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
-                             &_is_alive_closure,                  // closure for liveness info
-                             false);                              // next field updates do not need write barrier
+                             &_is_alive_closure);                 // closure for liveness info
   // Initialize the _ref_processor field of CMSGen
   _cmsGen->set_ref_processor(_ref_processor);

@@ -2246,12 +2246,9 @@ void G1CollectedHeap::ref_processing_init() {
                                 // degree of mt discovery
                            false,
                                 // Reference discovery is not atomic
-                           &_is_alive_closure_cm,
+                           &_is_alive_closure_cm);
                                 // is alive closure
                                 // (for efficiency/performance)
-                           true);
-                                // Setting next fields of discovered
-                                // lists requires a barrier.

   // STW ref processor
   _ref_processor_stw =
@@ -2266,12 +2263,9 @@ void G1CollectedHeap::ref_processing_init() {
                                 // degree of mt discovery
                            true,
                                 // Reference discovery is atomic
-                           &_is_alive_closure_stw,
+                           &_is_alive_closure_stw);
                                 // is alive closure
                                 // (for efficiency/performance)
-                           false);
-                                // Setting next fields of discovered
-                                // lists does not require a barrier.
 }

 size_t G1CollectedHeap::capacity() const {

@@ -1636,8 +1636,7 @@ void ParNewGeneration::ref_processor_init() {
                              refs_discovery_is_mt(),     // mt discovery
                              (int) ParallelGCThreads,    // mt discovery degree
                              refs_discovery_is_atomic(), // atomic_discovery
-                             NULL,                       // is_alive_non_header
-                             false);                     // write barrier for next field updates
+                             NULL);                      // is_alive_non_header
   }
 }

@@ -854,8 +854,7 @@ void PSParallelCompact::post_initialize() {
                            true,                       // mt discovery
                            (int) ParallelGCThreads,    // mt discovery degree
                            true,                       // atomic_discovery
-                           &_is_alive_closure,         // non-header is alive closure
-                           false);                     // write barrier for next field updates
+                           &_is_alive_closure);        // non-header is alive closure
   _counters = new CollectorCounters("PSParallelCompact", 1);

   // Initialize static fields in ParCompactionManager.

@@ -864,8 +864,7 @@ void PSScavenge::initialize() {
                            true,                       // mt discovery
                            (int) ParallelGCThreads,    // mt discovery degree
                            true,                       // atomic_discovery
-                           NULL,                       // header provides liveness info
-                           false);                     // next field updates do not need write barrier
+                           NULL);                      // header provides liveness info

   // Cache the cardtable
   BarrierSet* bs = Universe::heap()->barrier_set();

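The collector hunks above all drop the trailing "needs write barrier" argument from their ReferenceProcessor construction; the post barrier is now applied inside the reference processor itself (see the enqueue_discovered_reflist hunks further down). As a hedged illustration only — the argument values are copied from the PSScavenge hunk, while the span and the two mt-processing arguments are placeholders that are not part of this diff — a call site now looks roughly like this:

    _ref_processor =
      new ReferenceProcessor(span,                      // span (placeholder)
                             mt_processing,             // mt processing (placeholder)
                             (int) ParallelGCThreads,   // mt processing degree (placeholder)
                             true,                      // mt discovery
                             (int) ParallelGCThreads,   // mt discovery degree
                             true,                      // atomic_discovery
                             NULL);                     // header provides liveness info
                                                        // (no trailing write-barrier flag any more)
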
@@ -1423,6 +1423,17 @@ size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
   return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
 }

+void MetaspaceGC::initialize() {
+  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
+  // we can't do a GC during initialization.
+  _capacity_until_GC = MaxMetaspaceSize;
+}
+
+void MetaspaceGC::post_initialize() {
+  // Reset the high-water mark once the VM initialization is done.
+  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
+}
+
 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
   // Check if the compressed class space is full.
   if (is_class && Metaspace::using_class_space()) {

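The two added functions split the metaspace GC-threshold ("high-water mark") setup into a boot phase and a post-boot phase. A minimal sketch of the call ordering, pieced together from the metaspace.cpp and thread.cpp hunks in this commit (all other startup work elided):

    // Early in VM startup, Metaspace::global_initialize() now runs this first:
    MetaspaceGC::initialize();      // _capacity_until_GC = MaxMetaspaceSize
                                    // (no GC is possible yet, so the threshold is the hard cap)

    // ... class loading and the rest of VM initialization ...

    // In Threads::create_vm(), right after set_init_completed():
    Metaspace::post_initialize();   // calls MetaspaceGC::post_initialize(), which resets
                                    // _capacity_until_GC = MAX2(committed_bytes(), MetaspaceSize)
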
@@ -1443,21 +1454,13 @@ bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {

 size_t MetaspaceGC::allowed_expansion() {
   size_t committed_bytes = MetaspaceAux::committed_bytes();
-
-  size_t left_until_max = MaxMetaspaceSize - committed_bytes;
-
-  // Always grant expansion if we are initiating the JVM,
-  // or if the GC_locker is preventing GCs.
-  if (!is_init_completed() || GC_locker::is_active_and_needs_gc()) {
-    return left_until_max / BytesPerWord;
-  }
-
   size_t capacity_until_gc = capacity_until_GC();

-  if (capacity_until_gc <= committed_bytes) {
-    return 0;
-  }
+  assert(capacity_until_gc >= committed_bytes,
+         err_msg("capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
+                 capacity_until_gc, committed_bytes));

+  size_t left_until_max = MaxMetaspaceSize - committed_bytes;
   size_t left_until_GC = capacity_until_gc - committed_bytes;
   size_t left_to_commit = MIN2(left_until_GC, left_until_max);

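Because the high-water mark is now MaxMetaspaceSize for the whole initialization window, allowed_expansion() no longer needs the is_init_completed()/GC_locker special case, and the new assert documents the invariant that the threshold never drops below the committed size. A condensed sketch of the resulting computation (the final return statement is outside this hunk and is shown here only as an assumption):

    size_t left_until_max = MaxMetaspaceSize - committed_bytes;    // room left before the hard cap
    size_t left_until_GC  = capacity_until_gc - committed_bytes;   // >= 0 by the new assert
    size_t left_to_commit = MIN2(left_until_GC, left_until_max);
    return left_to_commit / BytesPerWord;                          // assumed: result returned in words
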
@@ -1469,7 +1472,15 @@ void MetaspaceGC::compute_new_size() {
   uint current_shrink_factor = _shrink_factor;
   _shrink_factor = 0;

-  const size_t used_after_gc = MetaspaceAux::capacity_bytes();
+  // Using committed_bytes() for used_after_gc is an overestimation, since the
+  // chunk free lists are included in committed_bytes() and the memory in an
+  // un-fragmented chunk free list is available for future allocations.
+  // However, if the chunk free lists become fragmented, then the memory may
+  // not be available for future allocations and the memory is therefore "in use".
+  // Including the chunk free lists in the definition of "in use" is therefore
+  // necessary. Not including the chunk free lists can cause capacity_until_GC to
+  // shrink below committed_bytes() and this has caused serious bugs in the past.
+  const size_t used_after_gc = MetaspaceAux::committed_bytes();
   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;

@@ -3094,6 +3105,8 @@ void Metaspace::ergo_initialize() {
 }

 void Metaspace::global_initialize() {
+  MetaspaceGC::initialize();
+
   // Initialize the alignment for shared spaces.
   int max_alignment = os::vm_allocation_granularity();
   size_t cds_total = 0;

@@ -3201,10 +3214,13 @@ void Metaspace::global_initialize() {
     }
   }

-  MetaspaceGC::initialize();
   _tracer = new MetaspaceTracer();
 }

+void Metaspace::post_initialize() {
+  MetaspaceGC::post_initialize();
+}
+
 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
                                                size_t chunk_word_size,
                                                size_t chunk_bunch) {

@@ -208,6 +208,7 @@ class Metaspace : public CHeapObj<mtClass> {

   static void ergo_initialize();
   static void global_initialize();
+  static void post_initialize();

   static size_t first_chunk_word_size()       { return _first_chunk_word_size; }
   static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; }

@@ -398,7 +399,8 @@ class MetaspaceGC : AllStatic {

  public:

-  static void initialize() { _capacity_until_GC = MetaspaceSize; }
+  static void initialize();
+  static void post_initialize();

   static size_t capacity_until_GC();
   static size_t inc_capacity_until_GC(size_t v);

@@ -96,12 +96,10 @@ ReferenceProcessor::ReferenceProcessor(MemRegion span,
                                        bool      mt_discovery,
                                        uint      mt_discovery_degree,
                                        bool      atomic_discovery,
-                                       BoolObjectClosure* is_alive_non_header,
-                                       bool      discovered_list_needs_post_barrier) :
+                                       BoolObjectClosure* is_alive_non_header) :
   _discovering_refs(false),
   _enqueuing_is_done(false),
   _is_alive_non_header(is_alive_non_header),
-  _discovered_list_needs_post_barrier(discovered_list_needs_post_barrier),
   _processing_is_mt(mt_processing),
   _next_id(0)
 {

@@ -340,10 +338,18 @@ void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
   // (java.lang.ref.Reference.discovered), self-loop their "next" field
   // thus distinguishing them from active References, then
   // prepend them to the pending list.
   //
+  // The Java threads will see the Reference objects linked together through
+  // the discovered field. Instead of trying to do the write barrier updates
+  // in all places in the reference processor where we manipulate the discovered
+  // field we make sure to do the barrier here where we anyway iterate through
+  // all linked Reference objects. Note that it is important to not dirty any
+  // cards during reference processing since this will cause card table
+  // verification to fail for G1.
+  //
   // BKWRD COMPATIBILITY NOTE: For older JDKs (prior to the fix for 4956777),
   // the "next" field is used to chain the pending list, not the discovered
   // field.

   if (TraceReferenceGC && PrintGCDetails) {
     gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
                            INTPTR_FORMAT, (address)refs_list.head());
@@ -365,15 +371,15 @@ void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
     assert(java_lang_ref_Reference::next(obj) == NULL,
            "Reference not active; should not be discovered");
     // Self-loop next, so as to make Ref not active.
-    // Post-barrier not needed when looping to self.
     java_lang_ref_Reference::set_next_raw(obj, obj);
-    if (next_d == obj) {  // obj is last
+    if (next_d != obj) {
+      oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), next_d);
+    } else {
+      // This is the last object.
       // Swap refs_list into pending_list_addr and
       // set obj's discovered to what we read from pending_list_addr.
       oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
-      // Need post-barrier on pending_list_addr above;
-      // see special post-barrier code at the end of
-      // enqueue_discovered_reflists() further below.
+      // Need post-barrier on pending_list_addr. See enqueue_discovered_ref_helper() above.
       java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL
+      oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
     }

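The rewritten loop body emits the discovered-field post barrier exactly once per Reference, here where every linked Reference is visited anyway, instead of scattering conditional barriers through the rest of the reference processor. Condensed from the two enqueue_discovered_reflist hunks above (names as in the hunks):

    java_lang_ref_Reference::set_next_raw(obj, obj);    // self-loop next; no barrier needed
    if (next_d != obj) {
      // Not the last element: barrier the discovered field we just walked over.
      oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), next_d);
    } else {
      // Last element: splice the list onto the pending list, then barrier that store too.
      oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
      java_lang_ref_Reference::set_discovered_raw(obj, old);   // old may be NULL
      oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
    }
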
@@ -496,20 +502,15 @@ void DiscoveredListIterator::remove() {
   // pre-barrier here because we know the Reference has already been found/marked,
   // that's how it ended up in the discovered list in the first place.
   oop_store_raw(_prev_next, new_next);
-  if (_discovered_list_needs_post_barrier && _prev_next != _refs_list.adr_head()) {
-    // Needs post-barrier and this is not the list head (which is not on the heap)
-    oopDesc::bs()->write_ref_field(_prev_next, new_next);
-  }
   NOT_PRODUCT(_removed++);
   _refs_list.dec_length(1);
 }

 // Make the Reference object active again.
 void DiscoveredListIterator::make_active() {
-  // For G1 we don't want to use set_next - it
-  // will dirty the card for the next field of
-  // the reference object and will fail
-  // CT verification.
+  // The pre barrier for G1 is probably just needed for the old
+  // reference processing behavior. Should we guard this with
+  // ReferenceProcessor::pending_list_uses_discovered_field() ?
   if (UseG1GC) {
     HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref);
     if (UseCompressedOops) {
@@ -517,10 +518,8 @@ void DiscoveredListIterator::make_active() {
     } else {
       oopDesc::bs()->write_ref_field_pre((oop*)next_addr, NULL);
     }
-    java_lang_ref_Reference::set_next_raw(_ref, NULL);
-  } else {
-    java_lang_ref_Reference::set_next(_ref, NULL);
   }
+  java_lang_ref_Reference::set_next_raw(_ref, NULL);
 }

 void DiscoveredListIterator::clear_referent() {

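For reference, after both make_active() hunks are applied the method reduces to a raw clear of the next field with only the G1 pre-barrier kept. A hedged reconstruction — the UseCompressedOops/narrowOop branch sits outside the hunks and is an assumption here:

    void DiscoveredListIterator::make_active() {
      // The pre barrier is kept for G1 only (see the comment added above).
      if (UseG1GC) {
        HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref);
        if (UseCompressedOops) {
          oopDesc::bs()->write_ref_field_pre((narrowOop*)next_addr, NULL);   // assumed branch
        } else {
          oopDesc::bs()->write_ref_field_pre((oop*)next_addr, NULL);
        }
      }
      java_lang_ref_Reference::set_next_raw(_ref, NULL);   // raw store; no post barrier here
    }
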
@@ -546,7 +545,7 @@ ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
                                    OopClosure*        keep_alive,
                                    VoidClosure*       complete_gc) {
   assert(policy != NULL, "Must have a non-NULL policy");
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
   // Decide which softly reachable refs should be kept alive.
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));

@@ -586,7 +585,7 @@ ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
                              BoolObjectClosure* is_alive,
                              OopClosure*        keep_alive) {
   assert(discovery_is_atomic(), "Error");
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
     DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)

@@ -623,7 +622,7 @@ ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
                                                   OopClosure*        keep_alive,
                                                   VoidClosure*       complete_gc) {
   assert(!discovery_is_atomic(), "Error");
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
     HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());

@@ -666,7 +665,7 @@ ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
                                    OopClosure*        keep_alive,
                                    VoidClosure*       complete_gc) {
   ResourceMark rm;
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
   while (iter.has_next()) {
     iter.update_discovered();
     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));

@@ -782,13 +781,6 @@ private:
   bool        _clear_referent;
 };

-void ReferenceProcessor::set_discovered(oop ref, oop value) {
-  java_lang_ref_Reference::set_discovered_raw(ref, value);
-  if (_discovered_list_needs_post_barrier) {
-    oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(ref), value);
-  }
-}
-
 // Balances reference queues.
 // Move entries from all queues[0, 1, ..., _max_num_q-1] to
 // queues[0, 1, ..., _num_q-1] because only the first _num_q

@@ -846,9 +838,9 @@ void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
       // Add the chain to the to list.
       if (ref_lists[to_idx].head() == NULL) {
         // to list is empty. Make a loop at the end.
-        set_discovered(move_tail, move_tail);
+        java_lang_ref_Reference::set_discovered_raw(move_tail, move_tail);
       } else {
-        set_discovered(move_tail, ref_lists[to_idx].head());
+        java_lang_ref_Reference::set_discovered_raw(move_tail, ref_lists[to_idx].head());
       }
       ref_lists[to_idx].set_head(move_head);
       ref_lists[to_idx].inc_length(refs_to_move);

@@ -982,7 +974,7 @@ void ReferenceProcessor::clean_up_discovered_references() {

 void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
   assert(!discovery_is_atomic(), "Else why call this method?");
-  DiscoveredListIterator iter(refs_list, NULL, NULL, _discovered_list_needs_post_barrier);
+  DiscoveredListIterator iter(refs_list, NULL, NULL);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
     oop next = java_lang_ref_Reference::next(iter.obj());

@@ -1071,16 +1063,6 @@ ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
   // The last ref must have its discovered field pointing to itself.
   oop next_discovered = (current_head != NULL) ? current_head : obj;

-  // Note: In the case of G1, this specific pre-barrier is strictly
-  // not necessary because the only case we are interested in
-  // here is when *discovered_addr is NULL (see the CAS further below),
-  // so this will expand to nothing. As a result, we have manually
-  // elided this out for G1, but left in the test for some future
-  // collector that might have need for a pre-barrier here, e.g.:-
-  // oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
-  assert(!_discovered_list_needs_post_barrier || UseG1GC,
-         "Need to check non-G1 collector: "
-         "may need a pre-write-barrier for CAS from NULL below");
   oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
                                                     NULL);
   if (retest == NULL) {

@@ -1089,9 +1071,6 @@ ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
     // is necessary.
     refs_list.set_head(obj);
     refs_list.inc_length(1);
-    if (_discovered_list_needs_post_barrier) {
-      oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered);
-    }

     if (TraceReferenceGC) {
       gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",

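With the flag gone, multi-threaded discovery relies on the CAS alone: the discovered field is written raw and the post barrier is deferred until enqueue_discovered_reflist() walks the list. Condensed from the two add_to_discovered_list_mt hunks above:

    oop next_discovered = (current_head != NULL) ? current_head : obj;   // last ref self-loops
    oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr, NULL);
    if (retest == NULL) {
      // This thread won the race to discover the reference; no post barrier here any more.
      refs_list.set_head(obj);
      refs_list.inc_length(1);
    }
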
@@ -1242,24 +1221,14 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
   if (_discovery_is_mt) {
     add_to_discovered_list_mt(*list, obj, discovered_addr);
   } else {
-    // If "_discovered_list_needs_post_barrier", we do write barriers when
-    // updating the discovered reference list. Otherwise, we do a raw store
-    // here: the field will be visited later when processing the discovered
-    // references.
+    // We do a raw store here: the field will be visited later when processing
+    // the discovered references.
     oop current_head = list->head();
     // The last ref must have its discovered field pointing to itself.
     oop next_discovered = (current_head != NULL) ? current_head : obj;

-    // As in the case further above, since we are over-writing a NULL
-    // pre-value, we can safely elide the pre-barrier here for the case of G1.
-    // e.g.:- oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
     assert(discovered == NULL, "control point invariant");
-    assert(!_discovered_list_needs_post_barrier || UseG1GC,
-           "For non-G1 collector, may need a pre-write-barrier for CAS from NULL below");
     oop_store_raw(discovered_addr, next_discovered);
-    if (_discovered_list_needs_post_barrier) {
-      oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered);
-    }
     list->set_head(obj);
     list->inc_length(1);

@@ -1353,7 +1322,7 @@ ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                 OopClosure*        keep_alive,
                                                 VoidClosure*       complete_gc,
                                                 YieldClosure*      yield) {
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
     oop obj = iter.obj();

@@ -99,7 +99,6 @@ private:
   oop                _referent;
   OopClosure*        _keep_alive;
   BoolObjectClosure* _is_alive;
-  bool               _discovered_list_needs_post_barrier;

   DEBUG_ONLY(
   oop                _first_seen; // cyclic linked list check

@@ -113,8 +112,7 @@ private:
 public:
   inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                 OopClosure*        keep_alive,
-                                BoolObjectClosure* is_alive,
-                                bool               discovered_list_needs_post_barrier = false):
+                                BoolObjectClosure* is_alive):
     _refs_list(refs_list),
     _prev_next(refs_list.adr_head()),
     _prev(NULL),

@@ -128,8 +126,7 @@ public:
 #endif
     _next(NULL),
     _keep_alive(keep_alive),
-    _is_alive(is_alive),
-    _discovered_list_needs_post_barrier(discovered_list_needs_post_barrier)
+    _is_alive(is_alive)
   { }

   // End Of List.

@@ -230,14 +227,6 @@ class ReferenceProcessor : public CHeapObj<mtGC> {
                                        // other collectors in configuration
   bool        _discovery_is_mt;        // true if reference discovery is MT.

-  // If true, setting "next" field of a discovered refs list requires
-  // write post barrier. (Must be true if used in a collector in which
-  // elements of a discovered list may be moved during discovery: for
-  // example, a collector like Garbage-First that moves objects during a
-  // long-term concurrent marking phase that does weak reference
-  // discovery.)
-  bool        _discovered_list_needs_post_barrier;
-
   bool        _enqueuing_is_done;      // true if all weak references enqueued
   bool        _processing_is_mt;       // true during phases when
                                        // reference processing is MT.

@@ -382,11 +371,6 @@ class ReferenceProcessor : public CHeapObj<mtGC> {
   void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);

  protected:
-  // Set the 'discovered' field of the given reference to
-  // the given value - emitting post barriers depending upon
-  // the value of _discovered_list_needs_post_barrier.
-  void set_discovered(oop ref, oop value);
-
   // "Preclean" the given discovered reference list
   // by removing references with strongly reachable referents.
   // Currently used in support of CMS only.

@@ -427,8 +411,7 @@ class ReferenceProcessor : public CHeapObj<mtGC> {
                      bool mt_processing = false, uint mt_processing_degree = 1,
                      bool mt_discovery  = false, uint mt_discovery_degree  = 1,
                      bool atomic_discovery = true,
-                     BoolObjectClosure* is_alive_non_header = NULL,
-                     bool discovered_list_needs_post_barrier = false);
+                     BoolObjectClosure* is_alive_non_header = NULL);

   // RefDiscoveryPolicy values
   enum DiscoveryPolicy {

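Putting the header hunks together, the public constructor declaration now reads roughly as follows (the leading MemRegion span parameter comes from the earlier referenceProcessor.cpp hunk; the defaults are the ones shown above):

    ReferenceProcessor(MemRegion span,
                       bool mt_processing = false, uint mt_processing_degree = 1,
                       bool mt_discovery  = false, uint mt_discovery_degree  = 1,
                       bool atomic_discovery = true,
                       BoolObjectClosure* is_alive_non_header = NULL);
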
@@ -3543,6 +3543,8 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
   // debug stuff, that does not work until all basic classes have been initialized.
   set_init_completed();

+  Metaspace::post_initialize();
+
   HOTSPOT_VM_INIT_END();

   // record VM initialization completion time

@@ -22,6 +22,7 @@
  */

 /*
+ * @ignore 8027915
  * @test TestParallelHeapSizeFlags
  * @key gc
  * @bug 8006088

@@ -22,6 +22,7 @@
  */

 /*
+ * @ignore 8025645
  * @test TestUseCompressedOopsErgo
  * @key gc
  * @bug 8010722

@@ -22,6 +22,7 @@
  */

 /**
+ * @ignore 8041506, 8041946, 8042051
 * @test TestHumongousShrinkHeap
 * @bug 8036025
 * @summary Verify that heap shrinks after GC in the presence of fragmentation due to humongous objects

@@ -294,55 +294,6 @@ class TestStringDeduplicationTools {
         }
     }

-    private static class MemoryUsageTest {
-        public static void main(String[] args) {
-            System.out.println("Begin: MemoryUsageTest");
-
-            final boolean useStringDeduplication = Boolean.parseBoolean(args[0]);
-            final int numberOfStrings = LargeNumberOfStrings;
-            final int numberOfUniqueStrings = 1;
-
-            ArrayList<String> list = createStrings(numberOfStrings, numberOfUniqueStrings);
-            forceDeduplication(DefaultAgeThreshold, FullGC);
-
-            if (useStringDeduplication) {
-                verifyStrings(list, numberOfUniqueStrings);
-            }
-
-            System.gc();
-
-            System.out.println("Heap Memory Usage: " + ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getUsed());
-            System.out.println("Array Header Size: " + unsafe.ARRAY_CHAR_BASE_OFFSET);
-
-            System.out.println("End: MemoryUsageTest");
-        }
-
-        public static OutputAnalyzer run(boolean useStringDeduplication) throws Exception {
-            String[] extraArgs = new String[0];
-
-            if (useStringDeduplication) {
-                extraArgs = new String[] {
-                    "-XX:+UseStringDeduplication",
-                    "-XX:+PrintStringDeduplicationStatistics",
-                    "-XX:StringDeduplicationAgeThreshold=" + DefaultAgeThreshold
-                };
-            }
-
-            String[] defaultArgs = new String[] {
-                "-XX:+PrintGC",
-                "-XX:+PrintGCDetails",
-                MemoryUsageTest.class.getName(),
-                "" + useStringDeduplication
-            };
-
-            ArrayList<String> args = new ArrayList<String>();
-            args.addAll(Arrays.asList(extraArgs));
-            args.addAll(Arrays.asList(defaultArgs));
-
-            return runTest(args.toArray(new String[args.size()]));
-        }
-    }
-
 /*
  * Tests
  */

@@ -480,44 +431,4 @@ class TestStringDeduplicationTools {
         OutputAnalyzer output = InternedTest.run();
         output.shouldHaveExitValue(0);
     }
-
-    public static void testMemoryUsage() throws Exception {
-        // Test that memory usage is reduced after deduplication
-        OutputAnalyzer output;
-        final String heapMemoryUsagePattern = "Heap Memory Usage: (\\d+)";
-        final String arrayHeaderSizePattern = "Array Header Size: (\\d+)";
-
-        // Run without deduplication
-        output = MemoryUsageTest.run(false);
-        output.shouldHaveExitValue(0);
-        final long heapMemoryUsageWithoutDedup = Long.parseLong(output.firstMatch(heapMemoryUsagePattern, 1));
-        final long arrayHeaderSizeWithoutDedup = Long.parseLong(output.firstMatch(arrayHeaderSizePattern, 1));
-
-        // Run with deduplication
-        output = MemoryUsageTest.run(true);
-        output.shouldHaveExitValue(0);
-        final long heapMemoryUsageWithDedup = Long.parseLong(output.firstMatch(heapMemoryUsagePattern, 1));
-        final long arrayHeaderSizeWithDedup = Long.parseLong(output.firstMatch(arrayHeaderSizePattern, 1));
-
-        // Sanity check to make sure one instance isn't using compressed class pointers and the other not
-        if (arrayHeaderSizeWithoutDedup != arrayHeaderSizeWithDedup) {
-            throw new Exception("Unexpected difference between array header sizes");
-        }
-
-        // Calculate expected memory usage with deduplication enabled. This calculation does
-        // not take alignment and padding into account, so it's a conservative estimate.
-        final long sizeOfChar = unsafe.ARRAY_CHAR_INDEX_SCALE;
-        final long sizeOfCharArray = StringLength * sizeOfChar + arrayHeaderSizeWithoutDedup;
-        final long bytesSaved = (LargeNumberOfStrings - 1) * sizeOfCharArray;
-        final long heapMemoryUsageWithDedupExpected = heapMemoryUsageWithoutDedup - bytesSaved;
-
-        System.out.println("Memory usage summary:");
-        System.out.println("  heapMemoryUsageWithoutDedup: " + heapMemoryUsageWithoutDedup);
-        System.out.println("  heapMemoryUsageWithDedup: " + heapMemoryUsageWithDedup);
-        System.out.println("  heapMemoryUsageWithDedupExpected: " + heapMemoryUsageWithDedupExpected);
-
-        if (heapMemoryUsageWithDedup > heapMemoryUsageWithDedupExpected) {
-            throw new Exception("Unexpected memory usage, heapMemoryUsageWithDedup should be less or equal to heapMemoryUsageWithDedupExpected");
-        }
-    }
 }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -21,16 +21,28 @@
  * questions.
  */

-/*
- * @test TestStringDeduplicationMemoryUsage
- * @summary Test string deduplication memory usage
- * @bug 8029075
- * @key gc
- * @library /testlibrary
- */
 import java.util.ArrayList;

-public class TestStringDeduplicationMemoryUsage {
-    public static void main(String[] args) throws Exception {
-        TestStringDeduplicationTools.testMemoryUsage();
+/* @test TestMetaspaceInitialization
+ * @bug 8042933
+ * @summary Tests to initialize metaspace with a very low MetaspaceSize
+ * @library /testlibrary
+ * @run main/othervm -XX:MetaspaceSize=2m TestMetaspaceInitialization
+ */
+public class TestMetaspaceInitialization {
+    private class Internal {
+        public int x;
+        public Internal(int x) {
+            this.x = x;
+        }
+    }
+
+    private void test() {
+        ArrayList<Internal> l = new ArrayList<>();
+        l.add(new Internal(17));
+    }
+
+    public static void main(String[] args) {
+        new TestMetaspaceInitialization().test();
     }
 }

@@ -22,6 +22,7 @@
  */

 /**
+ * @ignore 8042051
 * @test TestDynShrinkHeap
 * @bug 8016479
 * @summary Verify that the heap shrinks after full GC according to the current values of the Min/MaxHeapFreeRatio flags