8077413: Avoid use of Universe::heap() inside collectors
Reviewed-by: stefank, kbarrett

parent f078697a45
commit ca9afd25f6
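
The recurring edit in the hunks below replaces raw Universe::heap() calls inside the collectors with each collector's typed static accessor (GenCollectedHeap::heap(), G1CollectedHeap::heap(), ParallelScavengeHeap::heap()), which also lets call sites drop their cast-plus-kind-assert boilerplate, because the accessor itself checks initialization and heap kind (see the G1CollectedHeap::heap() hunk further down). A minimal standalone C++ sketch of that accessor pattern, with hypothetical names rather than the real HotSpot classes:

    #include <cassert>
    #include <cstddef>

    class CollectedHeap {
    public:
      virtual ~CollectedHeap() {}
    };

    class MyCollectedHeap : public CollectedHeap {
      static MyCollectedHeap* _instance;   // set once during heap initialization
    public:
      MyCollectedHeap() { _instance = this; }

      // Typed accessor: asserts initialization and returns the already-typed
      // heap, so callers need neither a cast nor their own kind check.
      static MyCollectedHeap* heap() {
        assert(_instance != NULL && "uninitialized access to heap()");
        return _instance;
      }
    };

    MyCollectedHeap* MyCollectedHeap::_instance = NULL;

    int main() {
      MyCollectedHeap the_heap;                      // "initialize" the heap
      assert(MyCollectedHeap::heap() == &the_heap);  // call sites use the typed accessor
      return 0;
    }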
@@ -299,8 +299,6 @@ void CMSCollector::ref_processor_init() {
 }

 AdaptiveSizePolicy* CMSCollector::size_policy() {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
-    "Wrong type of heap");
   return gch->gen_policy()->size_policy();
 }
@@ -981,7 +979,7 @@ oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
   assert_lock_strong(freelistLock());

 #ifndef PRODUCT
-  if (Universe::heap()->promotion_should_fail()) {
+  if (GenCollectedHeap::heap()->promotion_should_fail()) {
     return NULL;
   }
 #endif // #ifndef PRODUCT
@@ -1058,7 +1056,7 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
                                            oop old, markOop m,
                                            size_t word_sz) {
 #ifndef PRODUCT
-  if (Universe::heap()->promotion_should_fail()) {
+  if (GenCollectedHeap::heap()->promotion_should_fail()) {
     return NULL;
   }
 #endif // #ifndef PRODUCT
@@ -2468,7 +2466,7 @@ void CMSCollector::verify_after_remark_work_1() {
   verification_mark_bm()->iterate(&vcl);
   if (vcl.failed()) {
     gclog_or_tty->print("Verification failed");
-    Universe::heap()->print_on(gclog_or_tty);
+    gch->print_on(gclog_or_tty);
     fatal("CMS: failed marking verification after remark");
   }
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -62,7 +62,7 @@ void VM_CMS_Operation::verify_before_gc() {
     HandleMark hm;
     FreelistLocker x(_collector);
     MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
-    Universe::heap()->prepare_for_verify();
+    GenCollectedHeap::heap()->prepare_for_verify();
     Universe::verify();
   }
 }
@@ -1326,7 +1326,7 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {

   if (VerifyDuringGC) {
     HandleMark hm; // handle scope
-    Universe::heap()->prepare_for_verify();
+    g1h->prepare_for_verify();
     Universe::verify(VerifyOption_G1UsePrevMarking,
                      " VerifyDuringGC:(before)");
   }
@@ -1353,7 +1353,7 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
   // Verify the heap w.r.t. the previous marking bitmap.
   if (VerifyDuringGC) {
     HandleMark hm; // handle scope
-    Universe::heap()->prepare_for_verify();
+    g1h->prepare_for_verify();
     Universe::verify(VerifyOption_G1UsePrevMarking,
                      " VerifyDuringGC:(overflow)");
   }
@@ -1379,7 +1379,7 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {

   if (VerifyDuringGC) {
     HandleMark hm; // handle scope
-    Universe::heap()->prepare_for_verify();
+    g1h->prepare_for_verify();
     Universe::verify(VerifyOption_G1UseNextMarking,
                      " VerifyDuringGC:(after)");
   }
@@ -1987,13 +1987,13 @@ void ConcurrentMark::cleanup() {

   if (VerifyDuringGC) {
     HandleMark hm; // handle scope
-    Universe::heap()->prepare_for_verify();
+    g1h->prepare_for_verify();
     Universe::verify(VerifyOption_G1UsePrevMarking,
                      " VerifyDuringGC:(before)");
   }
   g1h->check_bitmaps("Cleanup Start");

-  G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
+  G1CollectorPolicy* g1p = g1h->g1_policy();
   g1p->record_concurrent_mark_cleanup_start();

   double start = os::elapsedTime();
@@ -2098,7 +2098,7 @@ void ConcurrentMark::cleanup() {

   if (VerifyDuringGC) {
     HandleMark hm; // handle scope
-    Universe::heap()->prepare_for_verify();
+    g1h->prepare_for_verify();
     Universe::verify(VerifyOption_G1UsePrevMarking,
                      " VerifyDuringGC:(after)");
   }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
  */

 #include "precompiled.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "memory/space.hpp"
@@ -303,9 +304,9 @@ void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_
   assert(blk_start <= threshold, "blk_start should be at or before threshold");
   assert(pointer_delta(threshold, blk_start) <= N_words,
          "offset should be <= BlockOffsetSharedArray::N");
-  assert(Universe::heap()->is_in_reserved(blk_start),
+  assert(G1CollectedHeap::heap()->is_in_reserved(blk_start),
          "reference must be into the heap");
-  assert(Universe::heap()->is_in_reserved(blk_end-1),
+  assert(G1CollectedHeap::heap()->is_in_reserved(blk_end-1),
          "limit must be within the heap");
   assert(threshold == _array->_reserved.start() + index*N_words,
          "index must agree with threshold");
@@ -458,7 +459,7 @@ G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array,
 }

 HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold_raw() {
-  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
+  assert(!G1CollectedHeap::heap()->is_in_reserved(_array->_offset_array),
          "just checking");
   _next_offset_index = _array->index_for_raw(_bottom);
   _next_offset_index++;
@@ -468,7 +469,7 @@ HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold_raw() {
 }

 void G1BlockOffsetArrayContigSpace::zero_bottom_entry_raw() {
-  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
+  assert(!G1CollectedHeap::heap()->is_in_reserved(_array->_offset_array),
          "just checking");
   size_t bottom_index = _array->index_for_raw(_bottom);
   assert(_array->address_for_index_raw(bottom_index) == _bottom,
@@ -477,7 +478,7 @@ void G1BlockOffsetArrayContigSpace::zero_bottom_entry_raw() {
 }

 HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
-  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
+  assert(!G1CollectedHeap::heap()->is_in_reserved(_array->_offset_array),
          "just checking");
   _next_offset_index = _array->index_for(_bottom);
   _next_offset_index++;
@@ -3318,6 +3318,8 @@ void G1CollectedHeap::print_all_rsets() {
 #endif // PRODUCT

 G1CollectedHeap* G1CollectedHeap::heap() {
+  assert(_g1h != NULL, "Uninitialized access to G1CollectedHeap::heap()");
+  assert(_g1h->kind() == CollectedHeap::G1CollectedHeap, "Not a G1 heap");
   return _g1h;
 }

@@ -101,11 +101,6 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
   BiasedLocking::restore_marks();
   GenMarkSweep::deallocate_stacks();

-  // "free at last gc" is calculated from these.
-  // CHF: cheating for now!!!
-  // Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
-  // Universe::set_heap_used_at_last_gc(Universe::heap()->used());
-
   CodeCache::gc_epilogue();
   JvmtiExport::gc_epilogue();

@@ -167,12 +162,12 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
   Klass::clean_weak_klass_links(&GenMarkSweep::is_alive);

   // Delete entries for dead interned string and clean up unreferenced symbols in symbol table.
-  G1CollectedHeap::heap()->unlink_string_and_symbol_table(&GenMarkSweep::is_alive);
+  g1h->unlink_string_and_symbol_table(&GenMarkSweep::is_alive);

   if (VerifyDuringGC) {
     HandleMark hm; // handle scope
     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
-    Universe::heap()->prepare_for_verify();
+    g1h->prepare_for_verify();
     // Note: we can verify only the heap here. When an object is
     // marked, the previous value of the mark word (including
     // identity hash values, ages, etc) is preserved, and the mark
@@ -186,7 +181,7 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
     if (!VerifySilently) {
       gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
     }
-    Universe::heap()->verify(VerifySilently, VerifyOption_G1UseMarkWord);
+    g1h->verify(VerifySilently, VerifyOption_G1UseMarkWord);
     if (!VerifySilently) {
       gclog_or_tty->print_cr("]");
     }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -172,7 +172,7 @@ inline void G1UpdateRSOrPushRefOopClosure::do_oop_nv(T* p) {
   oopDesc* o = obj;
 #endif // CHECK_UNHANDLED_OOPS
   assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
-  assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
+  assert(_g1->is_in_reserved(obj), "must be in heap");
 #endif // ASSERT

   assert(_from != NULL, "from region must be non-NULL");
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -63,7 +63,7 @@ inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
   assert(has_partial_array_mask(p), "invariant");
   oop from_obj = clear_partial_array_mask(p);

-  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
+  assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
   assert(from_obj->is_objArray(), "must be obj array");
   objArrayOop from_obj_array = objArrayOop(from_obj);
   // The from-space object contains the real length.
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -57,7 +57,7 @@ inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, uint tid) {
   oopDesc* o = obj;
 #endif // CHECK_UNHANDLED_OOPS
   assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
-  assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
+  assert(_g1->is_in_reserved(obj), "must be in heap");
 #endif // ASSERT

   assert(from == NULL || from->is_in_reserved(p), "p is not in from");
@@ -206,7 +206,7 @@ G1SATBCardTableLoggingModRefBS::write_ref_field_static(void* field,
   if (new_val == NULL) return;
   // Otherwise, log it.
   G1SATBCardTableLoggingModRefBS* g1_bs =
-    barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set());
+    barrier_set_cast<G1SATBCardTableLoggingModRefBS>(G1CollectedHeap::heap()->barrier_set());
   g1_bs->write_ref_field_work(field, new_val);
 }

@@ -24,6 +24,7 @@

 #include "precompiled.hpp"
 #include "classfile/javaClasses.inline.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/g1StringDedup.hpp"
 #include "gc_implementation/g1/g1StringDedupQueue.hpp"
 #include "memory/gcLocker.hpp"
@@ -163,7 +164,7 @@ void G1StringDedupQueue::verify() {
   while (!iter.is_empty()) {
     oop obj = iter.next();
     if (obj != NULL) {
-      guarantee(Universe::heap()->is_in_reserved(obj), "Object must be on the heap");
+      guarantee(G1CollectedHeap::heap()->is_in_reserved(obj), "Object must be on the heap");
       guarantee(!obj->is_forwarded(), "Object must not be forwarded");
       guarantee(java_lang_String::is_instance(obj), "Object must be a String");
     }
@@ -519,7 +519,7 @@ void G1StringDedupTable::verify() {
     while (*entry != NULL) {
       typeArrayOop value = (*entry)->obj();
       guarantee(value != NULL, "Object must not be NULL");
-      guarantee(Universe::heap()->is_in_reserved(value), "Object must be on the heap");
+      guarantee(G1CollectedHeap::heap()->is_in_reserved(value), "Object must be on the heap");
       guarantee(!value->is_forwarded(), "Object must not be forwarded");
       guarantee(value->is_typeArray(), "Object must be a typeArrayOop");
       unsigned int hash = hash_code(value);
@@ -27,8 +27,8 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/cardTableModRefBS.hpp"
 #include "memory/cardTableRS.hpp"
+#include "memory/genCollectedHeap.hpp"
 #include "memory/space.inline.hpp"
-#include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/mutexLocker.hpp"
@@ -449,7 +449,7 @@ get_LNC_array_for_space(Space* sp,
   // Do a dirty read here. If we pass the conditional then take the rare
   // event lock and do the read again in case some other thread had already
   // succeeded and done the resize.
-  int cur_collection = Universe::heap()->total_collections();
+  int cur_collection = GenCollectedHeap::heap()->total_collections();
   if (_last_LNC_resizing_collection[i] != cur_collection) {
     MutexLocker x(ParGCRareEvent_lock);
     if (_last_LNC_resizing_collection[i] != cur_collection) {
@@ -117,7 +117,7 @@ bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) c
 void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
   assert(old->is_objArray(), "must be obj array");
   assert(old->is_forwarded(), "must be forwarded");
-  assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
+  assert(GenCollectedHeap::heap()->is_in_reserved(old), "must be in heap.");
   assert(!old_gen()->is_in(old), "must be in young generation.");

   objArrayOop obj = objArrayOop(old->forwardee());
@@ -199,9 +199,9 @@ bool ParScanThreadState::take_from_overflow_stack() {
   for (size_t i = 0; i != num_take_elems; i++) {
     oop cur = of_stack->pop();
     oop obj_to_push = cur->forwardee();
-    assert(Universe::heap()->is_in_reserved(cur), "Should be in heap");
+    assert(GenCollectedHeap::heap()->is_in_reserved(cur), "Should be in heap");
     assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
-    assert(Universe::heap()->is_in_reserved(obj_to_push), "Should be in heap");
+    assert(GenCollectedHeap::heap()->is_in_reserved(obj_to_push), "Should be in heap");
     if (should_be_partially_scanned(obj_to_push, cur)) {
       assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
       obj_to_push = cur;
@@ -695,7 +695,7 @@ void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {

   _par_cl->do_oop_nv(p);

-  if (Universe::heap()->is_in_reserved(p)) {
+  if (GenCollectedHeap::heap()->is_in_reserved(p)) {
     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
     _rs->write_ref_field_gc_par(p, obj);
   }
@@ -722,7 +722,7 @@ void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {

   _cl->do_oop_nv(p);

-  if (Universe::heap()->is_in_reserved(p)) {
+  if (GenCollectedHeap::heap()->is_in_reserved(p)) {
     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
     _rs->write_ref_field_gc_par(p, obj);
   }
@@ -821,8 +821,6 @@ public:
 void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
 {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
-         "not a generational heap");
   FlexibleWorkGang* workers = gch->workers();
   assert(workers != NULL, "Need parallel worker threads.");
   _state_set.reset(workers->active_workers(), _generation.promotion_failed());
@@ -897,7 +895,7 @@ void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThr
     _gc_tracer.report_promotion_failed(_promotion_failed_info);
   }
   // Reset the PromotionFailureALot counters.
-  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
+  NOT_PRODUCT(gch->reset_promotion_should_fail();)
 }

 void ParNewGeneration::collect(bool full,
@@ -910,8 +908,6 @@ void ParNewGeneration::collect(bool full,

   _gc_timer->register_gc_start();

-  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
-         "not a CMS generational heap");
   AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
   FlexibleWorkGang* workers = gch->workers();
   assert(workers != NULL, "Need workgang for parallel work");
@@ -1190,7 +1186,7 @@ oop ParNewGeneration::copy_to_survivor_space(
   } else {
     // Is in to-space; do copying ourselves.
     Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
-    assert(Universe::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
+    assert(GenCollectedHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
     forward_ptr = old->forward_to_atomic(new_obj);
     // Restore the mark word copied above.
     new_obj->set_mark(m);
@@ -70,7 +70,7 @@ template <class T>
 inline void ParScanClosure::do_oop_work(T* p,
                                         bool gc_barrier,
                                         bool root_scan) {
-  assert((!Universe::heap()->is_in_reserved(p) ||
+  assert((!GenCollectedHeap::heap()->is_in_reserved(p) ||
           generation()->is_in_reserved(p))
          && (generation()->level() == 0 || gc_barrier),
          "The gen must be right, and we must be doing the barrier "
@@ -82,7 +82,7 @@ inline void ParScanClosure::do_oop_work(T* p,
 #ifndef PRODUCT
       if (_g->to()->is_in_reserved(obj)) {
         tty->print_cr("Scanning field (" PTR_FORMAT ") twice?", p2i(p));
-        GenCollectedHeap* gch = (GenCollectedHeap*)Universe::heap();
+        GenCollectedHeap* gch = GenCollectedHeap::heap();
         Space* sp = gch->space_containing(p);
         oop obj = oop(sp->block_start(p));
         assert((HeapWord*)obj < (HeapWord*)p, "Error");
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -89,7 +89,7 @@ size_t ASPSOldGen::available_for_expansion() {
   assert(virtual_space()->is_aligned(gen_size_limit()), "not aligned");
   assert(gen_size_limit() >= virtual_space()->committed_size(), "bad gen size");

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   size_t result = gen_size_limit() - virtual_space()->committed_size();
   size_t result_aligned = align_size_down(result, heap->generation_alignment());
   return result_aligned;
@@ -101,7 +101,7 @@ size_t ASPSOldGen::available_for_contraction() {
     return uncommitted_bytes;
   }

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   const size_t gen_alignment = heap->generation_alignment();
   PSAdaptiveSizePolicy* policy = heap->size_policy();
   const size_t working_size =
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -73,7 +73,7 @@ size_t ASPSYoungGen::available_for_expansion() {
   size_t current_committed_size = virtual_space()->committed_size();
   assert((gen_size_limit() >= current_committed_size),
          "generation size limit is wrong");
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   size_t result = gen_size_limit() - current_committed_size;
   size_t result_aligned = align_size_down(result, heap->generation_alignment());
   return result_aligned;
@@ -91,7 +91,7 @@ size_t ASPSYoungGen::available_for_contraction() {

   if (eden_space()->is_empty()) {
     // Respect the minimum size for eden and for the young gen as a whole.
-    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
     const size_t eden_alignment = heap->space_alignment();
     const size_t gen_alignment = heap->generation_alignment();

@@ -128,7 +128,7 @@ size_t ASPSYoungGen::available_for_contraction() {
 // If to_space is below from_space, to_space is not considered.
 // to_space can be.
 size_t ASPSYoungGen::available_to_live() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   const size_t alignment = heap->space_alignment();

   // Include any space that is committed but is not in eden.
@@ -292,7 +292,7 @@ void ASPSYoungGen::resize_spaces(size_t requested_eden_size,

   assert(eden_start < from_start, "Cannot push into from_space");

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   const size_t alignment = heap->space_alignment();
   const bool maintain_minimum =
     (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
@@ -345,8 +345,6 @@ void ASPSYoungGen::resize_spaces(size_t requested_eden_size,

   // Does the optimal to-space overlap from-space?
   if (to_start < (char*)from_space()->end()) {
-    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
     // Calculate the minimum offset possible for from_end
     size_t from_size =
       pointer_delta(from_space()->top(), from_start, sizeof(char));
@@ -509,9 +507,7 @@ void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
   assert(from_space()->top() == old_from_top, "from top changed!");

   if (PrintAdaptiveSizePolicy) {
-    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
     gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
                         "collection: %d "
                         "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
@@ -542,7 +538,7 @@ void ASPSYoungGen::reset_after_change() {
   }
   MemRegion cmr((HeapWord*)virtual_space()->low(),
                 (HeapWord*)virtual_space()->high());
-  Universe::heap()->barrier_set()->resize_covered_region(cmr);
+  ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);

   space_invariants();
 }
@@ -76,9 +76,7 @@ class CheckForUnmarkedObjects : public ObjectClosure {

  public:
   CheckForUnmarkedObjects() {
-    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
     _young_gen = heap->young_gen();
     _card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());
     // No point in asserting barrier set type here. Need to make CardTableExtension
@@ -325,9 +323,7 @@ void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_arra
 void CardTableExtension::verify_all_young_refs_imprecise() {
   CheckForUnmarkedObjects check;

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSOldGen* old_gen = heap->old_gen();

   old_gen->object_iterate(&check);
@@ -335,9 +331,7 @@ void CardTableExtension::verify_all_young_refs_imprecise() {

 // This should be called immediately after a scavenge, before mutators resume.
 void CardTableExtension::verify_all_young_refs_precise() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSOldGen* old_gen = heap->old_gen();

   CheckForPreciseMarks check(
@@ -351,7 +345,7 @@ void CardTableExtension::verify_all_young_refs_precise() {

 void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
   CardTableExtension* card_table =
-    barrier_set_cast<CardTableExtension>(Universe::heap()->barrier_set());
+    barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());

   jbyte* bot = card_table->byte_for(mr.start());
   jbyte* top = card_table->byte_for(mr.end());
@@ -523,7 +517,7 @@ bool CardTableExtension::resize_commit_uncommit(int changed_region,
     cur_committed = new_committed;
   }
 #ifdef ASSERT
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   assert(cur_committed.start() ==
          (HeapWord*) align_size_up((uintptr_t) cur_committed.start(),
                                    os::vm_page_size()),
@@ -89,6 +89,7 @@ jint ParallelScavengeHeap::initialize() {
   double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
   double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

+  _psh = this;
   _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());

   _old_gen = _gens->old_gen();
@@ -114,7 +115,6 @@ jint ParallelScavengeHeap::initialize() {
   // initialize the policy counters - 2 collectors, 3 generations
   _gc_policy_counters =
     new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
-  _psh = this;

   // Set up the GCTaskManager
   _gc_task_manager = GCTaskManager::create(ParallelGCThreads);
@@ -259,7 +259,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
     // total_collections() value!
     {
       MutexLocker ml(Heap_lock);
-      gc_count = Universe::heap()->total_collections();
+      gc_count = total_collections();

       result = young_gen()->allocate(size);
       if (result != NULL) {
@@ -309,8 +309,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
       // This prevents us from looping until time out on requests that can
       // not be satisfied.
       if (op.prologue_succeeded()) {
-        assert(Universe::heap()->is_in_or_null(op.result()),
-               "result not in heap");
+        assert(is_in_or_null(op.result()), "result not in heap");

         // If GC was locked out during VM operation then retry allocation
         // and/or stall as necessary.
@@ -420,7 +419,7 @@ void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
 HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
-  assert(!Universe::heap()->is_gc_active(), "not reentrant");
+  assert(!is_gc_active(), "not reentrant");
   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

   // We assume that allocation in eden will fail unless we collect.
@@ -508,8 +507,8 @@ void ParallelScavengeHeap::collect(GCCause::Cause cause) {
   {
     MutexLocker ml(Heap_lock);
     // This value is guarded by the Heap_lock
-    gc_count = Universe::heap()->total_collections();
-    full_gc_count = Universe::heap()->total_full_collections();
+    gc_count = total_collections();
+    full_gc_count = total_full_collections();
   }

   VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
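
In the ParallelScavengeHeap hunks above, code that already executes inside ParallelScavengeHeap member functions stops detouring through Universe::heap() and simply calls its own members (total_collections(), is_in_or_null(), is_gc_active()). A tiny standalone sketch, with hypothetical names only, of why the global round-trip is redundant inside the class:

    #include <cassert>

    class Heap {
      unsigned _gc_count;
    public:
      Heap() : _gc_count(0) {}
      unsigned total_collections() const { return _gc_count; }

      void collect() {
        // Before: something like global_heap()->total_collections(), i.e. a
        // global lookup that only hands back "this" again.
        // After: a plain member call.
        unsigned before = total_collections();
        ++_gc_count;
        assert(total_collections() == before + 1);
      }
    };

    int main() {
      Heap h;
      h.collect();
      return 0;
    }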
@@ -48,7 +48,7 @@
 //

 void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
-  assert(Universe::heap()->is_gc_active(), "called outside gc");
+  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

   ResourceMark rm;

@@ -79,7 +79,7 @@ void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {


 void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
-  assert(Universe::heap()->is_gc_active(), "called outside gc");
+  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

   NOT_PRODUCT(GCTraceTime tm("MarkFromRootsTask",
     PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
@@ -150,7 +150,7 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {

 void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
 {
-  assert(Universe::heap()->is_gc_active(), "called outside gc");
+  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

   NOT_PRODUCT(GCTraceTime tm("RefProcTask",
     PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
@@ -168,7 +168,7 @@ void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)

 void RefProcTaskExecutor::execute(ProcessTask& task)
 {
-  ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   uint parallel_gc_threads = heap->gc_task_manager()->workers();
   uint active_gc_threads = heap->gc_task_manager()->active_workers();
   RegionTaskQueueSet* qset = ParCompactionManager::region_array();
@@ -189,7 +189,7 @@ void RefProcTaskExecutor::execute(ProcessTask& task)

 void RefProcTaskExecutor::execute(EnqueueTask& task)
 {
-  ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   uint parallel_gc_threads = heap->gc_task_manager()->workers();
   GCTaskQueue* q = GCTaskQueue::create();
   for(uint i=0; i<parallel_gc_threads; i++) {
@@ -206,7 +206,7 @@ StealMarkingTask::StealMarkingTask(ParallelTaskTerminator* t) :
   _terminator(t) {}

 void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
-  assert(Universe::heap()->is_gc_active(), "called outside gc");
+  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

   NOT_PRODUCT(GCTraceTime tm("StealMarkingTask",
     PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
@@ -238,7 +238,7 @@ StealRegionCompactionTask::StealRegionCompactionTask(ParallelTaskTerminator* t):
   _terminator(t) {}

 void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) {
-  assert(Universe::heap()->is_gc_active(), "called outside gc");
+  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

   NOT_PRODUCT(GCTraceTime tm("StealRegionCompactionTask",
     PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
@@ -320,7 +320,7 @@ void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {
 }

 void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
-  assert(Universe::heap()->is_gc_active(), "called outside gc");
+  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

   NOT_PRODUCT(GCTraceTime tm("DrainStacksCompactionTask",
     PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
@@ -60,8 +60,7 @@ ParCompactionManager::ParCompactionManager() :
     _region_stack(NULL),
     _region_stack_index((uint)max_uintx) {

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

   _old_gen = heap->old_gen();
   _start_array = old_gen()->start_array();
@@ -59,7 +59,7 @@ jlong PSMarkSweep::_time_of_last_gc = 0;
 CollectorCounters* PSMarkSweep::_counters = NULL;

 void PSMarkSweep::initialize() {
-  MemRegion mr = Universe::heap()->reserved_region();
+  MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
   _ref_processor = new ReferenceProcessor(mr); // a vanilla ref proc
   _counters = new CollectorCounters("PSMarkSweep", 1);
 }
@@ -81,9 +81,9 @@ void PSMarkSweep::initialize() {
 void PSMarkSweep::invoke(bool maximum_heap_compaction) {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
-  assert(!Universe::heap()->is_gc_active(), "not reentrant");
+  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   GCCause::Cause gc_cause = heap->gc_cause();
   PSAdaptiveSizePolicy* policy = heap->size_policy();
   IsGCActiveMark mark;
@@ -110,8 +110,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
     return false;
   }

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   GCCause::Cause gc_cause = heap->gc_cause();

   _gc_timer->register_gc_start();
@@ -487,9 +486,7 @@ bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
 }

 void PSMarkSweep::allocate_stacks() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSYoungGen* young_gen = heap->young_gen();

   MutableSpace* to_space = young_gen->to_space();
@@ -515,8 +512,7 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
   trace(" 1");

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

   // Need to clear claim bits before the tracing starts.
   ClassLoaderDataGraph::clear_claimed_marks();
@@ -582,9 +578,7 @@ void PSMarkSweep::mark_sweep_phase2() {
   // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
   // tracking expects us to do so. See comment under phase4.

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSOldGen* old_gen = heap->old_gen();

   // Begin compacting into the old gen
@@ -606,9 +600,7 @@ void PSMarkSweep::mark_sweep_phase3() {
   GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
   trace("3");

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSYoungGen* young_gen = heap->young_gen();
   PSOldGen* old_gen = heap->old_gen();

@@ -651,9 +643,7 @@ void PSMarkSweep::mark_sweep_phase4() {

   // All pointers are now adjusted, move objects accordingly

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSYoungGen* young_gen = heap->young_gen();
   PSOldGen* old_gen = heap->old_gen();

@@ -38,15 +38,12 @@ PSMarkSweepDecorator* PSMarkSweepDecorator::_destination_decorator = NULL;


 void PSMarkSweepDecorator::set_destination_decorator_tenured() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   _destination_decorator = heap->old_gen()->object_mark_sweep();
 }

 void PSMarkSweepDecorator::advance_destination_decorator() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

   assert(_destination_decorator != NULL, "Sanity");

@@ -107,20 +107,22 @@ void PSOldGen::initialize_work(const char* perf_data_name, int level) {
     SpaceMangler::mangle_region(cmr);
   }

-  Universe::heap()->barrier_set()->resize_covered_region(cmr);
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
+  BarrierSet* bs = heap->barrier_set();

-  CardTableModRefBS* _ct =
-    barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
+  bs->resize_covered_region(cmr);
+
+  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);

   // Verify that the start and end of this generation is the start of a card.
   // If this wasn't true, a single card could span more than one generation,
   // which would cause problems when we commit/uncommit memory, and when we
   // clear and dirty cards.
-  guarantee(_ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
-  if (_reserved.end() != Universe::heap()->reserved_region().end()) {
+  guarantee(ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
+  if (_reserved.end() != heap->reserved_region().end()) {
     // Don't check at the very end of the heap as we'll assert that we're probing off
     // the end if we try.
-    guarantee(_ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
+    guarantee(ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
   }

 //
@@ -161,8 +163,7 @@ bool PSOldGen::is_allocated() {
 }

 void PSOldGen::precompact() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

   // Reset start array first.
   start_array()->reset();
@@ -197,7 +198,7 @@ HeapWord* PSOldGen::allocate(size_t word_size) {

   // Allocations in the old generation need to be reported
   if (res != NULL) {
-    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
     heap->size_policy()->tenured_allocation(word_size);
   }

@@ -376,8 +377,7 @@ void PSOldGen::resize(size_t desired_free_space) {
   }

   if (PrintAdaptiveSizePolicy) {
-    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
     gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
                            "collection: %d "
                            "(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
@@ -397,7 +397,7 @@ void PSOldGen::post_resize() {
   size_t new_word_size = new_memregion.word_size();

   start_array()->set_covered_region(new_memregion);
-  Universe::heap()->barrier_set()->resize_covered_region(new_memregion);
+  ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(new_memregion);

   // ALWAYS do this last!!
   object_space()->initialize(new_memregion,
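
The PSOldGen::initialize_work() hunk above also changes shape: the barrier set is fetched once into a local, resized through the base pointer, and downcast a single time, instead of repeating Universe::heap()->barrier_set() at every use. A standalone sketch of that shape with hypothetical types; HotSpot's checked barrier_set_cast is stood in for by a plain static_cast:

    #include <cassert>
    #include <cstdint>

    struct BarrierSet {
      virtual ~BarrierSet() {}
      int resize_count = 0;
      void resize_covered_region() { ++resize_count; }
    };

    struct CardTableBarrierSet : BarrierSet {
      bool is_card_aligned(std::uintptr_t addr) const { return addr % 512 == 0; }
    };

    struct Heap {
      BarrierSet* _bs;
      explicit Heap(BarrierSet* bs) : _bs(bs) {}
      BarrierSet* barrier_set() const { return _bs; }
    };

    int main() {
      CardTableBarrierSet card_table_bs;
      Heap heap(&card_table_bs);

      BarrierSet* bs = heap.barrier_set();     // fetch once, keep in a local
      bs->resize_covered_region();             // use the base interface directly

      CardTableBarrierSet* ct =                // downcast a single time
          static_cast<CardTableBarrierSet*>(bs);
      assert(ct->is_card_aligned(1024));
      assert(bs->resize_count == 1);
      return 0;
    }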
@@ -748,7 +748,7 @@ bool ParallelCompactData::summarize(SplitInfo& split_info,

 HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
   assert(addr != NULL, "Should detect NULL oop earlier");
-  assert(PSParallelCompact::gc_heap()->is_in(addr), "not in heap");
+  assert(ParallelScavengeHeap::heap()->is_in(addr), "not in heap");
   assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");

   // Region covering the object.
@@ -836,9 +836,7 @@ void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) {
 }

 void PSParallelCompact::post_initialize() {
-  ParallelScavengeHeap* heap = gc_heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   MemRegion mr = heap->reserved_region();
   _ref_processor =
     new ReferenceProcessor(mr,            // span
@@ -855,8 +853,7 @@ void PSParallelCompact::post_initialize() {
 }

 bool PSParallelCompact::initialize() {
-  ParallelScavengeHeap* heap = gc_heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   MemRegion mr = heap->reserved_region();

   // Was the old gen get allocated successfully?
@@ -890,7 +887,7 @@ void PSParallelCompact::initialize_space_info()
 {
   memset(&_space_info, 0, sizeof(_space_info));

-  ParallelScavengeHeap* heap = gc_heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSYoungGen* young_gen = heap->young_gen();

   _space_info[old_space_id].set_space(heap->old_gen()->object_space());
@@ -973,7 +970,7 @@ void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
   // promotion failure does not swap spaces) because an unknown number of minor
   // collections will have swapped the spaces an unknown number of times.
   GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
-  ParallelScavengeHeap* heap = gc_heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   _space_info[from_space_id].set_space(heap->young_gen()->from_space());
   _space_info[to_space_id].set_space(heap->young_gen()->to_space());

@@ -1028,7 +1025,7 @@ void PSParallelCompact::post_compact()
   MutableSpace* const from_space = _space_info[from_space_id].space();
   MutableSpace* const to_space = _space_info[to_space_id].space();

-  ParallelScavengeHeap* heap = gc_heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   bool eden_empty = eden_space->is_empty();
   if (!eden_empty) {
     eden_empty = absorb_live_data_from_eden(heap->size_policy(),
@@ -1966,7 +1963,7 @@ void PSParallelCompact::invoke(bool maximum_heap_compaction) {
   assert(Thread::current() == (Thread*)VMThread::vm_thread(),
          "should be in vm thread");

-  ParallelScavengeHeap* heap = gc_heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   GCCause::Cause gc_cause = heap->gc_cause();
   assert(!heap->is_gc_active(), "not reentrant");

@@ -1994,7 +1991,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
     return false;
   }

-  ParallelScavengeHeap* heap = gc_heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

   _gc_timer.register_gc_start();
   _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
@@ -2347,7 +2344,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
   // Recursively traverse all live objects and mark them
   GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

-  ParallelScavengeHeap* heap = gc_heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   uint parallel_gc_threads = heap->gc_task_manager()->workers();
   uint active_gc_threads = heap->gc_task_manager()->active_workers();
   TaskQueueSetSuper* qset = ParCompactionManager::region_array();
@@ -2687,8 +2684,7 @@ void PSParallelCompact::compact() {
   // trace("5");
   GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSOldGen* old_gen = heap->old_gen();
   old_gen->start_array()->reset();
   uint parallel_gc_threads = heap->gc_task_manager()->workers();
@@ -2839,7 +2835,7 @@ PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
 // heap, last_space_id is returned. In debug mode it expects the address to be
 // in the heap and asserts such.
 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
-  assert(Universe::heap()->is_in_reserved(addr), "addr not in the heap");
+  assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr not in the heap");

   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
     if (_space_info[id].space()->contains(addr)) {
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP

 #include "gc_implementation/parallelScavenge/objectStartArray.hpp"
+#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
 #include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
 #include "gc_implementation/shared/collectorCounters.hpp"
@@ -1168,11 +1169,6 @@ class PSParallelCompact : AllStatic {

   PSParallelCompact();

-  // Convenient accessor for Universe::heap().
-  static ParallelScavengeHeap* gc_heap() {
-    return (ParallelScavengeHeap*)Universe::heap();
-  }
-
   static void invoke(bool maximum_heap_compaction);
   static bool invoke_no_policy(bool maximum_heap_compaction);

@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_INLINE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_INLINE_HPP

+#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
 #include "gc_interface/collectedHeap.hpp"
@@ -36,7 +37,7 @@ inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    assert(Universe::heap()->is_in(obj), "should be in heap");
+    assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");

     if (mark_bitmap()->is_unmarked(obj) && mark_obj(obj)) {
       cm->push(obj);
@@ -62,14 +63,14 @@ inline void PSParallelCompact::adjust_pointer(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    assert(Universe::heap()->is_in(obj), "should be in heap");
+    assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");

     oop new_obj = (oop)summary_data().calc_new_pointer(obj);
     assert(new_obj != NULL,  // is forwarding ptr?
            "should be forwarded");
     // Just always do the update unconditionally?
     if (new_obj != NULL) {
-      assert(Universe::heap()->is_in_reserved(new_obj),
+      assert(ParallelScavengeHeap::heap()->is_in_reserved(new_obj),
              "should be in object space");
       oopDesc::encode_store_heap_oop_not_null(p, new_obj);
     }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -103,7 +103,7 @@ void PSPromotionLAB::flush() {
 }

 bool PSPromotionLAB::unallocate_object(HeapWord* obj, size_t obj_size) {
-  assert(Universe::heap()->is_in(obj), "Object outside heap");
+  assert(ParallelScavengeHeap::heap()->is_in(obj), "Object outside heap");

   if (contains(obj)) {
     HeapWord* object_end = obj + obj_size;
@@ -137,9 +137,7 @@ void PSOldPromotionLAB::flush() {
 #ifdef ASSERT

 bool PSYoungPromotionLAB::lab_is_valid(MemRegion lab) {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   MutableSpace* to_space = heap->young_gen()->to_space();
   MemRegion used = to_space->used_region();
   if (used.contains(lab)) {
@@ -150,10 +148,9 @@ bool PSYoungPromotionLAB::lab_is_valid(MemRegion lab) {
 }

 bool PSOldPromotionLAB::lab_is_valid(MemRegion lab) {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   assert(_start_array->covered_region().contains(lab), "Sanity");

+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSOldGen* old_gen = heap->old_gen();
   MemRegion used = old_gen->object_space()->used_region();

@@ -44,8 +44,7 @@ PSOldGen* PSPromotionManager::_old_gen = NULL;
 MutableSpace* PSPromotionManager::_young_space = NULL;

 void PSPromotionManager::initialize() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

   _old_gen = heap->old_gen();
   _young_space = heap->young_gen()->to_space();
@@ -88,8 +87,7 @@ PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() {
 }

 void PSPromotionManager::pre_scavenge() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

   _young_space = heap->young_gen()->to_space();

@@ -132,7 +130,7 @@ static const char* const pm_stats_hdr[] = {
 void
 PSPromotionManager::print_taskqueue_stats(outputStream* const out) {
   out->print_cr("== GC Tasks Stats, GC %3d",
-                Universe::heap()->total_collections());
+                ParallelScavengeHeap::heap()->total_collections());

   TaskQueueStats totals;
   out->print("thr "); TaskQueueStats::print_header(1, out); out->cr();
@@ -160,8 +158,7 @@ PSPromotionManager::reset_stats() {
 #endif // TASKQUEUE_STATS

 PSPromotionManager::PSPromotionManager() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

   // We set the old lab's start array.
   _old_lab.set_start_array(old_gen()->start_array());
@@ -191,8 +188,7 @@ void PSPromotionManager::reset() {

   // We need to get an assert in here to make sure the labs are always flushed.

-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

   // Do not prefill the LAB's, save heap wastage!
   HeapWord* lab_base = young_space()->top();
@@ -213,8 +209,7 @@ void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
   totally_drain = totally_drain || _totally_drain;

 #ifdef ASSERT
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   MutableSpace* to_space = heap->young_gen()->to_space();
   MutableSpace* old_space = heap->old_gen()->object_space();
 #endif /* ASSERT */
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP

+#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionLAB.inline.hpp"
@@ -57,9 +58,7 @@ inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
 template <class T>
 inline void PSPromotionManager::claim_or_forward_depth(T* p) {
   assert(should_scavenge(p, true), "revisiting object?");
-  assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
-         "Sanity");
-  assert(Universe::heap()->is_in(p), "pointer outside heap");
+  assert(ParallelScavengeHeap::heap()->is_in(p), "pointer outside heap");

   claim_or_forward_internal_depth(p);
 }
@@ -150,7 +149,7 @@ oop PSPromotionManager::copy_to_survivor_space(oop o) {
     // Otherwise try allocating obj tenured
     if (new_obj == NULL) {
 #ifndef PRODUCT
-      if (Universe::heap()->promotion_should_fail()) {
+      if (ParallelScavengeHeap::heap()->promotion_should_fail()) {
         return oop_promotion_failed(o, test_mark);
       }
 #endif // #ifndef PRODUCT
@@ -296,7 +295,7 @@ inline void PSPromotionManager::copy_and_push_safe_barrier(T* p) {
   // that are outside the heap. These pointers are either from roots
   // or from metadata.
   if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) &&
-      Universe::heap()->is_in_reserved(p)) {
+      ParallelScavengeHeap::heap()->is_in_reserved(p)) {
     if (PSScavenge::is_obj_in_young(new_obj)) {
       PSScavenge::card_table()->inline_write_ref_field_gc(p, new_obj);
     }
@ -87,8 +87,7 @@ protected:
public:
PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
_to_space = heap->young_gen()->to_space();
assert(_promotion_manager != NULL, "Sanity");
@ -218,11 +217,9 @@ void PSRefProcTaskExecutor::execute(EnqueueTask& task)
bool PSScavenge::invoke() {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
assert(!Universe::heap()->is_gc_active(), "not reentrant");
ParallelScavengeHeap* const heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");
ParallelScavengeHeap* const heap = ParallelScavengeHeap::heap();
PSAdaptiveSizePolicy* policy = heap->size_policy();
IsGCActiveMark mark;
@ -273,9 +270,8 @@ bool PSScavenge::invoke_no_policy() {
return false;
}
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
GCCause::Cause gc_cause = heap->gc_cause();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
// Check for potential problems.
if (!should_attempt_scavenge()) {
@ -713,9 +709,7 @@ bool PSScavenge::invoke_no_policy() {
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSYoungGen* young_gen = heap->young_gen();
{
@ -742,7 +736,7 @@ void PSScavenge::clean_up_failed_promotion() {
}
// Reset the PromotionFailureALot counters.
NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
NOT_PRODUCT(heap->reset_promotion_should_fail();)
}
// This method is called whenever an attempt to promote an object
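The NOT_PRODUCT wrapper above keeps the PromotionFailureALot counter reset out of product builds. Roughly how that macro behaves, as a sketch of the usual HotSpot idiom rather than anything introduced by this change:

#ifdef PRODUCT
#define NOT_PRODUCT(code)        // expands to nothing in product builds
#else
#define NOT_PRODUCT(code) code   // debug builds keep the wrapped statement
#endif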
@ -761,8 +755,7 @@ void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
}
bool PSScavenge::should_attempt_scavenge() {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
if (UsePerfData) {
@ -838,9 +831,7 @@ void PSScavenge::initialize() {
MaxTenuringThreshold;
}
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSYoungGen* young_gen = heap->young_gen();
PSOldGen* old_gen = heap->old_gen();
@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,7 +33,7 @@
#include "utilities/globalDefinitions.hpp"
inline void PSScavenge::save_to_space_top_before_gc() {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
_to_space_top_before_gc = heap->young_gen()->to_space()->top();
}
@ -56,7 +56,7 @@ inline bool PSScavenge::should_scavenge(T* p, MutableSpace* to_space) {
template <class T>
inline bool PSScavenge::should_scavenge(T* p, bool check_to_space) {
if (check_to_space) {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
return should_scavenge(p, heap->young_gen()->to_space());
}
return should_scavenge(p);
@ -97,7 +97,6 @@ class PSScavengeFromKlassClosure: public OopClosure {
ParallelScavengeHeap* psh = ParallelScavengeHeap::heap();
assert(!psh->is_in_reserved(p), "GC barrier needed");
if (PSScavenge::should_scavenge(p)) {
assert(!Universe::heap()->is_in_reserved(p), "Not from meta-data?");
assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
oop o = *p;
@ -47,7 +47,7 @@
//
void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {
assert(Universe::heap()->is_gc_active(), "called outside gc");
assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
PSScavengeRootsClosure roots_closure(pm);
@ -118,7 +118,7 @@ void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {
//
void ThreadRootsTask::do_it(GCTaskManager* manager, uint which) {
assert(Universe::heap()->is_gc_active(), "called outside gc");
assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
PSScavengeRootsClosure roots_closure(pm);
@ -143,7 +143,7 @@ StealTask::StealTask(ParallelTaskTerminator* t) :
_terminator(t) {}
void StealTask::do_it(GCTaskManager* manager, uint which) {
assert(Universe::heap()->is_gc_active(), "called outside gc");
assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
PSPromotionManager* pm =
PSPromotionManager::gc_thread_promotion_manager(which);
@ -181,10 +181,8 @@ void OldToYoungRootsTask::do_it(GCTaskManager* manager, uint which) {
{
PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
CardTableExtension* card_table =
barrier_set_cast<CardTableExtension>(Universe::heap()->barrier_set());
barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());
card_table->scavenge_contents_parallel(_gen->start_array(),
_gen->object_space(),
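In the OldToYoungRootsTask hunk the separate kind() assert can go because both the typed heap accessor and the checked barrier_set_cast already verify the types involved. For readers unfamiliar with barrier_set_cast, the general shape of such a checked downcast is roughly the sketch below; the type-tag query in the assert is a hypothetical stand-in, not the real HotSpot mechanism:

// Sketch of a checked barrier-set downcast. T::barrier_set_name() is a
// hypothetical type tag used here only to illustrate the assert-then-cast idea.
template <class T>
T* barrier_set_cast(BarrierSet* bs) {
  assert(bs->is_a(T::barrier_set_name()), "wrong type of barrier set");
  return static_cast<T*>(bs);
}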
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -62,7 +62,7 @@ void PSYoungGen::initialize_work() {
MemRegion cmr((HeapWord*)virtual_space()->low(),
(HeapWord*)virtual_space()->high());
Universe::heap()->barrier_set()->resize_covered_region(cmr);
ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);
if (ZapUnusedHeapArea) {
// Mangle newly committed space immediately because it
@ -103,7 +103,7 @@ void PSYoungGen::initialize_work() {
_max_gen_size, _virtual_space);
// Compute maximum space sizes for performance counters
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
size_t alignment = heap->space_alignment();
size_t size = virtual_space()->reserved_size();
@ -153,8 +153,7 @@ void PSYoungGen::initialize_work() {
}
void PSYoungGen::compute_initial_space_boundaries() {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
// Compute sizes
size_t alignment = heap->space_alignment();
@ -208,7 +207,7 @@ void PSYoungGen::set_space_boundaries(size_t eden_size, size_t survivor_size) {
#ifndef PRODUCT
void PSYoungGen::space_invariants() {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
const size_t alignment = heap->space_alignment();
// Currently, our eden size cannot shrink to zero
@ -494,7 +493,7 @@ void PSYoungGen::resize_spaces(size_t requested_eden_size,
char* to_start = (char*)to_space()->bottom();
char* to_end = (char*)to_space()->end();
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
const size_t alignment = heap->space_alignment();
const bool maintain_minimum =
(requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
@ -546,8 +545,6 @@ void PSYoungGen::resize_spaces(size_t requested_eden_size,
// Does the optimal to-space overlap from-space?
if (to_start < (char*)from_space()->end()) {
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
// Calculate the minimum offset possible for from_end
size_t from_size = pointer_delta(from_space()->top(), from_start, sizeof(char));
@ -708,9 +705,7 @@ void PSYoungGen::resize_spaces(size_t requested_eden_size,
assert(from_space()->top() == old_from_top, "from top changed!");
if (PrintAdaptiveSizePolicy) {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
"collection: %d "
"(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
@ -843,7 +838,7 @@ size_t PSYoungGen::available_to_min_gen() {
// from-space.
size_t PSYoungGen::available_to_live() {
size_t delta_in_survivor = 0;
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
const size_t space_alignment = heap->space_alignment();
const size_t gen_alignment = heap->generation_alignment();
@ -927,7 +922,7 @@ void PSYoungGen::post_resize() {
MemRegion cmr((HeapWord*)virtual_space()->low(),
(HeapWord*)virtual_space()->high());
Universe::heap()->barrier_set()->resize_covered_region(cmr);
ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);
space_invariants();
}
@ -41,8 +41,7 @@ VM_ParallelGCFailedAllocation::VM_ParallelGCFailedAllocation(size_t word_size,
void VM_ParallelGCFailedAllocation::doit() {
SvcGCMarker sgcm(SvcGCMarker::MINOR);
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
GCCauseSetter gccs(heap, _gc_cause);
_result = heap->failed_mem_allocate(_word_size);
@ -63,9 +62,7 @@ VM_ParallelGCSystemGC::VM_ParallelGCSystemGC(uint gc_count,
void VM_ParallelGCSystemGC::doit() {
SvcGCMarker sgcm(SvcGCMarker::FULL);
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
"must be a ParallelScavengeHeap");
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
GCCauseSetter gccs(heap, _gc_cause);
if (_gc_cause == GCCause::_gc_locker || _gc_cause == GCCause::_wb_young_gc
@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -60,7 +60,7 @@ CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
// which would cause problems when we commit/uncommit memory, and when we
// clear and dirty cards.
guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
if (reserved_mr.end() != GenCollectedHeap::heap()->reserved_region().end()) {
// Don't check at the very end of the heap as we'll assert that we're probing off
// the end if we try.
guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
@ -78,7 +78,7 @@ bool CardGeneration::grow_by(size_t bytes) {
heap_word_size(_virtual_space.committed_size());
MemRegion mr(space()->bottom(), new_word_size);
// Expand card table
Universe::heap()->barrier_set()->resize_covered_region(mr);
GenCollectedHeap::heap()->barrier_set()->resize_covered_region(mr);
// Expand shared block offset array
_bts->resize(new_word_size);
@ -170,7 +170,7 @@ void CardGeneration::shrink(size_t bytes) {
_bts->resize(new_word_size);
MemRegion mr(space()->bottom(), new_word_size);
// Shrink the card table
Universe::heap()->barrier_set()->resize_covered_region(mr);
GenCollectedHeap::heap()->barrier_set()->resize_covered_region(mr);
if (Verbose && PrintGC) {
size_t new_mem_size = _virtual_space.committed_size();
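The CardGeneration hunks above, and the generational files that follow, make the same move with GenCollectedHeap::heap(). That accessor is likewise outside these hunks; presumably it mirrors the parallel one (the _gch field name and assert text are assumptions, not taken from this patch):

// Minimal sketch, assuming a cached static GenCollectedHeap* _gch set when the heap is created.
GenCollectedHeap* GenCollectedHeap::heap() {
  assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "Not a GenCollectedHeap");
  return _gch;
}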
@ -38,7 +38,6 @@ CardTableRS::CardTableRS(MemRegion whole_heap) :
GenRemSet(),
_cur_youngergen_card_val(youngergenP1_card)
{
guarantee(Universe::heap()->kind() == CollectedHeap::GenCollectedHeap, "sanity");
_ct_bs = new CardTableModRefBSForCTRS(whole_heap);
_ct_bs->initialize();
set_bs(_ct_bs);
@ -598,10 +597,6 @@ void CardTableRS::verify() {
// At present, we only know how to verify the card table RS for
// generational heaps.
VerifyCTGenClosure blk(this);
CollectedHeap* ch = Universe::heap();
if (ch->kind() == CollectedHeap::GenCollectedHeap) {
GenCollectedHeap::heap()->generation_iterate(&blk, false);
_ct_bs->verify();
}
}
GenCollectedHeap::heap()->generation_iterate(&blk, false);
_ct_bs->verify();
}
@ -669,7 +669,7 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
}
// Read the gc count while the heap lock is held.
gc_count_before = Universe::heap()->total_collections();
gc_count_before = gch->total_collections();
}
VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
@ -193,7 +193,9 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
{
MemRegion cmr((HeapWord*)_virtual_space.low(),
(HeapWord*)_virtual_space.high());
Universe::heap()->barrier_set()->resize_covered_region(cmr);
GenCollectedHeap* gch = GenCollectedHeap::heap();
gch->barrier_set()->resize_covered_region(cmr);
_eden_space = new ContiguousSpace();
_from_space = new ContiguousSpace();
@ -205,13 +207,13 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
// Compute the maximum eden and survivor space sizes. These sizes
// are computed assuming the entire reserved space is committed.
// These values are exported as performance counters.
uintx alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
uintx alignment = gch->collector_policy()->space_alignment();
uintx size = _virtual_space.reserved_size();
_max_survivor_size = compute_survivor_size(size, alignment);
_max_eden_size = size - (2*_max_survivor_size);
// allocate the performance counters
GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();
GenCollectorPolicy* gcp = (GenCollectorPolicy*)gch->collector_policy();
// Generation counters -- generation 0, 3 subspaces
_gen_counters = new GenerationCounters("new", 0, 3,
@ -433,7 +435,7 @@ void DefNewGeneration::compute_new_size() {
SpaceDecorator::DontMangle);
MemRegion cmr((HeapWord*)_virtual_space.low(),
(HeapWord*)_virtual_space.high());
Universe::heap()->barrier_set()->resize_covered_region(cmr);
gch->barrier_set()->resize_covered_region(cmr);
if (Verbose && PrintGC) {
size_t new_size_after = _virtual_space.committed_size();
size_t eden_size_after = eden()->capacity();
@ -691,7 +693,7 @@ void DefNewGeneration::collect(bool full,
gc_tracer.report_promotion_failed(_promotion_failed_info);
// Reset the PromotionFailureALot counters.
NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
NOT_PRODUCT(gch->reset_promotion_should_fail();)
}
if (PrintGC && !PrintGCDetails) {
gch->print_heap_change(gch_prev_used);
@ -25,9 +25,9 @@
#ifndef SHARE_VM_MEMORY_DEFNEWGENERATION_INLINE_HPP
#define SHARE_VM_MEMORY_DEFNEWGENERATION_INLINE_HPP
#include "gc_interface/collectedHeap.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/space.hpp"
@ -60,7 +60,7 @@ inline void DefNewGeneration::KeepAliveClosure::do_oop_work(T* p) {
// We could check that p is also in an older generation, but
// dirty cards in the youngest gen are never scanned, so the
// extra check probably isn't worthwhile.
if (Universe::heap()->is_in_reserved(p)) {
if (GenCollectedHeap::heap()->is_in_reserved(p)) {
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
_rs->inline_write_ref_field_gc(p, obj);
}
@ -84,7 +84,7 @@ inline void DefNewGeneration::FastKeepAliveClosure::do_oop_work(T* p) {
// we set a younger_gen card if we have an older->youngest
// generation pointer.
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
if (((HeapWord*)obj < _boundary) && Universe::heap()->is_in_reserved(p)) {
if (((HeapWord*)obj < _boundary) && GenCollectedHeap::heap()->is_in_reserved(p)) {
_rs->inline_write_ref_field_gc(p, obj);
}
}
@ -187,7 +187,7 @@ oop Generation::promote(oop obj, size_t obj_size) {
assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
#ifndef PRODUCT
if (Universe::heap()->promotion_should_fail()) {
if (GenCollectedHeap::heap()->promotion_should_fail()) {
return NULL;
}
#endif // #ifndef PRODUCT
@ -78,12 +78,6 @@ void InstanceRefKlass::oop_verify_on(oop obj, outputStream* st) {
InstanceKlass::oop_verify_on(obj, st);
// Verify referent field
oop referent = java_lang_ref_Reference::referent(obj);
// We should make this general to all heaps
GenCollectedHeap* gch = NULL;
if (Universe::heap()->kind() == CollectedHeap::GenCollectedHeap)
gch = GenCollectedHeap::heap();
if (referent != NULL) {
guarantee(referent->is_oop(), "referent field heap failed");
}
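Taken together, the hunks apply one mechanical rewrite at each collector call site; schematically (an illustrative composite of the pattern above, not a literal excerpt from any single file):

// Before: untyped lookup, manual cast, per-call-site sanity assert.
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

// After: the typed accessor performs the check once, in one place.
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();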