Merge

commit 94677d4faf
@@ -117,8 +117,6 @@ public class JMap extends Tool {
            mode = MODE_HEAP_SUMMARY;
        } else if (modeFlag.equals("-histo")) {
            mode = MODE_HISTOGRAM;
        } else if (modeFlag.equals("-permstat")) {
            mode = MODE_CLSTATS;
        } else if (modeFlag.equals("-clstats")) {
            mode = MODE_CLSTATS;
        } else if (modeFlag.equals("-finalizerinfo")) {
@@ -53,6 +53,7 @@
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "memory/gcLocker.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"

@@ -65,17 +66,19 @@

ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;

ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous) :
ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) :
  _class_loader(h_class_loader()),
  _is_anonymous(is_anonymous), _keep_alive(is_anonymous), // initially
  _metaspace(NULL), _unloading(false), _klasses(NULL),
  _claimed(0), _jmethod_ids(NULL), _handles(NULL), _deallocate_list(NULL),
  _next(NULL), _dependencies(),
  _next(NULL), _dependencies(dependencies),
  _metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true)) {
    // empty
}

void ClassLoaderData::init_dependencies(TRAPS) {
  assert(!Universe::is_fully_initialized(), "should only be called when initializing");
  assert(is_the_null_class_loader_data(), "should only call this for the null class loader");
  _dependencies.init(CHECK);
}

@@ -429,7 +432,7 @@ void ClassLoaderData::free_deallocate_list() {
// These anonymous class loaders are to contain classes used for JSR292
ClassLoaderData* ClassLoaderData::anonymous_class_loader_data(oop loader, TRAPS) {
  // Add a new class loader data to the graph.
  return ClassLoaderDataGraph::add(NULL, loader, CHECK_NULL);
  return ClassLoaderDataGraph::add(loader, true, CHECK_NULL);
}

const char* ClassLoaderData::loader_name() {

@@ -501,19 +504,22 @@ ClassLoaderData* ClassLoaderDataGraph::_head = NULL;
ClassLoaderData* ClassLoaderDataGraph::_unloading = NULL;
ClassLoaderData* ClassLoaderDataGraph::_saved_head = NULL;

// Add a new class loader data node to the list.  Assign the newly created
// ClassLoaderData into the java/lang/ClassLoader object as a hidden field
ClassLoaderData* ClassLoaderDataGraph::add(ClassLoaderData** cld_addr, Handle loader, TRAPS) {
  // Not assigned a class loader data yet.
  // Create one.
  ClassLoaderData* *list_head = &_head;
  ClassLoaderData* next = _head;
ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous, TRAPS) {
  // We need to allocate all the oops for the ClassLoaderData before allocating the
  // actual ClassLoaderData object.
  ClassLoaderData::Dependencies dependencies(CHECK_NULL);

  bool is_anonymous = (cld_addr == NULL);
  ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous);
  No_Safepoint_Verifier no_safepoints; // we mustn't GC until we've installed the
                                       // ClassLoaderData in the graph since the CLD
                                       // contains unhandled oops

  if (cld_addr != NULL) {
  ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous, dependencies);

  if (!is_anonymous) {
    ClassLoaderData** cld_addr = java_lang_ClassLoader::loader_data_addr(loader());
    // First, Atomically set it
    ClassLoaderData* old = (ClassLoaderData*) Atomic::cmpxchg_ptr(cld, cld_addr, NULL);
    if (old != NULL) {

@@ -525,6 +531,9 @@ ClassLoaderData* ClassLoaderDataGraph::add(ClassLoaderData** cld_addr, Handle lo

  // We won the race, and therefore the task of adding the data to the list of
  // class loader data
  ClassLoaderData** list_head = &_head;
  ClassLoaderData* next = _head;

  do {
    cld->set_next(next);
    ClassLoaderData* exchanged = (ClassLoaderData*)Atomic::cmpxchg_ptr(cld, list_head, next);

@@ -537,10 +546,6 @@ ClassLoaderData* ClassLoaderDataGraph::add(ClassLoaderData** cld_addr, Handle lo
                     cld->loader_name());
      tty->print_cr("]");
    }
    // Create dependencies after the CLD is added to the list. Otherwise,
    // the GC will not find the CLD and the _class_loader field will
    // not be updated.
    cld->init_dependencies(CHECK_NULL);
    return cld;
  }
  next = exchanged;

@@ -671,6 +676,8 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure) {
      dead->unload();
      data = data->next();
      // Remove from loader list.
      // This class loader data will no longer be found
      // in the ClassLoaderDataGraph.
      if (prev != NULL) {
        prev->set_next(data);
      } else {

@@ -692,6 +699,7 @@ void ClassLoaderDataGraph::purge() {
    next = purge_me->next();
    delete purge_me;
  }
  Metaspace::purge();
}

// CDS support
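The reordering in ClassLoaderDataGraph::add() above is easiest to see end to end. Below is a minimal sketch of the new control flow, assembled from the hunks in this file (the lost-race cleanup is an assumption, since the hunk truncates right after the cmpxchg); the point is that the Dependencies oops are allocated before the CLD itself, so nothing between constructing the CLD and publishing it in the graph can trigger a GC:

    ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous, TRAPS) {
      ClassLoaderData::Dependencies dependencies(CHECK_NULL);   // may allocate / safepoint
      ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous, dependencies);
      No_Safepoint_Verifier no_safepoints;  // CLD holds unhandled oops from here on
      if (!is_anonymous) {
        // Publish the CLD in the java/lang/ClassLoader oop, racing other threads.
        ClassLoaderData** cld_addr = java_lang_ClassLoader::loader_data_addr(loader());
        ClassLoaderData* old = (ClassLoaderData*) Atomic::cmpxchg_ptr(cld, cld_addr, NULL);
        if (old != NULL) {
          delete cld;      // lost the race; another thread installed a CLD (sketch)
          return old;
        }
      }
      // Lock-free push onto the graph's singly linked list.
      ClassLoaderData* next = _head;
      do {
        cld->set_next(next);
        ClassLoaderData* exchanged = (ClassLoaderData*)Atomic::cmpxchg_ptr(cld, &_head, next);
        if (exchanged == next) return cld;  // we won the race
        next = exchanged;                   // somebody else pushed first; retry
      } while (true);
    }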
@@ -62,7 +62,7 @@ class ClassLoaderDataGraph : public AllStatic {
  // CMS support.
  static ClassLoaderData* _saved_head;

  static ClassLoaderData* add(ClassLoaderData** loader_data_addr, Handle class_loader, TRAPS);
  static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS);
 public:
  static ClassLoaderData* find_or_create(Handle class_loader, TRAPS);
  static void purge();

@@ -100,6 +100,9 @@ class ClassLoaderData : public CHeapObj<mtClass> {
             Thread* THREAD);
   public:
    Dependencies() : _list_head(NULL) {}
    Dependencies(TRAPS) : _list_head(NULL) {
      init(CHECK);
    }
    void add(Handle dependency, TRAPS);
    void init(TRAPS);
    void oops_do(OopClosure* f);

@@ -150,7 +153,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
  void set_next(ClassLoaderData* next) { _next = next; }
  ClassLoaderData* next() const        { return _next; }

  ClassLoaderData(Handle h_class_loader, bool is_anonymous);
  ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies);
  ~ClassLoaderData();

  void set_metaspace(Metaspace* m) { _metaspace = m; }

@@ -190,7 +193,9 @@ class ClassLoaderData : public CHeapObj<mtClass> {
  static void init_null_class_loader_data() {
    assert(_the_null_class_loader_data == NULL, "cannot initialize twice");
    assert(ClassLoaderDataGraph::_head == NULL, "cannot initialize twice");
    _the_null_class_loader_data = new ClassLoaderData((oop)NULL, false);

    // We explicitly initialize the Dependencies object at a later phase in the initialization
    _the_null_class_loader_data = new ClassLoaderData((oop)NULL, false, Dependencies());
    ClassLoaderDataGraph::_head = _the_null_class_loader_data;
    assert(_the_null_class_loader_data->is_the_null_class_loader_data(), "Must be");
    if (DumpSharedSpaces) {
@@ -43,10 +43,9 @@ inline ClassLoaderData *ClassLoaderDataGraph::find_or_create(Handle loader, TRAP
  assert(loader() != NULL, "Must be a class loader");
  // Gets the class loader data out of the java/lang/ClassLoader object, if non-null
  // it's already in the loader_data, so no need to add
  ClassLoaderData** loader_data_addr = java_lang_ClassLoader::loader_data_addr(loader());
  ClassLoaderData* loader_data_id = *loader_data_addr;
  if (loader_data_id) {
    return loader_data_id;
  ClassLoaderData* loader_data = java_lang_ClassLoader::loader_data(loader());
  if (loader_data) {
    return loader_data;
  }
  return ClassLoaderDataGraph::add(loader_data_addr, loader, THREAD);
  return ClassLoaderDataGraph::add(loader, false, THREAD);
}
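Taken together, the hunk above reduces find_or_create() to a plain fast path/slow path; a compressed restatement of the new version (directly from the diff, with comments added):

    inline ClassLoaderData* ClassLoaderDataGraph::find_or_create(Handle loader, TRAPS) {
      // Fast path: the CLD is cached in a hidden field of the java/lang/ClassLoader oop.
      ClassLoaderData* loader_data = java_lang_ClassLoader::loader_data(loader());
      if (loader_data) {
        return loader_data;
      }
      // Slow path: allocate and publish a new CLD (never anonymous from this caller).
      return ClassLoaderDataGraph::add(loader, false, THREAD);
    }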
@@ -2444,8 +2444,7 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
        // initial marking in checkpointRootsInitialWork has been completed
        if (VerifyDuringGC &&
            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
          gclog_or_tty->print("Verify before initial mark: ");
          Universe::verify();
          Universe::verify("Verify before initial mark: ");
        }
        {
          bool res = markFromRoots(false);

@@ -2456,8 +2455,7 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
      case FinalMarking:
        if (VerifyDuringGC &&
            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
          gclog_or_tty->print("Verify before re-mark: ");
          Universe::verify();
          Universe::verify("Verify before re-mark: ");
        }
        checkpointRootsFinal(false, clear_all_soft_refs,
                             init_mark_was_synchronous);

@@ -2468,8 +2466,7 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
        // final marking in checkpointRootsFinal has been completed
        if (VerifyDuringGC &&
            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
          gclog_or_tty->print("Verify before sweep: ");
          Universe::verify();
          Universe::verify("Verify before sweep: ");
        }
        sweep(false);
        assert(_collectorState == Resizing, "Incorrect state");

@@ -2484,8 +2481,7 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
        // The heap has been resized.
        if (VerifyDuringGC &&
            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
          gclog_or_tty->print("Verify before reset: ");
          Universe::verify();
          Universe::verify("Verify before reset: ");
        }
        reset(false);
        assert(_collectorState == Idling, "Collector state should "

@@ -2853,8 +2849,8 @@ class VerifyMarkedClosure: public BitMapClosure {
  bool failed() { return _failed; }
};

bool CMSCollector::verify_after_remark() {
  gclog_or_tty->print(" [Verifying CMS Marking... ");
bool CMSCollector::verify_after_remark(bool silent) {
  if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
  MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
  static bool init = false;

@@ -2915,7 +2911,7 @@ bool CMSCollector::verify_after_remark() {
    warning("Unrecognized value %d for CMSRemarkVerifyVariant",
            CMSRemarkVerifyVariant);
  }
  gclog_or_tty->print(" done] ");
  if (!silent) gclog_or_tty->print(" done] ");
  return true;
}
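All of the verify call sites in this commit follow the same mechanical rewrite: the label string that used to be printed separately now travels into the verification call itself. Assuming the new overload is Universe::verify(const char*), as the hunks suggest, the pattern is:

    // before: two steps, and the label always hits the log
    gclog_or_tty->print("Verify before re-mark: ");
    Universe::verify();
    // after: one call, and the label can be suppressed when verifying silently
    Universe::verify("Verify before re-mark: ");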
@@ -3426,8 +3422,9 @@ bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
  assert_locked_or_safepoint(Heap_lock);
  assert_lock_strong(freelistLock());
  // XXX Fix when compaction is implemented.
  if (PrintGCDetails && Verbose) {
    warning("Shrinking of CMS not yet implemented");
  }
  return;
}

@@ -6010,26 +6007,23 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
                                  &cmsDrainMarkingStackClosure,
                                  NULL);
    }
    verify_work_stacks_empty();
  }

  // This is the point where the entire marking should have completed.
  verify_work_stacks_empty();

  if (should_unload_classes()) {
    {
      TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);

      // Follow SystemDictionary roots and unload classes
      // Unload classes and purge the SystemDictionary.
      bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);

      // Follow CodeCache roots and unload any methods marked for unloading
      // Unload nmethods.
      CodeCache::do_unloading(&_is_alive_closure, purged_class);

      cmsDrainMarkingStackClosure.do_void();
      verify_work_stacks_empty();

      // Update subklass/sibling/implementor links in KlassKlass descendants
      // Prune dead klasses from subklass/sibling/implementor lists.
      Klass::clean_weak_klass_links(&_is_alive_closure);
      // Nothing should have been pushed onto the working stacks.
      verify_work_stacks_empty();
    }

    {

@@ -6043,11 +6037,10 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
    // Need to check if we really scanned the StringTable.
    if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
      TraceTime t("scrub string table", PrintGCDetails, false, gclog_or_tty);
      // Now clean up stale oops in StringTable
      // Delete entries for dead interned strings.
      StringTable::unlink(&_is_alive_closure);
    }

  verify_work_stacks_empty();
  // Restore any preserved marks as a result of mark stack or
  // work queue overflow
  restore_preserved_marks_if_any();  // done single-threaded for now
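The reworded comments in refProcessingWork() spell out a fixed class-unloading sequence, and the same sequence recurs in the mark-sweep phase-1 hunks later in this commit. As a sketch (the closure name varies per collector):

    // 1. Marking is complete; purge SystemDictionary entries for dead classes/loaders.
    bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
    // 2. Unload nmethods that depended on the purged classes.
    CodeCache::do_unloading(&_is_alive_closure, purged_class);
    // 3. Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(&_is_alive_closure);
    // 4. Delete entries for dead interned strings, then unreferenced symbols.
    StringTable::unlink(&_is_alive_closure);
    SymbolTable::unlink();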
@@ -990,7 +990,7 @@ class CMSCollector: public CHeapObj<mtGC> {

  // debugging
  void verify();
  bool verify_after_remark();
  bool verify_after_remark(bool silent = VerifySilently);
  void verify_ok_to_terminate() const PRODUCT_RETURN;
  void verify_work_stacks_empty() const PRODUCT_RETURN;
  void verify_overflow_empty() const PRODUCT_RETURN;
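Because the new parameter defaults to the VerifySilently flag, existing callers keep their behavior while silent verification becomes possible. A hypothetical call site, for illustration only:

    // prints " [Verifying CMS Marking...  done] " around the checks
    bool ok = collector->verify_after_remark(/* silent */ false);
    // runs exactly the same checks with no tty output
    bool quiet_ok = collector->verify_after_remark(true);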
@@ -1273,10 +1273,9 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    gclog_or_tty->print(" VerifyDuringGC:(before)");
    Universe::heap()->prepare_for_verify();
    Universe::verify(/* silent */ false,
                     /* option */ VerifyOption_G1UsePrevMarking);
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }

  G1CollectorPolicy* g1p = g1h->g1_policy();

@@ -1300,10 +1299,9 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      gclog_or_tty->print(" VerifyDuringGC:(overflow)");
      Universe::heap()->prepare_for_verify();
      Universe::verify(/* silent */ false,
                       /* option */ VerifyOption_G1UsePrevMarking);
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting

@@ -1323,10 +1321,9 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      gclog_or_tty->print(" VerifyDuringGC:(after)");
      Universe::heap()->prepare_for_verify();
      Universe::verify(/* silent */ false,
                       /* option */ VerifyOption_G1UseNextMarking);
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed

@@ -1972,10 +1969,9 @@ void ConcurrentMark::cleanup() {

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    gclog_or_tty->print(" VerifyDuringGC:(before)");
    Universe::heap()->prepare_for_verify();
    Universe::verify(/* silent */ false,
                     /* option */ VerifyOption_G1UsePrevMarking);
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }

  G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();

@@ -2127,10 +2123,9 @@ void ConcurrentMark::cleanup() {

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    gclog_or_tty->print(" VerifyDuringGC:(after)");
    Universe::heap()->prepare_for_verify();
    Universe::verify(/* silent */ false,
                     /* option */ VerifyOption_G1UsePrevMarking);
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(after)");
  }

  g1h->verify_region_sets_optional();
@@ -1271,9 +1271,8 @@ double G1CollectedHeap::verify(bool guard, const char* msg) {
  if (guard && total_collections() >= VerifyGCStartAt) {
    double verify_start = os::elapsedTime();
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(msg);
    prepare_for_verify();
    Universe::verify(false /* silent */, VerifyOption_G1UsePrevMarking);
    Universe::verify(VerifyOption_G1UsePrevMarking, msg);
    verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
  }

@@ -1304,7 +1303,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,

  print_heap_before_gc();

  size_t metadata_prev_used = MetaspaceAux::used_in_bytes();
  size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();

  HRSPhaseSetter x(HRSPhaseFullGC);
  verify_region_sets_optional();

@@ -1425,6 +1424,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,

      // Delete metaspaces for unloaded class loaders and clean up loader_data graph
      ClassLoaderDataGraph::purge();
      MetaspaceAux::verify_metrics();

      // Note: since we've just done a full GC, concurrent
      // marking is no longer active. Therefore we need not

@@ -1955,13 +1955,6 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
  assert(n_rem_sets > 0, "Invariant.");

  HeapRegionRemSetIterator** iter_arr =
    NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues, mtGC);
  for (int i = 0; i < n_queues; i++) {
    iter_arr[i] = new HeapRegionRemSetIterator();
  }
  _rem_set_iterator = iter_arr;

  _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
  _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);

@@ -5079,10 +5072,9 @@ g1_process_strong_roots(bool is_scavenging,
}

void
G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
                                       OopClosure* non_root_closure) {
G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) {
  CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
  SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
  SharedHeap::process_weak_roots(root_closure, &roots_in_blobs);
}

// Weak Reference Processing support
@@ -786,9 +786,6 @@ protected:
  // concurrently after the collection.
  DirtyCardQueueSet _dirty_card_queue_set;

  // The Heap Region Rem Set Iterator.
  HeapRegionRemSetIterator** _rem_set_iterator;

  // The closure used to refine a single card.
  RefineCardTableEntryClosure* _refine_cte_cl;

@@ -827,8 +824,7 @@ protected:
  // Apply "blk" to all the weak roots of the system.  These include
  // JNI weak roots, the code cache, system dictionary, symbol table,
  // string table, and referents of reachable weak refs.
  void g1_process_weak_roots(OopClosure* root_closure,
                             OopClosure* non_root_closure);
  void g1_process_weak_roots(OopClosure* root_closure);

  // Frees a non-humongous region by initializing its contents and
  // adding it to the free list that's passed as a parameter (this is

@@ -1114,15 +1110,6 @@ public:
  G1RemSet* g1_rem_set() const { return _g1_rem_set; }
  ModRefBarrierSet* mr_bs() const { return _mr_bs; }

  // The rem set iterator.
  HeapRegionRemSetIterator* rem_set_iterator(int i) {
    return _rem_set_iterator[i];
  }

  HeapRegionRemSetIterator* rem_set_iterator() {
    return _rem_set_iterator[0];
  }

  unsigned get_gc_time_stamp() {
    return _gc_time_stamp;
  }
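Dropping the shared _rem_set_iterator array and the non_root_closure parameter are two sides of the same cleanup: per-worker state moves onto the stack, and weak-root processing now takes a single adjusting closure. The call shape, before and after, taken from the G1MarkSweep hunk later in this commit:

    // before: separate closures for root and non-root weak oops
    g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure,
                               &GenMarkSweep::adjust_pointer_closure);
    // after: one closure adjusts every weak root
    g1h->g1_process_weak_roots(&GenMarkSweep::adjust_pointer_closure);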
@@ -144,33 +144,28 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                      &GenMarkSweep::follow_stack_closure,
                                      NULL);

  // Follow system dictionary roots and unload classes

  // This is the point where the entire marking should have completed.
  assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed");

  // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
  assert(GenMarkSweep::_marking_stack.is_empty(),
         "stack should be empty by now");

  // Follow code cache roots (has to be done after system dictionary,
  // assumes all live klasses are marked)
  // Unload nmethods.
  CodeCache::do_unloading(&GenMarkSweep::is_alive, purged_class);
  GenMarkSweep::follow_stack();

  // Update subklass/sibling/implementor links of live klasses
  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(&GenMarkSweep::is_alive);
  assert(GenMarkSweep::_marking_stack.is_empty(),
         "stack should be empty by now");

  // Visit interned string tables and delete unmarked oops
  // Delete entries for dead interned strings.
  StringTable::unlink(&GenMarkSweep::is_alive);

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  assert(GenMarkSweep::_marking_stack.is_empty(),
         "stack should be empty by now");

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
    gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
    Universe::heap()->prepare_for_verify();
    // Note: we can verify only the heap here. When an object is
    // marked, the previous value of the mark word (including

@@ -182,13 +177,15 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
    // fail. At the end of the GC, the original mark word values
    // (including hash values) are restored to the appropriate
    // objects.
    Universe::heap()->verify(/* silent */ false,
                             /* option */ VerifyOption_G1UseMarkWord);

    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    if (!VerifySilently) {
      gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
    }
    Universe::heap()->verify(VerifySilently, VerifyOption_G1UseMarkWord);
    if (!VerifySilently) {
      gclog_or_tty->print_cr("]");
    }
  }
}

class G1PrepareCompactClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;

@@ -308,17 +305,16 @@ void G1MarkSweep::mark_sweep_phase3() {
  sh->process_strong_roots(true,   // activate StrongRootsScope
                           false,  // not scavenging.
                           SharedHeap::SO_AllClasses,
                           &GenMarkSweep::adjust_root_pointer_closure,
                           &GenMarkSweep::adjust_pointer_closure,
                           NULL,   // do not touch code cache here
                           &GenMarkSweep::adjust_klass_closure);

  assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
  g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);
  g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure);

  // Now adjust pointers in remaining weak roots. (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure,
                             &GenMarkSweep::adjust_pointer_closure);
  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_pointer_closure);

  GenMarkSweep::adjust_marks();

@@ -169,14 +169,13 @@ public:
    // _try_claimed || r->claim_iter()
    // is true: either we're supposed to work on claimed-but-not-complete
    // regions, or we successfully claimed the region.
    HeapRegionRemSetIterator* iter = _g1h->rem_set_iterator(_worker_i);
    hrrs->init_iterator(iter);
    HeapRegionRemSetIterator iter(hrrs);
    size_t card_index;

    // We claim cards in blocks so as to reduce the contention. The block size is determined by
    // the G1RSetScanBlockSize parameter.
    size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
    for (size_t current_card = 0; iter->has_next(card_index); current_card++) {
    for (size_t current_card = 0; iter.has_next(card_index); current_card++) {
      if (current_card >= jump_to_card + _block_size) {
        jump_to_card = hrrs->iter_claimed_next(_block_size);
      }
@@ -877,14 +877,9 @@ bool HeapRegionRemSet::iter_is_complete() {
  return _iter_state == Complete;
}

void HeapRegionRemSet::init_iterator(HeapRegionRemSetIterator* iter) const {
  iter->initialize(this);
}

#ifndef PRODUCT
void HeapRegionRemSet::print() const {
  HeapRegionRemSetIterator iter;
  init_iterator(&iter);
  HeapRegionRemSetIterator iter(this);
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start =

@@ -928,35 +923,23 @@ void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,

//-------------------- Iteration --------------------

HeapRegionRemSetIterator::
HeapRegionRemSetIterator() :
  _hrrs(NULL),
HeapRegionRemSetIterator::HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs) :
  _hrrs(hrrs),
  _g1h(G1CollectedHeap::heap()),
  _bosa(NULL),
  _sparse_iter() { }

void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) {
  _hrrs = hrrs;
  _coarse_map = &_hrrs->_other_regions._coarse_map;
  _fine_grain_regions = _hrrs->_other_regions._fine_grain_regions;
  _bosa = _hrrs->bosa();

  _is = Sparse;
  _coarse_map(&hrrs->_other_regions._coarse_map),
  _fine_grain_regions(hrrs->_other_regions._fine_grain_regions),
  _bosa(hrrs->bosa()),
  _is(Sparse),
  // Set these values so that we increment to the first region.
  _coarse_cur_region_index = -1;
  _coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1);

  _cur_region_cur_card = 0;

  _fine_array_index = -1;
  _fine_cur_prt = NULL;

  _n_yielded_coarse = 0;
  _n_yielded_fine = 0;
  _n_yielded_sparse = 0;

  _sparse_iter.init(&hrrs->_other_regions._sparse_table);
}
  _coarse_cur_region_index(-1),
  _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1),
  _cur_region_cur_card(0),
  _fine_array_index(-1),
  _fine_cur_prt(NULL),
  _n_yielded_coarse(0),
  _n_yielded_fine(0),
  _n_yielded_sparse(0),
  _sparse_iter(&hrrs->_other_regions._sparse_table) {}

bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
  if (_hrrs->_other_regions._n_coarse_entries == 0) return false;

@@ -1209,8 +1192,7 @@ void HeapRegionRemSet::test() {
  hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());

  // Now, does iteration yield these three?
  HeapRegionRemSetIterator iter;
  hrrs->init_iterator(&iter);
  HeapRegionRemSetIterator iter(hrrs);
  size_t sum = 0;
  size_t card_index;
  while (iter.has_next(card_index)) {

@@ -281,9 +281,6 @@ public:
    return (_iter_state == Unclaimed) && (_iter_claimed == 0);
  }

  // Initialize the given iterator to iterate over this rem set.
  void init_iterator(HeapRegionRemSetIterator* iter) const;

  // The actual # of bytes this hr_remset takes up.
  size_t mem_size() {
    return _other_regions.mem_size()

@@ -345,9 +342,9 @@ public:
#endif
};

class HeapRegionRemSetIterator : public CHeapObj<mtGC> {
class HeapRegionRemSetIterator : public StackObj {

  // The region over which we're iterating.
  // The region RSet over which we're iterating.
  const HeapRegionRemSet* _hrrs;

  // Local caching of HRRS fields.

@@ -362,8 +359,10 @@ class HeapRegionRemSetIterator : public CHeapObj<mtGC> {
  size_t _n_yielded_coarse;
  size_t _n_yielded_sparse;

  // If true we're iterating over the coarse table; if false the fine
  // table.
  // Indicates what granularity of table that we're currently iterating over.
  // We start iterating over the sparse table, progress to the fine grain
  // table, and then finish with the coarse table.
  // See HeapRegionRemSetIterator::has_next().
  enum IterState {
    Sparse,
    Fine,

@@ -403,9 +402,7 @@ class HeapRegionRemSetIterator : public CHeapObj<mtGC> {
 public:
  // We require an iterator to be initialized before use, so the
  // constructor does little.
  HeapRegionRemSetIterator();

  void initialize(const HeapRegionRemSet* hrrs);
  HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs);

  // If there remains one or more cards to be yielded, returns true and
  // sets "card_index" to one of those cards (which is then considered
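The iterator rework above replaces a heap-allocated (CHeapObj), two-phase-initialized object with a cheap stack object whose constructor does all the setup. Typical use after this patch, where process_card() is a hypothetical stand-in for the loop body:

    HeapRegionRemSetIterator iter(hrrs);  // fully initialized, lives on the stack
    size_t card_index;
    while (iter.has_next(card_index)) {
      process_card(card_index);           // hypothetical helper, not part of the patch
    }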
@@ -35,10 +35,6 @@

#define UNROLL_CARD_LOOPS 1

void SparsePRT::init_iterator(SparsePRTIter* sprt_iter) {
  sprt_iter->init(this);
}

void SparsePRTEntry::init(RegionIdx_t region_ind) {
  _region_ind = region_ind;
  _next_index = NullEntry;

@@ -192,18 +192,11 @@ class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
  size_t compute_card_ind(CardIdx_t ci);

 public:
  RSHashTableIter() :
    _tbl_ind(RSHashTable::NullEntry),
  RSHashTableIter(RSHashTable* rsht) :
    _tbl_ind(RSHashTable::NullEntry), // So that first increment gets to 0.
    _bl_ind(RSHashTable::NullEntry),
    _card_ind((SparsePRTEntry::cards_num() - 1)),
    _rsht(NULL) {}

  void init(RSHashTable* rsht) {
    _rsht = rsht;
    _tbl_ind = -1; // So that first increment gets to 0.
    _bl_ind = RSHashTable::NullEntry;
    _card_ind = (SparsePRTEntry::cards_num() - 1);
  }
    _rsht(rsht) {}

  bool has_next(size_t& card_index);
};

@@ -284,8 +277,6 @@ public:
  static void cleanup_all();
  RSHashTable* cur() const { return _cur; }

  void init_iterator(SparsePRTIter* sprt_iter);

  static void add_to_expanded_list(SparsePRT* sprt);
  static SparsePRT* get_from_expanded_list();

@@ -321,9 +312,9 @@ public:

class SparsePRTIter: public RSHashTableIter {
 public:
  void init(const SparsePRT* sprt) {
    RSHashTableIter::init(sprt->cur());
  }
  SparsePRTIter(const SparsePRT* sprt) :
    RSHashTableIter(sprt->cur()) {}

  bool has_next(size_t& card_index) {
    return RSHashTableIter::has_next(card_index);
  }
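SparsePRTIter follows the same construct-don't-init pattern as the rem set iterator above: the constructor replaces the old init_iterator()/init() pair. A brief usage sketch:

    SparsePRTIter iter(&sprt);   // was: SparsePRTIter iter; sprt.init_iterator(&iter);
    size_t card_index;
    while (iter.has_next(card_index)) {
      // visit card_index
    }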
@@ -138,8 +138,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify();
    Universe::verify(" VerifyBeforeGC:");
  }

  // Verify object start arrays

@@ -177,7 +176,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  size_t prev_used = heap->used();

  // Capture metadata size before collection for sizing.
  size_t metadata_prev_used = MetaspaceAux::used_in_bytes();
  size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();

  // For PrintGCDetails
  size_t old_gen_prev_used = old_gen->used_in_bytes();

@@ -238,6 +237,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge();
    MetaspaceAux::verify_metrics();

    BiasedLocking::restore_marks();
    Threads::gc_epilogue();

@@ -340,8 +340,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify();
    Universe::verify(" VerifyAfterGC:");
  }

  // Re-verify object start arrays

@@ -518,23 +517,23 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
      is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
  }

  // Follow system dictionary roots and unload classes
  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Follow code cache roots
  // Unload nmethods.
  CodeCache::do_unloading(is_alive_closure(), purged_class);
  follow_stack(); // Flush marking stack

  // Update subklass/sibling/implementor links of live klasses
  Klass::clean_weak_klass_links(&is_alive);
  assert(_marking_stack.is_empty(), "just drained");
  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(is_alive_closure());

  // Visit interned string tables and delete unmarked oops
  // Delete entries for dead interned strings.
  StringTable::unlink(is_alive_closure());

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  assert(_marking_stack.is_empty(), "stack should be empty by now");
}

@@ -583,28 +582,27 @@ void PSMarkSweep::mark_sweep_phase3() {
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_root_pointer_closure());
  JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
  CLDToOopClosure adjust_from_cld(adjust_root_pointer_closure());
  Threads::oops_do(adjust_root_pointer_closure(), &adjust_from_cld, NULL);
  ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
  FlatProfiler::oops_do(adjust_root_pointer_closure());
  Management::oops_do(adjust_root_pointer_closure());
  JvmtiExport::oops_do(adjust_root_pointer_closure());
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  FlatProfiler::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  // SO_AllClasses
  SystemDictionary::oops_do(adjust_root_pointer_closure());
  ClassLoaderDataGraph::oops_do(adjust_root_pointer_closure(), adjust_klass_closure(), true);
  //CodeCache::scavenge_root_nmethods_oops_do(adjust_root_pointer_closure());
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);

  // Now adjust pointers in remaining weak roots. (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());
  JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());

  CodeCache::oops_do(adjust_pointer_closure());
  StringTable::oops_do(adjust_root_pointer_closure());
  ref_processor()->weak_oops_do(adjust_root_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_root_pointer_closure());
  StringTable::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());

  adjust_marks();

@@ -44,7 +44,6 @@ class PSMarkSweep : public MarkSweep {
  static KlassClosure* follow_klass_closure() { return &MarkSweep::follow_klass_closure; }
  static VoidClosure* follow_stack_closure() { return (VoidClosure*)&MarkSweep::follow_stack_closure; }
  static OopClosure* adjust_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_pointer_closure; }
  static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_root_pointer_closure; }
  static KlassClosure* adjust_klass_closure() { return &MarkSweep::adjust_klass_closure; }
  static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&MarkSweep::is_alive; }
@@ -787,12 +787,11 @@ bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap(
void PSParallelCompact::KeepAliveClosure::do_oop(oop* p)       { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }

PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true);
PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false);
PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure;
PSParallelCompact::AdjustKlassClosure PSParallelCompact::_adjust_klass_closure;

void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p, _is_root); }
void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p); }
void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p); }

void PSParallelCompact::FollowStackClosure::do_void() { _compaction_manager->follow_marking_stacks(); }

@@ -805,7 +804,7 @@ void PSParallelCompact::FollowKlassClosure::do_klass(Klass* klass) {
  klass->oops_do(_mark_and_push_closure);
}
void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) {
  klass->oops_do(&PSParallelCompact::_adjust_root_pointer_closure);
  klass->oops_do(&PSParallelCompact::_adjust_pointer_closure);
}

void PSParallelCompact::post_initialize() {

@@ -892,7 +891,7 @@ public:
    _heap_used      = heap->used();
    _young_gen_used = heap->young_gen()->used_in_bytes();
    _old_gen_used   = heap->old_gen()->used_in_bytes();
    _metadata_used  = MetaspaceAux::used_in_bytes();
    _metadata_used  = MetaspaceAux::allocated_used_bytes();
  };

  size_t heap_used() const      { return _heap_used; }

@@ -967,8 +966,7 @@ void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify();
    Universe::verify(" VerifyBeforeGC:");
  }

  // Verify object start arrays

@@ -1027,6 +1025,7 @@ void PSParallelCompact::post_compact()

  // Delete metaspaces for unloaded class loaders and clean up loader_data graph
  ClassLoaderDataGraph::purge();
  MetaspaceAux::verify_metrics();

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();
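verify_metrics() is added after every full collection in this commit (G1, PSMarkSweep, PSParallelCompact, GenCollectedHeap), always in the same pairing:

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge();
    MetaspaceAux::verify_metrics();  // new: cross-check metaspace accounting after the purge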
@@ -2168,8 +2167,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify();
    Universe::verify(" VerifyAfterGC:");
  }

  // Re-verify object start arrays

@@ -2356,22 +2354,24 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
  }

  TraceTime tm_c("class unloading", print_phases(), true, gclog_or_tty);

  // This is the point where the entire marking should have completed.
  assert(cm->marking_stacks_empty(), "Marking should have completed");

  // Follow system dictionary roots and unload classes.
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Follow code cache roots.
  // Unload nmethods.
  CodeCache::do_unloading(is_alive_closure(), purged_class);
  cm->follow_marking_stacks(); // Flush marking stack.

  // Update subklass/sibling/implementor links of live klasses
  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(is_alive_closure());

  // Visit interned string tables and delete unmarked oops
  // Delete entries for dead interned strings.
  StringTable::unlink(is_alive_closure());

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  assert(cm->marking_stacks_empty(), "marking stacks should be empty");
}

void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {

@@ -2398,7 +2398,7 @@ void PSParallelCompact::follow_class_loader(ParCompactionManager* cm,

void PSParallelCompact::adjust_class_loader(ParCompactionManager* cm,
                                            ClassLoaderData* cld) {
  cld->oops_do(PSParallelCompact::adjust_root_pointer_closure(),
  cld->oops_do(PSParallelCompact::adjust_pointer_closure(),
               PSParallelCompact::adjust_klass_closure(),
               true);
}

@@ -2419,32 +2419,31 @@ void PSParallelCompact::adjust_roots() {
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_root_pointer_closure());
  JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
  CLDToOopClosure adjust_from_cld(adjust_root_pointer_closure());
  Threads::oops_do(adjust_root_pointer_closure(), &adjust_from_cld, NULL);
  ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
  FlatProfiler::oops_do(adjust_root_pointer_closure());
  Management::oops_do(adjust_root_pointer_closure());
  JvmtiExport::oops_do(adjust_root_pointer_closure());
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  FlatProfiler::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  // SO_AllClasses
  SystemDictionary::oops_do(adjust_root_pointer_closure());
  ClassLoaderDataGraph::oops_do(adjust_root_pointer_closure(), adjust_klass_closure(), true);
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);

  // Now adjust pointers in remaining weak roots. (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());
  JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());

  CodeCache::oops_do(adjust_pointer_closure());
  StringTable::oops_do(adjust_root_pointer_closure());
  ref_processor()->weak_oops_do(adjust_root_pointer_closure());
  StringTable::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  // Roots were visited so references into the young gen in roots
  // may have been scanned.  Process them also.
  // Should the reference processor have a span that excludes
  // young gen objects?
  PSScavenge::reference_processor()->weak_oops_do(
                                              adjust_root_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
}

void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
@@ -799,16 +799,6 @@ class PSParallelCompact : AllStatic {
    virtual void do_oop(narrowOop* p);
  };

  // Current unused
  class FollowRootClosure: public OopsInGenClosure {
   private:
    ParCompactionManager* _compaction_manager;
   public:
    FollowRootClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  class FollowStackClosure: public VoidClosure {
   private:
    ParCompactionManager* _compaction_manager;

@@ -818,10 +808,7 @@ class PSParallelCompact : AllStatic {
  };

  class AdjustPointerClosure: public OopClosure {
   private:
    bool _is_root;
   public:
    AdjustPointerClosure(bool is_root) : _is_root(is_root) { }
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
    // do not walk from thread stacks to the code cache on this phase

@@ -838,7 +825,6 @@ class PSParallelCompact : AllStatic {
  friend class AdjustPointerClosure;
  friend class AdjustKlassClosure;
  friend class FollowKlassClosure;
  friend class FollowRootClosure;
  friend class InstanceClassLoaderKlass;
  friend class RefProcTaskProxy;

@@ -853,7 +839,6 @@ class PSParallelCompact : AllStatic {
  static IsAliveClosure   _is_alive_closure;
  static SpaceInfo        _space_info[last_space_id];
  static bool             _print_phases;
  static AdjustPointerClosure _adjust_root_pointer_closure;
  static AdjustPointerClosure _adjust_pointer_closure;
  static AdjustKlassClosure   _adjust_klass_closure;

@@ -889,9 +874,6 @@ class PSParallelCompact : AllStatic {
  static void marking_phase(ParCompactionManager* cm,
                            bool maximum_heap_compaction);

  template <class T> static inline void adjust_pointer(T* p, bool is_root);
  static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); }

  template <class T>
  static inline void follow_root(ParCompactionManager* cm, T* p);

@@ -1046,7 +1028,6 @@ class PSParallelCompact : AllStatic {

  // Closure accessors
  static OopClosure* adjust_pointer_closure()      { return (OopClosure*)&_adjust_pointer_closure; }
  static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&_adjust_root_pointer_closure; }
  static KlassClosure* adjust_klass_closure()      { return (KlassClosure*)&_adjust_klass_closure; }
  static BoolObjectClosure* is_alive_closure()     { return (BoolObjectClosure*)&_is_alive_closure; }

@@ -1067,6 +1048,7 @@ class PSParallelCompact : AllStatic {
  // Check mark and maybe push on marking stack
  template <class T> static inline void mark_and_push(ParCompactionManager* cm,
                                                      T* p);
  template <class T> static inline void adjust_pointer(T* p);

  static void follow_klass(ParCompactionManager* cm, Klass* klass);
  static void adjust_klass(ParCompactionManager* cm, Klass* klass);

@@ -1151,9 +1133,6 @@ class PSParallelCompact : AllStatic {
  static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
  static ParallelCompactData& summary_data() { return _summary_data; }

  static inline void adjust_pointer(oop* p)       { adjust_pointer(p, false); }
  static inline void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); }

  // Reference Processing
  static ReferenceProcessor* const ref_processor() { return _ref_processor; }

@@ -1230,7 +1209,7 @@ inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
}

template <class T>
inline void PSParallelCompact::adjust_pointer(T* p, bool isroot) {
inline void PSParallelCompact::adjust_pointer(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
|
||||
if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
|
||||
HandleMark hm; // Discard invalid handles created during verification
|
||||
gclog_or_tty->print(" VerifyBeforeGC:");
|
||||
Universe::verify();
|
||||
Universe::verify(" VerifyBeforeGC:");
|
||||
}
|
||||
|
||||
{
|
||||
@ -638,8 +637,7 @@ bool PSScavenge::invoke_no_policy() {
|
||||
|
||||
if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
|
||||
HandleMark hm; // Discard invalid handles created during verification
|
||||
gclog_or_tty->print(" VerifyAfterGC:");
|
||||
Universe::verify();
|
||||
Universe::verify(" VerifyAfterGC:");
|
||||
}
|
||||
|
||||
heap->print_heap_after_gc();
|
||||
|
@@ -81,7 +81,7 @@ void MarkSweep::follow_class_loader(ClassLoaderData* cld) {
}

void MarkSweep::adjust_class_loader(ClassLoaderData* cld) {
  cld->oops_do(&MarkSweep::adjust_root_pointer_closure, &MarkSweep::adjust_klass_closure, true);
  cld->oops_do(&MarkSweep::adjust_pointer_closure, &MarkSweep::adjust_klass_closure, true);
}

@@ -121,11 +121,10 @@ void MarkSweep::preserve_mark(oop obj, markOop mark) {
  }
}

MarkSweep::AdjustPointerClosure MarkSweep::adjust_root_pointer_closure(true);
MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure(false);
MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure;

void MarkSweep::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p, _is_root); }
void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
void MarkSweep::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p); }
void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p); }

void MarkSweep::adjust_marks() {
  assert( _preserved_oop_stack.size() == _preserved_mark_stack.size(),

@@ -80,10 +80,7 @@ class MarkSweep : AllStatic {
  };

  class AdjustPointerClosure: public OopsInGenClosure {
   private:
    bool _is_root;
   public:
    AdjustPointerClosure(bool is_root) : _is_root(is_root) {}
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

@@ -146,7 +143,6 @@ class MarkSweep : AllStatic {
  static MarkAndPushClosure   mark_and_push_closure;
  static FollowKlassClosure   follow_klass_closure;
  static FollowStackClosure   follow_stack_closure;
  static AdjustPointerClosure adjust_root_pointer_closure;
  static AdjustPointerClosure adjust_pointer_closure;
  static AdjustKlassClosure   adjust_klass_closure;

@@ -179,12 +175,7 @@ class MarkSweep : AllStatic {
  static void adjust_marks();   // Adjust the pointers in the preserved marks table
  static void restore_marks();  // Restore the marks that we saved in preserve_mark

  template <class T> static inline void adjust_pointer(T* p, bool isroot);

  static void adjust_root_pointer(oop* p)  { adjust_pointer(p, true); }
  static void adjust_pointer(oop* p)       { adjust_pointer(p, false); }
  static void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); }

  template <class T> static inline void adjust_pointer(T* p);
};

class PreservedMark VALUE_OBJ_CLASS_SPEC {
@@ -76,7 +76,7 @@ void MarkSweep::push_objarray(oop obj, size_t index) {
  _objarray_stack.push(task);
}

template <class T> inline void MarkSweep::adjust_pointer(T* p, bool isroot) {
template <class T> inline void MarkSweep::adjust_pointer(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
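Nothing in the remaining code consults the _is_root flag, which is why this commit can fold adjust_root_pointer_closure into adjust_pointer_closure across MarkSweep, PSMarkSweep, PSParallelCompact, and the G1/Gen callers above. After the change a single template covers both cases; body as in the hunk above, with the relocation step past the shown lines only summarized in a comment:

    template <class T> inline void MarkSweep::adjust_pointer(T* p) {
      T heap_oop = oopDesc::load_heap_oop(p);
      if (!oopDesc::is_null(heap_oop)) {
        oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
        // ... relocate *p to obj's forwarded location (rest of the function unchanged)
      }
    }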
@@ -225,7 +225,10 @@ void VM_CollectForMetadataAllocation::doit() {
      gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
    }
    heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
    _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
    // After a GC try to allocate without expanding.  Could fail
    // and expansion will be tried below.
    _result =
      _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  }
  if (_result == NULL && !UseConcMarkSweepGC /* CMS already tried */) {
    // If still failing, allow the Metaspace to expand.
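The added comment documents a deliberate retry ladder in VM_CollectForMetadataAllocation::doit(). Sketched below; the first step and the expansion step are assumptions inferred from the visible context, not shown in the hunk:

    // sketch of the allocation ladder (steps outside the hunk are assumed)
    _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
    if (_result == NULL) {
      heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
      // After a GC try to allocate without expanding.  Could fail
      // and expansion will be tried below.
      _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
    }
    if (_result == NULL && !UseConcMarkSweepGC /* CMS already tried */) {
      // If still failing, allow the Metaspace to expand (per the trailing context).
    }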
@@ -238,8 +238,8 @@ void FileMapInfo::write_header() {

void FileMapInfo::write_space(int i, Metaspace* space, bool read_only) {
  align_file_position();
  size_t used = space->used_words(Metaspace::NonClassType) * BytesPerWord;
  size_t capacity = space->capacity_words(Metaspace::NonClassType) * BytesPerWord;
  size_t used = space->used_bytes_slow(Metaspace::NonClassType);
  size_t capacity = space->capacity_bytes_slow(Metaspace::NonClassType);
  struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
  write_region(i, (char*)space->bottom(), used, capacity, read_only, false);
}
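The filemap change is more than a rename: the old code converted words to bytes by hand, while the new helpers return bytes directly (and, judging by the _slow suffix, presumably recompute rather than read cached totals):

    // before: manual word-to-byte conversion
    size_t used = space->used_words(Metaspace::NonClassType) * BytesPerWord;
    // after: the helper already returns bytes
    size_t used = space->used_bytes_slow(Metaspace::NonClassType);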
@@ -377,7 +377,7 @@ void GenCollectedHeap::do_collection(bool full,

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());

  const size_t metadata_prev_used = MetaspaceAux::used_in_bytes();
  const size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();

  print_heap_before_gc();

@@ -447,8 +447,7 @@ void GenCollectedHeap::do_collection(bool full,
            prepare_for_verify();
            prepared_for_verification = true;
          }
          gclog_or_tty->print(" VerifyBeforeGC:");
          Universe::verify();
          Universe::verify(" VerifyBeforeGC:");
        }
        COMPILER2_PRESENT(DerivedPointerTable::clear());

@@ -519,8 +518,7 @@ void GenCollectedHeap::do_collection(bool full,
        if (VerifyAfterGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          gclog_or_tty->print(" VerifyAfterGC:");
          Universe::verify();
          Universe::verify(" VerifyAfterGC:");
        }

        if (PrintGCDetails) {

@@ -556,6 +554,7 @@ void GenCollectedHeap::do_collection(bool full,
  if (complete) {
    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge();
    MetaspaceAux::verify_metrics();
    // Resize the metaspace capacity after full collections
    MetaspaceGC::compute_new_size();
    update_full_collections_completed();

@@ -633,9 +632,8 @@ gen_process_strong_roots(int level,
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
                                              CodeBlobClosure* code_roots,
                                              OopClosure* non_root_closure) {
  SharedHeap::process_weak_roots(root_closure, code_roots, non_root_closure);
                                              CodeBlobClosure* code_roots) {
  SharedHeap::process_weak_roots(root_closure, code_roots);
  // "Local" "weak" refs
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor()->weak_oops_do(root_closure);

@@ -432,8 +432,7 @@ public:
  // JNI weak roots, the code cache, system dictionary, symbol table,
  // string table, and referents of reachable weak refs.
  void gen_process_weak_roots(OopClosure* root_closure,
                              CodeBlobClosure* code_roots,
                              OopClosure* non_root_closure);
                              CodeBlobClosure* code_roots);

  // Set the saved marks of generations, if that makes sense.
  // In particular, if any generation might iterate over the oops
@ -223,23 +223,23 @@ void GenMarkSweep::mark_sweep_phase1(int level,
|
||||
&is_alive, &keep_alive, &follow_stack_closure, NULL);
|
||||
}
|
||||
|
||||
// Follow system dictionary roots and unload classes
|
||||
// This is the point where the entire marking should have completed.
|
||||
assert(_marking_stack.is_empty(), "Marking should have completed");
|
||||
|
||||
// Unload classes and purge the SystemDictionary.
|
||||
bool purged_class = SystemDictionary::do_unloading(&is_alive);
|
||||
|
||||
// Follow code cache roots
|
||||
// Unload nmethods.
|
||||
CodeCache::do_unloading(&is_alive, purged_class);
|
||||
follow_stack(); // Flush marking stack
|
||||
|
||||
// Update subklass/sibling/implementor links of live klasses
|
||||
// Prune dead klasses from subklass/sibling/implementor lists.
|
||||
Klass::clean_weak_klass_links(&is_alive);
|
||||
assert(_marking_stack.is_empty(), "just drained");
|
||||
|
||||
// Visit interned string tables and delete unmarked oops
|
||||
// Delete entries for dead interned strings.
|
||||
StringTable::unlink(&is_alive);
|
||||
|
||||
// Clean up unreferenced symbols in symbol table.
|
||||
SymbolTable::unlink();
|
||||
|
||||
assert(_marking_stack.is_empty(), "stack should be empty by now");
|
||||
}

@ -282,11 +282,10 @@ void GenMarkSweep::mark_sweep_phase3(int level) {
// Need new claim bits for the pointer adjustment tracing.
ClassLoaderDataGraph::clear_claimed_marks();

// Because the two closures below are created statically, cannot
// Because the closure below is created statically, we cannot
// use OopsInGenClosure constructor which takes a generation,
// as the Universe has not been created when the static constructors
// are run.
adjust_root_pointer_closure.set_orig_generation(gch->get_gen(level));
adjust_pointer_closure.set_orig_generation(gch->get_gen(level));

gch->gen_process_strong_roots(level,

@ -294,18 +293,17 @@ void GenMarkSweep::mark_sweep_phase3(int level) {
true, // activate StrongRootsScope
false, // not scavenging
SharedHeap::SO_AllClasses,
&adjust_root_pointer_closure,
&adjust_pointer_closure,
false, // do not walk code
&adjust_root_pointer_closure,
&adjust_pointer_closure,
&adjust_klass_closure);

// Now adjust pointers in remaining weak roots. (All of which should
// have been cleared if they pointed to non-surviving objects.)
CodeBlobToOopClosure adjust_code_pointer_closure(&adjust_pointer_closure,
/*do_marking=*/ false);
gch->gen_process_weak_roots(&adjust_root_pointer_closure,
&adjust_code_pointer_closure,
&adjust_pointer_closure);
gch->gen_process_weak_roots(&adjust_pointer_closure,
&adjust_code_pointer_closure);

adjust_marks();
GenAdjustPointersClosure blk;

@ -28,6 +28,7 @@
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"

class VirtualSpaceNode;
//
// Future modification
//

@ -45,27 +46,30 @@ size_t Metachunk::_overhead =

// Metachunk methods

Metachunk* Metachunk::initialize(MetaWord* ptr, size_t word_size) {
// Set bottom, top, and end. Allow space for the Metachunk itself
Metachunk* chunk = (Metachunk*) ptr;

MetaWord* chunk_bottom = ptr + _overhead;
chunk->set_bottom(ptr);
chunk->set_top(chunk_bottom);
MetaWord* chunk_end = ptr + word_size;
assert(chunk_end > chunk_bottom, "Chunk must be too small");
chunk->set_end(chunk_end);
chunk->set_next(NULL);
chunk->set_prev(NULL);
chunk->set_word_size(word_size);
Metachunk::Metachunk(size_t word_size,
VirtualSpaceNode* container) :
_word_size(word_size),
_bottom(NULL),
_end(NULL),
_top(NULL),
_next(NULL),
_prev(NULL),
_container(container)
{
_bottom = (MetaWord*)this;
_top = (MetaWord*)this + _overhead;
_end = (MetaWord*)this + word_size;
#ifdef ASSERT
size_t data_word_size = pointer_delta(chunk_end, chunk_bottom, sizeof(MetaWord));
Copy::fill_to_words((HeapWord*) chunk_bottom, data_word_size, metadata_chunk_initialize);
set_is_free(false);
size_t data_word_size = pointer_delta(end(),
top(),
sizeof(MetaWord));
Copy::fill_to_words((HeapWord*) top(),
data_word_size,
metadata_chunk_initialize);
#endif
return chunk;
}


MetaWord* Metachunk::allocate(size_t word_size) {
MetaWord* result = NULL;
// If available, bump the pointer to allocate.
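
The static Metachunk::initialize() factory becomes a constructor that embeds the chunk header at the start of its own payload (_bottom points at this, and _top starts _overhead words in). Presumably callers now construct chunks with placement new over the committed words; a sketch under that assumption:

    // Sketch: constructing a chunk in place at the start of a committed
    // region. The placement-new call site is an assumption; only the
    // constructor itself appears in this diff.
    MetaWord* base = committed_base;  // stand-in: first word of the region
    Metachunk* chunk = ::new (base) Metachunk(word_size, container);
    MetaWord* mem = chunk->allocate(request_words);  // bumps _top, or NULL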

@ -41,10 +41,13 @@
// | | | |
// +--------------+ <- bottom ---+ ---+

class VirtualSpaceNode;

class Metachunk VALUE_OBJ_CLASS_SPEC {
// link to support lists of chunks
Metachunk* _next;
Metachunk* _prev;
VirtualSpaceNode* _container;

MetaWord* _bottom;
MetaWord* _end;

@ -61,29 +64,20 @@ class Metachunk VALUE_OBJ_CLASS_SPEC {
// the space.
static size_t _overhead;

void set_bottom(MetaWord* v) { _bottom = v; }
void set_end(MetaWord* v) { _end = v; }
void set_top(MetaWord* v) { _top = v; }
void set_word_size(size_t v) { _word_size = v; }
public:
#ifdef ASSERT
Metachunk() : _bottom(NULL), _end(NULL), _top(NULL), _is_free(false),
_next(NULL), _prev(NULL) {}
#else
Metachunk() : _bottom(NULL), _end(NULL), _top(NULL),
_next(NULL), _prev(NULL) {}
#endif
Metachunk(size_t word_size , VirtualSpaceNode* container);

// Used to add a Metachunk to a list of Metachunks
void set_next(Metachunk* v) { _next = v; assert(v != this, "Boom");}
void set_prev(Metachunk* v) { _prev = v; assert(v != this, "Boom");}
void set_container(VirtualSpaceNode* v) { _container = v; }

MetaWord* allocate(size_t word_size);
static Metachunk* initialize(MetaWord* ptr, size_t word_size);

// Accessors
Metachunk* next() const { return _next; }
Metachunk* prev() const { return _prev; }
VirtualSpaceNode* container() const { return _container; }
MetaWord* bottom() const { return _bottom; }
MetaWord* end() const { return _end; }
MetaWord* top() const { return _top; }

File diff suppressed because it is too large
@ -111,6 +111,10 @@ class Metaspace : public CHeapObj<mtClass> {
SpaceManager* _class_vsm;
SpaceManager* class_vsm() const { return _class_vsm; }

// Allocate space for metadata of type mdtype. This is space
// within a Metachunk and is used by
// allocate(ClassLoaderData*, size_t, bool, MetadataType, TRAPS)
// which returns a Metablock.
MetaWord* allocate(size_t word_size, MetadataType mdtype);

// Virtual Space lists for both classes and other metadata

@ -133,11 +137,14 @@ class Metaspace : public CHeapObj<mtClass> {
static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; }

char* bottom() const;
size_t used_words(MetadataType mdtype) const;
size_t used_words_slow(MetadataType mdtype) const;
size_t free_words(MetadataType mdtype) const;
size_t capacity_words(MetadataType mdtype) const;
size_t capacity_words_slow(MetadataType mdtype) const;
size_t waste_words(MetadataType mdtype) const;

size_t used_bytes_slow(MetadataType mdtype) const;
size_t capacity_bytes_slow(MetadataType mdtype) const;

static Metablock* allocate(ClassLoaderData* loader_data, size_t size,
bool read_only, MetadataType mdtype, TRAPS);
void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);

@ -150,6 +157,9 @@ class Metaspace : public CHeapObj<mtClass> {
static bool contains(const void *ptr);
void dump(outputStream* const out) const;

// Free empty virtualspaces
static void purge();

void print_on(outputStream* st) const;
// Debugging support
void verify();

@ -158,28 +168,81 @@ class Metaspace : public CHeapObj<mtClass> {
class MetaspaceAux : AllStatic {

// Statistics for class space and data space in metaspace.
static size_t used_in_bytes(Metaspace::MetadataType mdtype);

// These methods iterate over the classloader data graph
// for the given Metaspace type. These are slow.
static size_t used_bytes_slow(Metaspace::MetadataType mdtype);
static size_t free_in_bytes(Metaspace::MetadataType mdtype);
static size_t capacity_in_bytes(Metaspace::MetadataType mdtype);
static size_t capacity_bytes_slow(Metaspace::MetadataType mdtype);

// Iterates over the virtual space list.
static size_t reserved_in_bytes(Metaspace::MetadataType mdtype);

static size_t free_chunks_total(Metaspace::MetadataType mdtype);
static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype);

public:
// Total of space allocated to metadata in all Metaspaces
static size_t used_in_bytes() {
return used_in_bytes(Metaspace::ClassType) +
used_in_bytes(Metaspace::NonClassType);
// Running sum of space in all Metachunks that has been
// allocated to a Metaspace. This is used instead of
// iterating over all the classloaders
static size_t _allocated_capacity_words;
// Running sum of space in all Metachunks that are
// being used for metadata.
static size_t _allocated_used_words;

public:
// Decrement and increment _allocated_capacity_words
static void dec_capacity(size_t words);
static void inc_capacity(size_t words);

// Decrement and increment _allocated_used_words
static void dec_used(size_t words);
static void inc_used(size_t words);

// Total of space allocated to metadata in all Metaspaces.
// This sums the space used in each Metachunk by
// iterating over the classloader data graph
static size_t used_bytes_slow() {
return used_bytes_slow(Metaspace::ClassType) +
used_bytes_slow(Metaspace::NonClassType);
}

// Total of available space in all Metaspaces
// Total of capacity allocated to all Metaspaces. This includes
// space in Metachunks not yet allocated and in the Metachunk
// freelist.
static size_t capacity_in_bytes() {
return capacity_in_bytes(Metaspace::ClassType) +
capacity_in_bytes(Metaspace::NonClassType);
// Used by MetaspaceCounters
static size_t free_chunks_total();
static size_t free_chunks_total_in_bytes();

static size_t allocated_capacity_words() {
return _allocated_capacity_words;
}
static size_t allocated_capacity_bytes() {
return _allocated_capacity_words * BytesPerWord;
}

static size_t allocated_used_words() {
return _allocated_used_words;
}
static size_t allocated_used_bytes() {
return _allocated_used_words * BytesPerWord;
}

static size_t free_bytes();

// Total capacity in all Metaspaces
static size_t capacity_bytes_slow() {
#ifdef PRODUCT
// Use allocated_capacity_bytes() in PRODUCT instead of this function.
guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
#endif
size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
" class_capacity + non_class_capacity " SIZE_FORMAT
" class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
allocated_capacity_bytes(), class_capacity + non_class_capacity,
class_capacity, non_class_capacity));

return class_capacity + non_class_capacity;
}

// Total space reserved in all Metaspaces
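
The two running counters above replace a per-query walk of the class loader data graph with O(1) reads; the *_slow variants survive mainly as debug-time cross-checks. A minimal sketch of the update discipline, assuming whichever component hands out and retires Metachunks reports word deltas (only the declarations appear in this diff):

    // Assumed shape of the counter updates.
    void MetaspaceAux::inc_capacity(size_t words) {
      _allocated_capacity_words += words;
    }
    void MetaspaceAux::dec_capacity(size_t words) {
      assert(_allocated_capacity_words >= words, "underflow");
      _allocated_capacity_words -= words;
    }
    void MetaspaceAux::inc_used(size_t words) { _allocated_used_words += words; }
    void MetaspaceAux::dec_used(size_t words) {
      assert(_allocated_used_words >= words, "underflow");
      _allocated_used_words -= words;
    }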

@ -198,6 +261,11 @@ class MetaspaceAux : AllStatic {
static void print_waste(outputStream* out);
static void dump(outputStream* out);
static void verify_free_chunks();
// Checks that the values returned by allocated_capacity_bytes() and
// capacity_bytes_slow() are the same.
static void verify_capacity();
static void verify_used();
static void verify_metrics();
};

// Metaspaces are deallocated when their class loaders are GC'ed.

@ -232,7 +300,6 @@ class MetaspaceGC : AllStatic {
public:

static size_t capacity_until_GC() { return _capacity_until_GC; }
static size_t capacity_until_GC_in_bytes() { return _capacity_until_GC * BytesPerWord; }
static void inc_capacity_until_GC(size_t v) { _capacity_until_GC += v; }
static void dec_capacity_until_GC(size_t v) {
_capacity_until_GC = _capacity_until_GC > v ? _capacity_until_GC - v : 0;

@ -29,6 +29,16 @@

MetaspaceCounters* MetaspaceCounters::_metaspace_counters = NULL;

size_t MetaspaceCounters::calc_total_capacity() {
// The total capacity is the sum of
// 1) capacity of Metachunks in use by all Metaspaces
// 2) unused space at the end of each Metachunk
// 3) space in the freelist
size_t total_capacity = MetaspaceAux::allocated_capacity_bytes()
+ MetaspaceAux::free_bytes() + MetaspaceAux::free_chunks_total_in_bytes();
return total_capacity;
}

MetaspaceCounters::MetaspaceCounters() :
_capacity(NULL),
_used(NULL),

@ -36,8 +46,8 @@ MetaspaceCounters::MetaspaceCounters() :
if (UsePerfData) {
size_t min_capacity = MetaspaceAux::min_chunk_size();
size_t max_capacity = MetaspaceAux::reserved_in_bytes();
size_t curr_capacity = MetaspaceAux::capacity_in_bytes();
size_t used = MetaspaceAux::used_in_bytes();
size_t curr_capacity = calc_total_capacity();
size_t used = MetaspaceAux::allocated_used_bytes();

initialize(min_capacity, max_capacity, curr_capacity, used);
}

@ -82,15 +92,13 @@ void MetaspaceCounters::initialize(size_t min_capacity,

void MetaspaceCounters::update_capacity() {
assert(UsePerfData, "Should not be called unless being used");
assert(_capacity != NULL, "Should be initialized");
size_t capacity_in_bytes = MetaspaceAux::capacity_in_bytes();
_capacity->set_value(capacity_in_bytes);
size_t total_capacity = calc_total_capacity();
_capacity->set_value(total_capacity);
}

void MetaspaceCounters::update_used() {
assert(UsePerfData, "Should not be called unless being used");
assert(_used != NULL, "Should be initialized");
size_t used_in_bytes = MetaspaceAux::used_in_bytes();
size_t used_in_bytes = MetaspaceAux::allocated_used_bytes();
_used->set_value(used_in_bytes);
}


@ -37,6 +37,7 @@ class MetaspaceCounters: public CHeapObj<mtClass> {
size_t max_capacity,
size_t curr_capacity,
size_t used);
size_t calc_total_capacity();
public:
MetaspaceCounters();
~MetaspaceCounters();

@ -376,18 +376,17 @@ void VM_PopulateDumpSharedSpace::doit() {
const char* fmt = "%s space: %9d [ %4.1f%% of total] out of %9d bytes [%4.1f%% used] at " PTR_FORMAT;
Metaspace* ro_space = _loader_data->ro_metaspace();
Metaspace* rw_space = _loader_data->rw_metaspace();
const size_t BPW = BytesPerWord;

// Allocated size of each space (may not be all occupied)
const size_t ro_alloced = ro_space->capacity_words(Metaspace::NonClassType) * BPW;
const size_t rw_alloced = rw_space->capacity_words(Metaspace::NonClassType) * BPW;
const size_t ro_alloced = ro_space->capacity_bytes_slow(Metaspace::NonClassType);
const size_t rw_alloced = rw_space->capacity_bytes_slow(Metaspace::NonClassType);
const size_t md_alloced = md_end-md_low;
const size_t mc_alloced = mc_end-mc_low;
const size_t total_alloced = ro_alloced + rw_alloced + md_alloced + mc_alloced;

// Occupied size of each space.
const size_t ro_bytes = ro_space->used_words(Metaspace::NonClassType) * BPW;
const size_t rw_bytes = rw_space->used_words(Metaspace::NonClassType) * BPW;
const size_t ro_bytes = ro_space->used_bytes_slow(Metaspace::NonClassType);
const size_t rw_bytes = rw_space->used_bytes_slow(Metaspace::NonClassType);
const size_t md_bytes = size_t(md_top - md_low);
const size_t mc_bytes = size_t(mc_top - mc_low);

@ -218,8 +218,7 @@ public:
static AlwaysTrueClosure always_true;

void SharedHeap::process_weak_roots(OopClosure* root_closure,
CodeBlobClosure* code_roots,
OopClosure* non_root_closure) {
CodeBlobClosure* code_roots) {
// Global (weak) JNI handles
JNIHandles::weak_oops_do(&always_true, root_closure);

@ -249,8 +249,7 @@ public:
// JNI weak roots, the code cache, system dictionary, symbol table,
// string table.
void process_weak_roots(OopClosure* root_closure,
CodeBlobClosure* code_roots,
OopClosure* non_root_closure);
CodeBlobClosure* code_roots);

// The functions below are helper functions that a subclass of
// "SharedHeap" can use in the implementation of its virtual

@ -1270,7 +1270,7 @@ void Universe::print_heap_after_gc(outputStream* st, bool ignore_extended) {
st->print_cr("}");
}

void Universe::verify(bool silent, VerifyOption option) {
void Universe::verify(VerifyOption option, const char* prefix, bool silent) {
// The use of _verify_in_progress is a temporary work around for
// 6320749. Don't bother with creating a class to set and clear
// it since it is only used in this method and the control flow is

@ -1287,11 +1287,12 @@ void Universe::verify(bool silent, VerifyOption option) {
HandleMark hm;  // Handles created during verification can be zapped
_verify_count++;

if (!silent) gclog_or_tty->print(prefix);
if (!silent) gclog_or_tty->print("[Verifying ");
if (!silent) gclog_or_tty->print("threads ");
Threads::verify();
if (!silent) gclog_or_tty->print("heap ");
heap()->verify(silent, option);

if (!silent) gclog_or_tty->print("syms ");
SymbolTable::verify();
if (!silent) gclog_or_tty->print("strs ");

@ -445,12 +445,12 @@ class Universe: AllStatic {

// Debugging
static bool verify_in_progress() { return _verify_in_progress; }
static void verify(bool silent, VerifyOption option);
static void verify(bool silent) {
verify(silent, VerifyOption_Default /* option */);
static void verify(VerifyOption option, const char* prefix, bool silent = VerifySilently);
static void verify(const char* prefix, bool silent = VerifySilently) {
verify(VerifyOption_Default, prefix, silent);
}
static void verify() {
verify(false /* silent */);
static void verify(bool silent = VerifySilently) {
verify("", silent);
}

static int verify_count() { return _verify_count; }
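
The reworked overloads thread an optional log prefix and a VerifySilently default through every call site, so the GC code earlier in this diff can write Universe::verify(" VerifyBeforeGC:") instead of printing the prefix separately. All of the following resolve against the declarations above:

    Universe::verify();                     // no prefix, silent = VerifySilently
    Universe::verify(" VerifyBeforeGC:");   // prefix, silent = VerifySilently
    Universe::verify(true);                 // no prefix, explicitly silent
    Universe::verify(VerifyOption_Default, " VerifyAfterGC:", false);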

@ -3565,6 +3565,7 @@ void GraphKit::g1_write_barrier_pre(bool do_load,
Node* no_ctrl = NULL;
Node* no_base = __ top();
Node* zero = __ ConI(0);
Node* zeroX = __ ConX(0);

float likely = PROB_LIKELY(0.999);
float unlikely = PROB_UNLIKELY(0.999);

@ -3590,7 +3591,9 @@ void GraphKit::g1_write_barrier_pre(bool do_load,

// if (!marking)
__ if_then(marking, BoolTest::ne, zero); {
Node* index = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
BasicType index_bt = TypeX_X->basic_type();
assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 PtrQueue::_index with wrong size.");
Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);

if (do_load) {
// load original value

@ -3603,22 +3606,16 @@ void GraphKit::g1_write_barrier_pre(bool do_load,
Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

// is the queue for this thread full?
__ if_then(index, BoolTest::ne, zero, likely); {
__ if_then(index, BoolTest::ne, zeroX, likely); {

// decrement the index
Node* next_index = __ SubI(index, __ ConI(sizeof(intptr_t)));
Node* next_indexX = next_index;
#ifdef _LP64
// We could refine the type for what it's worth
// const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue);
next_indexX = _gvn.transform( new (C) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) );
#endif
Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));

// Now get the buffer location we will log the previous value into and store it
Node *log_addr = __ AddP(no_base, buffer, next_indexX);
Node *log_addr = __ AddP(no_base, buffer, next_index);
__ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw);
// update the index
__ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);
__ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw);

} __ else_(); {

@ -3646,25 +3643,20 @@ void GraphKit::g1_mark_card(IdealKit& ideal,
const TypeFunc* tf) {

Node* zero = __ ConI(0);
Node* zeroX = __ ConX(0);
Node* no_base = __ top();
BasicType card_bt = T_BYTE;
// Smash zero into card. MUST BE ORDERED WRT TO STORE
__ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);

// Now do the queue work
__ if_then(index, BoolTest::ne, zero); {
__ if_then(index, BoolTest::ne, zeroX); {

Node* next_index = __ SubI(index, __ ConI(sizeof(intptr_t)));
Node* next_indexX = next_index;
#ifdef _LP64
// We could refine the type for what it's worth
// const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue);
next_indexX = _gvn.transform( new (C) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) );
#endif // _LP64
Node* log_addr = __ AddP(no_base, buffer, next_indexX);
Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
Node* log_addr = __ AddP(no_base, buffer, next_index);

__ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw);
__ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);
__ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw);

} __ else_(); {
__ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());

@ -3725,7 +3717,7 @@ void GraphKit::g1_write_barrier_post(Node* oop_store,
// Now some values
// Use ctrl to avoid hoisting these values past a safepoint, which could
// potentially reset these fields in the JavaThread.
Node* index = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
Node* index = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

// Convert the store obj pointer to an int prior to doing math on it
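
PtrQueue::_index is a size_t, so the pre- and post-barrier code now loads and updates it as a machine word rather than a 32-bit int, which removes the explicit #ifdef _LP64 ConvI2L widening before the address arithmetic. The X-suffixed names are C2's pointer-width aliases (defined in opto/type.hpp); roughly:

    // Pointer-width aliases, shown for orientation (see opto/type.hpp).
    #ifdef _LP64
    #define TypeX_X   TypeLong::LONG
    #define SubXNode  SubLNode   // and __ ConX(v) emits a long constant
    #else
    #define TypeX_X   TypeInt::INT
    #define SubXNode  SubINode   // and __ ConX(v) emits an int constant
    #endif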

@ -2123,6 +2123,9 @@ class CommandLineFlags {
product(intx, PrefetchFieldsAhead, -1, \
"How many fields ahead to prefetch in oop scan (<= 0 means off)") \
\
diagnostic(bool, VerifySilently, false, \
"Don't print the verification progress") \
\
diagnostic(bool, VerifyDuringStartup, false, \
"Verify memory system before executing any Java code " \
"during VM initialization") \

@ -3447,7 +3447,8 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {

assert (Universe::is_fully_initialized(), "not initialized");
if (VerifyDuringStartup) {
VM_Verify verify_op(false /* silent */); // make sure we're starting with a clean slate
// Make sure we're starting with a clean slate.
VM_Verify verify_op;
VMThread::execute(&verify_op);
}

@ -60,72 +60,6 @@ ReservedSpace::ReservedSpace(size_t size, size_t alignment,
initialize(size, alignment, large, NULL, 0, executable);
}

char *
ReservedSpace::align_reserved_region(char* addr, const size_t len,
const size_t prefix_size,
const size_t prefix_align,
const size_t suffix_size,
const size_t suffix_align)
{
assert(addr != NULL, "sanity");
const size_t required_size = prefix_size + suffix_size;
assert(len >= required_size, "len too small");

const size_t s = size_t(addr);
const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;

if (len < beg_delta + required_size) {
return NULL; // Cannot do proper alignment.
}
const size_t end_delta = len - (beg_delta + required_size);

if (beg_delta != 0) {
os::release_memory(addr, beg_delta);
}

if (end_delta != 0) {
char* release_addr = (char*) (s + beg_delta + required_size);
os::release_memory(release_addr, end_delta);
}

return (char*) (s + beg_delta);
}

char* ReservedSpace::reserve_and_align(const size_t reserve_size,
const size_t prefix_size,
const size_t prefix_align,
const size_t suffix_size,
const size_t suffix_align)
{
assert(reserve_size > prefix_size + suffix_size, "should not be here");

char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
if (raw_addr == NULL) return NULL;

char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
prefix_align, suffix_size,
suffix_align);
if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
fatal("os::release_memory failed");
}

#ifdef ASSERT
if (result != NULL) {
const size_t raw = size_t(raw_addr);
const size_t res = size_t(result);
assert(res >= raw, "alignment decreased start addr");
assert(res + prefix_size + suffix_size <= raw + reserve_size,
"alignment increased end addr");
assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix");
assert(((res + prefix_size) & (suffix_align - 1)) == 0,
"bad alignment of suffix");
}
#endif

return result;
}

// Helper method.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
const size_t size, bool special)

@ -155,92 +89,6 @@ static bool failed_to_reserve_as_requested(char* base, char* requested_address,
return true;
}

ReservedSpace::ReservedSpace(const size_t suffix_size,
const size_t suffix_align,
char* requested_address,
const size_t noaccess_prefix)
{
assert(suffix_size != 0, "sanity");
assert(suffix_align != 0, "sanity");
assert((suffix_size & (suffix_align - 1)) == 0,
"suffix_size not divisible by suffix_align");

// Assert that if noaccess_prefix is used, it is the same as prefix_align.
// Add in noaccess_prefix to prefix
const size_t adjusted_prefix_size = noaccess_prefix;
const size_t size = adjusted_prefix_size + suffix_size;

// On systems where the entire region has to be reserved and committed up
// front, the compound alignment normally done by this method is unnecessary.
const bool try_reserve_special = UseLargePages &&
suffix_align == os::large_page_size();
if (!os::can_commit_large_page_memory() && try_reserve_special) {
initialize(size, suffix_align, true, requested_address, noaccess_prefix,
false);
return;
}

_base = NULL;
_size = 0;
_alignment = 0;
_special = false;
_noaccess_prefix = 0;
_executable = false;

// Optimistically try to reserve the exact size needed.
char* addr;
if (requested_address != 0) {
requested_address -= noaccess_prefix; // adjust address
assert(requested_address != NULL, "huge noaccess prefix?");
addr = os::attempt_reserve_memory_at(size, requested_address);
if (failed_to_reserve_as_requested(addr, requested_address, size, false)) {
// OS ignored requested address. Try different address.
addr = NULL;
}
} else {
addr = os::reserve_memory(size, NULL, suffix_align);
}
if (addr == NULL) return;

// Check whether the result has the needed alignment
const size_t ofs = (size_t(addr) + adjusted_prefix_size) & (suffix_align - 1);
if (ofs != 0) {
// Wrong alignment. Release, allocate more space and do manual alignment.
//
// On most operating systems, another allocation with a somewhat larger size
// will return an address "close to" that of the previous allocation. The
// result is often the same address (if the kernel hands out virtual
// addresses from low to high), or an address that is offset by the increase
// in size. Exploit that to minimize the amount of extra space requested.
if (!os::release_memory(addr, size)) {
fatal("os::release_memory failed");
}

const size_t extra = MAX2(ofs, suffix_align - ofs);
addr = reserve_and_align(size + extra, adjusted_prefix_size, suffix_align,
suffix_size, suffix_align);
if (addr == NULL) {
// Try an even larger region. If this fails, address space is exhausted.
addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
suffix_align, suffix_size, suffix_align);
}

if (requested_address != 0 &&
failed_to_reserve_as_requested(addr, requested_address, size, false)) {
// As a result of the alignment constraints, the allocated addr differs
// from the requested address. Return back to the caller who can
// take remedial action (like try again without a requested address).
assert(_base == NULL, "should be");
return;
}
}

_base = addr;
_size = size;
_alignment = suffix_align;
_noaccess_prefix = noaccess_prefix;
}

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
char* requested_address,
const size_t noaccess_prefix,

@ -476,20 +324,6 @@ ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
protect_noaccess_prefix(size);
}

ReservedHeapSpace::ReservedHeapSpace(const size_t heap_space_size,
const size_t alignment,
char* requested_address) :
ReservedSpace(heap_space_size, alignment,
requested_address,
(UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
Universe::narrow_oop_use_implicit_null_checks()) ?
lcm(os::vm_page_size(), alignment) : 0) {
if (base() > 0) {
MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
}
protect_noaccess_prefix(heap_space_size);
}

// Reserve space for code segment. Same as Java heap only we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,

@ -47,28 +47,6 @@ class ReservedSpace VALUE_OBJ_CLASS_SPEC {
const size_t noaccess_prefix,
bool executable);

// Release parts of an already-reserved memory region [addr, addr + len) to
// get a new region that has "compound alignment." Return the start of the
// resulting region, or NULL on failure.
//
// The region is logically divided into a prefix and a suffix. The prefix
// starts at the result address, which is aligned to prefix_align. The suffix
// starts at result address + prefix_size, which is aligned to suffix_align.
// The total size of the result region is size prefix_size + suffix_size.
char* align_reserved_region(char* addr, const size_t len,
const size_t prefix_size,
const size_t prefix_align,
const size_t suffix_size,
const size_t suffix_align);
// Reserve memory, call align_reserved_region() to align it and return the
// result.
char* reserve_and_align(const size_t reserve_size,
const size_t prefix_size,
const size_t prefix_align,
const size_t suffix_size,
const size_t suffix_align);

protected:
// Create protection page at the beginning of the space.
void protect_noaccess_prefix(const size_t size);

@ -79,9 +57,6 @@ class ReservedSpace VALUE_OBJ_CLASS_SPEC {
ReservedSpace(size_t size, size_t alignment, bool large,
char* requested_address = NULL,
const size_t noaccess_prefix = 0);
ReservedSpace(const size_t suffix_size, const size_t suffix_align,
char* requested_address,
const size_t noaccess_prefix = 0);
ReservedSpace(size_t size, size_t alignment, bool large, bool executable);

// Accessors

@ -128,8 +103,6 @@ public:
// Constructor
ReservedHeapSpace(size_t size, size_t forced_base_alignment,
bool large, char* requested_address);
ReservedHeapSpace(const size_t prefix_size, const size_t prefix_align,
char* requested_address);
};

// Class encapsulating behavior specific memory space for Code

@ -293,7 +293,7 @@ void VMThread::run() {
os::check_heap();
// Silent verification so as not to pollute normal output,
// unless we really asked for it.
Universe::verify(!(PrintGCDetails || Verbose));
Universe::verify(!(PrintGCDetails || Verbose) || VerifySilently);
}

CompileBroker::set_should_block();

@ -302,7 +302,7 @@ class VM_Verify: public VM_Operation {
private:
bool _silent;
public:
VM_Verify(bool silent) : _silent(silent) {}
VM_Verify(bool silent = VerifySilently) : _silent(silent) {}
VMOp_Type type() const { return VMOp_Verify; }
void doit();
};

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -25,71 +25,67 @@
* @test TestFullGCCount.java
* @bug 7072527
* @summary CMS: JMM GC counters overcount in some cases
* @run main/othervm -XX:+UseConcMarkSweepGC TestFullGCCount
*
* @run main/othervm -XX:+PrintGC TestFullGCCount
*/
import java.util.*;
import java.lang.management.*;

/*
* Originally for a specific failure in CMS, this test now monitors all
* collectors for double-counting of collections.
*/
public class TestFullGCCount {

public String collectorName = "ConcurrentMarkSweep";
static List<GarbageCollectorMXBean> collectors = ManagementFactory.getGarbageCollectorMXBeans();

public static void main(String[] args) {

TestFullGCCount t = null;
if (args.length==2) {
t = new TestFullGCCount(args[0], args[1]);
} else {
t = new TestFullGCCount();
}
System.out.println("Monitoring collector: " + t.collectorName);
t.run();
}

public TestFullGCCount(String pool, String collector) {
collectorName = collector;
}

public TestFullGCCount() {
}

public void run() {
int count = 0;
int iterations = 20;
long counts[] = new long[iterations];
boolean diffAlways2 = true; // assume we will fail
boolean failed = false;
String errorMessage = "";
HashMap<String, List> counts = new HashMap<String, List>();

// Prime the collection of count lists for all collectors.
for (int i = 0; i < collectors.size(); i++) {
GarbageCollectorMXBean collector = collectors.get(i);
counts.put(collector.getName(), new ArrayList<Long>(iterations));
}

// Perform some gc, record collector counts.
for (int i = 0; i < iterations; i++) {
System.gc();
counts[i] = getCollectionCount();
if (i>0) {
if (counts[i] - counts[i-1] != 2) {
diffAlways2 = false;
addCollectionCount(counts, i);
}

// Check the increments:
// Old gen collectors should increase by one,
// New collectors may or may not increase.
// Any increase >=2 is unexpected.
for (String collector : counts.keySet()) {
System.out.println("Checking: " + collector);

for (int i = 0; i < iterations - 1; i++) {
List<Long> theseCounts = counts.get(collector);
long a = theseCounts.get(i);
long b = theseCounts.get(i + 1);
if (b - a >= 2) {
failed = true;
errorMessage += "Collector '" + collector + "' has increment " + (b - a) +
" at iteration " + i + "\n";
}
}
}
if (diffAlways2) {
throw new RuntimeException("FAILED: System.gc must be incrementing count twice.");
if (failed) {
System.err.println(errorMessage);
throw new RuntimeException("FAILED: System.gc collections miscounted.");
}
System.out.println("Passed.");
}

private long getCollectionCount() {
long count = 0;
List<MemoryPoolMXBean> pools = ManagementFactory.getMemoryPoolMXBeans();
List<GarbageCollectorMXBean> collectors = ManagementFactory.getGarbageCollectorMXBeans();
private static void addCollectionCount(HashMap<String, List> counts, int iteration) {
for (int i = 0; i < collectors.size(); i++) {
GarbageCollectorMXBean collector = collectors.get(i);
String name = collector.getName();
if (name.contains(collectorName)) {
System.out.println(name + ": collection count = "
+ collector.getCollectionCount());
count = collector.getCollectionCount();
List thisList = counts.get(collector.getName());
thisList.add(collector.getCollectionCount());
}
}
return count;
}

}


@ -23,22 +23,43 @@

/* @test TestVerifyDuringStartup.java
* @key gc
* @bug 8010463
* @bug 8010463 8011343 8011898
* @summary Simple test run with -XX:+VerifyDuringStartup -XX:-UseTLAB to verify 8010463
* @library /testlibrary
*/

import com.oracle.java.testlibrary.JDKToolFinder;
import com.oracle.java.testlibrary.OutputAnalyzer;
import com.oracle.java.testlibrary.ProcessTools;
import java.util.ArrayList;
import java.util.Collections;

public class TestVerifyDuringStartup {
public static void main(String args[]) throws Exception {
ProcessBuilder pb =
ProcessTools.createJavaProcessBuilder(System.getProperty("test.vm.opts"),
"-XX:-UseTLAB",
ArrayList<String> vmOpts = new ArrayList();

String testVmOptsStr = System.getProperty("test.java.opts");
if (!testVmOptsStr.isEmpty()) {
String[] testVmOpts = testVmOptsStr.split(" ");
Collections.addAll(vmOpts, testVmOpts);
}
Collections.addAll(vmOpts, new String[] {"-XX:-UseTLAB",
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+VerifyDuringStartup", "-version");
"-XX:+VerifyDuringStartup",
"-version"});

System.out.print("Testing:\n" + JDKToolFinder.getJDKTool("java"));
for (int i = 0; i < vmOpts.size(); i += 1) {
System.out.print(" " + vmOpts.get(i));
}
System.out.println();

ProcessBuilder pb =
ProcessTools.createJavaProcessBuilder(vmOpts.toArray(new String[vmOpts.size()]));
OutputAnalyzer output = new OutputAnalyzer(pb.start());

System.out.println("Output:\n" + output.getOutput());

output.shouldContain("[Verifying");
output.shouldHaveExitValue(0);
}

hotspot/test/gc/concurrentMarkSweep/GuardShrinkWarning.java (new file, 60 lines)
@ -0,0 +1,60 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/

/**
* @test GuardShrinkWarning
* @summary Remove warning about CMS generation shrinking.
* @bug 8012111
* @key gc
* @key regression
* @library /testlibrary
* @run main/othervm GuardShrinkWarning
* @author jon.masamitsu@oracle.com
*/

import com.oracle.java.testlibrary.*;

public class GuardShrinkWarning {
public static void main(String args[]) throws Exception {

ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
"-showversion",
"-XX:+UseConcMarkSweepGC",
"-XX:+ExplicitGCInvokesConcurrent",
"GuardShrinkWarning$SystemGCCaller"
);

OutputAnalyzer output = new OutputAnalyzer(pb.start());

output.shouldNotContain("Shrinking of CMS not yet implemented");

output.shouldNotContain("error");

output.shouldHaveExitValue(0);
}
static class SystemGCCaller {
public static void main(String [] args) {
System.gc();
}
}
}