8038212: Method::is_valid_method() check has performance regression impact for stackwalking
Only prune metaspace virtual spaces at safepoint so walking them is safe outside a safepoint.

Reviewed-by: mgerdin, mgronlun, hseigel, stefank
parent cc3c656cf1
commit c336175c94
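
Note on the approach: the regression came from Method::is_valid_method() calling ClassLoaderDataGraph::contains(), which walks every class loader's metaspace. The hunks below replace that with a static Metaspace::contains() that walks the VirtualSpaceList with no lock. Two disciplines make the lock-free walk safe: nodes are published behind OrderAccess::storestore(), so a concurrent reader never sees a half-initialized node, and nodes are only unlinked and freed at a safepoint, when no reader can be mid-walk. A minimal sketch of that publish-at-link, prune-at-safepoint pattern, with illustrative names and std::atomic standing in for HotSpot's OrderAccess:

    #include <atomic>
    #include <cstddef>

    struct Node {
      const char* base;
      std::size_t size;
      Node*       next;
      bool contains(const void* p) const {
        const char* c = static_cast<const char*>(p);
        return c >= base && c < base + size;
      }
    };

    std::atomic<Node*> list_head{nullptr};

    // Writer (holds the allocation lock): fully initialize the node, then
    // publish it. The release store plays the role of HotSpot's
    // OrderAccess::storestore() issued before link_vs(new_entry).
    void link_node(Node* n) {
      n->next = list_head.load(std::memory_order_relaxed);
      list_head.store(n, std::memory_order_release);
    }

    // Reader (any thread, no lock): mirrors VirtualSpaceList::contains().
    // Safe only because nodes are never unlinked or freed outside a
    // safepoint, when no such reader can be running.
    bool lock_free_contains(const void* p) {
      for (Node* n = list_head.load(std::memory_order_acquire); n != nullptr; n = n->next) {
        if (n->contains(p)) return true;
      }
      return false;
    }

The reader needs no lock because deletion is confined to safepoints; the commit enforces that with the new is_at_safepoint() asserts in ClassLoaderDataGraph::purge() and VirtualSpaceList::purge().
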
@@ -549,6 +549,8 @@ ClassLoaderData* ClassLoaderDataGraph::_head = NULL;
 ClassLoaderData* ClassLoaderDataGraph::_unloading = NULL;
 ClassLoaderData* ClassLoaderDataGraph::_saved_head = NULL;
 
+bool ClassLoaderDataGraph::_should_purge = false;
+
 // Add a new class loader data node to the list. Assign the newly created
 // ClassLoaderData into the java/lang/ClassLoader object as a hidden field
 ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous, TRAPS) {
@@ -675,32 +677,6 @@ GrowableArray<ClassLoaderData*>* ClassLoaderDataGraph::new_clds() {
   return array;
 }
 
-// For profiling and hsfind() only. Otherwise, this is unsafe (and slow). This
-// is done lock free to avoid lock inversion problems. It is safe because
-// new ClassLoaderData are added to the end of the CLDG, and only removed at
-// safepoint. The _unloading list can be deallocated concurrently with CMS so
-// this doesn't look in metaspace for classes that have been unloaded.
-bool ClassLoaderDataGraph::contains(const void* x) {
-  if (DumpSharedSpaces) {
-    // There are only two metaspaces to worry about.
-    ClassLoaderData* ncld = ClassLoaderData::the_null_class_loader_data();
-    return (ncld->ro_metaspace()->contains(x) || ncld->rw_metaspace()->contains(x));
-  }
-
-  if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(x)) {
-    return true;
-  }
-
-  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
-    if (cld->metaspace_or_null() != NULL && cld->metaspace_or_null()->contains(x)) {
-      return true;
-    }
-  }
-
-  // Do not check unloading list because deallocation can be concurrent.
-  return false;
-}
-
 #ifndef PRODUCT
 bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) {
   for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
@@ -759,6 +735,7 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure) {
 }
 
 void ClassLoaderDataGraph::purge() {
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
   ClassLoaderData* list = _unloading;
   _unloading = NULL;
   ClassLoaderData* next = list;
@@ -66,6 +66,7 @@ class ClassLoaderDataGraph : public AllStatic {
   static ClassLoaderData* _unloading;
   // CMS support.
   static ClassLoaderData* _saved_head;
+  static bool _should_purge;
 
   static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS);
   static void post_class_unload_events(void);
@@ -87,12 +88,20 @@ class ClassLoaderDataGraph : public AllStatic {
   static void remember_new_clds(bool remember) { _saved_head = (remember ? _head : NULL); }
   static GrowableArray<ClassLoaderData*>* new_clds();
 
+  static void set_should_purge(bool b) { _should_purge = b; }
+  static void purge_if_needed() {
+    // Only purge the CLDG for CMS if concurrent sweep is complete.
+    if (_should_purge) {
+      purge();
+      // reset for next time.
+      set_should_purge(false);
+    }
+  }
+
   static void dump_on(outputStream * const out) PRODUCT_RETURN;
   static void dump() { dump_on(tty); }
   static void verify();
 
-  // expensive test for pointer in metaspace for debugging
-  static bool contains(const void* x);
 #ifndef PRODUCT
   static bool contains_loader_data(ClassLoaderData* loader_data);
 #endif
@@ -6362,7 +6362,9 @@ void CMSCollector::sweep(bool asynch) {
   verify_overflow_empty();
 
   if (should_unload_classes()) {
-    ClassLoaderDataGraph::purge();
+    // Delay purge to the beginning of the next safepoint. Metaspace::contains
+    // requires that the virtual spaces are stable and not deleted.
+    ClassLoaderDataGraph::set_should_purge(true);
   }
 
   _intra_sweep_timer.stop();
@@ -5401,7 +5401,7 @@ public:
       if (_g1h->is_in_g1_reserved(p)) {
         _par_scan_state->push_on_queue(p);
       } else {
-        assert(!ClassLoaderDataGraph::contains((address)p),
+        assert(!Metaspace::contains((const void*)p),
               err_msg("Otherwise need to call _copy_metadata_obj_cl->do_oop(p) "
                       PTR_FORMAT, p));
         _copy_non_heap_obj_cl->do_oop(p);
@@ -75,7 +75,7 @@ bool MetaspaceObj::is_shared() const {
 }
 
 bool MetaspaceObj::is_metaspace_object() const {
-  return ClassLoaderDataGraph::contains((void*)this);
+  return Metaspace::contains((void*)this);
 }
 
 void MetaspaceObj::print_address_on(outputStream* st) const {
@@ -316,6 +316,8 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 
+  bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
+
   size_t reserved_words() const { return _virtual_space.reserved_size() / BytesPerWord; }
   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 
@@ -557,6 +559,8 @@ class VirtualSpaceList : public CHeapObj<mtClass> {
   void inc_virtual_space_count();
   void dec_virtual_space_count();
 
+  bool contains(const void* ptr);
+
   // Unlink empty VirtualSpaceNodes and free it.
   void purge(ChunkManager* chunk_manager);
 
@@ -641,8 +645,6 @@ class SpaceManager : public CHeapObj<mtClass> {
   // Accessors
   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
   void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
-    // ensure lock-free iteration sees fully initialized node
-    OrderAccess::storestore();
     _chunks_in_use[index] = v;
   }
 
@@ -757,8 +759,6 @@ class SpaceManager : public CHeapObj<mtClass> {
   void print_on(outputStream* st) const;
   void locked_print_chunks_in_use_on(outputStream* st) const;
 
-  bool contains(const void *ptr);
-
   void verify();
   void verify_chunk_size(Metachunk* chunk);
   NOT_PRODUCT(void mangle_freed_chunks();)
@@ -1078,6 +1078,7 @@ void ChunkManager::remove_chunk(Metachunk* chunk) {
 // nodes with a 0 container_count. Remove Metachunks in
 // the node from their respective freelists.
 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
+  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
   assert_lock_strong(SpaceManager::expand_lock());
   // Don't use a VirtualSpaceListIterator because this
   // list is being changed and a straightforward use of an iterator is not safe.
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
// This function looks at the mmap regions in the metaspace without locking.
|
||||
// The chunks are added with store ordering and not deleted except for at
|
||||
// unloading time during a safepoint.
|
||||
bool VirtualSpaceList::contains(const void* ptr) {
|
||||
// List should be stable enough to use an iterator here because removing virtual
|
||||
// space nodes is only allowed at a safepoint.
|
||||
VirtualSpaceListIterator iter(virtual_space_list());
|
||||
while (iter.repeat()) {
|
||||
VirtualSpaceNode* vsn = iter.get_next();
|
||||
if (vsn->contains(ptr)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void VirtualSpaceList::retire_current_virtual_space() {
|
||||
assert_lock_strong(SpaceManager::expand_lock());
|
||||
|
||||
@@ -1210,6 +1228,8 @@ bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
   } else {
     assert(new_entry->reserved_words() == vs_word_size,
         "Reserved memory size differs from requested memory size");
+    // ensure lock-free iteration sees fully initialized node
+    OrderAccess::storestore();
     link_vs(new_entry);
     return true;
   }
@@ -2434,21 +2454,6 @@ MetaWord* SpaceManager::allocate_work(size_t word_size) {
   return result;
 }
 
-// This function looks at the chunks in the metaspace without locking.
-// The chunks are added with store ordering and not deleted except for at
-// unloading time.
-bool SpaceManager::contains(const void *ptr) {
-  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i))
-  {
-    Metachunk* curr = chunks_in_use(i);
-    while (curr != NULL) {
-      if (curr->contains(ptr)) return true;
-      curr = curr->next();
-    }
-  }
-  return false;
-}
-
 void SpaceManager::verify() {
   // If there are blocks in the dictionary, then
   // verification of chunks does not work since
@@ -3538,11 +3543,15 @@ void Metaspace::print_on(outputStream* out) const {
 }
 
 bool Metaspace::contains(const void* ptr) {
-  if (vsm()->contains(ptr)) return true;
-  if (using_class_space()) {
-    return class_vsm()->contains(ptr);
+  if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
+    return true;
   }
-  return false;
+
+  if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
+    return true;
+  }
+
+  return get_space_list(NonClassType)->contains(ptr);
 }
 
 void Metaspace::verify() {
@@ -3787,5 +3796,4 @@ void TestVirtualSpaceNode_test() {
   TestVirtualSpaceNodeTest::test();
   TestVirtualSpaceNodeTest::test_is_available();
 }
-
 #endif
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -232,7 +232,8 @@ class Metaspace : public CHeapObj<mtClass> {
   MetaWord* expand_and_allocate(size_t size,
                                 MetadataType mdtype);
 
-  bool contains(const void* ptr);
+  static bool contains(const void* ptr);
+
   void dump(outputStream* const out) const;
 
   // Free empty virtualspaces
@@ -640,7 +640,7 @@ void Klass::verify_on(outputStream* st) {
 
   // This can be expensive, but it is worth checking that this klass is actually
   // in the CLD graph but not in production.
-  assert(ClassLoaderDataGraph::contains((address)this), "Should be");
+  assert(Metaspace::contains((address)this), "Should be");
 
   guarantee(this->is_klass(),"should be klass");
 
@@ -1866,6 +1866,14 @@ void Method::clear_jmethod_ids(ClassLoaderData* loader_data) {
   loader_data->jmethod_ids()->clear_all_methods();
 }
 
+bool Method::has_method_vptr(const void* ptr) {
+  Method m;
+  // This assumes that the vtbl pointer is the first word of a C++ object.
+  // This assumption is also in universe.cpp patch_klass_vtble
+  void* vtbl2 = dereference_vptr((const void*)&m);
+  void* this_vtbl = dereference_vptr(ptr);
+  return vtbl2 == this_vtbl;
+}
 
 // Check that this pointer is valid by checking that the vtbl pointer matches
 bool Method::is_valid_method() const {
@@ -1874,12 +1882,7 @@ bool Method::is_valid_method() const {
   } else if (!is_metaspace_object()) {
     return false;
   } else {
-    Method m;
-    // This assumes that the vtbl pointer is the first word of a C++ object.
-    // This assumption is also in universe.cpp patch_klass_vtble
-    void* vtbl2 = dereference_vptr((void*)&m);
-    void* this_vtbl = dereference_vptr((void*)this);
-    return vtbl2 == this_vtbl;
+    return has_method_vptr((const void*)this);
   }
 }
 
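
The two method.cpp hunks above factor the vtable compare into Method::has_method_vptr() so os::print_location() can reuse it. The trick in isolation, as a standalone sketch with made-up types: it leans on the same compiler-specific assumption the comment cites (the vptr occupies the first word of a polymorphic object), which the C++ standard does not guarantee:

    #include <cstdio>

    struct Probe { virtual ~Probe() {} int payload; };
    struct Other { virtual ~Other() {} };

    // Same first-word assumption as dereference_vptr() in globalDefinitions.hpp.
    static inline void* vptr_of(const void* obj) {
      return *(void**)obj;
    }

    // A pointer "looks like" a Probe if its first word matches the vtable
    // pointer of a known-good stack instance.
    static bool looks_like_probe(const void* p) {
      Probe sample;
      return vptr_of(p) == vptr_of(&sample);
    }

    int main() {
      Probe a;
      Other b;
      std::printf("a: %d, b: %d\n", looks_like_probe(&a), looks_like_probe(&b));  // a: 1, b: 0
      return 0;
    }

For stackwalking this is the fast path: a candidate Method* is rejected with one metaspace range check plus one word compare instead of a walk over the class loader data graph.
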
@@ -861,6 +861,7 @@ class Method : public Metadata {
   const char* internal_name() const { return "{method}"; }
 
   // Check for valid method pointer
+  static bool has_method_vptr(const void* ptr);
   bool is_valid_method() const;
 
   // Verify
@@ -1097,11 +1097,15 @@ void os::print_location(outputStream* st, intptr_t x, bool verbose) {
 
   }
 
-  // Check if in metaspace.
-  if (ClassLoaderDataGraph::contains((address)addr)) {
-    // Use addr->print() from the debugger instead (not here)
-    st->print_cr(INTPTR_FORMAT
-                 " is pointing into metadata", addr);
+  // Check if in metaspace and print types that have vptrs (only method now)
+  if (Metaspace::contains(addr)) {
+    if (Method::has_method_vptr((const void*)addr)) {
+      ((Method*)addr)->print_value_on(st);
+      st->cr();
+    } else {
+      // Use addr->print() from the debugger instead (not here)
+      st->print_cr(INTPTR_FORMAT " is pointing into metadata", addr);
+    }
     return;
   }
 
@@ -541,6 +541,13 @@ void SafepointSynchronize::do_cleanup_tasks() {
     gclog_or_tty->rotate_log(false);
   }
 
+  {
+    // CMS delays purging the CLDG until the beginning of the next safepoint and to
+    // make sure concurrent sweep is done
+    TraceTime t7("purging class loader data graph", TraceSafepointCleanupTime);
+    ClassLoaderDataGraph::purge_if_needed();
+  }
+
   if (MemTracker::is_on()) {
     MemTracker::sync();
   }
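
This cleanup hook is the consuming half of the deferred purge: CMS sweep only sets the flag (see the concurrentMarkSweepGeneration.cpp hunk above), and the actual freeing waits for the next safepoint so virtual spaces stay stable under concurrent Metaspace::contains() calls. The handshake reduced to its essentials, with illustrative names standing in for the real ClassLoaderDataGraph entry points:

    #include <cstdio>

    static volatile bool should_purge = false;  // stands in for _should_purge

    // Concurrent GC thread, end of sweep: request a purge, free nothing yet.
    static void concurrent_sweep_done() {
      should_purge = true;
    }

    // Safepoint cleanup, all Java threads stopped: deletion can no longer
    // race with a lock-free metaspace walker.
    static void safepoint_cleanup() {
      if (should_purge) {
        std::printf("purging unused virtual spaces\n");  // purge() stand-in
        should_purge = false;  // reset for the next cycle
      }
    }

    int main() {
      concurrent_sweep_done();
      safepoint_cleanup();
      return 0;
    }
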
@@ -1385,7 +1385,7 @@ inline intptr_t p2i(const void * p) {
 // All C++ compilers that we know of have the vtbl pointer in the first
 // word. If there are exceptions, this function needs to be made compiler
 // specific.
-static inline void* dereference_vptr(void* addr) {
+static inline void* dereference_vptr(const void* addr) {
   return *(void**)addr;
 }
 