8075242: Remove SpecializationStats

Reviewed-by: brutisso, mgerdin
Stefan Karlsson 2015-03-17 13:23:49 +01:00
parent e7a218b4b6
commit 426a345fec
14 changed files with 2 additions and 253 deletions
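
For context on what the hunks below delete: SpecializationStats was a non-product diagnostic that counted how often the specialized oop-iteration paths actually ran devirtualized, bracketing each collection with a clear()/print() pair. A minimal stand-alone sketch of that idiom (simplified names, std::atomic standing in for HotSpot's Atomic::inc; not the real class):

// A minimal sketch of the removed idiom: per-kind counters cleared
// before a collection, bumped during oop iteration, printed after.
#include <atomic>
#include <cstdio>

struct IterationStats {
  enum Kind { ik, irk, oa, NUM_Kinds };  // InstanceKlass, InstanceRefKlass, ObjArrayKlass

  static std::atomic<int> calls_total[NUM_Kinds];
  static std::atomic<int> calls_nv[NUM_Kinds];  // devirtualized ("non-virtual") calls

  static void clear() {
    for (int k = ik; k < NUM_Kinds; k++) { calls_total[k] = 0; calls_nv[k] = 0; }
  }
  static void record_call_v(Kind k)  { calls_total[k]++; }
  static void record_call_nv(Kind k) { calls_total[k]++; calls_nv[k]++; }
  static void print() {
    const char* names[] = { "ik", "irk", "oa" };
    for (int k = ik; k < NUM_Kinds; k++) {
      int t = calls_total[k], nv = calls_nv[k];
      std::printf("%-4s %6d calls %6d non-virtual (%6.2f%%)\n",
                  names[k], t, nv, t > 0 ? 100.0 * nv / t : 0.0);
    }
  }
};
std::atomic<int> IterationStats::calls_total[IterationStats::NUM_Kinds];
std::atomic<int> IterationStats::calls_nv[IterationStats::NUM_Kinds];

int main() {
  // Each GC pause bracketed its work exactly as the hunks below show:
  IterationStats::clear();                             // SpecializationStats::clear()
  IterationStats::record_call_nv(IterationStats::ik);  // from the iterate macros
  IterationStats::record_call_v(IterationStats::oa);
  IterationStats::print();                             // SpecializationStats::print()
}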

View File

@ -2997,7 +2997,6 @@ void CMSCollector::checkpointRootsInitial() {
report_heap_summary(GCWhen::BeforeGC);
ReferenceProcessor* rp = ref_processor();
SpecializationStats::clear();
assert(_restart_addr == NULL, "Control point invariant");
{
// acquire locks for subsequent manipulations
@ -3008,7 +3007,6 @@ void CMSCollector::checkpointRootsInitial() {
rp->enable_discovery();
_collectorState = Marking;
}
SpecializationStats::print();
}
void CMSCollector::checkpointRootsInitialWork() {
@ -4326,7 +4324,6 @@ void CMSCollector::checkpointRootsFinal() {
verify_work_stacks_empty();
verify_overflow_empty();
SpecializationStats::clear();
if (PrintGCDetails) {
gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
_young_gen->used() / K,
@ -4357,7 +4354,6 @@ void CMSCollector::checkpointRootsFinal() {
}
verify_work_stacks_empty();
verify_overflow_empty();
SpecializationStats::print();
}
void CMSCollector::checkpointRootsFinalWork() {

View File

@ -2026,10 +2026,6 @@ jint G1CollectedHeap::initialize() {
Shared_DirtyCardQ_lock,
&JavaThread::dirty_card_queue_set());
// In case we're keeping closure specialization stats, initialize those
// counts and that mechanism.
SpecializationStats::clear();
// Here we allocate the dummy HeapRegion that is required by the
// G1AllocRegion class.
HeapRegion* dummy_region = _hrm.get_dummy_region();
@ -3321,7 +3317,6 @@ void G1CollectedHeap::print_tracing_info() const {
concurrent_mark()->print_summary_info();
}
g1_policy()->print_yg_surv_rate_info();
SpecializationStats::print();
}
#ifndef PRODUCT

View File

@ -951,8 +951,6 @@ void ParNewGeneration::collect(bool full,
// Capture heap used before collection (for printing).
size_t gch_prev_used = gch->used();
SpecializationStats::clear();
age_table()->clear();
to()->clear(SpaceDecorator::Mangle);
@ -1072,8 +1070,6 @@ void ParNewGeneration::collect(bool full,
jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
update_time_of_last_gc(now);
SpecializationStats::print();
rp->set_enqueuing_is_done(true);
if (rp->processing_is_mt()) {
ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);

View File

@ -590,8 +590,6 @@ void DefNewGeneration::collect(bool full,
gch->trace_heap_before_gc(&gc_tracer);
SpecializationStats::clear();
// These can be shared for all code paths
IsAliveClosure is_alive(this);
ScanWeakRefClosure scan_weak_ref(this);
@ -700,7 +698,6 @@ void DefNewGeneration::collect(bool full,
// set new iteration safe limit for the survivor spaces
from()->set_concurrent_iteration_safe_limit(from()->top());
to()->set_concurrent_iteration_safe_limit(to()->top());
SpecializationStats::print();
// We need to use a monotonically non-decreasing time in ms
// or we will see time-warp warnings and os::javaTimeMillis()

View File

@ -1,115 +0,0 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "memory/specialized_oop_closures.hpp"
#include "utilities/ostream.hpp"
// For keeping stats on effectiveness.
#ifndef PRODUCT
#if ENABLE_SPECIALIZATION_STATS
int SpecializationStats::_numCallsAll;
int SpecializationStats::_numCallsTotal[NUM_Kinds];
int SpecializationStats::_numCalls_nv[NUM_Kinds];
int SpecializationStats::_numDoOopCallsTotal[NUM_Kinds];
int SpecializationStats::_numDoOopCalls_nv[NUM_Kinds];
void SpecializationStats::clear() {
_numCallsAll = 0;
for (int k = ik; k < NUM_Kinds; k++) {
_numCallsTotal[k] = 0;
_numCalls_nv[k] = 0;
_numDoOopCallsTotal[k] = 0;
_numDoOopCalls_nv[k] = 0;
}
}
void SpecializationStats::print() {
const char* header_format = " %20s %10s %11s %10s";
const char* line_format = " %20s %10d %11d %9.2f%%";
int all_numCallsTotal =
_numCallsTotal[ik] + _numCallsTotal[irk] + _numCallsTotal[oa];
int all_numCalls_nv =
_numCalls_nv[ik] + _numCalls_nv[irk] + _numCalls_nv[oa];
gclog_or_tty->print_cr("\nOf %d oop_oop_iterate calls %d (%6.3f%%) are in (ik, irk, oa).",
_numCallsAll, all_numCallsTotal,
100.0 * (float)all_numCallsTotal / (float)_numCallsAll);
// irk calls are double-counted.
int real_ik_numCallsTotal = _numCallsTotal[ik] - _numCallsTotal[irk];
int real_ik_numCalls_nv = _numCalls_nv[ik] - _numCalls_nv[irk];
gclog_or_tty->print_cr("");
gclog_or_tty->print_cr(header_format, "oop_oop_iterate:", "calls", "non-virtual", "pct");
gclog_or_tty->print_cr(header_format,
"----------",
"----------",
"-----------",
"----------");
gclog_or_tty->print_cr(line_format, "all",
all_numCallsTotal,
all_numCalls_nv,
100.0 * (float)all_numCalls_nv / (float)all_numCallsTotal);
gclog_or_tty->print_cr(line_format, "ik",
real_ik_numCallsTotal, real_ik_numCalls_nv,
100.0 * (float)real_ik_numCalls_nv /
(float)real_ik_numCallsTotal);
gclog_or_tty->print_cr(line_format, "irk",
_numCallsTotal[irk], _numCalls_nv[irk],
100.0 * (float)_numCalls_nv[irk] / (float)_numCallsTotal[irk]);
gclog_or_tty->print_cr(line_format, "oa",
_numCallsTotal[oa], _numCalls_nv[oa],
100.0 * (float)_numCalls_nv[oa] / (float)_numCallsTotal[oa]);
gclog_or_tty->print_cr("");
gclog_or_tty->print_cr(header_format, "do_oop:", "calls", "non-virtual", "pct");
gclog_or_tty->print_cr(header_format,
"----------",
"----------",
"-----------",
"----------");
int all_numDoOopCallsTotal =
_numDoOopCallsTotal[ik] + _numDoOopCallsTotal[irk] + _numDoOopCallsTotal[oa];
int all_numDoOopCalls_nv =
_numDoOopCalls_nv[ik] + _numDoOopCalls_nv[irk] + _numDoOopCalls_nv[oa];
gclog_or_tty->print_cr(line_format, "all",
all_numDoOopCallsTotal, all_numDoOopCalls_nv,
100.0 * (float)all_numDoOopCalls_nv /
(float)all_numDoOopCallsTotal);
const char* kind_names[] = { "ik", "irk", "oa" };
for (int k = ik; k < NUM_Kinds; k++) {
gclog_or_tty->print_cr(line_format, kind_names[k],
_numDoOopCallsTotal[k], _numDoOopCalls_nv[k],
(_numDoOopCallsTotal[k] > 0 ?
100.0 * (float)_numDoOopCalls_nv[k] /
(float)_numDoOopCallsTotal[k]
: 0.0));
}
}
#endif // ENABLE_SPECIALIZATION_STATS
#endif // !PRODUCT
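
One subtlety in the deleted print() above: as its comment notes, irk calls are double-counted, because InstanceRefKlass's iterate bodies delegate to InstanceKlass's (visible in the InstanceRefKlass hunks further down), so every irk call also bumps the ik counters. A tiny sketch of the subtraction, with made-up counts:

#include <cstdio>

int main() {
  // Hypothetical counter values after one collection.
  int numCallsTotal_ik  = 100;  // includes the delegated irk calls
  int numCallsTotal_irk = 30;
  // print() reports only the pure-ik share:
  int real_ik_numCallsTotal = numCallsTotal_ik - numCallsTotal_irk;  // 70
  std::printf("real ik calls: %d\n", real_ik_numCallsTotal);
}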

View File

@ -60,10 +60,10 @@ class NoHeaderExtendedOopClosure;
// This macro applies an argument macro to all OopClosures for which we
// want specialized bodies of "oop_oop_iterate". The arguments to "f" are:
// "f(closureType, non_virtual)"
// where "closureType" is the name of the particular subclass of OopClosure,
// where "closureType" is the name of the particular subclass of ExtendedOopClosure,
// and "non_virtual" will be the string "_nv" if the closure type should
// have its "do_oop" method invoked non-virtually, or else the
// string "_v". ("OopClosure" itself will be the only class in the latter
// string "_v". ("ExtendedOopClosure" itself will be the only class in the latter
// category.)
// This is split into several because of a Visual C++ 6.0 compiler bug
@ -174,93 +174,9 @@ class NoHeaderExtendedOopClosure;
// We separate these out, because sometime the general one has
// a different definition from the specialized ones, and sometimes it
// doesn't.
// NOTE: One of the valid criticisms of this
// specialize-oop_oop_iterate-for-specific-closures idiom is that it is
// easy to have a silent performance bug: if you fail to de-virtualize,
// things still work, just slower. The "SpecializationStats" mode is
// intended to at least make such a failure easy to detect.
// *Not* using the ALL_SINCE_SAVE_MARKS_CLOSURES(f) macro defined
// below means that *only* closures for which oop_oop_iterate specializations
// exist above may be applied to "oops_since_save_marks". That is,
// this form of the performance bug is caught statically. When you add
// a definition for the general type, this property goes away.
// Make sure you test with SpecializationStats to find such bugs
// when introducing a new closure where you don't want virtual dispatch.
#define ALL_SINCE_SAVE_MARKS_CLOSURES(f) \
f(OopsInGenClosure,_v) \
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(f)
// For keeping stats on effectiveness.
#define ENABLE_SPECIALIZATION_STATS 0
class SpecializationStats {
public:
enum Kind {
ik, // InstanceKlass
irk, // InstanceRefKlass
oa, // ObjArrayKlass
NUM_Kinds
};
#if ENABLE_SPECIALIZATION_STATS
private:
static bool _init;
static bool _wrapped;
static jint _numCallsAll;
static jint _numCallsTotal[NUM_Kinds];
static jint _numCalls_nv[NUM_Kinds];
static jint _numDoOopCallsTotal[NUM_Kinds];
static jint _numDoOopCalls_nv[NUM_Kinds];
public:
#endif
static void clear() PRODUCT_RETURN;
static inline void record_call() PRODUCT_RETURN;
static inline void record_iterate_call_v(Kind k) PRODUCT_RETURN;
static inline void record_iterate_call_nv(Kind k) PRODUCT_RETURN;
static inline void record_do_oop_call_v(Kind k) PRODUCT_RETURN;
static inline void record_do_oop_call_nv(Kind k) PRODUCT_RETURN;
static void print() PRODUCT_RETURN;
};
#ifndef PRODUCT
#if ENABLE_SPECIALIZATION_STATS
inline void SpecializationStats::record_call() {
Atomic::inc(&_numCallsAll);
}
inline void SpecializationStats::record_iterate_call_v(Kind k) {
Atomic::inc(&_numCallsTotal[k]);
}
inline void SpecializationStats::record_iterate_call_nv(Kind k) {
Atomic::inc(&_numCallsTotal[k]);
Atomic::inc(&_numCalls_nv[k]);
}
inline void SpecializationStats::record_do_oop_call_v(Kind k) {
Atomic::inc(&_numDoOopCallsTotal[k]);
}
inline void SpecializationStats::record_do_oop_call_nv(Kind k) {
Atomic::inc(&_numDoOopCallsTotal[k]);
Atomic::inc(&_numDoOopCalls_nv[k]);
}
#else // !ENABLE_SPECIALIZATION_STATS
inline void SpecializationStats::record_call() {}
inline void SpecializationStats::record_iterate_call_v(Kind k) {}
inline void SpecializationStats::record_iterate_call_nv(Kind k) {}
inline void SpecializationStats::record_do_oop_call_v(Kind k) {}
inline void SpecializationStats::record_do_oop_call_nv(Kind k) {}
inline void SpecializationStats::clear() {}
inline void SpecializationStats::print() {}
#endif // ENABLE_SPECIALIZATION_STATS
#endif // !PRODUCT
#endif // SHARE_VM_MEMORY_SPECIALIZED_OOP_CLOSURES_HPP
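
Since the comment block above describes the f(closureType, non_virtual) protocol only in prose, here is a hedged, self-contained sketch of the X-macro devirtualization idiom it drives; all names below are illustrative, not HotSpot's:

// Illustrative sketch of the f(closureType, non_virtual) X-macro
// protocol: closures with a do_oop_nv register with the "_nv"
// suffix, the general base closure with "_v".
#include <cstdio>

class OopClosure {
 public:
  virtual ~OopClosure() {}
  virtual void do_oop(int* p) = 0;
  void do_oop_v(int* p) { do_oop(p); }   // "_v": virtual dispatch
};

class CountClosure : public OopClosure {
 public:
  int count = 0;
  void do_oop(int* p) { do_oop_nv(p); }
  void do_oop_nv(int*) { count++; }      // "_nv": statically bound
};

// Every closure with a specialized (devirtualized) iterate body:
#define ALL_SPECIALIZED_CLOSURES(f) \
  f(CountClosure, _nv)

// Generates one iterate body per (closureType, nv_suffix) pair.
#define DEFINE_ITERATE(ClosureType, nv_suffix)                      \
  void iterate##nv_suffix(int* begin, int* end, ClosureType* cl) {  \
    for (int* p = begin; p != end; p++) {                           \
      cl->do_oop##nv_suffix(p);                                     \
    }                                                               \
  }

ALL_SPECIALIZED_CLOSURES(DEFINE_ITERATE)  // specialized, non-virtual
DEFINE_ITERATE(OopClosure, _v)            // general, virtual fallback

int main() {
  int heap[4] = {1, 2, 3, 4};
  CountClosure cl;
  iterate_nv(heap, heap + 4, &cl);  // do_oop_nv inlined, no vtable call
  std::printf("%d oops visited\n", cl.count);
}

Failing to register a closure under "_nv" here still compiles and runs, just with virtual dispatch, which is exactly the silent performance bug the removed SpecializationStats mode was meant to surface.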

View File

@ -178,7 +178,6 @@ void TenuredGeneration::collect(bool full,
bool is_tlab) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
SpecializationStats::clear();
// Temporarily expand the span of our ref processor, so
// refs discovery is over the entire heap, not just this generation
ReferenceProcessorSpanMutator
@ -195,8 +194,6 @@ void TenuredGeneration::collect(bool full,
gc_timer->register_gc_end();
gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
SpecializationStats::print();
}
HeapWord*

View File

@ -54,7 +54,6 @@
int InstanceClassLoaderKlass:: \
oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
/* Get size before changing pointers */ \
SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk);\
int size = InstanceKlass::oop_oop_iterate##nv_suffix(obj, closure); \
\
if_do_metadata_checked(closure, nv_suffix) { \
@ -74,7 +73,6 @@ oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) {
int InstanceClassLoaderKlass:: \
oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
/* Get size before changing pointers */ \
SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk);\
int size = InstanceKlass::oop_oop_iterate_backwards##nv_suffix(obj, closure); \
return size; \
}
@ -87,8 +85,6 @@ int InstanceClassLoaderKlass::
oop_oop_iterate##nv_suffix##_m(oop obj, \
OopClosureType* closure, \
MemRegion mr) { \
SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk);\
\
int size = InstanceKlass::oop_oop_iterate##nv_suffix##_m(obj, closure, mr); \
\
if_do_metadata_checked(closure, nv_suffix) { \

View File

@ -2209,15 +2209,12 @@ void InstanceKlass::oop_follow_contents(ParCompactionManager* cm,
#define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
\
int InstanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
/* header */ \
if_do_metadata_checked(closure, nv_suffix) { \
closure->do_klass##nv_suffix(obj->klass()); \
} \
InstanceKlass_OOP_MAP_ITERATE( \
obj, \
SpecializationStats:: \
record_do_oop_call##nv_suffix(SpecializationStats::ik); \
(closure)->do_oop##nv_suffix(p), \
assert_is_in_closed_subset) \
return size_helper(); \
@ -2228,14 +2225,11 @@ int InstanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure)
\
int InstanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, \
OopClosureType* closure) { \
SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \
\
assert_should_ignore_metadata(closure, nv_suffix); \
\
/* instance variables */ \
InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
obj, \
SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::ik);\
(closure)->do_oop##nv_suffix(p), \
assert_is_in_closed_subset) \
return size_helper(); \
@ -2247,7 +2241,6 @@ int InstanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj,
int InstanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj, \
OopClosureType* closure, \
MemRegion mr) { \
SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
if_do_metadata_checked(closure, nv_suffix) { \
if (mr.contains(obj)) { \
closure->do_klass##nv_suffix(obj->klass()); \

View File

@ -250,8 +250,6 @@ int InstanceMirrorKlass::oop_adjust_pointers(oop obj) {
int InstanceMirrorKlass:: \
oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
/* Get size before changing pointers */ \
SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk); \
\
InstanceKlass::oop_oop_iterate##nv_suffix(obj, closure); \
\
if_do_metadata_checked(closure, nv_suffix) { \
@ -275,8 +273,6 @@ oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) {
int InstanceMirrorKlass:: \
oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
/* Get size before changing pointers */ \
SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk); \
\
InstanceKlass::oop_oop_iterate_backwards##nv_suffix(obj, closure); \
\
if (UseCompressedOops) { \
@ -294,8 +290,6 @@ int InstanceMirrorKlass::
oop_oop_iterate##nv_suffix##_m(oop obj, \
OopClosureType* closure, \
MemRegion mr) { \
SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk); \
\
InstanceKlass::oop_oop_iterate##nv_suffix##_m(obj, closure, mr); \
\
if_do_metadata_checked(closure, nv_suffix) { \

View File

@ -260,7 +260,6 @@ int InstanceRefKlass::oop_adjust_pointers(oop obj) {
return size; \
} else if (contains(referent_addr)) { \
/* treat referent as normal oop */ \
SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk);\
closure->do_oop##nv_suffix(referent_addr); \
} \
} \
@ -276,7 +275,6 @@ int InstanceRefKlass::oop_adjust_pointers(oop obj) {
INTPTR_FORMAT, disc_addr); \
} \
) \
SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk);\
closure->do_oop##nv_suffix(disc_addr); \
} \
} else { \
@ -293,7 +291,6 @@ int InstanceRefKlass::oop_adjust_pointers(oop obj) {
} \
/* treat next as normal oop */ \
if (contains(next_addr)) { \
SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk); \
closure->do_oop##nv_suffix(next_addr); \
} \
return size; \
@ -309,8 +306,6 @@ template <class T> bool contains(T *t) { return true; }
int InstanceRefKlass:: \
oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
/* Get size before changing pointers */ \
SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk);\
\
int size = InstanceKlass::oop_oop_iterate##nv_suffix(obj, closure); \
\
if (UseCompressedOops) { \
@ -326,8 +321,6 @@ oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) {
int InstanceRefKlass:: \
oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
/* Get size before changing pointers */ \
SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk);\
\
int size = InstanceKlass::oop_oop_iterate_backwards##nv_suffix(obj, closure); \
\
if (UseCompressedOops) { \
@ -345,8 +338,6 @@ int InstanceRefKlass::
oop_oop_iterate##nv_suffix##_m(oop obj, \
OopClosureType* closure, \
MemRegion mr) { \
SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk);\
\
int size = InstanceKlass::oop_oop_iterate##nv_suffix##_m(obj, closure, mr); \
if (UseCompressedOops) { \
InstanceRefKlass_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, mr.contains); \

View File

@ -479,7 +479,6 @@ void ObjArrayKlass::oop_follow_contents(ParCompactionManager* cm,
\
int ObjArrayKlass::oop_oop_iterate##nv_suffix(oop obj, \
OopClosureType* closure) { \
SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::oa); \
assert (obj->is_array(), "obj must be array"); \
objArrayOop a = objArrayOop(obj); \
/* Get size before changing pointers. */ \
@ -497,7 +496,6 @@ int ObjArrayKlass::oop_oop_iterate##nv_suffix(oop obj,
int ObjArrayKlass::oop_oop_iterate##nv_suffix##_m(oop obj, \
OopClosureType* closure, \
MemRegion mr) { \
SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::oa); \
assert(obj->is_array(), "obj must be array"); \
objArrayOop a = objArrayOop(obj); \
/* Get size before changing pointers. */ \
@ -519,7 +517,6 @@ int ObjArrayKlass::oop_oop_iterate##nv_suffix##_m(oop obj,
int ObjArrayKlass::oop_oop_iterate_range##nv_suffix(oop obj, \
OopClosureType* closure, \
int start, int end) { \
SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::oa); \
assert(obj->is_array(), "obj must be array"); \
objArrayOop a = objArrayOop(obj); \
/* Get size before changing pointers. */ \

View File

@ -46,7 +46,6 @@ oop objArrayOopDesc::atomic_compare_exchange_oop(int index, oop exchange_value,
#define ObjArrayOop_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
\
int objArrayOopDesc::oop_iterate_range(OopClosureType* blk, int start, int end) { \
SpecializationStats::record_call(); \
return ((ObjArrayKlass*)klass())->oop_oop_iterate_range##nv_suffix(this, blk, start, end); \
}

View File

@ -692,12 +692,10 @@ inline int oopDesc::adjust_pointers() {
#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
\
inline int oopDesc::oop_iterate(OopClosureType* blk) { \
SpecializationStats::record_call(); \
return klass()->oop_oop_iterate##nv_suffix(this, blk); \
} \
\
inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) { \
SpecializationStats::record_call(); \
return klass()->oop_oop_iterate##nv_suffix##_m(this, blk, mr); \
}
@ -721,7 +719,6 @@ ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DEFN)
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
\
inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) { \
SpecializationStats::record_call(); \
return klass()->oop_oop_iterate_backwards##nv_suffix(this, blk); \
}
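
With the record_call() lines gone, each macro-generated oop_iterate body reduces to pure dispatch: the oop forwards to its Klass, and the closure's static type selects the specialized _nv body. A compressed sketch under heavily simplified types (illustrative names, not the real signatures):

// Sketch of the two-level dispatch the OOP_ITERATE_DEFN macros
// above generate, with types pared down to the bare shape.
#include <cstdio>

struct MarkClosure {
  int marked = 0;
  void do_oop_nv(int*) { marked++; }
};

struct Klass {
  // Normally one macro-generated body per specialized closure type.
  int oop_oop_iterate_nv(int* field, MarkClosure* cl) {
    cl->do_oop_nv(field);  // statically bound, inlinable
    return 1;              // object size (simplified)
  }
};

struct oopDesc {
  Klass k;
  int field = 42;
  Klass* klass() { return &k; }
  // OOP_ITERATE_DEFN(MarkClosure, _nv) expands to roughly this; the
  // deleted record_call() was the only other statement in the body.
  int oop_iterate(MarkClosure* cl) {
    return klass()->oop_oop_iterate_nv(&field, cl);
  }
};

int main() {
  oopDesc o;
  MarkClosure cl;
  o.oop_iterate(&cl);
  std::printf("marked=%d\n", cl.marked);
}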