8166848: Performance bug: SystemDictionary - optimization
Check instead that a bucket isn't 10x the average.

Reviewed-by: iklam, gziemski, sspitsyn
parent 553d1e815b
commit f820e2f2ee
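
Before the diff itself: the one-line summary above is terse, so here is a minimal, self-contained C++ sketch of the rule it describes. This is not HotSpot code; SimpleTable, check_balance() and the demo keys are invented for illustration. It mirrors the policy the patch installs in verify_lookup_length(): walk the chained buckets, track the longest chain, and assert that it is shorter than 10x the (rounded-up) average chain length.

// Illustrative only -- a toy chained hash table with the same balance assert.
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <vector>

struct SimpleTable {
  std::vector<std::vector<int> > buckets;   // chained buckets

  explicit SimpleTable(size_t table_size) : buckets(table_size) {}

  void add(int key) {
    buckets[static_cast<size_t>(key) % buckets.size()].push_back(key);
  }

  int table_size() const { return static_cast<int>(buckets.size()); }

  int number_of_entries() const {
    int n = 0;
    for (size_t i = 0; i < buckets.size(); i++) n += static_cast<int>(buckets[i].size());
    return n;
  }

  // Same shape as the new BasicHashtable::verify_lookup_length():
  // log the longest chain and assert it is < 10x the average chain length.
  void check_balance(const char* table_name) const {
    int max_bucket_count = 0;
    for (size_t i = 0; i < buckets.size(); i++) {
      max_bucket_count = std::max(max_bucket_count, static_cast<int>(buckets[i].size()));
    }
    std::printf("%s max bucket size %d element count %d table size %d\n",
                table_name, max_bucket_count, number_of_entries(), table_size());
    assert(max_bucket_count < (1 + number_of_entries() / table_size()) * 10 &&
           "Table is unbalanced");
  }
};

int main() {
  SimpleTable table(109);
  for (int i = 0; i < 1000; i++) table.add(i * 7);   // well-spread keys -> balanced table
  table.check_balance("Demo Table");                 // passes; a pathological hash would trip the assert
  return 0;
}

Unlike the old heuristic (average lookup length compared against the load factor), this check needs no per-lookup counters, which is why the _lookup_count/_lookup_length bookkeeping and the DEBUG_ONLY hooks in get_entry() disappear in the diff below.
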
@@ -337,15 +337,12 @@ void Dictionary::add_klass(Symbol* class_name, ClassLoaderData* loader_data,
 DictionaryEntry* Dictionary::get_entry(int index, unsigned int hash,
                                        Symbol* class_name,
                                        ClassLoaderData* loader_data) {
-  DEBUG_ONLY(_lookup_count++);
   for (DictionaryEntry* entry = bucket(index);
                         entry != NULL;
                         entry = entry->next()) {
     if (entry->hash() == hash && entry->equals(class_name, loader_data)) {
-      DEBUG_ONLY(bucket_count_hit(index));
       return entry;
     }
-    DEBUG_ONLY(_lookup_length++);
   }
   return NULL;
 }
@@ -548,114 +545,24 @@ void Dictionary::print(bool details) {
   tty->cr();
 }
 
-#ifdef ASSERT
-void Dictionary::printPerformanceInfoDetails() {
-  if (log_is_enabled(Info, hashtables)) {
-    ResourceMark rm;
-
-    log_info(hashtables)(" ");
-    log_info(hashtables)("Java system dictionary (table_size=%d, classes=%d)",
-                         table_size(), number_of_entries());
-    log_info(hashtables)("1st number: the bucket index");
-    log_info(hashtables)("2nd number: the hit percentage for this bucket");
-    log_info(hashtables)("3rd number: the entry's index within this bucket");
-    log_info(hashtables)("4th number: the hash index of this entry");
-    log_info(hashtables)(" ");
-
-    // find top buckets with highest lookup count
-#define TOP_COUNT 16
-    int topItemsIndicies[TOP_COUNT];
-    for (int i = 0; i < TOP_COUNT; i++) {
-      topItemsIndicies[i] = i;
-    }
-    double total = 0.0;
-    for (int i = 0; i < table_size(); i++) {
-      // find the total count number, so later on we can
-      // express bucket lookup count as a percentage of all lookups
-      unsigned value = bucket_hits(i);
-      total += value;
-
-      // find the top entry with min value
-      int min_index = 0;
-      unsigned min_value = bucket_hits(topItemsIndicies[min_index]);
-      for (int j = 1; j < TOP_COUNT; j++) {
-        unsigned top_value = bucket_hits(topItemsIndicies[j]);
-        if (top_value < min_value) {
-          min_value = top_value;
-          min_index = j;
-        }
-      }
-      // if the bucket loookup value is bigger than the top buckets min
-      // move that bucket index into the top list
-      if (value > min_value) {
-        topItemsIndicies[min_index] = i;
-      }
-    }
-
-    for (int index = 0; index < table_size(); index++) {
-      double percentage = 100.0 * (double)bucket_hits(index)/total;
-      int chain = 0;
-      for (DictionaryEntry* probe = bucket(index);
-                            probe != NULL;
-                            probe = probe->next()) {
-        Klass* e = probe->klass();
-        ClassLoaderData* loader_data = probe->loader_data();
-        bool is_defining_class =
-           (loader_data == e->class_loader_data());
-        log_info(hashtables)("%4d: %5.2f%%: %3d: %10u: %s, loader %s",
-                             index, percentage, chain, probe->hash(), e->external_name(),
-                             (loader_data != NULL) ? loader_data->loader_name() : "NULL");
-
-        chain++;
-      }
-      if (chain == 0) {
-        log_info(hashtables)("%4d:", index+1);
-      }
-    }
-    log_info(hashtables)(" ");
-
-    // print out the TOP_COUNT of buckets with highest lookup count (unsorted)
-    log_info(hashtables)("Top %d buckets:", TOP_COUNT);
-    for (int i = 0; i < TOP_COUNT; i++) {
-      log_info(hashtables)("%4d: hits %5.2f%%",
-                           topItemsIndicies[i],
-                           100.0*(double)bucket_hits(topItemsIndicies[i])/total);
-    }
-  }
-}
-#endif // ASSERT
+void DictionaryEntry::verify() {
+  Klass* e = klass();
+  ClassLoaderData* cld = loader_data();
+  guarantee(e->is_instance_klass(),
+            "Verify of system dictionary failed");
+  // class loader must be present; a null class loader is the
+  // boostrap loader
+  guarantee(cld != NULL || DumpSharedSpaces ||
+            cld->class_loader() == NULL ||
+            cld->class_loader()->is_instance(),
+            "checking type of class_loader");
+  e->verify();
+  verify_protection_domain_set();
+}
 
 void Dictionary::verify() {
   guarantee(number_of_entries() >= 0, "Verify of system dictionary failed");
 
-  int element_count = 0;
-  for (int index = 0; index < table_size(); index++) {
-    for (DictionaryEntry* probe = bucket(index);
-                          probe != NULL;
-                          probe = probe->next()) {
-      Klass* e = probe->klass();
-      ClassLoaderData* loader_data = probe->loader_data();
-      guarantee(e->is_instance_klass(),
-                "Verify of system dictionary failed");
-      // class loader must be present; a null class loader is the
-      // boostrap loader
-      guarantee(loader_data != NULL || DumpSharedSpaces ||
-                loader_data->class_loader() == NULL ||
-                loader_data->class_loader()->is_instance(),
-                "checking type of class_loader");
-      e->verify();
-      probe->verify_protection_domain_set();
-      element_count++;
-    }
-  }
-  guarantee(number_of_entries() == element_count,
-            "Verify of system dictionary failed");
-#ifdef ASSERT
-  if (!verify_lookup_length((double)number_of_entries() / table_size(), "System Dictionary")) {
-    this->printPerformanceInfoDetails();
-  }
-#endif // ASSERT
+  verify_table<DictionaryEntry>("System Dictionary");
 
   _pd_cache_table->verify();
 }
@@ -211,6 +211,8 @@ class DictionaryEntry : public HashtableEntry<InstanceKlass*, mtClass> {
     }
     st->print_cr("pd set count = #%d", count);
   }
 
+  void verify();
+
 };
 
 // Entry in a SymbolPropertyTable, mapping a single Symbol*
@@ -510,18 +510,7 @@ void ModuleEntry::print(outputStream* st) {
 }
 
 void ModuleEntryTable::verify() {
-  int element_count = 0;
-  for (int i = 0; i < table_size(); i++) {
-    for (ModuleEntry* probe = bucket(i);
-                      probe != NULL;
-                      probe = probe->next()) {
-      probe->verify();
-      element_count++;
-    }
-  }
-  guarantee(number_of_entries() == element_count,
-            "Verify of Module Entry Table failed");
-  DEBUG_ONLY(verify_lookup_length((double)number_of_entries() / table_size(), "Module Entry Table"));
+  verify_table<ModuleEntry>("Module Entry Table");
 }
 
 void ModuleEntry::verify() {
@@ -350,18 +350,7 @@ void PackageEntry::print(outputStream* st) {
 }
 
 void PackageEntryTable::verify() {
-  int element_count = 0;
-  for (int index = 0; index < table_size(); index++) {
-    for (PackageEntry* probe = bucket(index);
-                       probe != NULL;
-                       probe = probe->next()) {
-      probe->verify();
-      element_count++;
-    }
-  }
-  guarantee(number_of_entries() == element_count,
-            "Verify of Package Entry Table failed");
-  DEBUG_ONLY(verify_lookup_length((double)number_of_entries() / table_size(), "Package Entry Table"));
+  verify_table<PackageEntry>("Package Entry Table");
 }
 
 void PackageEntry::verify() {
@@ -97,18 +97,7 @@ void ProtectionDomainCacheEntry::print() {
 #endif
 
 void ProtectionDomainCacheTable::verify() {
-  int element_count = 0;
-  for (int index = 0; index < table_size(); index++) {
-    for (ProtectionDomainCacheEntry* probe = bucket(index);
-                                     probe != NULL;
-                                     probe = probe->next()) {
-      probe->verify();
-      element_count++;
-    }
-  }
-  guarantee(number_of_entries() == element_count,
-            "Verify of protection domain cache table failed");
-  DEBUG_ONLY(verify_lookup_length((double)number_of_entries() / table_size(), "Domain Cache Table"));
+  verify_table<ProtectionDomainCacheEntry>("Protection Domain Table");
 }
 
 void ProtectionDomainCacheEntry::verify() {
@@ -126,6 +126,8 @@ DEF_HANDLE(typeArray , is_typeArray_noinline )
 
 // Metadata Handles. Unlike oop Handles these are needed to prevent metadata
 // from being reclaimed by RedefineClasses.
+// Metadata Handles should be passed around as const references to avoid copy construction
+// and destruction for parameters.
 
 // Specific Handles for different oop types
 #define DEF_METADATA_HANDLE(name, type) \
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,11 @@
 
 #include "precompiled.hpp"
 #include "classfile/altHashing.hpp"
+#include "classfile/dictionary.hpp"
 #include "classfile/javaClasses.inline.hpp"
+#include "classfile/moduleEntry.hpp"
+#include "classfile/packageEntry.hpp"
+#include "classfile/protectionDomainCache.hpp"
 #include "classfile/stringTable.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/filemap.hpp"
@@ -276,14 +280,22 @@ template <class T, MEMFLAGS F> void Hashtable<T, F>::print() {
 }
 
 
-template <MEMFLAGS F> void BasicHashtable<F>::verify() {
-  int count = 0;
-  for (int i = 0; i < table_size(); i++) {
-    for (BasicHashtableEntry<F>* p = bucket(i); p != NULL; p = p->next()) {
-      ++count;
+template <MEMFLAGS F>
+template <class T> void BasicHashtable<F>::verify_table(const char* table_name) {
+  int element_count = 0;
+  int max_bucket_count = 0;
+  for (int index = 0; index < table_size(); index++) {
+    int bucket_count = 0;
+    for (T* probe = (T*)bucket(index); probe != NULL; probe = probe->next()) {
+      probe->verify();
+      bucket_count++;
     }
+    element_count += bucket_count;
+    max_bucket_count = MAX2(max_bucket_count, bucket_count);
   }
-  assert(count == number_of_entries(), "number of hashtable entries incorrect");
+  guarantee(number_of_entries() == element_count,
+            "Verify of %s failed", table_name);
+  DEBUG_ONLY(verify_lookup_length(max_bucket_count, table_name));
 }
 
 
@@ -291,18 +303,12 @@ template <MEMFLAGS F> void BasicHashtable<F>::verify() {
 
 #ifdef ASSERT
 
-template <MEMFLAGS F> bool BasicHashtable<F>::verify_lookup_length(double load, const char *table_name) {
-  if ((!_lookup_warning) && (_lookup_count != 0)
-      && ((double)_lookup_length / (double)_lookup_count > load * 2.0)) {
-    warning("Performance bug: %s lookup_count=%d "
-            "lookup_length=%d average=%lf load=%f",
-            table_name, _lookup_count, _lookup_length,
-            (double)_lookup_length / _lookup_count, load);
-    _lookup_warning = true;
-
-    return false;
-  }
-  return true;
+// Assert if the longest bucket is 10x longer than the average bucket size.
+// Could change back to a warning, but warnings are not noticed.
+template <MEMFLAGS F> void BasicHashtable<F>::verify_lookup_length(int max_bucket_count, const char *table_name) {
+  log_info(hashtables)("%s max bucket size %d element count %d table size %d", table_name,
+                       max_bucket_count, _number_of_entries, _table_size);
+  assert (max_bucket_count < ((1 + number_of_entries()/table_size())*10), "Table is unbalanced");
 }
 
 #endif
@@ -344,3 +350,8 @@ template class HashtableEntry<Symbol*, mtTracing>;
 template class BasicHashtable<mtTracing>;
 #endif
 template class BasicHashtable<mtCompiler>;
+
+template void BasicHashtable<mtClass>::verify_table<DictionaryEntry>(char const*);
+template void BasicHashtable<mtModule>::verify_table<ModuleEntry>(char const*);
+template void BasicHashtable<mtModule>::verify_table<PackageEntry>(char const*);
+template void BasicHashtable<mtClass>::verify_table<ProtectionDomainCacheEntry>(char const*);
@@ -124,17 +124,9 @@ private:
   // Instance variable
   BasicHashtableEntry<F>* _entry;
 
-#ifdef ASSERT
-private:
-  unsigned _hits;
-public:
-  unsigned hits()   { return _hits; }
-  void count_hit()  { _hits++; }
-#endif
-
 public:
   // Accessing
-  void clear() { _entry = NULL; DEBUG_ONLY(_hits = 0); }
+  void clear() { _entry = NULL; }
 
   // The following methods use order access methods to avoid race
   // conditions in multiprocessor systems.
@@ -179,10 +171,7 @@ private:
 protected:
 
 #ifdef ASSERT
-  bool _lookup_warning;
-  mutable int _lookup_count;
-  mutable int _lookup_length;
-  bool verify_lookup_length(double load, const char *table_name);
+  void verify_lookup_length(int max_bucket_count, const char *table_name);
 #endif
 
   void initialize(int table_size, int entry_size, int number_of_entries);
@@ -232,16 +221,7 @@ public:
 
   int number_of_entries() { return _number_of_entries; }
 
-  void verify() PRODUCT_RETURN;
-
-#ifdef ASSERT
-  void bucket_count_hit(int i) const {
-    _buckets[i].count_hit();
-  }
-  unsigned bucket_hits(int i) const {
-    return _buckets[i].hits();
-  }
-#endif
+  template <class T> void verify_table(const char* table_name) PRODUCT_RETURN;
 };
 
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,11 +64,6 @@ template <MEMFLAGS F> inline void BasicHashtable<F>::initialize(int table_size,
   _first_free_entry = NULL;
   _end_block = NULL;
   _number_of_entries = number_of_entries;
-#ifdef ASSERT
-  _lookup_warning = false;
-  _lookup_count = 0;
-  _lookup_length = 0;
-#endif
 }
 