8195100: Use a low latency hashtable for SymbolTable

Used ConcurrentHashTable, similar to StringTable

Reviewed-by: coleenp, kbarrett, iklam, pliden
Gerard Ziemski 2018-08-14 18:42:14 -05:00
parent 9cea96184f
commit b75805c1a5
26 changed files with 1094 additions and 798 deletions
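
The SymbolTable is rebuilt on the same pattern StringTable already uses: the buckets live in a ConcurrentHashTable, bookkeeping is kept in atomic counters (_items_count, _uncleaned_items_count), and maintenance runs as concurrent work on the ServiceThread instead of at safepoints. Whether that work grows the table or purges dead entries is decided from two ratios over the current size, the load factor (live items per bucket) and the dead factor (uncleaned items per bucket), as in get_load_factor/get_dead_factor below. A standalone sketch of that decision, with illustrative thresholds rather than the constants used in stringTable.cpp / symbolTable.cpp:

    #include <cstddef>
    #include <cstdio>

    enum class ConcurrentWork { None, Grow, Clean };

    // Illustrative thresholds only; the real constants in stringTable.cpp /
    // symbolTable.cpp may differ.
    static ConcurrentWork decide_concurrent_work(size_t items, size_t uncleaned,
                                                 size_t table_size) {
      const double preferred_load_factor = 2.0;  // live items per bucket before growing
      const double clean_dead_factor     = 0.5;  // dead items per bucket before cleaning
      double load_factor = (double)items / table_size;
      double dead_factor = (double)uncleaned / table_size;
      if (load_factor > preferred_load_factor) return ConcurrentWork::Grow;
      if (dead_factor > clean_dead_factor)     return ConcurrentWork::Clean;
      return ConcurrentWork::None;
    }

    int main() {
      printf("%d\n", (int)decide_concurrent_work(70000,   100, 32768));  // 1 (Grow)
      printf("%d\n", (int)decide_concurrent_work(20000, 20000, 32768));  // 2 (Clean)
      printf("%d\n", (int)decide_concurrent_work(10000,   100, 32768));  // 0 (None)
      return 0;
    }
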

@ -1418,6 +1418,28 @@ bool ClassLoaderDataGraph::do_unloading(bool do_cleaning) {
}
if (seen_dead_loader) {
data = _head;
while (data != NULL) {
// Remove entries in the dictionary of live class loader that have
// initiated loading classes in a dead class loader.
if (data->dictionary() != NULL) {
data->dictionary()->do_unloading();
}
// Walk a ModuleEntry's reads, and a PackageEntry's exports
// lists to determine if there are modules on those lists that are now
// dead and should be removed. A module's life cycle is equivalent
// to its defining class loader's life cycle. Since a module is
// considered dead if its class loader is dead, these walks must
// occur after each class loader's aliveness is determined.
if (data->packages() != NULL) {
data->packages()->purge_all_package_exports();
}
if (data->modules_defined()) {
data->modules()->purge_all_module_reads();
}
data = data->next();
}
SymbolTable::do_check_concurrent_work();
JFR_ONLY(post_class_unload_events();)
}

@ -231,6 +231,10 @@ public:
// For reading from/writing to the CDS archive
void serialize(SerializeClosure* soc);
inline bool empty() {
return (_entry_count == 0);
}
};
template <class T, class N> class CompactHashtable : public SimpleCompactHashtable {

@ -64,9 +64,9 @@
// --------------------------------------------------------------------------
StringTable* StringTable::_the_table = NULL;
bool StringTable::_shared_string_mapped = false;
CompactHashtable<oop, char> StringTable::_shared_table;
bool StringTable::_alt_hash = false;
volatile bool StringTable::_shared_string_mapped = false;
volatile bool StringTable::_alt_hash = false;
static juint murmur_seed = 0;
@ -176,18 +176,18 @@ class StringTableLookupOop : public StackObj {
}
};
static size_t ceil_pow_2(uintx val) {
static size_t ceil_log2(size_t val) {
size_t ret;
for (ret = 1; ((size_t)1 << ret) < val; ++ret);
return ret;
}
StringTable::StringTable() : _local_table(NULL), _current_size(0), _has_work(0),
_needs_rehashing(false), _weak_handles(NULL), _items(0), _uncleaned_items(0) {
_needs_rehashing(false), _weak_handles(NULL), _items_count(0), _uncleaned_items_count(0) {
_weak_handles = new OopStorage("StringTable weak",
StringTableWeakAlloc_lock,
StringTableWeakActive_lock);
size_t start_size_log_2 = ceil_pow_2(StringTableSize);
size_t start_size_log_2 = ceil_log2(StringTableSize);
_current_size = ((size_t)1) << start_size_log_2;
log_trace(stringtable)("Start size: " SIZE_FORMAT " (" SIZE_FORMAT ")",
_current_size, start_size_log_2);
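
The helper renamed from ceil_pow_2 to ceil_log2 above returns the smallest exponent ret such that ((size_t)1 << ret) >= val; the constructor then shifts that back into a power-of-two starting size. The same loop, copied stand-alone and checked against a few inputs:

    #include <cstddef>
    #include <cstdio>

    // Same loop as the renamed helper: smallest ret with ((size_t)1 << ret) >= val.
    static size_t ceil_log2(size_t val) {
      size_t ret;
      for (ret = 1; ((size_t)1 << ret) < val; ++ret);
      return ret;
    }

    int main() {
      // 1024 -> 10, 1025 -> 11, 65536 -> 16
      printf("%zu %zu %zu\n", ceil_log2(1024), ceil_log2(1025), ceil_log2(65536));
      return 0;
    }
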
@ -195,32 +195,31 @@ StringTable::StringTable() : _local_table(NULL), _current_size(0), _has_work(0),
}
size_t StringTable::item_added() {
return Atomic::add((size_t)1, &(the_table()->_items));
return Atomic::add((size_t)1, &(the_table()->_items_count));
}
size_t StringTable::add_items_to_clean(size_t ndead) {
size_t total = Atomic::add((size_t)ndead, &(the_table()->_uncleaned_items));
size_t StringTable::add_items_count_to_clean(size_t ndead) {
size_t total = Atomic::add((size_t)ndead, &(the_table()->_uncleaned_items_count));
log_trace(stringtable)(
"Uncleaned items:" SIZE_FORMAT " added: " SIZE_FORMAT " total:" SIZE_FORMAT,
the_table()->_uncleaned_items, ndead, total);
the_table()->_uncleaned_items_count, ndead, total);
return total;
}
void StringTable::item_removed() {
Atomic::add((size_t)-1, &(the_table()->_items));
Atomic::add((size_t)-1, &(the_table()->_items_count));
}
double StringTable::get_load_factor() {
return (_items*1.0)/_current_size;
return (double)_items_count/_current_size;
}
double StringTable::get_dead_factor() {
return (_uncleaned_items*1.0)/_current_size;
return (double)_uncleaned_items_count/_current_size;
}
size_t StringTable::table_size(Thread* thread) {
return ((size_t)(1)) << _local_table->get_size_log2(thread != NULL ? thread
: Thread::current());
size_t StringTable::table_size() {
return ((size_t)1) << _local_table->get_size_log2(Thread::current());
}
void StringTable::trigger_concurrent_work() {
@ -406,7 +405,7 @@ void StringTable::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f,
// This is the serial case without ParState.
// Just set the correct number and check for a cleaning phase.
the_table()->_uncleaned_items = stiac._count;
the_table()->_uncleaned_items_count = stiac._count;
StringTable::the_table()->check_concurrent_work();
if (processed != NULL) {
@ -433,7 +432,7 @@ void StringTable::possibly_parallel_unlink(
_par_state_string->weak_oops_do(&stiac, &dnc);
// Accumulate the dead strings.
the_table()->add_items_to_clean(stiac._count);
the_table()->add_items_count_to_clean(stiac._count);
*processed = (int) stiac._count_total;
*removed = (int) stiac._count;
@ -465,7 +464,7 @@ void StringTable::grow(JavaThread* jt) {
}
}
gt.done(jt);
_current_size = table_size(jt);
_current_size = table_size();
log_debug(stringtable)("Grown to size:" SIZE_FORMAT, _current_size);
}
@ -843,7 +842,7 @@ void StringTable::write_to_archive() {
assert(MetaspaceShared::is_heap_object_archiving_allowed(), "must be");
_shared_table.reset();
int num_buckets = the_table()->_items / SharedSymbolTableBucketSize;
int num_buckets = the_table()->_items_count / SharedSymbolTableBucketSize;
// calculation of num_buckets can result in zero buckets, we need at least one
CompactStringTableWriter writer(num_buckets > 1 ? num_buckets : 1,
&MetaspaceShared::stats()->string);

@ -58,21 +58,22 @@ private:
static StringTable* _the_table;
// Shared string table
static CompactHashtable<oop, char> _shared_table;
static bool _shared_string_mapped;
static bool _alt_hash;
static volatile bool _shared_string_mapped;
static volatile bool _alt_hash;
private:
// Set if one bucket is out of balance due to hash algorithm deficiency
StringTableHash* _local_table;
size_t _current_size;
volatile bool _has_work;
// Set if one bucket is out of balance due to hash algorithm deficiency
volatile bool _needs_rehashing;
OopStorage* _weak_handles;
volatile size_t _items;
volatile size_t _items_count;
DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
volatile size_t _uncleaned_items;
volatile size_t _uncleaned_items_count;
DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
double get_load_factor();
@ -83,7 +84,7 @@ private:
static size_t item_added();
static void item_removed();
size_t add_items_to_clean(size_t ndead);
size_t add_items_count_to_clean(size_t ndead);
StringTable();
@ -100,7 +101,7 @@ private:
public:
// The string table
static StringTable* the_table() { return _the_table; }
size_t table_size(Thread* thread = NULL);
size_t table_size();
static OopStorage* weak_storage() { return the_table()->_weak_handles; }
@ -116,7 +117,7 @@ private:
// Must be called before a parallel walk where strings might die.
static void reset_dead_counter() {
the_table()->_uncleaned_items = 0;
the_table()->_uncleaned_items_count = 0;
}
// After the parallel walk this method must be called to trigger
// cleaning. Note it might trigger a resize instead.
@ -127,7 +128,7 @@ private:
// If GC uses ParState directly it should add the number of cleared
// strings to this method.
static void inc_dead_counter(size_t ndead) {
the_table()->add_items_to_clean(ndead);
the_table()->add_items_count_to_clean(ndead);
}
// Delete pointers to otherwise-unreachable objects.

File diff suppressed because it is too large.

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,23 +26,11 @@
#define SHARE_VM_CLASSFILE_SYMBOLTABLE_HPP
#include "memory/allocation.hpp"
#include "memory/padded.hpp"
#include "oops/symbol.hpp"
#include "utilities/concurrentHashTable.hpp"
#include "utilities/hashtable.hpp"
// The symbol table holds all Symbol*s and corresponding interned strings.
// Symbol*s and literal strings should be canonicalized.
//
// The interned strings are created lazily.
//
// It is implemented as an open hash table with a fixed number of buckets.
//
// %note:
// - symbolTableEntrys are allocated in blocks to reduce the space overhead.
class BoolObjectClosure;
class outputStream;
class SerializeClosure;
// TempNewSymbol acts as a handle class in a handle/body idiom and is
// responsible for proper resource management of the body (which is a Symbol*).
// The body is resource managed by a reference counting scheme.
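
The handle/body idiom described above works by reference counting: the lookup that produced the Symbol* has already taken a reference, which the TempNewSymbol adopts; copies increment the count and destruction decrements it, so a symbol left with no holders can later be reclaimed by the cleaning pass. A minimal sketch with invented types (RefCounted, TempHandle), not the HotSpot Symbol/TempNewSymbol:

    #include <cassert>

    // Invented types; not the HotSpot Symbol / TempNewSymbol.
    struct RefCounted {
      int _refcount = 0;
      void increment_refcount() { ++_refcount; }
      void decrement_refcount() { --_refcount; }
    };

    class TempHandle {
      RefCounted* _temp;
     public:
      TempHandle() : _temp(nullptr) {}
      // Adopts a reference the lookup already took; does not increment.
      explicit TempHandle(RefCounted* s) : _temp(s) {}
      TempHandle(const TempHandle& rhs) : _temp(rhs._temp) {
        if (_temp != nullptr) _temp->increment_refcount();
      }
      ~TempHandle() { if (_temp != nullptr) _temp->decrement_refcount(); }
      RefCounted* operator->() const { return _temp; }
    };

    int main() {
      RefCounted body;
      body.increment_refcount();       // stands in for the lookup that created it
      {
        TempHandle h(&body);           // takes over that reference
        TempHandle copy(h);            // copy bumps the count
        assert(body._refcount == 2);
      }                                // both handles dropped their references
      assert(body._refcount == 0);     // now eligible for cleanup
      return 0;
    }
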
@ -59,7 +47,7 @@ class SerializeClosure;
class TempNewSymbol : public StackObj {
Symbol* _temp;
public:
public:
TempNewSymbol() : _temp(NULL) {}
// Conversion from a Symbol* to a TempNewSymbol.
@ -97,35 +85,69 @@ class TempNewSymbol : public StackObj {
};
template <class T, class N> class CompactHashtable;
class CompactSymbolTableWriter;
class SerializeClosure;
class SymbolTable : public RehashableHashtable<Symbol*, mtSymbol> {
class SymbolTableConfig;
typedef ConcurrentHashTable<Symbol*,
SymbolTableConfig, mtSymbol> SymbolTableHash;
class SymbolTableCreateEntry;
class SymbolTable : public CHeapObj<mtSymbol> {
friend class VMStructs;
friend class Symbol;
friend class ClassFileParser;
friend class SymbolTableConfig;
friend class SymbolTableCreateEntry;
private:
static void delete_symbol(Symbol* sym);
void grow(JavaThread* jt);
void clean_dead_entries(JavaThread* jt);
// The symbol table
static SymbolTable* _the_table;
// Set if one bucket is out of balance due to hash algorithm deficiency
static bool _needs_rehashing;
static bool _lookup_shared_first;
// Shared symbol table.
static CompactHashtable<Symbol*, char> _shared_table;
static volatile bool _lookup_shared_first;
static volatile bool _alt_hash;
// For statistics
static int _symbols_removed;
static int _symbols_counted;
volatile size_t _symbols_removed;
volatile size_t _symbols_counted;
// shared symbol table.
static CompactHashtable<Symbol*, char> _shared_table;
SymbolTableHash* _local_table;
size_t _current_size;
volatile bool _has_work;
// Set if one bucket is out of balance due to hash algorithm deficiency
volatile bool _needs_rehashing;
Symbol* allocate_symbol(const u1* name, int len, bool c_heap, TRAPS); // Assumes no characters larger than 0x7F
volatile size_t _items_count;
volatile size_t _uncleaned_items_count;
double get_load_factor();
double get_dead_factor();
void check_concurrent_work();
void trigger_concurrent_work();
static void item_added();
static void item_removed();
static void set_item_clean_count(size_t ncl);
static void mark_item_clean_count();
SymbolTable();
Symbol* allocate_symbol(const char* name, int len, bool c_heap, TRAPS); // Assumes no characters larger than 0x7F
Symbol* do_lookup(const char* name, int len, uintx hash);
Symbol* do_add_if_needed(const char* name, int len, uintx hash, bool heap, TRAPS);
// Adding elements
Symbol* basic_add(int index, u1* name, int len, unsigned int hashValue,
bool c_heap, TRAPS);
bool basic_add(ClassLoaderData* loader_data,
const constantPoolHandle& cp, int names_count,
const char** names, int* lengths, int* cp_indices,
unsigned int* hashValues, TRAPS);
static void add(ClassLoaderData* loader_data,
const constantPoolHandle& cp, int names_count,
const char** names, int* lengths, int* cp_indices,
unsigned int* hashValues, TRAPS);
static void new_symbols(ClassLoaderData* loader_data,
const constantPoolHandle& cp, int names_count,
@ -136,15 +158,8 @@ private:
}
static Symbol* lookup_shared(const char* name, int len, unsigned int hash);
Symbol* lookup_dynamic(int index, const char* name, int len, unsigned int hash);
Symbol* lookup(int index, const char* name, int len, unsigned int hash);
SymbolTable()
: RehashableHashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>)) {}
SymbolTable(HashtableBucket<mtSymbol>* t, int number_of_entries)
: RehashableHashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>), t,
number_of_entries) {}
Symbol* lookup_dynamic(const char* name, int len, unsigned int hash);
Symbol* lookup_common(const char* name, int len, unsigned int hash);
// Arena for permanent symbols (null class loader) that are never unloaded
static Arena* _arena;
@ -152,88 +167,45 @@ private:
static void initialize_symbols(int arena_alloc_size = 0);
static volatile int _parallel_claimed_idx;
void concurrent_work(JavaThread* jt);
void print_table_statistics(outputStream* st, const char* table_name);
void try_rehash_table();
bool do_rehash();
typedef SymbolTable::BucketUnlinkContext BucketUnlinkContext;
// Release any dead symbols. Unlinked bucket entries are collected in the given
// context to be freed later.
// This allows multiple threads to work on the table at once.
static void buckets_unlink(int start_idx, int end_idx, BucketUnlinkContext* context);
public:
// The symbol table
static SymbolTable* the_table() { return _the_table; }
size_t table_size();
enum {
symbol_alloc_batch_size = 8,
// Pick initial size based on java -version size measurements
symbol_alloc_arena_size = 360*K
symbol_alloc_arena_size = 360*K // TODO (revisit)
};
// The symbol table
static SymbolTable* the_table() { return _the_table; }
// Size of one bucket in the string table. Used when checking for rollover.
static uint bucket_size() { return sizeof(HashtableBucket<mtSymbol>); }
static void create_table() {
assert(_the_table == NULL, "One symbol table allowed.");
_the_table = new SymbolTable();
initialize_symbols(symbol_alloc_arena_size);
}
static unsigned int hash_symbol(const char* s, int len);
static unsigned int hash_shared_symbol(const char* s, int len);
static void unlink() {
do_check_concurrent_work();
}
static void do_check_concurrent_work();
static void do_concurrent_work(JavaThread* jt);
static bool has_work() { return the_table()->_has_work; }
// Probing
static Symbol* lookup(const char* name, int len, TRAPS);
// lookup only, won't add. Also calculate hash.
static Symbol* lookup_only(const char* name, int len, unsigned int& hash);
// Only copy to C string to be added if lookup failed.
// adds new symbol if not found
static Symbol* lookup(const Symbol* sym, int begin, int end, TRAPS);
static void release(Symbol* sym);
// Look up the address of the literal in the SymbolTable for this Symbol*
static Symbol** lookup_symbol_addr(Symbol* sym);
// jchar (UTF16) version of lookups
static Symbol* lookup_unicode(const jchar* name, int len, TRAPS);
static Symbol* lookup_only_unicode(const jchar* name, int len, unsigned int& hash);
static void add(ClassLoaderData* loader_data,
const constantPoolHandle& cp, int names_count,
const char** names, int* lengths, int* cp_indices,
unsigned int* hashValues, TRAPS);
// Release any dead symbols
static void unlink() {
int processed = 0;
int removed = 0;
unlink(&processed, &removed);
}
static void unlink(int* processed, int* removed);
// Release any dead symbols, possibly parallel version
static void possibly_parallel_unlink(int* processed, int* removed);
// iterate over symbols
static void symbols_do(SymbolClosure *cl);
static void metaspace_pointers_do(MetaspaceClosure* it);
// Symbol creation
static Symbol* new_symbol(const char* utf8_buffer, int length, TRAPS) {
assert(utf8_buffer != NULL, "just checking");
return lookup(utf8_buffer, length, THREAD);
}
static Symbol* new_symbol(const char* name, TRAPS) {
return new_symbol(name, (int)strlen(name), THREAD);
}
static Symbol* new_symbol(const Symbol* sym, int begin, int end, TRAPS) {
assert(begin <= end && end <= sym->utf8_length(), "just checking");
return lookup(sym, begin, end, THREAD);
}
// Create a symbol in the arena for symbols that are not deleted
static Symbol* new_permanent_symbol(const char* name, TRAPS);
// Symbol lookup
static Symbol* lookup(int index, const char* name, int len, TRAPS);
// Needed for preloading classes in signatures when compiling.
// Returns the symbol if it is already present in the symbol table, otherwise
// NULL. NO ALLOCATION IS GUARANTEED!
@ -246,27 +218,45 @@ public:
return lookup_only_unicode(name, len, ignore_hash);
}
// Histogram
static void print_histogram() PRODUCT_RETURN;
static void print() PRODUCT_RETURN;
// Symbol creation
static Symbol* new_symbol(const char* utf8_buffer, int length, TRAPS) {
assert(utf8_buffer != NULL, "just checking");
return lookup(utf8_buffer, length, THREAD);
}
static Symbol* new_symbol(const char* name, TRAPS) {
return new_symbol(name, (int)strlen(name), THREAD);
}
static Symbol* new_symbol(const Symbol* sym, int begin, int end, TRAPS) {
assert(begin <= end && end <= sym->utf8_length(), "just checking");
return lookup(sym, begin, end, THREAD);
}
// Create a symbol in the arena for symbols that are not deleted
static Symbol* new_permanent_symbol(const char* name, TRAPS);
// Debugging
static void verify();
static void dump(outputStream* st, bool verbose=false);
static void read(const char* filename, TRAPS);
// Rehash the string table if it gets out of balance
static void rehash_table();
static bool needs_rehashing()
{ return SymbolTable::the_table()->_needs_rehashing; }
// Heap dumper and CDS
static void symbols_do(SymbolClosure *cl);
// Sharing
static void write_to_archive();
static void serialize(SerializeClosure* soc);
static u4 encode_shared(Symbol* sym);
static Symbol* decode_shared(u4 offset);
private:
static void copy_shared_symbol_table(CompactSymbolTableWriter* ch_table);
public:
static void write_to_archive() NOT_CDS_RETURN;
static void serialize(SerializeClosure* soc) NOT_CDS_RETURN;
static void metaspace_pointers_do(MetaspaceClosure* it);
// Rehash the symbol table if it gets out of balance
static void rehash_table();
static bool needs_rehashing() { return _needs_rehashing; }
// Parallel chunked scanning
static void clear_parallel_claimed_index() { _parallel_claimed_idx = 0; }
static int parallel_claimed_index() { return _parallel_claimed_idx; }
// Jcmd
static void dump(outputStream* st, bool verbose=false);
// Debugging
static void verify();
static void read(const char* filename, TRAPS);
// Histogram
static void print_histogram() PRODUCT_RETURN;
};
#endif // SHARE_VM_CLASSFILE_SYMBOLTABLE_HPP

@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
@ -3256,56 +3255,40 @@ void G1CollectedHeap::print_termination_stats(uint worker_id,
undo_waste * HeapWordSize / K);
}
class G1StringAndSymbolCleaningTask : public AbstractGangTask {
class G1StringCleaningTask : public AbstractGangTask {
private:
BoolObjectClosure* _is_alive;
G1StringDedupUnlinkOrOopsDoClosure _dedup_closure;
OopStorage::ParState<false /* concurrent */, false /* const */> _par_state_string;
int _initial_string_table_size;
int _initial_symbol_table_size;
bool _process_strings;
int _strings_processed;
int _strings_removed;
bool _process_symbols;
int _symbols_processed;
int _symbols_removed;
bool _process_string_dedup;
public:
G1StringAndSymbolCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, bool process_string_dedup) :
AbstractGangTask("String/Symbol Unlinking"),
G1StringCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_string_dedup) :
AbstractGangTask("String Unlinking"),
_is_alive(is_alive),
_dedup_closure(is_alive, NULL, false),
_par_state_string(StringTable::weak_storage()),
_process_strings(process_strings), _strings_processed(0), _strings_removed(0),
_process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0),
_process_string_dedup(process_string_dedup) {
_initial_string_table_size = (int) StringTable::the_table()->table_size();
_initial_symbol_table_size = SymbolTable::the_table()->table_size();
if (process_symbols) {
SymbolTable::clear_parallel_claimed_index();
}
if (process_strings) {
StringTable::reset_dead_counter();
}
}
~G1StringAndSymbolCleaningTask() {
guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
"claim value %d after unlink less than initial symbol table size %d",
SymbolTable::parallel_claimed_index(), _initial_symbol_table_size);
~G1StringCleaningTask() {
log_info(gc, stringtable)(
"Cleaned string and symbol table, "
"strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
"symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
strings_processed(), strings_removed(),
symbols_processed(), symbols_removed());
"Cleaned string table, "
"strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
strings_processed(), strings_removed());
if (_process_strings) {
StringTable::finish_dead_counter();
}
@ -3314,18 +3297,11 @@ public:
void work(uint worker_id) {
int strings_processed = 0;
int strings_removed = 0;
int symbols_processed = 0;
int symbols_removed = 0;
if (_process_strings) {
StringTable::possibly_parallel_unlink(&_par_state_string, _is_alive, &strings_processed, &strings_removed);
Atomic::add(strings_processed, &_strings_processed);
Atomic::add(strings_removed, &_strings_removed);
}
if (_process_symbols) {
SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
Atomic::add(symbols_processed, &_symbols_processed);
Atomic::add(symbols_removed, &_symbols_removed);
}
if (_process_string_dedup) {
G1StringDedup::parallel_unlink(&_dedup_closure, worker_id);
}
@ -3333,9 +3309,6 @@ public:
size_t strings_processed() const { return (size_t)_strings_processed; }
size_t strings_removed() const { return (size_t)_strings_removed; }
size_t symbols_processed() const { return (size_t)_symbols_processed; }
size_t symbols_removed() const { return (size_t)_symbols_removed; }
};
class G1CodeCacheUnloadingTask {
@ -3585,7 +3558,7 @@ public:
class G1ParallelCleaningTask : public AbstractGangTask {
private:
bool _unloading_occurred;
G1StringAndSymbolCleaningTask _string_symbol_task;
G1StringCleaningTask _string_task;
G1CodeCacheUnloadingTask _code_cache_task;
G1KlassCleaningTask _klass_cleaning_task;
G1ResolvedMethodCleaningTask _resolved_method_cleaning_task;
@ -3595,7 +3568,7 @@ public:
G1ParallelCleaningTask(BoolObjectClosure* is_alive, uint num_workers, bool unloading_occurred) :
AbstractGangTask("Parallel Cleaning"),
_unloading_occurred(unloading_occurred),
_string_symbol_task(is_alive, true, true, G1StringDedup::is_enabled()),
_string_task(is_alive, true, G1StringDedup::is_enabled()),
_code_cache_task(num_workers, is_alive, unloading_occurred),
_klass_cleaning_task(),
_resolved_method_cleaning_task() {
@ -3609,8 +3582,8 @@ public:
// Let the threads mark that the first pass is done.
_code_cache_task.barrier_mark(worker_id);
// Clean the Strings and Symbols.
_string_symbol_task.work(worker_id);
// Clean the Strings.
_string_task.work(worker_id);
// Clean unreferenced things in the ResolvedMethodTable
_resolved_method_cleaning_task.work();
@ -3642,16 +3615,14 @@ void G1CollectedHeap::complete_cleaning(BoolObjectClosure* is_alive,
void G1CollectedHeap::partial_cleaning(BoolObjectClosure* is_alive,
bool process_strings,
bool process_symbols,
bool process_string_dedup) {
if (!process_strings && !process_symbols && !process_string_dedup) {
if (!process_strings && !process_string_dedup) {
// Nothing to clean.
return;
}
G1StringAndSymbolCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols, process_string_dedup);
G1StringCleaningTask g1_unlink_task(is_alive, process_strings, process_string_dedup);
workers()->run_task(&g1_unlink_task);
}
class G1RedirtyLoggedCardsTask : public AbstractGangTask {
@ -4045,7 +4016,7 @@ void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_in
process_discovered_references(per_thread_states);
// FIXME
// CM's reference processing also cleans up the string and symbol tables.
// CM's reference processing also cleans up the string table.
// Should we do that here also? We could, but it is a serial operation
// and could significantly increase the pause time.

@ -1324,9 +1324,8 @@ public:
// Partial cleaning used when class unloading is disabled.
// Let the caller choose what structures to clean out:
// - StringTable
// - SymbolTable
// - StringDeduplication structures
void partial_cleaning(BoolObjectClosure* is_alive, bool unlink_strings, bool unlink_symbols, bool unlink_string_dedup);
void partial_cleaning(BoolObjectClosure* is_alive, bool unlink_strings, bool unlink_string_dedup);
// Complete cleaning used when class unloading is enabled.
// Cleans out all structures handled by partial_cleaning and also the CodeCache.

@ -24,7 +24,6 @@
#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
@ -1578,8 +1577,8 @@ void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
// Is alive closure.
G1CMIsAliveClosure g1_is_alive(_g1h);
// Inner scope to exclude the cleaning of the string and symbol
// tables from the displayed time.
// Inner scope to exclude the cleaning of the string table
// from the displayed time.
{
GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm);
@ -1673,16 +1672,16 @@ void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
WeakProcessor::weak_oops_do(&g1_is_alive, &do_nothing_cl);
}
// Unload Klasses, String, Symbols, Code Cache, etc.
// Unload Klasses, String, Code Cache, etc.
if (ClassUnloadingWithConcurrentMark) {
GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm, false /* Defer cleaning */);
_g1h->complete_cleaning(&g1_is_alive, purged_classes);
} else {
GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
// No need to clean string table and symbol table as they are treated as strong roots when
// No need to clean string table as it is treated as strong roots when
// class unloading is disabled.
_g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled());
_g1h->partial_cleaning(&g1_is_alive, false, G1StringDedup::is_enabled());
}
}

@ -226,8 +226,8 @@ void G1FullCollector::phase1_mark_live_objects() {
_heap->complete_cleaning(&_is_alive, purged_class);
} else {
GCTraceTime(Debug, gc, phases) debug("Phase 1: String and Symbol Tables Cleanup", scope()->timer());
// If no class unloading just clean out strings and symbols.
_heap->partial_cleaning(&_is_alive, true, true, G1StringDedup::is_enabled());
// If no class unloading just clean out strings.
_heap->partial_cleaning(&_is_alive, true, G1StringDedup::is_enabled());
}
scope()->tracer()->report_object_count_after_gc(&_is_alive);

@ -24,7 +24,6 @@
#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
@ -74,7 +73,6 @@ static const ZStatSubPhase ZSubPhasePauseWeakRootsVMWeakHandles("Pause Weak Root
static const ZStatSubPhase ZSubPhasePauseWeakRootsJNIWeakHandles("Pause Weak Roots JNIWeakHandles");
static const ZStatSubPhase ZSubPhasePauseWeakRootsJVMTIWeakExport("Pause Weak Roots JVMTIWeakExport");
static const ZStatSubPhase ZSubPhasePauseWeakRootsJFRWeak("Pause Weak Roots JFRWeak");
static const ZStatSubPhase ZSubPhasePauseWeakRootsSymbolTable("Pause Weak Roots SymbolTable");
static const ZStatSubPhase ZSubPhasePauseWeakRootsStringTable("Pause Weak Roots StringTable");
static const ZStatSubPhase ZSubPhaseConcurrentWeakRoots("Concurrent Weak Roots");
@ -302,11 +300,9 @@ ZWeakRootsIterator::ZWeakRootsIterator() :
_jfr_weak(this),
_vm_weak_handles(this),
_jni_weak_handles(this),
_symbol_table(this),
_string_table(this) {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
ZStatTimer timer(ZSubPhasePauseWeakRootsSetup);
SymbolTable::clear_parallel_claimed_index();
StringTable::reset_dead_counter();
}
@ -337,12 +333,6 @@ void ZWeakRootsIterator::do_jfr_weak(BoolObjectClosure* is_alive, OopClosure* cl
#endif
}
void ZWeakRootsIterator::do_symbol_table(BoolObjectClosure* is_alive, OopClosure* cl) {
ZStatTimer timer(ZSubPhasePauseWeakRootsSymbolTable);
int dummy;
SymbolTable::possibly_parallel_unlink(&dummy, &dummy);
}
class ZStringTableDeadCounterBoolObjectClosure : public BoolObjectClosure {
private:
BoolObjectClosure* const _cl;
@ -375,9 +365,6 @@ void ZWeakRootsIterator::do_string_table(BoolObjectClosure* is_alive, OopClosure
void ZWeakRootsIterator::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* cl) {
ZStatTimer timer(ZSubPhasePauseWeakRoots);
if (ZSymbolTableUnloading) {
_symbol_table.weak_oops_do(is_alive, cl);
}
if (ZWeakRoots) {
_jvmti_weak_export.weak_oops_do(is_alive, cl);
_jfr_weak.weak_oops_do(is_alive, cl);

@ -130,14 +130,12 @@ private:
void do_jni_weak_handles(BoolObjectClosure* is_alive, OopClosure* cl);
void do_jvmti_weak_export(BoolObjectClosure* is_alive, OopClosure* cl);
void do_jfr_weak(BoolObjectClosure* is_alive, OopClosure* cl);
void do_symbol_table(BoolObjectClosure* is_alive, OopClosure* cl);
void do_string_table(BoolObjectClosure* is_alive, OopClosure* cl);
ZSerialWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_jvmti_weak_export> _jvmti_weak_export;
ZSerialWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_jfr_weak> _jfr_weak;
ZParallelWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_vm_weak_handles> _vm_weak_handles;
ZParallelWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_jni_weak_handles> _jni_weak_handles;
ZParallelWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_symbol_table> _symbol_table;
ZParallelWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_string_table> _string_table;
public:

@ -79,9 +79,6 @@
diagnostic(bool, ZVerifyForwarding, false, \
"Verify forwarding tables") \
\
diagnostic(bool, ZSymbolTableUnloading, false, \
"Unload unused VM symbols") \
\
diagnostic(bool, ZWeakRoots, true, \
"Treat JNI WeakGlobalRefs and StringTable as weak roots") \
\

@ -80,6 +80,7 @@ DEBUG_ONLY(size_t Test_log_prefix_prefixer(char* buf, size_t len);)
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, reloc)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, start)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, stringtable)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, symboltable)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, sweep)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, task)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, task, start)) \

@ -147,6 +147,7 @@
LOG_TAG(stats) \
LOG_TAG(stringdedup) \
LOG_TAG(stringtable) \
LOG_TAG(symboltable) \
LOG_TAG(stackmap) \
LOG_TAG(subclass) \
LOG_TAG(survivor) \

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -199,12 +199,18 @@ protected:
}
// Fast delete in area. Common case is: NOP (except for storage reclaimed)
void Afree(void *ptr, size_t size) {
bool Afree(void *ptr, size_t size) {
#ifdef ASSERT
if (ZapResourceArea) memset(ptr, badResourceValue, size); // zap freed memory
if (UseMallocOnly) return;
if (UseMallocOnly) return true;
#endif
if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr;
if (((char*)ptr) + size == _hwm) {
_hwm = (char*)ptr;
return true;
} else {
// Unable to fast free, so we just drop it.
return false;
}
}
void *Arealloc( void *old_ptr, size_t old_size, size_t new_size,
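
Afree() above now returns whether the fast free actually happened: an arena allocates by bumping a high-water mark (_hwm), so only the most recently allocated block can be handed back by rolling _hwm down, and anything else is simply dropped until the arena itself is released. A self-contained illustration with a toy arena (TinyArena, not the HotSpot Arena):

    #include <cassert>
    #include <cstddef>

    // Toy arena; not the HotSpot Arena/Chunk machinery.
    class TinyArena {
      char  _buf[1024];
      char* _hwm = _buf;        // high-water mark: next free byte
     public:
      void* alloc(size_t size) { void* p = _hwm; _hwm += size; return p; }
      bool  free_fast(void* ptr, size_t size) {
        if ((char*)ptr + size == _hwm) { _hwm = (char*)ptr; return true; }
        return false;           // not the top allocation, so the storage is just dropped
      }
    };

    int main() {
      TinyArena a;
      void* p1 = a.alloc(16);
      void* p2 = a.alloc(32);
      assert(a.free_fast(p2, 32));    // top of the arena: reclaimed
      assert(!a.free_fast(p1, 16));   // buried allocation: cannot roll back
      (void)p1; (void)p2;
      return 0;
    }
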

@ -318,4 +318,4 @@ void Symbol::print_value_on(outputStream* st) const {
}
// SymbolTable prints this in its statistics
NOT_PRODUCT(int Symbol::_total_count = 0;)
NOT_PRODUCT(size_t Symbol::_total_count = 0;)

@ -256,7 +256,7 @@ class Symbol : public MetaspaceObj {
// only for getting its vtable pointer.
Symbol() { }
static int _total_count;
static size_t _total_count;
#endif
};

@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/serviceThread.hpp"
@ -84,6 +85,7 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
bool has_dcmd_notification_event = false;
bool acs_notify = false;
bool stringtable_work = false;
bool symboltable_work = false;
JvmtiDeferredEvent jvmti_event;
{
// Need state transition ThreadBlockInVM so that this thread
@ -101,7 +103,8 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
!(has_jvmti_events = JvmtiDeferredEventQueue::has_events()) &&
!(has_gc_notification_event = GCNotifier::has_event()) &&
!(has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification()) &&
!(stringtable_work = StringTable::has_work())) {
!(stringtable_work = StringTable::has_work()) &&
!(symboltable_work = SymbolTable::has_work())) {
// wait until one of the sensors has pending requests, or there is a
// pending JVMTI event or JMX GC notification to post
Service_lock->wait(Mutex::_no_safepoint_check_flag);
@ -116,6 +119,10 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
StringTable::do_concurrent_work(jt);
}
if (symboltable_work) {
SymbolTable::do_concurrent_work(jt);
}
if (has_jvmti_events) {
jvmti_event.post();
}
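
The ServiceThread now also polls SymbolTable::has_work(), exactly as it does for StringTable: trigger_concurrent_work() sets a flag and notifies the Service_lock, and the woken thread calls do_concurrent_work(jt) outside any safepoint. A reduced model of that handshake with standard-library primitives (the lock, flag, and work function here are stand-ins):

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    static std::mutex              service_lock;
    static std::condition_variable service_cv;
    static bool symboltable_has_work = false;
    static bool shutting_down        = false;

    // Called by the table when the load/dead factors say maintenance is needed.
    static void trigger_concurrent_work() {
      std::lock_guard<std::mutex> g(service_lock);
      symboltable_has_work = true;
      service_cv.notify_one();
    }

    static void service_thread_entry() {
      for (;;) {
        {
          std::unique_lock<std::mutex> l(service_lock);
          service_cv.wait(l, [] { return symboltable_has_work || shutting_down; });
          if (!symboltable_has_work && shutting_down) return;
          symboltable_has_work = false;
        }
        printf("do_concurrent_work()\n");  // grow and/or clean, outside the lock
      }
    }

    int main() {
      std::thread service(service_thread_entry);
      trigger_concurrent_work();
      {
        std::lock_guard<std::mutex> g(service_lock);
        shutting_down = true;
        service_cv.notify_one();
      }
      service.join();
      return 0;
    }
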

@ -162,12 +162,8 @@
typedef HashtableEntry<intptr_t, mtInternal> IntptrHashtableEntry;
typedef Hashtable<intptr_t, mtInternal> IntptrHashtable;
typedef Hashtable<Symbol*, mtSymbol> SymbolHashtable;
typedef HashtableEntry<Symbol*, mtClass> SymbolHashtableEntry;
typedef Hashtable<InstanceKlass*, mtClass> KlassHashtable;
typedef HashtableEntry<InstanceKlass*, mtClass> KlassHashtableEntry;
typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
typedef RehashableHashtable<Symbol*, mtSymbol> RehashableSymbolHashtable;
typedef PaddedEnd<ObjectMonitor> PaddedObjectMonitor;
@ -467,24 +463,6 @@ typedef PaddedEnd<ObjectMonitor> PaddedObjectMonitor;
static_field(PerfMemory, _prologue, PerfDataPrologue*) \
static_field(PerfMemory, _initialized, int) \
\
/***************/ \
/* SymbolTable */ \
/***************/ \
\
static_field(SymbolTable, _the_table, SymbolTable*) \
static_field(SymbolTable, _shared_table, SymbolCompactHashTable) \
static_field(RehashableSymbolHashtable, _seed, juint) \
\
/********************/ \
/* CompactHashTable */ \
/********************/ \
\
nonstatic_field(SymbolCompactHashTable, _base_address, address) \
nonstatic_field(SymbolCompactHashTable, _entry_count, u4) \
nonstatic_field(SymbolCompactHashTable, _bucket_count, u4) \
nonstatic_field(SymbolCompactHashTable, _buckets, u4*) \
nonstatic_field(SymbolCompactHashTable, _entries, u4*) \
\
/********************/ \
/* SystemDictionary */ \
/********************/ \
@ -1351,15 +1329,13 @@ typedef PaddedEnd<ObjectMonitor> PaddedObjectMonitor;
declare_toplevel_type(PerfMemory) \
declare_type(PerfData, CHeapObj<mtInternal>) \
\
/*********************************/ \
/* SymbolTable, SystemDictionary */ \
/*********************************/ \
/********************/ \
/* SystemDictionary */ \
/********************/ \
\
declare_toplevel_type(BasicHashtable<mtInternal>) \
declare_type(IntptrHashtable, BasicHashtable<mtInternal>) \
declare_toplevel_type(BasicHashtable<mtSymbol>) \
declare_type(RehashableSymbolHashtable, BasicHashtable<mtSymbol>) \
declare_type(SymbolTable, SymbolHashtable) \
declare_type(Dictionary, KlassHashtable) \
declare_toplevel_type(BasicHashtableEntry<mtInternal>) \
declare_type(IntptrHashtableEntry, BasicHashtableEntry<mtInternal>) \
@ -1373,8 +1349,6 @@ typedef PaddedEnd<ObjectMonitor> PaddedObjectMonitor;
declare_toplevel_type(Arena) \
declare_type(ResourceArea, Arena) \
\
declare_toplevel_type(SymbolCompactHashTable) \
\
/***********************************************************/ \
/* Thread hierarchy (needed for run-time type information) */ \
/***********************************************************/ \

@ -309,7 +309,7 @@ class ConcurrentHashTable : public CHeapObj<F> {
// Insert which handles a number of cases.
template <typename LOOKUP_FUNC, typename VALUE_FUNC, typename CALLBACK_FUNC>
bool internal_insert(Thread* thread, LOOKUP_FUNC& lookup_f, VALUE_FUNC& value_f,
CALLBACK_FUNC& callback, bool* grow_hint = NULL);
CALLBACK_FUNC& callback, bool* grow_hint = NULL, bool* clean_hint = NULL);
// Returns true if an item matching LOOKUP_FUNC is removed.
// Calls DELETE_FUNC before destroying the node.
@ -396,8 +396,8 @@ class ConcurrentHashTable : public CHeapObj<F> {
// value already exists.
template <typename LOOKUP_FUNC, typename VALUE_FUNC, typename CALLBACK_FUNC>
bool get_insert_lazy(Thread* thread, LOOKUP_FUNC& lookup_f, VALUE_FUNC& val_f,
CALLBACK_FUNC& callback_f, bool* grow_hint = NULL) {
return !internal_insert(thread, lookup_f, val_f, callback_f, grow_hint);
CALLBACK_FUNC& callback_f, bool* grow_hint = NULL, bool* clean_hint = NULL) {
return !internal_insert(thread, lookup_f, val_f, callback_f, grow_hint, clean_hint);
}
// Same without CALLBACK_FUNC.
@ -436,9 +436,9 @@ class ConcurrentHashTable : public CHeapObj<F> {
// LOOKUP_FUNC.
template <typename LOOKUP_FUNC>
bool insert(Thread* thread, LOOKUP_FUNC& lookup_f, const VALUE& value,
bool* grow_hint = NULL) {
bool* grow_hint = NULL, bool* clean_hint = NULL) {
LazyValueRetrieve vp(value);
return internal_insert(thread, lookup_f, vp, noOp, grow_hint);
return internal_insert(thread, lookup_f, vp, noOp, grow_hint, clean_hint);
}
// This does a fast unsafe insert and can thus only be used when there is no

@ -540,6 +540,8 @@ template <typename LOOKUP_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f)
{
assert(bucket->is_locked(), "Must be locked.");
size_t dels = 0;
Node* ndel[BULK_DELETE_LIMIT];
Node* const volatile * rem_n_prev = bucket->first_ptr();
@ -874,7 +876,7 @@ template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC, typename VALUE_FUNC, typename CALLBACK_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
internal_insert(Thread* thread, LOOKUP_FUNC& lookup_f, VALUE_FUNC& value_f,
CALLBACK_FUNC& callback, bool* grow_hint)
CALLBACK_FUNC& callback, bool* grow_hint, bool* clean_hint)
{
bool ret = false;
bool clean = false;
@ -925,15 +927,20 @@ inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
} else if (i == 0 && clean) {
// We only do cleaning on fast inserts.
Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
assert(bucket->is_locked(), "Must be locked.");
delete_in_bucket(thread, bucket, lookup_f);
bucket->unlock();
clean = false;
}
if (grow_hint != NULL) {
*grow_hint = loops > _grow_hint;
}
if (clean_hint != NULL) {
*clean_hint = clean;
}
return ret;
}
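
internal_insert() now also reports, through the new clean_hint out-parameter, that dead entries were seen in the probed chain but not purged (cleaning is only attempted on the fast-insert path, the i == 0 case above). insert() and get_insert_lazy() forward the flag, so a caller can treat it much like grow_hint and schedule deferred work. A sketch of such a caller against a stand-in table type:

    #include <cstdio>

    // Stand-in table; only models the hint out-parameters of insert().
    struct StandInTable {
      bool insert(int /*value*/, bool* grow_hint = nullptr, bool* clean_hint = nullptr) {
        // Pretend the probed chain was long and contained dead entries that
        // this insert did not get to purge.
        if (grow_hint  != nullptr) *grow_hint  = true;
        if (clean_hint != nullptr) *clean_hint = true;
        return true;  // value was inserted
      }
    };

    static void trigger_concurrent_work(const char* what) {
      printf("schedule concurrent %s\n", what);
    }

    int main() {
      StandInTable table;
      bool grow_hint = false, clean_hint = false;
      if (table.insert(42, &grow_hint, &clean_hint)) {
        if (grow_hint) {
          trigger_concurrent_work("grow");
        } else if (clean_hint) {
          trigger_concurrent_work("clean");
        }
      }
      return 0;
    }
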

@ -61,6 +61,11 @@ void GlobalCounter::write_synchronize() {
// Atomic::add must provide fence since we have storeload dependency.
volatile uintx gbl_cnt = Atomic::add((uintx)COUNTER_INCREMENT, &_global_counter._counter,
memory_order_conservative);
// Handle bootstrap
if (Threads::number_of_threads() == 0) {
return;
}
// Do all RCU threads.
CounterThreadCheck ctc(gbl_cnt);
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {

@ -422,13 +422,14 @@ const jint max_jint = (juint)min_jint - 1; // 0x7FFFFFFF ==
const int max_method_code_size = 64*K - 1; // JVM spec, 2nd ed. section 4.8.1 (p.134)
//----------------------------------------------------------------------------------------------------
// Default and minimum StringTableSize values
// Default and minimum StringTable and SymbolTable size values
// Must be a power of 2
const int defaultStringTableSize = NOT_LP64(1024) LP64_ONLY(65536);
const int minimumStringTableSize = 128;
const size_t defaultStringTableSize = NOT_LP64(1024) LP64_ONLY(65536);
const size_t minimumStringTableSize = 128;
const int defaultSymbolTableSize = 20011;
const int minimumSymbolTableSize = 1009;
const size_t defaultSymbolTableSize = 32768; // 2^15
const size_t minimumSymbolTableSize = 1024;
//----------------------------------------------------------------------------------------------------
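
The table sizing constants become size_t, and the defaults remain powers of two as the comment requires (the new SymbolTable default is 32768 = 2^15, with a minimum of 1024). A quick compile-time check of that constraint, using a local is_pow2 helper rather than the HotSpot one:

    #include <cstddef>

    // Local helper, not the HotSpot is_power_of_2.
    constexpr bool is_pow2(size_t v) { return v != 0 && (v & (v - 1)) == 0; }

    static_assert(is_pow2(65536) && is_pow2(128),  "StringTable default/minimum");
    static_assert(is_pow2(32768) && is_pow2(1024), "SymbolTable default/minimum");

    int main() { return 0; }
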

@ -22,9 +22,9 @@
*/
/*
* @test TestStringSymbolTableStats.java
* @test TestStringTableStats.java
* @bug 8027476 8027455
* @summary Ensure that the G1TraceStringSymbolTableScrubbing prints the expected message.
* @summary Ensure that the G1TraceStringTableScrubbing prints the expected message.
* @key gc
* @requires vm.gc.G1
* @library /test/lib
@ -35,7 +35,7 @@
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.process.ProcessTools;
public class TestStringSymbolTableStats {
public class TestStringTableStats {
public static void main(String[] args) throws Exception {
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
@ -47,7 +47,7 @@ public class TestStringSymbolTableStats {
System.out.println("Output:\n" + output.getOutput());
output.shouldMatch("GC\\(\\d+\\) Cleaned string and symbol table");
output.shouldMatch("GC\\(\\d+\\) Cleaned string table");
output.shouldHaveExitValue(0);
}

@ -0,0 +1,119 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8195100
* @summary a short lived Symbol should be cleaned up
* @library /test/lib
* @modules java.base/jdk.internal.misc
* java.management
* @requires (vm.debug == true)
*/
import jdk.test.lib.Platform;
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.process.ProcessTools;
import java.util.Scanner;
public class ShortLivedSymbolCleanup {
static int getSymbolTableSize(ProcessBuilder pb) throws Exception {
int size = 0;
OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
String output = analyzer.getStdout();
analyzer.shouldHaveExitValue(0);
// Split string into lines using platform independent end of line marker.
String[] lines = output.split("\\R");
for (String line : lines) {
if (line.contains("Start size")) {
// ex. "[0.023s][trace][symboltable] Start size: 32768 (15)"
Scanner scanner = new Scanner(line);
scanner.next(); // skip "[0.023s][trace][symboltable]"
scanner.next(); // skip "Start"
scanner.next(); // skip "size:"
size = Integer.parseInt(scanner.next()); // process "32768"
scanner.close();
}
}
return size;
}
static void analyzeOutputOn(int size, ProcessBuilder pb) throws Exception {
OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
String output = analyzer.getStdout();
analyzer.shouldHaveExitValue(0);
// Split string into lines using platform independent end of line marker.
String[] lines = output.split("\\R");
for (String line : lines) {
if (line.startsWith(" Total removed")) {
// ex. "Total removed 13309"
Scanner scanner = new Scanner(line);
scanner.next(); // skip "Total"
scanner.next(); // skip "removed"
int removed = Integer.parseInt(scanner.next()); // process "13309"
scanner.close();
if (removed < (size/2)) {
System.out.println(output);
// We should have removed at least half of the temporary Symbols
throw new RuntimeException("Did not clean dead temporary Symbols [removed:"+removed+", size:"+size+"]");
}
}
}
}
public static void main(String[] args) throws Exception {
if (Platform.isDebugBuild()) {
{
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xlog:symboltable=trace",
"-version");
int size = getSymbolTableSize(pb);
pb = ProcessTools.createJavaProcessBuilder("-XX:+PrintSymbolTableSizeHistogram",
LotsOfTempSymbols.class.getName(),
Integer.toString(size));
analyzeOutputOn(size, pb);
}
}
}
static class LotsOfTempSymbols {
public static void main(String [] args) {
int size = 2*Integer.parseInt(args[0]);
// Create enough temporary Symbols that we are
// guaranteed to insert into every bucket twice,
// and therefore have the table check for dead entries
for (int i=0; i<size; i++) {
try {
Class.forName(String.format("%05d", i), false, null);
} catch (java.lang.ClassNotFoundException e) {}
}
}
}
}