Jesper Wilhelmsson 2015-06-15 13:48:30 +02:00
commit a347180ac5
105 changed files with 2465 additions and 491 deletions

View File

@ -121,6 +121,8 @@ public class VM {
private Flag[] commandLineFlags;
private Map flagsMap;
private static Type intType;
private static Type uintType;
private static Type intxType;
private static Type uintxType;
private static Type sizetType;
@ -170,6 +172,28 @@ public class VM {
return addr.getCIntegerAt(0, boolType.getSize(), boolType.isUnsigned()) != 0;
}
public boolean isInt() {
return type.equals("int");
}
public long getInt() {
if (Assert.ASSERTS_ENABLED) {
Assert.that(isInt(), "not an int flag!");
}
return addr.getCIntegerAt(0, intType.getSize(), false);
}
public boolean isUInt() {
return type.equals("uint");
}
public long getUInt() {
if (Assert.ASSERTS_ENABLED) {
Assert.that(isUInt(), "not a uint flag!");
}
return addr.getCIntegerAt(0, uintType.getSize(), false);
}
public boolean isIntx() {
return type.equals("intx");
}
@ -206,6 +230,10 @@ public class VM {
public String getValue() {
if (isBool()) {
return new Boolean(getBool()).toString();
} else if (isInt()) {
return new Long(getInt()).toString();
} else if (isUInt()) {
return new Long(getUInt()).toString();
} else if (isIntx()) {
return new Long(getIntx()).toString();
} else if (isUIntx()) {
@ -334,6 +362,8 @@ public class VM {
heapWordSize = db.lookupIntConstant("HeapWordSize").intValue();
oopSize = db.lookupIntConstant("oopSize").intValue();
intType = db.lookupType("int");
uintType = db.lookupType("uint");
intxType = db.lookupType("intx");
uintxType = db.lookupType("uintx");
sizetType = db.lookupType("size_t");

View File

@ -505,7 +505,8 @@ void VM_Version::determine_section_size() {
void VM_Version::determine_features() {
#if defined(ABI_ELFv2)
const int code_size = (num_features+1+2*7)*BytesPerInstWord; // TODO(asmundak): calculation is incorrect.
// 1 InstWord per call for the blr instruction.
const int code_size = (num_features+1+2*1)*BytesPerInstWord;
#else
// 7 InstWords for each call (function descriptor + blr instruction).
const int code_size = (num_features+1+2*7)*BytesPerInstWord;
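A quick arithmetic sketch of the two buffer-size formulas above; BytesPerInstWord (4 on PPC) and num_features are stand-in values for illustration only, not the VM's actual constants.

#include <cstdio>

int main() {
  const int BytesPerInstWord = 4;  // PPC instructions are 4 bytes wide
  const int num_features     = 16; // placeholder value
  // ABI_ELFv2: each of the two calls needs only the blr instruction (1 word).
  const int code_size_elfv2 = (num_features + 1 + 2 * 1) * BytesPerInstWord;
  // Other ABIs: each call needs a function descriptor plus blr (7 words).
  const int code_size_elfv1 = (num_features + 1 + 2 * 7) * BytesPerInstWord;
  printf("ELFv2: %d bytes, ELFv1: %d bytes\n", code_size_elfv2, code_size_elfv1);
  return 0;
}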
@ -540,7 +541,8 @@ void VM_Version::determine_features() {
a->popcntw(R7, R5); // code[6] -> popcntw
a->fcfids(F3, F4); // code[7] -> fcfids
a->vand(VR0, VR0, VR0); // code[8] -> vand
a->lqarx_unchecked(R7, R3_ARG1, R4_ARG2, 1); // code[9] -> lqarx_m
// arg0 of lqarx must be an even register, (arg1 + arg2) must be a multiple of 16
a->lqarx_unchecked(R6, R3_ARG1, R4_ARG2, 1); // code[9] -> lqarx_m
a->vcipher(VR0, VR1, VR2); // code[10] -> vcipher
a->vpmsumb(VR0, VR1, VR2); // code[11] -> vpmsumb
a->tcheck(0); // code[12] -> tcheck
@ -572,7 +574,8 @@ void VM_Version::determine_features() {
// Execute code. Illegal instructions will be replaced by 0 in the signal handler.
VM_Version::_is_determine_features_test_running = true;
(*test)((address)mid_of_test_area, (uint64_t)0);
// We must align the first argument to 16 bytes because of the lqarx check.
(*test)((address)align_size_up((intptr_t)mid_of_test_area, 16), (uint64_t)0);
VM_Version::_is_determine_features_test_running = false;
// determine which instructions are legal.
@ -614,12 +617,12 @@ void VM_Version::config_dscr() {
MacroAssembler* a = new MacroAssembler(&cb);
// Emit code.
uint64_t (*get_dscr)() = (uint64_t(*)())(void *)a->emit_fd();
uint64_t (*get_dscr)() = (uint64_t(*)())(void *)a->function_entry();
uint32_t *code = (uint32_t *)a->pc();
a->mfdscr(R3);
a->blr();
void (*set_dscr)(long) = (void(*)(long))(void *)a->emit_fd();
void (*set_dscr)(long) = (void(*)(long))(void *)a->function_entry();
a->mtdscr(R3);
a->blr();

View File

@ -32,11 +32,11 @@
//
// The compact hash table writer implementations
//
CompactHashtableWriter::CompactHashtableWriter(const char* table_name,
CompactHashtableWriter::CompactHashtableWriter(int table_type,
int num_entries,
CompactHashtableStats* stats) {
assert(DumpSharedSpaces, "dump-time only");
_table_name = table_name;
_type = table_type;
_num_entries = num_entries;
_num_buckets = number_of_buckets(_num_entries);
_buckets = NEW_C_HEAP_ARRAY(Entry*, _num_buckets, mtSymbol);
@ -99,7 +99,7 @@ juint* CompactHashtableWriter::dump_table(juint* p, juint** first_bucket,
NumberSeq* summary) {
int index;
juint* compact_table = p;
// Find the start of the buckets, skip the compact_bucket_infos table
// Compute the start of the buckets, include the compact_bucket_infos table
// and the table end offset.
juint offset = _num_buckets + 1;
*first_bucket = compact_table + offset;
@ -130,10 +130,17 @@ juint* CompactHashtableWriter::dump_table(juint* p, juint** first_bucket,
// Write the compact table's entries
juint* CompactHashtableWriter::dump_buckets(juint* compact_table, juint* p,
NumberSeq* summary) {
uintx base_address = uintx(MetaspaceShared::shared_rs()->base());
uintx max_delta = uintx(MetaspaceShared::shared_rs()->size());
assert(max_delta <= 0x7fffffff, "range check");
uintx base_address = 0;
uintx max_delta = 0;
int num_compact_buckets = 0;
if (_type == CompactHashtable<Symbol*, char>::_symbol_table) {
base_address = uintx(MetaspaceShared::shared_rs()->base());
max_delta = uintx(MetaspaceShared::shared_rs()->size());
assert(max_delta <= 0x7fffffff, "range check");
} else {
assert((_type == CompactHashtable<oop, char>::_string_table), "unknown table");
assert(UseCompressedOops, "UseCompressedOops is required");
}
assert(p != NULL, "sanity");
for (int index = 0; index < _num_buckets; index++) {
@ -148,12 +155,16 @@ juint* CompactHashtableWriter::dump_buckets(juint* compact_table, juint* p,
for (Entry* tent = _buckets[index]; tent;
tent = tent->next()) {
if (bucket_type == REGULAR_BUCKET_TYPE) {
*p++ = juint(tent->hash()); // write symbol hash
*p++ = juint(tent->hash()); // write entry hash
}
if (_type == CompactHashtable<Symbol*, char>::_symbol_table) {
uintx deltax = uintx(tent->value()) - base_address;
assert(deltax < max_delta, "range check");
juint delta = juint(deltax);
*p++ = delta; // write entry offset
} else {
*p++ = oopDesc::encode_heap_oop(tent->string());
}
uintx deltax = uintx(tent->value()) - base_address;
assert(deltax < max_delta, "range check");
juint delta = juint(deltax);
*p++ = delta; // write symbol offset
count ++;
}
assert(count == _bucket_sizes[index], "sanity");
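A minimal sketch (illustrative names, not the HotSpot API) of the symbol-entry encoding written above: a shared Symbol* is stored as a 32-bit delta from the shared region's base and is subject to the same range checks; string entries instead store a narrowOop via oopDesc::encode_heap_oop, which this sketch does not model.

#include <cstdint>
#include <cassert>

typedef uint32_t juint_t;  // stand-in for HotSpot's juint

juint_t encode_offset(uintptr_t entry, uintptr_t base, uintptr_t max_delta) {
  uintptr_t delta = entry - base;
  assert(max_delta <= 0x7fffffff && delta < max_delta);  // mirrors the range checks above
  return (juint_t)delta;
}

uintptr_t decode_offset(juint_t delta, uintptr_t base) {
  return base + delta;   // Symbol* sym = (Symbol*)(base_address + offset)
}

int main() {
  uintptr_t base = 0x20000000;     // hypothetical shared_rs base
  uintptr_t sym  = base + 0x1234;  // hypothetical Symbol* inside the region
  juint_t d = encode_offset(sym, base, 0x10000000);
  assert(decode_offset(d, base) == sym);
  return 0;
}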
@ -174,6 +185,10 @@ void CompactHashtableWriter::dump(char** top, char* end) {
uintx base_address = uintx(MetaspaceShared::shared_rs()->base());
// Now write the following at the beginning of the table:
// base_address (uintx)
// num_entries (juint)
// num_buckets (juint)
*p++ = high(base_address);
*p++ = low (base_address); // base address
*p++ = _num_entries; // number of entries in the table
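For illustration only, assuming a 64-bit uintx: the base_address is split into two 32-bit header words, matching the high()/low() writes above; the helper names below are invented for the sketch.

#include <cstdint>
#include <cassert>

static uint32_t high32(uint64_t v) { return (uint32_t)(v >> 32); }
static uint32_t low32 (uint64_t v) { return (uint32_t)(v & 0xffffffffu); }

int main() {
  uint64_t base_address = 0x00007f0012340000ULL;  // hypothetical shared-space base
  uint32_t words[2] = { high32(base_address), low32(base_address) };
  uint64_t restored = ((uint64_t)words[0] << 32) | words[1];
  assert(restored == base_address);  // the reader reassembles upper and lower halves
  return 0;
}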
@ -191,7 +206,8 @@ void CompactHashtableWriter::dump(char** top, char* end) {
if (_num_entries > 0) {
avg_cost = double(_required_bytes)/double(_num_entries);
}
tty->print_cr("Shared %s table stats -------- base: " PTR_FORMAT, _table_name, (intptr_t)base_address);
tty->print_cr("Shared %s table stats -------- base: " PTR_FORMAT,
table_name(), (intptr_t)base_address);
tty->print_cr("Number of entries : %9d", _num_entries);
tty->print_cr("Total bytes used : %9d", (int)((*top) - old_top));
tty->print_cr("Average bytes per entry : %9.3f", avg_cost);
@ -202,12 +218,24 @@ void CompactHashtableWriter::dump(char** top, char* end) {
}
}
const char* CompactHashtableWriter::table_name() {
switch (_type) {
case CompactHashtable<Symbol*, char>::_symbol_table: return "symbol";
case CompactHashtable<oop, char>::_string_table: return "string";
default:
;
}
return "unknown";
}
/////////////////////////////////////////////////////////////
//
// The CompactHashtable implementation
//
template <class T, class N> const char* CompactHashtable<T, N>::init(const char* buffer) {
template <class T, class N> const char* CompactHashtable<T, N>::init(
CompactHashtableType type, const char* buffer) {
assert(!DumpSharedSpaces, "run-time only");
_type = type;
juint*p = (juint*)buffer;
juint upper = *p++;
juint lower = *p++;
@ -245,8 +273,34 @@ template <class T, class N> void CompactHashtable<T, N>::symbols_do(SymbolClosur
}
}
template <class T, class N> void CompactHashtable<T, N>::oops_do(OopClosure* f) {
assert(!DumpSharedSpaces, "run-time only");
assert(_type == _string_table || _bucket_count == 0, "sanity");
for (juint i = 0; i < _bucket_count; i ++) {
juint bucket_info = _buckets[i];
juint bucket_offset = BUCKET_OFFSET(bucket_info);
int bucket_type = BUCKET_TYPE(bucket_info);
juint* bucket = _buckets + bucket_offset;
juint* bucket_end = _buckets;
narrowOop o;
if (bucket_type == COMPACT_BUCKET_TYPE) {
o = (narrowOop)bucket[0];
f->do_oop(&o);
} else {
bucket_end += BUCKET_OFFSET(_buckets[i + 1]);
while (bucket < bucket_end) {
o = (narrowOop)bucket[1];
f->do_oop(&o);
bucket += 2;
}
}
}
}
// Explicitly instantiate these types
template class CompactHashtable<Symbol*, char>;
template class CompactHashtable<oop, char>;
#ifndef O_BINARY // if defined (Win32) use binary files.
#define O_BINARY 0 // otherwise do nothing.
@ -273,6 +327,8 @@ HashtableTextDump::HashtableTextDump(const char* filename) : _fd(-1) {
_p = _base;
_end = _base + st.st_size;
_filename = filename;
_prefix_type = Unknown;
_line_no = 1;
}
HashtableTextDump::~HashtableTextDump() {
@ -286,9 +342,9 @@ void HashtableTextDump::quit(const char* err, const char* msg) {
vm_exit_during_initialization(err, msg);
}
void HashtableTextDump::corrupted(const char *p) {
void HashtableTextDump::corrupted(const char *p, const char* msg) {
char info[60];
sprintf(info, "corrupted at pos %d", (int)(p - _base));
sprintf(info, "%s. Corrupted at line %d (file pos %d)", msg, _line_no, (int)(p - _base));
quit(info, _filename);
}
@ -298,8 +354,9 @@ bool HashtableTextDump::skip_newline() {
} else if (_p[0] == '\n') {
_p += 1;
} else {
corrupted(_p);
corrupted(_p, "Unexpected character");
}
_line_no ++;
return true;
}
@ -328,26 +385,60 @@ void HashtableTextDump::check_version(const char* ver) {
skip_newline();
}
void HashtableTextDump::scan_prefix_type() {
_p ++;
if (strncmp(_p, "SECTION: String", 15) == 0) {
_p += 15;
_prefix_type = StringPrefix;
} else if (strncmp(_p, "SECTION: Symbol", 15) == 0) {
_p += 15;
_prefix_type = SymbolPrefix;
} else {
_prefix_type = Unknown;
}
skip_newline();
}
int HashtableTextDump::scan_prefix() {
int HashtableTextDump::scan_prefix(int* utf8_length) {
if (*_p == '@') {
scan_prefix_type();
}
switch (_prefix_type) {
case SymbolPrefix:
*utf8_length = scan_symbol_prefix(); break;
case StringPrefix:
*utf8_length = scan_string_prefix(); break;
default:
tty->print_cr("Shared input data type: Unknown.");
corrupted(_p, "Unknown data type");
}
return _prefix_type;
}
int HashtableTextDump::scan_string_prefix() {
// Expect /[0-9]+: /
int utf8_length = get_num(':');
int utf8_length;
get_num(':', &utf8_length);
if (*_p != ' ') {
corrupted(_p);
corrupted(_p, "Wrong prefix format for string");
}
_p++;
return utf8_length;
}
int HashtableTextDump::scan_prefix2() {
int HashtableTextDump::scan_symbol_prefix() {
// Expect /[0-9]+ (-|)[0-9]+: /
int utf8_length = get_num(' ');
if (*_p == '-') {
_p++;
int utf8_length;
get_num(' ', &utf8_length);
if (*_p == '-') {
_p++;
}
(void)get_num(':');
int ref_num;
(void)get_num(':', &ref_num);
if (*_p != ' ') {
corrupted(_p);
corrupted(_p, "Wrong prefix format for symbol");
}
_p++;
return utf8_length;
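Read together with scan_prefix_type above, the two prefix parsers accept dump records of roughly the following shape (a reconstruction from the parsing code only; the exact output of SymbolTable::dump and StringTable::dump is not shown in this diff):
@SECTION: Symbol
16 -1: java/lang/Object
@SECTION: String
5: hello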
@ -408,7 +499,7 @@ void HashtableTextDump::get_utf8(char* utf8_buffer, int utf8_length) {
case 'r': *to++ = '\r'; break;
case '\\': *to++ = '\\'; break;
default:
ShouldNotReachHere();
corrupted(_p, "Unsupported character");
}
}
}

View File

@ -28,6 +28,7 @@
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "services/diagnosticCommand.hpp"
#include "utilities/hashtable.hpp"
@ -49,7 +50,7 @@ public:
// the compact table to the shared archive.
//
// At dump time, the CompactHashtableWriter obtains all entries from the
// symbol table and adds them to a new temporary hash table. The hash
// symbol/string table and adds them to a new temporary hash table. The hash
// table size (number of buckets) is calculated using
// '(num_entries + bucket_size - 1) / bucket_size'. The default bucket
// size is 4 and can be changed with the -XX:SharedSymbolTableBucketSize option.
@ -57,14 +58,14 @@ public:
// faster lookup. It also has relatively small number of empty buckets and
// good distribution of the entries.
//
// We use a simple hash function (symbol_hash % num_bucket) for the table.
// We use a simple hash function (hash % num_bucket) for the table.
// The new table is compacted when written out. Please see comments
// above the CompactHashtable class for the table layout detail. The bucket
// offsets are written to the archive as part of the compact table. The
// bucket offset is encoded in the low 30-bit (0-29) and the bucket type
// (regular or compact) is encoded in bit[31, 30]. For buckets with more
// than one entry, both symbol hash and symbol offset are written to the
// table. For buckets with only one entry, only the symbol offset is written
// than one entry, both hash and entry offset are written to the
// table. For buckets with only one entry, only the entry offset is written
// to the table and the buckets are tagged as compact in their type bits.
// Buckets without entries are omitted from the table. Their offsets are
// still written out for faster lookup.
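The bucket-count formula quoted above simply rounds up to whole buckets. A self-contained sketch, where bucket_size stands in for the value controlled by -XX:SharedSymbolTableBucketSize (default 4); the writer's real number_of_buckets() takes only the entry count:

#include <cassert>

// Illustrative only: '(num_entries + bucket_size - 1) / bucket_size' rounds up.
static int number_of_buckets(int num_entries, int bucket_size) {
  return (num_entries + bucket_size - 1) / bucket_size;
}

int main() {
  assert(number_of_buckets(12, 4) == 3);  // exactly full buckets
  assert(number_of_buckets(13, 4) == 4);  // one extra entry needs one more bucket
  assert(number_of_buckets(0, 4) == 0);   // no entries, no buckets
  return 0;
}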
@ -78,6 +79,7 @@ public:
public:
Entry(unsigned int hash, Symbol *symbol) : _next(NULL), _hash(hash), _literal(symbol) {}
Entry(unsigned int hash, oop string) : _next(NULL), _hash(hash), _literal(string) {}
void *value() {
return _literal;
@ -85,6 +87,9 @@ public:
Symbol *symbol() {
return (Symbol*)_literal;
}
oop string() {
return (oop)_literal;
}
unsigned int hash() {
return _hash;
}
@ -95,7 +100,7 @@ public:
private:
static int number_of_buckets(int num_entries);
const char* _table_name;
int _type;
int _num_entries;
int _num_buckets;
juint* _bucket_sizes;
@ -105,7 +110,7 @@ private:
public:
// This is called at dump-time only
CompactHashtableWriter(const char* table_name, int num_entries, CompactHashtableStats* stats);
CompactHashtableWriter(int table_type, int num_entries, CompactHashtableStats* stats);
~CompactHashtableWriter();
int get_required_bytes() {
@ -116,6 +121,10 @@ public:
add(hash, new Entry(hash, symbol));
}
void add(unsigned int hash, oop string) {
add(hash, new Entry(hash, string));
}
private:
void add(unsigned int hash, Entry* entry);
juint* dump_table(juint* p, juint** first_bucket, NumberSeq* summary);
@ -123,6 +132,7 @@ private:
public:
void dump(char** top, char* end);
const char* table_name();
};
#define REGULAR_BUCKET_TYPE 0
@ -136,23 +146,23 @@ public:
/////////////////////////////////////////////////////////////////////////////
//
// CompactHashtable is used to stored the CDS archive's symbol table. Used
// CompactHashtable is used to store the CDS archive's symbol/string table. Used
// at runtime only to access the compact table from the archive.
//
// Because these tables are read-only (no entries can be added/deleted) at run-time
// and tend to have a large number of entries, we try to minimize the footprint
// cost per entry.
//
// Layout of compact symbol table in the shared archive:
// Layout of compact table in the shared archive:
//
// uintx base_address;
// juint num_symbols;
// juint num_entries;
// juint num_buckets;
// juint bucket_infos[num_buckets+1]; // bit[31,30]: type; bit[29-0]: offset
// juint table[]
//
// -----------------------------------
// | base_address | num_symbols |
// | base_address | num_entries |
// |---------------------------------|
// | num_buckets | bucket_info0 |
// |---------------------------------|
@ -177,9 +187,13 @@ public:
// compact buckets have '01' in their highest 2-bit, and regular buckets have
// '00' in their highest 2-bit.
//
// For normal buckets, each symbol's entry is 8 bytes in the table[]:
// juint hash; /* symbol hash */
// juint offset; /* Symbol* sym = (Symbol*)(base_address + offset) */
// For normal buckets, each entry is 8 bytes in the table[]:
// juint hash; /* symbol/string hash */
// union {
// juint offset; /* Symbol* sym = (Symbol*)(base_address + offset) */
// narrowOop str; /* String narrowOop encoding */
// }
//
//
// For compact buckets, each entry has only the 4-byte 'offset' in the table[].
//
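A small sketch of how a 32-bit bucket_info word with this layout can be decoded; the constants and helper names are illustrative and may differ from the actual BUCKET_TYPE/BUCKET_OFFSET macros:

#include <cstdint>
#include <cassert>

const uint32_t kRegularBucket = 0;  // '00' in bits 31-30
const uint32_t kCompactBucket = 1;  // '01' in bits 31-30

inline uint32_t bucket_type(uint32_t info)   { return info >> 30; }          // bits 31-30
inline uint32_t bucket_offset(uint32_t info) { return info & 0x3fffffffu; }  // bits 29-0

int main() {
  uint32_t info = (kCompactBucket << 30) | 12345u;  // hypothetical bucket_info word
  assert(bucket_type(info) == kCompactBucket);
  assert(bucket_offset(info) == 12345u);
  return 0;
}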
@ -189,19 +203,41 @@ public:
//
template <class T, class N> class CompactHashtable VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
public:
enum CompactHashtableType {
_symbol_table = 0,
_string_table = 1
};
private:
CompactHashtableType _type;
uintx _base_address;
juint _entry_count;
juint _bucket_count;
juint _table_end_offset;
juint* _buckets;
inline bool equals(T entry, const char* name, int len) {
if (entry->equals(name, len)) {
assert(entry->refcount() == -1, "must be shared");
return true;
} else {
return false;
inline Symbol* lookup_entry(CompactHashtable<Symbol*, char>* const t,
juint* addr, const char* name, int len) {
Symbol* sym = (Symbol*)((void*)(_base_address + *addr));
if (sym->equals(name, len)) {
assert(sym->refcount() == -1, "must be shared");
return sym;
}
return NULL;
}
inline oop lookup_entry(CompactHashtable<oop, char>* const t,
juint* addr, const char* name, int len) {
narrowOop obj = (narrowOop)(*addr);
oop string = oopDesc::decode_heap_oop(obj);
if (java_lang_String::equals(string, (jchar*)name, len)) {
return string;
}
return NULL;
}
public:
@ -211,7 +247,14 @@ public:
_table_end_offset = 0;
_buckets = 0;
}
const char* init(const char *buffer);
const char* init(CompactHashtableType type, const char *buffer);
void reset() {
_entry_count = 0;
_bucket_count = 0;
_table_end_offset = 0;
_buckets = 0;
}
// Lookup an entry from the compact table
inline T lookup(const N* name, unsigned int hash, int len) {
@ -225,23 +268,22 @@ public:
juint* bucket_end = _buckets;
if (bucket_type == COMPACT_BUCKET_TYPE) {
// the compact bucket has one entry with symbol offset only
T entry = (T)((void*)(_base_address + bucket[0]));
if (equals(entry, name, len)) {
return entry;
// the compact bucket has one entry with entry offset only
T res = lookup_entry(this, &bucket[0], name, len);
if (res != NULL) {
return res;
}
} else {
// This is a regular bucket, which has more than one
// entries. Each entry is a pair of symbol (hash, offset).
// entries. Each entry is a pair of entry (hash, offset).
// Seek until the end of the bucket.
bucket_end += BUCKET_OFFSET(_buckets[index + 1]);
while (bucket < bucket_end) {
unsigned int h = (unsigned int)(bucket[0]);
if (h == hash) {
juint offset = bucket[1];
T entry = (T)((void*)(_base_address + offset));
if (equals(entry, name, len)) {
return entry;
T res = lookup_entry(this, &bucket[1], name, len);
if (res != NULL) {
return res;
}
}
bucket += 2;
@ -253,12 +295,15 @@ public:
// iterate over symbols
void symbols_do(SymbolClosure *cl);
// iterate over strings
void oops_do(OopClosure* f);
};
////////////////////////////////////////////////////////////////////////
//
// Read/Write the contents of a hashtable textual dump (created by
// SymbolTable::dump).
// SymbolTable::dump and StringTable::dump).
// Because the dump file may be big (hundreds of MB in extreme cases),
// we use mmap for fast access when reading it.
//
@ -269,21 +314,29 @@ class HashtableTextDump VALUE_OBJ_CLASS_SPEC {
const char* _end;
const char* _filename;
size_t _size;
int _prefix_type;
int _line_no;
public:
HashtableTextDump(const char* filename);
~HashtableTextDump();
enum {
SymbolPrefix = 1 << 0,
StringPrefix = 1 << 1,
Unknown = 1 << 2
};
void quit(const char* err, const char* msg);
inline int remain() {
return (int)(_end - _p);
}
void corrupted(const char *p);
void corrupted(const char *p, const char *msg);
inline void corrupted_if(bool cond) {
if (cond) {
corrupted(_p);
corrupted(_p, NULL);
}
}
@ -292,7 +345,7 @@ public:
void skip_past(char c);
void check_version(const char* ver);
inline int get_num(char delim) {
inline bool get_num(char delim, int *utf8_length) {
const char* p = _p;
const char* end = _end;
int num = 0;
@ -303,18 +356,22 @@ public:
num = num * 10 + (c - '0');
} else if (c == delim) {
_p = p;
return num;
*utf8_length = num;
return true;
} else {
corrupted(p-1);
// Not [0-9], not 'delim'
return false;
}
}
corrupted(_end);
corrupted(_end, "Incorrect format");
ShouldNotReachHere();
return 0;
return false;
}
int scan_prefix();
int scan_prefix2();
void scan_prefix_type();
int scan_prefix(int* utf8_length);
int scan_string_prefix();
int scan_symbol_prefix();
jchar unescape(const char* from, const char* end, int count);
void get_utf8(char* utf8_buffer, int utf8_length);

View File

@ -118,6 +118,10 @@ class java_lang_String : AllStatic {
return hash_offset;
}
static void set_value_raw(oop string, typeArrayOop buffer) {
assert(initialized, "Must be initialized");
string->obj_field_put_raw(value_offset, buffer);
}
static void set_value(oop string, typeArrayOop buffer) {
assert(initialized && (value_offset > 0), "Must be initialized");
string->obj_field_put(value_offset, (oop)buffer);
@ -210,6 +214,7 @@ class java_lang_String : AllStatic {
// Debugging
static void print(oop java_string, outputStream* st);
friend class JavaClasses;
friend class StringTable;
};

View File

@ -38,6 +38,7 @@
#include "utilities/hashtable.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/g1StringDedup.hpp"
#endif
@ -87,19 +88,28 @@ class StableMemoryChecker : public StackObj {
// --------------------------------------------------------------------------
StringTable* StringTable::_the_table = NULL;
bool StringTable::_ignore_shared_strings = false;
bool StringTable::_needs_rehashing = false;
volatile int StringTable::_parallel_claimed_idx = 0;
CompactHashtable<oop, char> StringTable::_shared_table;
// Pick hashing algorithm
unsigned int StringTable::hash_string(const jchar* s, int len) {
return use_alternate_hashcode() ? AltHashing::murmur3_32(seed(), s, len) :
java_lang_String::hash_code(s, len);
}
oop StringTable::lookup(int index, jchar* name,
int len, unsigned int hash) {
oop StringTable::lookup_shared(jchar* name, int len) {
// java_lang_String::hash_code() was used to compute hash values in the shared table. Don't
// use the hash value from StringTable::hash_string() as it might use alternate hashcode.
return _shared_table.lookup((const char*)name,
java_lang_String::hash_code(name, len), len);
}
oop StringTable::lookup_in_main_table(int index, jchar* name,
int len, unsigned int hash) {
int count = 0;
for (HashtableEntry<oop, mtSymbol>* l = bucket(index); l != NULL; l = l->next()) {
count++;
@ -140,7 +150,8 @@ oop StringTable::basic_add(int index_arg, Handle string, jchar* name,
// Since look-up was done lock-free, we need to check if another
// thread beat us in the race to insert the symbol.
oop test = lookup(index, name, len, hashValue); // calls lookup(u1*, int)
// No need to lookup the shared table from here since the caller (intern()) already did
oop test = lookup_in_main_table(index, name, len, hashValue); // calls lookup(u1*, int)
if (test != NULL) {
// Entry already added
return test;
@ -172,9 +183,14 @@ static void ensure_string_alive(oop string) {
}
oop StringTable::lookup(jchar* name, int len) {
oop string = lookup_shared(name, len);
if (string != NULL) {
return string;
}
unsigned int hash = hash_string(name, len);
int index = the_table()->hash_to_index(hash);
oop string = the_table()->lookup(index, name, len, hash);
string = the_table()->lookup_in_main_table(index, name, len, hash);
ensure_string_alive(string);
@ -184,9 +200,14 @@ oop StringTable::lookup(jchar* name, int len) {
oop StringTable::intern(Handle string_or_null, jchar* name,
int len, TRAPS) {
oop found_string = lookup_shared(name, len);
if (found_string != NULL) {
return found_string;
}
unsigned int hashValue = hash_string(name, len);
int index = the_table()->hash_to_index(hashValue);
oop found_string = the_table()->lookup(index, name, len, hashValue);
found_string = the_table()->lookup_in_main_table(index, name, len, hashValue);
// Found
if (found_string != NULL) {
@ -611,3 +632,131 @@ int StringtableDCmd::num_arguments() {
return 0;
}
}
// Sharing
bool StringTable::copy_shared_string(GrowableArray<MemRegion> *string_space,
CompactHashtableWriter* ch_table) {
#if INCLUDE_CDS && INCLUDE_ALL_GCS && defined(_LP64) && !defined(_WINDOWS)
assert(UseG1GC, "Only support G1 GC");
assert(UseCompressedOops && UseCompressedClassPointers,
"Only support UseCompressedOops and UseCompressedClassPointers enabled");
Thread* THREAD = Thread::current();
G1CollectedHeap::heap()->begin_archive_alloc_range();
for (int i = 0; i < the_table()->table_size(); ++i) {
HashtableEntry<oop, mtSymbol>* bucket = the_table()->bucket(i);
for ( ; bucket != NULL; bucket = bucket->next()) {
oop s = bucket->literal();
unsigned int hash = java_lang_String::hash_code(s);
if (hash == 0) {
continue;
}
// allocate the new 'value' array first
typeArrayOop v = java_lang_String::value(s);
int v_len = v->size();
typeArrayOop new_v;
if (G1CollectedHeap::heap()->is_archive_alloc_too_large(v_len)) {
continue; // skip the current String. The 'value' array is too large to handle
} else {
new_v = (typeArrayOop)G1CollectedHeap::heap()->archive_mem_allocate(v_len);
if (new_v == NULL) {
return false; // allocation failed
}
}
// now allocate the new String object
int s_len = s->size();
oop new_s = (oop)G1CollectedHeap::heap()->archive_mem_allocate(s_len);
if (new_s == NULL) {
return false;
}
s->identity_hash();
v->identity_hash();
// copy the objects' data
Copy::aligned_disjoint_words((HeapWord*)s, (HeapWord*)new_s, s_len);
Copy::aligned_disjoint_words((HeapWord*)v, (HeapWord*)new_v, v_len);
// adjust the pointer to the 'value' field in the new String oop. Also pre-compute and set the
// 'hash' field. That avoids writes to the shared strings at runtime by the deduplication process.
java_lang_String::set_value_raw(new_s, new_v);
if (java_lang_String::hash(new_s) == 0) {
java_lang_String::set_hash(new_s, hash);
}
// add to the compact table
ch_table->add(hash, new_s);
}
}
G1CollectedHeap::heap()->end_archive_alloc_range(string_space, os::vm_allocation_granularity());
assert(string_space->length() <= 2, "sanity");
#endif
return true;
}
bool StringTable::copy_compact_table(char** top, char *end, GrowableArray<MemRegion> *string_space,
size_t* space_size) {
#if INCLUDE_CDS && defined(_LP64) && !defined(_WINDOWS)
if (!(UseG1GC && UseCompressedOops && UseCompressedClassPointers)) {
if (PrintSharedSpaces) {
tty->print_cr("Shared strings are excluded from the archive as UseG1GC, "
"UseCompressedOops and UseCompressedClassPointers are required.");
}
return true;
}
CompactHashtableWriter ch_table(CompactHashtable<oop, char>::_string_table,
the_table()->number_of_entries(),
&MetaspaceShared::stats()->string);
// Copy the interned strings into the "string space" within the java heap
if (!copy_shared_string(string_space, &ch_table)) {
return false;
}
for (int i = 0; i < string_space->length(); i++) {
*space_size += string_space->at(i).byte_size();
}
// Now dump the compact table
if (*top + ch_table.get_required_bytes() > end) {
// not enough space left
return false;
}
ch_table.dump(top, end);
*top = (char*)align_pointer_up(*top, sizeof(void*));
#endif
return true;
}
void StringTable::shared_oops_do(OopClosure* f) {
#if INCLUDE_CDS && defined(_LP64) && !defined(_WINDOWS)
_shared_table.oops_do(f);
#endif
}
const char* StringTable::init_shared_table(FileMapInfo *mapinfo, char *buffer) {
#if INCLUDE_CDS && defined(_LP64) && !defined(_WINDOWS)
if (mapinfo->space_capacity(MetaspaceShared::first_string) == 0) {
// no shared string data
return buffer;
}
// initialize the shared table
juint *p = (juint*)buffer;
const char* end = _shared_table.init(
CompactHashtable<oop, char>::_string_table, (char*)p);
const char* aligned_end = (const char*)align_pointer_up(end, sizeof(void*));
if (_ignore_shared_strings) {
_shared_table.reset();
}
return aligned_end;
#endif
return buffer;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,10 @@
#include "memory/allocation.inline.hpp"
#include "utilities/hashtable.hpp"
template <class T, class N> class CompactHashtable;
class CompactHashtableWriter;
class FileMapInfo;
class StringTable : public RehashableHashtable<oop, mtSymbol> {
friend class VMStructs;
friend class Symbol;
@ -36,6 +40,10 @@ private:
// The string table
static StringTable* _the_table;
// Shared string table
static CompactHashtable<oop, char> _shared_table;
static bool _ignore_shared_strings;
// Set if one bucket is out of balance due to hash algorithm deficiency
static bool _needs_rehashing;
@ -46,7 +54,8 @@ private:
oop basic_add(int index, Handle string_or_null, jchar* name, int len,
unsigned int hashValue, TRAPS);
oop lookup(int index, jchar* chars, int length, unsigned int hashValue);
oop lookup_in_main_table(int index, jchar* chars, int length, unsigned int hashValue);
static oop lookup_shared(jchar* name, int len);
// Apply the given oop closure to the entries in the buckets
// in the range [start_idx, end_idx).
@ -141,12 +150,14 @@ public:
static int verify_and_compare_entries();
// Sharing
static void copy_buckets(char** top, char*end) {
the_table()->Hashtable<oop, mtSymbol>::copy_buckets(top, end);
}
static void copy_table(char** top, char*end) {
the_table()->Hashtable<oop, mtSymbol>::copy_table(top, end);
}
static void ignore_shared_strings(bool v) { _ignore_shared_strings = v; }
static bool shared_string_ignored() { return _ignore_shared_strings; }
static void shared_oops_do(OopClosure* f);
static bool copy_shared_string(GrowableArray<MemRegion> *string_space,
CompactHashtableWriter* ch_table);
static bool copy_compact_table(char** top, char* end, GrowableArray<MemRegion> *string_space,
size_t* space_size);
static const char* init_shared_table(FileMapInfo *mapinfo, char* buffer);
static void reverse() {
the_table()->Hashtable<oop, mtSymbol>::reverse();
}

View File

@ -539,7 +539,8 @@ void SymbolTable::dump(outputStream* st, bool verbose) {
bool SymbolTable::copy_compact_table(char** top, char*end) {
#if INCLUDE_CDS
CompactHashtableWriter ch_table("symbol", the_table()->number_of_entries(),
CompactHashtableWriter ch_table(CompactHashtable<Symbol*, char>::_symbol_table,
the_table()->number_of_entries(),
&MetaspaceShared::stats()->symbol);
if (*top + ch_table.get_required_bytes() > end) {
// not enough space left
@ -556,7 +557,6 @@ bool SymbolTable::copy_compact_table(char** top, char*end) {
}
}
char* old_top = *top;
ch_table.dump(top, end);
*top = (char*)align_pointer_up(*top, sizeof(void*));
@ -565,7 +565,8 @@ bool SymbolTable::copy_compact_table(char** top, char*end) {
}
const char* SymbolTable::init_shared_table(const char* buffer) {
const char* end = _shared_table.init(buffer);
const char* end = _shared_table.init(
CompactHashtable<Symbol*, char>::_symbol_table, buffer);
return (const char*)align_pointer_up(end, sizeof(void*));
}

View File

@ -190,7 +190,12 @@ class CodeCache : AllStatic {
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
static void clear_inline_caches(); // clear all inline caches
// Returns the CodeBlobType for nmethods of the given compilation level
// Returns the CodeBlobType for the given nmethod
static int get_code_blob_type(nmethod* nm) {
return get_code_heap(nm)->code_blob_type();
}
// Returns the CodeBlobType for the given compilation level
static int get_code_blob_type(int comp_level) {
if (comp_level == CompLevel_none ||
comp_level == CompLevel_simple ||
@ -287,7 +292,7 @@ private:
// Iterate over all CodeBlobs
_code_blob_type = CodeBlobType::All;
} else if (nm != NULL) {
_code_blob_type = CodeCache::get_code_blob_type(nm->comp_level());
_code_blob_type = CodeCache::get_code_blob_type(nm);
} else {
// Only iterate over method code heaps, starting with non-profiled
_code_blob_type = CodeBlobType::MethodNonProfiled;

View File

@ -1421,7 +1421,7 @@ void nmethod::flush() {
Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
if (PrintMethodFlushing) {
tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
_compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(_comp_level))/1024);
_compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024);
}
// We need to deallocate any ExceptionCache data.

View File

@ -285,9 +285,9 @@ void CMSCollector::ref_processor_init() {
_ref_processor =
new ReferenceProcessor(_span, // span
(ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
(int) ParallelGCThreads, // mt processing degree
ParallelGCThreads, // mt processing degree
_cmsGen->refs_discovery_is_mt(), // mt discovery
(int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
_cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
&_is_alive_closure); // closure for liveness info
// Initialize the _ref_processor field of CMSGen
@ -562,7 +562,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
// are not shared with parallel scavenge (ParNew).
{
uint i;
uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);
uint num_queues = MAX2(ParallelGCThreads, ConcGCThreads);
if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
|| ParallelRefProcEnabled)
@ -5322,8 +5322,8 @@ CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
_bit_map(bit_map),
_work_queue(work_queue),
_mark_and_push(collector, span, bit_map, work_queue),
_low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
(uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
_low_water_mark(MIN2((work_queue->max_elems()/4),
((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads)))
{ }
// . see if we can share work_queues with ParNew? XXX
@ -6251,8 +6251,8 @@ Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
_span(span),
_bit_map(bit_map),
_work_queue(work_queue),
_low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
(uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
_low_water_mark(MIN2((work_queue->max_elems()/4),
((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
_par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
{
_ref_processor = rp;

View File

@ -42,7 +42,7 @@ void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegio
uint n_threads) {
assert(n_threads > 0, "expected n_threads > 0");
assert(n_threads <= ParallelGCThreads,
err_msg("n_threads: %u > ParallelGCThreads: " UINTX_FORMAT, n_threads, ParallelGCThreads));
err_msg("n_threads: %u > ParallelGCThreads: %u", n_threads, ParallelGCThreads));
// Make sure the LNC array is valid for the space.
jbyte** lowest_non_clean;

View File

@ -1349,7 +1349,7 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
// Trim off a prefix of at most objsFromOverflow items
Thread* tid = Thread::current();
size_t spin_count = (size_t)ParallelGCThreads;
size_t spin_count = ParallelGCThreads;
size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
// someone grabbed it before we did ...
@ -1466,9 +1466,9 @@ void ParNewGeneration::ref_processor_init() {
_ref_processor =
new ReferenceProcessor(_reserved, // span
ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
(uint) ParallelGCThreads, // mt processing degree
ParallelGCThreads, // mt processing degree
refs_discovery_is_mt(), // mt discovery
(uint) ParallelGCThreads, // mt discovery degree
ParallelGCThreads, // mt discovery degree
refs_discovery_is_atomic(), // atomic_discovery
NULL); // is_alive_non_header
}

View File

@ -107,7 +107,8 @@ void CollectionSetChooser::verify() {
HeapRegion *curr = regions_at(index++);
guarantee(curr != NULL, "Regions in _regions array cannot be NULL");
guarantee(!curr->is_young(), "should not be young!");
guarantee(!curr->is_humongous(), "should not be humongous!");
guarantee(!curr->is_pinned(),
err_msg("Pinned region should not be in collection set (index %u)", curr->hrm_index()));
if (prev != NULL) {
guarantee(order_regions(prev, curr) != 1,
err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
@ -149,8 +150,8 @@ void CollectionSetChooser::sort_regions() {
void CollectionSetChooser::add_region(HeapRegion* hr) {
assert(!hr->is_humongous(),
"Humongous regions shouldn't be added to the collection set");
assert(!hr->is_pinned(),
err_msg("Pinned region shouldn't be added to the collection set (index %u)", hr->hrm_index()));
assert(!hr->is_young(), "should not be young!");
_regions.append(hr);
_length++;

View File

@ -103,13 +103,12 @@ public:
void sort_regions();
// Determine whether to add the given region to the CSet chooser or
// not. Currently, we skip humongous regions (we never add them to
// the CSet, we only reclaim them during cleanup) and regions whose
// live bytes are over the threshold.
// not. Currently, we skip pinned regions and regions whose live
// bytes are over the threshold. Humongous regions may be reclaimed during cleanup.
bool should_add(HeapRegion* hr) {
assert(hr->is_marked(), "pre-condition");
assert(!hr->is_young(), "should never consider young regions");
return !hr->is_humongous() &&
return !hr->is_pinned() &&
hr->live_bytes() < _region_live_threshold_bytes;
}

View File

@ -30,6 +30,7 @@
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ErgoVerbose.hpp"
#include "gc/g1/g1Log.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
@ -177,7 +178,7 @@ class ClearBitmapHRClosure : public HeapRegionClosure {
// will have them as guarantees at the beginning / end of the bitmap
// clearing to get some checking in the product.
assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
assert(!_may_yield || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
}
return false;
@ -518,7 +519,7 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev
_markStack(this),
// _finger set in set_non_marking_state
_max_worker_id((uint)ParallelGCThreads),
_max_worker_id(ParallelGCThreads),
// _active_tasks set in set_non_marking_state
// _tasks set inside the constructor
_task_queues(new CMTaskQueueSet((int) _max_worker_id)),
@ -579,8 +580,8 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev
_root_regions.init(_g1h, this);
if (ConcGCThreads > ParallelGCThreads) {
warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
"than ParallelGCThreads (" UINTX_FORMAT ").",
warning("Can't have more ConcGCThreads (%u) "
"than ParallelGCThreads (%u).",
ConcGCThreads, ParallelGCThreads);
return;
}
@ -604,20 +605,20 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev
double sleep_factor =
(1.0 - marking_task_overhead) / marking_task_overhead;
FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num);
_sleep_factor = sleep_factor;
_marking_task_overhead = marking_task_overhead;
} else {
// Calculate the number of parallel marking threads by scaling
// the number of parallel GC threads.
uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
_sleep_factor = 0.0;
_marking_task_overhead = 1.0;
}
assert(ConcGCThreads > 0, "Should have been set");
_parallel_marking_threads = (uint) ConcGCThreads;
_parallel_marking_threads = ConcGCThreads;
_max_parallel_marking_threads = _parallel_marking_threads;
if (parallel_marking_threads() > 1) {
@ -830,7 +831,7 @@ void ConcurrentMark::clearNextBitmap() {
// marking bitmap and getting it ready for the next cycle. During
// this time no other cycle can start. So, let's make sure that this
// is the case.
guarantee(!g1h->mark_in_progress(), "invariant");
guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
@ -844,7 +845,7 @@ void ConcurrentMark::clearNextBitmap() {
// Repeat the asserts from above.
guarantee(cmThread()->during_cycle(), "invariant");
guarantee(!g1h->mark_in_progress(), "invariant");
guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
}
class CheckBitmapClearHRClosure : public HeapRegionClosure {
@ -1254,7 +1255,7 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
// If a full collection has happened, we shouldn't do this.
if (has_aborted()) {
g1h->set_marking_complete(); // So bitmap clearing isn't confused
g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
return;
}
@ -1783,7 +1784,7 @@ public:
const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
bool doHeapRegion(HeapRegion *hr) {
if (hr->is_continues_humongous()) {
if (hr->is_continues_humongous() || hr->is_archive()) {
return false;
}
// We use a claim value of zero here because all regions
@ -1888,7 +1889,7 @@ void ConcurrentMark::cleanup() {
// If a full collection has happened, we shouldn't do this.
if (has_aborted()) {
g1h->set_marking_complete(); // So bitmap clearing isn't confused
g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
return;
}
@ -1934,7 +1935,7 @@ void ConcurrentMark::cleanup() {
}
size_t start_used_bytes = g1h->used();
g1h->set_marking_complete();
g1h->collector_state()->set_mark_in_progress(false);
double count_end = os::elapsedTime();
double this_final_counting_time = (count_end - start);
@ -2756,7 +2757,7 @@ public:
void ConcurrentMark::verify_no_cset_oops() {
assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
if (!G1CollectedHeap::heap()->mark_in_progress()) {
if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) {
return;
}

View File

@ -194,7 +194,7 @@ void ConcurrentMarkThread::run() {
// We don't want to update the marking status if a GC pause
// is already underway.
SuspendibleThreadSetJoiner sts_join;
g1h->set_marking_complete();
g1h->collector_state()->set_mark_in_progress(false);
}
// Check if cleanup set the free_regions_coming flag. If it

View File

@ -26,6 +26,7 @@
#include "gc/g1/g1Allocator.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1MarkSweep.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
@ -44,6 +45,8 @@ void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
HeapRegion** retained_old) {
HeapRegion* retained_region = *retained_old;
*retained_old = NULL;
assert(retained_region == NULL || !retained_region->is_archive(),
err_msg("Archive region should not be alloc region (index %u)", retained_region->hrm_index()));
// We will discard the current GC alloc region if:
// a) it's in the collection set (it can happen!),
@ -65,7 +68,7 @@ void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
// we allocate to in the region sets. We'll re-add it later, when
// it's retired again.
_g1h->_old_set.remove(retained_region);
bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
bool during_im = _g1h->collector_state()->during_initial_mark_pause();
retained_region->note_start_of_copying(during_im);
old->set(retained_region);
_g1h->_hr_printer.reuse(retained_region);
@ -168,3 +171,153 @@ void G1DefaultParGCAllocator::waste(size_t& wasted, size_t& undo_wasted) {
}
}
}
G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h) {
// Create the archive allocator, and also enable archive object checking
// in mark-sweep, since we will be creating archive regions.
G1ArchiveAllocator* result = new G1ArchiveAllocator(g1h);
G1MarkSweep::enable_archive_object_check();
return result;
}
bool G1ArchiveAllocator::alloc_new_region() {
// Allocate the highest free region in the reserved heap,
// and add it to our list of allocated regions. It is marked
// archive and added to the old set.
HeapRegion* hr = _g1h->alloc_highest_free_region();
if (hr == NULL) {
return false;
}
assert(hr->is_empty(), err_msg("expected empty region (index %u)", hr->hrm_index()));
hr->set_archive();
_g1h->_old_set.add(hr);
_g1h->_hr_printer.alloc(hr, G1HRPrinter::Archive);
_allocated_regions.append(hr);
_allocation_region = hr;
// Set up _bottom and _max to begin allocating in the lowest
// min_region_size'd chunk of the allocated G1 region.
_bottom = hr->bottom();
_max = _bottom + HeapRegion::min_region_size_in_words();
// Tell mark-sweep that objects in this region are not to be marked.
G1MarkSweep::mark_range_archive(MemRegion(_bottom, HeapRegion::GrainWords));
// Since we've modified the old set, call update_sizes.
_g1h->g1mm()->update_sizes();
return true;
}
HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
assert(word_size != 0, "size must not be zero");
if (_allocation_region == NULL) {
if (!alloc_new_region()) {
return NULL;
}
}
HeapWord* old_top = _allocation_region->top();
assert(_bottom >= _allocation_region->bottom(),
err_msg("inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
p2i(_bottom), p2i(_allocation_region->bottom())));
assert(_max <= _allocation_region->end(),
err_msg("inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
p2i(_max), p2i(_allocation_region->end())));
assert(_bottom <= old_top && old_top <= _max,
err_msg("inconsistent allocation state: expected "
PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
p2i(_bottom), p2i(old_top), p2i(_max)));
// Allocate the next word_size words in the current allocation chunk.
// If allocation would cross the _max boundary, insert a filler and begin
// at the base of the next min_region_size'd chunk. Also advance to the next
// chunk if we don't yet cross the boundary, but the remainder would be too
// small to fill.
HeapWord* new_top = old_top + word_size;
size_t remainder = pointer_delta(_max, new_top);
if ((new_top > _max) ||
((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
if (old_top != _max) {
size_t fill_size = pointer_delta(_max, old_top);
CollectedHeap::fill_with_object(old_top, fill_size);
_summary_bytes_used += fill_size * HeapWordSize;
}
_allocation_region->set_top(_max);
old_top = _bottom = _max;
// Check if we've just used up the last min_region_size'd chunk
// in the current region, and if so, allocate a new one.
if (_bottom != _allocation_region->end()) {
_max = _bottom + HeapRegion::min_region_size_in_words();
} else {
if (!alloc_new_region()) {
return NULL;
}
old_top = _allocation_region->bottom();
}
}
_allocation_region->set_top(old_top + word_size);
_summary_bytes_used += word_size * HeapWordSize;
return old_top;
}
void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
size_t end_alignment_in_bytes) {
assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
err_msg("alignment " SIZE_FORMAT " too large", end_alignment_in_bytes));
assert(is_size_aligned(end_alignment_in_bytes, HeapWordSize),
err_msg("alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize));
// If we've allocated nothing, simply return.
if (_allocation_region == NULL) {
return;
}
// If an end alignment was requested, insert filler objects.
if (end_alignment_in_bytes != 0) {
HeapWord* currtop = _allocation_region->top();
HeapWord* newtop = (HeapWord*)align_pointer_up(currtop, end_alignment_in_bytes);
size_t fill_size = pointer_delta(newtop, currtop);
if (fill_size != 0) {
if (fill_size < CollectedHeap::min_fill_size()) {
// If the required fill is smaller than we can represent,
// bump up to the next aligned address. We know we won't exceed the current
// region boundary because the max supported alignment is smaller than the min
// region size, and because the allocation code never leaves space smaller than
// the min_fill_size at the top of the current allocation region.
newtop = (HeapWord*)align_pointer_up(currtop + CollectedHeap::min_fill_size(),
end_alignment_in_bytes);
fill_size = pointer_delta(newtop, currtop);
}
HeapWord* fill = archive_mem_allocate(fill_size);
CollectedHeap::fill_with_objects(fill, fill_size);
}
}
// Loop through the allocated regions, and create MemRegions summarizing
// the allocated address range, combining contiguous ranges. Add the
// MemRegions to the GrowableArray provided by the caller.
int index = _allocated_regions.length() - 1;
assert(_allocated_regions.at(index) == _allocation_region,
err_msg("expected region %u at end of array, found %u",
_allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index()));
HeapWord* base_address = _allocation_region->bottom();
HeapWord* top = base_address;
while (index >= 0) {
HeapRegion* next = _allocated_regions.at(index);
HeapWord* new_base = next->bottom();
HeapWord* new_top = next->top();
if (new_base != top) {
ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
base_address = new_base;
}
top = new_top;
index = index - 1;
}
assert(top != base_address, err_msg("zero-sized range, address " PTR_FORMAT, p2i(base_address)));
ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
_allocated_regions.clear();
_allocation_region = NULL;
};

View File

@ -269,4 +269,72 @@ public:
virtual void waste(size_t& wasted, size_t& undo_wasted);
};
// G1ArchiveAllocator is used to allocate memory in archive
// regions. Such regions are not modifiable by GC, being neither
// scavenged nor compacted, nor even marked in the object header.
// They can contain no pointers to non-archive heap regions.
class G1ArchiveAllocator : public CHeapObj<mtGC> {
protected:
G1CollectedHeap* _g1h;
// The current allocation region
HeapRegion* _allocation_region;
// Regions allocated for the current archive range.
GrowableArray<HeapRegion*> _allocated_regions;
// The number of bytes used in the current range.
size_t _summary_bytes_used;
// Current allocation window within the current region.
HeapWord* _bottom;
HeapWord* _top;
HeapWord* _max;
// Allocate a new region for this archive allocator.
// Allocation is from the top of the reserved heap downward.
bool alloc_new_region();
public:
G1ArchiveAllocator(G1CollectedHeap* g1h) :
_g1h(g1h),
_allocation_region(NULL),
_allocated_regions((ResourceObj::set_allocation_type((address) &_allocated_regions,
ResourceObj::C_HEAP),
2), true /* C_Heap */),
_summary_bytes_used(0),
_bottom(NULL),
_top(NULL),
_max(NULL) { }
virtual ~G1ArchiveAllocator() {
assert(_allocation_region == NULL, "_allocation_region not NULL");
}
static G1ArchiveAllocator* create_allocator(G1CollectedHeap* g1h);
// Allocate memory for an individual object.
HeapWord* archive_mem_allocate(size_t word_size);
// Return the memory ranges used in the current archive, after
// aligning to the requested alignment.
void complete_archive(GrowableArray<MemRegion>* ranges,
size_t end_alignment_in_bytes);
// The number of bytes allocated by this allocator.
size_t used() {
return _summary_bytes_used;
}
// Clear the count of bytes allocated in prior G1 regions. This
// must be done when recalculate_use is used to reset the counter
// for the generic allocator, since it counts bytes in all G1
// regions, including those still associated with this allocator.
void clear_used() {
_summary_bytes_used = 0;
}
};
#endif // SHARE_VM_GC_G1_G1ALLOCATOR_HPP

View File

@ -128,6 +128,14 @@ public:
return biased_base()[biased_index];
}
// Return the index of the array element that covers the given
// word in the heap.
idx_t get_index_by_address(HeapWord* value) const {
idx_t biased_index = ((uintptr_t)value) >> this->shift_by();
this->verify_biased_index(biased_index);
return biased_index - _bias;
}
// Set the value of the array entry that corresponds to the given address.
void set_by_address(HeapWord * address, T value) {
idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
@ -135,6 +143,18 @@ public:
biased_base()[biased_index] = value;
}
// Set the value of all array entries that correspond to addresses
// in the specified MemRegion.
void set_by_address(MemRegion range, T value) {
idx_t biased_start = ((uintptr_t)range.start()) >> this->shift_by();
idx_t biased_last = ((uintptr_t)range.last()) >> this->shift_by();
this->verify_biased_index(biased_start);
this->verify_biased_index(biased_last);
for (idx_t i = biased_start; i <= biased_last; i++) {
biased_base()[i] = value;
}
}
protected:
// Returns the address of the element the given address maps to
T* address_mapped_to(HeapWord* address) {

View File

@ -34,6 +34,7 @@
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ErgoVerbose.hpp"
#include "gc/g1/g1EvacFailure.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
@ -404,7 +405,7 @@ HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
// can move in an incremental collection.
bool G1CollectedHeap::is_scavengable(const void* p) {
HeapRegion* hr = heap_region_containing(p);
return !hr->is_humongous();
return !hr->is_pinned();
}
// Private methods.
@ -907,6 +908,207 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
return NULL;
}
void G1CollectedHeap::begin_archive_alloc_range() {
assert_at_safepoint(true /* should_be_vm_thread */);
if (_archive_allocator == NULL) {
_archive_allocator = G1ArchiveAllocator::create_allocator(this);
}
}
bool G1CollectedHeap::is_archive_alloc_too_large(size_t word_size) {
// Allocations in archive regions cannot be of a size that would be considered
// humongous even for a minimum-sized region, because G1 region sizes/boundaries
// may be different at archive-restore time.
return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());
}
HeapWord* G1CollectedHeap::archive_mem_allocate(size_t word_size) {
assert_at_safepoint(true /* should_be_vm_thread */);
assert(_archive_allocator != NULL, "_archive_allocator not initialized");
if (is_archive_alloc_too_large(word_size)) {
return NULL;
}
return _archive_allocator->archive_mem_allocate(word_size);
}
void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
size_t end_alignment_in_bytes) {
assert_at_safepoint(true /* should_be_vm_thread */);
assert(_archive_allocator != NULL, "_archive_allocator not initialized");
// Call complete_archive to do the real work, filling in the MemRegion
// array with the archive regions.
_archive_allocator->complete_archive(ranges, end_alignment_in_bytes);
delete _archive_allocator;
_archive_allocator = NULL;
}
bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
MemRegion reserved = _hrm.reserved();
for (size_t i = 0; i < count; i++) {
if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
return false;
}
}
return true;
}
bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
MutexLockerEx x(Heap_lock);
MemRegion reserved = _hrm.reserved();
HeapWord* prev_last_addr = NULL;
HeapRegion* prev_last_region = NULL;
// Temporarily disable pretouching of heap pages. This interface is used
// when mmap'ing archived heap data in, so pre-touching is wasted.
FlagSetting fs(AlwaysPreTouch, false);
// Enable archive object checking in G1MarkSweep. We have to let it know
// about each archive range, so that objects in those ranges aren't marked.
G1MarkSweep::enable_archive_object_check();
// For each specified MemRegion range, allocate the corresponding G1
// regions and mark them as archive regions. We expect the ranges in
// ascending starting address order, without overlap.
for (size_t i = 0; i < count; i++) {
MemRegion curr_range = ranges[i];
HeapWord* start_address = curr_range.start();
size_t word_size = curr_range.word_size();
HeapWord* last_address = curr_range.last();
size_t commits = 0;
guarantee(reserved.contains(start_address) && reserved.contains(last_address),
err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
p2i(start_address), p2i(last_address)));
guarantee(start_address > prev_last_addr,
err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
p2i(start_address), p2i(prev_last_addr)));
prev_last_addr = last_address;
// Check for ranges that start in the same G1 region in which the previous
// range ended, and adjust the start address so we don't try to allocate
// the same region again. If the current range is entirely within that
// region, skip it, just adjusting the recorded top.
HeapRegion* start_region = _hrm.addr_to_region(start_address);
if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
start_address = start_region->end();
if (start_address > last_address) {
_allocator->increase_used(word_size * HeapWordSize);
start_region->set_top(last_address + 1);
continue;
}
start_region->set_top(start_address);
curr_range = MemRegion(start_address, last_address + 1);
start_region = _hrm.addr_to_region(start_address);
}
// Perform the actual region allocation, exiting if it fails.
// Then note how much new space we have allocated.
if (!_hrm.allocate_containing_regions(curr_range, &commits)) {
return false;
}
_allocator->increase_used(word_size * HeapWordSize);
if (commits != 0) {
ergo_verbose1(ErgoHeapSizing,
"attempt heap expansion",
ergo_format_reason("allocate archive regions")
ergo_format_byte("total size"),
HeapRegion::GrainWords * HeapWordSize * commits);
}
// Mark each G1 region touched by the range as archive, add it to the old set,
// and set the allocation context and top.
HeapRegion* curr_region = _hrm.addr_to_region(start_address);
HeapRegion* last_region = _hrm.addr_to_region(last_address);
prev_last_region = last_region;
while (curr_region != NULL) {
assert(curr_region->is_empty() && !curr_region->is_pinned(),
err_msg("Region already in use (index %u)", curr_region->hrm_index()));
_hr_printer.alloc(curr_region, G1HRPrinter::Archive);
curr_region->set_allocation_context(AllocationContext::system());
curr_region->set_archive();
_old_set.add(curr_region);
if (curr_region != last_region) {
curr_region->set_top(curr_region->end());
curr_region = _hrm.next_region_in_heap(curr_region);
} else {
curr_region->set_top(last_address + 1);
curr_region = NULL;
}
}
// Notify mark-sweep of the archive range.
G1MarkSweep::mark_range_archive(curr_range);
}
return true;
}
void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
MemRegion reserved = _hrm.reserved();
HeapWord *prev_last_addr = NULL;
HeapRegion* prev_last_region = NULL;
// For each MemRegion, create filler objects, if needed, in the G1 regions
// that contain the address range. The address range actually within the
// MemRegion will not be modified. That is assumed to have been initialized
// elsewhere, probably via an mmap of archived heap data.
MutexLockerEx x(Heap_lock);
for (size_t i = 0; i < count; i++) {
HeapWord* start_address = ranges[i].start();
HeapWord* last_address = ranges[i].last();
assert(reserved.contains(start_address) && reserved.contains(last_address),
err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
p2i(start_address), p2i(last_address)));
assert(start_address > prev_last_addr,
err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
p2i(start_address), p2i(prev_last_addr)));
HeapRegion* start_region = _hrm.addr_to_region(start_address);
HeapRegion* last_region = _hrm.addr_to_region(last_address);
HeapWord* bottom_address = start_region->bottom();
// Check for a range beginning in the same region in which the
// previous one ended.
if (start_region == prev_last_region) {
bottom_address = prev_last_addr + 1;
}
// Verify that the regions were all marked as archive regions by
// alloc_archive_regions.
HeapRegion* curr_region = start_region;
while (curr_region != NULL) {
guarantee(curr_region->is_archive(),
err_msg("Expected archive region at index %u", curr_region->hrm_index()));
if (curr_region != last_region) {
curr_region = _hrm.next_region_in_heap(curr_region);
} else {
curr_region = NULL;
}
}
prev_last_addr = last_address;
prev_last_region = last_region;
// Fill the memory below the allocated range with dummy object(s),
// if the region bottom does not match the range start, or if the previous
// range ended within the same G1 region, and there is a gap.
if (start_address != bottom_address) {
size_t fill_size = pointer_delta(start_address, bottom_address);
G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
_allocator->increase_used(fill_size * HeapWordSize);
}
}
}
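
The filler size is simply the word distance between the region bottom (or the point where the previous range ended) and the start of the archived data. A standalone sketch of that arithmetic, with an assumed 8-byte heap word and a range that begins 4 KB into its first region:

// Standalone sketch, not HotSpot code: the gap below an archived range is
// filled with a dummy object whose size is the distance in heap words.
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  const size_t HeapWordSize = 8;                          // assumed
  uintptr_t bottom_address  = 0x10000000;                 // region bottom (illustrative address)
  uintptr_t start_address   = bottom_address + 4 * 1024;  // archived data starts 4 KB in

  // pointer_delta() in HotSpot returns the distance in heap words; done by hand here.
  size_t fill_size = (start_address - bottom_address) / HeapWordSize;
  printf("filler object of %zu words (%zu bytes)\n", fill_size, fill_size * HeapWordSize);
  return 0;
}
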
HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
uint* gc_count_before_ret,
uint* gclocker_retry_count_ret) {
@ -1039,7 +1241,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
} else {
HeapWord* result = humongous_obj_allocate(word_size, context);
if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
g1_policy()->set_initiate_conc_mark_if_possible();
collector_state()->set_initiate_conc_mark_if_possible(true);
}
return result;
}
@ -1131,6 +1333,8 @@ public:
}
} else if (hr->is_continues_humongous()) {
_hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
} else if (hr->is_archive()) {
_hr_printer->post_compaction(hr, G1HRPrinter::Archive);
} else if (hr->is_old()) {
_hr_printer->post_compaction(hr, G1HRPrinter::Old);
} else {
@ -1250,7 +1454,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
g1_policy()->stop_incremental_cset_building();
tear_down_region_sets(false /* free_list_only */);
g1_policy()->set_gcs_are_young(true);
collector_state()->set_gcs_are_young(true);
// See the comments in g1CollectedHeap.hpp and
// G1CollectedHeap::ref_processing_init() about
@ -1714,16 +1918,15 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_ref_processor_stw(NULL),
_bot_shared(NULL),
_evac_failure_scan_stack(NULL),
_mark_in_progress(false),
_cg1r(NULL),
_g1mm(NULL),
_refine_cte_cl(NULL),
_full_collection(false),
_secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
_old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
_humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
_humongous_reclaim_candidates(),
_has_humongous_reclaim_candidates(false),
_archive_allocator(NULL),
_free_regions_coming(false),
_young_list(new YoungList(this)),
_gc_time_stamp(0),
@ -1733,7 +1936,6 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_surviving_young_words(NULL),
_old_marking_cycles_started(0),
_old_marking_cycles_completed(0),
_concurrent_cycle_started(false),
_heap_summary_sent(false),
_in_cset_fast_test(),
_dirty_cards_region_list(NULL),
@ -1750,9 +1952,13 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_workers->initialize_workers();
_allocator = G1Allocator::create_allocator(this);
_humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
_humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
int n_queues = (int)ParallelGCThreads;
// Override the default _filler_array_max_size so that no humongous filler
// objects are created.
_filler_array_max_size = _humongous_object_threshold_in_words;
uint n_queues = ParallelGCThreads;
_task_queues = new RefToScanQueueSet(n_queues);
uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
@ -1762,7 +1968,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
_evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
for (int i = 0; i < n_queues; i++) {
for (uint i = 0; i < n_queues; i++) {
RefToScanQueue* q = new RefToScanQueue();
q->initialize();
_task_queues->register_queue(i, q);
@ -2064,11 +2270,11 @@ void G1CollectedHeap::ref_processing_init() {
new ReferenceProcessor(mr, // span
ParallelRefProcEnabled && (ParallelGCThreads > 1),
// mt processing
(uint) ParallelGCThreads,
ParallelGCThreads,
// degree of mt processing
(ParallelGCThreads > 1) || (ConcGCThreads > 1),
// mt discovery
(uint) MAX2(ParallelGCThreads, ConcGCThreads),
MAX2(ParallelGCThreads, ConcGCThreads),
// degree of mt discovery
false,
// Reference discovery is not atomic
@ -2081,11 +2287,11 @@ void G1CollectedHeap::ref_processing_init() {
new ReferenceProcessor(mr, // span
ParallelRefProcEnabled && (ParallelGCThreads > 1),
// mt processing
(uint) ParallelGCThreads,
ParallelGCThreads,
// degree of mt processing
(ParallelGCThreads > 1),
// mt discovery
(uint) ParallelGCThreads,
ParallelGCThreads,
// degree of mt discovery
true,
// Reference discovery is atomic
@ -2165,7 +2371,11 @@ void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
// Computes the sum of the storage used by the various regions.
size_t G1CollectedHeap::used() const {
return _allocator->used();
size_t result = _allocator->used();
if (_archive_allocator != NULL) {
result += _archive_allocator->used();
}
return result;
}
size_t G1CollectedHeap::used_unlocked() const {
@ -2288,7 +2498,7 @@ void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
}
void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {
_concurrent_cycle_started = true;
collector_state()->set_concurrent_cycle_started(true);
_gc_timer_cm->register_gc_start(start_time);
_gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
@ -2296,7 +2506,7 @@ void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {
}
void G1CollectedHeap::register_concurrent_cycle_end() {
if (_concurrent_cycle_started) {
if (collector_state()->concurrent_cycle_started()) {
if (_cm->has_aborted()) {
_gc_tracer_cm->report_concurrent_mode_failure();
}
@ -2305,13 +2515,13 @@ void G1CollectedHeap::register_concurrent_cycle_end() {
_gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
// Clear state variables to prepare for the next concurrent cycle.
_concurrent_cycle_started = false;
collector_state()->set_concurrent_cycle_started(false);
_heap_summary_sent = false;
}
}
void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
if (_concurrent_cycle_started) {
if (collector_state()->concurrent_cycle_started()) {
// This function can be called when:
// the cleanup pause is run
// the concurrent cycle is aborted before the cleanup pause.
@ -2325,22 +2535,6 @@ void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
}
}
G1YCType G1CollectedHeap::yc_type() {
bool is_young = g1_policy()->gcs_are_young();
bool is_initial_mark = g1_policy()->during_initial_mark_pause();
bool is_during_mark = mark_in_progress();
if (is_initial_mark) {
return InitialMark;
} else if (is_during_mark) {
return DuringMark;
} else if (is_young) {
return Normal;
} else {
return Mixed;
}
}
void G1CollectedHeap::collect(GCCause::Cause cause) {
assert_heap_not_locked();
@ -2594,7 +2788,7 @@ void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
HeapRegion* result = _hrm.next_region_in_heap(from);
while (result != NULL && result->is_humongous()) {
while (result != NULL && result->is_pinned()) {
result = _hrm.next_region_in_heap(result);
}
return result;
@ -2902,6 +3096,31 @@ public:
size_t live_bytes() { return _live_bytes; }
};
class VerifyArchiveOopClosure: public OopClosure {
public:
VerifyArchiveOopClosure(HeapRegion *hr) { }
void do_oop(narrowOop *p) { do_oop_work(p); }
void do_oop( oop *p) { do_oop_work(p); }
template <class T> void do_oop_work(T *p) {
oop obj = oopDesc::load_decode_heap_oop(p);
guarantee(obj == NULL || G1MarkSweep::in_archive_range(obj),
err_msg("Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
p2i(p), p2i(obj)));
}
};
class VerifyArchiveRegionClosure: public ObjectClosure {
public:
VerifyArchiveRegionClosure(HeapRegion *hr) { }
// Verify that all object pointers are to archive regions.
void do_object(oop o) {
VerifyArchiveOopClosure checkOop(NULL);
assert(o != NULL, "Should not be here for NULL oops");
o->oop_iterate_no_header(&checkOop);
}
};
class VerifyRegionClosure: public HeapRegionClosure {
private:
bool _par;
@ -2921,6 +3140,13 @@ public:
}
bool doHeapRegion(HeapRegion* r) {
// For archive regions, verify there are no heap pointers to
// non-pinned regions. For all others, verify liveness info.
if (r->is_archive()) {
VerifyArchiveRegionClosure verify_oop_pointers(r);
r->object_iterate(&verify_oop_pointers);
return true;
}
if (!r->is_continues_humongous()) {
bool failures = false;
r->verify(_vo, &failures);
@ -3105,7 +3331,7 @@ bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
switch (vo) {
case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();
case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked() && !hr->is_archive();
default: ShouldNotReachHere();
}
return false; // keep some compilers happy
@ -3116,7 +3342,10 @@ bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
switch (vo) {
case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();
case VerifyOption_G1UseMarkWord: {
HeapRegion* hr = _hrm.addr_to_region((HeapWord*)obj);
return !obj->is_gc_marked() && !hr->is_archive();
}
default: ShouldNotReachHere();
}
return false; // keep some compilers happy
@ -3149,7 +3378,7 @@ void G1CollectedHeap::print_extended_on(outputStream* st) const {
st->cr();
st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
"HS=humongous(starts), HC=humongous(continues), "
"CS=collection set, F=free, TS=gc time stamp, "
"CS=collection set, F=free, A=archive, TS=gc time stamp, "
"PTAMS=previous top-at-mark-start, "
"NTAMS=next top-at-mark-start)");
PrintRegionClosure blk(st);
@ -3251,6 +3480,28 @@ void G1CollectedHeap::print_all_rsets() {
}
#endif // PRODUCT
G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
YoungList* young_list = heap()->young_list();
size_t eden_used_bytes = young_list->eden_used_bytes();
size_t survivor_used_bytes = young_list->survivor_used_bytes();
size_t eden_capacity_bytes =
(g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
VirtualSpaceSummary heap_summary = create_heap_space_summary();
return G1HeapSummary(heap_summary, used(), eden_used_bytes, eden_capacity_bytes, survivor_used_bytes);
}
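
Eden capacity here is derived rather than measured: the policy's young-list target (in regions) is converted to bytes and the survivor portion is subtracted. A standalone sketch with assumed numbers (100-region target, 1 MB regions, 8 MB of survivors) makes the arithmetic concrete:

// Standalone sketch, not HotSpot code: eden capacity as computed in
// create_g1_heap_summary(), with assumed region size and policy target.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t GrainBytes               = 1024 * 1024;     // assumed 1 MB regions
  const size_t young_list_target_length = 100;             // assumed policy target (regions)
  const size_t survivor_used_bytes      = 8 * GrainBytes;  // assumed survivor occupancy

  size_t eden_capacity_bytes =
      (young_list_target_length * GrainBytes) - survivor_used_bytes;

  printf("eden capacity = %zu MB\n", eden_capacity_bytes / (1024 * 1024)); // prints 92
  return 0;
}
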
void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
const G1HeapSummary& heap_summary = create_g1_heap_summary();
gc_tracer->report_gc_heap_summary(when, heap_summary);
const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
gc_tracer->report_metaspace_summary(when, metaspace_summary);
}
G1CollectedHeap* G1CollectedHeap::heap() {
CollectedHeap* heap = Universe::heap();
assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
@ -3587,8 +3838,8 @@ void G1CollectedHeap::log_gc_header() {
gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());
GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
.append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)")
.append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");
.append(collector_state()->gcs_are_young() ? "(young)" : "(mixed)")
.append(collector_state()->during_initial_mark_pause() ? " (initial-mark)" : "");
gclog_or_tty->print("[%s", (const char*)gc_cause_str);
}
@ -3645,29 +3896,29 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
g1_policy()->decide_on_conc_mark_initiation();
// We do not allow initial-mark to be piggy-backed on a mixed GC.
assert(!g1_policy()->during_initial_mark_pause() ||
g1_policy()->gcs_are_young(), "sanity");
assert(!collector_state()->during_initial_mark_pause() ||
collector_state()->gcs_are_young(), "sanity");
// We also do not allow mixed GCs during marking.
assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
// Record whether this pause is an initial mark. When the current
// thread has completed its logging output and it's safe to signal
// the CM thread, the flag's value in the policy has been reset.
bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
// Inner scope for scope based logging, timers, and stats collection
{
EvacuationInfo evacuation_info;
if (g1_policy()->during_initial_mark_pause()) {
if (collector_state()->during_initial_mark_pause()) {
// We are about to start a marking cycle, so we increment the
// full collection counter.
increment_old_marking_cycles_started();
register_concurrent_cycle_start(_gc_timer_stw->gc_start());
}
_gc_tracer_stw->report_yc_type(yc_type());
_gc_tracer_stw->report_yc_type(collector_state()->yc_type());
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
@ -3677,7 +3928,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
workers()->set_active_workers(active_workers);
double pause_start_sec = os::elapsedTime();
g1_policy()->phase_times()->note_gc_start(active_workers, mark_in_progress());
g1_policy()->phase_times()->note_gc_start(active_workers, collector_state()->mark_in_progress());
log_gc_header();
TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
@ -3771,7 +4022,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
_young_list->print();
#endif // YOUNG_LIST_VERBOSE
if (g1_policy()->during_initial_mark_pause()) {
if (collector_state()->during_initial_mark_pause()) {
concurrent_mark()->checkpointRootsInitialPre();
}
@ -3848,6 +4099,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
if (evacuation_failed()) {
_allocator->set_used(recalculate_used());
if (_archive_allocator != NULL) {
_archive_allocator->clear_used();
}
for (uint i = 0; i < ParallelGCThreads; i++) {
if (_evacuation_failed_info_array[i].has_failed()) {
_gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
@ -3859,12 +4113,12 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
_allocator->increase_used(g1_policy()->bytes_copied_during_gc());
}
if (g1_policy()->during_initial_mark_pause()) {
if (collector_state()->during_initial_mark_pause()) {
// We have to do this before we notify the CM threads that
// they can start working to make sure that all the
// appropriate initialization is done on the CM object.
concurrent_mark()->checkpointRootsInitialPost();
set_marking_started();
collector_state()->set_mark_in_progress(true);
// Note that we don't actually trigger the CM thread at
// this point. We do that later when we're sure that
// the current thread has completed its logging output.
@ -4343,7 +4597,7 @@ public:
pss.set_evac_failure_closure(&evac_failure_cl);
bool only_young = _g1h->g1_policy()->gcs_are_young();
bool only_young = _g1h->collector_state()->gcs_are_young();
// Non-IM young GC.
G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, &pss, rp);
@ -4369,7 +4623,7 @@ public:
bool trace_metadata = false;
if (_g1h->g1_policy()->during_initial_mark_pause()) {
if (_g1h->collector_state()->during_initial_mark_pause()) {
// We also need to mark copied objects.
strong_root_cl = &scan_mark_root_cl;
strong_cld_cl = &scan_mark_cld_cl;
@ -5021,7 +5275,7 @@ public:
OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
if (_g1h->g1_policy()->during_initial_mark_pause()) {
if (_g1h->collector_state()->during_initial_mark_pause()) {
// We also need to mark copied objects.
copy_non_heap_cl = &copy_mark_non_heap_cl;
}
@ -5122,7 +5376,7 @@ public:
OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
if (_g1h->g1_policy()->during_initial_mark_pause()) {
if (_g1h->collector_state()->during_initial_mark_pause()) {
// We also need to mark copied objects.
copy_non_heap_cl = &copy_mark_non_heap_cl;
}
@ -5234,7 +5488,7 @@ void G1CollectedHeap::process_discovered_references() {
OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
if (g1_policy()->during_initial_mark_pause()) {
if (collector_state()->during_initial_mark_pause()) {
// We also need to mark copied objects.
copy_non_heap_cl = &copy_mark_non_heap_cl;
}
@ -5342,7 +5596,7 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
G1RootProcessor root_processor(this, n_workers);
G1ParTask g1_par_task(this, _task_queues, &root_processor, n_workers);
// InitialMark needs claim bits to keep track of the marked-through CLDs.
if (g1_policy()->during_initial_mark_pause()) {
if (collector_state()->during_initial_mark_pause()) {
ClassLoaderDataGraph::clear_claimed_marks();
}
@ -5598,7 +5852,7 @@ bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
// We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
// we do the clearing of the next bitmap concurrently. Thus, we can not verify the bitmap
// if we happen to be in that state.
if (mark_in_progress() || !_cmThread->in_progress()) {
if (collector_state()->mark_in_progress() || !_cmThread->in_progress()) {
res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
}
if (!res_p || !res_n) {
@ -6169,13 +6423,18 @@ public:
assert(!r->is_young(), "we should not come across young regions");
if (r->is_humongous()) {
// We ignore humongous regions, we left the humongous set unchanged
// We ignore humongous regions. We left the humongous set unchanged.
} else {
// Objects that were compacted would have ended up on regions
// that were previously old or free.
// that were previously old or free. Archive regions (which are
// old) will not have been touched.
assert(r->is_free() || r->is_old(), "invariant");
// We now consider them old, so register as such.
r->set_old();
// We now consider them old, so register as such. Leave
// archive regions set that way, however, while still adding
// them to the old set.
if (!r->is_archive()) {
r->set_old();
}
_old_set->add(r);
}
_total_used += r->used();
@ -6201,6 +6460,9 @@ void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
if (!free_list_only) {
_allocator->set_used(cl.total_used());
if (_archive_allocator != NULL) {
_archive_allocator->clear_used();
}
}
assert(_allocator->used_unlocked() == recalculate_used(),
err_msg("inconsistent _allocator->used_unlocked(), "
@ -6279,7 +6541,7 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
_hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
check_bitmaps("Old Region Allocation", new_alloc_region);
}
bool during_im = g1_policy()->during_initial_mark_pause();
bool during_im = collector_state()->during_initial_mark_pause();
new_alloc_region->note_start_of_copying(during_im);
return new_alloc_region;
}
@ -6290,7 +6552,7 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
size_t allocated_bytes,
InCSetState dest) {
bool during_im = g1_policy()->during_initial_mark_pause();
bool during_im = collector_state()->during_initial_mark_pause();
alloc_region->note_end_of_copying(during_im);
g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
if (dest.is_young()) {
@ -6301,6 +6563,25 @@ void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
_hr_printer.retire(alloc_region);
}
HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
bool expanded = false;
uint index = _hrm.find_highest_free(&expanded);
if (index != G1_NO_HRM_INDEX) {
if (expanded) {
ergo_verbose1(ErgoHeapSizing,
"attempt heap expansion",
ergo_format_reason("requested address range outside heap bounds")
ergo_format_byte("region size"),
HeapRegion::GrainWords * HeapWordSize);
}
_hrm.allocate_free_regions_starting_at(index, 1);
return region_at(index);
}
return NULL;
}
// Heap region set verification
class VerifyRegionListsClosure : public HeapRegionClosure {
@ -6337,6 +6618,9 @@ public:
assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index()));
_old_count.increment(1u, hr->capacity());
} else {
// There are no other valid region types. Check for one invalid
// one we can identify: pinned without old or humongous set.
assert(!hr->is_pinned(), err_msg("Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index()));
ShouldNotReachHere();
}
return false;

View File

@ -31,6 +31,7 @@
#include "gc/g1/g1AllocationContext.hpp"
#include "gc/g1/g1Allocator.hpp"
#include "gc/g1/g1BiasedArray.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1HRPrinter.hpp"
#include "gc/g1/g1InCSetState.hpp"
#include "gc/g1/g1MonitoringSupport.hpp"
@ -187,6 +188,7 @@ class G1CollectedHeap : public CollectedHeap {
friend class SurvivorGCAllocRegion;
friend class OldGCAllocRegion;
friend class G1Allocator;
friend class G1ArchiveAllocator;
// Closures used in implementation.
friend class G1ParScanThreadState;
@ -249,6 +251,9 @@ private:
// Class that handles the different kinds of allocations.
G1Allocator* _allocator;
// Class that handles archive allocation ranges.
G1ArchiveAllocator* _archive_allocator;
// Statistics for each allocation context
AllocationContextStats _allocation_context_stats;
@ -328,6 +333,9 @@ private:
// (d) cause == _g1_humongous_allocation
bool should_do_concurrent_full_gc(GCCause::Cause cause);
// Tracks the current phase of the collection: young vs. mixed mode, marking state, initial-mark and full-GC status.
G1CollectorState _collector_state;
// Keeps track of how many "old marking cycles" (i.e., Full GCs or
// concurrent cycles) we have started.
volatile uint _old_marking_cycles_started;
@ -336,7 +344,6 @@ private:
// concurrent cycles) we have completed.
volatile uint _old_marking_cycles_completed;
bool _concurrent_cycle_started;
bool _heap_summary_sent;
// This is a non-product method that is helpful for testing. It is
@ -367,6 +374,8 @@ private:
void log_gc_header();
void log_gc_footer(double pause_time_sec);
void trace_heap(GCWhen::Type when, const GCTracer* tracer);
// These are macros so that, if the assert fires, we get the correct
// line number, file, etc.
@ -571,6 +580,10 @@ protected:
void retire_gc_alloc_region(HeapRegion* alloc_region,
size_t allocated_bytes, InCSetState dest);
// Allocate the highest free region in the reserved heap. This will commit
// regions as necessary.
HeapRegion* alloc_highest_free_region();
// - if explicit_gc is true, the GC is for a System.gc() or a heap
// inspection request and should collect the entire heap
// - if clear_all_soft_refs is true, all soft references should be
@ -701,8 +714,6 @@ public:
void register_concurrent_cycle_end();
void trace_heap_after_concurrent_cycle();
G1YCType yc_type();
G1HRPrinter* hr_printer() { return &_hr_printer; }
// Frees a non-humongous region by initializing its contents and
@ -728,6 +739,44 @@ public:
void free_humongous_region(HeapRegion* hr,
FreeRegionList* free_list,
bool par);
// Facility for allocating in 'archive' regions in high heap memory and
// recording the allocated ranges. These should all be called from the
// VM thread at safepoints, without the heap lock held. They can be used
// to create and archive a set of heap regions which can be mapped at the
// same fixed addresses in a subsequent JVM invocation.
void begin_archive_alloc_range();
// Check if the requested size would be too large for an archive allocation.
bool is_archive_alloc_too_large(size_t word_size);
// Allocate memory of the requested size from the archive region. This will
// return NULL if the size is too large or if no memory is available. It
// does not trigger a garbage collection.
HeapWord* archive_mem_allocate(size_t word_size);
// Optionally aligns the end address and returns the allocated ranges in
// an array of MemRegions in order of ascending addresses.
void end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
size_t end_alignment_in_bytes = 0);
// Facility for allocating a fixed range within the heap and marking
// the containing regions as 'archive'. For use at JVM init time, when the
// caller may mmap archived heap data at the specified range(s).
// Verify that the MemRegions specified in the argument array are within the
// reserved heap.
bool check_archive_addresses(MemRegion* range, size_t count);
// Commit the appropriate G1 regions containing the specified MemRegions
// and mark them as 'archive' regions. The regions in the array must be
// non-overlapping and in order of ascending address.
bool alloc_archive_regions(MemRegion* range, size_t count);
// Insert any required filler objects in the G1 regions around the specified
// ranges to make the regions parseable. This must be called after
// alloc_archive_regions, and after class loading has occurred.
void fill_archive_regions(MemRegion* range, size_t count);
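
Taken together, these declarations describe two flows: an archiving run that carves out ranges in high heap memory and hands them back as MemRegions, and a later run that re-commits those same ranges, maps the archived data into them, and fills any gaps so the regions stay parseable. The sketch below shows the intended call order from a hypothetical in-VM caller; it assumes HotSpot headers and types (GrowableArray, MemRegion, ResourceMark), shows both phases inline although in practice they happen in different JVM invocations, and is not code from this change.

// Hypothetical caller inside the VM (sketch only; assumes HotSpot headers and
// will not build outside the JDK tree). Shows the intended call order of the
// archive-allocation interface declared above.
static void example_archive_usage(G1CollectedHeap* g1h) {
  ResourceMark rm;

  // --- Archive creation (VM thread, at a safepoint) ---
  g1h->begin_archive_alloc_range();
  size_t word_size = 64;                                    // assumed object size in words
  if (!g1h->is_archive_alloc_too_large(word_size)) {
    HeapWord* obj = g1h->archive_mem_allocate(word_size);   // NULL if it cannot fit
    // ... caller copies/initializes the archived object at obj ...
  }
  GrowableArray<MemRegion> ranges(2);
  g1h->end_archive_alloc_range(&ranges, 0 /* no extra end alignment */);

  // --- Archive restore (conceptually a later JVM run) ---
  MemRegion* mr = ranges.adr_at(0);
  size_t count  = (size_t) ranges.length();
  if (g1h->check_archive_addresses(mr, count) &&
      g1h->alloc_archive_regions(mr, count)) {
    // ... mmap the archived heap data into the committed ranges ...
    g1h->fill_archive_regions(mr, count);
  }
}
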
protected:
// Shrink the garbage-first heap by at most the given size (in bytes!).
@ -791,7 +840,6 @@ protected:
// The concurrent marker (and the thread it runs in.)
ConcurrentMark* _cm;
ConcurrentMarkThread* _cmThread;
bool _mark_in_progress;
// The concurrent refiner.
ConcurrentG1Refine* _cg1r;
@ -1019,6 +1067,8 @@ public:
return CollectedHeap::G1CollectedHeap;
}
G1CollectorState* collector_state() { return &_collector_state; }
// The current policy object for the collector.
G1CollectorPolicy* g1_policy() const { return _g1_policy; }
@ -1391,6 +1441,11 @@ public:
return word_size > _humongous_object_threshold_in_words;
}
// Returns the humongous threshold for a specific region size
static size_t humongous_threshold_for(size_t region_size) {
return (region_size / 2);
}
// Update mod union table with the set of dirty cards.
void updateModUnion();
@ -1399,17 +1454,6 @@ public:
// bits.
void markModUnionRange(MemRegion mr);
// Records the fact that a marking phase is no longer in progress.
void set_marking_complete() {
_mark_in_progress = false;
}
void set_marking_started() {
_mark_in_progress = true;
}
bool mark_in_progress() {
return _mark_in_progress;
}
// Print the maximum heap capacity.
virtual size_t max_capacity() const;
@ -1448,21 +1492,23 @@ public:
// Determine if an object is dead, given the object and also
// the region to which the object belongs. An object is dead
// iff a) it was not allocated since the last mark and b) it
// is not marked.
// iff a) it was not allocated since the last mark, b) it
// is not marked, and c) it is not in an archive region.
bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
return
!hr->obj_allocated_since_prev_marking(obj) &&
!isMarkedPrev(obj);
!isMarkedPrev(obj) &&
!hr->is_archive();
}
// This function returns true when an object has been
// around since the previous marking and hasn't yet
// been marked during this marking.
// been marked during this marking, and is not in an archive region.
bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
return
!hr->obj_allocated_since_next_marking(obj) &&
!isMarkedNext(obj);
!isMarkedNext(obj) &&
!hr->is_archive();
}
// Determine if an object is dead, given only the object itself.
@ -1522,14 +1568,6 @@ public:
void redirty_logged_cards();
// Verification
// The following is just to alert the verification code
// that a full collection has occurred and that the
// remembered sets are no longer up to date.
bool _full_collection;
void set_full_collection() { _full_collection = true;}
void clear_full_collection() {_full_collection = false;}
bool full_collection() {return _full_collection;}
// Perform any cleanup actions necessary before allowing a verification.
virtual void prepare_for_verify();
@ -1565,6 +1603,8 @@ public:
bool is_obj_dead_cond(const oop obj,
const VerifyOption vo) const;
G1HeapSummary create_g1_heap_summary();
// Printing
virtual void print_on(outputStream* st) const;

View File

@ -29,6 +29,7 @@
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
@ -288,9 +289,9 @@ G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
_evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);
// Now check if G1EvacuationFailureALot is enabled for the current GC type.
const bool gcs_are_young = g1_policy()->gcs_are_young();
const bool during_im = g1_policy()->during_initial_mark_pause();
const bool during_marking = mark_in_progress();
const bool gcs_are_young = collector_state()->gcs_are_young();
const bool during_im = collector_state()->during_initial_mark_pause();
const bool during_marking = collector_state()->mark_in_progress();
_evacuation_failure_alot_for_current_gc &=
evacuation_failure_alot_for_gc_type(gcs_are_young,

View File

@ -107,22 +107,11 @@ G1CollectorPolicy::G1CollectorPolicy() :
_pause_time_target_ms((double) MaxGCPauseMillis),
_gcs_are_young(true),
_during_marking(false),
_in_marking_window(false),
_in_marking_window_im(false),
_recent_prev_end_times_for_all_gcs_sec(
new TruncatedSeq(NumPrevPausesForHeuristics)),
_recent_avg_pause_time_ratio(0.0),
_initiate_conc_mark_if_possible(false),
_during_initial_mark_pause(false),
_last_young_gc(false),
_last_gc_was_young(false),
_eden_used_bytes_before_gc(0),
_survivor_used_bytes_before_gc(0),
_heap_used_bytes_before_gc(0),
@ -334,6 +323,8 @@ void G1CollectorPolicy::post_heap_initialize() {
}
}
G1CollectorState* G1CollectorPolicy::collector_state() { return _g1->collector_state(); }
G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
_min_desired_young_length(0), _max_desired_young_length(0) {
if (FLAG_IS_CMDLINE(NewRatio)) {
@ -552,7 +543,7 @@ void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
uint young_list_target_length = 0;
if (adaptive_young_list_length()) {
if (gcs_are_young()) {
if (collector_state()->gcs_are_young()) {
young_list_target_length =
calculate_young_list_target_length(rs_lengths,
base_min_length,
@ -594,7 +585,7 @@ G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
uint desired_min_length,
uint desired_max_length) {
assert(adaptive_young_list_length(), "pre-condition");
assert(gcs_are_young(), "only call this for young GCs");
assert(collector_state()->gcs_are_young(), "only call this for young GCs");
// In case some edge-condition makes the desired max length too small...
if (desired_max_length <= desired_min_length) {
@ -697,7 +688,7 @@ double G1CollectorPolicy::predict_survivor_regions_evac_time() {
for (HeapRegion * r = _recorded_survivor_head;
r != NULL && r != _recorded_survivor_tail->get_next_young_region();
r = r->get_next_young_region()) {
survivor_regions_evac_time += predict_region_elapsed_time_ms(r, gcs_are_young());
survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young());
}
return survivor_regions_evac_time;
}
@ -782,7 +773,7 @@ void G1CollectorPolicy::record_full_collection_start() {
_full_collection_start_sec = os::elapsedTime();
record_heap_size_info_at_start(true /* full */);
// Release the future to-space so that it is available for compaction into.
_g1->set_full_collection();
collector_state()->set_full_collection(true);
}
void G1CollectorPolicy::record_full_collection_end() {
@ -796,16 +787,16 @@ void G1CollectorPolicy::record_full_collection_end() {
update_recent_gc_times(end_sec, full_gc_time_ms);
_g1->clear_full_collection();
collector_state()->set_full_collection(false);
// "Nuke" the heuristics that control the young/mixed GC
// transitions and make sure we start with young GCs after the Full GC.
set_gcs_are_young(true);
_last_young_gc = false;
clear_initiate_conc_mark_if_possible();
clear_during_initial_mark_pause();
_in_marking_window = false;
_in_marking_window_im = false;
collector_state()->set_gcs_are_young(true);
collector_state()->set_last_young_gc(false);
collector_state()->set_initiate_conc_mark_if_possible(false);
collector_state()->set_during_initial_mark_pause(false);
collector_state()->set_in_marking_window(false);
collector_state()->set_in_marking_window_im(false);
_short_lived_surv_rate_group->start_adding_regions();
// also call this on any additional surv rate groups
@ -845,7 +836,7 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
_collection_set_bytes_used_before = 0;
_bytes_copied_during_gc = 0;
_last_gc_was_young = false;
collector_state()->set_last_gc_was_young(false);
// do that for any other surv rate groups
_short_lived_surv_rate_group->stop_adding_regions();
@ -856,15 +847,15 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
void G1CollectorPolicy::record_concurrent_mark_init_end(double
mark_init_elapsed_time_ms) {
_during_marking = true;
assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
clear_during_initial_mark_pause();
collector_state()->set_during_marking(true);
assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
collector_state()->set_during_initial_mark_pause(false);
_cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}
void G1CollectorPolicy::record_concurrent_mark_remark_start() {
_mark_remark_start_sec = os::elapsedTime();
_during_marking = false;
collector_state()->set_during_marking(false);
}
void G1CollectorPolicy::record_concurrent_mark_remark_end() {
@ -882,8 +873,8 @@ void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
}
void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
_last_young_gc = true;
_in_marking_window = false;
collector_state()->set_last_young_gc(true);
collector_state()->set_in_marking_window(false);
}
void G1CollectorPolicy::record_concurrent_pause() {
@ -904,7 +895,7 @@ bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc
size_t alloc_byte_size = alloc_word_size * HeapWordSize;
if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
if (gcs_are_young() && !_last_young_gc) {
if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) {
ergo_verbose5(ErgoConcCycles,
"request concurrent cycle initiation",
ergo_format_reason("occupancy higher than threshold")
@ -959,14 +950,14 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
}
#endif // PRODUCT
last_pause_included_initial_mark = during_initial_mark_pause();
last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
if (last_pause_included_initial_mark) {
record_concurrent_mark_init_end(0.0);
} else if (need_to_start_conc_mark("end of GC")) {
// Note: this might have already been set, if during the last
// pause we decided to start a cycle but at the beginning of
// this pause we decided to postpone it. That's OK.
set_initiate_conc_mark_if_possible();
collector_state()->set_initiate_conc_mark_if_possible(true);
}
_mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
@ -1028,37 +1019,37 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
}
}
bool new_in_marking_window = _in_marking_window;
bool new_in_marking_window = collector_state()->in_marking_window();
bool new_in_marking_window_im = false;
if (last_pause_included_initial_mark) {
new_in_marking_window = true;
new_in_marking_window_im = true;
}
if (_last_young_gc) {
if (collector_state()->last_young_gc()) {
// This is supposed to be the "last young GC" before we start
// doing mixed GCs. Here we decide whether to start mixed GCs or not.
if (!last_pause_included_initial_mark) {
if (next_gc_should_be_mixed("start mixed GCs",
"do not start mixed GCs")) {
set_gcs_are_young(false);
collector_state()->set_gcs_are_young(false);
}
} else {
ergo_verbose0(ErgoMixedGCs,
"do not start mixed GCs",
ergo_format_reason("concurrent cycle is about to start"));
}
_last_young_gc = false;
collector_state()->set_last_young_gc(false);
}
if (!_last_gc_was_young) {
if (!collector_state()->last_gc_was_young()) {
// This is a mixed GC. Here we decide whether to continue doing
// mixed GCs or not.
if (!next_gc_should_be_mixed("continue mixed GCs",
"do not continue mixed GCs")) {
set_gcs_are_young(true);
collector_state()->set_gcs_are_young(true);
}
}
@ -1077,7 +1068,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
double cost_per_entry_ms = 0.0;
if (cards_scanned > 10) {
cost_per_entry_ms = phase_times()->average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
if (_last_gc_was_young) {
if (collector_state()->last_gc_was_young()) {
_cost_per_entry_ms_seq->add(cost_per_entry_ms);
} else {
_mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
@ -1087,7 +1078,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
if (_max_rs_lengths > 0) {
double cards_per_entry_ratio =
(double) cards_scanned / (double) _max_rs_lengths;
if (_last_gc_was_young) {
if (collector_state()->last_gc_was_young()) {
_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
} else {
_mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
@ -1119,7 +1110,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
if (copied_bytes > 0) {
cost_per_byte_ms = phase_times()->average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
if (_in_marking_window) {
if (collector_state()->in_marking_window()) {
_cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
} else {
_cost_per_byte_ms_seq->add(cost_per_byte_ms);
@ -1162,8 +1153,8 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
_rs_lengths_seq->add((double) _max_rs_lengths);
}
_in_marking_window = new_in_marking_window;
_in_marking_window_im = new_in_marking_window_im;
collector_state()->set_in_marking_window(new_in_marking_window);
collector_state()->set_in_marking_window_im(new_in_marking_window_im);
_free_regions_at_end_of_collection = _g1->num_free_regions();
update_young_list_target_length();
@ -1301,7 +1292,7 @@ double
G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
size_t rs_length = predict_rs_length_diff();
size_t card_num;
if (gcs_are_young()) {
if (collector_state()->gcs_are_young()) {
card_num = predict_young_card_num(rs_length);
} else {
card_num = predict_non_young_card_num(rs_length);
@ -1467,7 +1458,7 @@ bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
ergo_format_reason("requested by GC cause")
ergo_format_str("GC cause"),
GCCause::to_string(gc_cause));
set_initiate_conc_mark_if_possible();
collector_state()->set_initiate_conc_mark_if_possible(true);
return true;
} else {
ergo_verbose1(ErgoConcCycles,
@ -1484,13 +1475,13 @@ G1CollectorPolicy::decide_on_conc_mark_initiation() {
// We are about to decide on whether this pause will be an
// initial-mark pause.
// First, during_initial_mark_pause() should not be already set. We
// First, collector_state()->during_initial_mark_pause() should not already be set. We
// will set it here if we have to. However, it should be cleared by
// the end of the pause (it's only set for the duration of an
// initial-mark pause).
assert(!during_initial_mark_pause(), "pre-condition");
assert(!collector_state()->during_initial_mark_pause(), "pre-condition");
if (initiate_conc_mark_if_possible()) {
if (collector_state()->initiate_conc_mark_if_possible()) {
// We had noticed on a previous pause that the heap occupancy has
// gone over the initiating threshold and we should start a
// concurrent marking cycle. So we might initiate one.
@ -1501,10 +1492,10 @@ G1CollectorPolicy::decide_on_conc_mark_initiation() {
// it has completed the last one. So we can go ahead and
// initiate a new cycle.
set_during_initial_mark_pause();
collector_state()->set_during_initial_mark_pause(true);
// We do not allow mixed GCs during marking.
if (!gcs_are_young()) {
set_gcs_are_young(true);
if (!collector_state()->gcs_are_young()) {
collector_state()->set_gcs_are_young(true);
ergo_verbose0(ErgoMixedGCs,
"end mixed GCs",
ergo_format_reason("concurrent cycle is about to start"));
@ -1512,7 +1503,7 @@ G1CollectorPolicy::decide_on_conc_mark_initiation() {
// And we can now clear initiate_conc_mark_if_possible() as
// we've already acted on it.
clear_initiate_conc_mark_if_possible();
collector_state()->set_initiate_conc_mark_if_possible(false);
ergo_verbose0(ErgoConcCycles,
"initiate concurrent cycle",
@ -1686,7 +1677,7 @@ void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_l
// retiring the current allocation region) or a concurrent
// refine thread (RSet sampling).
double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
size_t used_bytes = hr->used();
_inc_cset_recorded_rs_lengths += rs_length;
_inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
@ -1721,7 +1712,7 @@ void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
_inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
_inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
@ -1914,9 +1905,9 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInf
ergo_format_ms("target pause time"),
_pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
_last_gc_was_young = gcs_are_young() ? true : false;
collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());
if (_last_gc_was_young) {
if (collector_state()->last_gc_was_young()) {
_trace_young_gen_time_data.increment_young_collection_count();
} else {
_trace_young_gen_time_data.increment_mixed_collection_count();
@ -1967,7 +1958,7 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInf
// Set the start of the non-young choice time.
double non_young_start_time_sec = young_end_time_sec;
if (!gcs_are_young()) {
if (!collector_state()->gcs_are_young()) {
CollectionSetChooser* cset_chooser = _collectionSetChooser;
cset_chooser->verify();
const uint min_old_cset_length = calc_min_old_cset_length();
@ -2013,7 +2004,7 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInf
break;
}
double predicted_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
double predicted_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
if (check_time_remaining) {
if (predicted_time_ms > time_remaining_ms) {
// Too expensive for the current CSet.

View File

@ -27,6 +27,7 @@
#include "gc/g1/collectionSetChooser.hpp"
#include "gc/g1/g1Allocator.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1MMUTracker.hpp"
#include "gc/shared/collectorPolicy.hpp"
@ -193,9 +194,6 @@ private:
double _stop_world_start;
// indicates whether we are in young or mixed GC mode
bool _gcs_are_young;
uint _young_list_target_length;
uint _young_list_fixed_length;
@ -203,12 +201,6 @@ private:
// locker is active. This should be >= _young_list_target_length;
uint _young_list_max_length;
bool _last_gc_was_young;
bool _during_marking;
bool _in_marking_window;
bool _in_marking_window_im;
SurvRateGroup* _short_lived_surv_rate_group;
SurvRateGroup* _survivor_surv_rate_group;
// add here any more surv rate groups
@ -218,10 +210,6 @@ private:
double _reserve_factor;
uint _reserve_regions;
bool during_marking() {
return _during_marking;
}
enum PredictionConstants {
TruncatedSeqLength = 10
};
@ -363,7 +351,7 @@ public:
}
double predict_rs_scan_time_ms(size_t card_num) {
if (gcs_are_young()) {
if (collector_state()->gcs_are_young()) {
return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
} else {
return predict_mixed_rs_scan_time_ms(card_num);
@ -390,7 +378,7 @@ public:
}
double predict_object_copy_time_ms(size_t bytes_to_copy) {
if (_in_marking_window && !_in_marking_window_im) {
if (collector_state()->during_concurrent_mark()) {
return predict_object_copy_time_ms_during_cm(bytes_to_copy);
} else {
return (double) bytes_to_copy *
@ -428,7 +416,7 @@ public:
double predict_survivor_regions_evac_time();
void cset_regions_freed() {
bool propagate = _last_gc_was_young && !_in_marking_window;
bool propagate = collector_state()->should_propagate();
_short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
_survivor_surv_rate_group->all_surviving_words_recorded(propagate);
// also call it on any more surv rate groups
@ -552,33 +540,6 @@ private:
return _recent_avg_pause_time_ratio;
}
// At the end of a pause we check the heap occupancy and we decide
// whether we will start a marking cycle during the next pause. If
// we decide that we want to do that, we will set this parameter to
// true. So, this parameter will stay true between the end of a
// pause and the beginning of a subsequent pause (not necessarily
// the next one, see the comments on the next field) when we decide
// that we will indeed start a marking cycle and do the initial-mark
// work.
volatile bool _initiate_conc_mark_if_possible;
// If initiate_conc_mark_if_possible() is set at the beginning of a
// pause, it is a suggestion that the pause should start a marking
// cycle by doing the initial-mark work. However, it is possible
// that the concurrent marking thread is still finishing up the
// previous marking cycle (e.g., clearing the next marking
// bitmap). If that is the case we cannot start a new cycle and
// we'll have to wait for the concurrent marking thread to finish
// what it is doing. In this case we will postpone the marking cycle
// initiation decision for the next pause. When we eventually decide
// to start a cycle, we will set _during_initial_mark_pause which
// will stay true until the end of the initial-mark pause and it's
// the condition that indicates that a pause is doing the
// initial-mark work.
volatile bool _during_initial_mark_pause;
bool _last_young_gc;
// This set of variables tracks the collector efficiency, in order to
// determine whether we should initiate a new marking.
double _cur_mark_stop_world_time_ms;
@ -647,6 +608,8 @@ public:
return CollectorPolicy::G1CollectorPolicyKind;
}
G1CollectorState* collector_state();
G1GCPhaseTimes* phase_times() const { return _phase_times; }
// Check the current value of the young list RSet lengths and
@ -786,14 +749,6 @@ public:
void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT
bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; }
void set_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = true; }
void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }
bool during_initial_mark_pause() { return _during_initial_mark_pause; }
void set_during_initial_mark_pause() { _during_initial_mark_pause = true; }
void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }
// This sets the initiate_conc_mark_if_possible() flag to start a
// new cycle, as long as we are not already in one. It's best if it
// is called during a safepoint when the test whether a cycle is in
@ -837,13 +792,6 @@ public:
return _young_list_max_length;
}
bool gcs_are_young() {
return _gcs_are_young;
}
void set_gcs_are_young(bool gcs_are_young) {
_gcs_are_young = gcs_are_young;
}
bool adaptive_young_list_length() {
return _young_gen_sizer->adaptive_young_list_length();
}

View File

@ -0,0 +1,141 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_G1_G1COLLECTORSTATE_HPP
#define SHARE_VM_GC_G1_G1COLLECTORSTATE_HPP
#include "utilities/globalDefinitions.hpp"
#include "gc/g1/g1YCTypes.hpp"
// Various state variables that indicate
// the phase of the G1 collection.
class G1CollectorState VALUE_OBJ_CLASS_SPEC {
// Indicates whether we are in "full young" or "mixed" GC mode.
bool _gcs_are_young;
// Was the last GC "young"?
bool _last_gc_was_young;
// Is this the "last young GC" before we start doing mixed GCs?
// Set after a concurrent mark has completed.
bool _last_young_gc;
// If initiate_conc_mark_if_possible() is set at the beginning of a
// pause, it is a suggestion that the pause should start a marking
// cycle by doing the initial-mark work. However, it is possible
// that the concurrent marking thread is still finishing up the
// previous marking cycle (e.g., clearing the next marking
// bitmap). If that is the case we cannot start a new cycle and
// we'll have to wait for the concurrent marking thread to finish
// what it is doing. In this case we will postpone the marking cycle
// initiation decision for the next pause. When we eventually decide
// to start a cycle, we will set _during_initial_mark_pause which
// will stay true until the end of the initial-mark pause and it's
// the condition that indicates that a pause is doing the
// initial-mark work.
volatile bool _during_initial_mark_pause;
// At the end of a pause we check the heap occupancy and we decide
// whether we will start a marking cycle during the next pause. If
// we decide that we want to do that, we will set this parameter to
// true. So, this parameter will stay true between the end of a
// pause and the beginning of a subsequent pause (not necessarily
// the next one, see the comments on the next field) when we decide
// that we will indeed start a marking cycle and do the initial-mark
// work.
volatile bool _initiate_conc_mark_if_possible;
// NOTE: if some of these are synonyms for others,
// the redundant fields should be eliminated. XXX
bool _during_marking;
bool _mark_in_progress;
bool _in_marking_window;
bool _in_marking_window_im;
bool _concurrent_cycle_started;
bool _full_collection;
public:
G1CollectorState() :
_gcs_are_young(true),
_last_gc_was_young(false),
_last_young_gc(false),
_during_initial_mark_pause(false),
_initiate_conc_mark_if_possible(false),
_during_marking(false),
_mark_in_progress(false),
_in_marking_window(false),
_in_marking_window_im(false),
_concurrent_cycle_started(false),
_full_collection(false) {}
// Setters
void set_gcs_are_young(bool v) { _gcs_are_young = v; }
void set_last_gc_was_young(bool v) { _last_gc_was_young = v; }
void set_last_young_gc(bool v) { _last_young_gc = v; }
void set_during_initial_mark_pause(bool v) { _during_initial_mark_pause = v; }
void set_initiate_conc_mark_if_possible(bool v) { _initiate_conc_mark_if_possible = v; }
void set_during_marking(bool v) { _during_marking = v; }
void set_mark_in_progress(bool v) { _mark_in_progress = v; }
void set_in_marking_window(bool v) { _in_marking_window = v; }
void set_in_marking_window_im(bool v) { _in_marking_window_im = v; }
void set_concurrent_cycle_started(bool v) { _concurrent_cycle_started = v; }
void set_full_collection(bool v) { _full_collection = v; }
// Getters
bool gcs_are_young() { return _gcs_are_young; }
bool last_gc_was_young() { return _last_gc_was_young; }
bool last_young_gc() { return _last_young_gc; }
bool during_initial_mark_pause() { return _during_initial_mark_pause; }
bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; }
bool during_marking() { return _during_marking; }
bool mark_in_progress() { return _mark_in_progress; }
bool in_marking_window() { return _in_marking_window; }
bool in_marking_window_im() { return _in_marking_window_im; }
bool concurrent_cycle_started() { return _concurrent_cycle_started; }
bool full_collection() { return _full_collection; }
// Composite booleans (clients worry about flickering)
bool during_concurrent_mark() {
return (_in_marking_window && !_in_marking_window_im);
}
bool should_propagate() { // XXX should have a more suitable state name or abstraction for this
return (_last_young_gc && !_in_marking_window);
}
G1YCType yc_type() {
if (during_initial_mark_pause()) {
return InitialMark;
} else if (mark_in_progress()) {
return DuringMark;
} else if (gcs_are_young()) {
return Normal;
} else {
return Mixed;
}
}
};
#endif /* SHARE_VM_GC_G1_G1COLLECTORSTATE_HPP */
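A hedged usage sketch (not part of the commit): consulting the consolidated G1CollectorState instead of the scattered policy booleans it replaces. The g1h pointer and the collector_state() accessor are assumed names here; later hunks in this commit access the state the same way (e.g. _g1h->collector_state()->during_initial_mark_pause()).

  G1CollectorState* state = g1h->collector_state();
  state->set_initiate_conc_mark_if_possible(true);   // suggest starting a marking cycle at the next pause
  if (state->during_initial_mark_pause()) {
    // this pause performs the initial-mark work
  }
  G1YCType type = state->yc_type();                  // InitialMark, DuringMark, Normal or Mixed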

View File

@ -26,6 +26,7 @@
#include "gc/g1/concurrentMark.inline.hpp"
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1EvacFailure.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1_globals.hpp"
@ -186,10 +187,10 @@ public:
}
bool doHeapRegion(HeapRegion *hr) {
bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
bool during_conc_mark = _g1h->mark_in_progress();
bool during_initial_mark = _g1h->collector_state()->during_initial_mark_pause();
bool during_conc_mark = _g1h->collector_state()->mark_in_progress();
assert(!hr->is_humongous(), "sanity");
assert(!hr->is_pinned(), err_msg("Unexpected pinned region at index %u", hr->hrm_index()));
assert(hr->in_collection_set(), "bad CS");
if (_hrclaimer->claim_region(hr->hrm_index())) {

View File

@ -54,6 +54,7 @@ const char* G1HRPrinter::region_type_name(RegionType type) {
case SingleHumongous: return "SingleH";
case StartsHumongous: return "StartsH";
case ContinuesHumongous: return "ContinuesH";
case Archive: return "Archive";
default: ShouldNotReachHere();
}
// trying to keep the Windows compiler happy

View File

@ -52,7 +52,8 @@ public:
Old,
SingleHumongous,
StartsHumongous,
ContinuesHumongous
ContinuesHumongous,
Archive
} RegionType;
typedef enum {

View File

@ -57,6 +57,9 @@
class HeapRegion;
bool G1MarkSweep::_archive_check_enabled = false;
G1ArchiveRegionMap G1MarkSweep::_archive_region_map;
void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
bool clear_all_softrefs) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
@ -212,7 +215,7 @@ class G1AdjustPointersClosure: public HeapRegionClosure {
// point all the oops to the new location
MarkSweep::adjust_pointers(obj);
}
} else {
} else if (!r->is_pinned()) {
// This really ought to be "as_CompactibleSpace"...
r->adjust_pointers();
}
@ -275,7 +278,7 @@ public:
}
hr->reset_during_compaction();
}
} else {
} else if (!hr->is_pinned()) {
hr->compact();
}
return false;
@ -298,6 +301,26 @@ void G1MarkSweep::mark_sweep_phase4() {
}
void G1MarkSweep::enable_archive_object_check() {
assert(!_archive_check_enabled, "archive range check already enabled");
_archive_check_enabled = true;
size_t length = Universe::heap()->max_capacity();
_archive_region_map.initialize((HeapWord*)Universe::heap()->base(),
(HeapWord*)Universe::heap()->base() + length,
HeapRegion::GrainBytes);
}
void G1MarkSweep::mark_range_archive(MemRegion range) {
assert(_archive_check_enabled, "archive range check not enabled");
_archive_region_map.set_by_address(range, true);
}
bool G1MarkSweep::in_archive_range(oop object) {
// This is the out-of-line part of the is_archive_object test, done separately

// to avoid additional performance impact when the check is not enabled.
return _archive_region_map.get_by_address((HeapWord*)object);
}
void G1MarkSweep::prepare_compaction_work(G1PrepareCompactClosure* blk) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
g1h->heap_region_iterate(blk);
@ -357,7 +380,7 @@ bool G1PrepareCompactClosure::doHeapRegion(HeapRegion* hr) {
} else {
assert(hr->is_continues_humongous(), "Invalid humongous.");
}
} else {
} else if (!hr->is_pinned()) {
prepare_for_compaction(hr, hr->end());
}
return false;

View File

@ -44,6 +44,7 @@ class ReferenceProcessor;
//
// Class unloading will only occur when a full gc is invoked.
class G1PrepareCompactClosure;
class G1ArchiveRegionMap;
class G1MarkSweep : AllStatic {
public:
@ -54,7 +55,22 @@ class G1MarkSweep : AllStatic {
static STWGCTimer* gc_timer() { return GenMarkSweep::_gc_timer; }
static SerialOldTracer* gc_tracer() { return GenMarkSweep::_gc_tracer; }
// Create the _archive_region_map which is used to identify archive objects.
static void enable_archive_object_check();
// Mark the regions containing the specified address range as archive regions.
static void mark_range_archive(MemRegion range);
// Check if an object is in an archive region using the _archive_region_map.
static bool in_archive_range(oop object);
// Check if archive object checking is enabled, to avoid calling in_archive_range
// unnecessarily.
static bool archive_check_enabled() { return G1MarkSweep::_archive_check_enabled; }
private:
static bool _archive_check_enabled;
static G1ArchiveRegionMap _archive_region_map;
// Mark live objects
static void mark_sweep_phase1(bool& marked_for_deopt,
@ -93,4 +109,12 @@ class G1PrepareCompactClosure : public HeapRegionClosure {
bool doHeapRegion(HeapRegion* hr);
};
// G1ArchiveRegionMap is a boolean array used to mark G1 regions as
// archive regions. This allows a quick check for whether an object
// should not be marked because it is in an archive region.
class G1ArchiveRegionMap : public G1BiasedMappedArray<bool> {
protected:
bool default_value() const { return false; }
};
#endif // SHARE_VM_GC_G1_G1MARKSWEEP_HPP
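A hedged sketch (not part of the commit) of the intended call sequence for the archive-object check, using only the API declared above; archive_mr and obj are assumed names.

  G1MarkSweep::enable_archive_object_check();      // build the _archive_region_map once
  G1MarkSweep::mark_range_archive(archive_mr);     // archive_mr: a MemRegion covering the mapped string space
  // later, during marking / pointer adjustment:
  if (G1MarkSweep::archive_check_enabled() && G1MarkSweep::in_archive_range(obj)) {
    // obj lives in an archive region: it is neither marked nor forwarded by mark-sweep
  }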

View File

@ -51,7 +51,7 @@ void G1ParClosureSuper::set_par_scan_thread_state(G1ParScanThreadState* par_scan
_worker_id = par_scan_state->queue_num();
assert(_worker_id < ParallelGCThreads,
err_msg("The given worker id %u must be less than the number of threads " UINTX_FORMAT, _worker_id, ParallelGCThreads));
err_msg("The given worker id %u must be less than the number of threads %u", _worker_id, ParallelGCThreads));
}
// Generate G1 specialized oop_oop_iterate functions.

View File

@ -627,7 +627,7 @@ void G1RemSet::print_summary_info(G1RemSetSummary * summary, const char * header
void G1RemSet::prepare_for_verify() {
if (G1HRRSFlushLogBuffersOnVerify &&
(VerifyBeforeGC || VerifyAfterGC)
&& (!_g1->full_collection() || G1VerifyRSetsDuringFullGC)) {
&& (!_g1->collector_state()->full_collection() || G1VerifyRSetsDuringFullGC)) {
cleanupHRRS();
_g1->set_refine_cte_cl_concurrency(false);
if (SafepointSynchronize::is_at_safepoint()) {

View File

@ -30,6 +30,7 @@
#include "gc/g1/bufferingOopClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1RemSet.inline.hpp"
#include "gc/g1/g1RootProcessor.hpp"
@ -199,7 +200,7 @@ void G1RootProcessor::evacuate_roots(OopClosure* scan_non_heap_roots,
// as implicitly live).
{
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SATBFiltering, worker_i);
if (!_process_strong_tasks->is_task_claimed(G1RP_PS_filter_satb_buffers) && _g1h->mark_in_progress()) {
if (!_process_strong_tasks->is_task_claimed(G1RP_PS_filter_satb_buffers) && _g1h->collector_state()->mark_in_progress()) {
JavaThread::satb_mark_queue_set().filter_thread_buffers();
}
}

View File

@ -23,12 +23,14 @@
*/
#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "gc/g1/g1Log.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1StringDedupQueue.hpp"
#include "gc/g1/g1StringDedupTable.hpp"
#include "gc/g1/g1StringDedupThread.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
G1StringDedupThread* G1StringDedupThread::_thread = NULL;
@ -55,11 +57,36 @@ G1StringDedupThread* G1StringDedupThread::thread() {
return _thread;
}
class G1StringDedupSharedClosure: public OopClosure {
private:
G1StringDedupStat& _stat;
public:
G1StringDedupSharedClosure(G1StringDedupStat& stat) : _stat(stat) {}
virtual void do_oop(oop* p) { ShouldNotReachHere(); }
virtual void do_oop(narrowOop* p) {
oop java_string = oopDesc::load_decode_heap_oop(p);
G1StringDedupTable::deduplicate(java_string, _stat);
}
};
// The CDS archive does not include the string deduplication table. Only the string
// table is saved in the archive. The shared strings from the CDS archive need to be
// added to the string deduplication table before deduplication occurs. That is
// done at the beginning of the G1StringDedupThread (see G1StringDedupThread::run()
// below).
void G1StringDedupThread::deduplicate_shared_strings(G1StringDedupStat& stat) {
G1StringDedupSharedClosure sharedStringDedup(stat);
StringTable::shared_oops_do(&sharedStringDedup);
}
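A hedged note (not part of the commit) on the ordering run() establishes below: the archived strings are pushed into the deduplication table once, before the main loop, so they are present before any ordinary deduplication candidates are processed.

  // Order of calls in G1StringDedupThread::run() (see the hunk below):
  initialize_in_thread();
  wait_for_universe_init();
  deduplicate_shared_strings(total_stat);   // archived strings enter the dedup table first
  // ... then the main deduplication loop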
void G1StringDedupThread::run() {
G1StringDedupStat total_stat;
initialize_in_thread();
wait_for_universe_init();
deduplicate_shared_strings(total_stat);
// Main loop
for (;;) {

View File

@ -52,6 +52,8 @@ public:
static G1StringDedupThread* thread();
virtual void run();
void deduplicate_shared_strings(G1StringDedupStat& stat);
};
#endif // SHARE_VM_GC_G1_G1STRINGDEDUPTHREAD_HPP

View File

@ -103,6 +103,10 @@ size_t HeapRegion::max_region_size() {
return HeapRegionBounds::max_size();
}
size_t HeapRegion::min_region_size_in_words() {
return HeapRegionBounds::min_size() >> LogHeapWordSize;
}
void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
size_t region_size = G1HeapRegionSize;
if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
@ -711,12 +715,12 @@ public:
_n_failures++;
}
if (!_g1h->full_collection() || G1VerifyRSetsDuringFullGC) {
if (!_g1h->collector_state()->full_collection() || G1VerifyRSetsDuringFullGC) {
HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
HeapRegion* to = _g1h->heap_region_containing(obj);
if (from != NULL && to != NULL &&
from != to &&
!to->is_humongous()) {
!to->is_pinned()) {
jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
jbyte cv_field = *_bs->byte_for_const(p);
const jbyte dirty = CardTableModRefBS::dirty_card_val();

View File

@ -331,6 +331,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
}
static size_t max_region_size();
static size_t min_region_size_in_words();
// It sets up the heap region size (GrainBytes / GrainWords), as
// well as other related fields that are based on the heap region
@ -417,6 +418,15 @@ class HeapRegion: public G1OffsetTableContigSpace {
bool is_old() const { return _type.is_old(); }
// A pinned region contains objects which are not moved by garbage collections.
// Humongous regions and archive regions are pinned.
bool is_pinned() const { return _type.is_pinned(); }
// An archive region is a pinned region, also tagged as old, which
// should not be marked during mark/sweep. This allows the address
// space to be shared by JVM instances.
bool is_archive() const { return _type.is_archive(); }
// For a humongous region, region in which it starts.
HeapRegion* humongous_start_region() const {
return _humongous_start_region;
@ -670,6 +680,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
void set_old() { _type.set_old(); }
void set_archive() { _type.set_archive(); }
// Determine if an object has been allocated since the last
// mark performed by the collector. This returns true iff the object
// is within the unmarked area of the region.

View File

@ -278,6 +278,55 @@ uint HeapRegionManager::find_unavailable_from_idx(uint start_idx, uint* res_idx)
return num_regions;
}
uint HeapRegionManager::find_highest_free(bool* expanded) {
// Loop downwards from the highest region index, looking for an
// entry which is either free or not yet committed. If not yet
// committed, expand_at that index.
uint curr = max_length() - 1;
while (true) {
HeapRegion *hr = _regions.get_by_index(curr);
if (hr == NULL) {
uint res = expand_at(curr, 1);
if (res == 1) {
*expanded = true;
return curr;
}
} else {
if (hr->is_free()) {
*expanded = false;
return curr;
}
}
if (curr == 0) {
return G1_NO_HRM_INDEX;
}
curr--;
}
}
bool HeapRegionManager::allocate_containing_regions(MemRegion range, size_t* commit_count) {
size_t commits = 0;
uint start_index = (uint)_regions.get_index_by_address(range.start());
uint last_index = (uint)_regions.get_index_by_address(range.last());
// Ensure that each G1 region in the range is free, returning false if not.
// Commit those that are not yet available, and keep count.
for (uint curr_index = start_index; curr_index <= last_index; curr_index++) {
if (!is_available(curr_index)) {
commits++;
expand_at(curr_index, 1);
}
HeapRegion* curr_region = _regions.get_by_index(curr_index);
if (!curr_region->is_free()) {
return false;
}
}
allocate_free_regions_starting_at(start_index, (last_index - start_index) + 1);
*commit_count = commits;
return true;
}
void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const {
const uint start_index = hrclaimer->start_region_for_worker(worker_id);

View File

@ -221,6 +221,16 @@ public:
HeapRegion* next_region_in_heap(const HeapRegion* r) const;
// Find the highest free or uncommitted region in the reserved heap,
// and if uncommitted, commit it. If none are available, return G1_NO_HRM_INDEX.
// Set the 'expanded' boolean true if a new region was committed.
uint find_highest_free(bool* expanded);
// Allocate the regions that contain the address range specified, committing the
// regions if necessary. Return false if any of the regions is already committed
// and not free, and return the number of regions newly committed in commit_count.
bool allocate_containing_regions(MemRegion range, size_t* commit_count);
// Apply blk->doHeapRegion() on all committed regions in address order,
// terminating the iteration early if doHeapRegion() returns true.
void iterate(HeapRegionClosure* blk) const;
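A hedged usage sketch (not part of the commit) for the two new queries; hrm and range are assumed names, and the G1CollectedHeap::alloc_archive_regions() path used by filemap.cpp later in this commit is the expected caller of allocate_containing_regions().

  size_t commits = 0;
  if (!hrm->allocate_containing_regions(range, &commits)) {
    // some region overlapping 'range' is committed and in use; the archive cannot be placed there
  } else if (commits > 0) {
    // 'commits' regions were newly committed to back the range
  }

  bool expanded = false;
  uint idx = hrm->find_highest_free(&expanded);   // G1_NO_HRM_INDEX if nothing is available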

View File

@ -817,7 +817,7 @@ OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
// This can be done by either mutator threads together with the
// concurrent refinement threads or GC threads.
uint HeapRegionRemSet::num_par_rem_sets() {
return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), (uint)ParallelGCThreads);
return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
}
HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,

View File

@ -42,7 +42,8 @@ void HeapRegionSetBase::verify_region(HeapRegion* hr) {
assert(hr->is_humongous() == regions_humongous(), err_msg("Wrong humongous state for region %u and set %s", hr->hrm_index(), name()));
assert(hr->is_free() == regions_free(), err_msg("Wrong free state for region %u and set %s", hr->hrm_index(), name()));
assert(!hr->is_free() || hr->is_empty(), err_msg("Free region %u is not empty for set %s", hr->hrm_index(), name()));
assert(!hr->is_empty() || hr->is_free(), err_msg("Empty region %u is not free for set %s", hr->hrm_index(), name()));
assert(!hr->is_empty() || hr->is_free() || hr->is_archive(),
err_msg("Empty region %u is not free or archive for set %s", hr->hrm_index(), name()));
assert(hr->rem_set()->verify_ready_for_par_iteration(), err_msg("Wrong iteration state %u", hr->hrm_index()));
}
#endif

View File

@ -33,6 +33,7 @@ bool HeapRegionType::is_valid(Tag tag) {
case StartsHumongousTag:
case ContinuesHumongousTag:
case OldTag:
case ArchiveTag:
return true;
}
return false;
@ -47,6 +48,7 @@ const char* HeapRegionType::get_str() const {
case StartsHumongousTag: return "HUMS";
case ContinuesHumongousTag: return "HUMC";
case OldTag: return "OLD";
case ArchiveTag: return "ARC";
}
ShouldNotReachHere();
// keep some compilers happy
@ -62,6 +64,7 @@ const char* HeapRegionType::get_short_str() const {
case StartsHumongousTag: return "HS";
case ContinuesHumongousTag: return "HC";
case OldTag: return "O";
case ArchiveTag: return "A";
}
ShouldNotReachHere();
// keep some compilers happy

View File

@ -44,15 +44,18 @@ private:
//
// 0000 0 [ 0] Free
//
// 0001 0 Young Mask
// 0001 0 [ 2] Young Mask
// 0001 0 [ 2] Eden
// 0001 1 [ 3] Survivor
//
// 0010 0 Humongous Mask
// 0010 0 [ 4] Starts Humongous
// 0010 1 [ 5] Continues Humongous
// 0010 0 [ 4] Humongous Mask
// 0100 0 [ 8] Pinned Mask
// 0110 0 [12] Starts Humongous
// 0110 1 [13] Continues Humongous
//
// 01000 [ 8] Old
// 1000 0 [16] Old Mask
//
// 1100 0 [24] Archive
typedef enum {
FreeTag = 0,
@ -61,10 +64,14 @@ private:
SurvTag = YoungMask + 1,
HumongousMask = 4,
StartsHumongousTag = HumongousMask,
ContinuesHumongousTag = HumongousMask + 1,
PinnedMask = 8,
StartsHumongousTag = HumongousMask | PinnedMask,
ContinuesHumongousTag = HumongousMask | PinnedMask + 1,
OldTag = 8
OldMask = 16,
OldTag = OldMask,
ArchiveTag = PinnedMask | OldMask
} Tag;
volatile Tag _tag;
@ -108,7 +115,13 @@ public:
bool is_starts_humongous() const { return get() == StartsHumongousTag; }
bool is_continues_humongous() const { return get() == ContinuesHumongousTag; }
bool is_old() const { return get() == OldTag; }
bool is_archive() const { return get() == ArchiveTag; }
// is_old regions may or may not also be pinned
bool is_old() const { return (get() & OldMask) != 0; }
// is_pinned regions may be archive or humongous
bool is_pinned() const { return (get() & PinnedMask) != 0; }
// Setters
@ -123,6 +136,8 @@ public:
void set_old() { set(OldTag); }
void set_archive() { set_from(ArchiveTag, FreeTag); }
// Misc
const char* get_str() const;
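The new tag values fold the pinned and old properties into bit masks, so is_old() and is_pinned() reduce to single mask tests. A standalone, compilable sketch (illustrative only; constants copied from the table above, not taken from the header):

  #include <cassert>

  int main() {
    const int HumongousMask = 4, PinnedMask = 8, OldMask = 16;
    const int StartsHumongousTag    = HumongousMask | PinnedMask;      // 12
    const int ContinuesHumongousTag = HumongousMask | PinnedMask + 1;  // 13 ('+' binds tighter than '|')
    const int OldTag                = OldMask;                         // 16
    const int ArchiveTag            = PinnedMask | OldMask;            // 24
    assert((ArchiveTag & OldMask)    != 0);   // archive regions answer is_old()
    assert((ArchiveTag & PinnedMask) != 0);   // ... and is_pinned()
    assert((StartsHumongousTag & PinnedMask) != 0);
    assert((ContinuesHumongousTag & PinnedMask) != 0);
    assert((OldTag & PinnedMask) == 0);       // plain old regions are not pinned
    return 0;
  }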

View File

@ -484,12 +484,12 @@ void GCTaskManager::set_active_gang() {
assert(!all_workers_active() || active_workers() == ParallelGCThreads,
err_msg("all_workers_active() is incorrect: "
"active %d ParallelGCThreads " UINTX_FORMAT, active_workers(),
"active %d ParallelGCThreads %u", active_workers(),
ParallelGCThreads));
if (TraceDynamicGCThreads) {
gclog_or_tty->print_cr("GCTaskManager::set_active_gang(): "
"all_workers_active() %d workers %d "
"active %d ParallelGCThreads " UINTX_FORMAT,
"active %d ParallelGCThreads %u",
all_workers_active(), workers(), active_workers(),
ParallelGCThreads);
}

View File

@ -255,7 +255,7 @@ void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) {
which_stack_index = which;
assert(manager->active_workers() == ParallelGCThreads,
err_msg("all_workers_active has been incorrectly set: "
" active %d ParallelGCThreads " UINTX_FORMAT, manager->active_workers(),
" active %d ParallelGCThreads %u", manager->active_workers(),
ParallelGCThreads));
} else {
which_stack_index = ParCompactionManager::pop_recycled_stack_index();
@ -334,7 +334,7 @@ void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
which_stack_index = which;
assert(manager->active_workers() == ParallelGCThreads,
err_msg("all_workers_active has been incorrectly set: "
" active %d ParallelGCThreads " UINTX_FORMAT, manager->active_workers(),
" active %d ParallelGCThreads %u", manager->active_workers(),
ParallelGCThreads));
} else {
which_stack_index = stack_index();

View File

@ -170,8 +170,8 @@ void ParCompactionManager::verify_region_list_empty(uint list_index) {
}
ParCompactionManager*
ParCompactionManager::gc_thread_compaction_manager(int index) {
assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
ParCompactionManager::gc_thread_compaction_manager(uint index) {
assert(index < ParallelGCThreads, "index out of range");
assert(_manager_array != NULL, "Sanity");
return _manager_array[index];
}

View File

@ -133,7 +133,7 @@ private:
RegionTaskQueue* region_stack() { return _region_stack; }
void set_region_stack(RegionTaskQueue* v) { _region_stack = v; }
inline static ParCompactionManager* manager_array(int index);
inline static ParCompactionManager* manager_array(uint index);
inline static RegionTaskQueue* region_list(int index) {
return _region_list[index];
@ -177,7 +177,7 @@ private:
void follow_class_loader(ClassLoaderData* klass);
// Access function for compaction managers
static ParCompactionManager* gc_thread_compaction_manager(int index);
static ParCompactionManager* gc_thread_compaction_manager(uint index);
static bool steal(int queue_num, int* seed, oop& t);
static bool steal_objarray(int queue_num, int* seed, ObjArrayTask& t);
@ -229,10 +229,9 @@ private:
};
};
inline ParCompactionManager* ParCompactionManager::manager_array(int index) {
inline ParCompactionManager* ParCompactionManager::manager_array(uint index) {
assert(_manager_array != NULL, "access of NULL manager_array");
assert(index >= 0 && index <= (int)ParallelGCThreads,
"out of range manager_array access");
assert(index <= ParallelGCThreads, "out of range manager_array access");
return _manager_array[index];
}

View File

@ -96,7 +96,7 @@ void PSMarkSweep::invoke(bool maximum_heap_compaction) {
heap->collector_policy()->should_clear_all_soft_refs();
uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
UIntXFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}

View File

@ -832,10 +832,10 @@ void PSParallelCompact::post_initialize() {
_ref_processor =
new ReferenceProcessor(mr, // span
ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
(uint) ParallelGCThreads, // mt processing degree
true, // mt discovery
(uint) ParallelGCThreads, // mt discovery degree
true, // atomic_discovery
ParallelGCThreads, // mt processing degree
true, // mt discovery
ParallelGCThreads, // mt discovery degree
true, // atomic_discovery
&_is_alive_closure); // non-header is alive closure
_counters = new CollectorCounters("PSParallelCompact", 1);

View File

@ -75,8 +75,8 @@ bool PSPromotionManager::should_scavenge(narrowOop* p, bool check_to_space) {
return PSScavenge::should_scavenge(p, check_to_space);
}
PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(int index) {
assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(uint index) {
assert(index < ParallelGCThreads, "index out of range");
assert(_manager_array != NULL, "Sanity");
return &_manager_array[index];
}

View File

@ -90,7 +90,7 @@ class PSPromotionManager VALUE_OBJ_CLASS_SPEC {
static PSOldGen* old_gen() { return _old_gen; }
static MutableSpace* young_space() { return _young_space; }
inline static PSPromotionManager* manager_array(int index);
inline static PSPromotionManager* manager_array(uint index);
template <class T> inline void claim_or_forward_internal_depth(T* p);
// On the task queues we push reference locations as well as
@ -154,7 +154,7 @@ class PSPromotionManager VALUE_OBJ_CLASS_SPEC {
static void pre_scavenge();
static bool post_scavenge(YoungGCTracer& gc_tracer);
static PSPromotionManager* gc_thread_promotion_manager(int index);
static PSPromotionManager* gc_thread_promotion_manager(uint index);
static PSPromotionManager* vm_thread_promotion_manager();
static bool steal_depth(int queue_num, int* seed, StarTask& t);

View File

@ -33,9 +33,9 @@
#include "gc/shared/taskqueue.inline.hpp"
#include "oops/oop.inline.hpp"
inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
inline PSPromotionManager* PSPromotionManager::manager_array(uint index) {
assert(_manager_array != NULL, "access of NULL manager_array");
assert(index >= 0 && index <= (int)ParallelGCThreads, "out of range manager_array access");
assert(index <= ParallelGCThreads, "out of range manager_array access");
return &_manager_array[index];
}

View File

@ -845,9 +845,9 @@ void PSScavenge::initialize() {
_ref_processor =
new ReferenceProcessor(mr, // span
ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
(uint) ParallelGCThreads, // mt processing degree
ParallelGCThreads, // mt processing degree
true, // mt discovery
(uint) ParallelGCThreads, // mt discovery degree
ParallelGCThreads, // mt discovery degree
true, // atomic_discovery
NULL); // header provides liveness info

View File

@ -313,7 +313,7 @@ void MarkSweep::restore_marks() {
MarkSweep::IsAliveClosure MarkSweep::is_alive;
bool MarkSweep::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked(); }
bool MarkSweep::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked() || is_archive_object(p); }
MarkSweep::KeepAliveClosure MarkSweep::keep_alive;

View File

@ -147,6 +147,9 @@ class MarkSweep : AllStatic {
// Reference Processing
static ReferenceProcessor* const ref_processor() { return _ref_processor; }
// Archive Object handling
static inline bool is_archive_object(oop object);
static STWGCTimer* gc_timer() { return _gc_timer; }
static SerialOldTracer* gc_tracer() { return _gc_tracer; }

View File

@ -37,6 +37,7 @@
#include "utilities/stack.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1MarkSweep.hpp"
#endif // INCLUDE_ALL_GCS
inline void MarkSweep::mark_object(oop obj) {
@ -57,6 +58,15 @@ inline void MarkSweep::mark_object(oop obj) {
}
}
inline bool MarkSweep::is_archive_object(oop object) {
#if INCLUDE_ALL_GCS
return (G1MarkSweep::archive_check_enabled() &&
G1MarkSweep::in_archive_range(object));
#else
return false;
#endif
}
inline void MarkSweep::follow_klass(Klass* klass) {
oop op = klass->klass_holder();
MarkSweep::mark_and_push(&op);
@ -74,7 +84,8 @@ template <class T> inline void MarkSweep::follow_root(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if (!obj->mark()->is_marked()) {
if (!obj->mark()->is_marked() &&
!is_archive_object(obj)) {
mark_object(obj);
follow_object(obj);
}
@ -87,7 +98,8 @@ template <class T> inline void MarkSweep::mark_and_push(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if (!obj->mark()->is_marked()) {
if (!obj->mark()->is_marked() &&
!is_archive_object(obj)) {
mark_object(obj);
_marking_stack.push(obj);
}
@ -111,15 +123,18 @@ template <class T> inline void MarkSweep::adjust_pointer(T* p) {
assert(Universe::heap()->is_in(obj), "should be in heap");
oop new_obj = oop(obj->mark()->decode_pointer());
assert(new_obj != NULL || // is forwarding ptr?
assert(is_archive_object(obj) || // no forwarding of archive objects
new_obj != NULL || // is forwarding ptr?
obj->mark() == markOopDesc::prototype() || // not gc marked?
(UseBiasedLocking && obj->mark()->has_bias_pattern()),
// not gc marked?
"should be forwarded");
if (new_obj != NULL) {
assert(Universe::heap()->is_in_reserved(new_obj),
"should be in object space");
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
if (!is_archive_object(obj)) {
assert(Universe::heap()->is_in_reserved(new_obj),
"should be in object space");
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
}
}
}
}

View File

@ -88,9 +88,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
static int _fire_out_of_memory_count;
#endif
// Used for filler objects (static, but initialized in ctor).
static size_t _filler_array_max_size;
GCHeapLog* _gc_heap_log;
// Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used
@ -102,6 +99,9 @@ class CollectedHeap : public CHeapObj<mtInternal> {
BarrierSet* _barrier_set;
bool _is_gc_active;
// Used for filler objects (static, but initialized in ctor).
static size_t _filler_array_max_size;
unsigned int _total_collections; // ... started
unsigned int _total_full_collections; // ... started
NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)

View File

@ -785,7 +785,7 @@ HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
// free memory should be here, especially if they are expensive. If this
// attempt fails, an OOM exception will be thrown.
{
UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted
UIntXFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted
gch->do_collection(true /* full */,
true /* clear_all_soft_refs */,

View File

@ -78,11 +78,13 @@ class MetaspaceSizes : public StackObj {
class GCHeapSummary;
class PSHeapSummary;
class G1HeapSummary;
class GCHeapSummaryVisitor {
public:
virtual void visit(const GCHeapSummary* heap_summary) const = 0;
virtual void visit(const PSHeapSummary* heap_summary) const {}
virtual void visit(const G1HeapSummary* heap_summary) const {}
};
class GCHeapSummary : public StackObj {
@ -125,6 +127,22 @@ class PSHeapSummary : public GCHeapSummary {
}
};
class G1HeapSummary : public GCHeapSummary {
size_t _edenUsed;
size_t _edenCapacity;
size_t _survivorUsed;
public:
G1HeapSummary(VirtualSpaceSummary& heap_space, size_t heap_used, size_t edenUsed, size_t edenCapacity, size_t survivorUsed) :
GCHeapSummary(heap_space, heap_used), _edenUsed(edenUsed), _edenCapacity(edenCapacity), _survivorUsed(survivorUsed) { }
const size_t edenUsed() const { return _edenUsed; }
const size_t edenCapacity() const { return _edenCapacity; }
const size_t survivorUsed() const { return _survivorUsed; }
virtual void accept(GCHeapSummaryVisitor* visitor) const {
visitor->visit(this);
}
};
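A hedged sketch (not part of the commit): how the new summary type reaches the visitor overload added above. heap_space, used and the eden/survivor figures are assumed inputs supplied by the G1 heap.

  G1HeapSummary summary(heap_space, used, eden_used, eden_capacity, survivor_used);
  summary.accept(visitor);   // dispatches to GCHeapSummaryVisitor::visit(const G1HeapSummary*)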
class MetaspaceSummary : public StackObj {
size_t _capacity_until_GC;
MetaspaceSizes _meta_space;

View File

@ -44,6 +44,7 @@ class GCHeapSummary;
class MetaspaceChunkFreeListSummary;
class MetaspaceSummary;
class PSHeapSummary;
class G1HeapSummary;
class ReferenceProcessorStats;
class TimePartitions;
class BoolObjectClosure;

View File

@ -263,6 +263,20 @@ class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
}
}
void visit(const G1HeapSummary* g1_heap_summary) const {
visit((GCHeapSummary*)g1_heap_summary);
EventG1HeapSummary e;
if (e.should_commit()) {
e.set_gcId(_gc_id.id());
e.set_when((u1)_when);
e.set_edenUsedSize(g1_heap_summary->edenUsed());
e.set_edenTotalSize(g1_heap_summary->edenCapacity());
e.set_survivorUsedSize(g1_heap_summary->survivorUsed());
e.commit();
}
}
void visit(const PSHeapSummary* ps_heap_summary) const {
visit((GCHeapSummary*)ps_heap_summary);

View File

@ -197,7 +197,7 @@ bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
if (UseG1GC && ClassUnloadingWithConcurrentMark) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
g1h->g1_policy()->set_initiate_conc_mark_if_possible();
g1h->g1_policy()->collector_state()->set_initiate_conc_mark_if_possible(true);
GCCauseSetter x(g1h, _gc_cause);

View File

@ -309,6 +309,8 @@ IRT_ENTRY(void, InterpreterRuntime::throw_StackOverflowError(JavaThread* thread)
Handle exception = get_preinitialized_exception(
SystemDictionary::StackOverflowError_klass(),
CHECK);
// Increment counter for hs_err file reporting
Atomic::inc(&Exceptions::_stack_overflow_errors);
THROW_HANDLE(exception);
IRT_END

View File

@ -28,10 +28,14 @@
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/altHashing.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.hpp"
#endif
#include "memory/filemap.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
#include "oops/objArrayOop.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
@ -165,6 +169,9 @@ void FileMapInfo::FileMapHeader::populate(FileMapInfo* mapinfo, size_t alignment
_version = _current_version;
_alignment = alignment;
_obj_alignment = ObjectAlignmentInBytes;
_narrow_oop_mode = Universe::narrow_oop_mode();
_narrow_oop_shift = Universe::narrow_oop_shift();
_max_heap_size = MaxHeapSize;
_classpath_entry_table_size = mapinfo->_classpath_entry_table_size;
_classpath_entry_table = mapinfo->_classpath_entry_table;
_classpath_entry_size = mapinfo->_classpath_entry_size;
@ -440,7 +447,16 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
} else {
si->_file_offset = _file_offset;
}
si->_base = base;
if (MetaspaceShared::is_string_region(region)) {
assert((base - (char*)Universe::narrow_oop_base()) % HeapWordSize == 0, "Sanity");
if (base != NULL) {
si->_addr._offset = (intx)oopDesc::encode_heap_oop_not_null((oop)base);
} else {
si->_addr._offset = 0;
}
} else {
si->_addr._base = base;
}
si->_used = size;
si->_capacity = capacity;
si->_read_only = read_only;
@ -449,6 +465,38 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
write_bytes_aligned(base, (int)size);
}
// Write the string space. The string space contains one or multiple GC (G1) regions.
// When the total string space size is smaller than one GC region at dump time,
// only one string region is used for shared strings.
//
// If the total string space size is bigger than one GC region, more than one GC
// region is allocated for shared strings. The first/bottom GC region might be a
// partial GC region, with the empty portion at the higher address within that region.
// The non-empty portion of the first region is written into the archive as one string
// region. The rest are consecutive full GC regions, if they exist, and can be written
// out in one chunk as another string region.
void FileMapInfo::write_string_regions(GrowableArray<MemRegion> *regions) {
for (int i = MetaspaceShared::first_string;
i < MetaspaceShared::first_string + MetaspaceShared::max_strings; i++) {
char* start = NULL;
size_t size = 0;
if (regions->is_nonempty()) {
if (i == MetaspaceShared::first_string) {
MemRegion first = regions->first();
start = (char*)first.start();
size = first.byte_size();
} else {
int len = regions->length();
if (len > 1) {
start = (char*)regions->at(1).start();
size = (char*)regions->at(len - 1).end() - start;
}
}
}
write_region(i, start, size, size, false, false);
}
}
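A worked example (illustrative, not from the commit) of the layout the comment above describes. With 1 MB G1 regions and 2.5 MB of shared string data:

  // String1: the used 0.5 MB at the bottom of the first (partial) GC region
  // String2: the two following full GC regions, written as a single 2 MB chunk
  //
  // matching the two write_region() calls the loop performs for first_string and
  // first_string + 1; with at most one region of data only String1 is non-empty.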
// Dump bytes to file -- at the current file position.
@ -513,7 +561,8 @@ void FileMapInfo::close() {
// JVM/TI RedefineClasses() support:
// Remap the shared readonly space to shared readwrite, private.
bool FileMapInfo::remap_shared_readonly_as_readwrite() {
struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[0];
int idx = 0;
struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[idx];
if (!si->_read_only) {
// the space is already readwrite so we are done
return true;
@ -523,15 +572,16 @@ bool FileMapInfo::remap_shared_readonly_as_readwrite() {
if (!open_for_read()) {
return false;
}
char *addr = _header->region_addr(idx);
char *base = os::remap_memory(_fd, _full_path, si->_file_offset,
si->_base, size, false /* !read_only */,
addr, size, false /* !read_only */,
si->_allow_exec);
close();
if (base == NULL) {
fail_continue("Unable to remap shared readonly space (errno=%d).", errno);
return false;
}
if (base != si->_base) {
if (base != addr) {
fail_continue("Unable to remap shared readonly space at required address.");
return false;
}
@ -542,7 +592,7 @@ bool FileMapInfo::remap_shared_readonly_as_readwrite() {
// Map the whole region at once, assumed to be allocated contiguously.
ReservedSpace FileMapInfo::reserve_shared_memory() {
struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[0];
char* requested_addr = si->_base;
char* requested_addr = _header->region_addr(0);
size_t size = FileMapInfo::shared_spaces_size();
@ -560,20 +610,31 @@ ReservedSpace FileMapInfo::reserve_shared_memory() {
}
// Memory map a region in the address space.
static const char* shared_region_name[] = { "ReadOnly", "ReadWrite", "MiscData", "MiscCode"};
static const char* shared_region_name[] = { "ReadOnly", "ReadWrite", "MiscData", "MiscCode",
"String1", "String2" };
char* FileMapInfo::map_region(int i) {
assert(!MetaspaceShared::is_string_region(i), "sanity");
struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
size_t used = si->_used;
size_t alignment = os::vm_allocation_granularity();
size_t size = align_size_up(used, alignment);
char *requested_addr = si->_base;
char *requested_addr = _header->region_addr(i);
bool read_only;
// If a tool agent is in use (debugging enabled), we must map the address space RW
if (JvmtiExport::can_modify_any_class() || JvmtiExport::can_walk_any_space()) {
read_only = false;
} else {
read_only = si->_read_only;
}
// map the contents of the CDS archive in this memory
char *base = os::map_memory(_fd, _full_path, si->_file_offset,
requested_addr, size, si->_read_only,
requested_addr, size, read_only,
si->_allow_exec);
if (base == NULL || base != si->_base) {
if (base == NULL || base != requested_addr) {
fail_continue("Unable to map %s shared space at required address.", shared_region_name[i]);
return NULL;
}
@ -582,15 +643,119 @@ char* FileMapInfo::map_region(int i) {
// in method FileMapInfo::reserve_shared_memory(), which is not called on Windows.
MemTracker::record_virtual_memory_type((address)base, mtClassShared);
#endif
return base;
}
MemRegion *string_ranges = NULL;
int num_ranges = 0;
bool FileMapInfo::map_string_regions() {
#if INCLUDE_ALL_GCS
if (UseG1GC && UseCompressedOops && UseCompressedClassPointers) {
if (narrow_oop_mode() == Universe::narrow_oop_mode() &&
narrow_oop_shift() == Universe::narrow_oop_shift()) {
string_ranges = new MemRegion[MetaspaceShared::max_strings];
struct FileMapInfo::FileMapHeader::space_info* si;
for (int i = MetaspaceShared::first_string;
i < MetaspaceShared::first_string + MetaspaceShared::max_strings; i++) {
si = &_header->_space[i];
size_t used = si->_used;
if (used > 0) {
size_t size = used;
char* requested_addr = (char*)((void*)oopDesc::decode_heap_oop_not_null(
(narrowOop)si->_addr._offset));
string_ranges[num_ranges] = MemRegion((HeapWord*)requested_addr, size / HeapWordSize);
num_ranges ++;
}
}
if (num_ranges == 0) {
return true; // no shared string data
}
// Check that ranges are within the java heap
if (!G1CollectedHeap::heap()->check_archive_addresses(string_ranges, num_ranges)) {
fail_continue("Unable to allocate shared string space: range is not "
"within java heap.");
return false;
}
// allocate from java heap
if (!G1CollectedHeap::heap()->alloc_archive_regions(string_ranges, num_ranges)) {
fail_continue("Unable to allocate shared string space: range is "
"already in use.");
return false;
}
// Map the string data. No need to call MemTracker::record_virtual_memory_type()
// for mapped string regions as they are part of the reserved java heap, which
// is already recorded.
for (int i = 0; i < num_ranges; i++) {
si = &_header->_space[MetaspaceShared::first_string + i];
char* addr = (char*)string_ranges[i].start();
char* base = os::map_memory(_fd, _full_path, si->_file_offset,
addr, string_ranges[i].byte_size(), si->_read_only,
si->_allow_exec);
if (base == NULL || base != addr) {
fail_continue("Unable to map shared string space at required address.");
return false;
}
}
return true; // the shared string data is mapped successfully
} else {
// the narrow oop encodings differ, so the shared string data is not used
if (PrintSharedSpaces && _header->_space[MetaspaceShared::first_string]._used > 0) {
tty->print_cr("Shared string data from the CDS archive is being ignored. "
"The current CompressedOops encoding differs from that archived "
"due to heap size change. The archive was dumped using max heap "
"size %dM.", max_heap_size() >> 20);
}
}
} else {
if (PrintSharedSpaces && _header->_space[MetaspaceShared::first_string]._used > 0) {
tty->print_cr("Shared string data from the CDS archive is being ignored. UseG1GC, "
"UseCompressedOops and UseCompressedClassPointers are required.");
}
}
// if we get here, the shared string data is not mapped
assert(string_ranges == NULL && num_ranges == 0, "sanity");
StringTable::ignore_shared_strings(true);
#endif
return true;
}
bool FileMapInfo::verify_string_regions() {
for (int i = MetaspaceShared::first_string;
i < MetaspaceShared::first_string + MetaspaceShared::max_strings; i++) {
if (!verify_region_checksum(i)) {
return false;
}
}
return true;
}
void FileMapInfo::fixup_string_regions() {
if (string_ranges != NULL) {
G1CollectedHeap::heap()->fill_archive_regions(string_ranges, num_ranges);
}
}
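A hedged sketch (not part of the commit) of the load-time sequence these helpers add up to; the call sites appear in metaspaceShared.cpp and universe.cpp later in this commit.

  // During CDS mapping (MetaspaceShared::map_shared_spaces):
  mapinfo->map_string_regions();      // map archived strings directly into the G1 heap
  mapinfo->verify_string_regions();   // per-region CRC check, only if VerifySharedSpaces
  // After Universe initialization (Universe::genesis):
  MetaspaceShared::fixup_shared_string_regions();   // calls fixup_string_regions(), which fills
                                                    // the archive regions via
                                                    // G1CollectedHeap::fill_archive_regions()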
bool FileMapInfo::verify_region_checksum(int i) {
if (!VerifySharedSpaces) {
return true;
}
const char* buf = _header->_space[i]._base;
size_t sz = _header->_space[i]._used;
if (sz == 0) {
return true; // no data
}
if (MetaspaceShared::is_string_region(i) && StringTable::shared_string_ignored()) {
return true; // shared string data are not mapped
}
const char* buf = _header->region_addr(i);
int crc = ClassLoader::crc32(0, buf, (jint)sz);
if (crc != _header->_space[i]._crc) {
fail_continue("Checksum verification failed.");
@ -602,14 +767,36 @@ bool FileMapInfo::verify_region_checksum(int i) {
// Unmap a memory region in the address space.
void FileMapInfo::unmap_region(int i) {
assert(!MetaspaceShared::is_string_region(i), "sanity");
struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
size_t used = si->_used;
size_t size = align_size_up(used, os::vm_allocation_granularity());
if (!os::unmap_memory(si->_base, size)) {
if (used == 0) {
return;
}
char* addr = _header->region_addr(i);
if (!os::unmap_memory(addr, size)) {
fail_stop("Unable to unmap shared space.");
}
}
void FileMapInfo::unmap_string_regions() {
for (int i = MetaspaceShared::first_string;
i < MetaspaceShared::first_string + MetaspaceShared::max_strings; i++) {
struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
size_t used = si->_used;
if (used > 0) {
size_t size = align_size_up(used, os::vm_allocation_granularity());
char* addr = (char*)((void*)oopDesc::decode_heap_oop_not_null(
(narrowOop)si->_addr._offset));
if (!os::unmap_memory(addr, size)) {
fail_stop("Unable to unmap shared space.");
}
}
}
}
void FileMapInfo::assert_mark(bool check) {
if (!check) {
@ -637,11 +824,6 @@ bool FileMapInfo::_validating_classpath_entry_table = false;
bool FileMapInfo::initialize() {
assert(UseSharedSpaces, "UseSharedSpaces expected.");
if (JvmtiExport::can_modify_any_class() || JvmtiExport::can_walk_any_space()) {
fail_continue("Tool agent requires sharing to be disabled.");
return false;
}
if (!open_for_read()) {
return false;
}
@ -658,6 +840,15 @@ bool FileMapInfo::initialize() {
return true;
}
char* FileMapInfo::FileMapHeader::region_addr(int idx) {
if (MetaspaceShared::is_string_region(idx)) {
return (char*)((void*)oopDesc::decode_heap_oop_not_null(
(narrowOop)_space[idx]._addr._offset));
} else {
return _space[idx]._addr._base;
}
}
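A hedged note (not part of the commit): for string regions the header stores the compressed-oop encoding of the region base rather than a raw pointer, so region_addr() can recompute the address under the load-time heap base, provided the narrow-oop mode and shift match (checked in map_string_regions() above). The two sides mirror each other:

  // dump side, in write_region():
  si->_addr._offset = (intx)oopDesc::encode_heap_oop_not_null((oop)base);
  // load side, in region_addr():
  return (char*)((void*)oopDesc::decode_heap_oop_not_null((narrowOop)_space[idx]._addr._offset));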
int FileMapInfo::FileMapHeader::compute_crc() {
char* header = data();
// start computing from the field after _crc
@ -729,8 +920,12 @@ bool FileMapInfo::validate_header() {
// True if the p is within the mapped shared space, otherwise, false.
bool FileMapInfo::is_in_shared_space(const void* p) {
for (int i = 0; i < MetaspaceShared::n_regions; i++) {
if (p >= _header->_space[i]._base &&
p < _header->_space[i]._base + _header->_space[i]._used) {
char *base;
if (MetaspaceShared::is_string_region(i) && _header->_space[i]._used == 0) {
continue;
}
base = _header->region_addr(i);
if (p >= base && p < base + _header->_space[i]._used) {
return true;
}
}
@ -742,9 +937,10 @@ void FileMapInfo::print_shared_spaces() {
gclog_or_tty->print_cr("Shared Spaces:");
for (int i = 0; i < MetaspaceShared::n_regions; i++) {
struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
char *base = _header->region_addr(i);
gclog_or_tty->print(" %s " INTPTR_FORMAT "-" INTPTR_FORMAT,
shared_region_name[i],
si->_base, si->_base + si->_used);
base, base + si->_used);
}
}
@ -753,12 +949,14 @@ void FileMapInfo::stop_sharing_and_unmap(const char* msg) {
FileMapInfo *map_info = FileMapInfo::current_info();
if (map_info) {
map_info->fail_continue("%s", msg);
for (int i = 0; i < MetaspaceShared::n_regions; i++) {
if (map_info->_header->_space[i]._base != NULL) {
for (int i = 0; i < MetaspaceShared::num_non_strings; i++) {
char *addr = map_info->_header->region_addr(i);
if (addr != NULL && !MetaspaceShared::is_string_region(i)) {
map_info->unmap_region(i);
map_info->_header->_space[i]._base = NULL;
map_info->_header->_space[i]._addr._base = NULL;
}
}
map_info->unmap_string_regions();
} else if (DumpSharedSpaces) {
fail_stop("%s", msg);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -94,11 +94,18 @@ public:
int _version; // (from enum, above.)
size_t _alignment; // how shared archive should be aligned
int _obj_alignment; // value of ObjectAlignmentInBytes
int _narrow_oop_shift; // compressed oop encoding shift
uintx _max_heap_size; // java max heap size during dumping
Universe::NARROW_OOP_MODE _narrow_oop_mode; // compressed oop encoding mode
struct space_info {
int _crc; // crc checksum of the current space
size_t _file_offset; // sizeof(this) rounded to vm page size
char* _base; // copy-on-write base address
union {
char* _base; // copy-on-write base address
intx _offset; // offset from the compressed oop encoding base, only used
// by string space
} _addr;
size_t _capacity; // for validity checking
size_t _used; // for setting space top on read
bool _read_only; // read only space?
@ -138,6 +145,8 @@ public:
size_t _classpath_entry_size;
SharedClassPathEntry* _classpath_entry_table;
char* region_addr(int idx);
virtual bool validate();
virtual void populate(FileMapInfo* info, size_t alignment);
int compute_crc();
@ -166,8 +175,10 @@ public:
void invalidate();
int version() { return _header->_version; }
size_t alignment() { return _header->_alignment; }
Universe::NARROW_OOP_MODE narrow_oop_mode() { return _header->_narrow_oop_mode; }
int narrow_oop_shift() { return _header->_narrow_oop_shift; }
uintx max_heap_size() { return _header->_max_heap_size; }
size_t space_capacity(int i) { return _header->_space[i]._capacity; }
char* region_base(int i) { return _header->_space[i]._base; }
struct FileMapHeader* header() { return _header; }
static FileMapInfo* current_info() {
@ -185,10 +196,15 @@ public:
void write_space(int i, Metaspace* space, bool read_only);
void write_region(int region, char* base, size_t size,
size_t capacity, bool read_only, bool allow_exec);
void write_string_regions(GrowableArray<MemRegion> *regions);
void write_bytes(const void* buffer, int count);
void write_bytes_aligned(const void* buffer, int count);
char* map_region(int i);
bool map_string_regions();
bool verify_string_regions();
void fixup_string_regions();
void unmap_region(int i);
void unmap_string_regions();
bool verify_region_checksum(int i);
void close();
bool is_open() { return _file_open; }

View File

@ -3132,10 +3132,21 @@ void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, a
initialize_class_space(metaspace_rs);
if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
gclog_or_tty->print_cr("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
compressed_class_space_size(), p2i(metaspace_rs.base()), p2i(requested_addr));
print_compressed_class_space(gclog_or_tty, requested_addr);
}
}
void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
if (_class_space_list != NULL) {
address base = (address)_class_space_list->current_virtual_space()->bottom();
st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
compressed_class_space_size(), p2i(base));
if (requested_addr != 0) {
st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
}
st->cr();
}
}
@ -3296,7 +3307,7 @@ void Metaspace::global_initialize() {
// Map in spaces now also
if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
cds_total = FileMapInfo::shared_spaces_size();
cds_address = (address)mapinfo->region_base(0);
cds_address = (address)mapinfo->header()->region_addr(0);
} else {
assert(!mapinfo->is_open() && !UseSharedSpaces,
"archive file not closed or shared spaces not disabled.");

View File

@ -254,6 +254,8 @@ class Metaspace : public CHeapObj<mtClass> {
// Debugging support
void verify();
static void print_compressed_class_space(outputStream* st, const char* requested_addr = 0);
class AllocRecordClosure : public StackObj {
public:
virtual void doit(address ptr, MetaspaceObj::Type type, int byte_size) = 0;

View File

@ -422,6 +422,8 @@ private:
GrowableArray<Klass*> *_class_promote_order;
VirtualSpace _md_vs;
VirtualSpace _mc_vs;
CompactHashtableWriter* _string_cht;
GrowableArray<MemRegion> *_string_regions;
public:
VM_PopulateDumpSharedSpace(ClassLoaderData* loader_data,
@ -540,7 +542,7 @@ void VM_PopulateDumpSharedSpace::doit() {
NOT_PRODUCT(SystemDictionary::verify();)
// Copy the symbol table, and the system dictionary to the shared
// Copy the symbol table, string table, and the system dictionary to the shared
// space in usable form. Copy the hashtable
// buckets first [read-write], then copy the linked lists of entries
// [read-only].
@ -548,6 +550,15 @@ void VM_PopulateDumpSharedSpace::doit() {
NOT_PRODUCT(SymbolTable::verify());
handle_misc_data_space_failure(SymbolTable::copy_compact_table(&md_top, md_end));
size_t ss_bytes = 0;
char* ss_low;
// The string space has maximum two regions. See FileMapInfo::write_string_regions() for details.
_string_regions = new GrowableArray<MemRegion>(2);
NOT_PRODUCT(StringTable::verify());
handle_misc_data_space_failure(StringTable::copy_compact_table(&md_top, md_end, _string_regions,
&ss_bytes));
ss_low = _string_regions->is_empty() ? NULL : (char*)_string_regions->first().start();
SystemDictionary::reverse();
SystemDictionary::copy_buckets(&md_top, md_end);
@ -576,7 +587,8 @@ void VM_PopulateDumpSharedSpace::doit() {
const size_t rw_alloced = rw_space->capacity_bytes_slow(Metaspace::NonClassType);
const size_t md_alloced = md_end-md_low;
const size_t mc_alloced = mc_end-mc_low;
const size_t total_alloced = ro_alloced + rw_alloced + md_alloced + mc_alloced;
const size_t total_alloced = ro_alloced + rw_alloced + md_alloced + mc_alloced
+ ss_bytes;
// Occupied size of each space.
const size_t ro_bytes = ro_space->used_bytes_slow(Metaspace::NonClassType);
@ -585,11 +597,12 @@ void VM_PopulateDumpSharedSpace::doit() {
const size_t mc_bytes = size_t(mc_top - mc_low);
// Percent of total size
const size_t total_bytes = ro_bytes + rw_bytes + md_bytes + mc_bytes;
const size_t total_bytes = ro_bytes + rw_bytes + md_bytes + mc_bytes + ss_bytes;
const double ro_t_perc = ro_bytes / double(total_bytes) * 100.0;
const double rw_t_perc = rw_bytes / double(total_bytes) * 100.0;
const double md_t_perc = md_bytes / double(total_bytes) * 100.0;
const double mc_t_perc = mc_bytes / double(total_bytes) * 100.0;
const double ss_t_perc = ss_bytes / double(total_bytes) * 100.0;
// Percent of fullness of each space
const double ro_u_perc = ro_bytes / double(ro_alloced) * 100.0;
@ -602,6 +615,7 @@ void VM_PopulateDumpSharedSpace::doit() {
tty->print_cr(fmt_space, "rw", rw_bytes, rw_t_perc, rw_alloced, rw_u_perc, rw_space->bottom());
tty->print_cr(fmt_space, "md", md_bytes, md_t_perc, md_alloced, md_u_perc, md_low);
tty->print_cr(fmt_space, "mc", mc_bytes, mc_t_perc, mc_alloced, mc_u_perc, mc_low);
tty->print_cr(fmt_space, "st", ss_bytes, ss_t_perc, ss_bytes, 100.0, ss_low);
tty->print_cr("total : %9d [100.0%% of total] out of %9d bytes [%4.1f%% used]",
total_bytes, total_alloced, total_u_perc);
@ -631,6 +645,7 @@ void VM_PopulateDumpSharedSpace::doit() {
pointer_delta(mc_top, _mc_vs.low(), sizeof(char)),
SharedMiscCodeSize,
true, true);
mapinfo->write_string_regions(_string_regions);
// Pass 2 - write data.
mapinfo->open_for_write();
@ -646,6 +661,8 @@ void VM_PopulateDumpSharedSpace::doit() {
pointer_delta(mc_top, _mc_vs.low(), sizeof(char)),
SharedMiscCodeSize,
true, true);
mapinfo->write_string_regions(_string_regions);
mapinfo->close();
memmove(vtbl_list, saved_vtbl, vtbl_list_size * sizeof(void*));
@ -942,6 +959,11 @@ bool MetaspaceShared::is_in_shared_space(const void* p) {
return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_space(p);
}
bool MetaspaceShared::is_string_region(int idx) {
return (idx >= MetaspaceShared::first_string &&
idx < MetaspaceShared::first_string + MetaspaceShared::max_strings);
}
void MetaspaceShared::print_shared_spaces() {
if (UseSharedSpaces) {
FileMapInfo::current_info()->print_shared_spaces();
@ -972,13 +994,15 @@ bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
// Map each shared region
if ((_ro_base = mapinfo->map_region(ro)) != NULL &&
mapinfo->verify_region_checksum(ro) &&
mapinfo->verify_region_checksum(ro) &&
(_rw_base = mapinfo->map_region(rw)) != NULL &&
mapinfo->verify_region_checksum(rw) &&
mapinfo->verify_region_checksum(rw) &&
(_md_base = mapinfo->map_region(md)) != NULL &&
mapinfo->verify_region_checksum(md) &&
mapinfo->verify_region_checksum(md) &&
(_mc_base = mapinfo->map_region(mc)) != NULL &&
mapinfo->verify_region_checksum(mc) &&
mapinfo->verify_region_checksum(mc) &&
mapinfo->map_string_regions() &&
mapinfo->verify_string_regions() &&
(image_alignment == (size_t)max_alignment()) &&
mapinfo->validate_classpath_entry_table()) {
// Success (no need to do anything)
@ -990,6 +1014,7 @@ bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
if (_rw_base != NULL) mapinfo->unmap_region(rw);
if (_md_base != NULL) mapinfo->unmap_region(md);
if (_mc_base != NULL) mapinfo->unmap_region(mc);
mapinfo->unmap_string_regions();
#ifndef _WINDOWS
// Release the entire mapped region
shared_rs.release();
@ -1011,7 +1036,7 @@ bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
void MetaspaceShared::initialize_shared_spaces() {
FileMapInfo *mapinfo = FileMapInfo::current_info();
char* buffer = mapinfo->region_base(md);
char* buffer = mapinfo->header()->region_addr(md);
// Skip over (reserve space for) a list of addresses of C++ vtables
// for Klass objects. They get filled in later.
@ -1027,13 +1052,16 @@ void MetaspaceShared::initialize_shared_spaces() {
buffer += sizeof(intptr_t);
buffer += vtable_size;
// Create the shared symbol table using the bucket array at this spot in the
// Create the shared symbol table using the compact table at this spot in the
// misc data space. (Todo: move this to read-only space. Currently
// this is mapped copy-on-write but will never be written into).
buffer = (char*)SymbolTable::init_shared_table(buffer);
SymbolTable::create_table();
// Create the shared string table using the compact table
buffer = (char*)StringTable::init_shared_table(mapinfo, buffer);
// Create the shared dictionary using the bucket array at this spot in
// the misc data space. Since the shared dictionary table is never
// modified, this region (of mapped pages) will be (effectively, if
@ -1100,6 +1128,11 @@ void MetaspaceShared::initialize_shared_spaces() {
}
}
void MetaspaceShared::fixup_shared_string_regions() {
FileMapInfo *mapinfo = FileMapInfo::current_info();
mapinfo->fixup_string_regions();
}
// JVM/TI RedefineClasses() support:
bool MetaspaceShared::remap_shared_readonly_as_readwrite() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

View File

@ -53,6 +53,7 @@ public:
memset(this, 0, sizeof(*this));
}
CompactHashtableStats symbol;
CompactHashtableStats string;
};
// Class Data Sharing Support
@ -90,7 +91,10 @@ class MetaspaceShared : AllStatic {
rw = 1, // read-write shared space in the heap
md = 2, // miscellaneous data for initializing tables, etc.
mc = 3, // miscellaneous code - vtable replacement.
n_regions = 4
max_strings = 2, // max number of string regions in string space
num_non_strings = 4, // number of non-string regions
first_string = num_non_strings, // index of first string region
n_regions = max_strings + num_non_strings // total number of regions
};
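A worked example (illustrative) of the new region indices: ro = 0, rw = 1, md = 2, mc = 3 as before, first_string = num_non_strings = 4, the string regions occupy indices 4 and 5, and n_regions = max_strings + num_non_strings = 6. The is_string_region(idx) helper added in metaspaceShared.cpp is therefore true exactly for indices 4 and 5.

  // e.g. the loop in FileMapInfo::write_string_regions() runs over
  // i = first_string .. first_string + max_strings - 1, i.e. i = 4 and i = 5.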
// Accessor functions to save shared space created for metadata, which has
@ -124,10 +128,13 @@ class MetaspaceShared : AllStatic {
}
static bool map_shared_spaces(FileMapInfo* mapinfo) NOT_CDS_RETURN_(false);
static void initialize_shared_spaces() NOT_CDS_RETURN;
static void fixup_shared_string_regions() NOT_CDS_RETURN;
// Return true if given address is in the mapped shared space.
static bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false);
static bool is_string_region(int idx) NOT_CDS_RETURN_(false);
static void generate_vtable_methods(void** vtbl_list,
void** vtable,
char** md_top, char* md_end,

View File

@ -311,6 +311,7 @@ void Universe::genesis(TRAPS) {
SystemDictionary::Cloneable_klass(), "u3");
assert(_the_array_interfaces_array->at(1) ==
SystemDictionary::Serializable_klass(), "u3");
MetaspaceShared::fixup_shared_string_regions();
} else {
// Set up shared interfaces array. (Do this before supers are set up.)
_the_array_interfaces_array->at_put(0, SystemDictionary::Cloneable_klass());
@ -754,7 +755,7 @@ jint Universe::initialize_heap() {
Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
Universe::print_compressed_oops_mode();
Universe::print_compressed_oops_mode(tty);
}
// Tell tests in which mode we run.
@ -781,27 +782,24 @@ jint Universe::initialize_heap() {
return JNI_OK;
}
void Universe::print_compressed_oops_mode() {
tty->cr();
tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
void Universe::print_compressed_oops_mode(outputStream* st) {
st->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
p2i(Universe::heap()->base()), Universe::heap()->reserved_region().byte_size()/M);
tty->print(", Compressed Oops mode: %s", narrow_oop_mode_to_string(narrow_oop_mode()));
st->print(", Compressed Oops mode: %s", narrow_oop_mode_to_string(narrow_oop_mode()));
if (Universe::narrow_oop_base() != 0) {
tty->print(": " PTR_FORMAT, p2i(Universe::narrow_oop_base()));
st->print(": " PTR_FORMAT, p2i(Universe::narrow_oop_base()));
}
if (Universe::narrow_oop_shift() != 0) {
tty->print(", Oop shift amount: %d", Universe::narrow_oop_shift());
st->print(", Oop shift amount: %d", Universe::narrow_oop_shift());
}
if (!Universe::narrow_oop_use_implicit_null_checks()) {
tty->print(", no protected page in front of the heap");
st->print(", no protected page in front of the heap");
}
tty->cr();
tty->cr();
st->cr();
}
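The change above routes the printing through a caller-supplied outputStream instead of the global tty, so the same routine can serve both normal logging and the hs_err report (see the vmError.cpp hunk later in this diff). A standalone sketch of that refactoring shape, using std::ostream as a stand-in for outputStream:

#include <iostream>
#include <sstream>
#include <string>

// Before: the printer wrote to a hard-wired global stream.
// After: the destination is a parameter, so callers choose where it goes.
static void print_mode(std::ostream& st, const std::string& mode) {
  st << "Compressed Oops mode: " << mode << '\n';
}

int main() {
  print_mode(std::cout, "Zero based");   // normal logging path
  std::ostringstream err_report;         // stand-in for the hs_err stream
  print_mode(err_report, "Zero based");
  std::cout << "captured: " << err_report.str();
  return 0;
}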
ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {

View File

@ -398,7 +398,7 @@ class Universe: AllStatic {
static void set_narrow_ptrs_base(address a) { _narrow_ptrs_base = a; }
static address narrow_ptrs_base() { return _narrow_ptrs_base; }
static void print_compressed_oops_mode();
static void print_compressed_oops_mode(outputStream* st);
// this is set in vm_version on sparc (and then reset in universe afaict)
static void set_narrow_oop_shift(int shift) {

View File

@ -43,6 +43,7 @@
#include "runtime/deoptimization.hpp"
#include "runtime/relocator.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/events.hpp"
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
@ -174,6 +175,9 @@ void VM_RedefineClasses::doit_epilogue() {
// Free os::malloc allocated memory.
os::free(_scratch_classes);
// Reset the_class_oop to null for error printing.
_the_class_oop = NULL;
if (RC_TRACE_ENABLED(0x00000004)) {
// Used to have separate timers for "doit" and "all", but the timer
// overhead skewed the measurements.
@ -4105,6 +4109,13 @@ void VM_RedefineClasses::redefine_single_class(jclass the_jclass,
java_lang_Class::classRedefinedCount(the_class_mirror),
os::available_memory() >> 10));
{
ResourceMark rm(THREAD);
Events::log_redefinition(THREAD, "redefined class name=%s, count=%d",
the_class->external_name(),
java_lang_Class::classRedefinedCount(the_class_mirror));
}
RC_TIMER_STOP(_timer_rsc_phase2);
} // end redefine_single_class()
@ -4249,3 +4260,11 @@ void VM_RedefineClasses::dump_methods() {
tty->cr();
}
}
void VM_RedefineClasses::print_on_error(outputStream* st) const {
VM_Operation::print_on_error(st);
if (_the_class_oop != NULL) {
ResourceMark rm;
st->print_cr(", redefining class %s", _the_class_oop->external_name());
}
}
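The override above appends the class being redefined to the generic VM_Operation line, which works because print_on_error is made virtual in vm_operations.hpp later in this diff. A minimal standalone sketch of that base-call-then-extend pattern; the class and member names here are illustrative only.

#include <iostream>

struct Operation {
  // Virtual so that error reporting can dispatch to the most derived printer.
  virtual void print_on_error(std::ostream& st) const {
    st << "VM_Operation (" << name() << ")";
  }
  virtual const char* name() const { return "Operation"; }
  virtual ~Operation() {}
};

struct RedefineClasses : Operation {
  const char* class_name;
  explicit RedefineClasses(const char* n) : class_name(n) {}
  const char* name() const override { return "RedefineClasses"; }
  void print_on_error(std::ostream& st) const override {
    Operation::print_on_error(st);                   // keep the generic line
    if (class_name != nullptr) {
      st << ", redefining class " << class_name;     // then add the specifics
    }
  }
};

int main() {
  RedefineClasses op("java/lang/String");
  const Operation& current = op;    // error reporter only sees the base type
  current.print_on_error(std::cout);
  std::cout << '\n';
  return 0;
}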

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -539,5 +539,8 @@ class VM_RedefineClasses: public VM_Operation {
static unsigned char * get_cached_class_file_bytes(JvmtiCachedClassFileData *cache) {
return cache == NULL ? NULL : cache->data;
}
// Error printing
void print_on_error(outputStream* st) const;
};
#endif // SHARE_VM_PRIMS_JVMTIREDEFINECLASSES_HPP

View File

@ -31,6 +31,7 @@
#include "code/codeCache.hpp"
#include "jvmtifiles/jvmtiEnv.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "prims/wbtestmethods/parserTests.hpp"
@ -710,6 +711,24 @@ WB_ENTRY(jobject, WB_GetBooleanVMFlag(JNIEnv* env, jobject o, jstring name))
return NULL;
WB_END
WB_ENTRY(jobject, WB_GetIntVMFlag(JNIEnv* env, jobject o, jstring name))
int result;
if (GetVMFlag <int> (thread, env, name, &result, &CommandLineFlags::intAt)) {
ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
return longBox(thread, env, result);
}
return NULL;
WB_END
WB_ENTRY(jobject, WB_GetUintVMFlag(JNIEnv* env, jobject o, jstring name))
uint result;
if (GetVMFlag <uint> (thread, env, name, &result, &CommandLineFlags::uintAt)) {
ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
return longBox(thread, env, result);
}
return NULL;
WB_END
WB_ENTRY(jobject, WB_GetIntxVMFlag(JNIEnv* env, jobject o, jstring name))
intx result;
if (GetVMFlag <intx> (thread, env, name, &result, &CommandLineFlags::intxAt)) {
@ -771,6 +790,16 @@ WB_ENTRY(void, WB_SetBooleanVMFlag(JNIEnv* env, jobject o, jstring name, jboolea
SetVMFlag <bool> (thread, env, name, &result, &CommandLineFlags::boolAtPut);
WB_END
WB_ENTRY(void, WB_SetIntVMFlag(JNIEnv* env, jobject o, jstring name, jlong value))
int result = value;
SetVMFlag <int> (thread, env, name, &result, &CommandLineFlags::intAtPut);
WB_END
WB_ENTRY(void, WB_SetUintVMFlag(JNIEnv* env, jobject o, jstring name, jlong value))
uint result = value;
SetVMFlag <uint> (thread, env, name, &result, &CommandLineFlags::uintAtPut);
WB_END
WB_ENTRY(void, WB_SetIntxVMFlag(JNIEnv* env, jobject o, jstring name, jlong value))
intx result = value;
SetVMFlag <intx> (thread, env, name, &result, &CommandLineFlags::intxAtPut);
@ -1179,6 +1208,11 @@ WB_ENTRY(jobject, WB_GetMethodStringOption(JNIEnv* env, jobject wb, jobject meth
return NULL;
WB_END
WB_ENTRY(jboolean, WB_IsShared(JNIEnv* env, jobject wb, jobject obj))
oop obj_oop = JNIHandles::resolve(obj);
return MetaspaceShared::is_in_shared_space((void*)obj_oop);
WB_END
// Some convenience methods to deal with objects from Java

int WhiteBox::offset_for_field(const char* field_name, oop object,
Symbol* signature_symbol) {
@ -1336,6 +1370,8 @@ static JNINativeMethod methods[] = {
{CC"isConstantVMFlag", CC"(Ljava/lang/String;)Z", (void*)&WB_IsConstantVMFlag},
{CC"isLockedVMFlag", CC"(Ljava/lang/String;)Z", (void*)&WB_IsLockedVMFlag},
{CC"setBooleanVMFlag", CC"(Ljava/lang/String;Z)V",(void*)&WB_SetBooleanVMFlag},
{CC"setIntVMFlag", CC"(Ljava/lang/String;J)V",(void*)&WB_SetIntVMFlag},
{CC"setUintVMFlag", CC"(Ljava/lang/String;J)V",(void*)&WB_SetUintVMFlag},
{CC"setIntxVMFlag", CC"(Ljava/lang/String;J)V",(void*)&WB_SetIntxVMFlag},
{CC"setUintxVMFlag", CC"(Ljava/lang/String;J)V",(void*)&WB_SetUintxVMFlag},
{CC"setUint64VMFlag", CC"(Ljava/lang/String;J)V",(void*)&WB_SetUint64VMFlag},
@ -1345,6 +1381,10 @@ static JNINativeMethod methods[] = {
(void*)&WB_SetStringVMFlag},
{CC"getBooleanVMFlag", CC"(Ljava/lang/String;)Ljava/lang/Boolean;",
(void*)&WB_GetBooleanVMFlag},
{CC"getIntVMFlag", CC"(Ljava/lang/String;)Ljava/lang/Long;",
(void*)&WB_GetIntVMFlag},
{CC"getUintVMFlag", CC"(Ljava/lang/String;)Ljava/lang/Long;",
(void*)&WB_GetUintVMFlag},
{CC"getIntxVMFlag", CC"(Ljava/lang/String;)Ljava/lang/Long;",
(void*)&WB_GetIntxVMFlag},
{CC"getUintxVMFlag", CC"(Ljava/lang/String;)Ljava/lang/Long;",
@ -1397,6 +1437,7 @@ static JNINativeMethod methods[] = {
{CC"getMethodStringOption",
CC"(Ljava/lang/reflect/Executable;Ljava/lang/String;)Ljava/lang/String;",
(void*)&WB_GetMethodStringOption},
{CC"isShared", CC"(Ljava/lang/Object;)Z", (void*)&WB_IsShared },
};
#undef CC
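The new WB_GetIntVMFlag/WB_GetUintVMFlag entries above follow the existing GetVMFlag<T> pattern: one template parameterized on the flag type plus a pointer to the matching CommandLineFlags accessor. A standalone sketch of that dispatch shape; the accessor functions and flag names below are invented stand-ins.

#include <cstring>
#include <iostream>

// Stand-ins for CommandLineFlags::intAt / uintAt: look the flag up by name
// and write its value through the out-parameter.
static bool intAt(const char* name, int* value) {
  if (std::strcmp(name, "DemoIntFlag") != 0) return false;
  *value = -7;
  return true;
}
static bool uintAt(const char* name, unsigned* value) {
  if (std::strcmp(name, "DemoUintFlag") != 0) return false;
  *value = 42u;
  return true;
}

// Same dispatch shape as GetVMFlag<T>(..., &CommandLineFlags::xxxAt):
// the flag type and the accessor travel together.
template <typename T>
bool get_vm_flag(const char* name, T* result, bool (*at)(const char*, T*)) {
  return at(name, result);
}

int main() {
  int i; unsigned u;
  if (get_vm_flag("DemoIntFlag", &i, &intAt))   std::cout << "int flag = "  << i << '\n';
  if (get_vm_flag("DemoUintFlag", &u, &uintAt)) std::cout << "uint flag = " << u << '\n';
  return 0;
}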

View File

@ -586,11 +586,12 @@ static bool set_fp_numeric_flag(char* name, char* value, Flag::Flags origin) {
static bool set_numeric_flag(char* name, char* value, Flag::Flags origin) {
julong v;
int int_v;
intx intx_v;
bool is_neg = false;
// Check the sign first since atomull() parses only unsigned values.
if (*value == '-') {
if (!CommandLineFlags::intxAt(name, &intx_v)) {
if (!CommandLineFlags::intxAt(name, &intx_v) && !CommandLineFlags::intAt(name, &int_v)) {
return false;
}
value++;
@ -599,6 +600,17 @@ static bool set_numeric_flag(char* name, char* value, Flag::Flags origin) {
if (!atomull(value, &v)) {
return false;
}
int_v = (int) v;
if (is_neg) {
int_v = -int_v;
}
if (CommandLineFlags::intAtPut(name, &int_v, origin)) {
return true;
}
uint uint_v = (uint) v;
if (!is_neg && CommandLineFlags::uintAtPut(name, &uint_v, origin)) {
return true;
}
intx_v = (intx) v;
if (is_neg) {
intx_v = -intx_v;
@ -1375,7 +1387,7 @@ void Arguments::set_cms_and_parnew_gc_flags() {
if (PrintGCDetails && Verbose) {
tty->print_cr("MarkStackSize: %uk MarkStackSizeMax: %uk",
(unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
tty->print_cr("ConcGCThreads: %u", (uint) ConcGCThreads);
tty->print_cr("ConcGCThreads: %u", ConcGCThreads);
}
}
#endif // INCLUDE_ALL_GCS
@ -1693,7 +1705,7 @@ void Arguments::set_g1_gc_flags() {
if (PrintGCDetails && Verbose) {
tty->print_cr("MarkStackSize: %uk MarkStackSizeMax: %uk",
(unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
tty->print_cr("ConcGCThreads: %u", (uint) ConcGCThreads);
tty->print_cr("ConcGCThreads: %u", ConcGCThreads);
}
}
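The set_numeric_flag change above tries the candidate flag types in a fixed order — int, then uint (only for non-negative input), then intx and the wider types — and lets the flag's declared type decide which AtPut call succeeds. A standalone sketch of that try-each-type dispatch; the two-entry flag table is a toy stand-in.

#include <cstdlib>
#include <cstring>
#include <iostream>

// Toy flag table: one int flag and one unsigned flag.
static int demo_int_flag = 0;
static unsigned demo_uint_flag = 0;

static bool intAtPut(const char* name, int v) {
  if (std::strcmp(name, "DemoIntFlag") != 0) return false;
  demo_int_flag = v; return true;
}
static bool uintAtPut(const char* name, unsigned v) {
  if (std::strcmp(name, "DemoUintFlag") != 0) return false;
  demo_uint_flag = v; return true;
}

// Parse the text once, then offer the value to each flag type in turn;
// only the accessor matching the flag's declared type accepts it.
static bool set_numeric_flag(const char* name, const char* value) {
  bool is_neg = (*value == '-');
  if (is_neg) value++;
  unsigned long long v = std::strtoull(value, nullptr, 10);
  int int_v = is_neg ? -(int)v : (int)v;
  if (intAtPut(name, int_v)) return true;
  if (!is_neg && uintAtPut(name, (unsigned)v)) return true;
  return false;   // wider types (intx, uintx, ...) would follow here
}

int main() {
  set_numeric_flag("DemoIntFlag", "-3");
  set_numeric_flag("DemoUintFlag", "11");
  std::cout << demo_int_flag << ' ' << demo_uint_flag << '\n';  // prints "-3 11"
  return 0;
}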
@ -3231,7 +3243,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
jio_fprintf(defaultStream::error_stream(),
"Please use -XX:ConcGCThreads in place of "
"-XX:ParallelMarkingThreads or -XX:ParallelCMSThreads in the future\n");
FLAG_SET_CMDLINE(uintx, ConcGCThreads, conc_threads);
FLAG_SET_CMDLINE(uint, ConcGCThreads, conc_threads);
} else if (match_option(option, "-XX:MaxDirectMemorySize=", &tail)) {
julong max_direct_memory_size = 0;
ArgsRange errcode = parse_memory_size(tail, &max_direct_memory_size, 0);

View File

@ -93,6 +93,32 @@ void Flag::set_bool(bool value) {
*((bool*) _addr) = value;
}
bool Flag::is_int() const {
return strcmp(_type, "int") == 0;
}
int Flag::get_int() const {
return *((int*) _addr);
}
void Flag::set_int(int value) {
check_writable();
*((int*) _addr) = value;
}
bool Flag::is_uint() const {
return strcmp(_type, "uint") == 0;
}
uint Flag::get_uint() const {
return *((uint*) _addr);
}
void Flag::set_uint(uint value) {
check_writable();
*((uint*) _addr) = value;
}
bool Flag::is_intx() const {
return strcmp(_type, "intx") == 0;
}
@ -316,6 +342,12 @@ void Flag::print_on(outputStream* st, bool withComments) {
if (is_bool()) {
st->print("%-16s", get_bool() ? "true" : "false");
}
if (is_int()) {
st->print("%-16d", get_int());
}
if (is_uint()) {
st->print("%-16u", get_uint());
}
if (is_intx()) {
st->print("%-16ld", get_intx());
}
@ -411,6 +443,10 @@ void Flag::print_kind(outputStream* st) {
void Flag::print_as_flag(outputStream* st) {
if (is_bool()) {
st->print("-XX:%s%s", get_bool() ? "+" : "-", _name);
} else if (is_int()) {
st->print("-XX:%s=%d", _name, get_int());
} else if (is_uint()) {
st->print("-XX:%s=%u", _name, get_uint());
} else if (is_intx()) {
st->print("-XX:%s=" INTX_FORMAT, _name, get_intx());
} else if (is_uintx()) {
@ -663,6 +699,62 @@ void CommandLineFlagsEx::boolAtPut(CommandLineFlagWithType flag, bool value, Fla
faddr->set_origin(origin);
}
bool CommandLineFlags::intAt(const char* name, size_t len, int* value, bool allow_locked, bool return_flag) {
Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false;
if (!result->is_int()) return false;
*value = result->get_int();
return true;
}
bool CommandLineFlags::intAtPut(const char* name, size_t len, int* value, Flag::Flags origin) {
Flag* result = Flag::find_flag(name, len);
if (result == NULL) return false;
if (!result->is_int()) return false;
int old_value = result->get_int();
trace_flag_changed<EventIntFlagChanged, s4>(name, old_value, *value, origin);
result->set_int(*value);
*value = old_value;
result->set_origin(origin);
return true;
}
void CommandLineFlagsEx::intAtPut(CommandLineFlagWithType flag, int value, Flag::Flags origin) {
Flag* faddr = address_of_flag(flag);
guarantee(faddr != NULL && faddr->is_int(), "wrong flag type");
trace_flag_changed<EventIntFlagChanged, s4>(faddr->_name, faddr->get_int(), value, origin);
faddr->set_int(value);
faddr->set_origin(origin);
}
bool CommandLineFlags::uintAt(const char* name, size_t len, uint* value, bool allow_locked, bool return_flag) {
Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false;
if (!result->is_uint()) return false;
*value = result->get_uint();
return true;
}
bool CommandLineFlags::uintAtPut(const char* name, size_t len, uint* value, Flag::Flags origin) {
Flag* result = Flag::find_flag(name, len);
if (result == NULL) return false;
if (!result->is_uint()) return false;
uint old_value = result->get_uint();
trace_flag_changed<EventUnsignedIntFlagChanged, u4>(name, old_value, *value, origin);
result->set_uint(*value);
*value = old_value;
result->set_origin(origin);
return true;
}
void CommandLineFlagsEx::uintAtPut(CommandLineFlagWithType flag, uint value, Flag::Flags origin) {
Flag* faddr = address_of_flag(flag);
guarantee(faddr != NULL && faddr->is_uint(), "wrong flag type");
trace_flag_changed<EventUnsignedIntFlagChanged, u4>(faddr->_name, faddr->get_uint(), value, origin);
faddr->set_uint(value);
faddr->set_origin(origin);
}
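Note the calling convention of the new intAtPut/uintAtPut above: the value is passed by pointer, and on success the previous flag value is written back through that same pointer. A tiny standalone sketch of that swap-through-pointer contract; the flag storage is a stand-in.

#include <iostream>

static int stored_flag = 10;   // stand-in for the flag's backing storage

// Same contract as CommandLineFlags::intAtPut: store *value into the flag
// and hand the old value back to the caller through *value.
static bool intAtPut(int* value) {
  int old_value = stored_flag;
  stored_flag = *value;
  *value = old_value;
  return true;
}

int main() {
  int v = 99;
  if (intAtPut(&v)) {
    std::cout << "flag is now " << stored_flag   // 99
              << ", previous value was " << v    // 10
              << '\n';
  }
  return 0;
}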
bool CommandLineFlags::intxAt(const char* name, size_t len, intx* value, bool allow_locked, bool return_flag) {
Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false;

View File

@ -279,6 +279,14 @@ struct Flag {
bool get_bool() const;
void set_bool(bool value);
bool is_int() const;
int get_int() const;
void set_int(int value);
bool is_uint() const;
uint get_uint() const;
void set_uint(uint value);
bool is_intx() const;
intx get_intx() const;
void set_intx(intx value);
@ -363,13 +371,28 @@ class CounterSetting {
~CounterSetting() { (*counter)--; }
};
class IntFlagSetting {
int val;
int* flag;
public:
IntFlagSetting(int& fl, int newValue) { flag = &fl; val = fl; fl = newValue; }
~IntFlagSetting() { *flag = val; }
};
class UIntFlagSetting {
uint val;
uint* flag;
public:
UIntFlagSetting(uint& fl, uint newValue) { flag = &fl; val = fl; fl = newValue; }
~UIntFlagSetting() { *flag = val; }
};
class UIntXFlagSetting {
uintx val;
uintx* flag;
public:
UIntFlagSetting(uintx& fl, uintx newValue) { flag = &fl; val = fl; fl = newValue; }
~UIntFlagSetting() { *flag = val; }
UIntXFlagSetting(uintx& fl, uintx newValue) { flag = &fl; val = fl; fl = newValue; }
~UIntXFlagSetting() { *flag = val; }
};
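IntFlagSetting, UIntFlagSetting, and the renamed UIntXFlagSetting above are all the same scoped save-and-restore idiom: the constructor records the old value and installs the new one, and the destructor puts the old value back when the scope ends. A self-contained version of that idiom with a usage example (the flag variable here is a stand-in for a global VM flag):

#include <cassert>

// Scoped override of an int "flag": restores the previous value on scope exit.
class IntFlagSetting {
  int  val;
  int* flag;
public:
  IntFlagSetting(int& fl, int newValue) { flag = &fl; val = fl; fl = newValue; }
  ~IntFlagSetting() { *flag = val; }
};

static int SomeTuningFlag = 4;   // stand-in for a global VM flag

int main() {
  assert(SomeTuningFlag == 4);
  {
    IntFlagSetting override_flag(SomeTuningFlag, 16);  // temporarily raise it
    assert(SomeTuningFlag == 16);
  }                                                    // destructor restores it
  assert(SomeTuningFlag == 4);
  return 0;
}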
class DoubleFlagSetting {
@ -396,6 +419,16 @@ class CommandLineFlags {
static bool boolAtPut(const char* name, size_t len, bool* value, Flag::Flags origin);
static bool boolAtPut(const char* name, bool* value, Flag::Flags origin) { return boolAtPut(name, strlen(name), value, origin); }
static bool intAt(const char* name, size_t len, int* value, bool allow_locked = false, bool return_flag = false);
static bool intAt(const char* name, int* value, bool allow_locked = false, bool return_flag = false) { return intAt(name, strlen(name), value, allow_locked, return_flag); }
static bool intAtPut(const char* name, size_t len, int* value, Flag::Flags origin);
static bool intAtPut(const char* name, int* value, Flag::Flags origin) { return intAtPut(name, strlen(name), value, origin); }
static bool uintAt(const char* name, size_t len, uint* value, bool allow_locked = false, bool return_flag = false);
static bool uintAt(const char* name, uint* value, bool allow_locked = false, bool return_flag = false) { return uintAt(name, strlen(name), value, allow_locked, return_flag); }
static bool uintAtPut(const char* name, size_t len, uint* value, Flag::Flags origin);
static bool uintAtPut(const char* name, uint* value, Flag::Flags origin) { return uintAtPut(name, strlen(name), value, origin); }
static bool intxAt(const char* name, size_t len, intx* value, bool allow_locked = false, bool return_flag = false);
static bool intxAt(const char* name, intx* value, bool allow_locked = false, bool return_flag = false) { return intxAt(name, strlen(name), value, allow_locked, return_flag); }
static bool intxAtPut(const char* name, size_t len, intx* value, Flag::Flags origin);
@ -1448,7 +1481,7 @@ class CommandLineFlags {
"The standard deviation used by the parallel compact dead wood " \
"limiter (a number between 0-100)") \
\
product(uintx, ParallelGCThreads, 0, \
product(uint, ParallelGCThreads, 0, \
"Number of parallel threads parallel gc will use") \
\
product(bool, UseDynamicNumberOfGCThreads, false, \
@ -1473,7 +1506,7 @@ class CommandLineFlags {
develop(uintx, ParallelOldGCSplitInterval, 3, \
"How often to provoke splitting a young gen space") \
\
product(uintx, ConcGCThreads, 0, \
product(uint, ConcGCThreads, 0, \
"Number of threads concurrent gc will use") \
\
product(size_t, YoungPLABSize, 4096, \

View File

@ -197,6 +197,8 @@ typedef enum {
class CommandLineFlagsEx : CommandLineFlags {
public:
static void boolAtPut(CommandLineFlagWithType flag, bool value, Flag::Flags origin);
static void intAtPut(CommandLineFlagWithType flag, int value, Flag::Flags origin);
static void uintAtPut(CommandLineFlagWithType flag, uint value, Flag::Flags origin);
static void intxAtPut(CommandLineFlagWithType flag, intx value, Flag::Flags origin);
static void uintxAtPut(CommandLineFlagWithType flag, uintx value, Flag::Flags origin);
static void uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, Flag::Flags origin);

View File

@ -670,8 +670,8 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCa
}
// always move the block
void* ptr = os::malloc(size, memflags, stack);
if (PrintMalloc) {
tty->print_cr("os::remalloc " SIZE_FORMAT " bytes, " PTR_FORMAT " --> " PTR_FORMAT, size, memblock, ptr);
if (PrintMalloc && tty != NULL) {
tty->print_cr("os::realloc " SIZE_FORMAT " bytes, " PTR_FORMAT " --> " PTR_FORMAT, size, memblock, ptr);
}
// Copy to new memory if malloc didn't fail
if ( ptr != NULL ) {
@ -843,7 +843,7 @@ void os::print_cpu_info(outputStream* st) {
pd_print_cpu_info(st);
}
void os::print_date_and_time(outputStream *st) {
void os::print_date_and_time(outputStream *st, char* buf, size_t buflen) {
const int secs_per_day = 86400;
const int secs_per_hour = 3600;
const int secs_per_min = 60;
@ -852,6 +852,12 @@ void os::print_date_and_time(outputStream *st) {
(void)time(&tloc);
st->print("time: %s", ctime(&tloc)); // ctime adds newline.
struct tm tz;
if (localtime_pd(&tloc, &tz) != NULL) {
::strftime(buf, buflen, "%Z", &tz);
st->print_cr("timezone: %s", buf);
}
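The new timezone line above formats into a caller-provided buffer with strftime's %Z conversion after converting the time with a localtime variant. A standalone sketch of the same steps using the standard localtime/strftime calls; the buffer size and output destination are arbitrary choices here.

#include <cstdio>
#include <ctime>

int main() {
  std::time_t tloc;
  std::time(&tloc);
  std::printf("time: %s", std::ctime(&tloc));   // ctime adds the newline

  char buf[64];
  std::tm* tz = std::localtime(&tloc);          // HotSpot uses its localtime_pd wrapper
  if (tz != nullptr && std::strftime(buf, sizeof(buf), "%Z", tz) > 0) {
    std::printf("timezone: %s\n", buf);         // e.g. "CEST"
  }
  return 0;
}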
double t = os::elapsedTime();
// NOTE: It tends to crash after a SEGV if we want to printf("%f",...) in
// Linux. Must be a bug in glibc? Workaround is to round "t" to int

View File

@ -596,7 +596,7 @@ class os: AllStatic {
static void print_register_info(outputStream* st, void* context);
static void print_siginfo(outputStream* st, void* siginfo);
static void print_signal_handlers(outputStream* st, char* buf, size_t buflen);
static void print_date_and_time(outputStream* st);
static void print_date_and_time(outputStream* st, char* buf, size_t buflen);
static void print_location(outputStream* st, intptr_t x, bool verbose = false);
static size_t lasterror(char *buf, size_t len);

View File

@ -732,6 +732,8 @@ JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* thread))
if (StackTraceInThrowable) {
java_lang_Throwable::fill_in_stack_trace(exception);
}
// Increment counter for hs_err file reporting
Atomic::inc(&Exceptions::_stack_overflow_errors);
throw_and_post_jvmti_exception(thread, exception);
JRT_END

View File

@ -96,7 +96,7 @@ StubCodeGenerator::~StubCodeGenerator() {
toprint[toprint_len++] = cdesc;
if (cdesc == _first_stub) { saw_first = true; break; }
}
assert(saw_first, "must get both first & last");
assert(toprint_len == 0 || saw_first, "must get both first & last");
// Print in reverse order:
qsort(toprint, toprint_len, sizeof(toprint[0]), compare_cdesc);
for (int i = 0; i < toprint_len; i++) {

View File

@ -678,7 +678,7 @@ void NMethodSweeper::possibly_flush(nmethod* nm) {
// ReservedCodeCacheSize
int reset_val = hotness_counter_reset_val();
int time_since_reset = reset_val - nm->hotness_counter();
int code_blob_type = (CodeCache::get_code_blob_type(nm->comp_level()));
int code_blob_type = CodeCache::get_code_blob_type(nm);
double threshold = -reset_val + (CodeCache::reverse_free_ratio(code_blob_type) * NmethodSweepActivity);
// The less free space in the code cache we have - the bigger reverse_free_ratio() is.
// I.e., 'threshold' increases with lower available space in the code cache and a higher

View File

@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
@ -638,11 +639,11 @@ intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
// hashCode() is a heap mutator ...
// Relaxing assertion for bug 6320749.
assert(Universe::verify_in_progress() ||
assert(Universe::verify_in_progress() || DumpSharedSpaces ||
!SafepointSynchronize::is_at_safepoint(), "invariant");
assert(Universe::verify_in_progress() ||
assert(Universe::verify_in_progress() || DumpSharedSpaces ||
Self->is_Java_thread() , "invariant");
assert(Universe::verify_in_progress() ||
assert(Universe::verify_in_progress() || DumpSharedSpaces ||
((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
ObjectMonitor* monitor = NULL;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -59,7 +59,7 @@ double elapsedTimer::seconds() const {
}
jlong elapsedTimer::milliseconds() const {
return TimeHelper::counter_to_millis(_counter);
return (jlong)TimeHelper::counter_to_millis(_counter);
}
jlong elapsedTimer::active_ticks() const {
@ -89,7 +89,7 @@ double TimeStamp::seconds() const {
jlong TimeStamp::milliseconds() const {
assert(is_updated(), "must not be clear");
jlong new_count = os::elapsed_counter();
return TimeHelper::counter_to_millis(new_count - _counter);
return (jlong)TimeHelper::counter_to_millis(new_count - _counter);
}
jlong TimeStamp::ticks_since_update() const {

View File

@ -192,7 +192,7 @@ class VM_Operation: public CHeapObj<mtInternal> {
static const char* mode_to_string(Mode mode);
// Debugging
void print_on_error(outputStream* st) const;
virtual void print_on_error(outputStream* st) const;
const char* name() const { return _names[type()]; }
static const char* name(int type) {
assert(type >= 0 && type < VMOp_Terminating, "invalid VM operation type");

View File

@ -1558,6 +1558,12 @@ bool add_global_entry(JNIEnv* env, Handle name, jmmVMGlobal *global, Flag *flag,
if (flag->is_bool()) {
global->value.z = flag->get_bool() ? JNI_TRUE : JNI_FALSE;
global->type = JMM_VMGLOBAL_TYPE_JBOOLEAN;
} else if (flag->is_int()) {
global->value.j = (jlong)flag->get_int();
global->type = JMM_VMGLOBAL_TYPE_JLONG;
} else if (flag->is_uint()) {
global->value.j = (jlong)flag->get_uint();
global->type = JMM_VMGLOBAL_TYPE_JLONG;
} else if (flag->is_intx()) {
global->value.j = (jlong)flag->get_intx();
global->type = JMM_VMGLOBAL_TYPE_JLONG;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -347,6 +347,13 @@ bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
return true;
}
// Mapped CDS string region.
// The string region(s) are part of the Java heap.
if (reserved_rgn->flag() == mtJavaHeap) {
assert(reserved_rgn->contain_region(base_addr, size), "Reserved heap region should contain this mapping region");
return true;
}
ShouldNotReachHere();
return false;
}

View File

@ -44,6 +44,36 @@ int WriteableFlags::set_bool_flag(const char* name, bool value, Flag::Flags orig
return CommandLineFlags::boolAtPut((char*)name, &value, origin) ? SUCCESS : ERR_OTHER;
}
// set an int global flag
int WriteableFlags::set_int_flag(const char* name, const char* arg, Flag::Flags origin, FormatBuffer<80>& err_msg) {
int value;
if (sscanf(arg, "%d", &value)) {
return set_int_flag(name, value, origin, err_msg);
}
err_msg.print("flag value must be an integer");
return WRONG_FORMAT;
}
int WriteableFlags::set_int_flag(const char* name, int value, Flag::Flags origin, FormatBuffer<80>& err_msg) {
return CommandLineFlags::intAtPut((char*)name, &value, origin) ? SUCCESS : ERR_OTHER;
}
// set a uint global flag
int WriteableFlags::set_uint_flag(const char* name, const char* arg, Flag::Flags origin, FormatBuffer<80>& err_msg) {
uint value;
if (sscanf(arg, "%u", &value)) {
return set_uint_flag(name, value, origin, err_msg);
}
err_msg.print("flag value must be an unsigned integer");
return WRONG_FORMAT;
}
int WriteableFlags::set_uint_flag(const char* name, uint value, Flag::Flags origin, FormatBuffer<80>& err_msg) {
return CommandLineFlags::uintAtPut((char*)name, &value, origin) ? SUCCESS : ERR_OTHER;
}
// set an intx global flag
int WriteableFlags::set_intx_flag(const char* name, const char* arg, Flag::Flags origin, FormatBuffer<80>& err_msg) {
intx value;
@ -173,6 +203,10 @@ int WriteableFlags::set_flag_from_char(Flag* f, const void* value, Flag::Flags o
}
if (f->is_bool()) {
return set_bool_flag(f->_name, flag_value, origin, err_msg);
} else if (f->is_int()) {
return set_int_flag(f->_name, flag_value, origin, err_msg);
} else if (f->is_uint()) {
return set_uint_flag(f->_name, flag_value, origin, err_msg);
} else if (f->is_intx()) {
return set_intx_flag(f->_name, flag_value, origin, err_msg);
} else if (f->is_uintx()) {
@ -195,6 +229,12 @@ int WriteableFlags::set_flag_from_jvalue(Flag* f, const void* value, Flag::Flags
if (f->is_bool()) {
bool bvalue = (new_value.z == JNI_TRUE ? true : false);
return set_bool_flag(f->_name, bvalue, origin, err_msg);
} else if (f->is_int()) {
int ivalue = (int)new_value.j;
return set_int_flag(f->_name, ivalue, origin, err_msg);
} else if (f->is_uint()) {
uint uvalue = (uint)new_value.j;
return set_uint_flag(f->_name, uvalue, origin, err_msg);
} else if (f->is_intx()) {
intx ivalue = (intx)new_value.j;
return set_intx_flag(f->_name, ivalue, origin, err_msg);

View File

@ -56,6 +56,10 @@ private:
// set a boolean global flag
static int set_bool_flag(const char* name, const char* value, Flag::Flags origin, FormatBuffer<80>& err_msg);
// set an int global flag
static int set_int_flag(const char* name, const char* value, Flag::Flags origin, FormatBuffer<80>& err_msg);
// set a uint global flag
static int set_uint_flag(const char* name, const char* value, Flag::Flags origin, FormatBuffer<80>& err_msg);
// set an intx global flag
static int set_intx_flag(const char* name, const char* value, Flag::Flags origin, FormatBuffer<80>& err_msg);
// set a uintx global flag
@ -66,6 +70,10 @@ private:
static int set_size_t_flag(const char* name, const char* value, Flag::Flags origin, FormatBuffer<80>& err_msg);
// set a boolean global flag
static int set_bool_flag(const char* name, bool value, Flag::Flags origin, FormatBuffer<80>& err_msg);
// set an int global flag
static int set_int_flag(const char* name, int value, Flag::Flags origin, FormatBuffer<80>& err_msg);
// set a uint global flag
static int set_uint_flag(const char* name, uint value, Flag::Flags origin, FormatBuffer<80>& err_msg);
// set an intx global flag
static int set_intx_flag(const char* name, intx value, Flag::Flags origin, FormatBuffer<80>& err_msg);
// set a uintx global flag

View File

@ -122,6 +122,22 @@ Declares a structure type that can be used in other events.
<value type="CLASS" field="definingClassLoader" label="Defining Class Loader"/>
</event>
<event id="IntFlagChanged" path="vm/flag/int_changed" label="Int Flag Changed"
is_instant="true">
<value type="UTF8" field="name" label="Name" />
<value type="INTEGER" field="old_value" label="Old Value" />
<value type="INTEGER" field="new_value" label="New Value" />
<value type="FLAGVALUEORIGIN" field="origin" label="Origin" />
</event>
<event id="UnsignedIntFlagChanged" path="vm/flag/uint_changed" label="Unsigned Int Flag Changed"
is_instant="true">
<value type="UTF8" field="name" label="Name" />
<value type="UINT" field="old_value" label="Old Value" />
<value type="UINT" field="new_value" label="New Value" />
<value type="FLAGVALUEORIGIN" field="origin" label="Origin" />
</event>
<event id="LongFlagChanged" path="vm/flag/long_changed" label="Long Flag Changed"
is_instant="true">
<value type="UTF8" field="name" label="Name" />
@ -248,6 +264,15 @@ Declares a structure type that can be used in other events.
<structvalue type="ObjectSpace" field="toSpace" label="To Space"/>
</event>
<event id="G1HeapSummary" path="vm/gc/heap/g1_summary" label="G1 Heap Summary" is_instant="true">
<value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
<value type="GCWHEN" field="when" label="When" />
<value type="BYTES64" field="edenUsedSize" label="Eden Used Size" />
<value type="BYTES64" field="edenTotalSize" label="Eden Total Size" />
<value type="BYTES64" field="survivorUsedSize" label="Survivor Used Size" />
</event>
<event id="GCGarbageCollection" path="vm/gc/collector/garbage_collection" label="Garbage Collection"
description="Garbage collection performed by the JVM">
<value type="UINT" field="gcId" label="GC ID" relation="GC_ID" />

View File

@ -770,3 +770,31 @@ extern "C" void pns(void* sp, void* fp, void* pc) { // print native stack
}
#endif // !PRODUCT
//////////////////////////////////////////////////////////////////////////////
// Test multiple STATIC_ASSERT forms in various scopes.
#ifndef PRODUCT
// namespace scope
STATIC_ASSERT(true);
STATIC_ASSERT(true);
STATIC_ASSERT(1 == 1);
STATIC_ASSERT(0 == 0);
void test_multiple_static_assert_forms_in_function_scope() {
STATIC_ASSERT(true);
STATIC_ASSERT(true);
STATIC_ASSERT(0 == 0);
STATIC_ASSERT(1 == 1);
}
// class scope
struct TestMultipleStaticAssertFormsInClassScope {
STATIC_ASSERT(true);
STATIC_ASSERT(true);
STATIC_ASSERT(0 == 0);
STATIC_ASSERT(1 == 1);
};
#endif // !PRODUCT

View File

@ -223,7 +223,8 @@ template<bool x> struct STATIC_ASSERT_FAILURE;
template<> struct STATIC_ASSERT_FAILURE<true> { enum { value = 1 }; };
#define STATIC_ASSERT(Cond) \
typedef char STATIC_ASSERT_DUMMY_TYPE[ STATIC_ASSERT_FAILURE< (Cond) >::value ]
typedef char PASTE_TOKENS(STATIC_ASSERT_DUMMY_TYPE_, __LINE__)[ \
STATIC_ASSERT_FAILURE< (Cond) >::value ]
// out of shared space reporting
enum SharedSpaceType {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,6 +37,7 @@
EventLog* Events::_logs = NULL;
StringEventLog* Events::_messages = NULL;
StringEventLog* Events::_exceptions = NULL;
StringEventLog* Events::_redefinitions = NULL;
StringEventLog* Events::_deopt_messages = NULL;
EventLog::EventLog() {
@ -66,6 +67,7 @@ void Events::init() {
if (LogEvents) {
_messages = new StringEventLog("Events");
_exceptions = new StringEventLog("Internal exceptions");
_redefinitions = new StringEventLog("Classes redefined");
_deopt_messages = new StringEventLog("Deoptimization events");
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -186,6 +186,9 @@ class Events : AllStatic {
// Deoptimization related messages
static StringEventLog* _deopt_messages;
// Redefinition related messages
static StringEventLog* _redefinitions;
public:
static void print_all(outputStream* out);
@ -198,6 +201,8 @@ class Events : AllStatic {
// Log exception related message
static void log_exception(Thread* thread, const char* format, ...) ATTRIBUTE_PRINTF(2, 3);
static void log_redefinition(Thread* thread, const char* format, ...) ATTRIBUTE_PRINTF(2, 3);
static void log_deopt_message(Thread* thread, const char* format, ...) ATTRIBUTE_PRINTF(2, 3);
// Register default loggers
@ -222,6 +227,15 @@ inline void Events::log_exception(Thread* thread, const char* format, ...) {
}
}
inline void Events::log_redefinition(Thread* thread, const char* format, ...) {
if (LogEvents) {
va_list ap;
va_start(ap, format);
_redefinitions->logv(thread, format, ap);
va_end(ap);
}
}
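log_redefinition above is the usual printf-style varargs forwarder: collect the arguments into a va_list and pass them on to a logv-style sink. A standalone sketch of that forwarding shape, with vsnprintf standing in for StringEventLog::logv:

#include <cstdarg>
#include <cstdio>

// Stand-in for StringEventLog::logv: format into a bounded buffer and emit it.
static void logv(const char* format, va_list ap) {
  char buf[256];
  std::vsnprintf(buf, sizeof(buf), format, ap);
  std::printf("%s\n", buf);
}

// Same shape as Events::log_redefinition: accept printf-style varargs,
// wrap them in a va_list, and forward to the sink.
static void log_redefinition(const char* format, ...) {
  va_list ap;
  va_start(ap, format);
  logv(format, ap);
  va_end(ap);
}

int main() {
  log_redefinition("redefined class name=%s, count=%d", "java/lang/String", 1);
  return 0;
}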
inline void Events::log_deopt_message(Thread* thread, const char* format, ...) {
if (LogEvents) {
va_list ap;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -154,15 +154,21 @@ void Exceptions::_throw(Thread* thread, const char* file, int line, Handle h_exc
return;
}
if (h_exception->is_a(SystemDictionary::OutOfMemoryError_klass())) {
count_out_of_memory_exceptions(h_exception);
}
assert(h_exception->is_a(SystemDictionary::Throwable_klass()), "exception is not a subclass of java/lang/Throwable");
// set the pending exception
thread->set_pending_exception(h_exception(), file, line);
// vm log
Events::log_exception(thread, "Exception <%s%s%s> (" INTPTR_FORMAT ") thrown at [%s, line %d]",
h_exception->print_value_string(), message ? ": " : "", message ? message : "",
(address)h_exception(), file, line);
if (LogEvents) {
Events::log_exception(thread, "Exception <%s%s%s> (" INTPTR_FORMAT ") thrown at [%s, line %d]",
h_exception->print_value_string(), message ? ": " : "", message ? message : "",
(address)h_exception(), file, line);
}
}
@ -228,6 +234,8 @@ void Exceptions::throw_stack_overflow_exception(Thread* THREAD, const char* file
if (StackTraceInThrowable) {
java_lang_Throwable::fill_in_stack_trace(exception, method());
}
// Increment counter for hs_err file reporting
Atomic::inc(&Exceptions::_stack_overflow_errors);
} else {
// if prior exception, throw that one instead
exception = Handle(THREAD, THREAD->pending_exception());
@ -404,6 +412,44 @@ Handle Exceptions::new_exception(Thread* thread, Symbol* name,
h_prot, to_utf8_safe);
}
// Exception counting for hs_err file
volatile int Exceptions::_stack_overflow_errors = 0;
volatile int Exceptions::_out_of_memory_error_java_heap_errors = 0;
volatile int Exceptions::_out_of_memory_error_metaspace_errors = 0;
volatile int Exceptions::_out_of_memory_error_class_metaspace_errors = 0;
void Exceptions::count_out_of_memory_exceptions(Handle exception) {
if (exception() == Universe::out_of_memory_error_metaspace()) {
Atomic::inc(&_out_of_memory_error_metaspace_errors);
} else if (exception() == Universe::out_of_memory_error_class_metaspace()) {
Atomic::inc(&_out_of_memory_error_class_metaspace_errors);
} else {
// everything else reported as java heap OOM
Atomic::inc(&_out_of_memory_error_java_heap_errors);
}
}
void print_oom_count(outputStream* st, const char *err, int count) {
if (count > 0) {
st->print_cr("OutOfMemoryError %s=%d", err, count);
}
}
bool Exceptions::has_exception_counts() {
return (_stack_overflow_errors + _out_of_memory_error_java_heap_errors +
_out_of_memory_error_metaspace_errors + _out_of_memory_error_class_metaspace_errors) > 0;
}
void Exceptions::print_exception_counts_on_error(outputStream* st) {
print_oom_count(st, "java_heap_errors", _out_of_memory_error_java_heap_errors);
print_oom_count(st, "metaspace_errors", _out_of_memory_error_metaspace_errors);
print_oom_count(st, "class_metaspace_errors", _out_of_memory_error_class_metaspace_errors);
if (_stack_overflow_errors > 0) {
st->print_cr("StackOverflowErrors=%d", _stack_overflow_errors);
}
}
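The counters above are plain ints bumped with Atomic::inc at the throw sites and read when the error report is written, with each count printed only if it is non-zero. A standalone sketch of that count-then-report pattern, using std::atomic as a stand-in for Atomic::inc:

#include <atomic>
#include <cstdio>

// Stand-ins for the Exceptions counters; incremented wherever the
// corresponding error is thrown.
static std::atomic<int> stack_overflow_errors{0};
static std::atomic<int> out_of_memory_errors{0};

static void print_count(const char* what, int count) {
  if (count > 0) {                       // only report counters that fired
    std::printf("%s=%d\n", what, count);
  }
}

int main() {
  stack_overflow_errors.fetch_add(1);    // e.g. from throw_stack_overflow_exception
  stack_overflow_errors.fetch_add(1);
  print_count("StackOverflowErrors", stack_overflow_errors.load());
  print_count("OutOfMemoryErrors",   out_of_memory_errors.load());   // stays silent
  return 0;
}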
// Implementation of ExceptionMark
ExceptionMark::ExceptionMark(Thread*& thread) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -102,6 +102,11 @@ class ThreadShadow: public CHeapObj<mtThread> {
class Exceptions {
static bool special_exception(Thread *thread, const char* file, int line, Handle exception);
static bool special_exception(Thread* thread, const char* file, int line, Symbol* name, const char* message);
// Count out-of-memory errors that are of interest for error diagnosis
static volatile int _out_of_memory_error_java_heap_errors;
static volatile int _out_of_memory_error_metaspace_errors;
static volatile int _out_of_memory_error_class_metaspace_errors;
public:
// this enum is defined to indicate whether it is safe to
// ignore the encoding scheme of the original message string.
@ -160,6 +165,14 @@ class Exceptions {
static void throw_stack_overflow_exception(Thread* thread, const char* file, int line, methodHandle method);
// Exception counting, for the error (hs_err) file, of interesting exceptions
// that may have caused a problem for the JVM
static volatile int _stack_overflow_errors;
static bool has_exception_counts();
static void count_out_of_memory_exceptions(Handle exception);
static void print_exception_counts_on_error(outputStream* st);
// for AbortVMOnException flag
NOT_PRODUCT(static void debug_check_abort(Handle exception, const char* message = NULL);)
NOT_PRODUCT(static void debug_check_abort(const char *value_string, const char* message = NULL);)

View File

@ -34,6 +34,15 @@
// Makes a string of the macro expansion of a
#define XSTR(a) STR(a)
// Apply pre-processor token pasting to the expansions of x and y.
// The token pasting operator (##) prevents its arguments from being
// expanded. This macro allows expansion of its arguments before the
// concatenation is performed. Note: One auxiliary level ought to be
// sufficient, but two are used because of bugs in some preprocessors.
#define PASTE_TOKENS(x, y) PASTE_TOKENS_AUX(x, y)
#define PASTE_TOKENS_AUX(x, y) PASTE_TOKENS_AUX2(x, y)
#define PASTE_TOKENS_AUX2(x, y) x ## y
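A small standalone example of why the indirection matters: with direct pasting, __LINE__ reaches the ## operator unexpanded, so every use would produce the same identifier; with the extra level, each use gets a line-numbered name, which is what the new STATIC_ASSERT in debug.hpp relies on. The macro and variable names below are illustrative only.

#include <cstdio>

// Indirect pasting, same idea as PASTE_TOKENS above: arguments are expanded
// before the ## concatenation happens.
#define PASTE(x, y)      PASTE_AUX(x, y)
#define PASTE_AUX(x, y)  x ## y

// Each use mints a distinct, line-numbered identifier.
int PASTE(counter_, __LINE__) = 1;   // e.g. counter_10
int PASTE(counter_, __LINE__) = 2;   // e.g. counter_11

// A direct x ## y macro would paste the literal token __LINE__ instead,
// so two uses in one scope would collide on the name counter___LINE__.

int main() {
  std::printf("distinct identifiers defined\n");
  return 0;
}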
// -DINCLUDE_<something>=0 | 1 can be specified on the command line to include
// or exclude functionality.

View File

@ -714,6 +714,24 @@ void VMError::report(outputStream* st) {
st->cr();
}
STEP(182, "(printing number of OutOfMemoryError and StackOverflow exceptions)")
if (_verbose && Exceptions::has_exception_counts()) {
st->print_cr("OutOfMemory and StackOverflow Exception counts:");
Exceptions::print_exception_counts_on_error(st);
st->cr();
}
STEP(185, "(printing compressed oops mode)")
if (_verbose && UseCompressedOops) {
Universe::print_compressed_oops_mode(st);
if (UseCompressedClassPointers) {
Metaspace::print_compressed_class_space(st);
}
st->cr();
}
STEP(190, "(printing heap information)" )
if (_verbose && Universe::is_fully_initialized()) {
@ -819,7 +837,7 @@ void VMError::report(outputStream* st) {
STEP(280, "(printing date and time)" )
if (_verbose) {
os::print_date_and_time(st);
os::print_date_and_time(st, buf, sizeof(buf));
st->cr();
}

Some files were not shown because too many files have changed in this diff.