Merge
This commit is contained in:
commit
32d59f1ed0
@ -62,7 +62,7 @@ define_pd_global(intx, StackRedPages, 1);
|
||||
// due to lack of optimization caused by C++ compiler bugs
|
||||
define_pd_global(intx, StackShadowPages, SOLARIS_ONLY(20) NOT_SOLARIS(6) DEBUG_ONLY(+2));
|
||||
#else
|
||||
define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1));
|
||||
define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+5));
|
||||
#endif // AMD64
|
||||
|
||||
define_pd_global(intx, PreInflateSpin, 10);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -319,24 +319,24 @@ void BlockListBuilder::set_leaders() {
|
||||
|
||||
case Bytecodes::_tableswitch: {
|
||||
// set block for each case
|
||||
Bytecode_tableswitch *switch_ = Bytecode_tableswitch_at(s.cur_bcp());
|
||||
int l = switch_->length();
|
||||
Bytecode_tableswitch sw(&s);
|
||||
int l = sw.length();
|
||||
for (int i = 0; i < l; i++) {
|
||||
make_block_at(cur_bci + switch_->dest_offset_at(i), current);
|
||||
make_block_at(cur_bci + sw.dest_offset_at(i), current);
|
||||
}
|
||||
make_block_at(cur_bci + switch_->default_offset(), current);
|
||||
make_block_at(cur_bci + sw.default_offset(), current);
|
||||
current = NULL;
|
||||
break;
|
||||
}
|
||||
|
||||
case Bytecodes::_lookupswitch: {
|
||||
// set block for each case
|
||||
Bytecode_lookupswitch *switch_ = Bytecode_lookupswitch_at(s.cur_bcp());
|
||||
int l = switch_->number_of_pairs();
|
||||
Bytecode_lookupswitch sw(&s);
|
||||
int l = sw.number_of_pairs();
|
||||
for (int i = 0; i < l; i++) {
|
||||
make_block_at(cur_bci + switch_->pair_at(i)->offset(), current);
|
||||
make_block_at(cur_bci + sw.pair_at(i).offset(), current);
|
||||
}
|
||||
make_block_at(cur_bci + switch_->default_offset(), current);
|
||||
make_block_at(cur_bci + sw.default_offset(), current);
|
||||
current = NULL;
|
||||
break;
|
||||
}
|
||||
@ -1275,15 +1275,15 @@ void GraphBuilder::ret(int local_index) {
|
||||
|
||||
|
||||
void GraphBuilder::table_switch() {
|
||||
Bytecode_tableswitch* switch_ = Bytecode_tableswitch_at(method()->code() + bci());
|
||||
const int l = switch_->length();
|
||||
Bytecode_tableswitch sw(stream());
|
||||
const int l = sw.length();
|
||||
if (CanonicalizeNodes && l == 1) {
|
||||
// total of 2 successors => use If instead of switch
|
||||
// Note: This code should go into the canonicalizer as soon as it can
|
||||
// can handle canonicalized forms that contain more than one node.
|
||||
Value key = append(new Constant(new IntConstant(switch_->low_key())));
|
||||
BlockBegin* tsux = block_at(bci() + switch_->dest_offset_at(0));
|
||||
BlockBegin* fsux = block_at(bci() + switch_->default_offset());
|
||||
Value key = append(new Constant(new IntConstant(sw.low_key())));
|
||||
BlockBegin* tsux = block_at(bci() + sw.dest_offset_at(0));
|
||||
BlockBegin* fsux = block_at(bci() + sw.default_offset());
|
||||
bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
|
||||
ValueStack* state_before = is_bb ? copy_state_before() : NULL;
|
||||
append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
|
||||
@ -1293,29 +1293,29 @@ void GraphBuilder::table_switch() {
|
||||
int i;
|
||||
bool has_bb = false;
|
||||
for (i = 0; i < l; i++) {
|
||||
sux->at_put(i, block_at(bci() + switch_->dest_offset_at(i)));
|
||||
if (switch_->dest_offset_at(i) < 0) has_bb = true;
|
||||
sux->at_put(i, block_at(bci() + sw.dest_offset_at(i)));
|
||||
if (sw.dest_offset_at(i) < 0) has_bb = true;
|
||||
}
|
||||
// add default successor
|
||||
sux->at_put(i, block_at(bci() + switch_->default_offset()));
|
||||
sux->at_put(i, block_at(bci() + sw.default_offset()));
|
||||
ValueStack* state_before = has_bb ? copy_state_before() : NULL;
|
||||
append(new TableSwitch(ipop(), sux, switch_->low_key(), state_before, has_bb));
|
||||
append(new TableSwitch(ipop(), sux, sw.low_key(), state_before, has_bb));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void GraphBuilder::lookup_switch() {
|
||||
Bytecode_lookupswitch* switch_ = Bytecode_lookupswitch_at(method()->code() + bci());
|
||||
const int l = switch_->number_of_pairs();
|
||||
Bytecode_lookupswitch sw(stream());
|
||||
const int l = sw.number_of_pairs();
|
||||
if (CanonicalizeNodes && l == 1) {
|
||||
// total of 2 successors => use If instead of switch
|
||||
// Note: This code should go into the canonicalizer as soon as it can
|
||||
// can handle canonicalized forms that contain more than one node.
|
||||
// simplify to If
|
||||
LookupswitchPair* pair = switch_->pair_at(0);
|
||||
Value key = append(new Constant(new IntConstant(pair->match())));
|
||||
BlockBegin* tsux = block_at(bci() + pair->offset());
|
||||
BlockBegin* fsux = block_at(bci() + switch_->default_offset());
|
||||
LookupswitchPair pair = sw.pair_at(0);
|
||||
Value key = append(new Constant(new IntConstant(pair.match())));
|
||||
BlockBegin* tsux = block_at(bci() + pair.offset());
|
||||
BlockBegin* fsux = block_at(bci() + sw.default_offset());
|
||||
bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
|
||||
ValueStack* state_before = is_bb ? copy_state_before() : NULL;
|
||||
append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
|
||||
@ -1326,13 +1326,13 @@ void GraphBuilder::lookup_switch() {
|
||||
int i;
|
||||
bool has_bb = false;
|
||||
for (i = 0; i < l; i++) {
|
||||
LookupswitchPair* pair = switch_->pair_at(i);
|
||||
if (pair->offset() < 0) has_bb = true;
|
||||
sux->at_put(i, block_at(bci() + pair->offset()));
|
||||
keys->at_put(i, pair->match());
|
||||
LookupswitchPair pair = sw.pair_at(i);
|
||||
if (pair.offset() < 0) has_bb = true;
|
||||
sux->at_put(i, block_at(bci() + pair.offset()));
|
||||
keys->at_put(i, pair.match());
|
||||
}
|
||||
// add default successor
|
||||
sux->at_put(i, block_at(bci() + switch_->default_offset()));
|
||||
sux->at_put(i, block_at(bci() + sw.default_offset()));
|
||||
ValueStack* state_before = has_bb ? copy_state_before() : NULL;
|
||||
append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb));
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -369,7 +369,7 @@ static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, meth
|
||||
if (branch_bci != InvocationEntryBci) {
|
||||
// Compute desination bci
|
||||
address pc = method()->code_base() + branch_bci;
|
||||
Bytecodes::Code branch = Bytecodes::code_at(pc, method());
|
||||
Bytecodes::Code branch = Bytecodes::code_at(method(), pc);
|
||||
int offset = 0;
|
||||
switch (branch) {
|
||||
case Bytecodes::_if_icmplt: case Bytecodes::_iflt:
|
||||
@ -659,14 +659,14 @@ JRT_END
|
||||
|
||||
|
||||
static klassOop resolve_field_return_klass(methodHandle caller, int bci, TRAPS) {
|
||||
Bytecode_field* field_access = Bytecode_field_at(caller, bci);
|
||||
Bytecode_field field_access(caller, bci);
|
||||
// This can be static or non-static field access
|
||||
Bytecodes::Code code = field_access->code();
|
||||
Bytecodes::Code code = field_access.code();
|
||||
|
||||
// We must load class, initialize class and resolvethe field
|
||||
FieldAccessInfo result; // initialize class if needed
|
||||
constantPoolHandle constants(THREAD, caller->constants());
|
||||
LinkResolver::resolve_field(result, constants, field_access->index(), Bytecodes::java_code(code), false, CHECK_NULL);
|
||||
LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK_NULL);
|
||||
return result.klass()();
|
||||
}
|
||||
|
||||
@ -767,7 +767,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
|
||||
|
||||
Events::log("patch_code @ " INTPTR_FORMAT , caller_frame.pc());
|
||||
|
||||
Bytecodes::Code code = Bytecode_at(caller_method->bcp_from(bci))->java_code();
|
||||
Bytecodes::Code code = caller_method()->java_code_at(bci);
|
||||
|
||||
#ifndef PRODUCT
|
||||
// this is used by assertions in the access_field_patching_id
|
||||
@ -779,11 +779,11 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
|
||||
Handle load_klass(THREAD, NULL); // oop needed by load_klass_patching code
|
||||
if (stub_id == Runtime1::access_field_patching_id) {
|
||||
|
||||
Bytecode_field* field_access = Bytecode_field_at(caller_method, bci);
|
||||
Bytecode_field field_access(caller_method, bci);
|
||||
FieldAccessInfo result; // initialize class if needed
|
||||
Bytecodes::Code code = field_access->code();
|
||||
Bytecodes::Code code = field_access.code();
|
||||
constantPoolHandle constants(THREAD, caller_method->constants());
|
||||
LinkResolver::resolve_field(result, constants, field_access->index(), Bytecodes::java_code(code), false, CHECK);
|
||||
LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK);
|
||||
patch_field_offset = result.field_offset();
|
||||
|
||||
// If we're patching a field which is volatile then at compile it
|
||||
@ -811,36 +811,36 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
|
||||
}
|
||||
break;
|
||||
case Bytecodes::_new:
|
||||
{ Bytecode_new* bnew = Bytecode_new_at(caller_method->bcp_from(bci));
|
||||
k = caller_method->constants()->klass_at(bnew->index(), CHECK);
|
||||
{ Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
|
||||
k = caller_method->constants()->klass_at(bnew.index(), CHECK);
|
||||
}
|
||||
break;
|
||||
case Bytecodes::_multianewarray:
|
||||
{ Bytecode_multianewarray* mna = Bytecode_multianewarray_at(caller_method->bcp_from(bci));
|
||||
k = caller_method->constants()->klass_at(mna->index(), CHECK);
|
||||
{ Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
|
||||
k = caller_method->constants()->klass_at(mna.index(), CHECK);
|
||||
}
|
||||
break;
|
||||
case Bytecodes::_instanceof:
|
||||
{ Bytecode_instanceof* io = Bytecode_instanceof_at(caller_method->bcp_from(bci));
|
||||
k = caller_method->constants()->klass_at(io->index(), CHECK);
|
||||
{ Bytecode_instanceof io(caller_method(), caller_method->bcp_from(bci));
|
||||
k = caller_method->constants()->klass_at(io.index(), CHECK);
|
||||
}
|
||||
break;
|
||||
case Bytecodes::_checkcast:
|
||||
{ Bytecode_checkcast* cc = Bytecode_checkcast_at(caller_method->bcp_from(bci));
|
||||
k = caller_method->constants()->klass_at(cc->index(), CHECK);
|
||||
{ Bytecode_checkcast cc(caller_method(), caller_method->bcp_from(bci));
|
||||
k = caller_method->constants()->klass_at(cc.index(), CHECK);
|
||||
}
|
||||
break;
|
||||
case Bytecodes::_anewarray:
|
||||
{ Bytecode_anewarray* anew = Bytecode_anewarray_at(caller_method->bcp_from(bci));
|
||||
klassOop ek = caller_method->constants()->klass_at(anew->index(), CHECK);
|
||||
{ Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci));
|
||||
klassOop ek = caller_method->constants()->klass_at(anew.index(), CHECK);
|
||||
k = Klass::cast(ek)->array_klass(CHECK);
|
||||
}
|
||||
break;
|
||||
case Bytecodes::_ldc:
|
||||
case Bytecodes::_ldc_w:
|
||||
{
|
||||
Bytecode_loadconstant* cc = Bytecode_loadconstant_at(caller_method, bci);
|
||||
k = cc->resolve_constant(CHECK);
|
||||
Bytecode_loadconstant cc(caller_method, bci);
|
||||
k = cc.resolve_constant(CHECK);
|
||||
assert(k != NULL && !k->is_klass(), "must be class mirror or other Java constant");
|
||||
}
|
||||
break;
|
||||
|
@ -769,15 +769,15 @@ void BCEscapeAnalyzer::iterate_one_block(ciBlock *blk, StateInfo &state, Growabl
|
||||
case Bytecodes::_tableswitch:
|
||||
{
|
||||
state.spop();
|
||||
Bytecode_tableswitch* switch_ = Bytecode_tableswitch_at(s.cur_bcp());
|
||||
int len = switch_->length();
|
||||
Bytecode_tableswitch sw(&s);
|
||||
int len = sw.length();
|
||||
int dest_bci;
|
||||
for (int i = 0; i < len; i++) {
|
||||
dest_bci = s.cur_bci() + switch_->dest_offset_at(i);
|
||||
dest_bci = s.cur_bci() + sw.dest_offset_at(i);
|
||||
assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
|
||||
successors.push(_methodBlocks->block_containing(dest_bci));
|
||||
}
|
||||
dest_bci = s.cur_bci() + switch_->default_offset();
|
||||
dest_bci = s.cur_bci() + sw.default_offset();
|
||||
assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
|
||||
successors.push(_methodBlocks->block_containing(dest_bci));
|
||||
assert(s.next_bci() == limit_bci, "branch must end block");
|
||||
@ -787,15 +787,15 @@ void BCEscapeAnalyzer::iterate_one_block(ciBlock *blk, StateInfo &state, Growabl
|
||||
case Bytecodes::_lookupswitch:
|
||||
{
|
||||
state.spop();
|
||||
Bytecode_lookupswitch* switch_ = Bytecode_lookupswitch_at(s.cur_bcp());
|
||||
int len = switch_->number_of_pairs();
|
||||
Bytecode_lookupswitch sw(&s);
|
||||
int len = sw.number_of_pairs();
|
||||
int dest_bci;
|
||||
for (int i = 0; i < len; i++) {
|
||||
dest_bci = s.cur_bci() + switch_->pair_at(i)->offset();
|
||||
dest_bci = s.cur_bci() + sw.pair_at(i).offset();
|
||||
assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
|
||||
successors.push(_methodBlocks->block_containing(dest_bci));
|
||||
}
|
||||
dest_bci = s.cur_bci() + switch_->default_offset();
|
||||
dest_bci = s.cur_bci() + sw.default_offset();
|
||||
assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
|
||||
successors.push(_methodBlocks->block_containing(dest_bci));
|
||||
fall_through = false;
|
||||
|
@ -409,15 +409,15 @@ ciKlass* ciEnv::get_klass_by_name_impl(ciKlass* accessing_klass,
|
||||
} else {
|
||||
fail_type = _unloaded_ciinstance_klass;
|
||||
}
|
||||
klassOop found_klass;
|
||||
KlassHandle found_klass;
|
||||
if (!require_local) {
|
||||
found_klass =
|
||||
SystemDictionary::find_constrained_instance_or_array_klass(sym, loader,
|
||||
KILL_COMPILE_ON_FATAL_(fail_type));
|
||||
klassOop kls = SystemDictionary::find_constrained_instance_or_array_klass(
|
||||
sym, loader, KILL_COMPILE_ON_FATAL_(fail_type));
|
||||
found_klass = KlassHandle(THREAD, kls);
|
||||
} else {
|
||||
found_klass =
|
||||
SystemDictionary::find_instance_or_array_klass(sym, loader, domain,
|
||||
KILL_COMPILE_ON_FATAL_(fail_type));
|
||||
klassOop kls = SystemDictionary::find_instance_or_array_klass(
|
||||
sym, loader, domain, KILL_COMPILE_ON_FATAL_(fail_type));
|
||||
found_klass = KlassHandle(THREAD, kls);
|
||||
}
|
||||
|
||||
// If we fail to find an array klass, look again for its element type.
|
||||
@ -444,9 +444,9 @@ ciKlass* ciEnv::get_klass_by_name_impl(ciKlass* accessing_klass,
|
||||
}
|
||||
}
|
||||
|
||||
if (found_klass != NULL) {
|
||||
if (found_klass() != NULL) {
|
||||
// Found it. Build a CI handle.
|
||||
return get_object(found_klass)->as_klass();
|
||||
return get_object(found_klass())->as_klass();
|
||||
}
|
||||
|
||||
if (require_local) return NULL;
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -144,7 +144,7 @@ class ciMethod : public ciObject {
|
||||
|
||||
Bytecodes::Code java_code_at_bci(int bci) {
|
||||
address bcp = code() + bci;
|
||||
return Bytecodes::java_code_at(bcp);
|
||||
return Bytecodes::java_code_at(NULL, bcp);
|
||||
}
|
||||
BCEscapeAnalyzer *get_bcea();
|
||||
ciMethodBlocks *get_method_blocks();
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -175,15 +175,15 @@ void ciMethodBlocks::do_analysis() {
|
||||
case Bytecodes::_tableswitch :
|
||||
{
|
||||
cur_block->set_control_bci(bci);
|
||||
Bytecode_tableswitch* switch_ = Bytecode_tableswitch_at(s.cur_bcp());
|
||||
int len = switch_->length();
|
||||
Bytecode_tableswitch sw(&s);
|
||||
int len = sw.length();
|
||||
ciBlock *dest;
|
||||
int dest_bci;
|
||||
for (int i = 0; i < len; i++) {
|
||||
dest_bci = s.cur_bci() + switch_->dest_offset_at(i);
|
||||
dest_bci = s.cur_bci() + sw.dest_offset_at(i);
|
||||
dest = make_block_at(dest_bci);
|
||||
}
|
||||
dest_bci = s.cur_bci() + switch_->default_offset();
|
||||
dest_bci = s.cur_bci() + sw.default_offset();
|
||||
make_block_at(dest_bci);
|
||||
if (s.next_bci() < limit_bci) {
|
||||
dest = make_block_at(s.next_bci());
|
||||
@ -194,15 +194,15 @@ void ciMethodBlocks::do_analysis() {
|
||||
case Bytecodes::_lookupswitch:
|
||||
{
|
||||
cur_block->set_control_bci(bci);
|
||||
Bytecode_lookupswitch* switch_ = Bytecode_lookupswitch_at(s.cur_bcp());
|
||||
int len = switch_->number_of_pairs();
|
||||
Bytecode_lookupswitch sw(&s);
|
||||
int len = sw.number_of_pairs();
|
||||
ciBlock *dest;
|
||||
int dest_bci;
|
||||
for (int i = 0; i < len; i++) {
|
||||
dest_bci = s.cur_bci() + switch_->pair_at(i)->offset();
|
||||
dest_bci = s.cur_bci() + sw.pair_at(i).offset();
|
||||
dest = make_block_at(dest_bci);
|
||||
}
|
||||
dest_bci = s.cur_bci() + switch_->default_offset();
|
||||
dest_bci = s.cur_bci() + sw.default_offset();
|
||||
dest = make_block_at(dest_bci);
|
||||
if (s.next_bci() < limit_bci) {
|
||||
dest = make_block_at(s.next_bci());
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -78,8 +78,8 @@ private:
|
||||
else { assert(!is_wide(), "must not be a wide instruction"); }
|
||||
}
|
||||
|
||||
Bytecode* bytecode() const { return Bytecode_at(_bc_start); }
|
||||
Bytecode* next_bytecode() const { return Bytecode_at(_pc); }
|
||||
Bytecode bytecode() const { return Bytecode(this, _bc_start); }
|
||||
Bytecode next_bytecode() const { return Bytecode(this, _pc); }
|
||||
|
||||
public:
|
||||
// End-Of-Bytecodes
|
||||
@ -151,11 +151,11 @@ public:
|
||||
bool has_cache_index() const { return Bytecodes::uses_cp_cache(cur_bc_raw()); }
|
||||
|
||||
int get_index_u1() const {
|
||||
return bytecode()->get_index_u1(cur_bc_raw());
|
||||
return bytecode().get_index_u1(cur_bc_raw());
|
||||
}
|
||||
|
||||
int get_index_u1_cpcache() const {
|
||||
return bytecode()->get_index_u1_cpcache(cur_bc_raw());
|
||||
return bytecode().get_index_u1_cpcache(cur_bc_raw());
|
||||
}
|
||||
|
||||
// Get a byte index following this bytecode.
|
||||
@ -169,29 +169,29 @@ public:
|
||||
|
||||
// Get 2-byte index (byte swapping depending on which bytecode)
|
||||
int get_index_u2(bool is_wide = false) const {
|
||||
return bytecode()->get_index_u2(cur_bc_raw(), is_wide);
|
||||
return bytecode().get_index_u2(cur_bc_raw(), is_wide);
|
||||
}
|
||||
|
||||
// Get 2-byte index in native byte order. (Rewriter::rewrite makes these.)
|
||||
int get_index_u2_cpcache() const {
|
||||
return bytecode()->get_index_u2_cpcache(cur_bc_raw());
|
||||
return bytecode().get_index_u2_cpcache(cur_bc_raw());
|
||||
}
|
||||
|
||||
// Get 4-byte index, for invokedynamic.
|
||||
int get_index_u4() const {
|
||||
return bytecode()->get_index_u4(cur_bc_raw());
|
||||
return bytecode().get_index_u4(cur_bc_raw());
|
||||
}
|
||||
|
||||
bool has_index_u4() const {
|
||||
return bytecode()->has_index_u4(cur_bc_raw());
|
||||
return bytecode().has_index_u4(cur_bc_raw());
|
||||
}
|
||||
|
||||
// Get dimensions byte (multinewarray)
|
||||
int get_dimensions() const { return *(unsigned char*)(_pc-1); }
|
||||
|
||||
// Sign-extended index byte/short, no widening
|
||||
int get_constant_u1() const { return bytecode()->get_constant_u1(instruction_size()-1, cur_bc_raw()); }
|
||||
int get_constant_u2(bool is_wide = false) const { return bytecode()->get_constant_u2(instruction_size()-2, cur_bc_raw(), is_wide); }
|
||||
int get_constant_u1() const { return bytecode().get_constant_u1(instruction_size()-1, cur_bc_raw()); }
|
||||
int get_constant_u2(bool is_wide = false) const { return bytecode().get_constant_u2(instruction_size()-2, cur_bc_raw(), is_wide); }
|
||||
|
||||
// Get a byte signed constant for "iinc". Invalid for other bytecodes.
|
||||
// If prefixed with a wide bytecode, get a wide constant
|
||||
@ -199,18 +199,18 @@ public:
|
||||
|
||||
// 2-byte branch offset from current pc
|
||||
int get_dest() const {
|
||||
return cur_bci() + bytecode()->get_offset_s2(cur_bc_raw());
|
||||
return cur_bci() + bytecode().get_offset_s2(cur_bc_raw());
|
||||
}
|
||||
|
||||
// 2-byte branch offset from next pc
|
||||
int next_get_dest() const {
|
||||
assert(_pc < _end, "");
|
||||
return next_bci() + next_bytecode()->get_offset_s2(Bytecodes::_ifeq);
|
||||
return next_bci() + next_bytecode().get_offset_s2(Bytecodes::_ifeq);
|
||||
}
|
||||
|
||||
// 4-byte branch offset from current pc
|
||||
int get_far_dest() const {
|
||||
return cur_bci() + bytecode()->get_offset_s4(cur_bc_raw());
|
||||
return cur_bci() + bytecode().get_offset_s4(cur_bc_raw());
|
||||
}
|
||||
|
||||
// For a lookup or switch table, return target destination
|
||||
@ -407,4 +407,11 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
|
||||
// Implementation for declarations in bytecode.hpp
|
||||
Bytecode::Bytecode(const ciBytecodeStream* stream, address bcp): _bcp(bcp != NULL ? bcp : stream->cur_bcp()), _code(Bytecodes::code_at(NULL, addr_at(0))) {}
|
||||
Bytecode_lookupswitch::Bytecode_lookupswitch(const ciBytecodeStream* stream): Bytecode(stream) { verify(); }
|
||||
Bytecode_tableswitch::Bytecode_tableswitch(const ciBytecodeStream* stream): Bytecode(stream) { verify(); }
|
||||
|
||||
#endif // SHARE_VM_CI_CISTREAMS_HPP
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -1698,18 +1698,17 @@ ciTypeFlow::Block::successors(ciBytecodeStream* str,
|
||||
break;
|
||||
|
||||
case Bytecodes::_tableswitch: {
|
||||
Bytecode_tableswitch *tableswitch =
|
||||
Bytecode_tableswitch_at(str->cur_bcp());
|
||||
Bytecode_tableswitch tableswitch(str);
|
||||
|
||||
int len = tableswitch->length();
|
||||
int len = tableswitch.length();
|
||||
_successors =
|
||||
new (arena) GrowableArray<Block*>(arena, len+1, 0, NULL);
|
||||
int bci = current_bci + tableswitch->default_offset();
|
||||
int bci = current_bci + tableswitch.default_offset();
|
||||
Block* block = analyzer->block_at(bci, jsrs);
|
||||
assert(_successors->length() == SWITCH_DEFAULT, "");
|
||||
_successors->append(block);
|
||||
while (--len >= 0) {
|
||||
int bci = current_bci + tableswitch->dest_offset_at(len);
|
||||
int bci = current_bci + tableswitch.dest_offset_at(len);
|
||||
block = analyzer->block_at(bci, jsrs);
|
||||
assert(_successors->length() >= SWITCH_CASES, "");
|
||||
_successors->append_if_missing(block);
|
||||
@ -1718,19 +1717,18 @@ ciTypeFlow::Block::successors(ciBytecodeStream* str,
|
||||
}
|
||||
|
||||
case Bytecodes::_lookupswitch: {
|
||||
Bytecode_lookupswitch *lookupswitch =
|
||||
Bytecode_lookupswitch_at(str->cur_bcp());
|
||||
Bytecode_lookupswitch lookupswitch(str);
|
||||
|
||||
int npairs = lookupswitch->number_of_pairs();
|
||||
int npairs = lookupswitch.number_of_pairs();
|
||||
_successors =
|
||||
new (arena) GrowableArray<Block*>(arena, npairs+1, 0, NULL);
|
||||
int bci = current_bci + lookupswitch->default_offset();
|
||||
int bci = current_bci + lookupswitch.default_offset();
|
||||
Block* block = analyzer->block_at(bci, jsrs);
|
||||
assert(_successors->length() == SWITCH_DEFAULT, "");
|
||||
_successors->append(block);
|
||||
while(--npairs >= 0) {
|
||||
LookupswitchPair *pair = lookupswitch->pair_at(npairs);
|
||||
int bci = current_bci + pair->offset();
|
||||
LookupswitchPair pair = lookupswitch.pair_at(npairs);
|
||||
int bci = current_bci + pair.offset();
|
||||
Block* block = analyzer->block_at(bci, jsrs);
|
||||
assert(_successors->length() >= SWITCH_CASES, "");
|
||||
_successors->append_if_missing(block);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -1863,9 +1863,9 @@ void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map
|
||||
#ifndef SHARK
|
||||
if (!method()->is_native()) {
|
||||
SimpleScopeDesc ssd(this, fr.pc());
|
||||
Bytecode_invoke* call = Bytecode_invoke_at(ssd.method(), ssd.bci());
|
||||
bool has_receiver = call->has_receiver();
|
||||
symbolOop signature = call->signature();
|
||||
Bytecode_invoke call(ssd.method(), ssd.bci());
|
||||
bool has_receiver = call.has_receiver();
|
||||
symbolOop signature = call.signature();
|
||||
fr.oops_compiled_arguments_do(signature, has_receiver, reg_map, f);
|
||||
}
|
||||
#endif // !SHARK
|
||||
@ -2698,8 +2698,7 @@ void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin,
|
||||
} else if (sd->method()->is_native()) {
|
||||
st->print("method is native");
|
||||
} else {
|
||||
address bcp = sd->method()->bcp_from(sd->bci());
|
||||
Bytecodes::Code bc = Bytecodes::java_code_at(bcp);
|
||||
Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
|
||||
st->print(";*%s", Bytecodes::name(bc));
|
||||
switch (bc) {
|
||||
case Bytecodes::_invokevirtual:
|
||||
@ -2707,10 +2706,10 @@ void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin,
|
||||
case Bytecodes::_invokestatic:
|
||||
case Bytecodes::_invokeinterface:
|
||||
{
|
||||
Bytecode_invoke* invoke = Bytecode_invoke_at(sd->method(), sd->bci());
|
||||
Bytecode_invoke invoke(sd->method(), sd->bci());
|
||||
st->print(" ");
|
||||
if (invoke->name() != NULL)
|
||||
invoke->name()->print_symbol_on(st);
|
||||
if (invoke.name() != NULL)
|
||||
invoke.name()->print_symbol_on(st);
|
||||
else
|
||||
st->print("<UNKNOWN>");
|
||||
break;
|
||||
@ -2720,10 +2719,10 @@ void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin,
|
||||
case Bytecodes::_getstatic:
|
||||
case Bytecodes::_putstatic:
|
||||
{
|
||||
Bytecode_field* field = Bytecode_field_at(sd->method(), sd->bci());
|
||||
Bytecode_field field(sd->method(), sd->bci());
|
||||
st->print(" ");
|
||||
if (field->name() != NULL)
|
||||
field->name()->print_symbol_on(st);
|
||||
if (field.name() != NULL)
|
||||
field.name()->print_symbol_on(st);
|
||||
else
|
||||
st->print("<UNKNOWN>");
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -286,16 +286,15 @@ void MethodLiveness::init_basic_blocks() {
|
||||
break;
|
||||
case Bytecodes::_tableswitch:
|
||||
{
|
||||
Bytecode_tableswitch *tableswitch =
|
||||
Bytecode_tableswitch_at(bytes.cur_bcp());
|
||||
Bytecode_tableswitch tableswitch(&bytes);
|
||||
|
||||
int len = tableswitch->length();
|
||||
int len = tableswitch.length();
|
||||
|
||||
dest = _block_map->at(bci + tableswitch->default_offset());
|
||||
dest = _block_map->at(bci + tableswitch.default_offset());
|
||||
assert(dest != NULL, "branch desination must start a block.");
|
||||
dest->add_normal_predecessor(current_block);
|
||||
while (--len >= 0) {
|
||||
dest = _block_map->at(bci + tableswitch->dest_offset_at(len));
|
||||
dest = _block_map->at(bci + tableswitch.dest_offset_at(len));
|
||||
assert(dest != NULL, "branch desination must start a block.");
|
||||
dest->add_normal_predecessor(current_block);
|
||||
}
|
||||
@ -304,17 +303,16 @@ void MethodLiveness::init_basic_blocks() {
|
||||
|
||||
case Bytecodes::_lookupswitch:
|
||||
{
|
||||
Bytecode_lookupswitch *lookupswitch =
|
||||
Bytecode_lookupswitch_at(bytes.cur_bcp());
|
||||
Bytecode_lookupswitch lookupswitch(&bytes);
|
||||
|
||||
int npairs = lookupswitch->number_of_pairs();
|
||||
int npairs = lookupswitch.number_of_pairs();
|
||||
|
||||
dest = _block_map->at(bci + lookupswitch->default_offset());
|
||||
dest = _block_map->at(bci + lookupswitch.default_offset());
|
||||
assert(dest != NULL, "branch desination must start a block.");
|
||||
dest->add_normal_predecessor(current_block);
|
||||
while(--npairs >= 0) {
|
||||
LookupswitchPair *pair = lookupswitch->pair_at(npairs);
|
||||
dest = _block_map->at( bci + pair->offset());
|
||||
LookupswitchPair pair = lookupswitch.pair_at(npairs);
|
||||
dest = _block_map->at( bci + pair.offset());
|
||||
assert(dest != NULL, "branch desination must start a block.");
|
||||
dest->add_normal_predecessor(current_block);
|
||||
}
|
||||
|
@ -4979,6 +4979,7 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
|
||||
if (should_unload_classes()) {
|
||||
CodeCache::gc_epilogue();
|
||||
}
|
||||
JvmtiExport::gc_epilogue();
|
||||
|
||||
// If we encountered any (marking stack / work queue) overflow
|
||||
// events during the current CMS cycle, take appropriate
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -458,6 +458,7 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
|
||||
_marking_task_overhead(1.0),
|
||||
_cleanup_sleep_factor(0.0),
|
||||
_cleanup_task_overhead(1.0),
|
||||
_cleanup_list("Cleanup List"),
|
||||
_region_bm(max_regions, false /* in_resource_area*/),
|
||||
_card_bm((rs.size() + CardTableModRefBS::card_size - 1) >>
|
||||
CardTableModRefBS::card_shift,
|
||||
@ -521,12 +522,6 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
|
||||
SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
|
||||
satb_qs.set_buffer_size(G1SATBBufferSize);
|
||||
|
||||
int size = (int) MAX2(ParallelGCThreads, (size_t)1);
|
||||
_par_cleanup_thread_state = NEW_C_HEAP_ARRAY(ParCleanupThreadState*, size);
|
||||
for (int i = 0 ; i < size; i++) {
|
||||
_par_cleanup_thread_state[i] = new ParCleanupThreadState;
|
||||
}
|
||||
|
||||
_tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_task_num);
|
||||
_accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_task_num);
|
||||
|
||||
@ -711,11 +706,6 @@ void ConcurrentMark::set_non_marking_state() {
|
||||
}
|
||||
|
||||
ConcurrentMark::~ConcurrentMark() {
|
||||
int size = (int) MAX2(ParallelGCThreads, (size_t)1);
|
||||
for (int i = 0; i < size; i++) delete _par_cleanup_thread_state[i];
|
||||
FREE_C_HEAP_ARRAY(ParCleanupThreadState*,
|
||||
_par_cleanup_thread_state);
|
||||
|
||||
for (int i = 0; i < (int) _max_task_num; ++i) {
|
||||
delete _task_queues->queue(i);
|
||||
delete _tasks[i];
|
||||
@ -1171,12 +1161,12 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
|
||||
if (G1TraceMarkStackOverflow)
|
||||
gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
|
||||
} else {
|
||||
SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
|
||||
// We're done with marking.
|
||||
// This is the end of the marking cycle, we're expected all
|
||||
// threads to have SATB queues with active set to true.
|
||||
JavaThread::satb_mark_queue_set().set_active_all_threads(
|
||||
false, /* new active value */
|
||||
true /* expected_active */);
|
||||
satb_mq_set.set_active_all_threads(false, /* new active value */
|
||||
true /* expected_active */);
|
||||
|
||||
if (VerifyDuringGC) {
|
||||
HandleMark hm; // handle scope
|
||||
@ -1510,21 +1500,20 @@ class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
|
||||
size_t _max_live_bytes;
|
||||
size_t _regions_claimed;
|
||||
size_t _freed_bytes;
|
||||
size_t _cleared_h_regions;
|
||||
size_t _freed_regions;
|
||||
UncleanRegionList* _unclean_region_list;
|
||||
FreeRegionList _local_cleanup_list;
|
||||
HumongousRegionSet _humongous_proxy_set;
|
||||
double _claimed_region_time;
|
||||
double _max_region_time;
|
||||
|
||||
public:
|
||||
G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
|
||||
UncleanRegionList* list,
|
||||
int worker_num);
|
||||
size_t freed_bytes() { return _freed_bytes; }
|
||||
size_t cleared_h_regions() { return _cleared_h_regions; }
|
||||
size_t freed_regions() { return _freed_regions; }
|
||||
UncleanRegionList* unclean_region_list() {
|
||||
return _unclean_region_list;
|
||||
FreeRegionList* local_cleanup_list() {
|
||||
return &_local_cleanup_list;
|
||||
}
|
||||
HumongousRegionSet* humongous_proxy_set() {
|
||||
return &_humongous_proxy_set;
|
||||
}
|
||||
|
||||
bool doHeapRegion(HeapRegion *r);
|
||||
@ -1537,25 +1526,22 @@ public:
|
||||
|
||||
class G1ParNoteEndTask: public AbstractGangTask {
|
||||
friend class G1NoteEndOfConcMarkClosure;
|
||||
|
||||
protected:
|
||||
G1CollectedHeap* _g1h;
|
||||
size_t _max_live_bytes;
|
||||
size_t _freed_bytes;
|
||||
ConcurrentMark::ParCleanupThreadState** _par_cleanup_thread_state;
|
||||
FreeRegionList* _cleanup_list;
|
||||
|
||||
public:
|
||||
G1ParNoteEndTask(G1CollectedHeap* g1h,
|
||||
ConcurrentMark::ParCleanupThreadState**
|
||||
par_cleanup_thread_state) :
|
||||
FreeRegionList* cleanup_list) :
|
||||
AbstractGangTask("G1 note end"), _g1h(g1h),
|
||||
_max_live_bytes(0), _freed_bytes(0),
|
||||
_par_cleanup_thread_state(par_cleanup_thread_state)
|
||||
{}
|
||||
_max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
|
||||
|
||||
void work(int i) {
|
||||
double start = os::elapsedTime();
|
||||
G1NoteEndOfConcMarkClosure g1_note_end(_g1h,
|
||||
&_par_cleanup_thread_state[i]->list,
|
||||
i);
|
||||
G1NoteEndOfConcMarkClosure g1_note_end(_g1h, i);
|
||||
if (G1CollectedHeap::use_parallel_gc_threads()) {
|
||||
_g1h->heap_region_par_iterate_chunked(&g1_note_end, i,
|
||||
HeapRegion::NoteEndClaimValue);
|
||||
@ -1564,14 +1550,18 @@ public:
|
||||
}
|
||||
assert(g1_note_end.complete(), "Shouldn't have yielded!");
|
||||
|
||||
// Now finish up freeing the current thread's regions.
|
||||
_g1h->finish_free_region_work(g1_note_end.freed_bytes(),
|
||||
g1_note_end.cleared_h_regions(),
|
||||
0, NULL);
|
||||
// Now update the lists
|
||||
_g1h->update_sets_after_freeing_regions(g1_note_end.freed_bytes(),
|
||||
NULL /* free_list */,
|
||||
g1_note_end.humongous_proxy_set(),
|
||||
true /* par */);
|
||||
{
|
||||
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
|
||||
_max_live_bytes += g1_note_end.max_live_bytes();
|
||||
_freed_bytes += g1_note_end.freed_bytes();
|
||||
|
||||
_cleanup_list->add_as_tail(g1_note_end.local_cleanup_list());
|
||||
assert(g1_note_end.local_cleanup_list()->is_empty(), "post-condition");
|
||||
}
|
||||
double end = os::elapsedTime();
|
||||
if (G1PrintParCleanupStats) {
|
||||
@ -1612,30 +1602,28 @@ public:
|
||||
|
||||
G1NoteEndOfConcMarkClosure::
|
||||
G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
|
||||
UncleanRegionList* list,
|
||||
int worker_num)
|
||||
: _g1(g1), _worker_num(worker_num),
|
||||
_max_live_bytes(0), _regions_claimed(0),
|
||||
_freed_bytes(0), _cleared_h_regions(0), _freed_regions(0),
|
||||
_freed_bytes(0),
|
||||
_claimed_region_time(0.0), _max_region_time(0.0),
|
||||
_unclean_region_list(list)
|
||||
{}
|
||||
_local_cleanup_list("Local Cleanup List"),
|
||||
_humongous_proxy_set("Local Cleanup Humongous Proxy Set") { }
|
||||
|
||||
bool G1NoteEndOfConcMarkClosure::doHeapRegion(HeapRegion *r) {
|
||||
bool G1NoteEndOfConcMarkClosure::doHeapRegion(HeapRegion *hr) {
|
||||
// We use a claim value of zero here because all regions
|
||||
// were claimed with value 1 in the FinalCount task.
|
||||
r->reset_gc_time_stamp();
|
||||
if (!r->continuesHumongous()) {
|
||||
hr->reset_gc_time_stamp();
|
||||
if (!hr->continuesHumongous()) {
|
||||
double start = os::elapsedTime();
|
||||
_regions_claimed++;
|
||||
r->note_end_of_marking();
|
||||
_max_live_bytes += r->max_live_bytes();
|
||||
_g1->free_region_if_totally_empty_work(r,
|
||||
_freed_bytes,
|
||||
_cleared_h_regions,
|
||||
_freed_regions,
|
||||
_unclean_region_list,
|
||||
true /*par*/);
|
||||
hr->note_end_of_marking();
|
||||
_max_live_bytes += hr->max_live_bytes();
|
||||
_g1->free_region_if_totally_empty(hr,
|
||||
&_freed_bytes,
|
||||
&_local_cleanup_list,
|
||||
&_humongous_proxy_set,
|
||||
true /* par */);
|
||||
double region_time = (os::elapsedTime() - start);
|
||||
_claimed_region_time += region_time;
|
||||
if (region_time > _max_region_time) _max_region_time = region_time;
|
||||
@ -1655,6 +1643,8 @@ void ConcurrentMark::cleanup() {
|
||||
return;
|
||||
}
|
||||
|
||||
g1h->verify_region_sets_optional();
|
||||
|
||||
if (VerifyDuringGC) {
|
||||
HandleMark hm; // handle scope
|
||||
gclog_or_tty->print(" VerifyDuringGC:(before)");
|
||||
@ -1719,7 +1709,7 @@ void ConcurrentMark::cleanup() {
|
||||
|
||||
// Note end of marking in all heap regions.
|
||||
double note_end_start = os::elapsedTime();
|
||||
G1ParNoteEndTask g1_par_note_end_task(g1h, _par_cleanup_thread_state);
|
||||
G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
|
||||
if (G1CollectedHeap::use_parallel_gc_threads()) {
|
||||
int n_workers = g1h->workers()->total_workers();
|
||||
g1h->set_par_threads(n_workers);
|
||||
@ -1731,9 +1721,14 @@ void ConcurrentMark::cleanup() {
|
||||
} else {
|
||||
g1_par_note_end_task.work(0);
|
||||
}
|
||||
g1h->set_unclean_regions_coming(true);
|
||||
|
||||
if (!cleanup_list_is_empty()) {
|
||||
// The cleanup list is not empty, so we'll have to process it
|
||||
// concurrently. Notify anyone else that might be wanting free
|
||||
// regions that there will be more free regions coming soon.
|
||||
g1h->set_free_regions_coming();
|
||||
}
|
||||
double note_end_end = os::elapsedTime();
|
||||
// Tell the mutators that there might be unclean regions coming...
|
||||
if (G1PrintParCleanupStats) {
|
||||
gclog_or_tty->print_cr(" note end of marking: %8.3f ms.",
|
||||
(note_end_end - note_end_start)*1000.0);
|
||||
@ -1799,33 +1794,63 @@ void ConcurrentMark::cleanup() {
|
||||
/* silent */ false,
|
||||
/* prev marking */ true);
|
||||
}
|
||||
|
||||
g1h->verify_region_sets_optional();
|
||||
}
|
||||
|
||||
void ConcurrentMark::completeCleanup() {
|
||||
// A full collection intervened.
|
||||
if (has_aborted()) return;
|
||||
|
||||
int first = 0;
|
||||
int last = (int)MAX2(ParallelGCThreads, (size_t)1);
|
||||
for (int t = 0; t < last; t++) {
|
||||
UncleanRegionList* list = &_par_cleanup_thread_state[t]->list;
|
||||
assert(list->well_formed(), "Inv");
|
||||
HeapRegion* hd = list->hd();
|
||||
while (hd != NULL) {
|
||||
// Now finish up the other stuff.
|
||||
hd->rem_set()->clear();
|
||||
HeapRegion* next_hd = hd->next_from_unclean_list();
|
||||
(void)list->pop();
|
||||
assert(list->hd() == next_hd, "how not?");
|
||||
_g1h->put_region_on_unclean_list(hd);
|
||||
if (!hd->isHumongous()) {
|
||||
// Add this to the _free_regions count by 1.
|
||||
_g1h->finish_free_region_work(0, 0, 1, NULL);
|
||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||
|
||||
_cleanup_list.verify_optional();
|
||||
FreeRegionList local_free_list("Local Cleanup List");
|
||||
|
||||
if (G1ConcRegionFreeingVerbose) {
|
||||
gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
|
||||
"cleanup list has "SIZE_FORMAT" entries",
|
||||
_cleanup_list.length());
|
||||
}
|
||||
|
||||
// Noone else should be accessing the _cleanup_list at this point,
|
||||
// so it's not necessary to take any locks
|
||||
while (!_cleanup_list.is_empty()) {
|
||||
HeapRegion* hr = _cleanup_list.remove_head();
|
||||
assert(hr != NULL, "the list was not empty");
|
||||
hr->rem_set()->clear();
|
||||
local_free_list.add_as_tail(hr);
|
||||
|
||||
// Instead of adding one region at a time to the secondary_free_list,
|
||||
// we accumulate them in the local list and move them a few at a
|
||||
// time. This also cuts down on the number of notify_all() calls
|
||||
// we do during this process. We'll also append the local list when
|
||||
// _cleanup_list is empty (which means we just removed the last
|
||||
// region from the _cleanup_list).
|
||||
if ((local_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
|
||||
_cleanup_list.is_empty()) {
|
||||
if (G1ConcRegionFreeingVerbose) {
|
||||
gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
|
||||
"appending "SIZE_FORMAT" entries to the "
|
||||
"secondary_free_list, clean list still has "
|
||||
SIZE_FORMAT" entries",
|
||||
local_free_list.length(),
|
||||
_cleanup_list.length());
|
||||
}
|
||||
|
||||
{
|
||||
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
|
||||
g1h->secondary_free_list_add_as_tail(&local_free_list);
|
||||
SecondaryFreeList_lock->notify_all();
|
||||
}
|
||||
|
||||
if (G1StressConcRegionFreeing) {
|
||||
for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
|
||||
os::sleep(Thread::current(), (jlong) 1, false);
|
||||
}
|
||||
}
|
||||
hd = list->hd();
|
||||
assert(hd == next_hd, "how not?");
|
||||
}
|
||||
}
|
||||
assert(local_free_list.is_empty(), "post-condition");
|
||||
}
|
||||
|
||||
bool G1CMIsAliveClosure::do_object_b(oop obj) {
|
||||
@ -2897,9 +2922,9 @@ public:
|
||||
virtual void do_oop( oop* p) { do_oop_work(p); }
|
||||
|
||||
template <class T> void do_oop_work(T* p) {
|
||||
assert(_g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
|
||||
assert(!_g1h->heap_region_containing((HeapWord*) p)->is_on_free_list(),
|
||||
"invariant");
|
||||
assert( _g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
|
||||
assert(!_g1h->is_on_free_list(
|
||||
_g1h->heap_region_containing((HeapWord*) p)), "invariant");
|
||||
|
||||
oop obj = oopDesc::load_decode_heap_oop(p);
|
||||
if (_cm->verbose_high())
|
||||
@ -3119,8 +3144,8 @@ void CMTask::deal_with_reference(oop obj) {
|
||||
void CMTask::push(oop obj) {
|
||||
HeapWord* objAddr = (HeapWord*) obj;
|
||||
assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
|
||||
assert(!_g1h->heap_region_containing(objAddr)->is_on_free_list(),
|
||||
"invariant");
|
||||
assert(!_g1h->is_on_free_list(
|
||||
_g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
|
||||
assert(!_g1h->is_obj_ill(obj), "invariant");
|
||||
assert(_nextMarkBitMap->isMarked(objAddr), "invariant");
|
||||
|
||||
@ -3365,8 +3390,8 @@ void CMTask::drain_local_queue(bool partially) {
|
||||
(void*) obj);
|
||||
|
||||
assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
|
||||
assert(!_g1h->heap_region_containing(obj)->is_on_free_list(),
|
||||
"invariant");
|
||||
assert(!_g1h->is_on_free_list(
|
||||
_g1h->heap_region_containing((HeapWord*) obj)), "invariant");
|
||||
|
||||
scan_object(obj);
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -25,7 +25,7 @@
|
||||
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
|
||||
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
|
||||
|
||||
#include "gc_implementation/g1/heapRegion.hpp"
|
||||
#include "gc_implementation/g1/heapRegionSets.hpp"
|
||||
#include "utilities/taskqueue.hpp"
|
||||
|
||||
class G1CollectedHeap;
|
||||
@ -369,13 +369,7 @@ protected:
|
||||
double _cleanup_sleep_factor;
|
||||
double _cleanup_task_overhead;
|
||||
|
||||
// Stuff related to age cohort processing.
|
||||
struct ParCleanupThreadState {
|
||||
char _pre[64];
|
||||
UncleanRegionList list;
|
||||
char _post[64];
|
||||
};
|
||||
ParCleanupThreadState** _par_cleanup_thread_state;
|
||||
FreeRegionList _cleanup_list;
|
||||
|
||||
// CMS marking support structures
|
||||
CMBitMap _markBitMap1;
|
||||
@ -484,6 +478,10 @@ protected:
|
||||
// prints all gathered CM-related statistics
|
||||
void print_stats();
|
||||
|
||||
bool cleanup_list_is_empty() {
|
||||
return _cleanup_list.is_empty();
|
||||
}
|
||||
|
||||
// accessor methods
|
||||
size_t parallel_marking_threads() { return _parallel_marking_threads; }
|
||||
double sleep_factor() { return _sleep_factor; }
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -95,8 +95,8 @@ void ConcurrentMarkThread::run() {
|
||||
_vtime_start = os::elapsedVTime();
|
||||
wait_for_universe_init();
|
||||
|
||||
G1CollectedHeap* g1 = G1CollectedHeap::heap();
|
||||
G1CollectorPolicy* g1_policy = g1->g1_policy();
|
||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||
G1CollectorPolicy* g1_policy = g1h->g1_policy();
|
||||
G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
|
||||
Thread *current_thread = Thread::current();
|
||||
|
||||
@ -119,7 +119,7 @@ void ConcurrentMarkThread::run() {
|
||||
if (!g1_policy->in_young_gc_mode()) {
|
||||
// this ensures the flag is not set if we bail out of the marking
|
||||
// cycle; normally the flag is cleared immediately after cleanup
|
||||
g1->set_marking_complete();
|
||||
g1h->set_marking_complete();
|
||||
|
||||
if (g1_policy->adaptive_young_list_length()) {
|
||||
double now = os::elapsedTime();
|
||||
@ -228,10 +228,20 @@ void ConcurrentMarkThread::run() {
|
||||
VM_CGC_Operation op(&cl_cl, verbose_str);
|
||||
VMThread::execute(&op);
|
||||
} else {
|
||||
G1CollectedHeap::heap()->set_marking_complete();
|
||||
g1h->set_marking_complete();
|
||||
}
|
||||
|
||||
if (!cm()->has_aborted()) {
|
||||
// Check if cleanup set the free_regions_coming flag. If it
|
||||
// hasn't, we can just skip the next step.
|
||||
if (g1h->free_regions_coming()) {
|
||||
// The following will finish freeing up any regions that we
|
||||
// found to be empty during cleanup. We'll do this part
|
||||
// without joining the suspendible set. If an evacuation pause
|
||||
// takes places, then we would carry on freeing regions in
|
||||
// case they are needed by the pause. If a Full GC takes
|
||||
// places, it would wait for us to process the regions
|
||||
// reclaimed by cleanup.
|
||||
|
||||
double cleanup_start_sec = os::elapsedTime();
|
||||
if (PrintGC) {
|
||||
gclog_or_tty->date_stamp(PrintGCDateStamps);
|
||||
@ -240,23 +250,22 @@ void ConcurrentMarkThread::run() {
|
||||
}
|
||||
|
||||
// Now do the remainder of the cleanup operation.
|
||||
_sts.join();
|
||||
_cm->completeCleanup();
|
||||
if (!cm()->has_aborted()) {
|
||||
g1_policy->record_concurrent_mark_cleanup_completed();
|
||||
g1_policy->record_concurrent_mark_cleanup_completed();
|
||||
|
||||
double cleanup_end_sec = os::elapsedTime();
|
||||
if (PrintGC) {
|
||||
gclog_or_tty->date_stamp(PrintGCDateStamps);
|
||||
gclog_or_tty->stamp(PrintGCTimeStamps);
|
||||
gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf]",
|
||||
cleanup_end_sec - cleanup_start_sec);
|
||||
}
|
||||
double cleanup_end_sec = os::elapsedTime();
|
||||
if (PrintGC) {
|
||||
gclog_or_tty->date_stamp(PrintGCDateStamps);
|
||||
gclog_or_tty->stamp(PrintGCTimeStamps);
|
||||
gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf]",
|
||||
cleanup_end_sec - cleanup_start_sec);
|
||||
}
|
||||
_sts.leave();
|
||||
|
||||
// We're done: no more free regions coming.
|
||||
g1h->reset_free_regions_coming();
|
||||
}
|
||||
// We're done: no more unclean regions coming.
|
||||
G1CollectedHeap::heap()->set_unclean_regions_coming(false);
|
||||
guarantee(cm()->cleanup_list_is_empty(),
|
||||
"at this point there should be no regions on the cleanup list");
|
||||
|
||||
if (cm()->has_aborted()) {
|
||||
if (PrintGC) {
|
||||
@ -278,7 +287,7 @@ void ConcurrentMarkThread::run() {
|
||||
// Java thread is waiting for a full GC to happen (e.g., it
|
||||
// called System.gc() with +ExplicitGCInvokesConcurrent).
|
||||
_sts.join();
|
||||
g1->increment_full_collections_completed(true /* concurrent */);
|
||||
g1h->increment_full_collections_completed(true /* concurrent */);
|
||||
_sts.leave();
|
||||
}
|
||||
assert(_should_terminate, "just checking");
|
||||
|
@ -1,194 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc_implementation/g1/concurrentZFThread.hpp"
|
||||
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc_implementation/g1/heapRegion.hpp"
|
||||
#include "memory/space.inline.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "utilities/copy.hpp"
|
||||
|
||||
// ======= Concurrent Zero-Fill Thread ========
|
||||
|
||||
// The CM thread is created when the G1 garbage collector is used
|
||||
|
||||
int ConcurrentZFThread::_region_allocs = 0;
|
||||
int ConcurrentZFThread::_sync_zfs = 0;
|
||||
int ConcurrentZFThread::_zf_waits = 0;
|
||||
int ConcurrentZFThread::_regions_filled = 0;
|
||||
|
||||
ConcurrentZFThread::ConcurrentZFThread() :
|
||||
ConcurrentGCThread()
|
||||
{
|
||||
create_and_start();
|
||||
}
|
||||
|
||||
void ConcurrentZFThread::wait_for_ZF_completed(HeapRegion* hr) {
|
||||
assert(ZF_mon->owned_by_self(), "Precondition.");
|
||||
note_zf_wait();
|
||||
while (hr->zero_fill_state() == HeapRegion::ZeroFilling) {
|
||||
ZF_mon->wait(Mutex::_no_safepoint_check_flag);
|
||||
}
|
||||
}
|
||||
|
||||
void ConcurrentZFThread::processHeapRegion(HeapRegion* hr) {
|
||||
assert(!Universe::heap()->is_gc_active(),
|
||||
"This should not happen during GC.");
|
||||
assert(hr != NULL, "Precondition");
|
||||
// These are unlocked reads, but if this test is successful, then no
|
||||
// other thread will attempt this zero filling. Only a GC thread can
|
||||
// modify the ZF state of a region whose state is zero-filling, and this
|
||||
// should only happen while the ZF thread is locking out GC.
|
||||
if (hr->zero_fill_state() == HeapRegion::ZeroFilling
|
||||
&& hr->zero_filler() == Thread::current()) {
|
||||
assert(hr->top() == hr->bottom(), "better be empty!");
|
||||
assert(!hr->isHumongous(), "Only free regions on unclean list.");
|
||||
Copy::fill_to_words(hr->bottom(), hr->capacity()/HeapWordSize);
|
||||
note_region_filled();
|
||||
}
|
||||
}
|
||||
|
||||
void ConcurrentZFThread::run() {
|
||||
initialize_in_thread();
|
||||
Thread* thr_self = Thread::current();
|
||||
_vtime_start = os::elapsedVTime();
|
  wait_for_universe_init();

  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  _sts.join();
  while (!_should_terminate) {
    _sts.leave();

    {
      MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);

      // This local variable will hold a region being zero-filled.  This
      // region will neither be on the unclean or zero-filled lists, and
      // will not be available for allocation; thus, we might have an
      // allocation fail, causing a full GC, because of this, but this is a
      // price we will pay.  (In future, we might want to make the fact
      // that there's a region being zero-filled apparent to the G1 heap,
      // which could then wait for it in this extreme case...)
      HeapRegion* to_fill;

      while (!g1->should_zf()
             || (to_fill = g1->pop_unclean_region_list_locked()) == NULL)
        ZF_mon->wait(Mutex::_no_safepoint_check_flag);
      while (to_fill->zero_fill_state() == HeapRegion::ZeroFilling)
        ZF_mon->wait(Mutex::_no_safepoint_check_flag);

      // So now to_fill is non-NULL and is not ZeroFilling.  It might be
      // Allocated or ZeroFilled.  (The latter could happen if this thread
      // starts the zero-filling of a region, but a GC intervenes and
      // pushes new regions needing on the front of the filling on the
      // front of the list.)

      switch (to_fill->zero_fill_state()) {
      case HeapRegion::Allocated:
        to_fill = NULL;
        break;

      case HeapRegion::NotZeroFilled:
        to_fill->set_zero_fill_in_progress(thr_self);

        ZF_mon->unlock();
        _sts.join();
        processHeapRegion(to_fill);
        _sts.leave();
        ZF_mon->lock_without_safepoint_check();

        if (to_fill->zero_fill_state() == HeapRegion::ZeroFilling
            && to_fill->zero_filler() == thr_self) {
          to_fill->set_zero_fill_complete();
          (void)g1->put_free_region_on_list_locked(to_fill);
        }
        break;

      case HeapRegion::ZeroFilled:
        (void)g1->put_free_region_on_list_locked(to_fill);
        break;

      case HeapRegion::ZeroFilling:
        ShouldNotReachHere();
        break;
      }
    }
    _vtime_accum = (os::elapsedVTime() - _vtime_start);
    _sts.join();
  }
  _sts.leave();

  assert(_should_terminate, "just checking");
  terminate();
}

bool ConcurrentZFThread::offer_yield() {
  if (_sts.should_yield()) {
    _sts.yield("Concurrent ZF");
    return true;
  } else {
    return false;
  }
}

void ConcurrentZFThread::stop() {
  // it is ok to take late safepoints here, if needed
  MutexLockerEx mu(Terminator_lock);
  _should_terminate = true;
  while (!_has_terminated) {
    Terminator_lock->wait();
  }
}

void ConcurrentZFThread::print() const {
  print_on(tty);
}

void ConcurrentZFThread::print_on(outputStream* st) const {
  st->print("\"G1 Concurrent Zero-Fill Thread\" ");
  Thread::print_on(st);
  st->cr();
}


double ConcurrentZFThread::_vtime_accum;

void ConcurrentZFThread::print_summary_info() {
  gclog_or_tty->print("\nConcurrent Zero-Filling:\n");
  gclog_or_tty->print("  Filled %d regions, used %5.2fs.\n",
                      _regions_filled,
                      vtime_accum());
  gclog_or_tty->print("  Of %d region allocs, %d (%5.2f%%) required sync ZF,\n",
                      _region_allocs, _sync_zfs,
                      (_region_allocs > 0 ?
                       (float)_sync_zfs/(float)_region_allocs*100.0 :
                       0.0));
  gclog_or_tty->print("  and %d (%5.2f%%) required a ZF wait.\n",
                      _zf_waits,
                      (_region_allocs > 0 ?
                       (float)_zf_waits/(float)_region_allocs*100.0 :
                       0.0));

}
@ -1,91 +0,0 @@
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTZFTHREAD_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTZFTHREAD_HPP

#include "gc_implementation/shared/concurrentGCThread.hpp"

// The Concurrent ZF Thread.  Performs concurrent zero-filling.

class ConcurrentZFThread: public ConcurrentGCThread {
  friend class VMStructs;
  friend class ZeroFillRegionClosure;

 private:

  // Zero fill the heap region.
  void processHeapRegion(HeapRegion* r);

  // Stats
  //   Allocation (protected by heap lock).
  static int _region_allocs;  // Number of regions allocated
  static int _sync_zfs;       //   Synchronous zero-fills +
  static int _zf_waits;       //   Wait for conc zero-fill completion.

  // Number of regions CFZ thread fills.
  static int _regions_filled;

  double _vtime_start;  // Initial virtual time.

  // These are static because the "print_summary_info" method is, and
  // it currently assumes there is only one ZF thread.  We'll change when
  // we need to.
  static double _vtime_accum;  // Initial virtual time.
  static double vtime_accum() { return _vtime_accum; }

  // Offer yield for GC.  Returns true if yield occurred.
  bool offer_yield();

 public:
  // Constructor
  ConcurrentZFThread();

  // Main loop.
  virtual void run();

  // Printing
  void print_on(outputStream* st) const;
  void print() const;

  // Waits until "r" has been zero-filled.  Requires caller to hold the
  // ZF_mon.
  static void wait_for_ZF_completed(HeapRegion* r);

  // Get or clear the current unclean region.  Should be done
  // while holding the ZF_needed_mon lock.

  // shutdown
  void stop();

  // Stats
  static void note_region_alloc() {_region_allocs++; }
  static void note_sync_zfs() { _sync_zfs++; }
  static void note_zf_wait() { _zf_waits++; }
  static void note_region_filled() { _regions_filled++; }

  static void print_summary_info();
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTZFTHREAD_HPP
File diff suppressed because it is too large
@ -27,7 +27,7 @@

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionSets.hpp"
#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
#include "memory/barrierSet.hpp"
#include "memory/memRegion.hpp"
@ -66,8 +66,7 @@ typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
enum G1GCThreadGroups {
  G1CRGroup = 0,
  G1ZFGroup = 1,
  G1CMGroup = 2,
  G1CLGroup = 3
  G1CMGroup = 2
};

enum GCAllocPurpose {
@ -155,6 +154,7 @@ class G1CollectedHeap : public SharedHeap {
  friend class RefineCardTableEntryClosure;
  friend class G1PrepareCompactClosure;
  friend class RegionSorter;
  friend class RegionResetter;
  friend class CountRCClosure;
  friend class EvacPopObjClosure;
  friend class G1ParCleanupCTTask;
@ -178,17 +178,20 @@ private:
  // The maximum part of _g1_storage that has ever been committed.
  MemRegion _g1_max_committed;

  // The number of regions that are completely free.
  size_t _free_regions;
  // The master free list. It will satisfy all new region allocations.
  MasterFreeRegionList _free_list;

  // The secondary free list which contains regions that have been
  // freed up during the cleanup process. This will be appended to the
  // master free list when appropriate.
  SecondaryFreeRegionList _secondary_free_list;

  // It keeps track of the humongous regions.
  MasterHumongousRegionSet _humongous_set;

  // The number of regions we could create by expansion.
  size_t _expansion_regions;

  // Return the number of free regions in the heap (by direct counting.)
  size_t count_free_regions();
  // Return the number of free regions on the free and unclean lists.
  size_t count_free_regions_list();

  // The block offset table for the G1 heap.
  G1BlockOffsetSharedArray* _bot_shared;

@ -196,9 +199,6 @@ private:
  // lists, before and after full GC.
  void tear_down_region_lists();
  void rebuild_region_lists();
  // This sets all non-empty regions to need zero-fill (which they will if
  // they are empty after full collection.)
  void set_used_regions_to_need_zero_fill();

  // The sequence of all heap regions in the heap.
  HeapRegionSeq* _hrs;
@ -231,7 +231,7 @@ private:
  // Determines PLAB size for a particular allocation purpose.
  static size_t desired_plab_sz(GCAllocPurpose purpose);

  // When called by par thread, require par_alloc_during_gc_lock() to be held.
  // When called by par thread, requires the FreeList_lock to be held.
  void push_gc_alloc_region(HeapRegion* hr);

  // This should only be called single-threaded.  Undeclares all GC alloc
@ -294,10 +294,11 @@ private:
|
||||
// line number, file, etc.
|
||||
|
||||
#define heap_locking_asserts_err_msg(__extra_message) \
|
||||
err_msg("%s : Heap_lock %slocked, %sat a safepoint", \
|
||||
err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s", \
|
||||
(__extra_message), \
|
||||
(!Heap_lock->owned_by_self()) ? "NOT " : "", \
|
||||
(!SafepointSynchronize::is_at_safepoint()) ? "NOT " : "")
|
||||
BOOL_TO_STR(Heap_lock->owned_by_self()), \
|
||||
BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()), \
|
||||
BOOL_TO_STR(Thread::current()->is_VM_thread()))
|
||||
|
||||
#define assert_heap_locked() \
|
||||
do { \
|
||||
@ -305,10 +306,11 @@ private:
|
||||
heap_locking_asserts_err_msg("should be holding the Heap_lock")); \
|
||||
} while (0)
|
||||
|
||||
#define assert_heap_locked_or_at_safepoint() \
|
||||
#define assert_heap_locked_or_at_safepoint(__should_be_vm_thread) \
|
||||
do { \
|
||||
assert(Heap_lock->owned_by_self() || \
|
||||
SafepointSynchronize::is_at_safepoint(), \
|
||||
(SafepointSynchronize::is_at_safepoint() && \
|
||||
((__should_be_vm_thread) == Thread::current()->is_VM_thread())), \
|
||||
heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
|
||||
"should be at a safepoint")); \
|
||||
} while (0)
|
||||
@ -335,9 +337,10 @@ private:
|
||||
"should not be at a safepoint")); \
|
||||
} while (0)
|
||||
|
||||
#define assert_at_safepoint() \
|
||||
#define assert_at_safepoint(__should_be_vm_thread) \
|
||||
do { \
|
||||
assert(SafepointSynchronize::is_at_safepoint(), \
|
||||
assert(SafepointSynchronize::is_at_safepoint() && \
|
||||
((__should_be_vm_thread) == Thread::current()->is_VM_thread()), \
|
||||
heap_locking_asserts_err_msg("should be at a safepoint")); \
|
||||
} while (0)
|
||||
|
||||
@ -362,31 +365,41 @@ protected:
  // The current policy object for the collector.
  G1CollectorPolicy* _g1_policy;

  // Parallel allocation lock to protect the current allocation region.
  Mutex  _par_alloc_during_gc_lock;
  Mutex* par_alloc_during_gc_lock() { return &_par_alloc_during_gc_lock; }
  // This is the second level of trying to allocate a new region. If
  // new_region_work didn't find a region in the free_list, this call
  // will check whether there's anything available in the
  // secondary_free_list and/or wait for more regions to appear in that
  // list, if _free_regions_coming is set.
  HeapRegion* new_region_try_secondary_free_list(size_t word_size);

  // If possible/desirable, allocate a new HeapRegion for normal object
  // allocation sufficient for an allocation of the given "word_size".
  // If "do_expand" is true, will attempt to expand the heap if necessary
  // to to satisfy the request. If "zero_filled" is true, requires a
  // zero-filled region.
  // (Returning NULL will trigger a GC.)
  virtual HeapRegion* newAllocRegion_work(size_t word_size,
                                          bool do_expand,
                                          bool zero_filled);
  // It will try to allocate a single non-humongous HeapRegion
  // sufficient for an allocation of the given word_size. If
  // do_expand is true, it will attempt to expand the heap if
  // necessary to satisfy the allocation request. Note that word_size
  // is only used to make sure that we expand sufficiently but, given
  // that the allocation request is assumed not to be humongous,
  // having word_size is not strictly necessary (expanding by a single
  // region will always be sufficient). But let's keep that parameter
  // in case we need it in the future.
  HeapRegion* new_region_work(size_t word_size, bool do_expand);

  virtual HeapRegion* newAllocRegion(size_t word_size,
                                     bool zero_filled = true) {
    return newAllocRegion_work(word_size, false, zero_filled);
  // It will try to allocate a new region to be used for allocation by
  // mutator threads. It will not try to expand the heap if not region
  // is available.
  HeapRegion* new_alloc_region(size_t word_size) {
    return new_region_work(word_size, false /* do_expand */);
  }
  virtual HeapRegion* newAllocRegionWithExpansion(int purpose,
                                                  size_t word_size,
                                                  bool zero_filled = true);

  // It will try to allocate a new region to be used for allocation by
  // a GC thread. It will try to expand the heap if no region is
  // available.
  HeapRegion* new_gc_alloc_region(int purpose, size_t word_size);

  int humongous_obj_allocate_find_first(size_t num_regions, size_t word_size);

  // Attempt to allocate an object of the given (very large) "word_size".
  // Returns "NULL" on failure.
  virtual HeapWord* humongous_obj_allocate(size_t word_size);
  HeapWord* humongous_obj_allocate(size_t word_size);

  // The following two methods, allocate_new_tlab() and
  // mem_allocate(), are the two main entry points from the runtime
@ -760,20 +773,29 @@ protected:
|
||||
// Invoke "save_marks" on all heap regions.
|
||||
void save_marks();
|
||||
|
||||
// Free a heap region.
|
||||
void free_region(HeapRegion* hr);
|
||||
// A component of "free_region", exposed for 'batching'.
|
||||
// All the params after "hr" are out params: the used bytes of the freed
|
||||
// region(s), the number of H regions cleared, the number of regions
|
||||
// freed, and pointers to the head and tail of a list of freed contig
|
||||
// regions, linked throught the "next_on_unclean_list" field.
|
||||
void free_region_work(HeapRegion* hr,
|
||||
size_t& pre_used,
|
||||
size_t& cleared_h,
|
||||
size_t& freed_regions,
|
||||
UncleanRegionList* list,
|
||||
bool par = false);
|
||||
// It frees a non-humongous region by initializing its contents and
|
||||
// adding it to the free list that's passed as a parameter (this is
|
||||
// usually a local list which will be appended to the master free
|
||||
// list later). The used bytes of freed regions are accumulated in
|
||||
// pre_used. If par is true, the region's RSet will not be freed
|
||||
// up. The assumption is that this will be done later.
|
||||
void free_region(HeapRegion* hr,
|
||||
size_t* pre_used,
|
||||
FreeRegionList* free_list,
|
||||
bool par);
|
||||
|
||||
// It frees a humongous region by collapsing it into individual
|
||||
// regions and calling free_region() for each of them. The freed
|
||||
// regions will be added to the free list that's passed as a parameter
|
||||
// (this is usually a local list which will be appended to the
|
||||
// master free list later). The used bytes of freed regions are
|
||||
// accumulated in pre_used. If par is true, the region's RSet will
|
||||
// not be freed up. The assumption is that this will be done later.
|
||||
void free_humongous_region(HeapRegion* hr,
|
||||
size_t* pre_used,
|
||||
FreeRegionList* free_list,
|
||||
HumongousRegionSet* humongous_proxy_set,
|
||||
bool par);
|
||||
|
||||
// The concurrent marker (and the thread it runs in.)
|
||||
ConcurrentMark* _cm;
|
||||
@ -783,9 +805,6 @@ protected:
|
||||
// The concurrent refiner.
|
||||
ConcurrentG1Refine* _cg1r;
|
||||
|
||||
// The concurrent zero-fill thread.
|
||||
ConcurrentZFThread* _czft;
|
||||
|
||||
// The parallel task queues
|
||||
RefToScanQueueSet *_task_queues;
|
||||
|
||||
@ -877,9 +896,7 @@ protected:
|
||||
|
||||
SubTasksDone* _process_strong_tasks;
|
||||
|
||||
// List of regions which require zero filling.
|
||||
UncleanRegionList _unclean_region_list;
|
||||
bool _unclean_regions_coming;
|
||||
volatile bool _free_regions_coming;
|
||||
|
||||
public:
|
||||
|
||||
@ -1002,71 +1019,64 @@ public:
|
||||
size_t max_regions();
|
||||
|
||||
// The number of regions that are completely free.
|
||||
size_t free_regions();
|
||||
size_t free_regions() {
|
||||
return _free_list.length();
|
||||
}
|
||||
|
||||
// The number of regions that are not completely free.
|
||||
size_t used_regions() { return n_regions() - free_regions(); }
|
||||
|
||||
// True iff the ZF thread should run.
|
||||
bool should_zf();
|
||||
|
||||
// The number of regions available for "regular" expansion.
|
||||
size_t expansion_regions() { return _expansion_regions; }
|
||||
|
||||
#ifndef PRODUCT
|
||||
bool regions_accounted_for();
|
||||
bool print_region_accounting_info();
|
||||
void print_region_counts();
|
||||
#endif
|
||||
// verify_region_sets() performs verification over the region
|
||||
// lists. It will be compiled in the product code to be used when
|
||||
// necessary (i.e., during heap verification).
|
||||
void verify_region_sets();
|
||||
|
||||
HeapRegion* alloc_region_from_unclean_list(bool zero_filled);
|
||||
HeapRegion* alloc_region_from_unclean_list_locked(bool zero_filled);
|
||||
// verify_region_sets_optional() is planted in the code for
|
||||
// list verification in non-product builds (and it can be enabled in
|
||||
// product builds by definning HEAP_REGION_SET_FORCE_VERIFY to be 1).
|
||||
#if HEAP_REGION_SET_FORCE_VERIFY
|
||||
void verify_region_sets_optional() {
|
||||
verify_region_sets();
|
||||
}
|
||||
#else // HEAP_REGION_SET_FORCE_VERIFY
|
||||
void verify_region_sets_optional() { }
|
||||
#endif // HEAP_REGION_SET_FORCE_VERIFY
|
||||
|
||||
void put_region_on_unclean_list(HeapRegion* r);
|
||||
void put_region_on_unclean_list_locked(HeapRegion* r);
|
||||
#ifdef ASSERT
|
||||
bool is_on_free_list(HeapRegion* hr) {
|
||||
return hr->containing_set() == &_free_list;
|
||||
}
|
||||
|
||||
void prepend_region_list_on_unclean_list(UncleanRegionList* list);
|
||||
void prepend_region_list_on_unclean_list_locked(UncleanRegionList* list);
|
||||
bool is_on_humongous_set(HeapRegion* hr) {
|
||||
return hr->containing_set() == &_humongous_set;
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
void set_unclean_regions_coming(bool b);
|
||||
void set_unclean_regions_coming_locked(bool b);
|
||||
// Wait for cleanup to be complete.
|
||||
void wait_for_cleanup_complete();
|
||||
// Like above, but assumes that the calling thread owns the Heap_lock.
|
||||
void wait_for_cleanup_complete_locked();
|
||||
// Wrapper for the region list operations that can be called from
|
||||
// methods outside this class.
|
||||
|
||||
// Return the head of the unclean list.
|
||||
HeapRegion* peek_unclean_region_list_locked();
|
||||
// Remove and return the head of the unclean list.
|
||||
HeapRegion* pop_unclean_region_list_locked();
|
||||
void secondary_free_list_add_as_tail(FreeRegionList* list) {
|
||||
_secondary_free_list.add_as_tail(list);
|
||||
}
|
||||
|
||||
// List of regions which are zero filled and ready for allocation.
|
||||
HeapRegion* _free_region_list;
|
||||
// Number of elements on the free list.
|
||||
size_t _free_region_list_size;
|
||||
void append_secondary_free_list() {
|
||||
_free_list.add_as_tail(&_secondary_free_list);
|
||||
}
|
||||
|
||||
// If the head of the unclean list is ZeroFilled, move it to the free
|
||||
// list.
|
||||
bool move_cleaned_region_to_free_list_locked();
|
||||
bool move_cleaned_region_to_free_list();
|
||||
void append_secondary_free_list_if_not_empty() {
|
||||
if (!_secondary_free_list.is_empty()) {
|
||||
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
|
||||
append_secondary_free_list();
|
||||
}
|
||||
}
|
||||
|
||||
void put_free_region_on_list_locked(HeapRegion* r);
|
||||
void put_free_region_on_list(HeapRegion* r);
|
||||
|
||||
// Remove and return the head element of the free list.
|
||||
HeapRegion* pop_free_region_list_locked();
|
||||
|
||||
// If "zero_filled" is true, we first try the free list, then we try the
|
||||
// unclean list, zero-filling the result. If "zero_filled" is false, we
|
||||
// first try the unclean list, then the zero-filled list.
|
||||
HeapRegion* alloc_free_region_from_lists(bool zero_filled);
|
||||
|
||||
// Verify the integrity of the region lists.
|
||||
void remove_allocated_regions_from_lists();
|
||||
bool verify_region_lists();
|
||||
bool verify_region_lists_locked();
|
||||
size_t unclean_region_list_length();
|
||||
size_t free_region_list_length();
|
||||
void set_free_regions_coming();
|
||||
void reset_free_regions_coming();
|
||||
bool free_regions_coming() { return _free_regions_coming; }
|
||||
void wait_while_free_regions_coming();
|
||||
|
||||
// Perform a collection of the heap; intended for use in implementing
|
||||
// "System.gc". This probably implies as full a collection as the
|
||||
@ -1085,23 +1095,24 @@ public:
|
||||
// True iff a evacuation has failed in the most-recent collection.
|
||||
bool evacuation_failed() { return _evacuation_failed; }
|
||||
|
||||
// Free a region if it is totally full of garbage. Returns the number of
|
||||
// bytes freed (0 ==> didn't free it).
|
||||
size_t free_region_if_totally_empty(HeapRegion *hr);
|
||||
void free_region_if_totally_empty_work(HeapRegion *hr,
|
||||
size_t& pre_used,
|
||||
size_t& cleared_h_regions,
|
||||
size_t& freed_regions,
|
||||
UncleanRegionList* list,
|
||||
bool par = false);
|
||||
|
||||
// If we've done free region work that yields the given changes, update
|
||||
// the relevant global variables.
|
||||
void finish_free_region_work(size_t pre_used,
|
||||
size_t cleared_h_regions,
|
||||
size_t freed_regions,
|
||||
UncleanRegionList* list);
|
||||
// It will free a region if it has allocated objects in it that are
|
||||
// all dead. It calls either free_region() or
|
||||
// free_humongous_region() depending on the type of the region that
|
||||
// is passed to it.
|
||||
void free_region_if_totally_empty(HeapRegion* hr,
|
||||
size_t* pre_used,
|
||||
FreeRegionList* free_list,
|
||||
HumongousRegionSet* humongous_proxy_set,
|
||||
bool par);
|
||||
|
||||
// It appends the free list to the master free list and updates the
|
||||
// master humongous list according to the contents of the proxy
|
||||
// list. It also adjusts the total used bytes according to pre_used
|
||||
// (if par is true, it will do so by taking the ParGCRareEvent_lock).
|
||||
void update_sets_after_freeing_regions(size_t pre_used,
|
||||
FreeRegionList* free_list,
|
||||
HumongousRegionSet* humongous_proxy_set,
|
||||
bool par);
|
||||
|
||||
// Returns "TRUE" iff "p" points into the allocated area of the heap.
|
||||
virtual bool is_in(const void* p) const;
|
||||
@ -1314,8 +1325,6 @@ public:
|
||||
return true;
|
||||
}
|
||||
|
||||
virtual bool allocs_are_zero_filled();
|
||||
|
||||
// The boundary between a "large" and "small" array of primitives, in
|
||||
// words.
|
||||
virtual size_t large_typearray_limit();
|
||||
@ -1546,13 +1555,6 @@ public:
|
||||
|
||||
protected:
|
||||
size_t _max_heap_capacity;
|
||||
|
||||
public:
|
||||
// Temporary: call to mark things unimplemented for the G1 heap (e.g.,
|
||||
// MemoryService). In productization, we can make this assert false
|
||||
// to catch such places (as well as searching for calls to this...)
|
||||
static void g1_unimplemented();
|
||||
|
||||
};
|
||||
|
||||
#define use_local_bitmaps 1
|
||||
|
@ -28,7 +28,7 @@
|
||||
#include "gc_implementation/g1/concurrentMark.hpp"
|
||||
#include "gc_implementation/g1/g1CollectedHeap.hpp"
|
||||
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
|
||||
#include "gc_implementation/g1/heapRegionSeq.hpp"
|
||||
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
|
||||
#include "utilities/taskqueue.hpp"
|
||||
|
||||
// Inline functions for G1CollectedHeap
|
||||
@ -135,7 +135,7 @@ G1CollectedHeap::attempt_allocation(size_t word_size) {
|
||||
|
||||
inline void
|
||||
G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) {
|
||||
assert_heap_locked_or_at_safepoint();
|
||||
assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
|
||||
assert(cur_alloc_region != NULL && cur_alloc_region == _cur_alloc_region,
|
||||
"pre-condition of the call");
|
||||
assert(cur_alloc_region->is_young(),
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -2875,8 +2875,6 @@ G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
|
||||
// Adjust for expansion and slop.
|
||||
max_live_bytes = max_live_bytes + expansion_bytes;
|
||||
|
||||
assert(_g1->regions_accounted_for(), "Region leakage!");
|
||||
|
||||
HeapRegion* hr;
|
||||
if (in_young_gc_mode()) {
|
||||
double young_start_time_sec = os::elapsedTime();
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -113,6 +113,7 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
|
||||
|
||||
Threads::gc_epilogue();
|
||||
CodeCache::gc_epilogue();
|
||||
JvmtiExport::gc_epilogue();
|
||||
|
||||
// refs processing: clean slate
|
||||
GenMarkSweep::_ref_processor = NULL;
|
||||
@ -180,26 +181,46 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
|
||||
}
|
||||
|
||||
class G1PrepareCompactClosure: public HeapRegionClosure {
|
||||
G1CollectedHeap* _g1h;
|
||||
ModRefBarrierSet* _mrbs;
|
||||
CompactPoint _cp;
|
||||
size_t _pre_used;
|
||||
FreeRegionList _free_list;
|
||||
HumongousRegionSet _humongous_proxy_set;
|
||||
|
||||
void free_humongous_region(HeapRegion* hr) {
|
||||
HeapWord* bot = hr->bottom();
|
||||
HeapWord* end = hr->end();
|
||||
assert(hr->startsHumongous(),
|
||||
"Only the start of a humongous region should be freed.");
|
||||
G1CollectedHeap::heap()->free_region(hr);
|
||||
_g1h->free_humongous_region(hr, &_pre_used, &_free_list,
|
||||
&_humongous_proxy_set, false /* par */);
|
||||
// Do we also need to do this for the continues humongous regions
|
||||
// we just collapsed?
|
||||
hr->prepare_for_compaction(&_cp);
|
||||
// Also clear the part of the card table that will be unused after
|
||||
// compaction.
|
||||
_mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
|
||||
_mrbs->clear(MemRegion(hr->compaction_top(), end));
|
||||
}
|
||||
|
||||
public:
|
||||
G1PrepareCompactClosure(CompactibleSpace* cs) :
|
||||
G1PrepareCompactClosure(CompactibleSpace* cs)
|
||||
: _g1h(G1CollectedHeap::heap()),
|
||||
_mrbs(G1CollectedHeap::heap()->mr_bs()),
|
||||
_cp(NULL, cs, cs->initialize_threshold()),
|
||||
_mrbs(G1CollectedHeap::heap()->mr_bs())
|
||||
{}
|
||||
_pre_used(0),
|
||||
_free_list("Local Free List for G1MarkSweep"),
|
||||
_humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { }
|
||||
|
||||
void update_sets() {
|
||||
// We'll recalculate total used bytes and recreate the free list
|
||||
// at the end of the GC, so no point in updating those values here.
|
||||
_g1h->update_sets_after_freeing_regions(0, /* pre_used */
|
||||
NULL, /* free_list */
|
||||
&_humongous_proxy_set,
|
||||
false /* par */);
|
||||
_free_list.remove_all();
|
||||
}
|
||||
|
||||
bool doHeapRegion(HeapRegion* hr) {
|
||||
if (hr->isHumongous()) {
|
||||
if (hr->startsHumongous()) {
|
||||
@ -265,6 +286,7 @@ void G1MarkSweep::mark_sweep_phase2() {
|
||||
|
||||
G1PrepareCompactClosure blk(sp);
|
||||
g1h->heap_region_iterate(&blk);
|
||||
blk.update_sets();
|
||||
|
||||
CompactPoint perm_cp(pg, NULL, NULL);
|
||||
pg->prepare_for_compaction(&perm_cp);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -75,21 +75,12 @@
|
||||
"(0 means do not periodically generate this info); " \
|
||||
"it also requires -XX:+G1SummarizeRSetStats") \
|
||||
\
|
||||
diagnostic(bool, G1SummarizeZFStats, false, \
|
||||
"Summarize zero-filling info") \
|
||||
\
|
||||
diagnostic(bool, G1TraceConcRefinement, false, \
|
||||
"Trace G1 concurrent refinement") \
|
||||
\
|
||||
product(intx, G1MarkRegionStackSize, 1024 * 1024, \
|
||||
"Size of the region stack for concurrent marking.") \
|
||||
\
|
||||
develop(bool, G1ConcZeroFill, true, \
|
||||
"If true, run concurrent zero-filling thread") \
|
||||
\
|
||||
develop(intx, G1ConcZFMaxRegions, 1, \
|
||||
"Stop zero-filling when # of zf'd regions reaches") \
|
||||
\
|
||||
develop(bool, G1SATBBarrierPrintNullPreVals, false, \
|
||||
"If true, count frac of ptr writes with null pre-vals.") \
|
||||
\
|
||||
@ -99,6 +90,13 @@
|
||||
develop(intx, G1SATBProcessCompletedThreshold, 20, \
|
||||
"Number of completed buffers that triggers log processing.") \
|
||||
\
|
||||
product(uintx, G1SATBBufferEnqueueingThresholdPercent, 60, \
|
||||
"Before enqueueing them, each mutator thread tries to do some " \
|
||||
"filtering on the SATB buffers it generates. If post-filtering " \
|
||||
"the percentage of retained entries is over this threshold " \
|
||||
"the buffer will be enqueued for processing. A value of 0 " \
|
||||
"specifies that mutator threads should not do such filtering.") \
|
||||
\
|
||||
develop(intx, G1ExtraRegionSurvRate, 33, \
|
||||
"If the young survival rate is S, and there's room left in " \
|
||||
"to-space, we will allow regions whose survival rate is up to " \
|
||||
@ -282,7 +280,20 @@
|
||||
"Size of a work unit of cards claimed by a worker thread" \
|
||||
"during RSet scanning.") \
|
||||
\
|
||||
develop(bool, ReduceInitialCardMarksForG1, false, \
|
||||
develop(uintx, G1SecondaryFreeListAppendLength, 5, \
|
||||
"The number of regions we will add to the secondary free list " \
|
||||
"at every append operation") \
|
||||
\
|
||||
develop(bool, G1ConcRegionFreeingVerbose, false, \
|
||||
"Enables verboseness during concurrent region freeing") \
|
||||
\
|
||||
develop(bool, G1StressConcRegionFreeing, false, \
|
||||
"It stresses the concurrent region freeing operation") \
|
||||
\
|
||||
develop(uintx, G1StressConcRegionFreeingDelayMillis, 0, \
|
||||
"Artificial delay during concurrent region freeing") \
|
||||
\
|
||||
develop(bool, ReduceInitialCardMarksForG1, false, \
|
||||
"When ReduceInitialCardMarks is true, this flag setting " \
|
||||
" controls whether G1 allows the RICM optimization")
|
||||
|
||||
|
@ -23,7 +23,6 @@
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc_implementation/g1/concurrentZFThread.hpp"
|
||||
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
|
||||
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
|
||||
@ -348,22 +347,20 @@ HeapRegion::new_dcto_closure(OopClosure* cl,
|
||||
}
|
||||
|
||||
void HeapRegion::hr_clear(bool par, bool clear_space) {
|
||||
_humongous_type = NotHumongous;
|
||||
_humongous_start_region = NULL;
|
||||
assert(_humongous_type == NotHumongous,
|
||||
"we should have already filtered out humongous regions");
|
||||
assert(_humongous_start_region == NULL,
|
||||
"we should have already filtered out humongous regions");
|
||||
assert(_end == _orig_end,
|
||||
"we should have already filtered out humongous regions");
|
||||
|
||||
_in_collection_set = false;
|
||||
_is_gc_alloc_region = false;
|
||||
|
||||
// Age stuff (if parallel, this will be done separately, since it needs
|
||||
// to be sequential).
|
||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||
|
||||
set_young_index_in_cset(-1);
|
||||
uninstall_surv_rate_group();
|
||||
set_young_type(NotYoung);
|
||||
|
||||
// In case it had been the start of a humongous sequence, reset its end.
|
||||
set_end(_orig_end);
|
||||
|
||||
if (!par) {
|
||||
// If this is parallel, this will be done later.
|
||||
HeapRegionRemSet* hrrs = rem_set();
|
||||
@ -387,6 +384,7 @@ void HeapRegion::calc_gc_efficiency() {
|
||||
// </PREDICTION>
|
||||
|
||||
void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
|
||||
assert(!isHumongous(), "sanity / pre-condition");
|
||||
assert(end() == _orig_end,
|
||||
"Should be normal before the humongous object allocation");
|
||||
assert(top() == bottom(), "should be empty");
|
||||
@ -400,6 +398,7 @@ void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
|
||||
}
|
||||
|
||||
void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
|
||||
assert(!isHumongous(), "sanity / pre-condition");
|
||||
assert(end() == _orig_end,
|
||||
"Should be normal before the humongous object allocation");
|
||||
assert(top() == bottom(), "should be empty");
|
||||
@ -409,6 +408,26 @@ void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
|
||||
_humongous_start_region = first_hr;
|
||||
}
|
||||
|
||||
void HeapRegion::set_notHumongous() {
|
||||
assert(isHumongous(), "pre-condition");
|
||||
|
||||
if (startsHumongous()) {
|
||||
assert(top() <= end(), "pre-condition");
|
||||
set_end(_orig_end);
|
||||
if (top() > end()) {
|
||||
// at least one "continues humongous" region after it
|
||||
set_top(end());
|
||||
}
|
||||
} else {
|
||||
// continues humongous
|
||||
assert(end() == _orig_end, "sanity");
|
||||
}
|
||||
|
||||
assert(capacity() == (size_t) HeapRegion::GrainBytes, "pre-condition");
|
||||
_humongous_type = NotHumongous;
|
||||
_humongous_start_region = NULL;
|
||||
}
|
||||
|
||||
bool HeapRegion::claimHeapRegion(jint claimValue) {
|
||||
jint current = _claimed;
|
||||
if (current != claimValue) {
|
||||
@ -443,15 +462,6 @@ HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
|
||||
return low;
|
||||
}
|
||||
|
||||
void HeapRegion::set_next_on_unclean_list(HeapRegion* r) {
|
||||
assert(r == NULL || r->is_on_unclean_list(), "Malformed unclean list.");
|
||||
_next_in_special_set = r;
|
||||
}
|
||||
|
||||
void HeapRegion::set_on_unclean_list(bool b) {
|
||||
_is_on_unclean_list = b;
|
||||
}
|
||||
|
||||
void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
|
||||
G1OffsetTableContigSpace::initialize(mr, false, mangle_space);
|
||||
hr_clear(false/*par*/, clear_space);
|
||||
@ -469,15 +479,16 @@ HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
|
||||
_hrs_index(-1),
|
||||
_humongous_type(NotHumongous), _humongous_start_region(NULL),
|
||||
_in_collection_set(false), _is_gc_alloc_region(false),
|
||||
_is_on_free_list(false), _is_on_unclean_list(false),
|
||||
_next_in_special_set(NULL), _orig_end(NULL),
|
||||
_claimed(InitialClaimValue), _evacuation_failed(false),
|
||||
_prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
|
||||
_young_type(NotYoung), _next_young_region(NULL),
|
||||
_next_dirty_cards_region(NULL),
|
||||
_young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
|
||||
_rem_set(NULL), _zfs(NotZeroFilled),
|
||||
_recorded_rs_length(0), _predicted_elapsed_time_ms(0),
|
||||
_next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
|
||||
#ifdef ASSERT
|
||||
_containing_set(NULL),
|
||||
#endif // ASSERT
|
||||
_young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
|
||||
_rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
|
||||
_predicted_bytes_to_copy(0)
|
||||
{
|
||||
_orig_end = mr.end();
|
||||
@ -552,86 +563,6 @@ void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) {
|
||||
oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
HeapWord* HeapRegion::allocate(size_t size) {
|
||||
jint state = zero_fill_state();
|
||||
assert(!G1CollectedHeap::heap()->allocs_are_zero_filled() ||
|
||||
zero_fill_is_allocated(),
|
||||
"When ZF is on, only alloc in ZF'd regions");
|
||||
return G1OffsetTableContigSpace::allocate(size);
|
||||
}
|
||||
#endif
|
||||
|
||||
void HeapRegion::set_zero_fill_state_work(ZeroFillState zfs) {
|
||||
assert(ZF_mon->owned_by_self() ||
|
||||
Universe::heap()->is_gc_active(),
|
||||
"Must hold the lock or be a full GC to modify.");
|
||||
#ifdef ASSERT
|
||||
if (top() != bottom() && zfs != Allocated) {
|
||||
ResourceMark rm;
|
||||
stringStream region_str;
|
||||
print_on(®ion_str);
|
||||
assert(top() == bottom() || zfs == Allocated,
|
||||
err_msg("Region must be empty, or we must be setting it to allocated. "
|
||||
"_zfs=%d, zfs=%d, region: %s", _zfs, zfs, region_str.as_string()));
|
||||
}
|
||||
#endif
|
||||
_zfs = zfs;
|
||||
}
|
||||
|
||||
void HeapRegion::set_zero_fill_complete() {
|
||||
set_zero_fill_state_work(ZeroFilled);
|
||||
if (ZF_mon->owned_by_self()) {
|
||||
ZF_mon->notify_all();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void HeapRegion::ensure_zero_filled() {
|
||||
MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
|
||||
ensure_zero_filled_locked();
|
||||
}
|
||||
|
||||
void HeapRegion::ensure_zero_filled_locked() {
|
||||
assert(ZF_mon->owned_by_self(), "Precondition");
|
||||
bool should_ignore_zf = SafepointSynchronize::is_at_safepoint();
|
||||
assert(should_ignore_zf || Heap_lock->is_locked(),
|
||||
"Either we're in a GC or we're allocating a region.");
|
||||
switch (zero_fill_state()) {
|
||||
case HeapRegion::NotZeroFilled:
|
||||
set_zero_fill_in_progress(Thread::current());
|
||||
{
|
||||
ZF_mon->unlock();
|
||||
Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
|
||||
ZF_mon->lock_without_safepoint_check();
|
||||
}
|
||||
// A trap.
|
||||
guarantee(zero_fill_state() == HeapRegion::ZeroFilling
|
||||
&& zero_filler() == Thread::current(),
|
||||
"AHA! Tell Dave D if you see this...");
|
||||
set_zero_fill_complete();
|
||||
// gclog_or_tty->print_cr("Did sync ZF.");
|
||||
ConcurrentZFThread::note_sync_zfs();
|
||||
break;
|
||||
case HeapRegion::ZeroFilling:
|
||||
if (should_ignore_zf) {
|
||||
// We can "break" the lock and take over the work.
|
||||
Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
|
||||
set_zero_fill_complete();
|
||||
ConcurrentZFThread::note_sync_zfs();
|
||||
break;
|
||||
} else {
|
||||
ConcurrentZFThread::wait_for_ZF_completed(this);
|
||||
}
|
||||
case HeapRegion::ZeroFilled:
|
||||
// Nothing to do.
|
||||
break;
|
||||
case HeapRegion::Allocated:
|
||||
guarantee(false, "Should not call on allocated regions.");
|
||||
}
|
||||
assert(zero_fill_state() == HeapRegion::ZeroFilled, "Post");
|
||||
}
|
||||
|
||||
HeapWord*
|
||||
HeapRegion::object_iterate_mem_careful(MemRegion mr,
|
||||
ObjectClosure* cl) {
|
||||
@ -1010,67 +941,3 @@ G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
|
||||
_offsets.set_space(this);
|
||||
initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
|
||||
}
|
||||
|
||||
size_t RegionList::length() {
|
||||
size_t len = 0;
|
||||
HeapRegion* cur = hd();
|
||||
DEBUG_ONLY(HeapRegion* last = NULL);
|
||||
while (cur != NULL) {
|
||||
len++;
|
||||
DEBUG_ONLY(last = cur);
|
||||
cur = get_next(cur);
|
||||
}
|
||||
assert(last == tl(), "Invariant");
|
||||
return len;
|
||||
}
|
||||
|
||||
void RegionList::insert_before_head(HeapRegion* r) {
|
||||
assert(well_formed(), "Inv");
|
||||
set_next(r, hd());
|
||||
_hd = r;
|
||||
_sz++;
|
||||
if (tl() == NULL) _tl = r;
|
||||
assert(well_formed(), "Inv");
|
||||
}
|
||||
|
||||
void RegionList::prepend_list(RegionList* new_list) {
|
||||
assert(well_formed(), "Precondition");
|
||||
assert(new_list->well_formed(), "Precondition");
|
||||
HeapRegion* new_tl = new_list->tl();
|
||||
if (new_tl != NULL) {
|
||||
set_next(new_tl, hd());
|
||||
_hd = new_list->hd();
|
||||
_sz += new_list->sz();
|
||||
if (tl() == NULL) _tl = new_list->tl();
|
||||
} else {
|
||||
assert(new_list->hd() == NULL && new_list->sz() == 0, "Inv");
|
||||
}
|
||||
assert(well_formed(), "Inv");
|
||||
}
|
||||
|
||||
void RegionList::delete_after(HeapRegion* r) {
|
||||
assert(well_formed(), "Precondition");
|
||||
HeapRegion* next = get_next(r);
|
||||
assert(r != NULL, "Precondition");
|
||||
HeapRegion* next_tl = get_next(next);
|
||||
set_next(r, next_tl);
|
||||
dec_sz();
|
||||
if (next == tl()) {
|
||||
assert(next_tl == NULL, "Inv");
|
||||
_tl = r;
|
||||
}
|
||||
assert(well_formed(), "Inv");
|
||||
}
|
||||
|
||||
HeapRegion* RegionList::pop() {
|
||||
assert(well_formed(), "Inv");
|
||||
HeapRegion* res = hd();
|
||||
if (res != NULL) {
|
||||
_hd = get_next(res);
|
||||
_sz--;
|
||||
set_next(res, NULL);
|
||||
if (sz() == 0) _tl = NULL;
|
||||
}
|
||||
assert(well_formed(), "Inv");
|
||||
return res;
|
||||
}
|
||||
|
@ -50,6 +50,11 @@ class ContiguousSpace;
|
||||
class HeapRegionRemSet;
|
||||
class HeapRegionRemSetIterator;
|
||||
class HeapRegion;
|
||||
class HeapRegionSetBase;
|
||||
|
||||
#define HR_FORMAT "%d:["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
|
||||
#define HR_FORMAT_PARAMS(__hr) (__hr)->hrs_index(), (__hr)->bottom(), \
|
||||
(__hr)->top(), (__hr)->end()
|
||||
|
||||
// A dirty card to oop closure for heap regions. It
|
||||
// knows how to get the G1 heap and how to use the bitmap
|
||||
@ -227,12 +232,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
|
||||
// True iff the region is in current collection_set.
|
||||
bool _in_collection_set;
|
||||
|
||||
// True iff the region is on the unclean list, waiting to be zero filled.
|
||||
bool _is_on_unclean_list;
|
||||
|
||||
// True iff the region is on the free list, ready for allocation.
|
||||
bool _is_on_free_list;
|
||||
|
||||
// Is this or has it been an allocation region in the current collection
|
||||
// pause.
|
||||
bool _is_gc_alloc_region;
|
||||
@ -254,6 +253,13 @@ class HeapRegion: public G1OffsetTableContigSpace {
|
||||
// Next region whose cards need cleaning
|
||||
HeapRegion* _next_dirty_cards_region;
|
||||
|
||||
// Fields used by the HeapRegionSetBase class and subclasses.
|
||||
HeapRegion* _next;
|
||||
#ifdef ASSERT
|
||||
HeapRegionSetBase* _containing_set;
|
||||
#endif // ASSERT
|
||||
bool _pending_removal;
|
||||
|
||||
// For parallel heapRegion traversal.
|
||||
jint _claimed;
|
||||
|
||||
@ -305,10 +311,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
|
||||
_top_at_conc_mark_count = bot;
|
||||
}
|
||||
|
||||
jint _zfs; // A member of ZeroFillState. Protected by ZF_lock.
|
||||
Thread* _zero_filler; // If _zfs is ZeroFilling, the thread that (last)
|
||||
// made it so.
|
||||
|
||||
void set_young_type(YoungType new_type) {
|
||||
//assert(_young_type != new_type, "setting the same type" );
|
||||
// TODO: add more assertions here
|
||||
@ -362,16 +364,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
|
||||
RebuildRSClaimValue = 5
|
||||
};
|
||||
|
||||
// Concurrent refinement requires contiguous heap regions (in which TLABs
|
||||
// might be allocated) to be zero-filled. Each region therefore has a
|
||||
// zero-fill-state.
|
||||
enum ZeroFillState {
|
||||
NotZeroFilled,
|
||||
ZeroFilling,
|
||||
ZeroFilled,
|
||||
Allocated
|
||||
};
|
||||
|
||||
inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
|
||||
assert(is_young(), "we can only skip BOT updates on young regions");
|
||||
return ContiguousSpace::par_allocate(word_size);
|
||||
@ -456,6 +448,9 @@ class HeapRegion: public G1OffsetTableContigSpace {
|
||||
// which this region will be part of.
|
||||
void set_continuesHumongous(HeapRegion* first_hr);
|
||||
|
||||
// Unsets the humongous-related fields on the region.
|
||||
void set_notHumongous();
|
||||
|
||||
// If the region has a remembered set, return a pointer to it.
|
||||
HeapRegionRemSet* rem_set() const {
|
||||
return _rem_set;
|
||||
@ -502,45 +497,56 @@ class HeapRegion: public G1OffsetTableContigSpace {
|
||||
_next_in_special_set = r;
|
||||
}
|
||||
|
||||
bool is_on_free_list() {
|
||||
return _is_on_free_list;
|
||||
// Methods used by the HeapRegionSetBase class and subclasses.
|
||||
|
||||
// Getter and setter for the next field used to link regions into
|
||||
// linked lists.
|
||||
HeapRegion* next() { return _next; }
|
||||
|
||||
void set_next(HeapRegion* next) { _next = next; }
|
||||
|
||||
// Every region added to a set is tagged with a reference to that
|
||||
// set. This is used for doing consistency checking to make sure that
|
||||
// the contents of a set are as they should be and it's only
|
||||
// available in non-product builds.
|
||||
#ifdef ASSERT
|
||||
void set_containing_set(HeapRegionSetBase* containing_set) {
|
||||
assert((containing_set == NULL && _containing_set != NULL) ||
|
||||
(containing_set != NULL && _containing_set == NULL),
|
||||
err_msg("containing_set: "PTR_FORMAT" "
|
||||
"_containing_set: "PTR_FORMAT,
|
||||
containing_set, _containing_set));
|
||||
|
||||
_containing_set = containing_set;
|
||||
}
|
||||
|
||||
HeapRegionSetBase* containing_set() { return _containing_set; }
|
||||
#else // ASSERT
|
||||
void set_containing_set(HeapRegionSetBase* containing_set) { }
|
||||
|
||||
// containing_set() is only used in asserts so there's not reason
|
||||
// to provide a dummy version of it.
|
||||
#endif // ASSERT
|
||||
|
||||
// If we want to remove regions from a list in bulk we can simply tag
|
||||
// them with the pending_removal tag and call the
|
||||
// remove_all_pending() method on the list.
|
||||
|
||||
bool pending_removal() { return _pending_removal; }
|
||||
|
||||
void set_pending_removal(bool pending_removal) {
|
||||
// We can only set pending_removal to true, if it's false and the
|
||||
// region belongs to a set.
|
||||
assert(!pending_removal ||
|
||||
(!_pending_removal && containing_set() != NULL), "pre-condition");
|
||||
// We can only set pending_removal to false, if it's true and the
|
||||
// region does not belong to a set.
|
||||
assert( pending_removal ||
|
||||
( _pending_removal && containing_set() == NULL), "pre-condition");
|
||||
|
||||
_pending_removal = pending_removal;
|
||||
}
|
||||
|
||||
void set_on_free_list(bool b) {
|
||||
_is_on_free_list = b;
|
||||
}
|
||||
|
||||
HeapRegion* next_from_free_list() {
|
||||
assert(is_on_free_list(),
|
||||
"Should only invoke on free space.");
|
||||
assert(_next_in_special_set == NULL ||
|
||||
_next_in_special_set->is_on_free_list(),
|
||||
"Malformed Free List.");
|
||||
return _next_in_special_set;
|
||||
}
|
||||
|
||||
void set_next_on_free_list(HeapRegion* r) {
|
||||
assert(r == NULL || r->is_on_free_list(), "Malformed free list.");
|
||||
_next_in_special_set = r;
|
||||
}
|
||||
|
||||
bool is_on_unclean_list() {
|
||||
return _is_on_unclean_list;
|
||||
}
|
||||
|
||||
void set_on_unclean_list(bool b);
|
||||
|
||||
HeapRegion* next_from_unclean_list() {
|
||||
assert(is_on_unclean_list(),
|
||||
"Should only invoke on unclean space.");
|
||||
assert(_next_in_special_set == NULL ||
|
||||
_next_in_special_set->is_on_unclean_list(),
|
||||
"Malformed unclean List.");
|
||||
return _next_in_special_set;
|
||||
}
|
||||
|
||||
void set_next_on_unclean_list(HeapRegion* r);
|
||||
|
||||
HeapRegion* get_next_young_region() { return _next_young_region; }
|
||||
void set_next_young_region(HeapRegion* hr) {
|
||||
_next_young_region = hr;
|
||||
@ -559,11 +565,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
|
||||
|
||||
void initialize(MemRegion mr, bool clear_space, bool mangle_space);
|
||||
|
||||
// Ensure that "this" is zero-filled.
|
||||
void ensure_zero_filled();
|
||||
// This one requires that the calling thread holds ZF_mon.
|
||||
void ensure_zero_filled_locked();
|
||||
|
||||
// Get the start of the unmarked area in this region.
|
||||
HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
|
||||
HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
|
||||
@ -798,36 +799,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
|
||||
// "end" of the region if there is no such block.
|
||||
HeapWord* next_block_start_careful(HeapWord* addr);
|
||||
|
||||
// Returns the zero-fill-state of the current region.
|
||||
ZeroFillState zero_fill_state() { return (ZeroFillState)_zfs; }
|
||||
bool zero_fill_is_allocated() { return _zfs == Allocated; }
|
||||
Thread* zero_filler() { return _zero_filler; }
|
||||
|
||||
// Indicate that the contents of the region are unknown, and therefore
|
||||
// might require zero-filling.
|
||||
void set_zero_fill_needed() {
|
||||
set_zero_fill_state_work(NotZeroFilled);
|
||||
}
|
||||
void set_zero_fill_in_progress(Thread* t) {
|
||||
set_zero_fill_state_work(ZeroFilling);
|
||||
_zero_filler = t;
|
||||
}
|
||||
void set_zero_fill_complete();
|
||||
void set_zero_fill_allocated() {
|
||||
set_zero_fill_state_work(Allocated);
|
||||
}
|
||||
|
||||
void set_zero_fill_state_work(ZeroFillState zfs);
|
||||
|
||||
// This is called when a full collection shrinks the heap.
|
||||
// We want to set the heap region to a value which says
|
||||
// it is no longer part of the heap. For now, we'll let "NotZF" fill
|
||||
// that role.
|
||||
void reset_zero_fill() {
|
||||
set_zero_fill_state_work(NotZeroFilled);
|
||||
_zero_filler = NULL;
|
||||
}
|
||||
|
||||
size_t recorded_rs_length() const { return _recorded_rs_length; }
|
||||
double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
|
||||
size_t predicted_bytes_to_copy() const { return _predicted_bytes_to_copy; }
|
||||
@ -866,10 +837,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
|
||||
|
||||
// Override; it uses the "prev" marking information
|
||||
virtual void verify(bool allow_dirty) const;
|
||||
|
||||
#ifdef DEBUG
|
||||
HeapWord* allocate(size_t size);
|
||||
#endif
|
||||
};
|
||||
|
||||
// HeapRegionClosure is used for iterating over regions.
|
||||
@ -892,113 +859,6 @@ class HeapRegionClosure : public StackObj {
|
||||
bool complete() { return _complete; }
|
||||
};
|
||||
|
||||
// A linked lists of heap regions. It leaves the "next" field
|
||||
// unspecified; that's up to subtypes.
|
||||
class RegionList VALUE_OBJ_CLASS_SPEC {
|
||||
protected:
|
||||
virtual HeapRegion* get_next(HeapRegion* chr) = 0;
|
||||
virtual void set_next(HeapRegion* chr,
|
||||
HeapRegion* new_next) = 0;
|
||||
|
||||
HeapRegion* _hd;
|
||||
HeapRegion* _tl;
|
||||
size_t _sz;
|
||||
|
||||
// Protected constructor because this type is only meaningful
|
||||
// when the _get/_set next functions are defined.
|
||||
RegionList() : _hd(NULL), _tl(NULL), _sz(0) {}
|
||||
public:
|
||||
void reset() {
|
||||
_hd = NULL;
|
||||
_tl = NULL;
|
||||
_sz = 0;
|
||||
}
|
||||
HeapRegion* hd() { return _hd; }
|
||||
HeapRegion* tl() { return _tl; }
|
||||
size_t sz() { return _sz; }
|
||||
size_t length();
|
||||
|
||||
bool well_formed() {
|
||||
return
|
||||
((hd() == NULL && tl() == NULL && sz() == 0)
|
||||
|| (hd() != NULL && tl() != NULL && sz() > 0))
|
||||
&& (sz() == length());
|
||||
}
|
||||
virtual void insert_before_head(HeapRegion* r);
|
||||
void prepend_list(RegionList* new_list);
|
||||
virtual HeapRegion* pop();
|
||||
void dec_sz() { _sz--; }
|
||||
// Requires that "r" is an element of the list, and is not the tail.
|
||||
void delete_after(HeapRegion* r);
|
||||
};
|
||||
|
||||
class EmptyNonHRegionList: public RegionList {
|
||||
protected:
|
||||
// Protected constructor because this type is only meaningful
|
||||
// when the _get/_set next functions are defined.
|
||||
EmptyNonHRegionList() : RegionList() {}
|
||||
|
||||
public:
|
||||
void insert_before_head(HeapRegion* r) {
|
||||
// assert(r->is_empty(), "Better be empty");
|
||||
assert(!r->isHumongous(), "Better not be humongous.");
|
||||
RegionList::insert_before_head(r);
|
||||
}
|
||||
void prepend_list(EmptyNonHRegionList* new_list) {
|
||||
// assert(new_list->hd() == NULL || new_list->hd()->is_empty(),
|
||||
// "Better be empty");
|
||||
assert(new_list->hd() == NULL || !new_list->hd()->isHumongous(),
|
||||
"Better not be humongous.");
|
||||
// assert(new_list->tl() == NULL || new_list->tl()->is_empty(),
|
||||
// "Better be empty");
|
||||
assert(new_list->tl() == NULL || !new_list->tl()->isHumongous(),
|
||||
"Better not be humongous.");
|
||||
RegionList::prepend_list(new_list);
|
||||
}
|
||||
};
|
||||
|
||||
class UncleanRegionList: public EmptyNonHRegionList {
|
||||
public:
|
||||
HeapRegion* get_next(HeapRegion* hr) {
|
||||
return hr->next_from_unclean_list();
|
||||
}
|
||||
void set_next(HeapRegion* hr, HeapRegion* new_next) {
|
||||
hr->set_next_on_unclean_list(new_next);
|
||||
}
|
||||
|
||||
UncleanRegionList() : EmptyNonHRegionList() {}
|
||||
|
||||
void insert_before_head(HeapRegion* r) {
|
||||
assert(!r->is_on_free_list(),
|
||||
"Better not already be on free list");
|
||||
assert(!r->is_on_unclean_list(),
|
||||
"Better not already be on unclean list");
|
||||
r->set_zero_fill_needed();
|
||||
r->set_on_unclean_list(true);
|
||||
EmptyNonHRegionList::insert_before_head(r);
|
||||
}
|
||||
void prepend_list(UncleanRegionList* new_list) {
|
||||
assert(new_list->tl() == NULL || !new_list->tl()->is_on_free_list(),
|
||||
"Better not already be on free list");
|
||||
assert(new_list->tl() == NULL || new_list->tl()->is_on_unclean_list(),
|
||||
"Better already be marked as on unclean list");
|
||||
assert(new_list->hd() == NULL || !new_list->hd()->is_on_free_list(),
|
||||
"Better not already be on free list");
|
||||
assert(new_list->hd() == NULL || new_list->hd()->is_on_unclean_list(),
|
||||
"Better already be marked as on unclean list");
|
||||
EmptyNonHRegionList::prepend_list(new_list);
|
||||
}
|
||||
HeapRegion* pop() {
|
||||
HeapRegion* res = RegionList::pop();
|
||||
if (res != NULL) res->set_on_unclean_list(false);
|
||||
return res;
|
||||
}
|
||||
};
|
||||
|
||||
// Local Variables: ***
|
||||
// c-indentation-style: gnu ***
|
||||
// End: ***
|
||||
|
||||
#endif // SERIALGC
|
||||
|
||||
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
|
||||
|
@ -65,152 +65,6 @@ HeapRegionSeq::HeapRegionSeq(const size_t max_size) :
|
||||
|
||||
// Private methods.
|
||||
|
||||
HeapWord*
|
||||
HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
|
||||
assert(G1CollectedHeap::isHumongous(word_size),
|
||||
"Allocation size should be humongous");
|
||||
int cur = ind;
|
||||
int first = cur;
|
||||
size_t sumSizes = 0;
|
||||
while (cur < _regions.length() && sumSizes < word_size) {
|
||||
// Loop invariant:
|
||||
// For all i in [first, cur):
|
||||
// _regions.at(i)->is_empty()
|
||||
// && _regions.at(i) is contiguous with its predecessor, if any
|
||||
// && sumSizes is the sum of the sizes of the regions in the interval
|
||||
// [first, cur)
|
||||
HeapRegion* curhr = _regions.at(cur);
|
||||
if (curhr->is_empty()
|
||||
&& (first == cur
|
||||
|| (_regions.at(cur-1)->end() ==
|
||||
curhr->bottom()))) {
|
||||
sumSizes += curhr->capacity() / HeapWordSize;
|
||||
} else {
|
||||
first = cur + 1;
|
||||
sumSizes = 0;
|
||||
}
|
||||
cur++;
|
||||
}
|
||||
if (sumSizes >= word_size) {
|
||||
_alloc_search_start = cur;
|
||||
|
||||
// We need to initialize the region(s) we just discovered. This is
|
||||
// a bit tricky given that it can happen concurrently with
|
||||
// refinement threads refining cards on these regions and
|
||||
// potentially wanting to refine the BOT as they are scanning
|
||||
// those cards (this can happen shortly after a cleanup; see CR
|
||||
// 6991377). So we have to set up the region(s) carefully and in
|
||||
// a specific order.
|
||||
|
||||
// Currently, allocs_are_zero_filled() returns false. The zero
|
||||
// filling infrastructure will be going away soon (see CR 6977804).
|
||||
// So no need to do anything else here.
|
||||
bool zf = G1CollectedHeap::heap()->allocs_are_zero_filled();
|
||||
assert(!zf, "not supported");
|
||||
|
||||
// This will be the "starts humongous" region.
|
||||
HeapRegion* first_hr = _regions.at(first);
|
||||
{
|
||||
MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
|
||||
first_hr->set_zero_fill_allocated();
|
||||
}
|
||||
// The header of the new object will be placed at the bottom of
|
||||
// the first region.
|
||||
HeapWord* new_obj = first_hr->bottom();
|
||||
// This will be the new end of the first region in the series that
|
||||
// should also match the end of the last region in the seriers.
|
||||
// (Note: sumSizes = "region size" x "number of regions we found").
|
||||
HeapWord* new_end = new_obj + sumSizes;
|
||||
// This will be the new top of the first region that will reflect
|
||||
// this allocation.
|
||||
HeapWord* new_top = new_obj + word_size;
|
||||
|
||||
// First, we need to zero the header of the space that we will be
|
||||
// allocating. When we update top further down, some refinement
|
||||
// threads might try to scan the region. By zeroing the header we
|
||||
// ensure that any thread that will try to scan the region will
|
||||
// come across the zero klass word and bail out.
|
||||
//
|
||||
// NOTE: It would not have been correct to have used
|
||||
// CollectedHeap::fill_with_object() and make the space look like
|
||||
// an int array. The thread that is doing the allocation will
|
||||
// later update the object header to a potentially different array
|
||||
// type and, for a very short period of time, the klass and length
|
||||
// fields will be inconsistent. This could cause a refinement
|
||||
// thread to calculate the object size incorrectly.
|
||||
Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
|
||||
|
||||
// We will set up the first region as "starts humongous". This
|
||||
// will also update the BOT covering all the regions to reflect
|
||||
// that there is a single object that starts at the bottom of the
|
||||
// first region.
|
||||
first_hr->set_startsHumongous(new_top, new_end);
|
||||
|
||||
// Then, if there are any, we will set up the "continues
|
||||
// humongous" regions.
|
||||
HeapRegion* hr = NULL;
|
||||
for (int i = first + 1; i < cur; ++i) {
|
||||
hr = _regions.at(i);
|
||||
{
|
||||
MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
|
||||
hr->set_zero_fill_allocated();
|
||||
}
|
||||
hr->set_continuesHumongous(first_hr);
|
||||
}
|
||||
// If we have "continues humongous" regions (hr != NULL), then the
|
||||
// end of the last one should match new_end.
|
||||
assert(hr == NULL || hr->end() == new_end, "sanity");
|
||||
|
||||
// Up to this point no concurrent thread would have been able to
|
||||
// do any scanning on any region in this series. All the top
|
||||
// fields still point to bottom, so the intersection between
|
||||
// [bottom,top] and [card_start,card_end] will be empty. Before we
|
||||
// update the top fields, we'll do a storestore to make sure that
|
||||
// no thread sees the update to top before the zeroing of the
|
||||
// object header and the BOT initialization.
|
||||
OrderAccess::storestore();
|
||||
|
||||
// Now that the BOT and the object header have been initialized,
|
||||
// we can update top of the "starts humongous" region.
|
||||
assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
|
||||
"new_top should be in this region");
|
||||
first_hr->set_top(new_top);
|
||||
|
||||
// Now, we will update the top fields of the "continues humongous"
|
||||
// regions. The reason we need to do this is that, otherwise,
|
||||
// these regions would look empty and this will confuse parts of
|
||||
// G1. For example, the code that looks for a consecutive number
|
||||
// of empty regions will consider them empty and try to
|
||||
// re-allocate them. We can extend is_empty() to also include
|
||||
// !continuesHumongous(), but it is easier to just update the top
|
||||
// fields here.
|
||||
hr = NULL;
|
||||
for (int i = first + 1; i < cur; ++i) {
|
||||
hr = _regions.at(i);
|
||||
if ((i + 1) == cur) {
|
||||
// last continues humongous region
|
||||
assert(hr->bottom() < new_top && new_top <= hr->end(),
|
||||
"new_top should fall on this region");
|
||||
hr->set_top(new_top);
|
||||
} else {
|
||||
// not last one
|
||||
assert(new_top > hr->end(), "new_top should be above this region");
|
||||
hr->set_top(hr->end());
|
||||
}
|
||||
}
|
||||
// If we have continues humongous regions (hr != NULL), then the
|
||||
// end of the last one should match new_end and its top should
|
||||
// match new_top.
|
||||
assert(hr == NULL ||
|
||||
(hr->end() == new_end && hr->top() == new_top), "sanity");
|
||||
|
||||
return new_obj;
|
||||
} else {
|
||||
// If we started from the beginning, we want to know why we can't alloc.
|
||||
return NULL;
|
||||
}
|
||||
}
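// Aside (not part of this change): the zero-the-header / storestore /
// update-top dance above is the classic initialize-then-publish pattern.
// A minimal, self-contained C++11 sketch of the same idea using standard
// atomics in place of HotSpot's OrderAccess; Payload, published, writer
// and reader are illustrative names only.

#include <atomic>
#include <cassert>

struct Payload {
  long header;   // stands in for the klass word that gets zeroed first
  long body;     // stands in for the BOT / object metadata
};

static Payload slot;                                  // memory being set up
static std::atomic<Payload*> published(nullptr);      // stands in for "top"

void writer() {
  slot.header = 0;                                    // zero the "klass word"
  slot.body   = 42;                                   // fill in the metadata
  // release ordering plays the role of storestore() plus the publishing store
  published.store(&slot, std::memory_order_release);
}

bool reader() {                                       // e.g. a refinement thread
  Payload* p = published.load(std::memory_order_acquire);
  if (p == nullptr) return false;                     // nothing published yet, bail out
  assert(p->header == 0 && p->body == 42);            // setup stores are visible
  return true;
}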
|
||||
|
||||
void HeapRegionSeq::print_empty_runs() {
|
||||
int empty_run = 0;
|
||||
int n_empty = 0;
|
||||
@@ -284,13 +138,67 @@ size_t HeapRegionSeq::free_suffix() {
|
||||
return res;
|
||||
}
|
||||
|
||||
HeapWord* HeapRegionSeq::obj_allocate(size_t word_size) {
|
||||
int cur = _alloc_search_start;
|
||||
// Make sure "cur" is a valid index.
|
||||
assert(cur >= 0, "Invariant.");
|
||||
HeapWord* res = alloc_obj_from_region_index(cur, word_size);
|
||||
if (res == NULL)
|
||||
res = alloc_obj_from_region_index(0, word_size);
|
||||
int HeapRegionSeq::find_contiguous_from(int from, size_t num) {
|
||||
assert(num > 1, "pre-condition");
|
||||
assert(0 <= from && from <= _regions.length(),
|
||||
err_msg("from: %d should be valid and <= than %d",
|
||||
from, _regions.length()));
|
||||
|
||||
int curr = from;
|
||||
int first = -1;
|
||||
size_t num_so_far = 0;
|
||||
while (curr < _regions.length() && num_so_far < num) {
|
||||
HeapRegion* curr_hr = _regions.at(curr);
|
||||
if (curr_hr->is_empty()) {
|
||||
if (first == -1) {
|
||||
first = curr;
|
||||
num_so_far = 1;
|
||||
} else {
|
||||
num_so_far += 1;
|
||||
}
|
||||
} else {
|
||||
first = -1;
|
||||
num_so_far = 0;
|
||||
}
|
||||
curr += 1;
|
||||
}
|
||||
|
||||
assert(num_so_far <= num, "post-condition");
|
||||
if (num_so_far == num) {
|
||||
// we found enough space for the humongous object
|
||||
assert(from <= first && first < _regions.length(), "post-condition");
|
||||
assert(first < curr && (curr - first) == (int) num, "post-condition");
|
||||
for (int i = first; i < first + (int) num; ++i) {
|
||||
assert(_regions.at(i)->is_empty(), "post-condition");
|
||||
}
|
||||
return first;
|
||||
} else {
|
||||
// we failed to find enough space for the humongous object
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
int HeapRegionSeq::find_contiguous(size_t num) {
|
||||
assert(num > 1, "otherwise we should not be calling this");
|
||||
assert(0 <= _alloc_search_start && _alloc_search_start <= _regions.length(),
|
||||
err_msg("_alloc_search_start: %d should be valid and <= than %d",
|
||||
_alloc_search_start, _regions.length()));
|
||||
|
||||
int start = _alloc_search_start;
|
||||
int res = find_contiguous_from(start, num);
|
||||
if (res == -1 && start != 0) {
|
||||
// Try starting from the beginning. If _alloc_search_start was 0,
|
||||
// no point in doing this again.
|
||||
res = find_contiguous_from(0, num);
|
||||
}
|
||||
if (res != -1) {
|
||||
assert(0 <= res && res < _regions.length(),
|
||||
err_msg("res: %d should be valid", res));
|
||||
_alloc_search_start = res + (int) num;
|
||||
}
|
||||
assert(0 < _alloc_search_start && _alloc_search_start <= _regions.length(),
|
||||
err_msg("_alloc_search_start: %d should be valid",
|
||||
_alloc_search_start));
|
||||
return res;
|
||||
}
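// Aside (not part of this change): find_contiguous_from() above is a
// plain sliding-window scan over the region table: the window resets at
// every non-empty region and the window start is reported once it spans
// num entries. A self-contained sketch of the same scan over a bool
// array; find_run and the sample values are illustrative only.

#include <cassert>
#include <cstddef>

static int find_run(const bool* is_empty, int len, int from, size_t num) {
  int first = -1;
  size_t so_far = 0;
  for (int curr = from; curr < len && so_far < num; curr += 1) {
    if (is_empty[curr]) {
      if (first == -1) { first = curr; so_far = 1; }
      else             { so_far += 1; }
    } else {
      first = -1; so_far = 0;   // run broken, start over
    }
  }
  return (so_far == num) ? first : -1;
}

static void find_run_example() {
  const bool is_empty[] = { true, false, true, true, true, false };
  assert(find_run(is_empty, 6, 0, 3) ==  2);   // first run of 3 starts at index 2
  assert(find_run(is_empty, 6, 3, 3) == -1);   // no room for 3 regions from index 3
}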
|
||||
|
||||
@@ -376,6 +284,10 @@ void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) {
|
||||
|
||||
MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
|
||||
size_t& num_regions_deleted) {
|
||||
// Reset this in case it's currently pointing into the regions that
|
||||
// we just removed.
|
||||
_alloc_search_start = 0;
|
||||
|
||||
assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
|
||||
assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");
|
||||
|
||||
@@ -395,7 +307,6 @@ MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
|
||||
}
|
||||
assert(cur == _regions.top(), "Should be top");
|
||||
if (!cur->is_empty()) break;
|
||||
cur->reset_zero_fill();
|
||||
shrink_bytes -= cur->capacity();
|
||||
num_regions_deleted++;
|
||||
_regions.pop();
|
||||
@@ -410,7 +321,6 @@ MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
|
||||
return MemRegion(last_start, end);
|
||||
}
|
||||
|
||||
|
||||
class PrintHeapRegionClosure : public HeapRegionClosure {
|
||||
public:
|
||||
bool doHeapRegion(HeapRegion* r) {
|
||||
|
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -41,9 +41,9 @@ class HeapRegionSeq: public CHeapObj {
|
||||
// (For efficiency only; private to obj_allocate after initialization.)
|
||||
int _alloc_search_start;
|
||||
|
||||
// Attempts to allocate a block of the (assumed humongous) word_size,
|
||||
// starting at the region "ind".
|
||||
HeapWord* alloc_obj_from_region_index(int ind, size_t word_size);
|
||||
// Finds a contiguous set of empty regions of length num, starting
|
||||
// from a given index.
|
||||
int find_contiguous_from(int from, size_t num);
|
||||
|
||||
// Currently, we're choosing collection sets in a round-robin fashion,
|
||||
// starting here.
|
||||
@@ -76,11 +76,8 @@ class HeapRegionSeq: public CHeapObj {
|
||||
// that are available for allocation.
|
||||
size_t free_suffix();
|
||||
|
||||
// Requires "word_size" to be humongous (in the technical sense). If
|
||||
// possible, allocates a contiguous subsequence of the heap regions to
|
||||
// satisfy the allocation, and returns the address of the beginning of
|
||||
// that sequence, otherwise returns NULL.
|
||||
HeapWord* obj_allocate(size_t word_size);
|
||||
// Finds a contiguous set of empty regions of length num.
|
||||
int find_contiguous(size_t num);
|
||||
|
||||
// Apply the "doHeapRegion" method of "blk" to all regions in "this",
|
||||
// in address order, terminating the iteration early
|
||||
@@ -106,7 +103,7 @@ class HeapRegionSeq: public CHeapObj {
|
||||
|
||||
// If "addr" falls within a region in the sequence, return that region,
|
||||
// or else NULL.
|
||||
HeapRegion* addr_to_region(const void* addr);
|
||||
inline HeapRegion* addr_to_region(const void* addr);
|
||||
|
||||
void print();
|
||||
|
||||
|
hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp (new file, 438 lines)
@@ -0,0 +1,438 @@
|
||||
/*
|
||||
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
|
||||
|
||||
size_t HeapRegionSetBase::_unrealistically_long_length = 0;
|
||||
|
||||
//////////////////// HeapRegionSetBase ////////////////////
|
||||
|
||||
void HeapRegionSetBase::set_unrealistically_long_length(size_t len) {
|
||||
guarantee(_unrealistically_long_length == 0, "should only be set once");
|
||||
_unrealistically_long_length = len;
|
||||
}
|
||||
|
||||
size_t HeapRegionSetBase::calculate_region_num(HeapRegion* hr) {
|
||||
assert(hr->startsHumongous(), "pre-condition");
|
||||
assert(hr->capacity() % HeapRegion::GrainBytes == 0, "invariant");
|
||||
size_t region_num = hr->capacity() >> HeapRegion::LogOfHRGrainBytes;
|
||||
assert(region_num > 0, "sanity");
|
||||
return region_num;
|
||||
}
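// Worked example (illustrative numbers, not the actual flag values): with
// 1 MB heap regions, LogOfHRGrainBytes is 20, so a "starts humongous"
// region whose capacity covers a 4 MB series yields
//   region_num = (4 * 1024 * 1024) >> 20 == 4.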
|
||||
|
||||
void HeapRegionSetBase::fill_in_ext_msg(hrl_ext_msg* msg, const char* message) {
|
||||
msg->append("[%s] %s "
|
||||
"ln: "SIZE_FORMAT" rn: "SIZE_FORMAT" "
|
||||
"cy: "SIZE_FORMAT" ud: "SIZE_FORMAT,
|
||||
name(), message, length(), region_num(),
|
||||
total_capacity_bytes(), total_used_bytes());
|
||||
fill_in_ext_msg_extra(msg);
|
||||
}
|
||||
|
||||
bool HeapRegionSetBase::verify_region(HeapRegion* hr,
|
||||
HeapRegionSetBase* expected_containing_set) {
|
||||
const char* error_message = NULL;
|
||||
|
||||
if (!regions_humongous()) {
|
||||
if (hr->isHumongous()) {
|
||||
error_message = "the region should not be humongous";
|
||||
}
|
||||
} else {
|
||||
if (!hr->isHumongous() || !hr->startsHumongous()) {
|
||||
error_message = "the region should be 'starts humongous'";
|
||||
}
|
||||
}
|
||||
|
||||
if (!regions_empty()) {
|
||||
if (hr->is_empty()) {
|
||||
error_message = "the region should not be empty";
|
||||
}
|
||||
} else {
|
||||
if (!hr->is_empty()) {
|
||||
error_message = "the region should be empty";
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
// The _containing_set field is only available when ASSERT is defined.
|
||||
if (hr->containing_set() != expected_containing_set) {
|
||||
error_message = "inconsistent containing set found";
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
const char* extra_error_message = verify_region_extra(hr);
|
||||
if (extra_error_message != NULL) {
|
||||
error_message = extra_error_message;
|
||||
}
|
||||
|
||||
if (error_message != NULL) {
|
||||
outputStream* out = tty;
|
||||
out->cr();
|
||||
out->print_cr("## [%s] %s", name(), error_message);
|
||||
out->print_cr("## Offending Region: "PTR_FORMAT, hr);
|
||||
out->print_cr(" "HR_FORMAT, HR_FORMAT_PARAMS(hr));
|
||||
#ifdef ASSERT
|
||||
out->print_cr(" containing set: "PTR_FORMAT, hr->containing_set());
|
||||
#endif // ASSERT
|
||||
out->print_cr("## Offending Region Set: "PTR_FORMAT, this);
|
||||
print_on(out);
|
||||
return false;
|
||||
} else {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
void HeapRegionSetBase::verify() {
|
||||
// It's important that we also observe the MT safety protocol even
|
||||
// for the verification calls. If we do verification without the
|
||||
// appropriate locks and the set changes underneath our feet
|
||||
// verification might fail and send us on a wild goose chase.
|
||||
hrl_assert_mt_safety_ok(this);
|
||||
|
||||
guarantee(( is_empty() && length() == 0 && region_num() == 0 &&
|
||||
total_used_bytes() == 0 && total_capacity_bytes() == 0) ||
|
||||
(!is_empty() && length() >= 0 && region_num() >= 0 &&
|
||||
total_used_bytes() >= 0 && total_capacity_bytes() >= 0),
|
||||
hrl_ext_msg(this, "invariant"));
|
||||
|
||||
guarantee((!regions_humongous() && region_num() == length()) ||
|
||||
( regions_humongous() && region_num() >= length()),
|
||||
hrl_ext_msg(this, "invariant"));
|
||||
|
||||
guarantee(!regions_empty() || total_used_bytes() == 0,
|
||||
hrl_ext_msg(this, "invariant"));
|
||||
|
||||
guarantee(total_used_bytes() <= total_capacity_bytes(),
|
||||
hrl_ext_msg(this, "invariant"));
|
||||
}
|
||||
|
||||
void HeapRegionSetBase::verify_start() {
|
||||
// See comment in verify() about MT safety and verification.
|
||||
hrl_assert_mt_safety_ok(this);
|
||||
assert(!_verify_in_progress,
|
||||
hrl_ext_msg(this, "verification should not be in progress"));
|
||||
|
||||
// Do the basic verification first before we do the checks over the regions.
|
||||
HeapRegionSetBase::verify();
|
||||
|
||||
_calc_length = 0;
|
||||
_calc_region_num = 0;
|
||||
_calc_total_capacity_bytes = 0;
|
||||
_calc_total_used_bytes = 0;
|
||||
_verify_in_progress = true;
|
||||
}
|
||||
|
||||
void HeapRegionSetBase::verify_next_region(HeapRegion* hr) {
|
||||
// See comment in verify() about MT safety and verification.
|
||||
hrl_assert_mt_safety_ok(this);
|
||||
assert(_verify_in_progress,
|
||||
hrl_ext_msg(this, "verification should be in progress"));
|
||||
|
||||
guarantee(verify_region(hr, this), hrl_ext_msg(this, "region verification"));
|
||||
|
||||
_calc_length += 1;
|
||||
if (!hr->isHumongous()) {
|
||||
_calc_region_num += 1;
|
||||
} else {
|
||||
_calc_region_num += calculate_region_num(hr);
|
||||
}
|
||||
_calc_total_capacity_bytes += hr->capacity();
|
||||
_calc_total_used_bytes += hr->used();
|
||||
}
|
||||
|
||||
void HeapRegionSetBase::verify_end() {
|
||||
// See comment in verify() about MT safety and verification.
|
||||
hrl_assert_mt_safety_ok(this);
|
||||
assert(_verify_in_progress,
|
||||
hrl_ext_msg(this, "verification should be in progress"));
|
||||
|
||||
guarantee(length() == _calc_length,
|
||||
hrl_err_msg("[%s] length: "SIZE_FORMAT" should be == "
|
||||
"calc length: "SIZE_FORMAT,
|
||||
name(), length(), _calc_length));
|
||||
|
||||
guarantee(region_num() == _calc_region_num,
|
||||
hrl_err_msg("[%s] region num: "SIZE_FORMAT" should be == "
|
||||
"calc region num: "SIZE_FORMAT,
|
||||
name(), region_num(), _calc_region_num));
|
||||
|
||||
guarantee(total_capacity_bytes() == _calc_total_capacity_bytes,
|
||||
hrl_err_msg("[%s] capacity bytes: "SIZE_FORMAT" should be == "
|
||||
"calc capacity bytes: "SIZE_FORMAT,
|
||||
name(),
|
||||
total_capacity_bytes(), _calc_total_capacity_bytes));
|
||||
|
||||
guarantee(total_used_bytes() == _calc_total_used_bytes,
|
||||
hrl_err_msg("[%s] used bytes: "SIZE_FORMAT" should be == "
|
||||
"calc used bytes: "SIZE_FORMAT,
|
||||
name(), total_used_bytes(), _calc_total_used_bytes));
|
||||
|
||||
_verify_in_progress = false;
|
||||
}
|
||||
|
||||
void HeapRegionSetBase::print_on(outputStream* out, bool print_contents) {
|
||||
out->cr();
|
||||
out->print_cr("Set: %s ("PTR_FORMAT")", name(), this);
|
||||
out->print_cr(" Region Assumptions");
|
||||
out->print_cr(" humongous : %s", BOOL_TO_STR(regions_humongous()));
|
||||
out->print_cr(" empty : %s", BOOL_TO_STR(regions_empty()));
|
||||
out->print_cr(" Attributes");
|
||||
out->print_cr(" length : "SIZE_FORMAT_W(14), length());
|
||||
out->print_cr(" region num : "SIZE_FORMAT_W(14), region_num());
|
||||
out->print_cr(" total capacity : "SIZE_FORMAT_W(14)" bytes",
|
||||
total_capacity_bytes());
|
||||
out->print_cr(" total used : "SIZE_FORMAT_W(14)" bytes",
|
||||
total_used_bytes());
|
||||
}
|
||||
|
||||
void HeapRegionSetBase::clear() {
|
||||
_length = 0;
|
||||
_region_num = 0;
|
||||
_total_used_bytes = 0;
|
||||
}
|
||||
|
||||
HeapRegionSetBase::HeapRegionSetBase(const char* name)
|
||||
: _name(name), _verify_in_progress(false),
|
||||
_calc_length(0), _calc_region_num(0),
|
||||
_calc_total_capacity_bytes(0), _calc_total_used_bytes(0) { }
|
||||
|
||||
//////////////////// HeapRegionSet ////////////////////
|
||||
|
||||
void HeapRegionSet::update_from_proxy(HeapRegionSet* proxy_set) {
|
||||
hrl_assert_mt_safety_ok(this);
|
||||
hrl_assert_mt_safety_ok(proxy_set);
|
||||
hrl_assert_sets_match(this, proxy_set);
|
||||
|
||||
verify_optional();
|
||||
proxy_set->verify_optional();
|
||||
|
||||
if (proxy_set->is_empty()) return;
|
||||
|
||||
assert(proxy_set->length() <= _length,
|
||||
hrl_err_msg("[%s] proxy set length: "SIZE_FORMAT" "
|
||||
"should be <= length: "SIZE_FORMAT,
|
||||
name(), proxy_set->length(), _length));
|
||||
_length -= proxy_set->length();
|
||||
|
||||
assert(proxy_set->region_num() <= _region_num,
|
||||
hrl_err_msg("[%s] proxy set region num: "SIZE_FORMAT" "
|
||||
"should be <= region num: "SIZE_FORMAT,
|
||||
name(), proxy_set->region_num(), _region_num));
|
||||
_region_num -= proxy_set->region_num();
|
||||
|
||||
assert(proxy_set->total_used_bytes() <= _total_used_bytes,
|
||||
hrl_err_msg("[%s] proxy set used bytes: "SIZE_FORMAT" "
|
||||
"should be <= used bytes: "SIZE_FORMAT,
|
||||
name(), proxy_set->total_used_bytes(),
|
||||
_total_used_bytes));
|
||||
_total_used_bytes -= proxy_set->total_used_bytes();
|
||||
|
||||
proxy_set->clear();
|
||||
|
||||
verify_optional();
|
||||
proxy_set->verify_optional();
|
||||
}
|
||||
|
||||
//////////////////// HeapRegionLinkedList ////////////////////
|
||||
|
||||
void HeapRegionLinkedList::fill_in_ext_msg_extra(hrl_ext_msg* msg) {
|
||||
msg->append(" hd: "PTR_FORMAT" tl: "PTR_FORMAT, head(), tail());
|
||||
}
|
||||
|
||||
void HeapRegionLinkedList::add_as_tail(HeapRegionLinkedList* from_list) {
|
||||
hrl_assert_mt_safety_ok(this);
|
||||
hrl_assert_mt_safety_ok(from_list);
|
||||
|
||||
verify_optional();
|
||||
from_list->verify_optional();
|
||||
|
||||
if (from_list->is_empty()) return;
|
||||
|
||||
#ifdef ASSERT
|
||||
HeapRegionLinkedListIterator iter(from_list);
|
||||
while (iter.more_available()) {
|
||||
HeapRegion* hr = iter.get_next();
|
||||
// In set_containing_set() we check that we either set the value
|
||||
// from NULL to non-NULL or vice versa to catch bugs. So, we have
|
||||
// to NULL it first before setting it to the value.
|
||||
hr->set_containing_set(NULL);
|
||||
hr->set_containing_set(this);
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
if (_tail != NULL) {
|
||||
assert(length() > 0 && _head != NULL, hrl_ext_msg(this, "invariant"));
|
||||
_tail->set_next(from_list->_head);
|
||||
} else {
|
||||
assert(length() == 0 && _head == NULL, hrl_ext_msg(this, "invariant"));
|
||||
_head = from_list->_head;
|
||||
}
|
||||
_tail = from_list->_tail;
|
||||
|
||||
_length += from_list->length();
|
||||
_region_num += from_list->region_num();
|
||||
_total_used_bytes += from_list->total_used_bytes();
|
||||
from_list->clear();
|
||||
|
||||
verify_optional();
|
||||
from_list->verify_optional();
|
||||
}
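// Aside (not part of this change): the concatenation above is a
// constant-time splice: link this list's tail to the other list's head,
// adopt its tail, fold its counters into ours, and empty it. A minimal
// sketch of the same splice on a bare singly-linked node list; Node,
// List and splice_tail are illustrative names only.

#include <cstddef>

struct Node { Node* next; };

struct List {
  Node*  head;
  Node*  tail;
  size_t length;

  List() : head(nullptr), tail(nullptr), length(0) { }

  // Moves every node of 'from' onto the end of this list in O(1).
  void splice_tail(List& from) {
    if (from.head == nullptr) return;        // nothing to move
    if (tail != nullptr) tail->next = from.head;
    else                 head = from.head;   // this list was empty
    tail    = from.tail;
    length += from.length;
    from.head = from.tail = nullptr;         // the source list becomes empty
    from.length = 0;
  }
};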
|
||||
|
||||
void HeapRegionLinkedList::remove_all() {
|
||||
hrl_assert_mt_safety_ok(this);
|
||||
verify_optional();
|
||||
|
||||
HeapRegion* curr = _head;
|
||||
while (curr != NULL) {
|
||||
hrl_assert_region_ok(this, curr, this);
|
||||
|
||||
HeapRegion* next = curr->next();
|
||||
curr->set_next(NULL);
|
||||
curr->set_containing_set(NULL);
|
||||
curr = next;
|
||||
}
|
||||
clear();
|
||||
|
||||
verify_optional();
|
||||
}
|
||||
|
||||
void HeapRegionLinkedList::remove_all_pending(size_t target_count) {
|
||||
hrl_assert_mt_safety_ok(this);
|
||||
assert(target_count > 1, hrl_ext_msg(this, "pre-condition"));
|
||||
assert(!is_empty(), hrl_ext_msg(this, "pre-condition"));
|
||||
|
||||
verify_optional();
|
||||
DEBUG_ONLY(size_t old_length = length();)
|
||||
|
||||
HeapRegion* curr = _head;
|
||||
HeapRegion* prev = NULL;
|
||||
size_t count = 0;
|
||||
while (curr != NULL) {
|
||||
hrl_assert_region_ok(this, curr, this);
|
||||
HeapRegion* next = curr->next();
|
||||
|
||||
if (curr->pending_removal()) {
|
||||
assert(count < target_count,
|
||||
hrl_err_msg("[%s] should not come across more regions "
|
||||
"pending for removal than target_count: "SIZE_FORMAT,
|
||||
name(), target_count));
|
||||
|
||||
if (prev == NULL) {
|
||||
assert(_head == curr, hrl_ext_msg(this, "invariant"));
|
||||
_head = next;
|
||||
} else {
|
||||
assert(_head != curr, hrl_ext_msg(this, "invariant"));
|
||||
prev->set_next(next);
|
||||
}
|
||||
if (next == NULL) {
|
||||
assert(_tail == curr, hrl_ext_msg(this, "invariant"));
|
||||
_tail = prev;
|
||||
} else {
|
||||
assert(_tail != curr, hrl_ext_msg(this, "invariant"));
|
||||
}
|
||||
|
||||
curr->set_next(NULL);
|
||||
remove_internal(curr);
|
||||
curr->set_pending_removal(false);
|
||||
|
||||
count += 1;
|
||||
|
||||
// If we have come across the target number of regions we can
|
||||
// just bail out. However, for debugging purposes, we can just
|
||||
// carry on iterating to make sure there are not more regions
|
||||
// tagged with pending removal.
|
||||
DEBUG_ONLY(if (count == target_count) break;)
|
||||
} else {
|
||||
prev = curr;
|
||||
}
|
||||
curr = next;
|
||||
}
|
||||
|
||||
assert(count == target_count,
|
||||
hrl_err_msg("[%s] count: "SIZE_FORMAT" should be == "
|
||||
"target_count: "SIZE_FORMAT, name(), count, target_count));
|
||||
assert(length() + target_count == old_length,
|
||||
hrl_err_msg("[%s] new length should be consistent "
|
||||
"new length: "SIZE_FORMAT" old length: "SIZE_FORMAT" "
|
||||
"target_count: "SIZE_FORMAT,
|
||||
name(), length(), old_length, target_count));
|
||||
|
||||
verify_optional();
|
||||
}
|
||||
|
||||
void HeapRegionLinkedList::verify() {
|
||||
// See comment in HeapRegionSetBase::verify() about MT safety and
|
||||
// verification.
|
||||
hrl_assert_mt_safety_ok(this);
|
||||
|
||||
// This will also do the basic verification too.
|
||||
verify_start();
|
||||
|
||||
HeapRegion* curr = _head;
|
||||
HeapRegion* prev1 = NULL;
|
||||
HeapRegion* prev0 = NULL;
|
||||
size_t count = 0;
|
||||
while (curr != NULL) {
|
||||
verify_next_region(curr);
|
||||
|
||||
count += 1;
|
||||
guarantee(count < _unrealistically_long_length,
|
||||
hrl_err_msg("[%s] the calculated length: "SIZE_FORMAT" "
|
||||
"seems very long, is there maybe a cycle? "
|
||||
"curr: "PTR_FORMAT" prev0: "PTR_FORMAT" "
|
||||
"prev1: "PTR_FORMAT" length: "SIZE_FORMAT,
|
||||
name(), count, curr, prev0, prev1, length()));
|
||||
|
||||
prev1 = prev0;
|
||||
prev0 = curr;
|
||||
curr = curr->next();
|
||||
}
|
||||
|
||||
guarantee(_tail == prev0, hrl_ext_msg(this, "post-condition"));
|
||||
|
||||
verify_end();
|
||||
}
|
||||
|
||||
void HeapRegionLinkedList::clear() {
|
||||
HeapRegionSetBase::clear();
|
||||
_head = NULL;
|
||||
_tail = NULL;
|
||||
}
|
||||
|
||||
void HeapRegionLinkedList::print_on(outputStream* out, bool print_contents) {
|
||||
HeapRegionSetBase::print_on(out, print_contents);
|
||||
out->print_cr(" Linking");
|
||||
out->print_cr(" head : "PTR_FORMAT, _head);
|
||||
out->print_cr(" tail : "PTR_FORMAT, _tail);
|
||||
|
||||
if (print_contents) {
|
||||
out->print_cr(" Contents");
|
||||
HeapRegionLinkedListIterator iter(this);
|
||||
while (iter.more_available()) {
|
||||
HeapRegion* hr = iter.get_next();
|
||||
hr->print_on(out);
|
||||
}
|
||||
}
|
||||
}
|
hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp (new file, 346 lines)
@@ -0,0 +1,346 @@
|
||||
/*
|
||||
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_HPP
|
||||
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_HPP
|
||||
|
||||
#include "gc_implementation/g1/heapRegion.hpp"
|
||||
|
||||
// Large buffer for some cases where the output might be larger than normal.
|
||||
#define HRL_ERR_MSG_BUFSZ 512
|
||||
typedef FormatBuffer<HRL_ERR_MSG_BUFSZ> hrl_err_msg;
|
||||
|
||||
// Set verification will be forced either if someone defines
|
||||
// HEAP_REGION_SET_FORCE_VERIFY to be 1, or in builds in which
|
||||
// asserts are compiled in.
|
||||
#ifndef HEAP_REGION_SET_FORCE_VERIFY
|
||||
#define HEAP_REGION_SET_FORCE_VERIFY defined(ASSERT)
|
||||
#endif // HEAP_REGION_SET_FORCE_VERIFY
|
||||
|
||||
//////////////////// HeapRegionSetBase ////////////////////
|
||||
|
||||
// Base class for all the classes that represent heap region sets. It
|
||||
// contains the basic attributes that each set needs to maintain
|
||||
// (e.g., length, region num, used bytes sum) plus any shared
|
||||
// functionality (e.g., verification).
|
||||
|
||||
class hrl_ext_msg;
|
||||
|
||||
class HeapRegionSetBase VALUE_OBJ_CLASS_SPEC {
|
||||
friend class hrl_ext_msg;
|
||||
|
||||
protected:
|
||||
static size_t calculate_region_num(HeapRegion* hr);
|
||||
|
||||
static size_t _unrealistically_long_length;
|
||||
|
||||
// The number of regions added to the set. If the set contains
|
||||
// only humongous regions, this reflects only 'starts humongous'
|
||||
// regions and does not include 'continues humongous' ones.
|
||||
size_t _length;
|
||||
|
||||
// The total number of regions represented by the set. If the set
|
||||
// does not contain humongous regions, this should be the same as
|
||||
// _length. If the set contains only humongous regions, this will
|
||||
// include the 'continues humongous' regions.
|
||||
size_t _region_num;
|
||||
|
||||
// We don't keep track of the total capacity explicitly, we instead
|
||||
// recalculate it based on _region_num and the heap region size.
|
||||
|
||||
// The sum of used bytes in the all the regions in the set.
|
||||
size_t _total_used_bytes;
|
||||
|
||||
const char* _name;
|
||||
|
||||
bool _verify_in_progress;
|
||||
size_t _calc_length;
|
||||
size_t _calc_region_num;
|
||||
size_t _calc_total_capacity_bytes;
|
||||
size_t _calc_total_used_bytes;
|
||||
|
||||
// verify_region() is used to ensure that the contents of a region
|
||||
// added to / removed from a set are consistent. Different sets
|
||||
// make different assumptions about the regions added to them. So
|
||||
// each set can override verify_region_extra(), which is called
|
||||
// from verify_region(), and do any extra verification it needs to
|
||||
// perform in that.
|
||||
virtual const char* verify_region_extra(HeapRegion* hr) { return NULL; }
|
||||
bool verify_region(HeapRegion* hr,
|
||||
HeapRegionSetBase* expected_containing_set);
|
||||
|
||||
// Indicates whether all regions in the set should be humongous or
|
||||
// not. Only used during verification.
|
||||
virtual bool regions_humongous() = 0;
|
||||
|
||||
// Indicates whether all regions in the set should be empty or
|
||||
// not. Only used during verification.
|
||||
virtual bool regions_empty() = 0;
|
||||
|
||||
// Subclasses can optionally override this to do MT safety protocol
|
||||
// checks. It is called in an assert from all methods that perform
|
||||
// updates on the set (and subclasses should also call it too).
|
||||
virtual bool check_mt_safety() { return true; }
|
||||
|
||||
// fill_in_ext_msg() writes the values of the set's attributes
|
||||
// in the custom err_msg (hrl_ext_msg). fill_in_ext_msg_extra()
|
||||
// allows subclasses to append further information.
|
||||
virtual void fill_in_ext_msg_extra(hrl_ext_msg* msg) { }
|
||||
void fill_in_ext_msg(hrl_ext_msg* msg, const char* message);
|
||||
|
||||
// It updates the fields of the set to reflect hr being added to
|
||||
// the set.
|
||||
inline void update_for_addition(HeapRegion* hr);
|
||||
|
||||
// It updates the fields of the set to reflect hr being added to
|
||||
// the set and tags the region appropriately.
|
||||
inline void add_internal(HeapRegion* hr);
|
||||
|
||||
// It updates the fields of the set to reflect hr being removed
|
||||
// from the set.
|
||||
inline void update_for_removal(HeapRegion* hr);
|
||||
|
||||
// It updates the fields of the set to reflect hr being removed
|
||||
// from the set and tags the region appropriately.
|
||||
inline void remove_internal(HeapRegion* hr);
|
||||
|
||||
// It clears all the fields of the sets. Note: it will not iterate
|
||||
// over the set and remove regions from it. It assumes that the
|
||||
// caller has already done so. It will literally just clear the fields.
|
||||
virtual void clear();
|
||||
|
||||
HeapRegionSetBase(const char* name);
|
||||
|
||||
public:
|
||||
static void set_unrealistically_long_length(size_t len);
|
||||
|
||||
const char* name() { return _name; }
|
||||
|
||||
size_t length() { return _length; }
|
||||
|
||||
bool is_empty() { return _length == 0; }
|
||||
|
||||
size_t region_num() { return _region_num; }
|
||||
|
||||
size_t total_capacity_bytes() {
|
||||
return region_num() << HeapRegion::LogOfHRGrainBytes;
|
||||
}
|
||||
|
||||
size_t total_used_bytes() { return _total_used_bytes; }
|
||||
|
||||
virtual void verify();
|
||||
void verify_start();
|
||||
void verify_next_region(HeapRegion* hr);
|
||||
void verify_end();
|
||||
|
||||
#if HEAP_REGION_SET_FORCE_VERIFY
|
||||
void verify_optional() {
|
||||
verify();
|
||||
}
|
||||
#else // HEAP_REGION_SET_FORCE_VERIFY
|
||||
void verify_optional() { }
|
||||
#endif // HEAP_REGION_SET_FORCE_VERIFY
|
||||
|
||||
virtual void print_on(outputStream* out, bool print_contents = false);
|
||||
};
|
||||
|
||||
// Customized err_msg for heap region sets. Apart from an
|
||||
// assert/guarantee-specific message it also prints out the values of
|
||||
// the fields of the associated set. This can be very helpful in
|
||||
// diagnosing failures.
|
||||
|
||||
class hrl_ext_msg : public hrl_err_msg {
|
||||
public:
|
||||
hrl_ext_msg(HeapRegionSetBase* set, const char* message) : hrl_err_msg("") {
|
||||
set->fill_in_ext_msg(this, message);
|
||||
}
|
||||
};
|
||||
|
||||
// These two macros are provided for convenience, to keep the uses of
|
||||
// these two asserts a bit more concise.
|
||||
|
||||
#define hrl_assert_mt_safety_ok(_set_) \
|
||||
do { \
|
||||
assert((_set_)->check_mt_safety(), hrl_ext_msg((_set_), "MT safety")); \
|
||||
} while (0)
|
||||
|
||||
#define hrl_assert_region_ok(_set_, _hr_, _expected_) \
|
||||
do { \
|
||||
assert((_set_)->verify_region((_hr_), (_expected_)), \
|
||||
hrl_ext_msg((_set_), "region verification")); \
|
||||
} while (0)
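// Aside (not part of this change): the do { ... } while (0) wrapper is the
// standard trick for making a multi-statement macro behave like a single
// statement, so an unbraced "if (cond) hrl_assert_mt_safety_ok(set); else ..."
// still parses the way the caller expects.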
|
||||
|
||||
//////////////////// HeapRegionSet ////////////////////
|
||||
|
||||
#define hrl_assert_sets_match(_set1_, _set2_) \
|
||||
do { \
|
||||
assert(((_set1_)->regions_humongous() == \
|
||||
(_set2_)->regions_humongous()) && \
|
||||
((_set1_)->regions_empty() == (_set2_)->regions_empty()), \
|
||||
hrl_err_msg("the contents of set %s and set %s should match", \
|
||||
(_set1_)->name(), (_set2_)->name())); \
|
||||
} while (0)
|
||||
|
||||
// This class represents heap region sets whose members are not
|
||||
// explicitly tracked. It's helpful to group regions using such sets
|
||||
// so that we can reason about all the region groups in the heap using
|
||||
// the same interface (namely, the HeapRegionSetBase API).
|
||||
|
||||
class HeapRegionSet : public HeapRegionSetBase {
|
||||
protected:
|
||||
virtual const char* verify_region_extra(HeapRegion* hr) {
|
||||
if (hr->next() != NULL) {
|
||||
return "next() should always be NULL as we do not link the regions";
|
||||
}
|
||||
|
||||
return HeapRegionSetBase::verify_region_extra(hr);
|
||||
}
|
||||
|
||||
HeapRegionSet(const char* name) : HeapRegionSetBase(name) {
|
||||
clear();
|
||||
}
|
||||
|
||||
public:
|
||||
// It adds hr to the set. The region should not be a member of
|
||||
// another set.
|
||||
inline void add(HeapRegion* hr);
|
||||
|
||||
// It removes hr from the set. The region should be a member of
|
||||
// this set.
|
||||
inline void remove(HeapRegion* hr);
|
||||
|
||||
// It removes a region from the set. Instead of updating the fields
|
||||
// of the set to reflect this removal, it accumulates the updates
|
||||
// in proxy_set. The idea is that proxy_set is thread-local to
|
||||
// avoid multiple threads updating the fields of the set
|
||||
// concurrently and having to synchronize. The method
|
||||
// update_from_proxy() will update the fields of the set from the
|
||||
// proxy_set.
|
||||
inline void remove_with_proxy(HeapRegion* hr, HeapRegionSet* proxy_set);
|
||||
|
||||
// After multiple calls to remove_with_proxy() the updates to the
|
||||
// fields of the set are accumulated in proxy_set. This call
|
||||
// updates the fields of the set from proxy_set.
|
||||
void update_from_proxy(HeapRegionSet* proxy_set);
|
||||
};
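// Aside (not part of this change): the proxy-set protocol described above
// is a general contention-avoidance trick: each worker accumulates its
// removals in a private "proxy" object and the deltas are folded into the
// shared set once, under whatever lock protects it. A minimal sketch of
// the same idea with plain counters; SharedTotals, Proxy and the field
// names are illustrative only.

#include <cassert>
#include <cstddef>

struct Proxy {
  size_t removed_regions;
  size_t removed_bytes;
  Proxy() : removed_regions(0), removed_bytes(0) { }
};

struct SharedTotals {
  size_t regions;
  size_t bytes;
  SharedTotals() : regions(100), bytes(100 * 1024) { }

  // Called by a worker thread; it only touches its private proxy.
  static void remove_one(Proxy& p, size_t region_bytes) {
    p.removed_regions += 1;
    p.removed_bytes   += region_bytes;
  }

  // Called once at the end, while holding the lock that protects the totals.
  void update_from_proxy(Proxy& p) {
    assert(p.removed_regions <= regions && p.removed_bytes <= bytes);
    regions -= p.removed_regions;
    bytes   -= p.removed_bytes;
    p = Proxy();                              // clear the proxy for re-use
  }
};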
|
||||
|
||||
//////////////////// HeapRegionLinkedList ////////////////////
|
||||
|
||||
// A set that links all the regions added to it in a singly-linked
|
||||
// list. We should try to avoid doing operations that iterate over
|
||||
// such lists in performance critical paths. Typically we should
|
||||
// add / remove one region at a time or concatenate two lists. All
|
||||
// those operations are done in constant time.
|
||||
|
||||
class HeapRegionLinkedListIterator;
|
||||
|
||||
class HeapRegionLinkedList : public HeapRegionSetBase {
|
||||
friend class HeapRegionLinkedListIterator;
|
||||
|
||||
private:
|
||||
HeapRegion* _head;
|
||||
HeapRegion* _tail;
|
||||
|
||||
// These are provided for use by the friend classes.
|
||||
HeapRegion* head() { return _head; }
|
||||
HeapRegion* tail() { return _tail; }
|
||||
|
||||
protected:
|
||||
virtual void fill_in_ext_msg_extra(hrl_ext_msg* msg);
|
||||
|
||||
// See the comment for HeapRegionSetBase::clear()
|
||||
virtual void clear();
|
||||
|
||||
HeapRegionLinkedList(const char* name) : HeapRegionSetBase(name) {
|
||||
clear();
|
||||
}
|
||||
|
||||
public:
|
||||
// It adds hr to the list as the new tail. The region should not be
|
||||
// a member of another set.
|
||||
inline void add_as_tail(HeapRegion* hr);
|
||||
|
||||
// It removes and returns the head of the list. It assumes that the
|
||||
// list is not empty so it will return a non-NULL value.
|
||||
inline HeapRegion* remove_head();
|
||||
|
||||
// Convenience method.
|
||||
inline HeapRegion* remove_head_or_null();
|
||||
|
||||
// It moves the regions from from_list to this list and empties
|
||||
// from_list. The new regions will appear in the same order as they
|
||||
// were in from_list and be linked in the end of this list.
|
||||
void add_as_tail(HeapRegionLinkedList* from_list);
|
||||
|
||||
// It empties the list by removing all regions from it.
|
||||
void remove_all();
|
||||
|
||||
// It removes all regions in the list that are pending for removal
|
||||
// (i.e., they have been tagged with "pending_removal"). The list
|
||||
// must not be empty, target_count should reflect the exact number
|
||||
// of regions that are pending for removal in the list, and
|
||||
// target_count should be > 1 (currently, we never need to remove a
|
||||
// single region using this).
|
||||
void remove_all_pending(size_t target_count);
|
||||
|
||||
virtual void verify();
|
||||
|
||||
virtual void print_on(outputStream* out, bool print_contents = false);
|
||||
};
|
||||
|
||||
//////////////////// HeapRegionLinkedListIterator ////////////////////
|
||||
|
||||
// Iterator class that provides a convenient way to iterate over the
|
||||
// regions in a HeapRegionLinkedList instance.
|
||||
|
||||
class HeapRegionLinkedListIterator : public StackObj {
|
||||
private:
|
||||
HeapRegionLinkedList* _list;
|
||||
HeapRegion* _curr;
|
||||
|
||||
public:
|
||||
bool more_available() {
|
||||
return _curr != NULL;
|
||||
}
|
||||
|
||||
HeapRegion* get_next() {
|
||||
assert(more_available(),
|
||||
"get_next() should be called when more regions are available");
|
||||
|
||||
// If we are going to introduce a count in the iterator we should
|
||||
// do the "cycle" check.
|
||||
|
||||
HeapRegion* hr = _curr;
|
||||
assert(_list->verify_region(hr, _list), "region verification");
|
||||
_curr = hr->next();
|
||||
return hr;
|
||||
}
|
||||
|
||||
HeapRegionLinkedListIterator(HeapRegionLinkedList* list)
|
||||
: _curr(NULL), _list(list) {
|
||||
_curr = list->head();
|
||||
}
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_HPP
|
hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp (new file, 159 lines)
@@ -0,0 +1,159 @@
|
||||
/*
|
||||
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_INLINE_HPP
|
||||
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_INLINE_HPP
|
||||
|
||||
#include "gc_implementation/g1/heapRegionSet.hpp"
|
||||
|
||||
//////////////////// HeapRegionSetBase ////////////////////
|
||||
|
||||
inline void HeapRegionSetBase::update_for_addition(HeapRegion* hr) {
|
||||
// Assumes the caller has already verified the region.
|
||||
|
||||
_length += 1;
|
||||
if (!hr->isHumongous()) {
|
||||
_region_num += 1;
|
||||
} else {
|
||||
_region_num += calculate_region_num(hr);
|
||||
}
|
||||
_total_used_bytes += hr->used();
|
||||
}
|
||||
|
||||
inline void HeapRegionSetBase::add_internal(HeapRegion* hr) {
|
||||
hrl_assert_region_ok(this, hr, NULL);
|
||||
assert(hr->next() == NULL, hrl_ext_msg(this, "should not already be linked"));
|
||||
|
||||
update_for_addition(hr);
|
||||
hr->set_containing_set(this);
|
||||
}
|
||||
|
||||
inline void HeapRegionSetBase::update_for_removal(HeapRegion* hr) {
|
||||
// Assumes the caller has already verified the region.
|
||||
assert(_length > 0, hrl_ext_msg(this, "pre-condition"));
|
||||
_length -= 1;
|
||||
|
||||
size_t region_num_diff;
|
||||
if (!hr->isHumongous()) {
|
||||
region_num_diff = 1;
|
||||
} else {
|
||||
region_num_diff = calculate_region_num(hr);
|
||||
}
|
||||
assert(region_num_diff <= _region_num,
|
||||
hrl_err_msg("[%s] region's region num: "SIZE_FORMAT" "
|
||||
"should be <= region num: "SIZE_FORMAT,
|
||||
name(), region_num_diff, _region_num));
|
||||
_region_num -= region_num_diff;
|
||||
|
||||
size_t used_bytes = hr->used();
|
||||
assert(used_bytes <= _total_used_bytes,
|
||||
hrl_err_msg("[%s] region's used bytes: "SIZE_FORMAT" "
|
||||
"should be <= used bytes: "SIZE_FORMAT,
|
||||
name(), used_bytes, _total_used_bytes));
|
||||
_total_used_bytes -= used_bytes;
|
||||
}
|
||||
|
||||
inline void HeapRegionSetBase::remove_internal(HeapRegion* hr) {
|
||||
hrl_assert_region_ok(this, hr, this);
|
||||
assert(hr->next() == NULL, hrl_ext_msg(this, "should already be unlinked"));
|
||||
|
||||
hr->set_containing_set(NULL);
|
||||
update_for_removal(hr);
|
||||
}
|
||||
|
||||
//////////////////// HeapRegionSet ////////////////////
|
||||
|
||||
inline void HeapRegionSet::add(HeapRegion* hr) {
|
||||
hrl_assert_mt_safety_ok(this);
|
||||
// add_internal() will verify the region.
|
||||
add_internal(hr);
|
||||
}
|
||||
|
||||
inline void HeapRegionSet::remove(HeapRegion* hr) {
|
||||
hrl_assert_mt_safety_ok(this);
|
||||
// remove_internal() will verify the region.
|
||||
remove_internal(hr);
|
||||
}
|
||||
|
||||
inline void HeapRegionSet::remove_with_proxy(HeapRegion* hr,
|
||||
HeapRegionSet* proxy_set) {
|
||||
// No need to do the MT safety check here given that this method
|
||||
// does not update the contents of the set but instead accumulates
|
||||
// the changes in proxy_set which is assumed to be thread-local.
|
||||
hrl_assert_sets_match(this, proxy_set);
|
||||
hrl_assert_region_ok(this, hr, this);
|
||||
|
||||
hr->set_containing_set(NULL);
|
||||
proxy_set->update_for_addition(hr);
|
||||
}
|
||||
|
||||
//////////////////// HeapRegionLinkedList ////////////////////
|
||||
|
||||
inline void HeapRegionLinkedList::add_as_tail(HeapRegion* hr) {
|
||||
hrl_assert_mt_safety_ok(this);
|
||||
assert((length() == 0 && _head == NULL && _tail == NULL) ||
|
||||
(length() > 0 && _head != NULL && _tail != NULL),
|
||||
hrl_ext_msg(this, "invariant"));
|
||||
// add_internal() will verify the region.
|
||||
add_internal(hr);
|
||||
|
||||
// Now link the region.
|
||||
if (_tail != NULL) {
|
||||
_tail->set_next(hr);
|
||||
} else {
|
||||
_head = hr;
|
||||
}
|
||||
_tail = hr;
|
||||
}
|
||||
|
||||
inline HeapRegion* HeapRegionLinkedList::remove_head() {
|
||||
hrl_assert_mt_safety_ok(this);
|
||||
assert(!is_empty(), hrl_ext_msg(this, "the list should not be empty"));
|
||||
assert(length() > 0 && _head != NULL && _tail != NULL,
|
||||
hrl_ext_msg(this, "invariant"));
|
||||
|
||||
// We need to unlink it first.
|
||||
HeapRegion* hr = _head;
|
||||
_head = hr->next();
|
||||
if (_head == NULL) {
|
||||
_tail = NULL;
|
||||
}
|
||||
hr->set_next(NULL);
|
||||
|
||||
// remove_internal() will verify the region.
|
||||
remove_internal(hr);
|
||||
return hr;
|
||||
}
|
||||
|
||||
inline HeapRegion* HeapRegionLinkedList::remove_head_or_null() {
|
||||
hrl_assert_mt_safety_ok(this);
|
||||
|
||||
if (!is_empty()) {
|
||||
return remove_head();
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_INLINE_HPP
|
hotspot/src/share/vm/gc_implementation/g1/heapRegionSets.cpp (new file, 102 lines)
@@ -0,0 +1,102 @@
|
||||
/*
|
||||
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc_implementation/g1/heapRegionSets.hpp"
|
||||
|
||||
//////////////////// FreeRegionList ////////////////////
|
||||
|
||||
const char* FreeRegionList::verify_region_extra(HeapRegion* hr) {
|
||||
if (hr->is_young()) {
|
||||
return "the region should not be young";
|
||||
}
|
||||
// The superclass will check that the region is empty and
|
||||
// not-humongous.
|
||||
return HeapRegionLinkedList::verify_region_extra(hr);
|
||||
}
|
||||
|
||||
//////////////////// MasterFreeRegionList ////////////////////
|
||||
|
||||
bool MasterFreeRegionList::check_mt_safety() {
|
||||
// Master Free List MT safety protocol:
|
||||
// (a) If we're at a safepoint, operations on the master free list
|
||||
// should be invoked by either the VM thread (which will serialize
|
||||
// them) or by the GC workers while holding the
|
||||
// FreeList_lock.
|
||||
// (b) If we're not at a safepoint, operations on the master free
|
||||
// list should be invoked while holding the Heap_lock.
|
||||
|
||||
guarantee((SafepointSynchronize::is_at_safepoint() &&
|
||||
(Thread::current()->is_VM_thread() ||
|
||||
FreeList_lock->owned_by_self())) ||
|
||||
(!SafepointSynchronize::is_at_safepoint() &&
|
||||
Heap_lock->owned_by_self()),
|
||||
hrl_ext_msg(this, "master free list MT safety protocol"));
|
||||
|
||||
return FreeRegionList::check_mt_safety();
|
||||
}
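// Aside (not part of this change): the protocol above boils down to a
// single predicate over "who is calling and which lock do they hold". A
// compact sketch of that predicate with the inputs passed in explicitly;
// master_list_access_ok and its parameters are illustrative names only.

static bool master_list_access_ok(bool at_safepoint,
                                  bool is_vm_thread,
                                  bool holds_free_list_lock,
                                  bool holds_heap_lock) {
  if (at_safepoint) {
    // (a) serialized by the VM thread, or a GC worker holding FreeList_lock
    return is_vm_thread || holds_free_list_lock;
  }
  // (b) outside a safepoint the caller must hold the Heap_lock
  return holds_heap_lock;
}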
|
||||
|
||||
//////////////////// SecondaryFreeRegionList ////////////////////
|
||||
|
||||
bool SecondaryFreeRegionList::check_mt_safety() {
|
||||
// Secondary Free List MT safety protocol:
|
||||
// Operations on the secondary free list should always be invoked
|
||||
// while holding the SecondaryFreeList_lock.
|
||||
|
||||
guarantee(SecondaryFreeList_lock->owned_by_self(),
|
||||
hrl_ext_msg(this, "secondary free list MT safety protocol"));
|
||||
|
||||
return FreeRegionList::check_mt_safety();
|
||||
}
|
||||
|
||||
//////////////////// HumongousRegionSet ////////////////////
|
||||
|
||||
const char* HumongousRegionSet::verify_region_extra(HeapRegion* hr) {
|
||||
if (hr->is_young()) {
|
||||
return "the region should not be young";
|
||||
}
|
||||
// The superclass will check that the region is not empty and
|
||||
// humongous.
|
||||
return HeapRegionSet::verify_region_extra(hr);
|
||||
}
|
||||
|
||||
//////////////////// MasterHumongousRegionSet ////////////////////
|
||||
|
||||
bool MasterHumongousRegionSet::check_mt_safety() {
|
||||
// Master Humongous Set MT safety protocol:
|
||||
// (a) If we're at a safepoint, operations on the master humongous
|
||||
// set should be invoked by either the VM thread (which will
|
||||
// serialize them) or by the GC workers while holding the
|
||||
// OldSets_lock.
|
||||
// (b) If we're not at a safepoint, operations on the master
|
||||
// humongous set should be invoked while holding the Heap_lock.
|
||||
|
||||
guarantee((SafepointSynchronize::is_at_safepoint() &&
|
||||
(Thread::current()->is_VM_thread() ||
|
||||
OldSets_lock->owned_by_self())) ||
|
||||
(!SafepointSynchronize::is_at_safepoint() &&
|
||||
Heap_lock->owned_by_self()),
|
||||
hrl_ext_msg(this, "master humongous set MT safety protocol"));
|
||||
return HumongousRegionSet::check_mt_safety();
|
||||
}
|
hotspot/src/share/vm/gc_implementation/g1/heapRegionSets.hpp (new file, 86 lines)
@@ -0,0 +1,86 @@
|
||||
/*
 * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSETS_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSETS_HPP

#include "gc_implementation/g1/heapRegionSet.inline.hpp"

//////////////////// FreeRegionList ////////////////////

class FreeRegionList : public HeapRegionLinkedList {
protected:
  virtual const char* verify_region_extra(HeapRegion* hr);

  virtual bool regions_humongous() { return false; }
  virtual bool regions_empty()     { return true; }

public:
  FreeRegionList(const char* name) : HeapRegionLinkedList(name) { }
};

//////////////////// MasterFreeRegionList ////////////////////

class MasterFreeRegionList : public FreeRegionList {
protected:
  virtual bool check_mt_safety();

public:
  MasterFreeRegionList(const char* name) : FreeRegionList(name) { }
};

//////////////////// SecondaryFreeRegionList ////////////////////

class SecondaryFreeRegionList : public FreeRegionList {
protected:
  virtual bool check_mt_safety();

public:
  SecondaryFreeRegionList(const char* name) : FreeRegionList(name) { }
};

//////////////////// HumongousRegionSet ////////////////////

class HumongousRegionSet : public HeapRegionSet {
protected:
  virtual const char* verify_region_extra(HeapRegion* hr);

  virtual bool regions_humongous() { return true; }
  virtual bool regions_empty()     { return false; }

public:
  HumongousRegionSet(const char* name) : HeapRegionSet(name) { }
};

//////////////////// MasterHumongousRegionSet ////////////////////

class MasterHumongousRegionSet : public HumongousRegionSet {
protected:
  virtual bool check_mt_safety();

public:
  MasterHumongousRegionSet(const char* name) : HumongousRegionSet(name) { }
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSETS_HPP
|
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -38,8 +38,8 @@
|
||||
# include "thread_windows.inline.hpp"
|
||||
#endif
|
||||
|
||||
PtrQueue::PtrQueue(PtrQueueSet* qset_, bool perm, bool active) :
|
||||
_qset(qset_), _buf(NULL), _index(0), _active(active),
|
||||
PtrQueue::PtrQueue(PtrQueueSet* qset, bool perm, bool active) :
|
||||
_qset(qset), _buf(NULL), _index(0), _active(active),
|
||||
_perm(perm), _lock(NULL)
|
||||
{}
|
||||
|
||||
@@ -153,10 +153,16 @@ void PtrQueueSet::reduce_free_list() {
|
||||
}
|
||||
|
||||
void PtrQueue::handle_zero_index() {
|
||||
assert(0 == _index, "Precondition.");
|
||||
assert(_index == 0, "Precondition.");
|
||||
|
||||
// This thread records the full buffer and allocates a new one (while
|
||||
// holding the lock if there is one).
|
||||
if (_buf != NULL) {
|
||||
if (!should_enqueue_buffer()) {
|
||||
assert(_index > 0, "the buffer can only be re-used if it's not full");
|
||||
return;
|
||||
}
|
||||
|
||||
if (_lock) {
|
||||
assert(_lock->owned_by_self(), "Required.");
|
||||
|
||||
|
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -68,7 +68,7 @@ protected:
|
||||
public:
|
||||
// Initialize this queue to contain a null buffer, and be part of the
|
||||
// given PtrQueueSet.
|
||||
PtrQueue(PtrQueueSet*, bool perm = false, bool active = false);
|
||||
PtrQueue(PtrQueueSet* qset, bool perm = false, bool active = false);
|
||||
// Release any contained resources.
|
||||
void flush();
|
||||
// Calls flush() when destroyed.
|
||||
@@ -85,6 +85,14 @@ public:
|
||||
else enqueue_known_active(ptr);
|
||||
}
|
||||
|
||||
// This method is called when we're doing the zero index handling
|
||||
// and gives a chance to the queues to do any pre-enqueueing
|
||||
// processing they might want to do on the buffer. It should return
|
||||
// true if the buffer should be enqueued, or false if enough
|
||||
// entries were cleared from it so that it can be re-used. It should
|
||||
// not return false if the buffer is still full (otherwise we can
|
||||
// get into an infinite loop).
|
||||
virtual bool should_enqueue_buffer() { return true; }
|
||||
void handle_zero_index();
|
||||
void locking_enqueue_completed_buffer(void** buf);
|
||||
|
||||
|
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -23,12 +23,98 @@
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc_implementation/g1/satbQueue.hpp"
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "memory/sharedHeap.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "runtime/thread.hpp"
|
||||
|
||||
// This method removes entries from an SATB buffer that will not be
|
||||
// useful to the concurrent marking threads. An entry is removed if it
|
||||
// satisfies one of the following conditions:
|
||||
//
|
||||
// * it points to an object outside the G1 heap (G1's concurrent
|
||||
// marking only visits objects inside the G1 heap),
|
||||
// * it points to an object that has been allocated since marking
|
||||
// started (according to SATB those objects do not need to be
|
||||
// visited during marking), or
|
||||
// * it points to an object that has already been marked (no need to
|
||||
// process it again).
|
||||
//
|
||||
// The rest of the entries will be retained and are compacted towards
|
||||
// the top of the buffer. If with this filtering we clear a large
|
||||
// enough chunk of the buffer we can re-use it (instead of enqueueing
|
||||
// it) and we can just allow the mutator to carry on executing.
|
||||
|
||||
bool ObjPtrQueue::should_enqueue_buffer() {
|
||||
assert(_lock == NULL || _lock->owned_by_self(),
|
||||
"we should have taken the lock before calling this");
|
||||
|
||||
// A value of 0 means "don't filter SATB buffers".
|
||||
if (G1SATBBufferEnqueueingThresholdPercent == 0) {
|
||||
return true;
|
||||
}
|
||||
|
||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||
|
||||
// This method should only be called if there is a non-NULL buffer
|
||||
// that is full.
|
||||
assert(_index == 0, "pre-condition");
|
||||
assert(_buf != NULL, "pre-condition");
|
||||
|
||||
void** buf = _buf;
|
||||
size_t sz = _sz;
|
||||
|
||||
// Used for sanity checking at the end of the loop.
|
||||
debug_only(size_t entries = 0; size_t retained = 0;)
|
||||
|
||||
size_t i = sz;
|
||||
size_t new_index = sz;
|
||||
|
||||
// Given that we are expecting _index == 0, we could have changed
|
||||
// the loop condition to (i > 0). But we are using _index for
|
||||
// generality.
|
||||
while (i > _index) {
|
||||
assert(i > 0, "we should have at least one more entry to process");
|
||||
i -= oopSize;
|
||||
debug_only(entries += 1;)
|
||||
oop* p = (oop*) &buf[byte_index_to_index((int) i)];
|
||||
oop obj = *p;
|
||||
// NULL the entry so that unused parts of the buffer contain NULLs
|
||||
// at the end. If we are going to retain it we will copy it to its
|
||||
// final place. If we have retained all entries we have visited so
|
||||
// far, we'll just end up copying it to the same place.
|
||||
*p = NULL;
|
||||
|
||||
bool retain = g1h->is_obj_ill(obj);
|
||||
if (retain) {
|
||||
assert(new_index > 0, "we should not have already filled up the buffer");
|
||||
new_index -= oopSize;
|
||||
assert(new_index >= i,
"new_index should never be below i, as we always compact 'up'");
|
||||
oop* new_p = (oop*) &buf[byte_index_to_index((int) new_index)];
|
||||
assert(new_p >= p, "the destination location should never be below "
|
||||
"the source as we always compact 'up'");
|
||||
assert(*new_p == NULL,
|
||||
"we should have already cleared the destination location");
|
||||
*new_p = obj;
|
||||
debug_only(retained += 1;)
|
||||
}
|
||||
}
|
||||
size_t entries_calc = (sz - _index) / oopSize;
|
||||
assert(entries == entries_calc, "the number of entries we counted "
|
||||
"should match the number of entries we calculated");
|
||||
size_t retained_calc = (sz - new_index) / oopSize;
|
||||
assert(retained == retained_calc, "the number of retained entries we counted "
|
||||
"should match the number of retained entries we calculated");
|
||||
size_t perc = retained_calc * 100 / entries_calc;
|
||||
bool should_enqueue = perc > (size_t) G1SATBBufferEnqueueingThresholdPercent;
|
||||
_index = new_index;
|
||||
|
||||
return should_enqueue;
|
||||
}
|
||||
|
||||
void ObjPtrQueue::apply_closure(ObjectClosure* cl) {
|
||||
if (_buf != NULL) {
|
||||
apply_closure_to_buffer(cl, _buf, _index, _sz);
|
||||
|
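The policy described in the comment block above boils down to one pass over the buffer plus a percentage check. As a self-contained illustration in plain C++ (filter_and_decide, is_useful and the threshold parameter are invented names for this sketch, standing in for G1's liveness test and G1SATBBufferEnqueueingThresholdPercent; this is not the G1 API):

#include <cstddef>
#include <functional>
#include <vector>

// Filter a full SATB-style buffer in place: entries rejected by the
// predicate are dropped, survivors are compacted towards the top (high
// indices), and the caller is told whether the buffer is still "useful"
// enough to be enqueued rather than re-used.
bool filter_and_decide(std::vector<void*>& buf,
                       const std::function<bool(void*)>& is_useful,
                       size_t threshold_percent,
                       size_t& new_index) {
  const size_t sz = buf.size();
  if (sz == 0) { new_index = 0; return false; }
  if (threshold_percent == 0) { new_index = 0; return true; }  // 0 == "don't filter"

  size_t retained = 0;
  new_index = sz;                        // the buffer fills downwards from sz

  for (size_t i = sz; i > 0; ) {
    --i;
    void* entry = buf[i];
    buf[i] = nullptr;                    // clear; survivors are re-written below
    if (entry != nullptr && is_useful(entry)) {
      --new_index;
      buf[new_index] = entry;            // compact "up": new_index >= i always
      ++retained;
    }
  }

  size_t retained_percent = retained * 100 / sz;
  return retained_percent > threshold_percent;  // enqueue only if still mostly useful
}

For example, with a threshold of 60 a buffer is only enqueued if more than 60% of its entries survive filtering; otherwise the (now partly empty) buffer is handed back to the mutator for re-use.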
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -33,13 +33,18 @@ class JavaThread;
|
||||
// A ptrQueue whose elements are "oops", pointers to object heads.
|
||||
class ObjPtrQueue: public PtrQueue {
|
||||
public:
|
||||
ObjPtrQueue(PtrQueueSet* qset_, bool perm = false) :
|
||||
ObjPtrQueue(PtrQueueSet* qset, bool perm = false) :
|
||||
// SATB queues are only active during marking cycles. We create
|
||||
// them with their active field set to false. If a thread is
|
||||
// created during a cycle and its SATB queue needs to be activated
|
||||
// before the thread starts running, we'll need to set its active
|
||||
// field to true. This is done in JavaThread::initialize_queues().
|
||||
PtrQueue(qset_, perm, false /* active */) { }
|
||||
PtrQueue(qset, perm, false /* active */) { }
|
||||
|
||||
// Overrides PtrQueue::should_enqueue_buffer(). See the method's
|
||||
// definition for more information.
|
||||
virtual bool should_enqueue_buffer();
|
||||
|
||||
// Apply the closure to all elements, and reset the index to make the
|
||||
// buffer empty.
|
||||
void apply_closure(ObjectClosure* cl);
|
||||
|
@ -258,6 +258,7 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
|
||||
BiasedLocking::restore_marks();
|
||||
Threads::gc_epilogue();
|
||||
CodeCache::gc_epilogue();
|
||||
JvmtiExport::gc_epilogue();
|
||||
|
||||
COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
|
||||
|
||||
|
@ -1054,6 +1054,7 @@ void PSParallelCompact::post_compact()
|
||||
|
||||
Threads::gc_epilogue();
|
||||
CodeCache::gc_epilogue();
|
||||
JvmtiExport::gc_epilogue();
|
||||
|
||||
COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -34,30 +34,6 @@
|
||||
|
||||
// Implementation of Bytecode
|
||||
|
||||
bool Bytecode::check_must_rewrite(Bytecodes::Code code) const {
|
||||
assert(Bytecodes::can_rewrite(code), "post-check only");
|
||||
|
||||
// Some codes are conditionally rewriting. Look closely at them.
|
||||
switch (code) {
|
||||
case Bytecodes::_aload_0:
|
||||
// Even if RewriteFrequentPairs is turned on,
|
||||
// the _aload_0 code might delay its rewrite until
|
||||
// a following _getfield rewrites itself.
|
||||
return false;
|
||||
|
||||
case Bytecodes::_lookupswitch:
|
||||
return false; // the rewrite is not done by the interpreter
|
||||
|
||||
case Bytecodes::_new:
|
||||
// (Could actually look at the class here, but the profit would be small.)
|
||||
return false; // the rewrite is not always done
|
||||
}
|
||||
|
||||
// No other special cases.
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
#ifdef ASSERT
|
||||
|
||||
void Bytecode::assert_same_format_as(Bytecodes::Code testbc, bool is_wide) const {
|
||||
@ -188,17 +164,16 @@ int Bytecode_member_ref::index() const {
|
||||
// Note: Rewriter::rewrite changes the Java_u2 of an invokedynamic to a native_u4,
|
||||
// at the same time it allocates per-call-site CP cache entries.
|
||||
Bytecodes::Code rawc = code();
|
||||
Bytecode* invoke = bytecode();
|
||||
if (invoke->has_index_u4(rawc))
|
||||
return invoke->get_index_u4(rawc);
|
||||
if (has_index_u4(rawc))
|
||||
return get_index_u4(rawc);
|
||||
else
|
||||
return invoke->get_index_u2_cpcache(rawc);
|
||||
return get_index_u2_cpcache(rawc);
|
||||
}
|
||||
|
||||
int Bytecode_member_ref::pool_index() const {
|
||||
int index = this->index();
|
||||
DEBUG_ONLY({
|
||||
if (!bytecode()->has_index_u4(code()))
|
||||
if (!has_index_u4(code()))
|
||||
index -= constantPoolOopDesc::CPCACHE_INDEX_TAG;
|
||||
});
|
||||
return _method->constants()->cache()->entry_at(index)->constant_pool_index();
|
||||
@ -214,13 +189,12 @@ void Bytecode_field::verify() const {
|
||||
// Implementation of Bytecode_loadconstant
|
||||
|
||||
int Bytecode_loadconstant::raw_index() const {
|
||||
Bytecode* bcp = bytecode();
|
||||
Bytecodes::Code rawc = bcp->code();
|
||||
Bytecodes::Code rawc = code();
|
||||
assert(rawc != Bytecodes::_wide, "verifier prevents this");
|
||||
if (Bytecodes::java_code(rawc) == Bytecodes::_ldc)
|
||||
return bcp->get_index_u1(rawc);
|
||||
return get_index_u1(rawc);
|
||||
else
|
||||
return bcp->get_index_u2(rawc, false);
|
||||
return get_index_u2(rawc, false);
|
||||
}
|
||||
|
||||
int Bytecode_loadconstant::pool_index() const {
|
||||
@ -258,7 +232,7 @@ void Bytecode_lookupswitch::verify() const {
|
||||
case Bytecodes::_lookupswitch:
|
||||
{ int i = number_of_pairs() - 1;
|
||||
while (i-- > 0) {
|
||||
assert(pair_at(i)->match() < pair_at(i+1)->match(), "unsorted table entries");
|
||||
assert(pair_at(i).match() < pair_at(i+1).match(), "unsorted table entries");
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -38,14 +38,20 @@
|
||||
# include "bytes_zero.hpp"
|
||||
#endif
|
||||
|
||||
// Base class for different kinds of abstractions working
|
||||
// relative to an objects 'this' pointer.
|
||||
class ciBytecodeStream;
|
||||
|
||||
// The base class for different kinds of bytecode abstractions.
|
||||
// Provides the primitive operations to manipulate code relative
|
||||
// to the bcp.
|
||||
|
||||
class Bytecode: public StackObj {
|
||||
protected:
|
||||
const address _bcp;
|
||||
const Bytecodes::Code _code;
|
||||
|
||||
class ThisRelativeObj VALUE_OBJ_CLASS_SPEC {
|
||||
public:
|
||||
// Address computation
|
||||
address addr_at (int offset) const { return (address)this + offset; }
|
||||
int byte_at (int offset) const { return *(addr_at(offset)); }
|
||||
address addr_at (int offset) const { return (address)_bcp + offset; }
|
||||
u_char byte_at(int offset) const { return *addr_at(offset); }
|
||||
address aligned_addr_at (int offset) const { return (address)round_to((intptr_t)addr_at(offset), jintSize); }
|
||||
int aligned_offset (int offset) const { return aligned_addr_at(offset) - addr_at(0); }
|
||||
|
||||
@ -54,31 +60,20 @@ class ThisRelativeObj VALUE_OBJ_CLASS_SPEC {
|
||||
int get_Java_u4_at (int offset) const { return Bytes::get_Java_u4(addr_at(offset)); }
|
||||
int get_native_u2_at (int offset) const { return Bytes::get_native_u2(addr_at(offset)); }
|
||||
int get_native_u4_at (int offset) const { return Bytes::get_native_u4(addr_at(offset)); }
|
||||
};
|
||||
|
||||
|
||||
// The base class for different kinds of bytecode abstractions.
|
||||
// Provides the primitive operations to manipulate code relative
|
||||
// to an objects 'this' pointer.
|
||||
// FIXME: Make this a ResourceObj, include the enclosing methodOop, and cache the opcode.
|
||||
|
||||
class Bytecode: public ThisRelativeObj {
|
||||
protected:
|
||||
u_char byte_at(int offset) const { return *addr_at(offset); }
|
||||
bool check_must_rewrite(Bytecodes::Code bc) const;
|
||||
|
||||
public:
|
||||
Bytecode(methodOop method, address bcp): _bcp(bcp), _code(Bytecodes::code_at(method, addr_at(0))) {
|
||||
assert(method != NULL, "this form requires a valid methodOop");
|
||||
}
|
||||
// Defined in ciStreams.hpp
|
||||
inline Bytecode(const ciBytecodeStream* stream, address bcp = NULL);
|
||||
|
||||
// Attributes
|
||||
address bcp() const { return addr_at(0); }
|
||||
int instruction_size() const { return Bytecodes::length_at(bcp()); }
|
||||
address bcp() const { return _bcp; }
|
||||
int instruction_size() const { return Bytecodes::length_for_code_at(_code, bcp()); }
|
||||
|
||||
// Warning: Use code() with caution on live bytecode streams. 4926272
|
||||
Bytecodes::Code code() const { return Bytecodes::code_at(addr_at(0)); }
|
||||
Bytecodes::Code code() const { return _code; }
|
||||
Bytecodes::Code java_code() const { return Bytecodes::java_code(code()); }
|
||||
bool must_rewrite(Bytecodes::Code code) const { return Bytecodes::can_rewrite(code) && check_must_rewrite(code); }
|
||||
|
||||
// Creation
|
||||
inline friend Bytecode* Bytecode_at(address bcp);
|
||||
|
||||
// Static functions for parsing bytecodes in place.
|
||||
int get_index_u1(Bytecodes::Code bc) const {
|
||||
@ -89,7 +84,7 @@ class Bytecode: public ThisRelativeObj {
|
||||
assert_same_format_as(bc, is_wide); assert_index_size(2, bc, is_wide);
|
||||
address p = addr_at(is_wide ? 2 : 1);
|
||||
if (can_use_native_byte_order(bc, is_wide))
|
||||
return Bytes::get_native_u2(p);
|
||||
return Bytes::get_native_u2(p);
|
||||
else return Bytes::get_Java_u2(p);
|
||||
}
|
||||
int get_index_u1_cpcache(Bytecodes::Code bc) const {
|
||||
@ -138,20 +133,17 @@ class Bytecode: public ThisRelativeObj {
|
||||
}
|
||||
};
|
||||
|
||||
inline Bytecode* Bytecode_at(address bcp) {
|
||||
// Warning: Use with caution on live bytecode streams. 4926272
|
||||
return (Bytecode*)bcp;
|
||||
}
|
||||
|
||||
|
||||
// Abstractions for lookupswitch bytecode
|
||||
|
||||
class LookupswitchPair: ThisRelativeObj {
|
||||
class LookupswitchPair VALUE_OBJ_CLASS_SPEC {
|
||||
private:
|
||||
int _match;
|
||||
int _offset;
|
||||
const address _bcp;
|
||||
|
||||
address addr_at (int offset) const { return _bcp + offset; }
|
||||
int get_Java_u4_at (int offset) const { return Bytes::get_Java_u4(addr_at(offset)); }
|
||||
|
||||
public:
|
||||
LookupswitchPair(address bcp): _bcp(bcp) {}
|
||||
int match() const { return get_Java_u4_at(0 * jintSize); }
|
||||
int offset() const { return get_Java_u4_at(1 * jintSize); }
|
||||
};
|
||||
@ -159,26 +151,25 @@ class LookupswitchPair: ThisRelativeObj {
|
||||
|
||||
class Bytecode_lookupswitch: public Bytecode {
|
||||
public:
|
||||
Bytecode_lookupswitch(methodOop method, address bcp): Bytecode(method, bcp) { verify(); }
|
||||
// Defined in ciStreams.hpp
|
||||
inline Bytecode_lookupswitch(const ciBytecodeStream* stream);
|
||||
void verify() const PRODUCT_RETURN;
|
||||
|
||||
// Attributes
|
||||
int default_offset() const { return get_Java_u4_at(aligned_offset(1 + 0*jintSize)); }
|
||||
int number_of_pairs() const { return get_Java_u4_at(aligned_offset(1 + 1*jintSize)); }
|
||||
LookupswitchPair* pair_at(int i) const { assert(0 <= i && i < number_of_pairs(), "pair index out of bounds");
|
||||
return (LookupswitchPair*)aligned_addr_at(1 + (1 + i)*2*jintSize); }
|
||||
// Creation
|
||||
inline friend Bytecode_lookupswitch* Bytecode_lookupswitch_at(address bcp);
|
||||
LookupswitchPair pair_at(int i) const {
|
||||
assert(0 <= i && i < number_of_pairs(), "pair index out of bounds");
|
||||
return LookupswitchPair(aligned_addr_at(1 + (1 + i)*2*jintSize));
|
||||
}
|
||||
};
|
||||
|
||||
inline Bytecode_lookupswitch* Bytecode_lookupswitch_at(address bcp) {
|
||||
Bytecode_lookupswitch* b = (Bytecode_lookupswitch*)bcp;
|
||||
DEBUG_ONLY(b->verify());
|
||||
return b;
|
||||
}
|
||||
|
||||
|
||||
class Bytecode_tableswitch: public Bytecode {
|
||||
public:
|
||||
Bytecode_tableswitch(methodOop method, address bcp): Bytecode(method, bcp) { verify(); }
|
||||
// Defined in ciStreams.hpp
|
||||
inline Bytecode_tableswitch(const ciBytecodeStream* stream);
|
||||
void verify() const PRODUCT_RETURN;
|
||||
|
||||
// Attributes
|
||||
@ -187,52 +178,36 @@ class Bytecode_tableswitch: public Bytecode {
|
||||
int high_key() const { return get_Java_u4_at(aligned_offset(1 + 2*jintSize)); }
|
||||
int dest_offset_at(int i) const;
|
||||
int length() { return high_key()-low_key()+1; }
|
||||
|
||||
// Creation
|
||||
inline friend Bytecode_tableswitch* Bytecode_tableswitch_at(address bcp);
|
||||
};
|
||||
|
||||
inline Bytecode_tableswitch* Bytecode_tableswitch_at(address bcp) {
|
||||
Bytecode_tableswitch* b = (Bytecode_tableswitch*)bcp;
|
||||
DEBUG_ONLY(b->verify());
|
||||
return b;
|
||||
}
|
||||
|
||||
|
||||
// Common code for decoding invokes and field references.
|
||||
|
||||
class Bytecode_member_ref: public ResourceObj {
|
||||
class Bytecode_member_ref: public Bytecode {
|
||||
protected:
|
||||
methodHandle _method; // method containing the bytecode
|
||||
int _bci; // position of the bytecode
|
||||
const methodHandle _method; // method containing the bytecode
|
||||
|
||||
Bytecode_member_ref(methodHandle method, int bci) : _method(method), _bci(bci) {}
|
||||
Bytecode_member_ref(methodHandle method, int bci) : Bytecode(method(), method()->bcp_from(bci)), _method(method) {}
|
||||
|
||||
methodHandle method() const { return _method; }
|
||||
|
||||
public:
|
||||
// Attributes
|
||||
methodHandle method() const { return _method; }
|
||||
int bci() const { return _bci; }
|
||||
address bcp() const { return _method->bcp_from(bci()); }
|
||||
Bytecode* bytecode() const { return Bytecode_at(bcp()); }
|
||||
|
||||
int index() const; // cache index (loaded from instruction)
|
||||
int pool_index() const; // constant pool index
|
||||
symbolOop name() const; // returns the name of the method or field
|
||||
symbolOop signature() const; // returns the signature of the method or field
|
||||
|
||||
BasicType result_type(Thread* thread) const; // returns the result type of the getfield or invoke
|
||||
|
||||
Bytecodes::Code code() const { return Bytecodes::code_at(bcp(), _method()); }
|
||||
Bytecodes::Code java_code() const { return Bytecodes::java_code(code()); }
|
||||
};
|
||||
|
||||
// Abstraction for invoke_{virtual, static, interface, special}
|
||||
|
||||
class Bytecode_invoke: public Bytecode_member_ref {
|
||||
protected:
|
||||
Bytecode_invoke(methodHandle method, int bci) : Bytecode_member_ref(method, bci) {}
|
||||
// Constructor that skips verification
|
||||
Bytecode_invoke(methodHandle method, int bci, bool unused) : Bytecode_member_ref(method, bci) {}
|
||||
|
||||
public:
|
||||
Bytecode_invoke(methodHandle method, int bci) : Bytecode_member_ref(method, bci) { verify(); }
|
||||
void verify() const;
|
||||
|
||||
// Attributes
|
||||
@ -253,31 +228,20 @@ class Bytecode_invoke: public Bytecode_member_ref {
|
||||
is_invokespecial() ||
|
||||
is_invokedynamic(); }
|
||||
|
||||
// Creation
|
||||
inline friend Bytecode_invoke* Bytecode_invoke_at(methodHandle method, int bci);
|
||||
|
||||
// Like Bytecode_invoke_at. Instead it returns NULL if the bci is not at an invoke.
|
||||
inline friend Bytecode_invoke* Bytecode_invoke_at_check(methodHandle method, int bci);
|
||||
// Helper to skip verification. Use is_valid() to check if the result is really an invoke
|
||||
inline friend Bytecode_invoke Bytecode_invoke_check(methodHandle method, int bci);
|
||||
};
|
||||
|
||||
inline Bytecode_invoke* Bytecode_invoke_at(methodHandle method, int bci) {
|
||||
Bytecode_invoke* b = new Bytecode_invoke(method, bci);
|
||||
DEBUG_ONLY(b->verify());
|
||||
return b;
|
||||
}
|
||||
|
||||
inline Bytecode_invoke* Bytecode_invoke_at_check(methodHandle method, int bci) {
|
||||
Bytecode_invoke* b = new Bytecode_invoke(method, bci);
|
||||
return b->is_valid() ? b : NULL;
|
||||
inline Bytecode_invoke Bytecode_invoke_check(methodHandle method, int bci) {
|
||||
return Bytecode_invoke(method, bci, false);
|
||||
}
|
||||
|
||||
|
||||
// Abstraction for all field accesses (put/get field/static)
|
||||
class Bytecode_field: public Bytecode_member_ref {
|
||||
protected:
|
||||
Bytecode_field(methodHandle method, int bci) : Bytecode_member_ref(method, bci) {}
|
||||
|
||||
public:
|
||||
Bytecode_field(methodHandle method, int bci) : Bytecode_member_ref(method, bci) { verify(); }
|
||||
|
||||
// Testers
|
||||
bool is_getfield() const { return java_code() == Bytecodes::_getfield; }
|
||||
bool is_putfield() const { return java_code() == Bytecodes::_putfield; }
|
||||
@ -292,131 +256,64 @@ class Bytecode_field: public Bytecode_member_ref {
|
||||
is_getstatic() ||
|
||||
is_putstatic(); }
|
||||
void verify() const;
|
||||
|
||||
// Creation
|
||||
inline friend Bytecode_field* Bytecode_field_at(methodHandle method, int bci);
|
||||
};
|
||||
|
||||
inline Bytecode_field* Bytecode_field_at(methodHandle method, int bci) {
|
||||
Bytecode_field* b = new Bytecode_field(method, bci);
|
||||
DEBUG_ONLY(b->verify());
|
||||
return b;
|
||||
}
|
||||
|
||||
|
||||
// Abstraction for checkcast
|
||||
|
||||
class Bytecode_checkcast: public Bytecode {
|
||||
public:
|
||||
Bytecode_checkcast(methodOop method, address bcp): Bytecode(method, bcp) { verify(); }
|
||||
void verify() const { assert(Bytecodes::java_code(code()) == Bytecodes::_checkcast, "check checkcast"); }
|
||||
|
||||
// Returns index
|
||||
long index() const { return get_index_u2(Bytecodes::_checkcast); };
|
||||
|
||||
// Creation
|
||||
inline friend Bytecode_checkcast* Bytecode_checkcast_at(address bcp);
|
||||
};
|
||||
|
||||
inline Bytecode_checkcast* Bytecode_checkcast_at(address bcp) {
|
||||
Bytecode_checkcast* b = (Bytecode_checkcast*)bcp;
|
||||
DEBUG_ONLY(b->verify());
|
||||
return b;
|
||||
}
|
||||
|
||||
|
||||
// Abstraction for instanceof
|
||||
|
||||
class Bytecode_instanceof: public Bytecode {
|
||||
public:
|
||||
Bytecode_instanceof(methodOop method, address bcp): Bytecode(method, bcp) { verify(); }
|
||||
void verify() const { assert(code() == Bytecodes::_instanceof, "check instanceof"); }
|
||||
|
||||
// Returns index
|
||||
long index() const { return get_index_u2(Bytecodes::_instanceof); };
|
||||
|
||||
// Creation
|
||||
inline friend Bytecode_instanceof* Bytecode_instanceof_at(address bcp);
|
||||
};
|
||||
|
||||
inline Bytecode_instanceof* Bytecode_instanceof_at(address bcp) {
|
||||
Bytecode_instanceof* b = (Bytecode_instanceof*)bcp;
|
||||
DEBUG_ONLY(b->verify());
|
||||
return b;
|
||||
}
|
||||
|
||||
|
||||
class Bytecode_new: public Bytecode {
|
||||
public:
|
||||
Bytecode_new(methodOop method, address bcp): Bytecode(method, bcp) { verify(); }
|
||||
void verify() const { assert(java_code() == Bytecodes::_new, "check new"); }
|
||||
|
||||
// Returns index
|
||||
long index() const { return get_index_u2(Bytecodes::_new); };
|
||||
|
||||
// Creation
|
||||
inline friend Bytecode_new* Bytecode_new_at(address bcp);
|
||||
};
|
||||
|
||||
inline Bytecode_new* Bytecode_new_at(address bcp) {
|
||||
Bytecode_new* b = (Bytecode_new*)bcp;
|
||||
DEBUG_ONLY(b->verify());
|
||||
return b;
|
||||
}
|
||||
|
||||
|
||||
class Bytecode_multianewarray: public Bytecode {
|
||||
public:
|
||||
Bytecode_multianewarray(methodOop method, address bcp): Bytecode(method, bcp) { verify(); }
|
||||
void verify() const { assert(java_code() == Bytecodes::_multianewarray, "check new"); }
|
||||
|
||||
// Returns index
|
||||
long index() const { return get_index_u2(Bytecodes::_multianewarray); };
|
||||
|
||||
// Creation
|
||||
inline friend Bytecode_multianewarray* Bytecode_multianewarray_at(address bcp);
|
||||
};
|
||||
|
||||
inline Bytecode_multianewarray* Bytecode_multianewarray_at(address bcp) {
|
||||
Bytecode_multianewarray* b = (Bytecode_multianewarray*)bcp;
|
||||
DEBUG_ONLY(b->verify());
|
||||
return b;
|
||||
}
|
||||
|
||||
|
||||
class Bytecode_anewarray: public Bytecode {
|
||||
public:
|
||||
Bytecode_anewarray(methodOop method, address bcp): Bytecode(method, bcp) { verify(); }
|
||||
void verify() const { assert(java_code() == Bytecodes::_anewarray, "check anewarray"); }
|
||||
|
||||
// Returns index
|
||||
long index() const { return get_index_u2(Bytecodes::_anewarray); };
|
||||
|
||||
// Creation
|
||||
inline friend Bytecode_anewarray* Bytecode_anewarray_at(address bcp);
|
||||
};
|
||||
|
||||
inline Bytecode_anewarray* Bytecode_anewarray_at(address bcp) {
|
||||
Bytecode_anewarray* b = (Bytecode_anewarray*)bcp;
|
||||
DEBUG_ONLY(b->verify());
|
||||
return b;
|
||||
}
|
||||
|
||||
|
||||
// Abstraction for ldc, ldc_w and ldc2_w
|
||||
|
||||
class Bytecode_loadconstant: public ResourceObj {
|
||||
class Bytecode_loadconstant: public Bytecode {
|
||||
private:
|
||||
int _bci;
|
||||
methodHandle _method;
|
||||
|
||||
Bytecodes::Code code() const { return bytecode()->code(); }
|
||||
const methodHandle _method;
|
||||
|
||||
int raw_index() const;
|
||||
|
||||
Bytecode_loadconstant(methodHandle method, int bci) : _method(method), _bci(bci) {}
|
||||
|
||||
public:
|
||||
// Attributes
|
||||
methodHandle method() const { return _method; }
|
||||
int bci() const { return _bci; }
|
||||
address bcp() const { return _method->bcp_from(bci()); }
|
||||
Bytecode* bytecode() const { return Bytecode_at(bcp()); }
|
||||
Bytecode_loadconstant(methodHandle method, int bci): Bytecode(method(), method->bcp_from(bci)), _method(method) { verify(); }
|
||||
|
||||
void verify() const {
|
||||
assert(_method.not_null(), "must supply method");
|
||||
@ -437,15 +334,6 @@ class Bytecode_loadconstant: public ResourceObj {
|
||||
BasicType result_type() const; // returns the result type of the ldc
|
||||
|
||||
oop resolve_constant(TRAPS) const;
|
||||
|
||||
// Creation
|
||||
inline friend Bytecode_loadconstant* Bytecode_loadconstant_at(methodHandle method, int bci);
|
||||
};
|
||||
|
||||
inline Bytecode_loadconstant* Bytecode_loadconstant_at(methodHandle method, int bci) {
|
||||
Bytecode_loadconstant* b = new Bytecode_loadconstant(method, bci);
|
||||
DEBUG_ONLY(b->verify());
|
||||
return b;
|
||||
}
|
||||
|
||||
#endif // SHARE_VM_INTERPRETER_BYTECODE_HPP
|
||||
|
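The pattern driving most of the hunks above is the replacement of the old "cast the bcp to a Bytecode* and call methods on it" factories with small stack-allocated value objects that cache the decoded opcode. A sketch of that idea in isolation, in plain C++ with invented names (BytecodeView is not a HotSpot class):

#include <cassert>
#include <cstdint>

enum Code : uint8_t { _nop = 0x00, _iload = 0x15, _tableswitch = 0xaa };

// Old style (what the patch removes): reinterpret the instruction pointer
// itself as an object and call methods on it.
//   Bytecode* b = (Bytecode*) bcp;   // fragile: no room for cached state
//
// New style: a cheap stack value that carries the pointer plus any state
// decoded once in the constructor.
class BytecodeView {
public:
  BytecodeView(const uint8_t* bcp) : _bcp(bcp), _code(static_cast<Code>(*bcp)) {
    assert(bcp != nullptr);
  }
  const uint8_t* bcp()  const { return _bcp; }
  Code           code() const { return _code; }   // decoded once, then cached
  uint8_t byte_at(int offset) const { return _bcp[offset]; }
private:
  const uint8_t* _bcp;
  const Code     _code;
};

int main() {
  const uint8_t method_code[] = { _iload, 0x01, _nop };
  BytecodeView bc(method_code);        // lives on the stack, no allocation
  return bc.code() == _iload ? 0 : 1;
}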
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -831,11 +831,11 @@ BytecodeInterpreter::run(interpreterState istate) {
|
||||
// much like trying to deopt at a poll return. In that case we simply
|
||||
// get out of here
|
||||
//
|
||||
if ( Bytecodes::code_at(pc, METHOD) == Bytecodes::_return_register_finalizer) {
|
||||
if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) {
|
||||
// this will do the right thing even if an exception is pending.
|
||||
goto handle_return;
|
||||
}
|
||||
UPDATE_PC(Bytecodes::length_at(pc));
|
||||
UPDATE_PC(Bytecodes::length_at(METHOD, pc));
|
||||
if (THREAD->has_pending_exception()) goto handle_exception;
|
||||
goto run;
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -59,7 +59,7 @@ void BaseBytecodeStream::assert_raw_index_size(int size) const {
|
||||
// in raw mode, pretend indy is "bJJ__"
|
||||
assert(size == 2, "raw invokedynamic instruction has 2-byte index only");
|
||||
} else {
|
||||
bytecode()->assert_index_size(size, raw_code(), is_wide());
|
||||
bytecode().assert_index_size(size, raw_code(), is_wide());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -105,14 +105,14 @@ class BaseBytecodeStream: StackObj {
|
||||
bool is_last_bytecode() const { return _next_bci >= _end_bci; }
|
||||
|
||||
address bcp() const { return method()->code_base() + _bci; }
|
||||
Bytecode* bytecode() const { return Bytecode_at(bcp()); }
|
||||
Bytecode bytecode() const { return Bytecode(_method(), bcp()); }
|
||||
|
||||
// State changes
|
||||
void set_next_bci(int bci) { assert(0 <= bci && bci <= method()->code_size(), "illegal bci"); _next_bci = bci; }
|
||||
|
||||
// Bytecode-specific attributes
|
||||
int dest() const { return bci() + bytecode()->get_offset_s2(raw_code()); }
|
||||
int dest_w() const { return bci() + bytecode()->get_offset_s4(raw_code()); }
|
||||
int dest() const { return bci() + bytecode().get_offset_s2(raw_code()); }
|
||||
int dest_w() const { return bci() + bytecode().get_offset_s4(raw_code()); }
|
||||
|
||||
// One-byte indices.
|
||||
int get_index_u1() const { assert_raw_index_size(1); return *(jubyte*)(bcp()+1); }
|
||||
@ -189,7 +189,7 @@ class BytecodeStream: public BaseBytecodeStream {
|
||||
} else {
|
||||
// get bytecode
|
||||
address bcp = this->bcp();
|
||||
raw_code = Bytecodes::code_at(bcp);
|
||||
raw_code = Bytecodes::code_at(_method(), bcp);
|
||||
code = Bytecodes::java_code(raw_code);
|
||||
// set next bytecode position
|
||||
//
|
||||
@ -197,7 +197,7 @@ class BytecodeStream: public BaseBytecodeStream {
|
||||
// tty bytecode otherwise the stepping is wrong!
|
||||
// (careful: length_for(...) must be used first!)
|
||||
int l = Bytecodes::length_for(code);
|
||||
if (l == 0) l = Bytecodes::length_at(bcp);
|
||||
if (l == 0) l = Bytecodes::length_at(_method(), bcp);
|
||||
_next_bci += l;
|
||||
assert(_bci < _next_bci, "length must be > 0");
|
||||
// set attributes
|
||||
@ -219,16 +219,16 @@ class BytecodeStream: public BaseBytecodeStream {
|
||||
Bytecodes::Code code() const { return _code; }
|
||||
|
||||
// Unsigned indices, widening
|
||||
int get_index() const { return is_wide() ? bytecode()->get_index_u2(raw_code(), true) : get_index_u1(); }
|
||||
int get_index() const { return is_wide() ? bytecode().get_index_u2(raw_code(), true) : get_index_u1(); }
|
||||
// Get an unsigned 2-byte index, swapping the bytes if necessary.
|
||||
int get_index_u2() const { assert_raw_stream(false);
|
||||
return bytecode()->get_index_u2(raw_code(), false); }
|
||||
return bytecode().get_index_u2(raw_code(), false); }
|
||||
// Get an unsigned 2-byte index in native order.
|
||||
int get_index_u2_cpcache() const { assert_raw_stream(false);
|
||||
return bytecode()->get_index_u2_cpcache(raw_code()); }
|
||||
return bytecode().get_index_u2_cpcache(raw_code()); }
|
||||
int get_index_u4() const { assert_raw_stream(false);
|
||||
return bytecode()->get_index_u4(raw_code()); }
|
||||
bool has_index_u4() const { return bytecode()->has_index_u4(raw_code()); }
|
||||
return bytecode().get_index_u4(raw_code()); }
|
||||
bool has_index_u4() const { return bytecode().has_index_u4(raw_code()); }
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_INTERPRETER_BYTECODESTREAM_HPP
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -100,9 +100,9 @@ class BytecodePrinter: public BytecodeClosure {
|
||||
Bytecodes::Code code;
|
||||
if (is_wide()) {
|
||||
// bcp wasn't advanced if previous bytecode was _wide.
|
||||
code = Bytecodes::code_at(bcp+1);
|
||||
code = Bytecodes::code_at(method(), bcp+1);
|
||||
} else {
|
||||
code = Bytecodes::code_at(bcp);
|
||||
code = Bytecodes::code_at(method(), bcp);
|
||||
}
|
||||
_code = code;
|
||||
int bci = bcp - method->code_base();
|
||||
@ -127,11 +127,11 @@ class BytecodePrinter: public BytecodeClosure {
|
||||
void trace(methodHandle method, address bcp, outputStream* st) {
|
||||
_current_method = method();
|
||||
ResourceMark rm;
|
||||
Bytecodes::Code code = Bytecodes::code_at(bcp);
|
||||
Bytecodes::Code code = Bytecodes::code_at(method(), bcp);
|
||||
// Set is_wide
|
||||
_is_wide = (code == Bytecodes::_wide);
|
||||
if (is_wide()) {
|
||||
code = Bytecodes::code_at(bcp+1);
|
||||
code = Bytecodes::code_at(method(), bcp+1);
|
||||
}
|
||||
_code = code;
|
||||
int bci = bcp - method->code_base();
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -54,18 +54,46 @@ u_char Bytecodes::_lengths [Bytecodes::number_of_codes];
|
||||
Bytecodes::Code Bytecodes::_java_code [Bytecodes::number_of_codes];
|
||||
u_short Bytecodes::_flags [(1<<BitsPerByte)*2];
|
||||
|
||||
#ifdef ASSERT
|
||||
bool Bytecodes::check_method(const methodOopDesc* method, address bcp) {
|
||||
return method->contains(bcp);
|
||||
}
|
||||
#endif
|
||||
|
||||
Bytecodes::Code Bytecodes::code_at(methodOop method, int bci) {
|
||||
return code_at(method->bcp_from(bci), method);
|
||||
bool Bytecodes::check_must_rewrite(Bytecodes::Code code) {
|
||||
assert(can_rewrite(code), "post-check only");
|
||||
|
||||
// Some codes are conditionally rewriting. Look closely at them.
|
||||
switch (code) {
|
||||
case Bytecodes::_aload_0:
|
||||
// Even if RewriteFrequentPairs is turned on,
|
||||
// the _aload_0 code might delay its rewrite until
|
||||
// a following _getfield rewrites itself.
|
||||
return false;
|
||||
|
||||
case Bytecodes::_lookupswitch:
|
||||
return false; // the rewrite is not done by the interpreter
|
||||
|
||||
case Bytecodes::_new:
|
||||
// (Could actually look at the class here, but the profit would be small.)
|
||||
return false; // the rewrite is not always done
|
||||
}
|
||||
|
||||
// No other special cases.
|
||||
return true;
|
||||
}
|
||||
|
||||
Bytecodes::Code Bytecodes::non_breakpoint_code_at(address bcp, methodOop method) {
|
||||
if (method == NULL) method = methodOopDesc::method_from_bcp(bcp);
|
||||
Bytecodes::Code Bytecodes::code_at(methodOop method, int bci) {
|
||||
return code_at(method, method->bcp_from(bci));
|
||||
}
|
||||
|
||||
Bytecodes::Code Bytecodes::non_breakpoint_code_at(const methodOopDesc* method, address bcp) {
|
||||
assert(method != NULL, "must have the method for breakpoint conversion");
|
||||
assert(method->contains(bcp), "must be valid bcp in method");
|
||||
return method->orig_bytecode_at(method->bci_from(bcp));
|
||||
}
|
||||
|
||||
int Bytecodes::special_length_at(address bcp, address end) {
|
||||
Code code = code_at(bcp);
|
||||
int Bytecodes::special_length_at(Bytecodes::Code code, address bcp, address end) {
|
||||
switch (code) {
|
||||
case _wide:
|
||||
if (end != NULL && bcp + 1 >= end) {
|
||||
@ -120,7 +148,7 @@ int Bytecodes::raw_special_length_at(address bcp, address end) {
|
||||
if (code == _breakpoint) {
|
||||
return 1;
|
||||
} else {
|
||||
return special_length_at(bcp, end);
|
||||
return special_length_at(code, bcp, end);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -342,6 +342,12 @@ class Bytecodes: AllStatic {
|
||||
static void pd_initialize(); // platform specific initialization
|
||||
static Code pd_base_code_for(Code code); // platform specific base_code_for implementation
|
||||
|
||||
// Verify that bcp points into method
|
||||
#ifdef ASSERT
|
||||
static bool check_method(const methodOopDesc* method, address bcp);
|
||||
#endif
|
||||
static bool check_must_rewrite(Bytecodes::Code bc);
|
||||
|
||||
public:
|
||||
// Conversion
|
||||
static void check (Code code) { assert(is_defined(code), "illegal code"); }
|
||||
@ -349,22 +355,30 @@ class Bytecodes: AllStatic {
|
||||
static Code cast (int code) { return (Code)code; }
|
||||
|
||||
|
||||
// Fetch a bytecode, hiding breakpoints as necessary:
|
||||
static Code code_at(address bcp, methodOop method = NULL) {
|
||||
Code code = cast(*bcp); return (code != _breakpoint) ? code : non_breakpoint_code_at(bcp, method);
|
||||
}
|
||||
static Code java_code_at(address bcp, methodOop method = NULL) {
|
||||
return java_code(code_at(bcp, method));
|
||||
}
|
||||
// Fetch a bytecode, hiding breakpoints as necessary. The method
|
||||
// argument is used for conversion of breakpoints into the original
|
||||
// bytecode. The CI uses these methods but guarantees that
|
||||
// breakpoints are hidden so the method argument should be passed as
|
||||
// NULL since in that case the bcp and methodOop are unrelated
|
||||
// memory.
|
||||
static Code code_at(const methodOopDesc* method, address bcp) {
|
||||
assert(method == NULL || check_method(method, bcp), "bcp must point into method");
|
||||
Code code = cast(*bcp);
|
||||
assert(code != _breakpoint || method != NULL, "need methodOop to decode breakpoint");
|
||||
return (code != _breakpoint) ? code : non_breakpoint_code_at(method, bcp);
|
||||
}
|
||||
static Code java_code_at(const methodOopDesc* method, address bcp) {
|
||||
return java_code(code_at(method, bcp));
|
||||
}
|
||||
|
||||
// Fetch a bytecode or a breakpoint:
|
||||
static Code code_or_bp_at(address bcp) { return (Code)cast(*bcp); }
|
||||
// Fetch a bytecode or a breakpoint:
|
||||
static Code code_or_bp_at(address bcp) { return (Code)cast(*bcp); }
|
||||
|
||||
static Code code_at(methodOop method, int bci);
|
||||
static bool is_active_breakpoint_at(address bcp) { return (Code)*bcp == _breakpoint; }
|
||||
static Code code_at(methodOop method, int bci);
|
||||
static bool is_active_breakpoint_at(address bcp) { return (Code)*bcp == _breakpoint; }
|
||||
|
||||
// find a bytecode, behind a breakpoint if necessary:
|
||||
static Code non_breakpoint_code_at(address bcp, methodOop method = NULL);
|
||||
// find a bytecode, behind a breakpoint if necessary:
|
||||
static Code non_breakpoint_code_at(const methodOopDesc* method, address bcp);
|
||||
|
||||
// Bytecode attributes
|
||||
static bool is_defined (int code) { return 0 <= code && code < number_of_codes && flags(code, false) != 0; }
|
||||
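The comment about breakpoint hiding is the crux of why code_at() now takes the method first: only the owning method can translate a planted breakpoint back into the original opcode. A standalone sketch of that lookup (plain C++; Method and its map are simplified stand-ins for methodOopDesc, not the real declarations):

#include <cassert>
#include <cstdint>
#include <map>

static const uint8_t BREAKPOINT = 0xca;   // the JVM's reserved breakpoint opcode

// Stand-in for the method: remembers the original bytecode at any bci
// where a debugger has planted a breakpoint.
struct Method {
  const uint8_t* code_base;
  std::map<size_t, uint8_t> orig_bytecodes;   // bci -> original opcode

  uint8_t orig_bytecode_at(size_t bci) const {
    return orig_bytecodes.at(bci);
  }
};

// Mirrors Bytecodes::code_at(method, bcp): normally just reads *bcp, but a
// planted breakpoint must be translated back via the owning method.
uint8_t code_at(const Method* m, const uint8_t* bcp) {
  uint8_t raw = *bcp;
  if (raw != BREAKPOINT) return raw;
  assert(m != nullptr && "need the method to decode a breakpoint");
  return m->orig_bytecode_at(bcp - m->code_base);
}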
@ -379,14 +393,17 @@ class Bytecodes: AllStatic {
|
||||
static bool can_trap (Code code) { check(code); return has_all_flags(code, _bc_can_trap, false); }
|
||||
static Code java_code (Code code) { check(code); return _java_code [code]; }
|
||||
static bool can_rewrite (Code code) { check(code); return has_all_flags(code, _bc_can_rewrite, false); }
|
||||
static bool must_rewrite(Bytecodes::Code code) { return can_rewrite(code) && check_must_rewrite(code); }
|
||||
static bool native_byte_order(Code code) { check(code); return has_all_flags(code, _fmt_has_nbo, false); }
|
||||
static bool uses_cp_cache (Code code) { check(code); return has_all_flags(code, _fmt_has_j, false); }
|
||||
// if 'end' is provided, it indicates the end of the code buffer which
|
||||
// should not be read past when parsing.
|
||||
static int special_length_at(address bcp, address end = NULL);
|
||||
static int special_length_at(Bytecodes::Code code, address bcp, address end = NULL);
|
||||
static int special_length_at(methodOop method, address bcp, address end = NULL) { return special_length_at(code_at(method, bcp), bcp, end); }
|
||||
static int raw_special_length_at(address bcp, address end = NULL);
|
||||
static int length_at (address bcp) { int l = length_for(code_at(bcp)); return l > 0 ? l : special_length_at(bcp); }
|
||||
static int java_length_at (address bcp) { int l = length_for(java_code_at(bcp)); return l > 0 ? l : special_length_at(bcp); }
|
||||
static int length_for_code_at(Bytecodes::Code code, address bcp) { int l = length_for(code); return l > 0 ? l : special_length_at(code, bcp); }
|
||||
static int length_at (methodOop method, address bcp) { return length_for_code_at(code_at(method, bcp), bcp); }
|
||||
static int java_length_at (methodOop method, address bcp) { return length_for_code_at(java_code_at(method, bcp), bcp); }
|
||||
static bool is_java_code (Code code) { return 0 <= code && code < number_of_java_codes; }
|
||||
|
||||
static bool is_aload (Code code) { return (code == _aload || code == _aload_0 || code == _aload_1
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -237,10 +237,9 @@ AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m)
|
||||
// Return true if the interpreter can prove that the given bytecode has
|
||||
// not yet been executed (in Java semantics, not in actual operation).
|
||||
bool AbstractInterpreter::is_not_reached(methodHandle method, int bci) {
|
||||
address bcp = method->bcp_from(bci);
|
||||
Bytecodes::Code code = Bytecodes::code_at(bcp, method());
|
||||
Bytecodes::Code code = method()->code_at(bci);
|
||||
|
||||
if (!Bytecode_at(bcp)->must_rewrite(code)) {
|
||||
if (!Bytecodes::must_rewrite(code)) {
|
||||
// might have been reached
|
||||
return false;
|
||||
}
|
||||
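The reasoning behind is_not_reached() deserves spelling out: some bytecodes are rewritten in place to a _fast variant the first time the interpreter executes them, so seeing the original opcode still in the code stream is proof that the instruction never ran. A compact sketch of that argument (plain C++, illustrative opcode values, not the HotSpot tables):

#include <cstdint>

// True if the interpreter rewrites this opcode the first time it executes
// it (illustrative single-entry set; the real set lives in Bytecodes).
bool must_rewrite(uint8_t op) {
  const uint8_t GETFIELD = 0xb4;
  return op == GETFIELD;
}

// If such an opcode is still sitting in the code stream in its original,
// unrewritten form, the instruction provably has never been executed.
bool is_not_reached(const uint8_t* bcp, uint8_t original_opcode) {
  if (!must_rewrite(original_opcode)) {
    return false;                 // cannot prove anything either way
  }
  return *bcp == original_opcode; // still original => never reached
}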
@ -286,12 +285,12 @@ void AbstractInterpreter::print_method_kind(MethodKind kind) {
|
||||
// If deoptimization happens, this function returns the point of next bytecode to continue execution
|
||||
address AbstractInterpreter::deopt_continue_after_entry(methodOop method, address bcp, int callee_parameters, bool is_top_frame) {
|
||||
assert(method->contains(bcp), "just checkin'");
|
||||
Bytecodes::Code code = Bytecodes::java_code_at(bcp);
|
||||
Bytecodes::Code code = Bytecodes::java_code_at(method, bcp);
|
||||
assert(!Interpreter::bytecode_should_reexecute(code), "should not reexecute");
|
||||
int bci = method->bci_from(bcp);
|
||||
int length = -1; // initial value for debugging
|
||||
// compute continuation length
|
||||
length = Bytecodes::length_at(bcp);
|
||||
length = Bytecodes::length_at(method, bcp);
|
||||
// compute result type
|
||||
BasicType type = T_ILLEGAL;
|
||||
|
||||
@ -303,7 +302,7 @@ address AbstractInterpreter::deopt_continue_after_entry(methodOop method, addres
|
||||
Thread *thread = Thread::current();
|
||||
ResourceMark rm(thread);
|
||||
methodHandle mh(thread, method);
|
||||
type = Bytecode_invoke_at(mh, bci)->result_type(thread);
|
||||
type = Bytecode_invoke(mh, bci).result_type(thread);
|
||||
// since the cache entry might not be initialized:
|
||||
// (NOT needed for the old calling convention)
|
||||
if (!is_top_frame) {
|
||||
@ -317,7 +316,7 @@ address AbstractInterpreter::deopt_continue_after_entry(methodOop method, addres
|
||||
Thread *thread = Thread::current();
|
||||
ResourceMark rm(thread);
|
||||
methodHandle mh(thread, method);
|
||||
type = Bytecode_invoke_at(mh, bci)->result_type(thread);
|
||||
type = Bytecode_invoke(mh, bci).result_type(thread);
|
||||
// since the cache entry might not be initialized:
|
||||
// (NOT needed for the old calling convention)
|
||||
if (!is_top_frame) {
|
||||
@ -334,7 +333,7 @@ address AbstractInterpreter::deopt_continue_after_entry(methodOop method, addres
|
||||
Thread *thread = Thread::current();
|
||||
ResourceMark rm(thread);
|
||||
methodHandle mh(thread, method);
|
||||
type = Bytecode_loadconstant_at(mh, bci)->result_type();
|
||||
type = Bytecode_loadconstant(mh, bci).result_type();
|
||||
break;
|
||||
}
|
||||
|
||||
@ -356,7 +355,7 @@ address AbstractInterpreter::deopt_continue_after_entry(methodOop method, addres
|
||||
// Interpreter::deopt_entry(vtos, 0) like others
|
||||
address AbstractInterpreter::deopt_reexecute_entry(methodOop method, address bcp) {
|
||||
assert(method->contains(bcp), "just checkin'");
|
||||
Bytecodes::Code code = Bytecodes::java_code_at(bcp);
|
||||
Bytecodes::Code code = Bytecodes::java_code_at(method, bcp);
|
||||
#ifdef COMPILER1
|
||||
if(code == Bytecodes::_athrow ) {
|
||||
return Interpreter::rethrow_exception_entry();
|
||||
|
@ -132,9 +132,9 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_ldc(JavaThread* thread, Bytecodes::C
|
||||
bytecode == Bytecodes::_fast_aldc_w, "wrong bc");
|
||||
ResourceMark rm(thread);
|
||||
methodHandle m (thread, method(thread));
|
||||
Bytecode_loadconstant* ldc = Bytecode_loadconstant_at(m, bci(thread));
|
||||
oop result = ldc->resolve_constant(THREAD);
|
||||
DEBUG_ONLY(ConstantPoolCacheEntry* cpce = m->constants()->cache()->entry_at(ldc->cache_index()));
|
||||
Bytecode_loadconstant ldc(m, bci(thread));
|
||||
oop result = ldc.resolve_constant(THREAD);
|
||||
DEBUG_ONLY(ConstantPoolCacheEntry* cpce = m->constants()->cache()->entry_at(ldc.cache_index()));
|
||||
assert(result == cpce->f1(), "expected result for assembly code");
|
||||
}
|
||||
IRT_END
|
||||
@ -672,8 +672,8 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes
|
||||
if (bytecode == Bytecodes::_invokevirtual || bytecode == Bytecodes::_invokeinterface) {
|
||||
ResourceMark rm(thread);
|
||||
methodHandle m (thread, method(thread));
|
||||
Bytecode_invoke* call = Bytecode_invoke_at(m, bci(thread));
|
||||
symbolHandle signature (thread, call->signature());
|
||||
Bytecode_invoke call(m, bci(thread));
|
||||
symbolHandle signature (thread, call.signature());
|
||||
receiver = Handle(thread,
|
||||
thread->last_frame().interpreter_callee_receiver(signature));
|
||||
assert(Universe::heap()->is_in_reserved_or_null(receiver()),
|
||||
@ -756,7 +756,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) {
|
||||
caller_bci = caller_method->bci_from(caller_bcp);
|
||||
site_index = Bytes::get_native_u4(caller_bcp+1);
|
||||
}
|
||||
assert(site_index == InterpreterRuntime::bytecode(thread)->get_index_u4(bytecode), "");
|
||||
assert(site_index == InterpreterRuntime::bytecode(thread).get_index_u4(bytecode), "");
|
||||
assert(constantPoolCacheOopDesc::is_secondary_index(site_index), "proper format");
|
||||
// there is a second CPC entries that is of interest; it caches signature info:
|
||||
int main_index = pool->cache()->secondary_entry_at(site_index)->main_entry_index();
|
||||
@ -1241,9 +1241,9 @@ IRT_LEAF(void, InterpreterRuntime::popframe_move_outgoing_args(JavaThread* threa
|
||||
assert(fr.is_interpreted_frame(), "");
|
||||
jint bci = fr.interpreter_frame_bci();
|
||||
methodHandle mh(thread, fr.interpreter_frame_method());
|
||||
Bytecode_invoke* invoke = Bytecode_invoke_at(mh, bci);
|
||||
ArgumentSizeComputer asc(invoke->signature());
|
||||
int size_of_arguments = (asc.size() + (invoke->has_receiver() ? 1 : 0)); // receiver
|
||||
Bytecode_invoke invoke(mh, bci);
|
||||
ArgumentSizeComputer asc(invoke.signature());
|
||||
int size_of_arguments = (asc.size() + (invoke.has_receiver() ? 1 : 0)); // receiver
|
||||
Copy::conjoint_jbytes(src_address, dest_address,
|
||||
size_of_arguments * Interpreter::stackElementSize);
|
||||
IRT_END
|
||||
|
@ -58,16 +58,16 @@ class InterpreterRuntime: AllStatic {
|
||||
static void set_bcp_and_mdp(address bcp, JavaThread*thread);
|
||||
static Bytecodes::Code code(JavaThread *thread) {
|
||||
// pass method to avoid calling unsafe bcp_to_method (partial fix 4926272)
|
||||
return Bytecodes::code_at(bcp(thread), method(thread));
|
||||
return Bytecodes::code_at(method(thread), bcp(thread));
|
||||
}
|
||||
static bool already_resolved(JavaThread *thread) { return cache_entry(thread)->is_resolved(code(thread)); }
|
||||
static Bytecode* bytecode(JavaThread *thread) { return Bytecode_at(bcp(thread)); }
|
||||
static Bytecode bytecode(JavaThread *thread) { return Bytecode(method(thread), bcp(thread)); }
|
||||
static int get_index_u1(JavaThread *thread, Bytecodes::Code bc)
|
||||
{ return bytecode(thread)->get_index_u1(bc); }
|
||||
{ return bytecode(thread).get_index_u1(bc); }
|
||||
static int get_index_u2(JavaThread *thread, Bytecodes::Code bc)
|
||||
{ return bytecode(thread)->get_index_u2(bc); }
|
||||
{ return bytecode(thread).get_index_u2(bc); }
|
||||
static int get_index_u2_cpcache(JavaThread *thread, Bytecodes::Code bc)
|
||||
{ return bytecode(thread)->get_index_u2_cpcache(bc); }
|
||||
{ return bytecode(thread).get_index_u2_cpcache(bc); }
|
||||
static int number_of_dimensions(JavaThread *thread) { return bcp(thread)[3]; }
|
||||
|
||||
static ConstantPoolCacheEntry* cache_entry_at(JavaThread *thread, int i) { return method(thread)->constants()->cache()->entry_at(i); }
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -221,7 +221,7 @@ void Rewriter::scan_method(methodOop method) {
|
||||
// call to calculate the length.
|
||||
bc_length = Bytecodes::length_for(c);
|
||||
if (bc_length == 0) {
|
||||
bc_length = Bytecodes::length_at(bcp);
|
||||
bc_length = Bytecodes::length_at(method, bcp);
|
||||
|
||||
// length_at will put us at the bytecode after the one modified
|
||||
// by 'wide'. We don't currently examine any of the bytecodes
|
||||
@ -237,9 +237,9 @@ void Rewriter::scan_method(methodOop method) {
|
||||
switch (c) {
|
||||
case Bytecodes::_lookupswitch : {
|
||||
#ifndef CC_INTERP
|
||||
Bytecode_lookupswitch* bc = Bytecode_lookupswitch_at(bcp);
|
||||
Bytecode_lookupswitch bc(method, bcp);
|
||||
(*bcp) = (
|
||||
bc->number_of_pairs() < BinarySwitchThreshold
|
||||
bc.number_of_pairs() < BinarySwitchThreshold
|
||||
? Bytecodes::_fast_linearswitch
|
||||
: Bytecodes::_fast_binaryswitch
|
||||
);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -592,7 +592,7 @@ address TemplateInterpreter::deopt_continue_after_entry(methodOop method, addres
|
||||
// that do not return "Interpreter::deopt_entry(vtos, 0)"
|
||||
address TemplateInterpreter::deopt_reexecute_entry(methodOop method, address bcp) {
|
||||
assert(method->contains(bcp), "just checkin'");
|
||||
Bytecodes::Code code = Bytecodes::java_code_at(bcp);
|
||||
Bytecodes::Code code = Bytecodes::java_code_at(method, bcp);
|
||||
if (code == Bytecodes::_return) {
|
||||
// This is used for deopt during registration of finalizers
|
||||
// during Object.<init>. We simply need to resume execution at
|
||||
|
@ -158,6 +158,7 @@ void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp,
|
||||
|
||||
Threads::gc_epilogue();
|
||||
CodeCache::gc_epilogue();
|
||||
JvmtiExport::gc_epilogue();
|
||||
|
||||
if (PrintGC && !PrintGCDetails) {
|
||||
gch->print_heap_change(gch_prev_used);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -535,23 +535,23 @@ bool GenerateOopMap::jump_targets_do(BytecodeStream *bcs, jmpFct_t jmpFct, int *
|
||||
(*jmpFct)(this, bcs->dest_w(), data);
|
||||
break;
|
||||
case Bytecodes::_tableswitch:
|
||||
{ Bytecode_tableswitch *tableswitch = Bytecode_tableswitch_at(bcs->bcp());
|
||||
int len = tableswitch->length();
|
||||
{ Bytecode_tableswitch tableswitch(method(), bcs->bcp());
|
||||
int len = tableswitch.length();
|
||||
|
||||
(*jmpFct)(this, bci + tableswitch->default_offset(), data); /* Default. jump address */
|
||||
(*jmpFct)(this, bci + tableswitch.default_offset(), data); /* Default. jump address */
|
||||
while (--len >= 0) {
|
||||
(*jmpFct)(this, bci + tableswitch->dest_offset_at(len), data);
|
||||
(*jmpFct)(this, bci + tableswitch.dest_offset_at(len), data);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case Bytecodes::_lookupswitch:
|
||||
{ Bytecode_lookupswitch *lookupswitch = Bytecode_lookupswitch_at(bcs->bcp());
|
||||
int npairs = lookupswitch->number_of_pairs();
|
||||
(*jmpFct)(this, bci + lookupswitch->default_offset(), data); /* Default. */
|
||||
{ Bytecode_lookupswitch lookupswitch(method(), bcs->bcp());
|
||||
int npairs = lookupswitch.number_of_pairs();
|
||||
(*jmpFct)(this, bci + lookupswitch.default_offset(), data); /* Default. */
|
||||
while(--npairs >= 0) {
|
||||
LookupswitchPair *pair = lookupswitch->pair_at(npairs);
|
||||
(*jmpFct)(this, bci + pair->offset(), data);
|
||||
LookupswitchPair pair = lookupswitch.pair_at(npairs);
|
||||
(*jmpFct)(this, bci + pair.offset(), data);
|
||||
}
|
||||
break;
|
||||
}
|
||||
@ -977,7 +977,7 @@ void GenerateOopMap::init_basic_blocks() {
|
||||
#ifdef ASSERT
|
||||
if (blockNum + 1 < bbNo) {
|
||||
address bcp = _method->bcp_from(bb->_end_bci);
|
||||
int bc_len = Bytecodes::java_length_at(bcp);
|
||||
int bc_len = Bytecodes::java_length_at(_method(), bcp);
|
||||
assert(bb->_end_bci + bc_len == bb[1]._bci, "unmatched bci info in basicblock");
|
||||
}
|
||||
#endif
|
||||
@ -985,7 +985,7 @@ void GenerateOopMap::init_basic_blocks() {
|
||||
#ifdef ASSERT
|
||||
{ BasicBlock *bb = &_basic_blocks[bbNo-1];
|
||||
address bcp = _method->bcp_from(bb->_end_bci);
|
||||
int bc_len = Bytecodes::java_length_at(bcp);
|
||||
int bc_len = Bytecodes::java_length_at(_method(), bcp);
|
||||
assert(bb->_end_bci + bc_len == _method->code_size(), "wrong end bci");
|
||||
}
|
||||
#endif
|
||||
@ -1837,14 +1837,14 @@ void GenerateOopMap::do_jsr(int targ_bci) {
|
||||
|
||||
|
||||
void GenerateOopMap::do_ldc(int bci) {
|
||||
Bytecode_loadconstant* ldc = Bytecode_loadconstant_at(method(), bci);
|
||||
Bytecode_loadconstant ldc(method(), bci);
|
||||
constantPoolOop cp = method()->constants();
|
||||
BasicType bt = ldc->result_type();
|
||||
BasicType bt = ldc.result_type();
|
||||
CellTypeState cts = (bt == T_OBJECT) ? CellTypeState::make_line_ref(bci) : valCTS;
|
||||
// Make sure bt==T_OBJECT is the same as old code (is_pointer_entry).
|
||||
// Note that CONSTANT_MethodHandle entries are u2 index pairs, not pointer-entries,
|
||||
// and they are processed by _fast_aldc and the CP cache.
|
||||
assert((ldc->has_cache_index() || cp->is_pointer_entry(ldc->pool_index()))
|
||||
assert((ldc.has_cache_index() || cp->is_pointer_entry(ldc.pool_index()))
|
||||
? (bt == T_OBJECT) : true, "expected object type");
|
||||
ppush1(cts);
|
||||
}
|
||||
@ -2343,7 +2343,7 @@ bool GenerateOopMap::rewrite_refval_conflict_inst(BytecodeStream *itr, int from,
|
||||
bool GenerateOopMap::rewrite_load_or_store(BytecodeStream *bcs, Bytecodes::Code bcN, Bytecodes::Code bc0, unsigned int varNo) {
|
||||
assert(bcN == Bytecodes::_astore || bcN == Bytecodes::_aload, "wrong argument (bcN)");
|
||||
assert(bc0 == Bytecodes::_astore_0 || bc0 == Bytecodes::_aload_0, "wrong argument (bc0)");
|
||||
int ilen = Bytecodes::length_at(bcs->bcp());
|
||||
int ilen = Bytecodes::length_at(_method(), bcs->bcp());
|
||||
int newIlen;
|
||||
|
||||
if (ilen == 4) {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -417,11 +417,11 @@ void BranchData::print_data_on(outputStream* st) {
|
||||
int MultiBranchData::compute_cell_count(BytecodeStream* stream) {
|
||||
int cell_count = 0;
|
||||
if (stream->code() == Bytecodes::_tableswitch) {
|
||||
Bytecode_tableswitch* sw = Bytecode_tableswitch_at(stream->bcp());
|
||||
cell_count = 1 + per_case_cell_count * (1 + sw->length()); // 1 for default
|
||||
Bytecode_tableswitch sw(stream->method()(), stream->bcp());
|
||||
cell_count = 1 + per_case_cell_count * (1 + sw.length()); // 1 for default
|
||||
} else {
|
||||
Bytecode_lookupswitch* sw = Bytecode_lookupswitch_at(stream->bcp());
|
||||
cell_count = 1 + per_case_cell_count * (sw->number_of_pairs() + 1); // 1 for default
|
||||
Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
|
||||
cell_count = 1 + per_case_cell_count * (sw.number_of_pairs() + 1); // 1 for default
|
||||
}
|
||||
return cell_count;
|
||||
}
|
||||
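Both branches of compute_cell_count() use the same arithmetic: one extra cell plus a per-case group of cells for every explicit case and one more group for the default target. A tiny self-contained check (plain C++; the value 2 for per_case_cell_count is an assumption made for this example, not taken from the source above):

#include <cassert>

// Same formula as both switch flavours above: 1 + per_case * (cases + 1).
int multi_branch_cell_count(int cases, int per_case_cell_count) {
  return 1 + per_case_cell_count * (cases + 1);
}

int main() {
  assert(multi_branch_cell_count(3, 2) == 9);  // tableswitch over low..low+2
  assert(multi_branch_cell_count(0, 2) == 3);  // lookupswitch with no pairs
  return 0;
}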
@ -434,35 +434,35 @@ void MultiBranchData::post_initialize(BytecodeStream* stream,
|
||||
int target_di;
|
||||
int offset;
|
||||
if (stream->code() == Bytecodes::_tableswitch) {
|
||||
Bytecode_tableswitch* sw = Bytecode_tableswitch_at(stream->bcp());
|
||||
int len = sw->length();
|
||||
Bytecode_tableswitch sw(stream->method()(), stream->bcp());
|
||||
int len = sw.length();
|
||||
assert(array_len() == per_case_cell_count * (len + 1), "wrong len");
|
||||
for (int count = 0; count < len; count++) {
|
||||
target = sw->dest_offset_at(count) + bci();
|
||||
target = sw.dest_offset_at(count) + bci();
|
||||
my_di = mdo->dp_to_di(dp());
|
||||
target_di = mdo->bci_to_di(target);
|
||||
offset = target_di - my_di;
|
||||
set_displacement_at(count, offset);
|
||||
}
|
||||
target = sw->default_offset() + bci();
|
||||
target = sw.default_offset() + bci();
|
||||
my_di = mdo->dp_to_di(dp());
|
||||
target_di = mdo->bci_to_di(target);
|
||||
offset = target_di - my_di;
|
||||
set_default_displacement(offset);
|
||||
|
||||
} else {
|
||||
Bytecode_lookupswitch* sw = Bytecode_lookupswitch_at(stream->bcp());
|
||||
int npairs = sw->number_of_pairs();
|
||||
Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
|
||||
int npairs = sw.number_of_pairs();
|
||||
assert(array_len() == per_case_cell_count * (npairs + 1), "wrong len");
|
||||
for (int count = 0; count < npairs; count++) {
|
||||
LookupswitchPair *pair = sw->pair_at(count);
|
||||
target = pair->offset() + bci();
|
||||
LookupswitchPair pair = sw.pair_at(count);
|
||||
target = pair.offset() + bci();
|
||||
my_di = mdo->dp_to_di(dp());
|
||||
target_di = mdo->bci_to_di(target);
|
||||
offset = target_di - my_di;
|
||||
set_displacement_at(count, offset);
|
||||
}
|
||||
target = sw->default_offset() + bci();
|
||||
target = sw.default_offset() + bci();
|
||||
my_di = mdo->dp_to_di(dp());
|
||||
target_di = mdo->bci_to_di(target);
|
||||
offset = target_di - my_di;
|
||||
|
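The hunks above and below all make the same kind of change: the pointer-returning Bytecode_tableswitch_at / Bytecode_lookupswitch_at style accessors are replaced by small value objects constructed from the method and the bytecode position, so callers use '.' instead of '->' and no raw pointer is handed back. The following is a minimal standalone sketch of that pattern, not HotSpot code; ToyMethod and ToySwitch are invented names used only for illustration.

// Illustrative sketch only: a raw-pointer "cast at bcp" accessor replaced by
// a stack-allocated value object built from the owning method and a bci.
#include <cassert>
#include <cstdint>

struct ToyMethod {
  const uint8_t* code;                                   // start of the bytecode array
  const uint8_t* bcp_from(int bci) const { return code + bci; }
};

class ToySwitch {                                         // value object, lives on the caller's stack
 public:
  ToySwitch(const ToyMethod* m, int bci) : _bcp(m->bcp_from(bci)) {}
  int length() const { return _bcp[1]; }                 // toy layout: [opcode][len][targets...]
  int dest_offset_at(int i) const { return _bcp[2 + i]; }
 private:
  const uint8_t* _bcp;
};

int main() {
  const uint8_t code[] = { 0xAA /* pretend tableswitch */, 2, 5, 9 };
  ToyMethod m = { code };
  ToySwitch sw(&m, 0);                                    // no *_at() helper, no pointer to manage
  assert(sw.length() == 2 && sw.dest_offset_at(1) == 9);
  return 0;
}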
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -150,17 +150,6 @@ int methodOopDesc::fast_exception_handler_bci_for(KlassHandle ex_klass, int thr
return -1;
}

methodOop methodOopDesc::method_from_bcp(address bcp) {
debug_only(static int count = 0; count++);
assert(Universe::heap()->is_in_permanent(bcp), "bcp not in perm_gen");
// TO DO: this may be unsafe in some configurations
HeapWord* p = Universe::heap()->block_start(bcp);
assert(Universe::heap()->block_is_obj(p), "must be obj");
assert(oop(p)->is_constMethod(), "not a method");
return constMethodOop(p)->method();
}


void methodOopDesc::mask_for(int bci, InterpreterOopMap* mask) {

Thread* myThread = Thread::current();
@ -469,11 +458,10 @@ bool methodOopDesc::can_be_statically_bound() const {
bool methodOopDesc::is_accessor() const {
if (code_size() != 5) return false;
if (size_of_parameters() != 1) return false;
methodOop m = (methodOop)this; // pass to code_at() to avoid method_from_bcp
if (Bytecodes::java_code_at(code_base()+0, m) != Bytecodes::_aload_0 ) return false;
if (Bytecodes::java_code_at(code_base()+1, m) != Bytecodes::_getfield) return false;
if (Bytecodes::java_code_at(code_base()+4, m) != Bytecodes::_areturn &&
Bytecodes::java_code_at(code_base()+4, m) != Bytecodes::_ireturn ) return false;
if (java_code_at(0) != Bytecodes::_aload_0 ) return false;
if (java_code_at(1) != Bytecodes::_getfield) return false;
if (java_code_at(4) != Bytecodes::_areturn &&
java_code_at(4) != Bytecodes::_ireturn ) return false;
return true;
}

@ -1414,7 +1402,7 @@ bool CompressedLineNumberReadStream::read_pair() {
}


Bytecodes::Code methodOopDesc::orig_bytecode_at(int bci) {
Bytecodes::Code methodOopDesc::orig_bytecode_at(int bci) const {
BreakpointInfo* bp = instanceKlass::cast(method_holder())->breakpoints();
for (; bp != NULL; bp = bp->next()) {
if (bp->match(this, bci)) {
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -196,8 +196,15 @@ class methodOopDesc : public oopDesc {
static char* name_and_sig_as_C_string(Klass* klass, symbolOop method_name, symbolOop signature);
static char* name_and_sig_as_C_string(Klass* klass, symbolOop method_name, symbolOop signature, char* buf, int size);

Bytecodes::Code java_code_at(int bci) const {
return Bytecodes::java_code_at(this, bcp_from(bci));
}
Bytecodes::Code code_at(int bci) const {
return Bytecodes::code_at(this, bcp_from(bci));
}

// JVMTI breakpoints
Bytecodes::Code orig_bytecode_at(int bci);
Bytecodes::Code orig_bytecode_at(int bci) const;
void set_orig_bytecode_at(int bci, Bytecodes::Code code);
void set_breakpoint(int bci);
void clear_breakpoint(int bci);
@ -655,8 +662,6 @@ class methodOopDesc : public oopDesc {
void set_queued_for_compilation() { _access_flags.set_queued_for_compilation(); }
void clear_queued_for_compilation() { _access_flags.clear_queued_for_compilation(); }

static methodOop method_from_bcp(address bcp);

// Resolve all classes in signature, return 'true' if successful
static bool load_signature_classes(methodHandle m, TRAPS);

@ -787,11 +792,11 @@ class BreakpointInfo : public CHeapObj {
void set_next(BreakpointInfo* n) { _next = n; }

// helps for searchers
bool match(methodOop m, int bci) {
bool match(const methodOopDesc* m, int bci) {
return bci == _bci && match(m);
}

bool match(methodOop m) {
bool match(const methodOopDesc* m) {
return _name_index == m->name_index() &&
_signature_index == m->signature_index();
}
@ -2268,6 +2268,14 @@ void JvmtiExport::oops_do(OopClosure* f) {
JvmtiVMObjectAllocEventCollector::oops_do_for_all_threads(f);
}

void JvmtiExport::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
JvmtiTagMap::weak_oops_do(is_alive, f);
}

void JvmtiExport::gc_epilogue() {
JvmtiCurrentBreakpoints::gc_epilogue();
}

// Onload raw monitor transition.
void JvmtiExport::transition_pending_onload_raw_monitors() {
JvmtiPendingMonitors::transition_raw_monitors();

@ -346,6 +346,8 @@ class JvmtiExport : public AllStatic {
static void cleanup_thread (JavaThread* thread) KERNEL_RETURN;

static void oops_do(OopClosure* f) KERNEL_RETURN;
static void weak_oops_do(BoolObjectClosure* b, OopClosure* f) KERNEL_RETURN;
static void gc_epilogue() KERNEL_RETURN;

static void transition_pending_onload_raw_monitors() KERNEL_RETURN;


@ -212,7 +212,13 @@ void GrowableCache::oops_do(OopClosure* f) {
for (int i=0; i<len; i++) {
GrowableElement *e = _elements->at(i);
e->oops_do(f);
_cache[i] = e->getCacheValue();
}
}

void GrowableCache::gc_epilogue() {
int len = _elements->length();
for (int i=0; i<len; i++) {
_cache[i] = _elements->at(i)->getCacheValue();
}
}

@ -394,6 +400,10 @@ void JvmtiBreakpoints::oops_do(OopClosure* f) {
_bps.oops_do(f);
}

void JvmtiBreakpoints::gc_epilogue() {
_bps.gc_epilogue();
}

void JvmtiBreakpoints::print() {
#ifndef PRODUCT
ResourceMark rm;
@ -523,6 +533,12 @@ void JvmtiCurrentBreakpoints::oops_do(OopClosure* f) {
}
}

void JvmtiCurrentBreakpoints::gc_epilogue() {
if (_jvmti_breakpoints != NULL) {
_jvmti_breakpoints->gc_epilogue();
}
}

///////////////////////////////////////////////////////////////
//
// class VM_GetOrSetLocal
@ -117,6 +117,8 @@ public:
void clear();
// apply f to every element and update the cache
void oops_do(OopClosure* f);
// update the cache after a full gc
void gc_epilogue();
};


@ -148,6 +150,7 @@ public:
void remove (int index) { _cache.remove(index); }
void clear() { _cache.clear(); }
void oops_do(OopClosure* f) { _cache.oops_do(f); }
void gc_epilogue() { _cache.gc_epilogue(); }
};


@ -282,6 +285,7 @@ public:
int clear(JvmtiBreakpoint& bp);
void clearall_in_class_at_safepoint(klassOop klass);
void clearall();
void gc_epilogue();
};


@ -325,6 +329,7 @@ public:
static inline bool is_breakpoint(address bcp);

static void oops_do(OopClosure* f);
static void gc_epilogue();
};

// quickly test whether the bcp matches a cached breakpoint in the list

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1458,7 +1458,7 @@ void VM_RedefineClasses::rewrite_cp_refs_in_method(methodHandle method,
if (bc_length == 0) {
// More complicated bytecodes report a length of zero so
// we have to try again a slightly different way.
bc_length = Bytecodes::length_at(bcp);
bc_length = Bytecodes::length_at(method(), bcp);
}

assert(bc_length != 0, "impossible bytecode length");

@ -3290,7 +3290,11 @@ void JvmtiTagMap::follow_references(jint heap_filter,


void JvmtiTagMap::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
assert(SafepointSynchronize::is_at_safepoint(),
// No locks during VM bring-up (0 threads) and no safepoints after main
// thread creation and before VMThread creation (1 thread); initial GC
// verification can happen in that window which gets to here.
assert(Threads::number_of_threads() <= 1 ||
SafepointSynchronize::is_at_safepoint(),
"must be executed at a safepoint");
if (JvmtiEnv::environments_might_exist()) {
JvmtiEnvIterator it;
@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -194,10 +194,10 @@ bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {

case Bytecodes::_ldc : // fall through
case Bytecodes::_ldc_w : {
Bytecode_loadconstant* ldc_old = Bytecode_loadconstant_at(_s_old->method(), _s_old->bci());
Bytecode_loadconstant* ldc_new = Bytecode_loadconstant_at(_s_new->method(), _s_new->bci());
int cpi_old = ldc_old->pool_index();
int cpi_new = ldc_new->pool_index();
Bytecode_loadconstant ldc_old(_s_old->method(), _s_old->bci());
Bytecode_loadconstant ldc_new(_s_new->method(), _s_new->bci());
int cpi_old = ldc_old.pool_index();
int cpi_new = ldc_new.pool_index();
if (!pool_constants_same(cpi_old, cpi_new))
return false;
break;
@ -267,8 +267,8 @@ bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {
case Bytecodes::_ifnonnull : // fall through
case Bytecodes::_ifnull : // fall through
case Bytecodes::_jsr : {
int old_ofs = _s_old->bytecode()->get_offset_s2(c_old);
int new_ofs = _s_new->bytecode()->get_offset_s2(c_new);
int old_ofs = _s_old->bytecode().get_offset_s2(c_old);
int new_ofs = _s_new->bytecode().get_offset_s2(c_new);
if (_switchable_test) {
int old_dest = _s_old->bci() + old_ofs;
int new_dest = _s_new->bci() + new_ofs;
@ -304,8 +304,8 @@ bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {

case Bytecodes::_goto_w : // fall through
case Bytecodes::_jsr_w : {
int old_ofs = _s_old->bytecode()->get_offset_s4(c_old);
int new_ofs = _s_new->bytecode()->get_offset_s4(c_new);
int old_ofs = _s_old->bytecode().get_offset_s4(c_old);
int new_ofs = _s_new->bytecode().get_offset_s4(c_new);
if (_switchable_test) {
int old_dest = _s_old->bci() + old_ofs;
int new_dest = _s_new->bci() + new_ofs;
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -395,8 +395,8 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
{
HandleMark hm;
methodHandle method(thread, array->element(0)->method());
Bytecode_invoke* invoke = Bytecode_invoke_at_check(method, array->element(0)->bci());
return_type = (invoke != NULL) ? invoke->result_type(thread) : T_ILLEGAL;
Bytecode_invoke invoke = Bytecode_invoke_check(method, array->element(0)->bci());
return_type = invoke.is_valid() ? invoke.result_type(thread) : T_ILLEGAL;
}

// Compute information for handling adapters and adjusting the frame size of the caller.
@ -600,8 +600,8 @@ JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_m
cur_code == Bytecodes::_invokespecial ||
cur_code == Bytecodes::_invokestatic ||
cur_code == Bytecodes::_invokeinterface) {
Bytecode_invoke* invoke = Bytecode_invoke_at(mh, iframe->interpreter_frame_bci());
symbolHandle signature(thread, invoke->signature());
Bytecode_invoke invoke(mh, iframe->interpreter_frame_bci());
symbolHandle signature(thread, invoke.signature());
ArgumentSizeComputer asc(signature);
cur_invoke_parameter_size = asc.size();
if (cur_code != Bytecodes::_invokestatic) {
@ -963,7 +963,7 @@ vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, Re
if (bci == SynchronizationEntryBCI) {
code_name = "sync entry";
} else {
Bytecodes::Code code = Bytecodes::code_at(vf->method(), bci);
Bytecodes::Code code = vf->method()->code_at(bci);
code_name = Bytecodes::name(code);
}
tty->print(" - %s", code_name);
@ -1224,7 +1224,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint tra
ScopeDesc* trap_scope = cvf->scope();
methodHandle trap_method = trap_scope->method();
int trap_bci = trap_scope->bci();
Bytecodes::Code trap_bc = Bytecode_at(trap_method->bcp_from(trap_bci))->java_code();
Bytecodes::Code trap_bc = trap_method->java_code_at(trap_bci);

// Record this event in the histogram.
gather_statistics(reason, action, trap_bc);
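In fetch_unroll_info_helper above, Bytecode_invoke_at_check, which returned NULL when the bci did not sit on an invoke, becomes a stack-allocated Bytecode_invoke_check object queried with is_valid(). A minimal standalone analog of that check-then-query pattern follows; ToyCode and ToyInvoke are invented names and the opcode value is arbitrary, so this is only an illustration of the idiom, not HotSpot code.

// Illustrative sketch: always construct the accessor, then ask is_valid()
// instead of testing a returned pointer against NULL.
#include <cassert>
#include <cstdint>

struct ToyCode { const uint8_t* bytes; };   // toy stand-in for a bytecode array

class ToyInvoke {
 public:
  // "check" style constructor: construction always succeeds, validity is queried later
  ToyInvoke(const ToyCode& c, int bci) : _opcode(c.bytes[bci]) {}
  bool is_valid() const { return _opcode == 0xB6; }   // 0xB6 plays the role of an invoke opcode
 private:
  uint8_t _opcode;
};

int main() {
  const uint8_t bytes[] = { 0x00, 0xB6 };
  ToyCode code = { bytes };
  ToyInvoke at0(code, 0), at1(code, 1);
  assert(!at0.is_valid() && at1.is_valid());   // callers branch on is_valid(), not on a pointer
  return 0;
}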
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -930,10 +930,10 @@ void frame::oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool quer
// This is used sometimes for calling into the VM, not for another
// interpreted or compiled frame.
if (!m->is_native()) {
Bytecode_invoke *call = Bytecode_invoke_at_check(m, bci);
if (call != NULL) {
signature = symbolHandle(thread, call->signature());
has_receiver = call->has_receiver();
Bytecode_invoke call = Bytecode_invoke_check(m, bci);
if (call.is_valid()) {
signature = symbolHandle(thread, call.signature());
has_receiver = call.has_receiver();
if (map->include_argument_oops() &&
interpreter_frame_expression_stack_size() > 0) {
ResourceMark rm(thread); // is this right ???

@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/mutexLocker.hpp"
#ifdef TARGET_OS_FAMILY_linux
@ -431,10 +431,10 @@ void JNIHandleBlock::weak_oops_do(BoolObjectClosure* is_alive,
}

/*
* JvmtiTagMap may also contain weak oops. The iteration of it is placed
* here so that we don't need to add it to each of the collectors.
* JVMTI data structures may also contain weak oops. The iteration of them
* is placed here so that we don't need to add it to each of the collectors.
*/
JvmtiTagMap::weak_oops_do(is_alive, f);
JvmtiExport::weak_oops_do(is_alive, f);
}
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -80,8 +80,6 @@ Monitor* SLT_lock = NULL;
Monitor* iCMS_lock = NULL;
Monitor* FullGCCount_lock = NULL;
Monitor* CMark_lock = NULL;
Monitor* ZF_mon = NULL;
Monitor* Cleanup_mon = NULL;
Mutex* CMRegionStack_lock = NULL;
Mutex* SATB_Q_FL_lock = NULL;
Monitor* SATB_Q_CBL_mon = NULL;
@ -122,6 +120,9 @@ Mutex* PerfDataMemAlloc_lock = NULL;
Mutex* PerfDataManager_lock = NULL;
Mutex* OopMapCacheAlloc_lock = NULL;

Mutex* FreeList_lock = NULL;
Monitor* SecondaryFreeList_lock = NULL;
Mutex* OldSets_lock = NULL;
Mutex* MMUTracker_lock = NULL;
Mutex* HotCardCache_lock = NULL;

@ -177,8 +178,6 @@ void mutex_init() {
}
if (UseG1GC) {
def(CMark_lock , Monitor, nonleaf, true ); // coordinate concurrent mark thread
def(ZF_mon , Monitor, leaf, true );
def(Cleanup_mon , Monitor, nonleaf, true );
def(CMRegionStack_lock , Mutex, leaf, true );
def(SATB_Q_FL_lock , Mutex , special, true );
def(SATB_Q_CBL_mon , Monitor, nonleaf, true );
@ -188,6 +187,9 @@ void mutex_init() {
def(DirtyCardQ_CBL_mon , Monitor, nonleaf, true );
def(Shared_DirtyCardQ_lock , Mutex, nonleaf, true );

def(FreeList_lock , Mutex, leaf , true );
def(SecondaryFreeList_lock , Monitor, leaf , true );
def(OldSets_lock , Mutex , leaf , true );
def(MMUTracker_lock , Mutex , leaf , true );
def(HotCardCache_lock , Mutex , special , true );
def(EvacFailureStack_lock , Mutex , nonleaf , true );

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -76,8 +76,6 @@ extern Monitor* SLT_lock; // used in CMS GC for acquiring
extern Monitor* iCMS_lock; // CMS incremental mode start/stop notification
extern Monitor* FullGCCount_lock; // in support of "concurrent" full gc
extern Monitor* CMark_lock; // used for concurrent mark thread coordination
extern Monitor* ZF_mon; // used for G1 conc zero-fill.
extern Monitor* Cleanup_mon; // used for G1 conc cleanup.
extern Mutex* CMRegionStack_lock; // used for protecting accesses to the CM region stack
extern Mutex* SATB_Q_FL_lock; // Protects SATB Q
// buffer free list.
@ -125,6 +123,9 @@ extern Mutex* PerfDataManager_lock; // a long on access to PerfData
extern Mutex* ParkerFreeList_lock;
extern Mutex* OopMapCacheAlloc_lock; // protects allocation of oop_map caches

extern Mutex* FreeList_lock; // protects the free region list during safepoints
extern Monitor* SecondaryFreeList_lock; // protects the secondary free region list
extern Mutex* OldSets_lock; // protects the old region sets
extern Mutex* MMUTracker_lock; // protects the MMU
// tracker data structures
extern Mutex* HotCardCache_lock; // protects the hot card cache
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -106,7 +106,7 @@ class Relocator : public ResourceObj {
// get the address of in the code_array
inline char* addr_at(int bci) const { return (char*) &code_array()[bci]; }

int instruction_length_at(int bci) { return Bytecodes::length_at(code_array() + bci); }
int instruction_length_at(int bci) { return Bytecodes::length_at(NULL, code_array() + bci); }

// Helper methods
int align(int n) const { return (n+3) & ~3; }
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -944,9 +944,9 @@ Handle SharedRuntime::find_callee_info_helper(JavaThread* thread,
int bci = vfst.bci();

// Find bytecode
Bytecode_invoke* bytecode = Bytecode_invoke_at(caller, bci);
bc = bytecode->java_code();
int bytecode_index = bytecode->index();
Bytecode_invoke bytecode(caller, bci);
bc = bytecode.java_code();
int bytecode_index = bytecode.index();

// Find receiver for non-static call
if (bc != Bytecodes::_invokestatic) {
@ -957,7 +957,7 @@ Handle SharedRuntime::find_callee_info_helper(JavaThread* thread,
// Caller-frame is a compiled frame
frame callerFrame = stubFrame.sender(&reg_map2);

methodHandle callee = bytecode->static_target(CHECK_(nullHandle));
methodHandle callee = bytecode.static_target(CHECK_(nullHandle));
if (callee.is_null()) {
THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
}
@ -1674,10 +1674,9 @@ char* SharedRuntime::generate_class_cast_message(
// Get target class name from the checkcast instruction
vframeStream vfst(thread, true);
assert(!vfst.at_end(), "Java frame must exist");
Bytecode_checkcast* cc = Bytecode_checkcast_at(
vfst.method()->bcp_from(vfst.bci()));
Bytecode_checkcast cc(vfst.method(), vfst.method()->bcp_from(vfst.bci()));
Klass* targetKlass = Klass::cast(vfst.method()->constants()->klass_at(
cc->index(), thread));
cc.index(), thread));
return generate_class_cast_message(objName, targetKlass->external_name());
}

@ -1711,11 +1710,11 @@ char* SharedRuntime::generate_wrong_method_type_message(JavaThread* thread,
const char* targetType = "the required signature";
vframeStream vfst(thread, true);
if (!vfst.at_end()) {
Bytecode_invoke* call = Bytecode_invoke_at(vfst.method(), vfst.bci());
Bytecode_invoke call(vfst.method(), vfst.bci());
methodHandle target;
{
EXCEPTION_MARK;
target = call->static_target(THREAD);
target = call.static_target(THREAD);
if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; }
}
if (target.not_null()
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -399,7 +399,7 @@ void vframeArrayElement::unpack_on_stack(int callee_parameters,
} else if (TraceDeoptimization) {
tty->print(" ");
method()->print_value();
Bytecodes::Code code = Bytecodes::java_code_at(bcp);
Bytecodes::Code code = Bytecodes::java_code_at(method(), bcp);
int bci = method()->bci_from(bcp);
tty->print(" - %s", Bytecodes::name(code));
tty->print(" @ bci %d ", bci);
@ -453,7 +453,7 @@ DumpWriter::DumpWriter(const char* path) {

DumpWriter::~DumpWriter() {
// flush and close dump file
if (file_descriptor() >= 0) {
if (is_open()) {
close();
}
if (_buffer != NULL) os::free(_buffer);
@ -463,9 +463,10 @@ DumpWriter::~DumpWriter() {
// closes dump file (if open)
void DumpWriter::close() {
// flush and close dump file
if (file_descriptor() >= 0) {
if (is_open()) {
flush();
::close(file_descriptor());
set_file_descriptor(-1);
}
}

@ -1935,18 +1936,32 @@ void HeapDumper::dump_heap() {
void HeapDumper::dump_heap(bool oome) {
static char base_path[JVM_MAXPATHLEN] = {'\0'};
static uint dump_file_seq = 0;
char my_path[JVM_MAXPATHLEN] = {'\0'};
char* my_path;
const int max_digit_chars = 20;

const char* dump_file_name = "java_pid";
const char* dump_file_ext = ".hprof";

// The dump file defaults to java_pid<pid>.hprof in the current working
// directory. HeapDumpPath=<file> can be used to specify an alternative
// dump file name or a directory where dump file is created.
if (dump_file_seq == 0) { // first time in, we initialize base_path
// Calculate potentially longest base path and check if we have enough
// allocated statically.
const size_t total_length =
(HeapDumpPath == NULL ? 0 : strlen(HeapDumpPath)) +
strlen(os::file_separator()) + max_digit_chars +
strlen(dump_file_name) + strlen(dump_file_ext) + 1;
if (total_length > sizeof(base_path)) {
warning("Cannot create heap dump file. HeapDumpPath is too long.");
return;
}

bool use_default_filename = true;
if (HeapDumpPath == NULL || HeapDumpPath[0] == '\0') {
// HeapDumpPath=<file> not specified
} else {
assert(strlen(HeapDumpPath) < sizeof(base_path), "HeapDumpPath too long");
strcpy(base_path, HeapDumpPath);
strncpy(base_path, HeapDumpPath, sizeof(base_path));
// check if the path is a directory (must exist)
DIR* dir = os::opendir(base_path);
if (dir == NULL) {
@ -1960,8 +1975,6 @@ void HeapDumper::dump_heap(bool oome) {
char* end = base_path;
end += (strlen(base_path) - fs_len);
if (strcmp(end, os::file_separator()) != 0) {
assert(strlen(base_path) + strlen(os::file_separator()) < sizeof(base_path),
"HeapDumpPath too long");
strcat(base_path, os::file_separator());
}
}
@ -1969,21 +1982,26 @@ void HeapDumper::dump_heap(bool oome) {
}
// If HeapDumpPath wasn't a file name then we append the default name
if (use_default_filename) {
char fn[32];
sprintf(fn, "java_pid%d", os::current_process_id());
assert(strlen(base_path) + strlen(fn) + strlen(".hprof") < sizeof(base_path), "HeapDumpPath too long");
strcat(base_path, fn);
strcat(base_path, ".hprof");
const size_t dlen = strlen(base_path); // if heap dump dir specified
jio_snprintf(&base_path[dlen], sizeof(base_path)-dlen, "%s%d%s",
dump_file_name, os::current_process_id(), dump_file_ext);
}
assert(strlen(base_path) < sizeof(my_path), "Buffer too small");
strcpy(my_path, base_path);
const size_t len = strlen(base_path) + 1;
my_path = (char*)os::malloc(len);
if (my_path == NULL) {
warning("Cannot create heap dump file. Out of system memory.");
return;
}
strncpy(my_path, base_path, len);
} else {
// Append a sequence number id for dumps following the first
char fn[33];
sprintf(fn, ".%d", dump_file_seq);
assert(strlen(base_path) + strlen(fn) < sizeof(my_path), "HeapDumpPath too long");
strcpy(my_path, base_path);
strcat(my_path, fn);
const size_t len = strlen(base_path) + max_digit_chars + 2; // for '.' and \0
my_path = (char*)os::malloc(len);
if (my_path == NULL) {
warning("Cannot create heap dump file. Out of system memory.");
return;
}
jio_snprintf(my_path, len, "%s.%d", base_path, dump_file_seq);
}
dump_file_seq++; // increment seq number for next time we dump

@ -1991,4 +2009,5 @@ void HeapDumper::dump_heap(bool oome) {
true /* send to tty */,
oome /* pass along out-of-memory-error flag */);
dumper.dump(my_path);
os::free(my_path);
}
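The dump_heap change above replaces the fixed JVM_MAXPATHLEN stack buffer for my_path with a length computed up front, an os::malloc allocation, and jio_snprintf. A standalone sketch of that size-then-allocate-then-print pattern follows; it uses plain malloc, snprintf and getpid as stand-ins for the VM utilities, and the names and sequence number are illustrative only.

// Illustrative sketch: build "java_pid<pid>.hprof.<seq>" into a buffer sized
// for the worst case instead of a fixed-size stack array.
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <unistd.h>

int main() {
  const char* base = "java_pid";
  const char* ext  = ".hprof";
  unsigned seq = 3;                       // e.g. third dump in this run

  // Worst case: base + 20 digits of pid + ext + '.' + 20 digits of seq + NUL.
  const size_t max_digit_chars = 20;
  size_t len = strlen(base) + max_digit_chars + strlen(ext) + 1 + max_digit_chars + 1;
  char* path = (char*)malloc(len);
  if (path == NULL) return 1;             // mirrors the "out of system memory" bail-out

  snprintf(path, len, "%s%d%s.%u", base, (int)getpid(), ext, seq);
  printf("%s\n", path);                   // e.g. java_pid12345.hprof.3
  free(path);
  return 0;
}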
@ -34,6 +34,7 @@ template <size_t bufsz = 256>
class FormatBuffer {
public:
inline FormatBuffer(const char * format, ...);
inline void append(const char* format, ...);
operator const char *() const { return _buf; }

private:
@ -51,6 +52,19 @@ FormatBuffer<bufsz>::FormatBuffer(const char * format, ...) {
va_end(argp);
}

template <size_t bufsz>
void FormatBuffer<bufsz>::append(const char* format, ...) {
// Given that the constructor does a vsnprintf we can assume that
// _buf is already initialized.
size_t len = strlen(_buf);
char* buf_end = _buf + len;

va_list argp;
va_start(argp, format);
vsnprintf(buf_end, bufsz - len, format, argp);
va_end(argp);
}

// Used to format messages for assert(), guarantee(), fatal(), etc.
typedef FormatBuffer<> err_msg;
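The new FormatBuffer::append above continues formatting at strlen(_buf), relying on the constructor having already filled the buffer with vsnprintf. A self-contained sketch of the same idea follows; the constructor body is an assumption modeled on the surrounding snippet, and MiniFormatBuffer is an invented name, so treat it as an illustration rather than the HotSpot header.

// Standalone sketch of the append() pattern: format into a fixed-size
// buffer, then let append() keep writing where the buffer currently ends.
#include <cstdarg>
#include <cstddef>
#include <cstdio>
#include <cstring>

template <size_t bufsz = 256>
class MiniFormatBuffer {
 public:
  MiniFormatBuffer(const char* format, ...) {
    va_list argp;
    va_start(argp, format);
    vsnprintf(_buf, bufsz, format, argp);   // assumed to mirror the real constructor
    va_end(argp);
  }
  void append(const char* format, ...) {
    size_t len = strlen(_buf);              // continue at the current end of the buffer
    va_list argp;
    va_start(argp, format);
    vsnprintf(_buf + len, bufsz - len, format, argp);
    va_end(argp);
  }
  operator const char*() const { return _buf; }
 private:
  char _buf[bufsz];
};

int main() {
  MiniFormatBuffer<> msg("expected %d", 1);
  msg.append(", got %d", 2);
  puts(msg);                                // prints: expected 1, got 2
  return 0;
}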
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1179,6 +1179,8 @@ inline int build_int_from_shorts( jushort low, jushort high ) {
// '%d' formats to indicate a 64-bit quantity; commonly "l" (in LP64) or "ll"
// (in ILP32).

#define BOOL_TO_STR(__b) (__b) ? "true" : "false"

// Format 32-bit quantities.
#define INT32_FORMAT "%d"
#define UINT32_FORMAT "%u"