7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error

A referent object that is only weakly reachable at the start of concurrent marking but is re-attached to the strongly reachable object graph during marking may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
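In outline, the fix makes every read of the referent field behave like the sketch below. This is a conceptual model only; helper names such as load_heap_oop, marking_is_active, and satb_enqueue are illustrative, and the real barrier is emitted as assembly by the interpreter and C1 in the diffs that follow.

// Conceptual shape of the intrinsified Reference.get() path (hypothetical names).
oop reference_get(oop reference) {
  oop referent = load_heap_oop(reference, referent_offset);  // the _getfield
  if (marking_is_active() && referent != NULL) {
    satb_enqueue(referent);  // log the referent so concurrent marking treats it as live
  }
  return referent;
}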

Reviewed-by: kvn, iveresov, never, tonyp, dholmes
John Cuthbertson 2011-04-07 09:53:20 -07:00
parent aac4647e6e
commit a08e1ce906
41 changed files with 1423 additions and 268 deletions


@ -4257,34 +4257,14 @@ void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
///////////////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC
static uint num_stores = 0;
static uint num_null_pre_stores = 0;
static address satb_log_enqueue_with_frame = NULL;
static u_char* satb_log_enqueue_with_frame_end = NULL;
static void count_null_pre_vals(void* pre_val) {
num_stores++;
if (pre_val == NULL) num_null_pre_stores++;
if ((num_stores % 1000000) == 0) {
tty->print_cr(UINT32_FORMAT " stores, " UINT32_FORMAT " (%5.2f%%) with null pre-vals.",
num_stores, num_null_pre_stores,
100.0*(float)num_null_pre_stores/(float)num_stores);
}
}
static address satb_log_enqueue_with_frame = 0;
static u_char* satb_log_enqueue_with_frame_end = 0;
static address satb_log_enqueue_frameless = 0;
static u_char* satb_log_enqueue_frameless_end = 0;
static address satb_log_enqueue_frameless = NULL;
static u_char* satb_log_enqueue_frameless_end = NULL;
static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions?
// The calls to this don't work. We'd need to do a fair amount of work to
// make it work.
static void check_index(int ind) {
assert(0 <= ind && ind <= 64*K && ((ind % oopSize) == 0),
"Invariants.");
}
static void generate_satb_log_enqueue(bool with_frame) {
BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize);
CodeBuffer buf(bb);
@ -4388,13 +4368,27 @@ static inline void generate_satb_log_enqueue_if_necessary(bool with_frame) {
}
}
void MacroAssembler::g1_write_barrier_pre(Register obj, Register index, int offset, Register tmp, bool preserve_o_regs) {
assert(offset == 0 || index == noreg, "choose one");
if (G1DisablePreBarrier) return;
// satb_log_barrier(tmp, obj, offset, preserve_o_regs);
void MacroAssembler::g1_write_barrier_pre(Register obj,
Register index,
int offset,
Register pre_val,
Register tmp,
bool preserve_o_regs) {
Label filtered;
// satb_log_barrier_work0(tmp, filtered);
if (obj == noreg) {
// We are not loading the previous value so make
// sure that we don't trash the value in pre_val
// with the code below.
assert_different_registers(pre_val, tmp);
} else {
// We will be loading the previous value
// in this code so...
assert(offset == 0 || index == noreg, "choose one");
assert(pre_val == noreg, "check this code");
}
// Is marking active?
if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
ld(G2,
in_bytes(JavaThread::satb_mark_queue_offset() +
@ -4413,61 +4407,46 @@ void MacroAssembler::g1_write_barrier_pre(Register obj, Register index, int offs
br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, tmp, filtered);
delayed() -> nop();
// satb_log_barrier_work1(tmp, offset);
if (index == noreg) {
if (Assembler::is_simm13(offset)) {
load_heap_oop(obj, offset, tmp);
// Do we need to load the previous value?
if (obj != noreg) {
// Load the previous value...
if (index == noreg) {
if (Assembler::is_simm13(offset)) {
load_heap_oop(obj, offset, tmp);
} else {
set(offset, tmp);
load_heap_oop(obj, tmp, tmp);
}
} else {
set(offset, tmp);
load_heap_oop(obj, tmp, tmp);
load_heap_oop(obj, index, tmp);
}
} else {
load_heap_oop(obj, index, tmp);
// Previous value has been loaded into tmp
pre_val = tmp;
}
// satb_log_barrier_work2(obj, tmp, offset);
// satb_log_barrier_work3(tmp, filtered, preserve_o_regs);
const Register pre_val = tmp;
if (G1SATBBarrierPrintNullPreVals) {
save_frame(0);
mov(pre_val, O0);
// Save G-regs that target may use.
mov(G1, L1);
mov(G2, L2);
mov(G3, L3);
mov(G4, L4);
mov(G5, L5);
call(CAST_FROM_FN_PTR(address, &count_null_pre_vals));
delayed()->nop();
// Restore G-regs that target may have used.
mov(L1, G1);
mov(L2, G2);
mov(L3, G3);
mov(L4, G4);
mov(L5, G5);
restore(G0, G0, G0);
}
assert(pre_val != noreg, "must have a real register");
// Is the previous value null?
// Check on whether to annul.
br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, pre_val, filtered);
delayed() -> nop();
// OK, it's not filtered, so we'll need to call enqueue. In the normal
// case, pre_val will be a scratch G-reg, but there's some cases in which
// it's an O-reg. In the first case, do a normal call. In the latter,
// do a save here and call the frameless version.
// case, pre_val will be a scratch G-reg, but there are some cases in
// which it's an O-reg. In the first case, do a normal call. In the
// latter, do a save here and call the frameless version.
guarantee(pre_val->is_global() || pre_val->is_out(),
"Or we need to think harder.");
if (pre_val->is_global() && !preserve_o_regs) {
generate_satb_log_enqueue_if_necessary(true); // with frame.
generate_satb_log_enqueue_if_necessary(true); // with frame
call(satb_log_enqueue_with_frame);
delayed()->mov(pre_val, O0);
} else {
generate_satb_log_enqueue_if_necessary(false); // with frameless.
generate_satb_log_enqueue_if_necessary(false); // frameless
save_frame(0);
call(satb_log_enqueue_frameless);
delayed()->mov(pre_val->after_save(), O0);
@ -4614,7 +4593,6 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val
MacroAssembler* post_filter_masm = this;
if (new_val == G0) return;
if (G1DisablePostBarrier) return;
G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
assert(bs->kind() == BarrierSet::G1SATBCT ||
@ -4626,6 +4604,7 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val
#else
srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
#endif
if (G1PrintCTFilterStats) {
guarantee(tmp->is_global(), "Or stats won't work...");
// This is a sleazy hack: I'm temporarily hijacking G2, which I


@ -2210,15 +2210,11 @@ public:
void card_write_barrier_post(Register store_addr, Register new_val, Register tmp);
#ifndef SERIALGC
// Array store and offset
void g1_write_barrier_pre(Register obj, Register index, int offset, Register tmp, bool preserve_o_regs);
// General G1 pre-barrier generator.
void g1_write_barrier_pre(Register obj, Register index, int offset, Register pre_val, Register tmp, bool preserve_o_regs);
// General G1 post-barrier generator
void g1_write_barrier_post(Register store_addr, Register new_val, Register tmp);
// May do filtering, depending on the boolean arguments.
void g1_card_table_write(jbyte* byte_map_base,
Register tmp, Register obj, Register new_val,
bool region_filter, bool null_filter);
#endif // SERIALGC
// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack


@ -408,13 +408,20 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
#ifndef SERIALGC
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
// At this point we know that marking is in progress.
// If do_load() is true then we have to emit the
// load of the previous value; otherwise it has already
// been loaded into _pre_val.
__ bind(_entry);
assert(pre_val()->is_register(), "Precondition.");
Register pre_val_reg = pre_val()->as_register();
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
if (do_load()) {
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
}
if (__ is_in_wdisp16_range(_continuation)) {
__ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
pre_val_reg, _continuation);
@ -431,6 +438,96 @@ void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
}
void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) {
// At this point we know that offset == referent_offset.
//
// So we might have to emit:
// if (src == null) goto continuation.
//
// and we definitely have to emit:
// if (klass(src).reference_type == REF_NONE) goto continuation
// if (!marking_active) goto continuation
// if (pre_val == null) goto continuation
// call pre_barrier(pre_val)
// goto continuation
//
__ bind(_entry);
assert(src()->is_register(), "sanity");
Register src_reg = src()->as_register();
if (gen_src_check()) {
// The original src operand was not a constant.
// Generate src == null?
if (__ is_in_wdisp16_range(_continuation)) {
__ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
src_reg, _continuation);
} else {
__ cmp(src_reg, G0);
__ brx(Assembler::equal, false, Assembler::pt, _continuation);
}
__ delayed()->nop();
}
// Generate src->_klass->_reference_type() == REF_NONE?
assert(tmp()->is_register(), "sanity");
Register tmp_reg = tmp()->as_register();
__ load_klass(src_reg, tmp_reg);
Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset_in_bytes() + sizeof(oopDesc));
__ ld(ref_type_adr, tmp_reg);
if (__ is_in_wdisp16_range(_continuation)) {
__ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
tmp_reg, _continuation);
} else {
__ cmp(tmp_reg, G0);
__ brx(Assembler::equal, false, Assembler::pt, _continuation);
}
__ delayed()->nop();
// Is marking active?
assert(thread()->is_register(), "precondition");
Register thread_reg = thread()->as_register();
Address in_progress(thread_reg, in_bytes(JavaThread::satb_mark_queue_offset() +
PtrQueue::byte_offset_of_active()));
if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
__ ld(in_progress, tmp_reg);
} else {
assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
__ ldsb(in_progress, tmp_reg);
}
if (__ is_in_wdisp16_range(_continuation)) {
__ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
tmp_reg, _continuation);
} else {
__ cmp(tmp_reg, G0);
__ brx(Assembler::equal, false, Assembler::pt, _continuation);
}
__ delayed()->nop();
// val == null?
assert(val()->is_register(), "Precondition.");
Register val_reg = val()->as_register();
if (__ is_in_wdisp16_range(_continuation)) {
__ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
val_reg, _continuation);
} else {
__ cmp(val_reg, G0);
__ brx(Assembler::equal, false, Assembler::pt, _continuation);
}
__ delayed()->nop();
__ call(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id));
__ delayed()->mov(val_reg, G4);
__ br(Assembler::always, false, Assembler::pt, _continuation);
__ delayed()->nop();
}
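Expressed as straight-line control flow, the stub emits roughly the guard chain below (an illustrative C-style sketch mirroring the comment at the top of emit_code; every failed guard branches to the continuation and skips the SATB enqueue):

// Sketch of the generated guards (names are illustrative, not emitted code).
if (gen_src_check && src == NULL)                goto continuation;  // src statically unknown
if (src->klass()->reference_type() == REF_NONE)  goto continuation;  // not a Reference subclass
if (!thread->satb_mark_queue().is_active())      goto continuation;  // marking not in progress
if (val == NULL)                                 goto continuation;  // nothing to record
g1_pre_barrier_slow(val);                        // record val in the SATB log buffer
goto continuation;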
jbyte* G1PostBarrierStub::_byte_map_base = NULL;
jbyte* G1PostBarrierStub::byte_map_base_slow() {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -387,7 +387,8 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
if (obj_store) {
// Needs GC write barriers.
pre_barrier(LIR_OprFact::address(array_addr), false, NULL);
pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
}
__ move(value.result(), array_addr, null_check_info);
if (obj_store) {
@ -687,7 +688,8 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
__ add(obj.result(), offset.result(), addr);
if (type == objectType) { // Write-barrier needed for Object fields.
pre_barrier(addr, false, NULL);
pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
}
if (type == objectType)
@ -1187,7 +1189,8 @@ void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
}
if (is_obj) {
pre_barrier(LIR_OprFact::address(addr), false, NULL);
pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
// _bs->c1_write_barrier_pre(this, LIR_OprFact::address(addr));
}
__ move(data, addr);


@ -551,6 +551,26 @@ address InterpreterGenerator::generate_accessor_entry(void) {
return NULL;
}
address InterpreterGenerator::generate_Reference_get_entry(void) {
#ifndef SERIALGC
if (UseG1GC) {
// We need to have a routine that generates code to:
// * load the value in the referent field
// * pass that value to the pre-barrier.
//
// In the case of G1 this will record the value of the
// referent in an SATB buffer if marking is active.
// This will cause concurrent marking to mark the referent
// field as live.
Unimplemented();
}
#endif // SERIALGC
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
}
//
// Interpreter stub for calling a native method. (C++ interpreter)
// This sets up a somewhat different looking stack for calling the native method


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,6 +36,7 @@
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_empty_entry(void);
address generate_accessor_entry(void);
address generate_Reference_get_entry(void);
void lock_method(void);
void save_native_result(void);
void restore_native_result(void);


@ -407,6 +407,8 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
case Interpreter::java_lang_math_abs : break;
case Interpreter::java_lang_math_log : break;
case Interpreter::java_lang_math_log10 : break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
default : ShouldNotReachHere(); break;
}


@ -763,6 +763,87 @@ address InterpreterGenerator::generate_accessor_entry(void) {
return NULL;
}
// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#ifndef SERIALGC
// Code: _aload_0, _getfield, _areturn
// parameter size = 1
//
// The code that gets generated by this routine is split into 2 parts:
// 1. The "intrinsified" code for G1 (or any SATB based GC),
// 2. The slow path - which is an expansion of the regular method entry.
//
// Notes:-
// * In the G1 code we do not check whether we need to block for
// a safepoint. If G1 is enabled then we must execute the specialized
// code for Reference.get (except when the Reference object is null)
// so that we can log the value in the referent field with an SATB
// update buffer.
// If the code for the getfield template is modified so that the
// G1 pre-barrier code is executed when the current method is
// Reference.get() then going through the normal method entry
// will be fine.
// * The G1 code can, however, check the receiver object (the instance
// of java.lang.ref.Reference) and jump to the slow path if null. If the
// Reference object is null then we obviously cannot fetch the referent
// and so we don't need to call the G1 pre-barrier. Thus we can use the
// regular method entry code to generate the NPE.
//
// This code is based on generate_accessor_entry.
address entry = __ pc();
const int referent_offset = java_lang_ref_Reference::referent_offset;
guarantee(referent_offset > 0, "referent offset not initialized");
if (UseG1GC) {
Label slow_path;
// In the G1 code we don't check if we need to reach a safepoint. We
// continue and the thread will safepoint at the next bytecode dispatch.
// Check if local 0 != NULL
// If the receiver is null then it is OK to jump to the slow path.
__ ld_ptr(Gargs, G0, Otos_i ); // get local 0
__ tst(Otos_i); // check if local 0 == NULL and go to the slow path
__ brx(Assembler::zero, false, Assembler::pn, slow_path);
__ delayed()->nop();
// Load the value of the referent field.
if (Assembler::is_simm13(referent_offset)) {
__ load_heap_oop(Otos_i, referent_offset, Otos_i);
} else {
__ set(referent_offset, G3_scratch);
__ load_heap_oop(Otos_i, G3_scratch, Otos_i);
}
// Generate the G1 pre-barrier code to log the value of
// the referent field in an SATB buffer. Note with
// these parameters the pre-barrier does not generate
// the load of the previous value
__ g1_write_barrier_pre(noreg /* obj */, noreg /* index */, 0 /* offset */,
Otos_i /* pre_val */,
G3_scratch /* tmp */,
true /* preserve_o_regs */);
// _areturn
__ retl(); // return from leaf routine
__ delayed()->mov(O5_savedSP, SP);
// Generate regular method entry
__ bind(slow_path);
(void) generate_normal_entry(false);
return entry;
}
#endif // SERIALGC
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
}
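As a sketch, the entry generated above is equivalent to the following pseudocode for the _aload_0, _getfield, _areturn sequence (normal_entry stands in for the code produced by generate_normal_entry; all names are illustrative):

// Pseudocode for the G1 fast-path method entry.
oop Reference_get_entry(oop receiver /* local 0 */) {
  if (receiver == NULL) {
    return normal_entry(receiver);                         // slow path; also raises the NPE
  }
  oop referent = load_heap_oop(receiver, referent_offset); // the _getfield
  // obj == noreg: the barrier does not reload the value, it is passed in pre_val
  g1_write_barrier_pre(/* obj */ NULL, /* pre_val */ referent);
  return referent;                                         // _areturn via a leaf return, no frame
}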
//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method


@ -57,7 +57,11 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
case BarrierSet::G1SATBCT:
case BarrierSet::G1SATBCTLogging:
{
__ g1_write_barrier_pre( base, index, offset, tmp, /*preserve_o_regs*/true);
// Load and record the previous value.
__ g1_write_barrier_pre(base, index, offset,
noreg /* pre_val */,
tmp, true /*preserve_o_regs*/);
if (index == noreg ) {
assert(Assembler::is_simm13(offset), "fix this code");
__ store_heap_oop(val, base, offset);


@ -6890,26 +6890,39 @@ void MacroAssembler::sign_extend_short(Register reg) {
#ifndef SERIALGC
void MacroAssembler::g1_write_barrier_pre(Register obj,
#ifndef _LP64
Register pre_val,
Register thread,
#endif
Register tmp,
Register tmp2,
bool tosca_live) {
LP64_ONLY(Register thread = r15_thread;)
bool tosca_live,
bool expand_call) {
// If expand_call is true then we expand the call_VM_leaf macro
// directly to skip generating the check by
// InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
#ifdef _LP64
assert(thread == r15_thread, "must be");
#endif // _LP64
Label done;
Label runtime;
assert(pre_val != noreg, "check this code");
if (obj != noreg) {
assert_different_registers(obj, pre_val, tmp);
assert(pre_val != rax, "check this code");
}
Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
PtrQueue::byte_offset_of_active()));
Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
PtrQueue::byte_offset_of_index()));
Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
PtrQueue::byte_offset_of_buf()));
Label done;
Label runtime;
// if (!marking_in_progress) goto done;
// Is marking active?
if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
cmpl(in_progress, 0);
} else {
@ -6918,65 +6931,92 @@ void MacroAssembler::g1_write_barrier_pre(Register obj,
}
jcc(Assembler::equal, done);
// if (x.f == NULL) goto done;
#ifdef _LP64
load_heap_oop(tmp2, Address(obj, 0));
#else
movptr(tmp2, Address(obj, 0));
#endif
cmpptr(tmp2, (int32_t) NULL_WORD);
// Do we need to load the previous value?
if (obj != noreg) {
load_heap_oop(pre_val, Address(obj, 0));
}
// Is the previous value null?
cmpptr(pre_val, (int32_t) NULL_WORD);
jcc(Assembler::equal, done);
// Can we store original value in the thread's buffer?
// Is index == 0?
// (The index field is typed as size_t.)
#ifdef _LP64
movslq(tmp, index);
cmpq(tmp, 0);
#else
cmpl(index, 0);
#endif
jcc(Assembler::equal, runtime);
#ifdef _LP64
subq(tmp, wordSize);
movl(index, tmp);
addq(tmp, buffer);
#else
subl(index, wordSize);
movl(tmp, buffer);
addl(tmp, index);
#endif
movptr(Address(tmp, 0), tmp2);
movptr(tmp, index); // tmp := *index_adr
cmpptr(tmp, 0); // tmp == 0?
jcc(Assembler::equal, runtime); // If yes, goto runtime
subptr(tmp, wordSize); // tmp := tmp - wordSize
movptr(index, tmp); // *index_adr := tmp
addptr(tmp, buffer); // tmp := tmp + *buffer_adr
// Record the previous value
movptr(Address(tmp, 0), pre_val);
jmp(done);
bind(runtime);
// save the live input values
if(tosca_live) push(rax);
push(obj);
#ifdef _LP64
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), tmp2, r15_thread);
#else
push(thread);
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), tmp2, thread);
pop(thread);
#endif
pop(obj);
if(tosca_live) pop(rax);
bind(done);
if (obj != noreg && obj != rax)
push(obj);
if (pre_val != rax)
push(pre_val);
// Calling the runtime using the regular call_VM_leaf mechanism generates
// code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
// that checks that the *(ebp+frame::interpreter_frame_last_sp) == NULL.
//
// If we are generating the pre-barrier without a frame (e.g. in the
// intrinsified Reference.get() routine) then ebp might be pointing to
// the caller frame and so this check will most likely fail at runtime.
//
// Expanding the call directly bypasses the generation of the check.
// So when we do not have a full interpreter frame on the stack
// expand_call should be passed true.
NOT_LP64( push(thread); )
if (expand_call) {
LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
pass_arg1(this, thread);
pass_arg0(this, pre_val);
MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
} else {
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
}
NOT_LP64( pop(thread); )
// restore the live input values
if (pre_val != rax)
pop(pre_val);
if (obj != noreg && obj != rax)
pop(obj);
if(tosca_live) pop(rax);
bind(done);
}
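Putting the pieces together, the emitted pre-barrier is logically equivalent to the sketch below (simplified: the real queue index is kept as a byte offset and decremented by wordSize; names are illustrative):

// Logical shape of the generated G1 pre-barrier.
void g1_pre_barrier(oop* field /* NULL when obj == noreg */, oop pre_val, Thread* thread) {
  SATBQueue* q = &thread->satb_mark_queue();
  if (!q->is_active()) return;           // marking not in progress
  if (field != NULL) pre_val = *field;   // load the previous value if requested
  if (pre_val == NULL) return;           // nulls are filtered out
  if (q->index == 0) {
    g1_wb_pre(pre_val, thread);          // slow path: runtime call enqueues and refills the buffer
  } else {
    q->index -= 1;
    q->buffer[q->index] = pre_val;       // fast path: store into the thread-local SATB buffer
  }
}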
void MacroAssembler::g1_write_barrier_post(Register store_addr,
Register new_val,
#ifndef _LP64
Register thread,
#endif
Register tmp,
Register tmp2) {
#ifdef _LP64
assert(thread == r15_thread, "must be");
#endif // _LP64
LP64_ONLY(Register thread = r15_thread;)
Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
PtrQueue::byte_offset_of_index()));
Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
PtrQueue::byte_offset_of_buf()));
BarrierSet* bs = Universe::heap()->barrier_set();
CardTableModRefBS* ct = (CardTableModRefBS*)bs;
Label done;
@ -7055,7 +7095,6 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr,
pop(store_addr);
bind(done);
}
#endif // SERIALGC


@ -1444,6 +1444,7 @@ private:
class MacroAssembler: public Assembler {
friend class LIR_Assembler;
friend class Runtime1; // as_Address()
protected:
Address as_Address(AddressLiteral adr);
@ -1665,21 +1666,22 @@ class MacroAssembler: public Assembler {
void store_check(Register obj); // store check for obj - register is destroyed afterwards
void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed)
#ifndef SERIALGC
void g1_write_barrier_pre(Register obj,
#ifndef _LP64
Register pre_val,
Register thread,
#endif
Register tmp,
Register tmp2,
bool tosca_live);
bool tosca_live,
bool expand_call);
void g1_write_barrier_post(Register store_addr,
Register new_val,
#ifndef _LP64
Register thread,
#endif
Register tmp,
Register tmp2);
#endif // SERIALGC
// split store_check(Register obj) to enhance instruction interleaving
void store_check_part_1(Register obj);


@ -464,15 +464,19 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
#ifndef SERIALGC
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
// At this point we know that marking is in progress
// At this point we know that marking is in progress.
// If do_load() is true then we have to emit the
// load of the previous value; otherwise it has already
// been loaded into _pre_val.
__ bind(_entry);
assert(pre_val()->is_register(), "Precondition.");
Register pre_val_reg = pre_val()->as_register();
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
if (do_load()) {
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
}
__ cmpptr(pre_val_reg, (int32_t) NULL_WORD);
__ jcc(Assembler::equal, _continuation);
@ -482,6 +486,68 @@ void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
}
void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) {
// At this point we know that offset == referent_offset.
//
// So we might have to emit:
// if (src == null) goto continuation.
//
// and we definitely have to emit:
// if (klass(src).reference_type == REF_NONE) goto continuation
// if (!marking_active) goto continuation
// if (pre_val == null) goto continuation
// call pre_barrier(pre_val)
// goto continuation
//
__ bind(_entry);
assert(src()->is_register(), "sanity");
Register src_reg = src()->as_register();
if (gen_src_check()) {
// The original src operand was not a constant.
// Generate src == null?
__ cmpptr(src_reg, (int32_t) NULL_WORD);
__ jcc(Assembler::equal, _continuation);
}
// Generate src->_klass->_reference_type == REF_NONE?
assert(tmp()->is_register(), "sanity");
Register tmp_reg = tmp()->as_register();
__ load_klass(tmp_reg, src_reg);
Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset_in_bytes() + sizeof(oopDesc));
__ cmpl(ref_type_adr, REF_NONE);
__ jcc(Assembler::equal, _continuation);
// Is marking active?
assert(thread()->is_register(), "precondition");
Register thread_reg = thread()->as_register();
Address in_progress(thread_reg, in_bytes(JavaThread::satb_mark_queue_offset() +
PtrQueue::byte_offset_of_active()));
if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
__ cmpl(in_progress, 0);
} else {
assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
__ cmpb(in_progress, 0);
}
__ jcc(Assembler::equal, _continuation);
// val == null?
assert(val()->is_register(), "Precondition.");
Register val_reg = val()->as_register();
__ cmpptr(val_reg, (int32_t) NULL_WORD);
__ jcc(Assembler::equal, _continuation);
ce->store_parameter(val()->as_register(), 0);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
__ jmp(_continuation);
}
jbyte* G1PostBarrierStub::_byte_map_base = NULL;
jbyte* G1PostBarrierStub::byte_map_base_slow() {


@ -326,7 +326,8 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
if (obj_store) {
// Needs GC write barriers.
pre_barrier(LIR_OprFact::address(array_addr), false, NULL);
pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
__ move(value.result(), array_addr, null_check_info);
// Seems to be a precise
post_barrier(LIR_OprFact::address(array_addr), value.result());
@ -794,7 +795,8 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
if (type == objectType) { // Write-barrier needed for Object fields.
// Do the pre-write barrier, if any.
pre_barrier(addr, false, NULL);
pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
}
LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience
@ -1339,7 +1341,8 @@ void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
bool is_obj = (type == T_ARRAY || type == T_OBJECT);
if (is_obj) {
// Do the pre-write barrier, if any.
pre_barrier(LIR_OprFact::address(addr), false, NULL);
pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
__ move(data, addr);
assert(src->is_register(), "must be register");
// Seems to be a precise address


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,6 +34,7 @@
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_empty_entry(void);
address generate_accessor_entry(void);
address generate_Reference_get_entry(void);
void lock_method(void);
void generate_stack_overflow_check(void);


@ -936,6 +936,26 @@ address InterpreterGenerator::generate_accessor_entry(void) {
}
address InterpreterGenerator::generate_Reference_get_entry(void) {
#ifndef SERIALGC
if (UseG1GC) {
// We need to have a routine that generates code to:
// * load the value in the referent field
// * pass that value to the pre-barrier.
//
// In the case of G1 this will record the value of the
// referent in an SATB buffer if marking is active.
// This will cause concurrent marking to mark the referent
// field as live.
Unimplemented();
}
#endif // SERIALGC
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
}
//
// C++ Interpreter stub for calling a native method.
// This sets up a somewhat different looking stack for calling the native method
@ -2210,6 +2230,8 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
default : ShouldNotReachHere(); break;
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,6 +39,7 @@
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_empty_entry(void);
address generate_accessor_entry(void);
address generate_Reference_get_entry();
void lock_method(void);
void generate_stack_overflow_check(void);


@ -776,6 +776,98 @@ address InterpreterGenerator::generate_accessor_entry(void) {
}
// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#ifndef SERIALGC
// Code: _aload_0, _getfield, _areturn
// parameter size = 1
//
// The code that gets generated by this routine is split into 2 parts:
// 1. The "intrinsified" code for G1 (or any SATB based GC),
// 2. The slow path - which is an expansion of the regular method entry.
//
// Notes:-
// * In the G1 code we do not check whether we need to block for
// a safepoint. If G1 is enabled then we must execute the specialized
// code for Reference.get (except when the Reference object is null)
// so that we can log the value in the referent field with an SATB
// update buffer.
// If the code for the getfield template is modified so that the
// G1 pre-barrier code is executed when the current method is
// Reference.get() then going through the normal method entry
// will be fine.
// * The G1 code below can, however, check the receiver object (the instance
// of java.lang.ref.Reference) and jump to the slow path if null. If the
// Reference object is null then we obviously cannot fetch the referent
// and so we don't need to call the G1 pre-barrier. Thus we can use the
// regular method entry code to generate the NPE.
//
// This code is based on generate_accessor_entry.
// rbx: methodOop
// rcx: receiver (preserve for slow entry into asm interpreter)
// rsi: senderSP must be preserved for slow path, set SP to it on fast path
address entry = __ pc();
const int referent_offset = java_lang_ref_Reference::referent_offset;
guarantee(referent_offset > 0, "referent offset not initialized");
if (UseG1GC) {
Label slow_path;
// Check if local 0 != NULL
// If the receiver is null then it is OK to jump to the slow path.
__ movptr(rax, Address(rsp, wordSize));
__ testptr(rax, rax);
__ jcc(Assembler::zero, slow_path);
// rax: local 0 (must be preserved across the G1 barrier call)
//
// rbx: method (at this point it's scratch)
// rcx: receiver (at this point it's scratch)
// rdx: scratch
// rdi: scratch
//
// rsi: sender sp
// Preserve the sender sp in case the pre-barrier
// calls the runtime
__ push(rsi);
// Load the value of the referent field.
const Address field_address(rax, referent_offset);
__ movptr(rax, field_address);
// Generate the G1 pre-barrier code to log the value of
// the referent field in an SATB buffer.
__ get_thread(rcx);
__ g1_write_barrier_pre(noreg /* obj */,
rax /* pre_val */,
rcx /* thread */,
rbx /* tmp */,
true /* tosca_live */,
true /* expand_call */);
// _areturn
__ pop(rsi); // get sender sp
__ pop(rdi); // get return address
__ mov(rsp, rsi); // set sp to sender sp
__ jmp(rdi);
__ bind(slow_path);
(void) generate_normal_entry(false);
return entry;
}
#endif // SERIALGC
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
}
//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
@ -1444,6 +1536,8 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
default : ShouldNotReachHere(); break;
}


@ -757,6 +757,95 @@ address InterpreterGenerator::generate_accessor_entry(void) {
return entry_point;
}
// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#ifndef SERIALGC
// Code: _aload_0, _getfield, _areturn
// parameter size = 1
//
// The code that gets generated by this routine is split into 2 parts:
// 1. The "intrinsified" code for G1 (or any SATB based GC),
// 2. The slow path - which is an expansion of the regular method entry.
//
// Notes:-
// * In the G1 code we do not check whether we need to block for
// a safepoint. If G1 is enabled then we must execute the specialized
// code for Reference.get (except when the Reference object is null)
// so that we can log the value in the referent field with an SATB
// update buffer.
// If the code for the getfield template is modified so that the
// G1 pre-barrier code is executed when the current method is
// Reference.get() then going through the normal method entry
// will be fine.
// * The G1 code can, however, check the receiver object (the instance
// of java.lang.ref.Reference) and jump to the slow path if null. If the
// Reference object is null then we obviously cannot fetch the referent
// and so we don't need to call the G1 pre-barrier. Thus we can use the
// regular method entry code to generate the NPE.
//
// This code is based on generate_accessor_entry.
//
// rbx: methodOop
// r13: senderSP must be preserved for slow path, set SP to it on fast path
address entry = __ pc();
const int referent_offset = java_lang_ref_Reference::referent_offset;
guarantee(referent_offset > 0, "referent offset not initialized");
if (UseG1GC) {
Label slow_path;
// rbx: method
// Check if local 0 != NULL
// If the receiver is null then it is OK to jump to the slow path.
__ movptr(rax, Address(rsp, wordSize));
__ testptr(rax, rax);
__ jcc(Assembler::zero, slow_path);
// rax: local 0
// rbx: method (but can be used as scratch now)
// rdx: scratch
// rdi: scratch
// Load the value of the referent field.
const Address field_address(rax, referent_offset);
__ load_heap_oop(rax, field_address);
// Generate the G1 pre-barrier code to log the value of
// the referent field in an SATB buffer.
__ g1_write_barrier_pre(noreg /* obj */,
rax /* pre_val */,
r15_thread /* thread */,
rbx /* tmp */,
true /* tosca_live */,
true /* expand_call */);
// _areturn
__ pop(rdi); // get return address
__ mov(rsp, r13); // set sp to sender sp
__ jmp(rdi);
__ ret(0);
// generate a vanilla interpreter entry as the slow path
__ bind(slow_path);
(void) generate_normal_entry(false);
return entry;
}
#endif // SERIALGC
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
}
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
@ -1463,6 +1552,8 @@ address AbstractInterpreterGenerator::generate_method_entry(
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
default : ShouldNotReachHere(); break;
}


@ -139,7 +139,12 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
}
__ get_thread(rcx);
__ save_bcp();
__ g1_write_barrier_pre(rdx, rcx, rsi, rbx, val != noreg);
__ g1_write_barrier_pre(rdx /* obj */,
rbx /* pre_val */,
rcx /* thread */,
rsi /* tmp */,
val != noreg /* tosca_live */,
false /* expand_call */);
// Do the actual store
// noreg means NULL
@ -148,7 +153,11 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
// No post barrier for NULL
} else {
__ movl(Address(rdx, 0), val);
__ g1_write_barrier_post(rdx, rax, rcx, rbx, rsi);
__ g1_write_barrier_post(rdx /* store_adr */,
val /* new_val */,
rcx /* thread */,
rbx /* tmp */,
rsi /* tmp2 */);
}
__ restore_bcp();


@ -147,12 +147,21 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
} else {
__ leaq(rdx, obj);
}
__ g1_write_barrier_pre(rdx, r8, rbx, val != noreg);
__ g1_write_barrier_pre(rdx /* obj */,
rbx /* pre_val */,
r15_thread /* thread */,
r8 /* tmp */,
val != noreg /* tosca_live */,
false /* expand_call */);
if (val == noreg) {
__ store_heap_oop_null(Address(rdx, 0));
} else {
__ store_heap_oop(Address(rdx, 0), val);
__ g1_write_barrier_post(rdx, val, r8, rbx);
__ g1_write_barrier_post(rdx /* store_adr */,
val /* new_val */,
r15_thread /* thread */,
r8 /* tmp */,
rbx /* tmp2 */);
}
}


@ -737,6 +737,26 @@ address InterpreterGenerator::generate_accessor_entry() {
return generate_entry((address) CppInterpreter::accessor_entry);
}
address InterpreterGenerator::generate_Reference_get_entry(void) {
#ifndef SERIALGC
if (UseG1GC) {
// We need to have a routine that generates code to:
// * load the value in the referent field
// * pass that value to the pre-barrier.
//
// In the case of G1 this will record the value of the
// referent in an SATB buffer if marking is active.
// This will cause concurrent marking to mark the referent
// field as live.
Unimplemented();
}
#endif // SERIALGC
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
}
address InterpreterGenerator::generate_native_entry(bool synchronized) {
assert(synchronized == false, "should be");
@ -792,6 +812,10 @@ address AbstractInterpreterGenerator::generate_method_entry(
entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);
break;
case Interpreter::java_lang_ref_reference_get:
entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry();
break;
default:
ShouldNotReachHere();
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -37,6 +37,7 @@
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_empty_entry();
address generate_accessor_entry();
address generate_Reference_get_entry();
address generate_method_handle_entry();
#endif // CPU_ZERO_VM_INTERPRETERGENERATOR_ZERO_HPP


@ -519,42 +519,126 @@ class ArrayCopyStub: public CodeStub {
// Code stubs for Garbage-First barriers.
class G1PreBarrierStub: public CodeStub {
private:
bool _do_load;
LIR_Opr _addr;
LIR_Opr _pre_val;
LIR_PatchCode _patch_code;
CodeEmitInfo* _info;
public:
// pre_val (a temporary register) must be a register;
// Version that _does_ generate a load of the previous value from addr.
// addr (the address of the field to be read) must be a LIR_Address
// pre_val (a temporary register) must be a register;
G1PreBarrierStub(LIR_Opr addr, LIR_Opr pre_val, LIR_PatchCode patch_code, CodeEmitInfo* info) :
_addr(addr), _pre_val(pre_val), _patch_code(patch_code), _info(info)
_addr(addr), _pre_val(pre_val), _do_load(true),
_patch_code(patch_code), _info(info)
{
assert(_pre_val->is_register(), "should be temporary register");
assert(_addr->is_address(), "should be the address of the field");
}
// Version that _does not_ generate load of the previous value; the
// previous value is assumed to have already been loaded into pre_val.
G1PreBarrierStub(LIR_Opr pre_val) :
_addr(LIR_OprFact::illegalOpr), _pre_val(pre_val), _do_load(false),
_patch_code(lir_patch_none), _info(NULL)
{
assert(_pre_val->is_register(), "should be a register");
}
LIR_Opr addr() const { return _addr; }
LIR_Opr pre_val() const { return _pre_val; }
LIR_PatchCode patch_code() const { return _patch_code; }
CodeEmitInfo* info() const { return _info; }
bool do_load() const { return _do_load; }
virtual void emit_code(LIR_Assembler* e);
virtual void visit(LIR_OpVisitState* visitor) {
// don't pass in the code emit info since it's processed in the fast
// path
if (_info != NULL)
visitor->do_slow_case(_info);
else
if (_do_load) {
// don't pass in the code emit info since it's processed in the fast
// path
if (_info != NULL)
visitor->do_slow_case(_info);
else
visitor->do_slow_case();
visitor->do_input(_addr);
visitor->do_temp(_pre_val);
} else {
visitor->do_slow_case();
visitor->do_input(_addr);
visitor->do_temp(_pre_val);
visitor->do_input(_pre_val);
}
}
#ifndef PRODUCT
virtual void print_name(outputStream* out) const { out->print("G1PreBarrierStub"); }
#endif // PRODUCT
};
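The two constructors correspond to the two ways the barrier is reached; a minimal sketch of how a caller chooses between them (mirroring G1SATBCardTableModRef_pre_barrier later in this change):

// do_load == true: the stub itself loads the previous value from addr_opr.
CodeStub* slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
// do_load == false: the previous value is already in pre_val, as in the
// intrinsified Reference.get() and Unsafe.getObject() paths.
CodeStub* slow2 = new G1PreBarrierStub(pre_val);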
// This G1 barrier code stub is used in Unsafe.getObject.
// It generates a sequence of guards around the SATB
// barrier code that are used to detect when we have
// the referent field of a Reference object.
// The first check is assumed to have been generated
// in the code generated for Unsafe.getObject().
class G1UnsafeGetObjSATBBarrierStub: public CodeStub {
private:
LIR_Opr _val;
LIR_Opr _src;
LIR_Opr _tmp;
LIR_Opr _thread;
bool _gen_src_check;
public:
// A G1 barrier that is guarded by generated guards that determine whether
// val (which is the result of Unsafe.getObject()) should be recorded in an
// SATB log buffer. We could be reading the referent field of a Reference object
// using Unsafe.getObject() and we need to record the referent.
//
// * val is the operand returned by the unsafe.getObject routine.
// * src is the base object
// * tmp is a temp used to load the klass of src, and then its reference type
// * thread is the thread object.
G1UnsafeGetObjSATBBarrierStub(LIR_Opr val, LIR_Opr src,
LIR_Opr tmp, LIR_Opr thread,
bool gen_src_check) :
_val(val), _src(src),
_tmp(tmp), _thread(thread),
_gen_src_check(gen_src_check)
{
assert(_val->is_register(), "should have already been loaded");
assert(_src->is_register(), "should have already been loaded");
assert(_tmp->is_register(), "should be a temporary register");
}
LIR_Opr val() const { return _val; }
LIR_Opr src() const { return _src; }
LIR_Opr tmp() const { return _tmp; }
LIR_Opr thread() const { return _thread; }
bool gen_src_check() const { return _gen_src_check; }
virtual void emit_code(LIR_Assembler* e);
virtual void visit(LIR_OpVisitState* visitor) {
visitor->do_slow_case();
visitor->do_input(_val);
visitor->do_input(_src);
visitor->do_input(_thread);
visitor->do_temp(_tmp);
}
#ifndef PRODUCT
virtual void print_name(outputStream* out) const { out->print("G1UnsafeGetObjSATBBarrierStub"); }
#endif // PRODUCT
};
class G1PostBarrierStub: public CodeStub {
private:
LIR_Opr _addr;


@ -2912,6 +2912,46 @@ GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
block()->set_end(end);
break;
}
case vmIntrinsics::_Reference_get:
{
if (UseG1GC) {
// With java.lang.ref.Reference.get() we must go through the
// intrinsic - when G1 is enabled - even when get() is the root
// method of the compile so that, if necessary, the value in
// the referent field of the reference object gets recorded by
// the pre-barrier code.
// Specifically, if G1 is enabled, the value in the referent
// field is recorded by the G1 SATB pre barrier. This will
// result in the referent being marked live and the reference
// object removed from the list of discovered references during
// reference processing.
// Set up a stream so that appending instructions works properly.
ciBytecodeStream s(scope->method());
s.reset_to_bci(0);
scope_data()->set_stream(&s);
s.next();
// setup the initial block state
_block = start_block;
_state = start_block->state()->copy_for_parsing();
_last = start_block;
load_local(objectType, 0);
// Emit the intrinsic node.
bool result = try_inline_intrinsics(scope->method());
if (!result) BAILOUT("failed to inline intrinsic");
method_return(apop());
// connect the begin and end blocks and we're all done.
BlockEnd* end = last()->as_BlockEnd();
block()->set_end(end);
break;
}
// Otherwise, fall thru
}
default:
scope_data()->add_to_work_list(start_block);
iterate_all_blocks();
@ -3149,6 +3189,15 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
append_unsafe_CAS(callee);
return true;
case vmIntrinsics::_Reference_get:
// It is only when G1 is enabled that we absolutely
// need to use the intrinsic version of Reference.get()
// so that the value in the referent field, if necessary,
// can be registered by the pre-barrier code.
if (!UseG1GC) return false;
preserves_state = true;
break;
default : return false; // do not inline
}
// create intrinsic node


@ -1104,6 +1104,38 @@ void LIRGenerator::do_Return(Return* x) {
set_no_result(x);
}
// Example: ref.get()
// Combination of LoadField and g1 pre-write barrier
void LIRGenerator::do_Reference_get(Intrinsic* x) {
const int referent_offset = java_lang_ref_Reference::referent_offset;
guarantee(referent_offset > 0, "referent offset not initialized");
assert(x->number_of_arguments() == 1, "wrong type");
LIRItem reference(x->argument_at(0), this);
reference.load_item();
// need to perform the null check on the reference object
CodeEmitInfo* info = NULL;
if (x->needs_null_check()) {
info = state_for(x);
}
LIR_Address* referent_field_adr =
new LIR_Address(reference.result(), referent_offset, T_OBJECT);
LIR_Opr result = rlock_result(x);
__ load(referent_field_adr, result, info);
// Register the value in the referent field with the pre-barrier
pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
result /* pre_val */,
false /* do_load */,
false /* patch */,
NULL /* info */);
}
// Example: object.getClass()
void LIRGenerator::do_getClass(Intrinsic* x) {
@ -1246,13 +1278,14 @@ LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
// Various barriers
void LIRGenerator::pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info) {
void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
bool do_load, bool patch, CodeEmitInfo* info) {
// Do the pre-write barrier, if any.
switch (_bs->kind()) {
#ifndef SERIALGC
case BarrierSet::G1SATBCT:
case BarrierSet::G1SATBCTLogging:
G1SATBCardTableModRef_pre_barrier(addr_opr, patch, info);
G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
break;
#endif // SERIALGC
case BarrierSet::CardTableModRef:
@ -1293,9 +1326,8 @@ void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC
void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info) {
if (G1DisablePreBarrier) return;
void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
bool do_load, bool patch, CodeEmitInfo* info) {
// First we test whether marking is in progress.
BasicType flag_type;
if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
@ -1314,26 +1346,40 @@ void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patc
// Read the marking-in-progress flag.
LIR_Opr flag_val = new_register(T_INT);
__ load(mark_active_flag_addr, flag_val);
LIR_PatchCode pre_val_patch_code =
patch ? lir_patch_normal : lir_patch_none;
LIR_Opr pre_val = new_register(T_OBJECT);
__ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
if (!addr_opr->is_address()) {
assert(addr_opr->is_register(), "must be");
addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
LIR_PatchCode pre_val_patch_code = lir_patch_none;
CodeStub* slow;
if (do_load) {
assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");
if (patch)
pre_val_patch_code = lir_patch_normal;
pre_val = new_register(T_OBJECT);
if (!addr_opr->is_address()) {
assert(addr_opr->is_register(), "must be");
addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
}
slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
} else {
assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
assert(pre_val->is_register(), "must be");
assert(pre_val->type() == T_OBJECT, "must be an object");
assert(info == NULL, "sanity");
slow = new G1PreBarrierStub(pre_val);
}
CodeStub* slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code,
info);
__ branch(lir_cond_notEqual, T_INT, slow);
__ branch_destination(slow->continuation());
}
void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
if (G1DisablePostBarrier) return;
// If the "new_val" is a constant NULL, no barrier is necessary.
if (new_val->is_constant() &&
new_val->as_constant_ptr()->as_jobject() == NULL) return;
@ -1555,6 +1601,8 @@ void LIRGenerator::do_StoreField(StoreField* x) {
if (is_oop) {
// Do the pre-write barrier, if any.
pre_barrier(LIR_OprFact::address(address),
LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load*/,
needs_patching,
(info ? new CodeEmitInfo(info) : NULL));
}
@ -1984,9 +2032,127 @@ void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
off.load_item();
src.load_item();
LIR_Opr reg = reg = rlock_result(x, x->basic_type());
LIR_Opr reg = rlock_result(x, x->basic_type());
get_Object_unsafe(reg, src.result(), off.result(), type, x->is_volatile());
#ifndef SERIALGC
// We might be reading the value of the referent field of a
// Reference object in order to attach it back to the live
// object graph. If G1 is enabled then we need to record
// the value that is being returned in an SATB log buffer.
//
// We need to generate code similar to the following...
//
// if (offset == java_lang_ref_Reference::referent_offset) {
// if (src != NULL) {
// if (klass(src)->reference_type() != REF_NONE) {
// pre_barrier(..., reg, ...);
// }
// }
// }
//
// The first non-constant check of either the offset or
// the src operand will be done here; the remainder
// will take place in the generated code stub.
if (UseG1GC && type == T_OBJECT) {
bool gen_code_stub = true; // Assume we need to generate the slow code stub.
bool gen_offset_check = true; // Assume the code stub has to generate the offset guard.
bool gen_source_check = true; // Assume the code stub has to check the src object for null.
if (off.is_constant()) {
jint off_con = off.get_jint_constant();
if (off_con != java_lang_ref_Reference::referent_offset) {
// The constant offset is something other than referent_offset.
// We can skip generating/checking the remaining guards and
// skip generation of the code stub.
gen_code_stub = false;
} else {
// The constant offset is the same as referent_offset -
// we do not need to generate a runtime offset check.
gen_offset_check = false;
}
}
// We don't need to generate the stub if the source object is an array
if (gen_code_stub && src.type()->is_array()) {
gen_code_stub = false;
}
if (gen_code_stub) {
// We still need to continue with the checks.
if (src.is_constant()) {
ciObject* src_con = src.get_jobject_constant();
if (src_con->is_null_object()) {
// The constant src object is null - We can skip
// generating the code stub.
gen_code_stub = false;
} else {
// Non-null constant source object. We still have to generate
// the slow stub - but we don't need to generate the runtime
// null object check.
gen_source_check = false;
}
}
}
if (gen_code_stub) {
// Temporaries.
LIR_Opr src_klass = new_register(T_OBJECT);
// Get the thread pointer for the pre-barrier
LIR_Opr thread = getThreadPointer();
CodeStub* stub;
// We can generate one runtime check here. Let's start with
// the offset check.
if (gen_offset_check) {
// if (offset == referent_offset) -> slow code stub
__ cmp(lir_cond_equal, off.result(),
LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset));
// Optionally generate "src == null" check.
stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(),
src_klass, thread,
gen_source_check);
__ branch(lir_cond_equal, T_INT, stub);
} else {
if (gen_source_check) {
// offset is a const and equals referent offset
// if (source != null) -> slow code stub
__ cmp(lir_cond_notEqual, src.result(), LIR_OprFact::oopConst(NULL));
// Since we are generating the "if src == null" guard here,
// there is no need to generate the "src == null" check again.
stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(),
src_klass, thread,
false);
__ branch(lir_cond_notEqual, T_OBJECT, stub);
} else {
// We have statically determined that offset == referent_offset
// && src != null so we unconditionally branch to code stub
// to perform the guards and record reg in the SATB log buffer.
stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(),
src_klass, thread,
false);
__ branch(lir_cond_always, T_ILLEGAL, stub);
}
}
// Continuation point
__ branch_destination(stub->continuation());
}
}
#endif // SERIALGC
if (x->is_volatile() && os::is_MP()) __ membar_acquire();
}
@ -2652,6 +2818,10 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) {
do_AttemptUpdate(x);
break;
case vmIntrinsics::_Reference_get:
do_Reference_get(x);
break;
default: ShouldNotReachHere(); break;
}
}


@ -246,6 +246,7 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
void do_AttemptUpdate(Intrinsic* x);
void do_NIOCheckIndex(Intrinsic* x);
void do_FPIntrinsics(Intrinsic* x);
void do_Reference_get(Intrinsic* x);
void do_UnsafePrefetch(UnsafePrefetch* x, bool is_store);
@ -260,13 +261,14 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
// generic interface
void pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info);
void pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val, bool do_load, bool patch, CodeEmitInfo* info);
void post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
// specific implementations
// pre barriers
void G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info);
void G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
bool do_load, bool patch, CodeEmitInfo* info);
// post barriers


@ -706,6 +706,10 @@
do_intrinsic(_checkIndex, java_nio_Buffer, checkIndex_name, int_int_signature, F_R) \
do_name( checkIndex_name, "checkIndex") \
\
/* java/lang/ref/Reference */ \
do_intrinsic(_Reference_get, java_lang_ref_Reference, get_name, void_object_signature, F_R) \
\
\
do_class(sun_misc_AtomicLongCSImpl, "sun/misc/AtomicLongCSImpl") \
do_intrinsic(_get_AtomicLong, sun_misc_AtomicLongCSImpl, get_name, void_long_signature, F_R) \
/* (symbols get_name and void_long_signature defined above) */ \


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,7 +47,9 @@ G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap,
void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
assert(pre_val->is_oop_or_null(true), "Error");
// Nulls should have been already filtered.
assert(pre_val->is_oop(true), "Error");
if (!JavaThread::satb_mark_queue_set().is_active()) return;
Thread* thr = Thread::current();
if (thr->is_Java_thread()) {
@ -59,20 +61,6 @@ void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
}
}
// When we know the current java thread:
template <class T> void
G1SATBCardTableModRefBS::write_ref_field_pre_static(T* field,
oop new_val,
JavaThread* jt) {
if (!JavaThread::satb_mark_queue_set().is_active()) return;
T heap_oop = oopDesc::load_heap_oop(field);
if (!oopDesc::is_null(heap_oop)) {
oop pre_val = oopDesc::decode_heap_oop_not_null(heap_oop);
assert(pre_val->is_oop(true /* ignore mark word */), "Error");
jt->satb_mark_queue().enqueue(pre_val);
}
}
template <class T> void
G1SATBCardTableModRefBS::write_ref_array_pre_work(T* dst, int count) {
if (!JavaThread::satb_mark_queue_set().is_active()) return;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,12 +37,11 @@ class DirtyCardQueueSet;
// snapshot-at-the-beginning marking.
class G1SATBCardTableModRefBS: public CardTableModRefBSForCTRS {
private:
public:
// Add "pre_val" to a set of objects that may have been disconnected from the
// pre-marking object graph.
static void enqueue(oop pre_val);
public:
G1SATBCardTableModRefBS(MemRegion whole_heap,
int max_covered_regions);
@ -61,10 +60,6 @@ public:
}
}
// When we know the current java thread:
template <class T> static void write_ref_field_pre_static(T* field, oop newVal,
JavaThread* jt);
// We export this to make it available in cases where the static
// type of the barrier set is known. Note that it is non-virtual.
template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {

View File

@ -89,13 +89,9 @@
"The number of discovered reference objects to process before " \
"draining concurrent marking work queues.") \
\
experimental(bool, G1UseConcMarkReferenceProcessing, false, \
"If true, enable reference discovery during concurrent " \
"marking and reference processing at the end of remark " \
"(unsafe).") \
experimental(bool, G1UseConcMarkReferenceProcessing, true, \
"If true, enable reference discovery during concurrent " \
"marking and reference processing at the end of remark.") \
\
develop(bool, G1SATBBarrierPrintNullPreVals, false, \
"If true, count frac of ptr writes with null pre-vals.") \
\
product(intx, G1SATBBufferSize, 1*K, \
"Number of entries in an SATB log buffer.") \
@ -150,12 +146,6 @@
develop(bool, G1PrintParCleanupStats, false, \
"When true, print extra stats about parallel cleanup.") \
\
develop(bool, G1DisablePreBarrier, false, \
"Disable generation of pre-barrier (i.e., marking barrier) ") \
\
develop(bool, G1DisablePostBarrier, false, \
"Disable generation of post-barrier (i.e., RS barrier) ") \
\
product(intx, G1UpdateBufferSize, 256, \
"Size of an update buffer") \
\

View File

@ -104,6 +104,7 @@ class AbstractInterpreter: AllStatic {
java_lang_math_sqrt, // implementation of java.lang.Math.sqrt (x)
java_lang_math_log, // implementation of java.lang.Math.log (x)
java_lang_math_log10, // implementation of java.lang.Math.log10 (x)
java_lang_ref_reference_get, // implementation of java.lang.ref.Reference.get()
number_of_method_entries,
invalid = -1
};
@ -140,7 +141,7 @@ class AbstractInterpreter: AllStatic {
// Method activation
static MethodKind method_kind(methodHandle m);
static address entry_for_kind(MethodKind k) { assert(0 <= k && k < number_of_method_entries, "illegal kind"); return _entry_table[k]; }
static address entry_for_method(methodHandle m) { return _entry_table[method_kind(m)]; }
static address entry_for_method(methodHandle m) { return entry_for_kind(method_kind(m)); }
static void print_method_kind(MethodKind kind) PRODUCT_RETURN;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -125,6 +125,7 @@ void CppInterpreterGenerator::generate_all() {
method_entry(java_lang_math_sqrt );
method_entry(java_lang_math_log );
method_entry(java_lang_math_log10 );
method_entry(java_lang_ref_reference_get);
Interpreter::_native_entry_begin = Interpreter::code()->code_end();
method_entry(native);
method_entry(native_synchronized);

View File

@ -208,12 +208,6 @@ AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m)
return empty;
}
// Accessor method?
if (m->is_accessor()) {
assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1");
return accessor;
}
// Special intrinsic method?
// Note: This test must come _after_ the test for native methods,
// otherwise we will run into problems with JDK 1.2, see also
@ -227,6 +221,15 @@ AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m)
case vmIntrinsics::_dsqrt : return java_lang_math_sqrt ;
case vmIntrinsics::_dlog : return java_lang_math_log ;
case vmIntrinsics::_dlog10: return java_lang_math_log10;
case vmIntrinsics::_Reference_get:
return java_lang_ref_reference_get;
}
// Accessor method?
if (m->is_accessor()) {
assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1");
return accessor;
}
// Note: for now: zero locals for all non-empty methods
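The reordering above matters because Reference.get() is itself a one-parameter accessor: testing is_accessor() first would select the fast accessor entry and bypass the barrier-aware entry. A runnable sketch of the ordering, with hypothetical names:

#include <cassert>

enum MethodKind { accessor, java_lang_ref_reference_get, zerolocals };

static MethodKind method_kind(bool is_reference_get, bool is_accessor) {
  if (is_reference_get) return java_lang_ref_reference_get; // must test first
  if (is_accessor)      return accessor;
  return zerolocals;
}

int main() {
  // Reference.get() matches both predicates; the intrinsic entry must win.
  assert(method_kind(true, true) == java_lang_ref_reference_get);
  return 0;
}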

View File

@ -372,6 +372,7 @@ void TemplateInterpreterGenerator::generate_all() {
method_entry(java_lang_math_sqrt )
method_entry(java_lang_math_log )
method_entry(java_lang_math_log10)
method_entry(java_lang_ref_reference_get)
// all native method kinds (must be one contiguous block)
Interpreter::_native_entry_begin = Interpreter::code()->code_end();

View File

@ -403,6 +403,8 @@ class instanceKlass: public Klass {
ReferenceType reference_type() const { return _reference_type; }
void set_reference_type(ReferenceType t) { _reference_type = t; }
static int reference_type_offset_in_bytes() { return offset_of(instanceKlass, _reference_type); }
// find local field, returns true if found
bool find_local_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const;
// find field in direct superinterfaces, returns the interface in which the field is defined

View File

@ -629,7 +629,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
initial_gvn()->transform_no_reclaim(top());
// Set up tf(), start(), and find a CallGenerator.
CallGenerator* cg;
CallGenerator* cg = NULL;
if (is_osr_compilation()) {
const TypeTuple *domain = StartOSRNode::osr_domain();
const TypeTuple *range = TypeTuple::make_range(method()->signature());
@ -644,9 +644,24 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
StartNode* s = new (this, 2) StartNode(root(), tf()->domain());
initial_gvn()->set_type_bottom(s);
init_start(s);
float past_uses = method()->interpreter_invocation_count();
float expected_uses = past_uses;
cg = CallGenerator::for_inline(method(), expected_uses);
if (method()->intrinsic_id() == vmIntrinsics::_Reference_get && UseG1GC) {
// With java.lang.ref.Reference.get() we must go through the
// intrinsic when G1 is enabled - even when get() is the root
// method of the compile - so that, if necessary, the value in
// the referent field of the reference object gets recorded by
// the pre-barrier code.
// Specifically, if G1 is enabled, the value in the referent
// field is recorded by the G1 SATB pre barrier. This will
// result in the referent being marked live and the reference
// object removed from the list of discovered references during
// reference processing.
cg = find_intrinsic(method(), false);
}
if (cg == NULL) {
float past_uses = method()->interpreter_invocation_count();
float expected_uses = past_uses;
cg = CallGenerator::for_inline(method(), expected_uses);
}
}
if (failing()) return;
if (cg == NULL) {
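A simplified sketch of the call-generator selection above (ciMethod and CallGenerator are HotSpot-internal; these stand-ins are hypothetical): under G1 the intrinsic is preferred even for a root compile of Reference.get(), falling back to normal inlining otherwise.

struct Method        { bool is_reference_get; };
struct CallGenerator { };

static CallGenerator* intrinsic_cg(const Method*) { return 0; }  // stub
static CallGenerator* inline_cg(const Method*, float) {
  static CallGenerator cg;
  return &cg;  // stub for the normal inline call generator
}

static CallGenerator* select_root_cg(const Method* m, bool use_g1,
                                     float expected_uses) {
  CallGenerator* cg = 0;
  if (use_g1 && m->is_reference_get) {
    // Prefer the intrinsic so the referent load gets its pre-barrier
    // even when get() is the root of the compile.
    cg = intrinsic_cg(m);
  }
  if (cg == 0) {
    cg = inline_cg(m, expected_uses);  // normal root inlining
  }
  return cg;
}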

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1457,19 +1457,22 @@ Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
}
void GraphKit::pre_barrier(Node* ctl,
void GraphKit::pre_barrier(bool do_load,
Node* ctl,
Node* obj,
Node* adr,
uint adr_idx,
Node* val,
const TypeOopPtr* val_type,
Node* pre_val,
BasicType bt) {
BarrierSet* bs = Universe::heap()->barrier_set();
set_control(ctl);
switch (bs->kind()) {
case BarrierSet::G1SATBCT:
case BarrierSet::G1SATBCTLogging:
g1_write_barrier_pre(obj, adr, adr_idx, val, val_type, bt);
g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
break;
case BarrierSet::CardTableModRef:
@ -1532,7 +1535,11 @@ Node* GraphKit::store_oop(Node* ctl,
uint adr_idx = C->get_alias_index(adr_type);
assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
pre_barrier(control(), obj, adr, adr_idx, val, val_type, bt);
pre_barrier(true /* do_load */,
control(), obj, adr, adr_idx, val, val_type,
NULL /* pre_val */,
bt);
Node* store = store_to_memory(control(), adr, val, bt, adr_idx);
post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
return store;
@ -3465,12 +3472,31 @@ void GraphKit::write_barrier_post(Node* oop_store,
}
// G1 pre/post barriers
void GraphKit::g1_write_barrier_pre(Node* obj,
void GraphKit::g1_write_barrier_pre(bool do_load,
Node* obj,
Node* adr,
uint alias_idx,
Node* val,
const TypeOopPtr* val_type,
Node* pre_val,
BasicType bt) {
// Some sanity checks
// Note: val is unused in this routine.
if (do_load) {
// We need to generate the load of the previous value
assert(obj != NULL, "must have a base");
assert(adr != NULL, "where are we loading from?");
assert(pre_val == NULL, "loaded already?");
assert(val_type != NULL, "need a type");
} else {
// In this case both val_type and alias_idx are unused.
assert(pre_val != NULL, "must be loaded already");
assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
}
assert(bt == T_OBJECT, "or we shouldn't be here");
IdealKit ideal(gvn(), control(), merged_memory(), true);
Node* tls = __ thread(); // ThreadLocalStorage
@ -3492,32 +3518,28 @@ void GraphKit::g1_write_barrier_pre(Node* obj,
PtrQueue::byte_offset_of_index());
const int buffer_offset = in_bytes(JavaThread::satb_mark_queue_offset() + // 652
PtrQueue::byte_offset_of_buf());
// Now the actual pointers into the thread
// set_control( ctl);
Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
Node* index_adr = __ AddP(no_base, tls, __ ConX(index_offset));
// Now some of the values
Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);
// if (!marking)
__ if_then(marking, BoolTest::ne, zero); {
Node* index = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
const Type* t1 = adr->bottom_type();
const Type* t2 = val->bottom_type();
Node* orig = __ load(no_ctrl, adr, val_type, bt, alias_idx);
// if (orig != NULL)
__ if_then(orig, BoolTest::ne, null()); {
Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
if (do_load) {
// load original value
// alias_idx correct??
pre_val = __ load(no_ctrl, adr, val_type, bt, alias_idx);
}
// if (pre_val != NULL)
__ if_then(pre_val, BoolTest::ne, null()); {
Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
// is the queue for this thread full?
__ if_then(index, BoolTest::ne, zero, likely); {
@ -3531,10 +3553,9 @@ void GraphKit::g1_write_barrier_pre(Node* obj,
next_indexX = _gvn.transform( new (C, 2) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) );
#endif
// Now get the buffer location we will log the original value into and store it
// Now get the buffer location we will log the previous value into and store it
Node *log_addr = __ AddP(no_base, buffer, next_indexX);
__ store(__ ctrl(), log_addr, orig, T_OBJECT, Compile::AliasIdxRaw);
__ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw);
// update the index
__ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);
@ -3542,9 +3563,9 @@ void GraphKit::g1_write_barrier_pre(Node* obj,
// logging buffer is full, call the runtime
const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
__ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", orig, tls);
__ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", pre_val, tls);
} __ end_if(); // (!index)
} __ end_if(); // (orig != NULL)
} __ end_if(); // (pre_val != NULL)
} __ end_if(); // (!marking)
// Final sync IdealKit and GraphKit.
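The IdealKit code above compiles down to a short fast path. A standalone sketch with hypothetical types (the real queue lives in JavaThread's SATB mark queue): filter on marking-active and a null previous value, log into the thread-local buffer, and fall back to the g1_wb_pre runtime call when the buffer is full.

#include <cstddef>

struct SatbQueue {
  int    active;  // is concurrent marking running?
  size_t index;   // byte index of the next free slot; 0 means full
  void** buf;
};

static void runtime_enqueue(void* pre_val, SatbQueue* q) {
  (void)pre_val; (void)q;  // stand-in for SharedRuntime::g1_wb_pre
}

static void g1_pre_barrier(SatbQueue* q, void* pre_val) {
  if (!q->active) return;       // "if (!marking)" filter
  if (pre_val == NULL) return;  // "if (pre_val != NULL)" filter
  if (q->index != 0) {          // room left in the buffer
    q->index -= sizeof(void*);
    q->buf[q->index / sizeof(void*)] = pre_val;  // log the previous value
  } else {
    runtime_enqueue(pre_val, q);  // buffer full: leaf call to the runtime
  }
}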

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -544,8 +544,10 @@ class GraphKit : public Phase {
BasicType bt);
// For the few case where the barriers need special help
void pre_barrier(Node* ctl, Node* obj, Node* adr, uint adr_idx,
Node* val, const TypeOopPtr* val_type, BasicType bt);
void pre_barrier(bool do_load, Node* ctl,
Node* obj, Node* adr, uint adr_idx, Node* val, const TypeOopPtr* val_type,
Node* pre_val,
BasicType bt);
void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
Node* val, BasicType bt, bool use_precise);
@ -669,11 +671,13 @@ class GraphKit : public Phase {
Node* adr, uint adr_idx, Node* val, bool use_precise);
// G1 pre/post barriers
void g1_write_barrier_pre(Node* obj,
void g1_write_barrier_pre(bool do_load,
Node* obj,
Node* adr,
uint alias_idx,
Node* val,
const TypeOopPtr* val_type,
Node* pre_val,
BasicType bt);
void g1_write_barrier_post(Node* store,

View File

@ -165,6 +165,10 @@ class LibraryCallKit : public GraphKit {
// This returns Type::AnyPtr, RawPtr, or OopPtr.
int classify_unsafe_addr(Node* &base, Node* &offset);
Node* make_unsafe_address(Node* base, Node* offset);
// Helper for inline_unsafe_access.
// Generates the guards that check whether the result of
// Unsafe.getObject should be recorded in an SATB log buffer.
void insert_g1_pre_barrier(Node* base_oop, Node* offset, Node* pre_val);
bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile);
bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static);
bool inline_unsafe_allocate();
@ -239,6 +243,8 @@ class LibraryCallKit : public GraphKit {
bool inline_numberOfTrailingZeros(vmIntrinsics::ID id);
bool inline_bitCount(vmIntrinsics::ID id);
bool inline_reverseBytes(vmIntrinsics::ID id);
bool inline_reference_get();
};
@ -335,6 +341,14 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
if (!UsePopCountInstruction) return NULL;
break;
case vmIntrinsics::_Reference_get:
// It is only when G1 is enabled that we absolutely
// need to use the intrinsic version of Reference.get()
// so that the value in the referent field, if necessary,
// can be registered by the pre-barrier code.
if (!UseG1GC) return NULL;
break;
default:
assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
@ -386,13 +400,22 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
tty->print_cr("Intrinsic %s", str);
}
#endif
if (kit.try_to_inline()) {
if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
tty->print("Inlining intrinsic %s%s at bci:%d in",
vmIntrinsics::name_at(intrinsic_id()),
(is_virtual() ? " (virtual)" : ""), kit.bci());
kit.caller()->print_short_name(tty);
tty->print_cr(" (%d bytes)", kit.caller()->code_size());
if (jvms->has_method()) {
// Not a root compile.
tty->print("Inlining intrinsic %s%s at bci:%d in",
vmIntrinsics::name_at(intrinsic_id()),
(is_virtual() ? " (virtual)" : ""), kit.bci());
kit.caller()->print_short_name(tty);
tty->print_cr(" (%d bytes)", kit.caller()->code_size());
} else {
// Root compile
tty->print_cr("Generating intrinsic %s%s at bci:%d",
vmIntrinsics::name_at(intrinsic_id()),
(is_virtual() ? " (virtual)" : ""), kit.bci());
}
}
C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
if (C->log()) {
@ -405,11 +428,19 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
}
if (PrintIntrinsics) {
tty->print("Did not inline intrinsic %s%s at bci:%d in",
if (jvms->has_method()) {
// Not a root compile.
tty->print("Did not inline intrinsic %s%s at bci:%d in",
vmIntrinsics::name_at(intrinsic_id()),
(is_virtual() ? " (virtual)" : ""), kit.bci());
kit.caller()->print_short_name(tty);
tty->print_cr(" (%d bytes)", kit.caller()->code_size());
} else {
// Root compile
tty->print("Did not generate intrinsic %s%s at bci:%d in",
vmIntrinsics::name_at(intrinsic_id()),
(is_virtual() ? " (virtual)" : ""), kit.bci());
kit.caller()->print_short_name(tty);
tty->print_cr(" (%d bytes)", kit.caller()->code_size());
}
}
C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
return NULL;
@ -421,6 +452,14 @@ bool LibraryCallKit::try_to_inline() {
const bool is_native_ptr = true;
const bool is_static = true;
if (!jvms()->has_method()) {
// Root JVMState has a null method.
assert(map()->memory()->Opcode() == Op_Parm, "");
// Insert the memory aliasing node
set_all_memory(reset_memory());
}
assert(merged_memory(), "");
switch (intrinsic_id()) {
case vmIntrinsics::_hashCode:
return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
@ -661,6 +700,9 @@ bool LibraryCallKit::try_to_inline() {
case vmIntrinsics::_getCallerClass:
return inline_native_Reflection_getCallerClass();
case vmIntrinsics::_Reference_get:
return inline_reference_get();
default:
// If you get here, it may be that someone has added a new intrinsic
// to the list in vmSymbols.hpp without implementing it here.
@ -2079,6 +2121,110 @@ bool LibraryCallKit::inline_reverseBytes(vmIntrinsics::ID id) {
const static BasicType T_ADDRESS_HOLDER = T_LONG;
// Helper that guards and inserts a G1 pre-barrier.
void LibraryCallKit::insert_g1_pre_barrier(Node* base_oop, Node* offset, Node* pre_val) {
assert(UseG1GC, "should not call this otherwise");
// We could be accessing the referent field of a reference object. If so, when G1
// is enabled, we need to log the value in the referent field in an SATB buffer.
// This routine performs some compile time filters and generates suitable
// runtime filters that guard the pre-barrier code.
// Some compile time checks.
// If offset is a constant, is it java_lang_ref_Reference::referent_offset?
const TypeX* otype = offset->find_intptr_t_type();
if (otype != NULL && otype->is_con() &&
otype->get_con() != java_lang_ref_Reference::referent_offset) {
// Constant offset but not the referent_offset so just return
return;
}
// We only need to generate the runtime guards for instances.
const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
if (btype != NULL) {
if (btype->isa_aryptr()) {
// Array type so nothing to do
return;
}
const TypeInstPtr* itype = btype->isa_instptr();
if (itype != NULL) {
// Can the klass of base_oop be statically determined
// to be _not_ a sub-class of Reference?
ciKlass* klass = itype->klass();
if (klass->is_subtype_of(env()->Reference_klass()) &&
!env()->Reference_klass()->is_subtype_of(klass)) {
return;
}
}
}
// The compile time filters did not reject base_oop/offset so
// we need to generate the following runtime filters
//
// if (offset == java_lang_ref_Reference::referent_offset) {
// if (base != null) {
// if (klass(base)->reference_type() != REF_NONE) {
// pre_barrier(_, pre_val, ...);
// }
// }
// }
float likely = PROB_LIKELY(0.999);
float unlikely = PROB_UNLIKELY(0.999);
IdealKit ideal(gvn(), control(), merged_memory());
#define __ ideal.
const int reference_type_offset = instanceKlass::reference_type_offset_in_bytes() +
sizeof(oopDesc);
Node* referent_off = __ ConI(java_lang_ref_Reference::referent_offset);
__ if_then(offset, BoolTest::eq, referent_off, unlikely); {
__ if_then(base_oop, BoolTest::ne, null(), likely); {
// Update graphKit memory and control from IdealKit.
set_all_memory(__ merged_memory());
set_control(__ ctrl());
Node* ref_klass_con = makecon(TypeKlassPtr::make(env()->Reference_klass()));
Node* is_instof = gen_instanceof(base_oop, ref_klass_con);
// Update IdealKit memory and control from graphKit.
__ set_all_memory(merged_memory());
__ set_ctrl(control());
Node* one = __ ConI(1);
__ if_then(is_instof, BoolTest::eq, one, unlikely); {
// Update graphKit from IdealKit.
set_all_memory(__ merged_memory());
set_control(__ ctrl());
// Use the pre-barrier to record the value in the referent field
pre_barrier(false /* do_load */,
__ ctrl(),
NULL /* obj */, NULL /* adr */, -1 /* alias_idx */, NULL /* val */, NULL /* val_type */,
pre_val /* pre_val */,
T_OBJECT);
// Update IdealKit from graphKit.
__ set_all_memory(merged_memory());
__ set_ctrl(control());
} __ end_if(); // _ref_type != ref_none
} __ end_if(); // base != NULL
} __ end_if(); // offset == referent_offset
// Final sync IdealKit and GraphKit.
sync_kit(ideal);
#undef __
}
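Condensing the compile-time filters above into one predicate (a hypothetical standalone form, not HotSpot source): the runtime guards and pre-barrier are emitted only when the access cannot be proven not to touch Reference.referent.

static bool may_touch_referent(bool offset_is_con, long con_offset,
                               long referent_offset,
                               bool base_is_array,
                               bool base_statically_not_reference) {
  if (offset_is_con && con_offset != referent_offset) return false;
  if (base_is_array)                 return false; // arrays have no referent
  if (base_statically_not_reference) return false; // klass check decided it
  return true;  // emit the offset/null/instanceof runtime guards
}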
// Interpret Unsafe.fieldOffset cookies correctly:
extern jlong Unsafe_field_offset_to_byte_offset(jlong field_offset);
@ -2155,9 +2301,11 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
// Build address expression. See the code in inline_unsafe_prefetch.
Node *adr;
Node *heap_base_oop = top();
Node* offset = top();
if (!is_native_ptr) {
// The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
Node* offset = pop_pair();
offset = pop_pair();
// The base is either a Java object or a value produced by Unsafe.staticFieldBase
Node* base = pop();
// We currently rely on the cookies produced by Unsafe.xxxFieldOffset
@ -2198,6 +2346,13 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
// or Compile::must_alias will throw a diagnostic assert.)
bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
// If we are reading the value of the referent field of a Reference
// object (either by using Unsafe directly or through reflection)
// then, if G1 is enabled, we need to record the referent in an
// SATB log buffer using the pre-barrier mechanism.
bool need_read_barrier = UseG1GC && !is_native_ptr && !is_store &&
offset != top() && heap_base_oop != top();
if (!is_store && type == T_OBJECT) {
// Attempt to infer a sharper value type from the offset and base type.
ciKlass* sharpened_klass = NULL;
@ -2281,8 +2436,13 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
case T_SHORT:
case T_INT:
case T_FLOAT:
push(p);
break;
case T_OBJECT:
push( p );
if (need_read_barrier) {
insert_g1_pre_barrier(heap_base_oop, offset, p);
}
push(p);
break;
case T_ADDRESS:
// Cast to an int type.
@ -2539,7 +2699,10 @@ bool LibraryCallKit::inline_unsafe_CAS(BasicType type) {
case T_OBJECT:
// reference stores need a store barrier.
// (They don't if CAS fails, but it isn't worth checking.)
pre_barrier(control(), base, adr, alias_idx, newval, value_type->make_oopptr(), T_OBJECT);
pre_barrier(true /* do_load*/,
control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
NULL /* pre_val*/,
T_OBJECT);
#ifdef _LP64
if (adr->bottom_type()->is_ptr_to_narrowoop()) {
Node *newval_enc = _gvn.transform(new (C, 2) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
@ -5315,3 +5478,44 @@ LibraryCallKit::generate_unchecked_arraycopy(const TypePtr* adr_type,
copyfunc_addr, copyfunc_name, adr_type,
src_start, dest_start, copy_length XTOP);
}
//----------------------------inline_reference_get----------------------------
bool LibraryCallKit::inline_reference_get() {
const int nargs = 1; // self
guarantee(java_lang_ref_Reference::referent_offset > 0,
"should have already been set");
int referent_offset = java_lang_ref_Reference::referent_offset;
// Restore the stack and pop off the argument
_sp += nargs;
Node *reference_obj = pop();
// Null check on self without removing any arguments.
_sp += nargs;
reference_obj = do_null_check(reference_obj, T_OBJECT);
_sp -= nargs;
if (stopped()) return true;
Node *adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);
ciInstanceKlass* klass = env()->Object_klass();
const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
Node* no_ctrl = NULL;
Node *result = make_load(no_ctrl, adr, object_type, T_OBJECT);
// Use the pre-barrier to record the value in the referent field
pre_barrier(false /* do_load */,
control(),
NULL /* obj */, NULL /* adr */, -1 /* alias_idx */, NULL /* val */, NULL /* val_type */,
result /* pre_val */,
T_OBJECT);
push(result);
return true;
}
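The net effect of inline_reference_get() can be read as the following shape (hypothetical, simplified; satb_log stands in for the pre-barrier path): load the referent, then log a non-null result before returning it.

#include <cstddef>

struct Reference { void* referent; };

static void* reference_get(Reference* self, void (*satb_log)(void*)) {
  // self has already been null-checked (do_null_check above).
  void* result = self->referent;  // the load that needs the read barrier
  if (result != NULL) {
    satb_log(result);  // pre_barrier(do_load = false, pre_val = result)
  }
  return result;
}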

View File

@ -29,6 +29,9 @@
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "interpreter/linkResolver.hpp"
#ifndef SERIALGC
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif // SERIALGC
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/oopFactory.hpp"
@ -1724,6 +1727,26 @@ JNI_ENTRY(jobject, jni_GetObjectField(JNIEnv *env, jobject obj, jfieldID fieldID
o = JvmtiExport::jni_GetField_probe(thread, obj, o, k, fieldID, false);
}
jobject ret = JNIHandles::make_local(env, o->obj_field(offset));
#ifndef SERIALGC
// If G1 is enabled and we are accessing the value of the referent
// field in a reference object then we need to register a non-null
// referent with the SATB barrier.
if (UseG1GC) {
bool needs_barrier = false;
if (ret != NULL &&
offset == java_lang_ref_Reference::referent_offset &&
instanceKlass::cast(k)->reference_type() != REF_NONE) {
assert(instanceKlass::cast(k)->is_subclass_of(SystemDictionary::Reference_klass()), "sanity");
needs_barrier = true;
}
if (needs_barrier) {
oop referent = JNIHandles::resolve(ret);
G1SATBCardTableModRefBS::enqueue(referent);
}
}
#endif // SERIALGC
DTRACE_PROBE1(hotspot_jni, GetObjectField__return, ret);
return ret;
JNI_END

View File

@ -24,6 +24,9 @@
#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#ifndef SERIALGC
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif // SERIALGC
#include "memory/allocation.inline.hpp"
#include "prims/jni.h"
#include "prims/jvm.h"
@ -193,7 +196,32 @@ UNSAFE_ENTRY(jobject, Unsafe_GetObject140(JNIEnv *env, jobject unsafe, jobject o
UnsafeWrapper("Unsafe_GetObject");
if (obj == NULL) THROW_0(vmSymbols::java_lang_NullPointerException());
GET_OOP_FIELD(obj, offset, v)
return JNIHandles::make_local(env, v);
jobject ret = JNIHandles::make_local(env, v);
#ifndef SERIALGC
// We could be accessing the referent field in a reference
// object. If G1 is enabled then we need to register a non-null
// referent with the SATB barrier.
if (UseG1GC) {
bool needs_barrier = false;
if (ret != NULL) {
if (offset == java_lang_ref_Reference::referent_offset) {
oop o = JNIHandles::resolve_non_null(obj);
klassOop k = o->klass();
if (instanceKlass::cast(k)->reference_type() != REF_NONE) {
assert(instanceKlass::cast(k)->is_subclass_of(SystemDictionary::Reference_klass()), "sanity");
needs_barrier = true;
}
}
}
if (needs_barrier) {
oop referent = JNIHandles::resolve(ret);
G1SATBCardTableModRefBS::enqueue(referent);
}
}
#endif // SERIALGC
return ret;
UNSAFE_END
UNSAFE_ENTRY(void, Unsafe_SetObject140(JNIEnv *env, jobject unsafe, jobject obj, jint offset, jobject x_h))
@ -226,7 +254,32 @@ UNSAFE_END
UNSAFE_ENTRY(jobject, Unsafe_GetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset))
UnsafeWrapper("Unsafe_GetObject");
GET_OOP_FIELD(obj, offset, v)
return JNIHandles::make_local(env, v);
jobject ret = JNIHandles::make_local(env, v);
#ifndef SERIALGC
// We could be accessing the referent field in a reference
// object. If G1 is enabled then we need to register a non-null
// referent with the SATB barrier.
if (UseG1GC) {
bool needs_barrier = false;
if (ret != NULL) {
if (offset == java_lang_ref_Reference::referent_offset && obj != NULL) {
oop o = JNIHandles::resolve(obj);
klassOop k = o->klass();
if (instanceKlass::cast(k)->reference_type() != REF_NONE) {
assert(instanceKlass::cast(k)->is_subclass_of(SystemDictionary::Reference_klass()), "sanity");
needs_barrier = true;
}
}
}
if (needs_barrier) {
oop referent = JNIHandles::resolve(ret);
G1SATBCardTableModRefBS::enqueue(referent);
}
}
#endif // SERIALGC
return ret;
UNSAFE_END
UNSAFE_ENTRY(void, Unsafe_SetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h))