Merge
This commit is contained in:
commit
18602d10f0
.hgtags.hgtags-top-repo
corba
hotspot
.hgtags
make
src
cpu
sparc/vm
assembler_sparc.cppassembler_sparc.hppc1_CodeStubs_sparc.cppc1_LIRGenerator_sparc.cppcppInterpreter_sparc.cppinterpreterGenerator_sparc.hppinterpreter_sparc.cpptemplateInterpreter_sparc.cpptemplateTable_sparc.cpp
x86/vm
assembler_x86.cppassembler_x86.hppc1_CodeStubs_x86.cppc1_LIRGenerator_x86.cppcppInterpreterGenerator_x86.hppcppInterpreter_x86.cppinterpreterGenerator_x86.hpptemplateInterpreter_x86_32.cpptemplateInterpreter_x86_64.cpptemplateTable_x86_32.cpptemplateTable_x86_64.cpp
zero/vm
os/windows/vm
share/vm
c1
classfile
gc_implementation
g1
parNew
parallelScavenge
psOldGen.cpppsOldGen.hpppsParallelCompact.cpppsParallelCompact.hpppsPermGen.cpppsPermGen.hpppsYoungGen.cpppsYoungGen.hpp
shared
interpreter
memory
oops
opto
prims
runtime
jaxp
jaxws
jdk
1
.hgtags
1
.hgtags
@ -113,3 +113,4 @@ f75a1efb141210901aabe00a834e0fc32bb8b337 jdk7-b135
|
||||
46acf76a533954cfd594bb88fdea79938abfbe20 jdk7-b136
|
||||
d1cf7d4ee16c341f5b8c7e7f1d68a8c412b6c693 jdk7-b137
|
||||
62b8e328f8c8c66c14b0713222116f2add473f3f jdk7-b138
|
||||
955488f34ca418f6cdab843d61c20d2c615637d9 jdk7-b139
|
||||
|
@ -113,3 +113,4 @@ ddc2fcb3682ffd27f44354db666128827be7e3c3 jdk7-b134
|
||||
2fe76e73adaa5133ac559f0b3c2c0707eca04580 jdk7-b136
|
||||
7654afc6a29e43cb0a1343ce7f1287bf690d5e5f jdk7-b137
|
||||
fc47c97bbbd91b1f774d855c48a7e285eb1a351a jdk7-b138
|
||||
7ed6d0b9aaa12320832a7ddadb88d6d8d0dda4c1 jdk7-b139
|
||||
|
@ -113,3 +113,4 @@ e0b72ae5dc5e824b342801c8d1d336a55eb54e2c jdk7-b135
|
||||
48ef0c712e7cbf272f47f9224db92a3c6a9e2612 jdk7-b136
|
||||
a66c01d8bf895261715955df0b95545c000ed6a8 jdk7-b137
|
||||
78d8cf04697e9df54f7f11e195b7da29b8e345a2 jdk7-b138
|
||||
60b074ec6fcf5cdf9efce22fdfb02326ed8fa2d3 jdk7-b139
|
||||
|
@ -164,3 +164,5 @@ bd586e392d93b7ed7a1636dcc8da2b6a4203a102 hs21-b06
|
||||
2dbcb4a4d8dace5fe78ceb563b134f1fb296cd8f hs21-b07
|
||||
0930dc920c185afbf40fed9a655290b8e5b16783 jdk7-b138
|
||||
0930dc920c185afbf40fed9a655290b8e5b16783 hs21-b08
|
||||
611e19a16519d6fb5deea9ab565336e6e6ee475d jdk7-b139
|
||||
611e19a16519d6fb5deea9ab565336e6e6ee475d hs21-b09
|
||||
|
@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2011
|
||||
|
||||
HS_MAJOR_VER=21
|
||||
HS_MINOR_VER=0
|
||||
HS_BUILD_NUMBER=09
|
||||
HS_BUILD_NUMBER=10
|
||||
|
||||
JDK_MAJOR_VER=1
|
||||
JDK_MINOR_VER=7
|
||||
|
@ -4257,34 +4257,14 @@ void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
|
||||
///////////////////////////////////////////////////////////////////////////////////
|
||||
#ifndef SERIALGC
|
||||
|
||||
static uint num_stores = 0;
|
||||
static uint num_null_pre_stores = 0;
|
||||
static address satb_log_enqueue_with_frame = NULL;
|
||||
static u_char* satb_log_enqueue_with_frame_end = NULL;
|
||||
|
||||
static void count_null_pre_vals(void* pre_val) {
|
||||
num_stores++;
|
||||
if (pre_val == NULL) num_null_pre_stores++;
|
||||
if ((num_stores % 1000000) == 0) {
|
||||
tty->print_cr(UINT32_FORMAT " stores, " UINT32_FORMAT " (%5.2f%%) with null pre-vals.",
|
||||
num_stores, num_null_pre_stores,
|
||||
100.0*(float)num_null_pre_stores/(float)num_stores);
|
||||
}
|
||||
}
|
||||
|
||||
static address satb_log_enqueue_with_frame = 0;
|
||||
static u_char* satb_log_enqueue_with_frame_end = 0;
|
||||
|
||||
static address satb_log_enqueue_frameless = 0;
|
||||
static u_char* satb_log_enqueue_frameless_end = 0;
|
||||
static address satb_log_enqueue_frameless = NULL;
|
||||
static u_char* satb_log_enqueue_frameless_end = NULL;
|
||||
|
||||
static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions?
|
||||
|
||||
// The calls to this don't work. We'd need to do a fair amount of work to
|
||||
// make it work.
|
||||
static void check_index(int ind) {
|
||||
assert(0 <= ind && ind <= 64*K && ((ind % oopSize) == 0),
|
||||
"Invariants.");
|
||||
}
|
||||
|
||||
static void generate_satb_log_enqueue(bool with_frame) {
|
||||
BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize);
|
||||
CodeBuffer buf(bb);
|
||||
@ -4388,13 +4368,27 @@ static inline void generate_satb_log_enqueue_if_necessary(bool with_frame) {
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::g1_write_barrier_pre(Register obj, Register index, int offset, Register tmp, bool preserve_o_regs) {
|
||||
assert(offset == 0 || index == noreg, "choose one");
|
||||
|
||||
if (G1DisablePreBarrier) return;
|
||||
// satb_log_barrier(tmp, obj, offset, preserve_o_regs);
|
||||
void MacroAssembler::g1_write_barrier_pre(Register obj,
|
||||
Register index,
|
||||
int offset,
|
||||
Register pre_val,
|
||||
Register tmp,
|
||||
bool preserve_o_regs) {
|
||||
Label filtered;
|
||||
// satb_log_barrier_work0(tmp, filtered);
|
||||
|
||||
if (obj == noreg) {
|
||||
// We are not loading the previous value so make
|
||||
// sure that we don't trash the value in pre_val
|
||||
// with the code below.
|
||||
assert_different_registers(pre_val, tmp);
|
||||
} else {
|
||||
// We will be loading the previous value
|
||||
// in this code so...
|
||||
assert(offset == 0 || index == noreg, "choose one");
|
||||
assert(pre_val == noreg, "check this code");
|
||||
}
|
||||
|
||||
// Is marking active?
|
||||
if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
|
||||
ld(G2,
|
||||
in_bytes(JavaThread::satb_mark_queue_offset() +
|
||||
@ -4413,61 +4407,46 @@ void MacroAssembler::g1_write_barrier_pre(Register obj, Register index, int offs
|
||||
br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, tmp, filtered);
|
||||
delayed() -> nop();
|
||||
|
||||
// satb_log_barrier_work1(tmp, offset);
|
||||
if (index == noreg) {
|
||||
if (Assembler::is_simm13(offset)) {
|
||||
load_heap_oop(obj, offset, tmp);
|
||||
// Do we need to load the previous value?
|
||||
if (obj != noreg) {
|
||||
// Load the previous value...
|
||||
if (index == noreg) {
|
||||
if (Assembler::is_simm13(offset)) {
|
||||
load_heap_oop(obj, offset, tmp);
|
||||
} else {
|
||||
set(offset, tmp);
|
||||
load_heap_oop(obj, tmp, tmp);
|
||||
}
|
||||
} else {
|
||||
set(offset, tmp);
|
||||
load_heap_oop(obj, tmp, tmp);
|
||||
load_heap_oop(obj, index, tmp);
|
||||
}
|
||||
} else {
|
||||
load_heap_oop(obj, index, tmp);
|
||||
// Previous value has been loaded into tmp
|
||||
pre_val = tmp;
|
||||
}
|
||||
|
||||
// satb_log_barrier_work2(obj, tmp, offset);
|
||||
|
||||
// satb_log_barrier_work3(tmp, filtered, preserve_o_regs);
|
||||
|
||||
const Register pre_val = tmp;
|
||||
|
||||
if (G1SATBBarrierPrintNullPreVals) {
|
||||
save_frame(0);
|
||||
mov(pre_val, O0);
|
||||
// Save G-regs that target may use.
|
||||
mov(G1, L1);
|
||||
mov(G2, L2);
|
||||
mov(G3, L3);
|
||||
mov(G4, L4);
|
||||
mov(G5, L5);
|
||||
call(CAST_FROM_FN_PTR(address, &count_null_pre_vals));
|
||||
delayed()->nop();
|
||||
// Restore G-regs that target may have used.
|
||||
mov(L1, G1);
|
||||
mov(L2, G2);
|
||||
mov(L3, G3);
|
||||
mov(L4, G4);
|
||||
mov(L5, G5);
|
||||
restore(G0, G0, G0);
|
||||
}
|
||||
assert(pre_val != noreg, "must have a real register");
|
||||
|
||||
// Is the previous value null?
|
||||
// Check on whether to annul.
|
||||
br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, pre_val, filtered);
|
||||
delayed() -> nop();
|
||||
|
||||
// OK, it's not filtered, so we'll need to call enqueue. In the normal
|
||||
// case, pre_val will be a scratch G-reg, but there's some cases in which
|
||||
// it's an O-reg. In the first case, do a normal call. In the latter,
|
||||
// do a save here and call the frameless version.
|
||||
// case, pre_val will be a scratch G-reg, but there are some cases in
|
||||
// which it's an O-reg. In the first case, do a normal call. In the
|
||||
// latter, do a save here and call the frameless version.
|
||||
|
||||
guarantee(pre_val->is_global() || pre_val->is_out(),
|
||||
"Or we need to think harder.");
|
||||
|
||||
if (pre_val->is_global() && !preserve_o_regs) {
|
||||
generate_satb_log_enqueue_if_necessary(true); // with frame.
|
||||
generate_satb_log_enqueue_if_necessary(true); // with frame
|
||||
|
||||
call(satb_log_enqueue_with_frame);
|
||||
delayed()->mov(pre_val, O0);
|
||||
} else {
|
||||
generate_satb_log_enqueue_if_necessary(false); // with frameless.
|
||||
generate_satb_log_enqueue_if_necessary(false); // frameless
|
||||
|
||||
save_frame(0);
|
||||
call(satb_log_enqueue_frameless);
|
||||
delayed()->mov(pre_val->after_save(), O0);
|
||||
@ -4614,7 +4593,6 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val
|
||||
MacroAssembler* post_filter_masm = this;
|
||||
|
||||
if (new_val == G0) return;
|
||||
if (G1DisablePostBarrier) return;
|
||||
|
||||
G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
|
||||
assert(bs->kind() == BarrierSet::G1SATBCT ||
|
||||
@ -4626,6 +4604,7 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val
|
||||
#else
|
||||
srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
|
||||
#endif
|
||||
|
||||
if (G1PrintCTFilterStats) {
|
||||
guarantee(tmp->is_global(), "Or stats won't work...");
|
||||
// This is a sleazy hack: I'm temporarily hijacking G2, which I
|
||||
|
@ -2210,15 +2210,11 @@ public:
|
||||
void card_write_barrier_post(Register store_addr, Register new_val, Register tmp);
|
||||
|
||||
#ifndef SERIALGC
|
||||
// Array store and offset
|
||||
void g1_write_barrier_pre(Register obj, Register index, int offset, Register tmp, bool preserve_o_regs);
|
||||
// General G1 pre-barrier generator.
|
||||
void g1_write_barrier_pre(Register obj, Register index, int offset, Register pre_val, Register tmp, bool preserve_o_regs);
|
||||
|
||||
// General G1 post-barrier generator
|
||||
void g1_write_barrier_post(Register store_addr, Register new_val, Register tmp);
|
||||
|
||||
// May do filtering, depending on the boolean arguments.
|
||||
void g1_card_table_write(jbyte* byte_map_base,
|
||||
Register tmp, Register obj, Register new_val,
|
||||
bool region_filter, bool null_filter);
|
||||
#endif // SERIALGC
|
||||
|
||||
// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
|
||||
|
@ -408,13 +408,20 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
|
||||
#ifndef SERIALGC
|
||||
|
||||
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
|
||||
// At this point we know that marking is in progress.
|
||||
// If do_load() is true then we have to emit the
|
||||
// load of the previous value; otherwise it has already
|
||||
// been loaded into _pre_val.
|
||||
|
||||
__ bind(_entry);
|
||||
|
||||
assert(pre_val()->is_register(), "Precondition.");
|
||||
|
||||
Register pre_val_reg = pre_val()->as_register();
|
||||
|
||||
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
|
||||
if (do_load()) {
|
||||
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
|
||||
}
|
||||
|
||||
if (__ is_in_wdisp16_range(_continuation)) {
|
||||
__ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
|
||||
pre_val_reg, _continuation);
|
||||
@ -431,6 +438,96 @@ void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
|
||||
|
||||
}
|
||||
|
||||
void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) {
|
||||
// At this point we know that offset == referent_offset.
|
||||
//
|
||||
// So we might have to emit:
|
||||
// if (src == null) goto continuation.
|
||||
//
|
||||
// and we definitely have to emit:
|
||||
// if (klass(src).reference_type == REF_NONE) goto continuation
|
||||
// if (!marking_active) goto continuation
|
||||
// if (pre_val == null) goto continuation
|
||||
// call pre_barrier(pre_val)
|
||||
// goto continuation
|
||||
//
|
||||
__ bind(_entry);
|
||||
|
||||
assert(src()->is_register(), "sanity");
|
||||
Register src_reg = src()->as_register();
|
||||
|
||||
if (gen_src_check()) {
|
||||
// The original src operand was not a constant.
|
||||
// Generate src == null?
|
||||
if (__ is_in_wdisp16_range(_continuation)) {
|
||||
__ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
|
||||
src_reg, _continuation);
|
||||
} else {
|
||||
__ cmp(src_reg, G0);
|
||||
__ brx(Assembler::equal, false, Assembler::pt, _continuation);
|
||||
}
|
||||
__ delayed()->nop();
|
||||
}
|
||||
|
||||
// Generate src->_klass->_reference_type() == REF_NONE)?
|
||||
assert(tmp()->is_register(), "sanity");
|
||||
Register tmp_reg = tmp()->as_register();
|
||||
|
||||
__ load_klass(src_reg, tmp_reg);
|
||||
|
||||
Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset_in_bytes() + sizeof(oopDesc));
|
||||
__ ld(ref_type_adr, tmp_reg);
|
||||
|
||||
if (__ is_in_wdisp16_range(_continuation)) {
|
||||
__ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
|
||||
tmp_reg, _continuation);
|
||||
} else {
|
||||
__ cmp(tmp_reg, G0);
|
||||
__ brx(Assembler::equal, false, Assembler::pt, _continuation);
|
||||
}
|
||||
__ delayed()->nop();
|
||||
|
||||
// Is marking active?
|
||||
assert(thread()->is_register(), "precondition");
|
||||
Register thread_reg = thread()->as_pointer_register();
|
||||
|
||||
Address in_progress(thread_reg, in_bytes(JavaThread::satb_mark_queue_offset() +
|
||||
PtrQueue::byte_offset_of_active()));
|
||||
|
||||
if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
|
||||
__ ld(in_progress, tmp_reg);
|
||||
} else {
|
||||
assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
|
||||
__ ldsb(in_progress, tmp_reg);
|
||||
}
|
||||
if (__ is_in_wdisp16_range(_continuation)) {
|
||||
__ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
|
||||
tmp_reg, _continuation);
|
||||
} else {
|
||||
__ cmp(tmp_reg, G0);
|
||||
__ brx(Assembler::equal, false, Assembler::pt, _continuation);
|
||||
}
|
||||
__ delayed()->nop();
|
||||
|
||||
// val == null?
|
||||
assert(val()->is_register(), "Precondition.");
|
||||
Register val_reg = val()->as_register();
|
||||
|
||||
if (__ is_in_wdisp16_range(_continuation)) {
|
||||
__ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
|
||||
val_reg, _continuation);
|
||||
} else {
|
||||
__ cmp(val_reg, G0);
|
||||
__ brx(Assembler::equal, false, Assembler::pt, _continuation);
|
||||
}
|
||||
__ delayed()->nop();
|
||||
|
||||
__ call(Runtime1::entry_for(Runtime1::Runtime1::g1_pre_barrier_slow_id));
|
||||
__ delayed()->mov(val_reg, G4);
|
||||
__ br(Assembler::always, false, Assembler::pt, _continuation);
|
||||
__ delayed()->nop();
|
||||
}
|
||||
|
||||
jbyte* G1PostBarrierStub::_byte_map_base = NULL;
|
||||
|
||||
jbyte* G1PostBarrierStub::byte_map_base_slow() {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -387,7 +387,8 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
|
||||
|
||||
if (obj_store) {
|
||||
// Needs GC write barriers.
|
||||
pre_barrier(LIR_OprFact::address(array_addr), false, NULL);
|
||||
pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
|
||||
true /* do_load */, false /* patch */, NULL);
|
||||
}
|
||||
__ move(value.result(), array_addr, null_check_info);
|
||||
if (obj_store) {
|
||||
@ -687,7 +688,8 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
|
||||
__ add(obj.result(), offset.result(), addr);
|
||||
|
||||
if (type == objectType) { // Write-barrier needed for Object fields.
|
||||
pre_barrier(addr, false, NULL);
|
||||
pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
|
||||
true /* do_load */, false /* patch */, NULL);
|
||||
}
|
||||
|
||||
if (type == objectType)
|
||||
@ -1187,7 +1189,8 @@ void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
|
||||
}
|
||||
|
||||
if (is_obj) {
|
||||
pre_barrier(LIR_OprFact::address(addr), false, NULL);
|
||||
pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
|
||||
true /* do_load */, false /* patch */, NULL);
|
||||
// _bs->c1_write_barrier_pre(this, LIR_OprFact::address(addr));
|
||||
}
|
||||
__ move(data, addr);
|
||||
|
@ -551,6 +551,26 @@ address InterpreterGenerator::generate_accessor_entry(void) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
address InterpreterGenerator::generate_Reference_get_entry(void) {
|
||||
#ifndef SERIALGC
|
||||
if (UseG1GC) {
|
||||
// We need to generate have a routine that generates code to:
|
||||
// * load the value in the referent field
|
||||
// * passes that value to the pre-barrier.
|
||||
//
|
||||
// In the case of G1 this will record the value of the
|
||||
// referent in an SATB buffer if marking is active.
|
||||
// This will cause concurrent marking to mark the referent
|
||||
// field as live.
|
||||
Unimplemented();
|
||||
}
|
||||
#endif // SERIALGC
|
||||
|
||||
// If G1 is not enabled then attempt to go through the accessor entry point
|
||||
// Reference.get is an accessor
|
||||
return generate_accessor_entry();
|
||||
}
|
||||
|
||||
//
|
||||
// Interpreter stub for calling a native method. (C++ interpreter)
|
||||
// This sets up a somewhat different looking stack for calling the native method
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -36,6 +36,7 @@
|
||||
address generate_math_entry(AbstractInterpreter::MethodKind kind);
|
||||
address generate_empty_entry(void);
|
||||
address generate_accessor_entry(void);
|
||||
address generate_Reference_get_entry(void);
|
||||
void lock_method(void);
|
||||
void save_native_result(void);
|
||||
void restore_native_result(void);
|
||||
|
@ -407,6 +407,8 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
|
||||
case Interpreter::java_lang_math_abs : break;
|
||||
case Interpreter::java_lang_math_log : break;
|
||||
case Interpreter::java_lang_math_log10 : break;
|
||||
case Interpreter::java_lang_ref_reference_get
|
||||
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
|
||||
default : ShouldNotReachHere(); break;
|
||||
}
|
||||
|
||||
|
@ -763,6 +763,87 @@ address InterpreterGenerator::generate_accessor_entry(void) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// Method entry for java.lang.ref.Reference.get.
|
||||
address InterpreterGenerator::generate_Reference_get_entry(void) {
|
||||
#ifndef SERIALGC
|
||||
// Code: _aload_0, _getfield, _areturn
|
||||
// parameter size = 1
|
||||
//
|
||||
// The code that gets generated by this routine is split into 2 parts:
|
||||
// 1. The "intrinsified" code for G1 (or any SATB based GC),
|
||||
// 2. The slow path - which is an expansion of the regular method entry.
|
||||
//
|
||||
// Notes:-
|
||||
// * In the G1 code we do not check whether we need to block for
|
||||
// a safepoint. If G1 is enabled then we must execute the specialized
|
||||
// code for Reference.get (except when the Reference object is null)
|
||||
// so that we can log the value in the referent field with an SATB
|
||||
// update buffer.
|
||||
// If the code for the getfield template is modified so that the
|
||||
// G1 pre-barrier code is executed when the current method is
|
||||
// Reference.get() then going through the normal method entry
|
||||
// will be fine.
|
||||
// * The G1 code can, however, check the receiver object (the instance
|
||||
// of java.lang.Reference) and jump to the slow path if null. If the
|
||||
// Reference object is null then we obviously cannot fetch the referent
|
||||
// and so we don't need to call the G1 pre-barrier. Thus we can use the
|
||||
// regular method entry code to generate the NPE.
|
||||
//
|
||||
// This code is based on generate_accessor_enty.
|
||||
|
||||
address entry = __ pc();
|
||||
|
||||
const int referent_offset = java_lang_ref_Reference::referent_offset;
|
||||
guarantee(referent_offset > 0, "referent offset not initialized");
|
||||
|
||||
if (UseG1GC) {
|
||||
Label slow_path;
|
||||
|
||||
// In the G1 code we don't check if we need to reach a safepoint. We
|
||||
// continue and the thread will safepoint at the next bytecode dispatch.
|
||||
|
||||
// Check if local 0 != NULL
|
||||
// If the receiver is null then it is OK to jump to the slow path.
|
||||
__ ld_ptr(Gargs, G0, Otos_i ); // get local 0
|
||||
__ tst(Otos_i); // check if local 0 == NULL and go the slow path
|
||||
__ brx(Assembler::zero, false, Assembler::pn, slow_path);
|
||||
__ delayed()->nop();
|
||||
|
||||
|
||||
// Load the value of the referent field.
|
||||
if (Assembler::is_simm13(referent_offset)) {
|
||||
__ load_heap_oop(Otos_i, referent_offset, Otos_i);
|
||||
} else {
|
||||
__ set(referent_offset, G3_scratch);
|
||||
__ load_heap_oop(Otos_i, G3_scratch, Otos_i);
|
||||
}
|
||||
|
||||
// Generate the G1 pre-barrier code to log the value of
|
||||
// the referent field in an SATB buffer. Note with
|
||||
// these parameters the pre-barrier does not generate
|
||||
// the load of the previous value
|
||||
|
||||
__ g1_write_barrier_pre(noreg /* obj */, noreg /* index */, 0 /* offset */,
|
||||
Otos_i /* pre_val */,
|
||||
G3_scratch /* tmp */,
|
||||
true /* preserve_o_regs */);
|
||||
|
||||
// _areturn
|
||||
__ retl(); // return from leaf routine
|
||||
__ delayed()->mov(O5_savedSP, SP);
|
||||
|
||||
// Generate regular method entry
|
||||
__ bind(slow_path);
|
||||
(void) generate_normal_entry(false);
|
||||
return entry;
|
||||
}
|
||||
#endif // SERIALGC
|
||||
|
||||
// If G1 is not enabled then attempt to go through the accessor entry point
|
||||
// Reference.get is an accessor
|
||||
return generate_accessor_entry();
|
||||
}
|
||||
|
||||
//
|
||||
// Interpreter stub for calling a native method. (asm interpreter)
|
||||
// This sets up a somewhat different looking stack for calling the native method
|
||||
|
@ -57,7 +57,11 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
|
||||
case BarrierSet::G1SATBCT:
|
||||
case BarrierSet::G1SATBCTLogging:
|
||||
{
|
||||
__ g1_write_barrier_pre( base, index, offset, tmp, /*preserve_o_regs*/true);
|
||||
// Load and record the previous value.
|
||||
__ g1_write_barrier_pre(base, index, offset,
|
||||
noreg /* pre_val */,
|
||||
tmp, true /*preserve_o_regs*/);
|
||||
|
||||
if (index == noreg ) {
|
||||
assert(Assembler::is_simm13(offset), "fix this code");
|
||||
__ store_heap_oop(val, base, offset);
|
||||
|
@ -6902,26 +6902,39 @@ void MacroAssembler::testl(Register dst, AddressLiteral src) {
|
||||
#ifndef SERIALGC
|
||||
|
||||
void MacroAssembler::g1_write_barrier_pre(Register obj,
|
||||
#ifndef _LP64
|
||||
Register pre_val,
|
||||
Register thread,
|
||||
#endif
|
||||
Register tmp,
|
||||
Register tmp2,
|
||||
bool tosca_live) {
|
||||
LP64_ONLY(Register thread = r15_thread;)
|
||||
bool tosca_live,
|
||||
bool expand_call) {
|
||||
|
||||
// If expand_call is true then we expand the call_VM_leaf macro
|
||||
// directly to skip generating the check by
|
||||
// InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
|
||||
|
||||
#ifdef _LP64
|
||||
assert(thread == r15_thread, "must be");
|
||||
#endif // _LP64
|
||||
|
||||
Label done;
|
||||
Label runtime;
|
||||
|
||||
assert(pre_val != noreg, "check this code");
|
||||
|
||||
if (obj != noreg) {
|
||||
assert_different_registers(obj, pre_val, tmp);
|
||||
assert(pre_val != rax, "check this code");
|
||||
}
|
||||
|
||||
Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
|
||||
PtrQueue::byte_offset_of_active()));
|
||||
|
||||
Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
|
||||
PtrQueue::byte_offset_of_index()));
|
||||
Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
|
||||
PtrQueue::byte_offset_of_buf()));
|
||||
|
||||
|
||||
Label done;
|
||||
Label runtime;
|
||||
|
||||
// if (!marking_in_progress) goto done;
|
||||
// Is marking active?
|
||||
if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
|
||||
cmpl(in_progress, 0);
|
||||
} else {
|
||||
@ -6930,65 +6943,92 @@ void MacroAssembler::g1_write_barrier_pre(Register obj,
|
||||
}
|
||||
jcc(Assembler::equal, done);
|
||||
|
||||
// if (x.f == NULL) goto done;
|
||||
#ifdef _LP64
|
||||
load_heap_oop(tmp2, Address(obj, 0));
|
||||
#else
|
||||
movptr(tmp2, Address(obj, 0));
|
||||
#endif
|
||||
cmpptr(tmp2, (int32_t) NULL_WORD);
|
||||
// Do we need to load the previous value?
|
||||
if (obj != noreg) {
|
||||
load_heap_oop(pre_val, Address(obj, 0));
|
||||
}
|
||||
|
||||
// Is the previous value null?
|
||||
cmpptr(pre_val, (int32_t) NULL_WORD);
|
||||
jcc(Assembler::equal, done);
|
||||
|
||||
// Can we store original value in the thread's buffer?
|
||||
// Is index == 0?
|
||||
// (The index field is typed as size_t.)
|
||||
|
||||
#ifdef _LP64
|
||||
movslq(tmp, index);
|
||||
cmpq(tmp, 0);
|
||||
#else
|
||||
cmpl(index, 0);
|
||||
#endif
|
||||
jcc(Assembler::equal, runtime);
|
||||
#ifdef _LP64
|
||||
subq(tmp, wordSize);
|
||||
movl(index, tmp);
|
||||
addq(tmp, buffer);
|
||||
#else
|
||||
subl(index, wordSize);
|
||||
movl(tmp, buffer);
|
||||
addl(tmp, index);
|
||||
#endif
|
||||
movptr(Address(tmp, 0), tmp2);
|
||||
movptr(tmp, index); // tmp := *index_adr
|
||||
cmpptr(tmp, 0); // tmp == 0?
|
||||
jcc(Assembler::equal, runtime); // If yes, goto runtime
|
||||
|
||||
subptr(tmp, wordSize); // tmp := tmp - wordSize
|
||||
movptr(index, tmp); // *index_adr := tmp
|
||||
addptr(tmp, buffer); // tmp := tmp + *buffer_adr
|
||||
|
||||
// Record the previous value
|
||||
movptr(Address(tmp, 0), pre_val);
|
||||
jmp(done);
|
||||
|
||||
bind(runtime);
|
||||
// save the live input values
|
||||
if(tosca_live) push(rax);
|
||||
push(obj);
|
||||
#ifdef _LP64
|
||||
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), tmp2, r15_thread);
|
||||
#else
|
||||
push(thread);
|
||||
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), tmp2, thread);
|
||||
pop(thread);
|
||||
#endif
|
||||
pop(obj);
|
||||
if(tosca_live) pop(rax);
|
||||
bind(done);
|
||||
|
||||
if (obj != noreg && obj != rax)
|
||||
push(obj);
|
||||
|
||||
if (pre_val != rax)
|
||||
push(pre_val);
|
||||
|
||||
// Calling the runtime using the regular call_VM_leaf mechanism generates
|
||||
// code (generated by InterpreterMacroAssember::call_VM_leaf_base)
|
||||
// that checks that the *(ebp+frame::interpreter_frame_last_sp) == NULL.
|
||||
//
|
||||
// If we care generating the pre-barrier without a frame (e.g. in the
|
||||
// intrinsified Reference.get() routine) then ebp might be pointing to
|
||||
// the caller frame and so this check will most likely fail at runtime.
|
||||
//
|
||||
// Expanding the call directly bypasses the generation of the check.
|
||||
// So when we do not have have a full interpreter frame on the stack
|
||||
// expand_call should be passed true.
|
||||
|
||||
NOT_LP64( push(thread); )
|
||||
|
||||
if (expand_call) {
|
||||
LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
|
||||
pass_arg1(this, thread);
|
||||
pass_arg0(this, pre_val);
|
||||
MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
|
||||
} else {
|
||||
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
|
||||
}
|
||||
|
||||
NOT_LP64( pop(thread); )
|
||||
|
||||
// save the live input values
|
||||
if (pre_val != rax)
|
||||
pop(pre_val);
|
||||
|
||||
if (obj != noreg && obj != rax)
|
||||
pop(obj);
|
||||
|
||||
if(tosca_live) pop(rax);
|
||||
|
||||
bind(done);
|
||||
}
|
||||
|
||||
void MacroAssembler::g1_write_barrier_post(Register store_addr,
|
||||
Register new_val,
|
||||
#ifndef _LP64
|
||||
Register thread,
|
||||
#endif
|
||||
Register tmp,
|
||||
Register tmp2) {
|
||||
#ifdef _LP64
|
||||
assert(thread == r15_thread, "must be");
|
||||
#endif // _LP64
|
||||
|
||||
LP64_ONLY(Register thread = r15_thread;)
|
||||
Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
|
||||
PtrQueue::byte_offset_of_index()));
|
||||
Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
|
||||
PtrQueue::byte_offset_of_buf()));
|
||||
|
||||
BarrierSet* bs = Universe::heap()->barrier_set();
|
||||
CardTableModRefBS* ct = (CardTableModRefBS*)bs;
|
||||
Label done;
|
||||
@ -7067,7 +7107,6 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr,
|
||||
pop(store_addr);
|
||||
|
||||
bind(done);
|
||||
|
||||
}
|
||||
|
||||
#endif // SERIALGC
|
||||
|
@ -1453,6 +1453,7 @@ private:
|
||||
class MacroAssembler: public Assembler {
|
||||
friend class LIR_Assembler;
|
||||
friend class Runtime1; // as_Address()
|
||||
|
||||
protected:
|
||||
|
||||
Address as_Address(AddressLiteral adr);
|
||||
@ -1674,21 +1675,22 @@ class MacroAssembler: public Assembler {
|
||||
void store_check(Register obj); // store check for obj - register is destroyed afterwards
|
||||
void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed)
|
||||
|
||||
#ifndef SERIALGC
|
||||
|
||||
void g1_write_barrier_pre(Register obj,
|
||||
#ifndef _LP64
|
||||
Register pre_val,
|
||||
Register thread,
|
||||
#endif
|
||||
Register tmp,
|
||||
Register tmp2,
|
||||
bool tosca_live);
|
||||
bool tosca_live,
|
||||
bool expand_call);
|
||||
|
||||
void g1_write_barrier_post(Register store_addr,
|
||||
Register new_val,
|
||||
#ifndef _LP64
|
||||
Register thread,
|
||||
#endif
|
||||
Register tmp,
|
||||
Register tmp2);
|
||||
|
||||
#endif // SERIALGC
|
||||
|
||||
// split store_check(Register obj) to enhance instruction interleaving
|
||||
void store_check_part_1(Register obj);
|
||||
|
@ -466,15 +466,19 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
|
||||
#ifndef SERIALGC
|
||||
|
||||
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
|
||||
|
||||
// At this point we know that marking is in progress
|
||||
// At this point we know that marking is in progress.
|
||||
// If do_load() is true then we have to emit the
|
||||
// load of the previous value; otherwise it has already
|
||||
// been loaded into _pre_val.
|
||||
|
||||
__ bind(_entry);
|
||||
assert(pre_val()->is_register(), "Precondition.");
|
||||
|
||||
Register pre_val_reg = pre_val()->as_register();
|
||||
|
||||
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
|
||||
if (do_load()) {
|
||||
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
|
||||
}
|
||||
|
||||
__ cmpptr(pre_val_reg, (int32_t) NULL_WORD);
|
||||
__ jcc(Assembler::equal, _continuation);
|
||||
@ -484,6 +488,68 @@ void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
|
||||
|
||||
}
|
||||
|
||||
void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) {
|
||||
// At this point we know that offset == referent_offset.
|
||||
//
|
||||
// So we might have to emit:
|
||||
// if (src == null) goto continuation.
|
||||
//
|
||||
// and we definitely have to emit:
|
||||
// if (klass(src).reference_type == REF_NONE) goto continuation
|
||||
// if (!marking_active) goto continuation
|
||||
// if (pre_val == null) goto continuation
|
||||
// call pre_barrier(pre_val)
|
||||
// goto continuation
|
||||
//
|
||||
__ bind(_entry);
|
||||
|
||||
assert(src()->is_register(), "sanity");
|
||||
Register src_reg = src()->as_register();
|
||||
|
||||
if (gen_src_check()) {
|
||||
// The original src operand was not a constant.
|
||||
// Generate src == null?
|
||||
__ cmpptr(src_reg, (int32_t) NULL_WORD);
|
||||
__ jcc(Assembler::equal, _continuation);
|
||||
}
|
||||
|
||||
// Generate src->_klass->_reference_type == REF_NONE)?
|
||||
assert(tmp()->is_register(), "sanity");
|
||||
Register tmp_reg = tmp()->as_register();
|
||||
|
||||
__ load_klass(tmp_reg, src_reg);
|
||||
|
||||
Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset_in_bytes() + sizeof(oopDesc));
|
||||
__ cmpl(ref_type_adr, REF_NONE);
|
||||
__ jcc(Assembler::equal, _continuation);
|
||||
|
||||
// Is marking active?
|
||||
assert(thread()->is_register(), "precondition");
|
||||
Register thread_reg = thread()->as_pointer_register();
|
||||
|
||||
Address in_progress(thread_reg, in_bytes(JavaThread::satb_mark_queue_offset() +
|
||||
PtrQueue::byte_offset_of_active()));
|
||||
|
||||
if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
|
||||
__ cmpl(in_progress, 0);
|
||||
} else {
|
||||
assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
|
||||
__ cmpb(in_progress, 0);
|
||||
}
|
||||
__ jcc(Assembler::equal, _continuation);
|
||||
|
||||
// val == null?
|
||||
assert(val()->is_register(), "Precondition.");
|
||||
Register val_reg = val()->as_register();
|
||||
|
||||
__ cmpptr(val_reg, (int32_t) NULL_WORD);
|
||||
__ jcc(Assembler::equal, _continuation);
|
||||
|
||||
ce->store_parameter(val()->as_register(), 0);
|
||||
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
|
||||
__ jmp(_continuation);
|
||||
}
|
||||
|
||||
jbyte* G1PostBarrierStub::_byte_map_base = NULL;
|
||||
|
||||
jbyte* G1PostBarrierStub::byte_map_base_slow() {
|
||||
|
@ -326,7 +326,8 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
|
||||
|
||||
if (obj_store) {
|
||||
// Needs GC write barriers.
|
||||
pre_barrier(LIR_OprFact::address(array_addr), false, NULL);
|
||||
pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
|
||||
true /* do_load */, false /* patch */, NULL);
|
||||
__ move(value.result(), array_addr, null_check_info);
|
||||
// Seems to be a precise
|
||||
post_barrier(LIR_OprFact::address(array_addr), value.result());
|
||||
@ -794,7 +795,8 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
|
||||
|
||||
if (type == objectType) { // Write-barrier needed for Object fields.
|
||||
// Do the pre-write barrier, if any.
|
||||
pre_barrier(addr, false, NULL);
|
||||
pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
|
||||
true /* do_load */, false /* patch */, NULL);
|
||||
}
|
||||
|
||||
LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience
|
||||
@ -1339,7 +1341,8 @@ void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
|
||||
bool is_obj = (type == T_ARRAY || type == T_OBJECT);
|
||||
if (is_obj) {
|
||||
// Do the pre-write barrier, if any.
|
||||
pre_barrier(LIR_OprFact::address(addr), false, NULL);
|
||||
pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
|
||||
true /* do_load */, false /* patch */, NULL);
|
||||
__ move(data, addr);
|
||||
assert(src->is_register(), "must be register");
|
||||
// Seems to be a precise address
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -34,6 +34,7 @@
|
||||
address generate_math_entry(AbstractInterpreter::MethodKind kind);
|
||||
address generate_empty_entry(void);
|
||||
address generate_accessor_entry(void);
|
||||
address generate_Reference_get_entry(void);
|
||||
void lock_method(void);
|
||||
void generate_stack_overflow_check(void);
|
||||
|
||||
|
@ -936,6 +936,26 @@ address InterpreterGenerator::generate_accessor_entry(void) {
|
||||
|
||||
}
|
||||
|
||||
address InterpreterGenerator::generate_Reference_get_entry(void) {
|
||||
#ifndef SERIALGC
|
||||
if (UseG1GC) {
|
||||
// We need to generate have a routine that generates code to:
|
||||
// * load the value in the referent field
|
||||
// * passes that value to the pre-barrier.
|
||||
//
|
||||
// In the case of G1 this will record the value of the
|
||||
// referent in an SATB buffer if marking is active.
|
||||
// This will cause concurrent marking to mark the referent
|
||||
// field as live.
|
||||
Unimplemented();
|
||||
}
|
||||
#endif // SERIALGC
|
||||
|
||||
// If G1 is not enabled then attempt to go through the accessor entry point
|
||||
// Reference.get is an accessor
|
||||
return generate_accessor_entry();
|
||||
}
|
||||
|
||||
//
|
||||
// C++ Interpreter stub for calling a native method.
|
||||
// This sets up a somewhat different looking stack for calling the native method
|
||||
@ -2210,6 +2230,8 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
|
||||
case Interpreter::java_lang_math_log : // fall thru
|
||||
case Interpreter::java_lang_math_log10 : // fall thru
|
||||
case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
|
||||
case Interpreter::java_lang_ref_reference_get
|
||||
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
|
||||
default : ShouldNotReachHere(); break;
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -39,6 +39,7 @@
|
||||
address generate_math_entry(AbstractInterpreter::MethodKind kind);
|
||||
address generate_empty_entry(void);
|
||||
address generate_accessor_entry(void);
|
||||
address generate_Reference_get_entry();
|
||||
void lock_method(void);
|
||||
void generate_stack_overflow_check(void);
|
||||
|
||||
|
@ -776,6 +776,98 @@ address InterpreterGenerator::generate_accessor_entry(void) {
|
||||
|
||||
}
|
||||
|
||||
// Method entry for java.lang.ref.Reference.get.
|
||||
address InterpreterGenerator::generate_Reference_get_entry(void) {
|
||||
#ifndef SERIALGC
|
||||
// Code: _aload_0, _getfield, _areturn
|
||||
// parameter size = 1
|
||||
//
|
||||
// The code that gets generated by this routine is split into 2 parts:
|
||||
// 1. The "intrinsified" code for G1 (or any SATB based GC),
|
||||
// 2. The slow path - which is an expansion of the regular method entry.
|
||||
//
|
||||
// Notes:-
|
||||
// * In the G1 code we do not check whether we need to block for
|
||||
// a safepoint. If G1 is enabled then we must execute the specialized
|
||||
// code for Reference.get (except when the Reference object is null)
|
||||
// so that we can log the value in the referent field with an SATB
|
||||
// update buffer.
|
||||
// If the code for the getfield template is modified so that the
|
||||
// G1 pre-barrier code is executed when the current method is
|
||||
// Reference.get() then going through the normal method entry
|
||||
// will be fine.
|
||||
// * The G1 code below can, however, check the receiver object (the instance
|
||||
// of java.lang.Reference) and jump to the slow path if null. If the
|
||||
// Reference object is null then we obviously cannot fetch the referent
|
||||
// and so we don't need to call the G1 pre-barrier. Thus we can use the
|
||||
// regular method entry code to generate the NPE.
|
||||
//
|
||||
// This code is based on generate_accessor_enty.
|
||||
|
||||
// rbx,: methodOop
|
||||
// rcx: receiver (preserve for slow entry into asm interpreter)
|
||||
|
||||
// rsi: senderSP must preserved for slow path, set SP to it on fast path
|
||||
|
||||
address entry = __ pc();
|
||||
|
||||
const int referent_offset = java_lang_ref_Reference::referent_offset;
|
||||
guarantee(referent_offset > 0, "referent offset not initialized");
|
||||
|
||||
if (UseG1GC) {
|
||||
Label slow_path;
|
||||
|
||||
// Check if local 0 != NULL
|
||||
// If the receiver is null then it is OK to jump to the slow path.
|
||||
__ movptr(rax, Address(rsp, wordSize));
|
||||
__ testptr(rax, rax);
|
||||
__ jcc(Assembler::zero, slow_path);
|
||||
|
||||
// rax: local 0 (must be preserved across the G1 barrier call)
|
||||
//
|
||||
// rbx: method (at this point it's scratch)
|
||||
// rcx: receiver (at this point it's scratch)
|
||||
// rdx: scratch
|
||||
// rdi: scratch
|
||||
//
|
||||
// rsi: sender sp
|
||||
|
||||
// Preserve the sender sp in case the pre-barrier
|
||||
// calls the runtime
|
||||
__ push(rsi);
|
||||
|
||||
// Load the value of the referent field.
|
||||
const Address field_address(rax, referent_offset);
|
||||
__ movptr(rax, field_address);
|
||||
|
||||
// Generate the G1 pre-barrier code to log the value of
|
||||
// the referent field in an SATB buffer.
|
||||
__ get_thread(rcx);
|
||||
__ g1_write_barrier_pre(noreg /* obj */,
|
||||
rax /* pre_val */,
|
||||
rcx /* thread */,
|
||||
rbx /* tmp */,
|
||||
true /* tosca_save */,
|
||||
true /* expand_call */);
|
||||
|
||||
// _areturn
|
||||
__ pop(rsi); // get sender sp
|
||||
__ pop(rdi); // get return address
|
||||
__ mov(rsp, rsi); // set sp to sender sp
|
||||
__ jmp(rdi);
|
||||
|
||||
__ bind(slow_path);
|
||||
(void) generate_normal_entry(false);
|
||||
|
||||
return entry;
|
||||
}
|
||||
#endif // SERIALGC
|
||||
|
||||
// If G1 is not enabled then attempt to go through the accessor entry point
|
||||
// Reference.get is an accessor
|
||||
return generate_accessor_entry();
|
||||
}
|
||||
|
||||
//
|
||||
// Interpreter stub for calling a native method. (asm interpreter)
|
||||
// This sets up a somewhat different looking stack for calling the native method
|
||||
@ -1444,6 +1536,8 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
|
||||
case Interpreter::java_lang_math_log : // fall thru
|
||||
case Interpreter::java_lang_math_log10 : // fall thru
|
||||
case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
|
||||
case Interpreter::java_lang_ref_reference_get
|
||||
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
|
||||
default : ShouldNotReachHere(); break;
|
||||
}
|
||||
|
||||
|
@ -757,6 +757,95 @@ address InterpreterGenerator::generate_accessor_entry(void) {
|
||||
return entry_point;
|
||||
}
|
||||
|
||||
// Method entry for java.lang.ref.Reference.get.
|
||||
address InterpreterGenerator::generate_Reference_get_entry(void) {
|
||||
#ifndef SERIALGC
|
||||
// Code: _aload_0, _getfield, _areturn
|
||||
// parameter size = 1
|
||||
//
|
||||
// The code that gets generated by this routine is split into 2 parts:
|
||||
// 1. The "intrinsified" code for G1 (or any SATB based GC),
|
||||
// 2. The slow path - which is an expansion of the regular method entry.
|
||||
//
|
||||
// Notes:-
|
||||
// * In the G1 code we do not check whether we need to block for
|
||||
// a safepoint. If G1 is enabled then we must execute the specialized
|
||||
// code for Reference.get (except when the Reference object is null)
|
||||
// so that we can log the value in the referent field with an SATB
|
||||
// update buffer.
|
||||
// If the code for the getfield template is modified so that the
|
||||
// G1 pre-barrier code is executed when the current method is
|
||||
// Reference.get() then going through the normal method entry
|
||||
// will be fine.
|
||||
// * The G1 code can, however, check the receiver object (the instance
|
||||
// of java.lang.Reference) and jump to the slow path if null. If the
|
||||
// Reference object is null then we obviously cannot fetch the referent
|
||||
// and so we don't need to call the G1 pre-barrier. Thus we can use the
|
||||
// regular method entry code to generate the NPE.
|
||||
//
|
||||
// This code is based on generate_accessor_enty.
|
||||
//
|
||||
// rbx: methodOop
|
||||
|
||||
// r13: senderSP must preserve for slow path, set SP to it on fast path
|
||||
|
||||
address entry = __ pc();
|
||||
|
||||
const int referent_offset = java_lang_ref_Reference::referent_offset;
|
||||
guarantee(referent_offset > 0, "referent offset not initialized");
|
||||
|
||||
if (UseG1GC) {
|
||||
Label slow_path;
|
||||
// rbx: method
|
||||
|
||||
// Check if local 0 != NULL
|
||||
// If the receiver is null then it is OK to jump to the slow path.
|
||||
__ movptr(rax, Address(rsp, wordSize));
|
||||
|
||||
__ testptr(rax, rax);
|
||||
__ jcc(Assembler::zero, slow_path);
|
||||
|
||||
// rax: local 0
|
||||
// rbx: method (but can be used as scratch now)
|
||||
// rdx: scratch
|
||||
// rdi: scratch
|
||||
|
||||
// Generate the G1 pre-barrier code to log the value of
|
||||
// the referent field in an SATB buffer.
|
||||
|
||||
// Load the value of the referent field.
|
||||
const Address field_address(rax, referent_offset);
|
||||
__ load_heap_oop(rax, field_address);
|
||||
|
||||
// Generate the G1 pre-barrier code to log the value of
|
||||
// the referent field in an SATB buffer.
|
||||
__ g1_write_barrier_pre(noreg /* obj */,
|
||||
rax /* pre_val */,
|
||||
r15_thread /* thread */,
|
||||
rbx /* tmp */,
|
||||
true /* tosca_live */,
|
||||
true /* expand_call */);
|
||||
|
||||
// _areturn
|
||||
__ pop(rdi); // get return address
|
||||
__ mov(rsp, r13); // set sp to sender sp
|
||||
__ jmp(rdi);
|
||||
__ ret(0);
|
||||
|
||||
// generate a vanilla interpreter entry as the slow path
|
||||
__ bind(slow_path);
|
||||
(void) generate_normal_entry(false);
|
||||
|
||||
return entry;
|
||||
}
|
||||
#endif // SERIALGC
|
||||
|
||||
// If G1 is not enabled then attempt to go through the accessor entry point
|
||||
// Reference.get is an accessor
|
||||
return generate_accessor_entry();
|
||||
}
|
||||
|
||||
|
||||
// Interpreter stub for calling a native method. (asm interpreter)
|
||||
// This sets up a somewhat different looking stack for calling the
|
||||
// native method than the typical interpreter frame setup.
|
||||
@ -1463,6 +1552,8 @@ address AbstractInterpreterGenerator::generate_method_entry(
|
||||
case Interpreter::java_lang_math_log : // fall thru
|
||||
case Interpreter::java_lang_math_log10 : // fall thru
|
||||
case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind); break;
|
||||
case Interpreter::java_lang_ref_reference_get
|
||||
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
|
||||
default : ShouldNotReachHere(); break;
|
||||
}
|
||||
|
||||
|
@ -140,7 +140,12 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
|
||||
}
|
||||
__ get_thread(rcx);
|
||||
__ save_bcp();
|
||||
__ g1_write_barrier_pre(rdx, rcx, rsi, rbx, val != noreg);
|
||||
__ g1_write_barrier_pre(rdx /* obj */,
|
||||
rbx /* pre_val */,
|
||||
rcx /* thread */,
|
||||
rsi /* tmp */,
|
||||
val != noreg /* tosca_live */,
|
||||
false /* expand_call */);
|
||||
|
||||
// Do the actual store
|
||||
// noreg means NULL
|
||||
@ -149,7 +154,11 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
|
||||
// No post barrier for NULL
|
||||
} else {
|
||||
__ movl(Address(rdx, 0), val);
|
||||
__ g1_write_barrier_post(rdx, rax, rcx, rbx, rsi);
|
||||
__ g1_write_barrier_post(rdx /* store_adr */,
|
||||
val /* new_val */,
|
||||
rcx /* thread */,
|
||||
rbx /* tmp */,
|
||||
rsi /* tmp2 */);
|
||||
}
|
||||
__ restore_bcp();
|
||||
|
||||
|
@ -147,12 +147,21 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
|
||||
} else {
|
||||
__ leaq(rdx, obj);
|
||||
}
|
||||
__ g1_write_barrier_pre(rdx, r8, rbx, val != noreg);
|
||||
__ g1_write_barrier_pre(rdx /* obj */,
|
||||
rbx /* pre_val */,
|
||||
r15_thread /* thread */,
|
||||
r8 /* tmp */,
|
||||
val != noreg /* tosca_live */,
|
||||
false /* expand_call */);
|
||||
if (val == noreg) {
|
||||
__ store_heap_oop_null(Address(rdx, 0));
|
||||
} else {
|
||||
__ store_heap_oop(Address(rdx, 0), val);
|
||||
__ g1_write_barrier_post(rdx, val, r8, rbx);
|
||||
__ g1_write_barrier_post(rdx /* store_adr */,
|
||||
val /* new_val */,
|
||||
r15_thread /* thread */,
|
||||
r8 /* tmp */,
|
||||
rbx /* tmp2 */);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -1302,6 +1302,26 @@ address InterpreterGenerator::generate_accessor_entry() {
|
||||
return generate_entry((address) CppInterpreter::accessor_entry);
|
||||
}
|
||||
|
||||
address InterpreterGenerator::generate_Reference_get_entry(void) {
|
||||
#ifndef SERIALGC
|
||||
if (UseG1GC) {
|
||||
// We need to generate have a routine that generates code to:
|
||||
// * load the value in the referent field
|
||||
// * passes that value to the pre-barrier.
|
||||
//
|
||||
// In the case of G1 this will record the value of the
|
||||
// referent in an SATB buffer if marking is active.
|
||||
// This will cause concurrent marking to mark the referent
|
||||
// field as live.
|
||||
Unimplemented();
|
||||
}
|
||||
#endif // SERIALGC
|
||||
|
||||
// If G1 is not enabled then attempt to go through the accessor entry point
|
||||
// Reference.get is an accessor
|
||||
return generate_accessor_entry();
|
||||
}
|
||||
|
||||
address InterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
assert(synchronized == false, "should be");
|
||||
|
||||
@ -1357,6 +1377,10 @@ address AbstractInterpreterGenerator::generate_method_entry(
|
||||
entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);
|
||||
break;
|
||||
|
||||
case Interpreter::java_lang_ref_reference_get:
|
||||
entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry();
|
||||
break;
|
||||
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2007 Red Hat, Inc.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -37,6 +37,7 @@
|
||||
address generate_math_entry(AbstractInterpreter::MethodKind kind);
|
||||
address generate_empty_entry();
|
||||
address generate_accessor_entry();
|
||||
address generate_Reference_get_entry();
|
||||
address generate_method_handle_entry();
|
||||
|
||||
#endif // CPU_ZERO_VM_INTERPRETERGENERATOR_ZERO_HPP
|
||||
|
@ -921,6 +921,8 @@ void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char*
|
||||
HINSTANCE dbghelp;
|
||||
EXCEPTION_POINTERS ep;
|
||||
MINIDUMP_EXCEPTION_INFORMATION mei;
|
||||
MINIDUMP_EXCEPTION_INFORMATION* pmei;
|
||||
|
||||
HANDLE hProcess = GetCurrentProcess();
|
||||
DWORD processId = GetCurrentProcessId();
|
||||
HANDLE dumpFile;
|
||||
@ -971,17 +973,22 @@ void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char*
|
||||
VMError::report_coredump_status("Failed to create file for dumping", false);
|
||||
return;
|
||||
}
|
||||
if (exceptionRecord != NULL && contextRecord != NULL) {
|
||||
ep.ContextRecord = (PCONTEXT) contextRecord;
|
||||
ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord;
|
||||
|
||||
ep.ContextRecord = (PCONTEXT) contextRecord;
|
||||
ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord;
|
||||
mei.ThreadId = GetCurrentThreadId();
|
||||
mei.ExceptionPointers = &ep;
|
||||
pmei = &mei;
|
||||
} else {
|
||||
pmei = NULL;
|
||||
}
|
||||
|
||||
mei.ThreadId = GetCurrentThreadId();
|
||||
mei.ExceptionPointers = &ep;
|
||||
|
||||
// Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
|
||||
// the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
|
||||
if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, &mei, NULL, NULL) == false &&
|
||||
_MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, &mei, NULL, NULL) == false) {
|
||||
if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
|
||||
_MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
|
||||
VMError::report_coredump_status("Call to MiniDumpWriteDump() failed", false);
|
||||
} else {
|
||||
VMError::report_coredump_status(buffer, true);
|
||||
|
@ -519,42 +519,126 @@ class ArrayCopyStub: public CodeStub {
|
||||
// Code stubs for Garbage-First barriers.
|
||||
class G1PreBarrierStub: public CodeStub {
|
||||
private:
|
||||
bool _do_load;
|
||||
LIR_Opr _addr;
|
||||
LIR_Opr _pre_val;
|
||||
LIR_PatchCode _patch_code;
|
||||
CodeEmitInfo* _info;
|
||||
|
||||
public:
|
||||
// pre_val (a temporary register) must be a register;
|
||||
// Version that _does_ generate a load of the previous value from addr.
|
||||
// addr (the address of the field to be read) must be a LIR_Address
|
||||
// pre_val (a temporary register) must be a register;
|
||||
G1PreBarrierStub(LIR_Opr addr, LIR_Opr pre_val, LIR_PatchCode patch_code, CodeEmitInfo* info) :
|
||||
_addr(addr), _pre_val(pre_val), _patch_code(patch_code), _info(info)
|
||||
_addr(addr), _pre_val(pre_val), _do_load(true),
|
||||
_patch_code(patch_code), _info(info)
|
||||
{
|
||||
assert(_pre_val->is_register(), "should be temporary register");
|
||||
assert(_addr->is_address(), "should be the address of the field");
|
||||
}
|
||||
|
||||
// Version that _does not_ generate load of the previous value; the
|
||||
// previous value is assumed to have already been loaded into pre_val.
|
||||
G1PreBarrierStub(LIR_Opr pre_val) :
|
||||
_addr(LIR_OprFact::illegalOpr), _pre_val(pre_val), _do_load(false),
|
||||
_patch_code(lir_patch_none), _info(NULL)
|
||||
{
|
||||
assert(_pre_val->is_register(), "should be a register");
|
||||
}
|
||||
|
||||
LIR_Opr addr() const { return _addr; }
|
||||
LIR_Opr pre_val() const { return _pre_val; }
|
||||
LIR_PatchCode patch_code() const { return _patch_code; }
|
||||
CodeEmitInfo* info() const { return _info; }
|
||||
bool do_load() const { return _do_load; }
|
||||
|
||||
virtual void emit_code(LIR_Assembler* e);
|
||||
virtual void visit(LIR_OpVisitState* visitor) {
|
||||
// don't pass in the code emit info since it's processed in the fast
|
||||
// path
|
||||
if (_info != NULL)
|
||||
visitor->do_slow_case(_info);
|
||||
else
|
||||
if (_do_load) {
|
||||
// don't pass in the code emit info since it's processed in the fast
|
||||
// path
|
||||
if (_info != NULL)
|
||||
visitor->do_slow_case(_info);
|
||||
else
|
||||
visitor->do_slow_case();
|
||||
|
||||
visitor->do_input(_addr);
|
||||
visitor->do_temp(_pre_val);
|
||||
} else {
|
||||
visitor->do_slow_case();
|
||||
visitor->do_input(_addr);
|
||||
visitor->do_temp(_pre_val);
|
||||
visitor->do_input(_pre_val);
|
||||
}
|
||||
}
|
||||
#ifndef PRODUCT
|
||||
virtual void print_name(outputStream* out) const { out->print("G1PreBarrierStub"); }
|
||||
#endif // PRODUCT
|
||||
};
|
||||
|
||||
// This G1 barrier code stub is used in Unsafe.getObject.
// It generates a sequence of guards around the SATB
// barrier code that are used to detect when we have
// the referent field of a Reference object.
// The first check is assumed to have been generated
// in the code generated for Unsafe.getObject().

class G1UnsafeGetObjSATBBarrierStub: public CodeStub {
 private:
  LIR_Opr _val;
  LIR_Opr _src;

  LIR_Opr _tmp;
  LIR_Opr _thread;

  bool _gen_src_check;

 public:
  // A G1 barrier that is guarded by generated guards that determine whether
  // val (which is the result of Unsafe.getObject()) should be recorded in an
  // SATB log buffer. We could be reading the referent field of a Reference object
  // using Unsafe.getObject() and we need to record the referent.
  //
  // * val is the operand returned by the unsafe.getObject routine.
  // * src is the base object
  // * tmp is a temp used to load the klass of src, and then reference type
  // * thread is the thread object.

  G1UnsafeGetObjSATBBarrierStub(LIR_Opr val, LIR_Opr src,
                                LIR_Opr tmp, LIR_Opr thread,
                                bool gen_src_check) :
    _val(val), _src(src),
    _tmp(tmp), _thread(thread),
    _gen_src_check(gen_src_check)
  {
    assert(_val->is_register(), "should have already been loaded");
    assert(_src->is_register(), "should have already been loaded");

    assert(_tmp->is_register(), "should be a temporary register");
  }

  LIR_Opr val() const { return _val; }
  LIR_Opr src() const { return _src; }

  LIR_Opr tmp() const { return _tmp; }
  LIR_Opr thread() const { return _thread; }

  bool gen_src_check() const { return _gen_src_check; }

  virtual void emit_code(LIR_Assembler* e);

  virtual void visit(LIR_OpVisitState* visitor) {
    visitor->do_slow_case();
    visitor->do_input(_val);
    visitor->do_input(_src);
    visitor->do_input(_thread);

    visitor->do_temp(_tmp);
  }

#ifndef PRODUCT
  virtual void print_name(outputStream* out) const { out->print("G1UnsafeGetObjSATBBarrierStub"); }
#endif // PRODUCT
};

class G1PostBarrierStub: public CodeStub {
 private:
  LIR_Opr _addr;

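A minimal, self-contained sketch of the idea behind the two G1PreBarrierStub constructors above: either the barrier loads the previous value of a reference field before it is overwritten, or the caller hands it a value that is already loaded. The Heap, Obj and satb_log names are stand-ins invented for this sketch only; the real stubs emit compiled code rather than C++.

// Sketch (not part of the changeset): SATB pre-barrier in its two modes.
#include <cstdio>
#include <vector>

struct Obj;                                   // a "heap object" in the toy model
struct Heap {
  bool marking_active = false;                // mirrors the marking-in-progress flag
  std::vector<Obj*> satb_log;                 // mirrors an SATB log buffer
};

struct Obj {
  Obj* field = nullptr;
};

// do_load == true case: the barrier itself loads the previous value of the
// field that is about to be overwritten, then records it if marking is active.
void store_with_pre_barrier(Heap& h, Obj* holder, Obj* new_val) {
  Obj* pre_val = holder->field;               // load of the previous value
  if (h.marking_active && pre_val != nullptr) {
    h.satb_log.push_back(pre_val);            // enqueue, as the slow-path stub would
  }
  holder->field = new_val;
}

// do_load == false case: the caller already has the previous value in hand
// (e.g. the result of Reference.get()), so only the recording step remains.
void record_pre_loaded_value(Heap& h, Obj* pre_val) {
  if (h.marking_active && pre_val != nullptr) {
    h.satb_log.push_back(pre_val);
  }
}

int main() {
  Heap h; h.marking_active = true;
  Obj a, b; a.field = &b;
  store_with_pre_barrier(h, &a, nullptr);     // b gets logged before being unlinked
  record_pre_loaded_value(h, a.field);        // field is now null: filtered, not logged
  std::printf("logged %zu value(s)\n", h.satb_log.size());
  return 0;
}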
@ -2913,6 +2913,46 @@ GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
      block()->set_end(end);
      break;
    }

  case vmIntrinsics::_Reference_get:
    {
      if (UseG1GC) {
        // With java.lang.ref.reference.get() we must go through the
        // intrinsic - when G1 is enabled - even when get() is the root
        // method of the compile so that, if necessary, the value in
        // the referent field of the reference object gets recorded by
        // the pre-barrier code.
        // Specifically, if G1 is enabled, the value in the referent
        // field is recorded by the G1 SATB pre barrier. This will
        // result in the referent being marked live and the reference
        // object removed from the list of discovered references during
        // reference processing.

        // Set up a stream so that appending instructions works properly.
        ciBytecodeStream s(scope->method());
        s.reset_to_bci(0);
        scope_data()->set_stream(&s);
        s.next();

        // setup the initial block state
        _block = start_block;
        _state = start_block->state()->copy_for_parsing();
        _last = start_block;
        load_local(objectType, 0);

        // Emit the intrinsic node.
        bool result = try_inline_intrinsics(scope->method());
        if (!result) BAILOUT("failed to inline intrinsic");
        method_return(apop());

        // connect the begin and end blocks and we're all done.
        BlockEnd* end = last()->as_BlockEnd();
        block()->set_end(end);
        break;
      }
      // Otherwise, fall thru
    }

  default:
    scope_data()->add_to_work_list(start_block);
    iterate_all_blocks();
@ -3150,6 +3190,15 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
    append_unsafe_CAS(callee);
    return true;

  case vmIntrinsics::_Reference_get:
    // It is only when G1 is enabled that we absolutely
    // need to use the intrinsic version of Reference.get()
    // so that the value in the referent field, if necessary,
    // can be registered by the pre-barrier code.
    if (!UseG1GC) return false;
    preserves_state = true;
    break;

  default : return false; // do not inline
  }
  // create intrinsic node

@ -596,7 +596,7 @@ void BlockBegin::substitute_sux(BlockBegin* old_sux, BlockBegin* new_sux) {
// of the inserted block, without recomputing the values of the other blocks
// in the CFG. Therefore the value of "depth_first_number" in BlockBegin becomes meaningless.
BlockBegin* BlockBegin::insert_block_between(BlockBegin* sux) {
  BlockBegin* new_sux = new BlockBegin(-99);
  BlockBegin* new_sux = new BlockBegin(end()->state()->bci());

  // mark this block (special treatment when block order is computed)
  new_sux->set(critical_edge_split_flag);

@ -1209,6 +1209,38 @@ void LIRGenerator::do_Return(Return* x) {
  set_no_result(x);
}

// Example: ref.get()
// Combination of LoadField and g1 pre-write barrier
void LIRGenerator::do_Reference_get(Intrinsic* x) {

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem reference(x->argument_at(0), this);
  reference.load_item();

  // need to perform the null check on the reference object
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  LIR_Address* referent_field_adr =
    new LIR_Address(reference.result(), referent_offset, T_OBJECT);

  LIR_Opr result = rlock_result(x);

  __ load(referent_field_adr, result, info);

  // Register the value in the referent field with the pre-barrier
  pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
              result /* pre_val */,
              false /* do_load */,
              false /* patch */,
              NULL /* info */);
}

// Example: object.getClass ()
void LIRGenerator::do_getClass(Intrinsic* x) {
@ -1351,13 +1383,14 @@ LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {

// Various barriers

void LIRGenerator::pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info) {
void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                               bool do_load, bool patch, CodeEmitInfo* info) {
  // Do the pre-write barrier, if any.
  switch (_bs->kind()) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_pre_barrier(addr_opr, patch, info);
      G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
@ -1398,9 +1431,8 @@ void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC

void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info) {
  if (G1DisablePreBarrier) return;

void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                                                     bool do_load, bool patch, CodeEmitInfo* info) {
  // First we test whether marking is in progress.
  BasicType flag_type;
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
@ -1419,26 +1451,40 @@ void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patc
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);

  LIR_PatchCode pre_val_patch_code =
    patch ? lir_patch_normal : lir_patch_none;

  LIR_Opr pre_val = new_register(T_OBJECT);

  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
  if (!addr_opr->is_address()) {
    assert(addr_opr->is_register(), "must be");
    addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch)
      pre_val_patch_code = lir_patch_normal;

    pre_val = new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
  } else {
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");
    assert(info == NULL, "sanity");

    slow = new G1PreBarrierStub(pre_val);
  }
  CodeStub* slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code,
                                        info);

  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());
}

void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  if (G1DisablePostBarrier) return;

  // If the "new_val" is a constant NULL, no barrier is necessary.
  if (new_val->is_constant() &&
      new_val->as_constant_ptr()->as_jobject() == NULL) return;
@ -1662,6 +1708,8 @@ void LIRGenerator::do_StoreField(StoreField* x) {
  if (is_oop) {
    // Do the pre-write barrier, if any.
    pre_barrier(LIR_OprFact::address(address),
                LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load*/,
                needs_patching,
                (info ? new CodeEmitInfo(info) : NULL));
  }
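A compact, runnable model of the store path do_StoreField sets up above: SATB pre-barrier before the store, card-table post-barrier after it. ToyHeap, kCardShift and the card layout are invented for this sketch and only mirror the shape of the real barriers, not their implementation.

// Sketch (not part of the changeset): ordering of pre- and post-barrier around a store.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

static const std::size_t kCardShift = 9;            // 512-byte cards (illustrative)

struct Node { Node* next = nullptr; };

struct ToyHeap {
  bool                 marking_active = false;
  std::vector<void*>   satb_log;                     // SATB log buffer model
  std::vector<uint8_t> cards;                        // card table model
  uintptr_t            base = 0;

  explicit ToyHeap(std::size_t bytes) : cards(bytes >> kCardShift, 0) {}

  void dirty_card(void* addr) {                      // post-barrier: remember the card
    cards[(reinterpret_cast<uintptr_t>(addr) - base) >> kCardShift] = 1;
  }
};

void store_oop_field(ToyHeap& h, Node* holder, Node* new_val) {
  // pre-barrier (do_load = true): load and log the value being overwritten
  Node* pre_val = holder->next;
  if (h.marking_active && pre_val != nullptr) h.satb_log.push_back(pre_val);

  holder->next = new_val;                            // the actual store

  h.dirty_card(&holder->next);                       // post-barrier: dirty the card
}

int main() {
  static Node arena[4];                              // pretend heap objects
  ToyHeap h(1 << 20);
  h.base = reinterpret_cast<uintptr_t>(arena);
  h.marking_active = true;
  arena[0].next = &arena[1];
  store_oop_field(h, &arena[0], &arena[2]);
  std::printf("logged=%zu dirty_cards=%d\n", h.satb_log.size(), (int)h.cards[0]);
  return 0;
}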
@ -2091,9 +2139,144 @@ void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
|
||||
off.load_item();
|
||||
src.load_item();
|
||||
|
||||
LIR_Opr reg = reg = rlock_result(x, x->basic_type());
|
||||
LIR_Opr reg = rlock_result(x, x->basic_type());
|
||||
|
||||
get_Object_unsafe(reg, src.result(), off.result(), type, x->is_volatile());
|
||||
|
||||
#ifndef SERIALGC
|
||||
// We might be reading the value of the referent field of a
|
||||
// Reference object in order to attach it back to the live
|
||||
// object graph. If G1 is enabled then we need to record
|
||||
// the value that is being returned in an SATB log buffer.
|
||||
//
|
||||
// We need to generate code similar to the following...
|
||||
//
|
||||
// if (offset == java_lang_ref_Reference::referent_offset) {
|
||||
// if (src != NULL) {
|
||||
// if (klass(src)->reference_type() != REF_NONE) {
|
||||
// pre_barrier(..., reg, ...);
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// The first non-constant check of either the offset or
|
||||
// the src operand will be done here; the remainder
|
||||
// will take place in the generated code stub.
|
||||
|
||||
if (UseG1GC && type == T_OBJECT) {
|
||||
bool gen_code_stub = true; // Assume we need to generate the slow code stub.
|
||||
bool gen_offset_check = true; // Assume the code stub has to generate the offset guard.
|
||||
bool gen_source_check = true; // Assume the code stub has to check the src object for null.
|
||||
|
||||
if (off.is_constant()) {
|
||||
jlong off_con = (off.type()->is_int() ?
|
||||
(jlong) off.get_jint_constant() :
|
||||
off.get_jlong_constant());
|
||||
|
||||
|
||||
if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
|
||||
// The constant offset is something other than referent_offset.
|
||||
// We can skip generating/checking the remaining guards and
|
||||
// skip generation of the code stub.
|
||||
gen_code_stub = false;
|
||||
} else {
|
||||
// The constant offset is the same as referent_offset -
|
||||
// we do not need to generate a runtime offset check.
|
||||
gen_offset_check = false;
|
||||
}
|
||||
}
|
||||
|
||||
// We don't need to generate stub if the source object is an array
|
||||
if (gen_code_stub && src.type()->is_array()) {
|
||||
gen_code_stub = false;
|
||||
}
|
||||
|
||||
if (gen_code_stub) {
|
||||
// We still need to continue with the checks.
|
||||
if (src.is_constant()) {
|
||||
ciObject* src_con = src.get_jobject_constant();
|
||||
|
||||
if (src_con->is_null_object()) {
|
||||
// The constant src object is null - We can skip
|
||||
// generating the code stub.
|
||||
gen_code_stub = false;
|
||||
} else {
|
||||
// Non-null constant source object. We still have to generate
|
||||
// the slow stub - but we don't need to generate the runtime
|
||||
// null object check.
|
||||
gen_source_check = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (gen_code_stub) {
|
||||
// Temporaries.
|
||||
LIR_Opr src_klass = new_register(T_OBJECT);
|
||||
|
||||
// Get the thread pointer for the pre-barrier
|
||||
LIR_Opr thread = getThreadPointer();
|
||||
|
||||
CodeStub* stub;
|
||||
|
||||
// We can generate one runtime check here. Let's start with
|
||||
// the offset check.
|
||||
if (gen_offset_check) {
|
||||
// if (offset == referent_offset) -> slow code stub
|
||||
// If offset is an int then we can do the comparison with the
|
||||
// referent_offset constant; otherwise we need to move
|
||||
// referent_offset into a temporary register and generate
|
||||
// a reg-reg compare.
|
||||
|
||||
LIR_Opr referent_off;
|
||||
|
||||
if (off.type()->is_int()) {
|
||||
referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
|
||||
} else {
|
||||
assert(off.type()->is_long(), "what else?");
|
||||
referent_off = new_register(T_LONG);
|
||||
__ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
|
||||
}
|
||||
|
||||
__ cmp(lir_cond_equal, off.result(), referent_off);
|
||||
|
||||
// Optionally generate "src == null" check.
|
||||
stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(),
|
||||
src_klass, thread,
|
||||
gen_source_check);
|
||||
|
||||
__ branch(lir_cond_equal, as_BasicType(off.type()), stub);
|
||||
} else {
|
||||
if (gen_source_check) {
|
||||
// offset is a const and equals referent offset
|
||||
// if (source != null) -> slow code stub
|
||||
__ cmp(lir_cond_notEqual, src.result(), LIR_OprFact::oopConst(NULL));
|
||||
|
||||
// Since we are generating the "if src == null" guard here,
|
||||
// there is no need to generate the "src == null" check again.
|
||||
stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(),
|
||||
src_klass, thread,
|
||||
false);
|
||||
|
||||
__ branch(lir_cond_notEqual, T_OBJECT, stub);
|
||||
} else {
|
||||
// We have statically determined that offset == referent_offset
|
||||
// && src != null so we unconditionally branch to code stub
|
||||
// to perform the guards and record reg in the SATB log buffer.
|
||||
|
||||
stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(),
|
||||
src_klass, thread,
|
||||
false);
|
||||
|
||||
__ branch(lir_cond_always, T_ILLEGAL, stub);
|
||||
}
|
||||
}
|
||||
|
||||
// Continuation point
|
||||
__ branch_destination(stub->continuation());
|
||||
}
|
||||
}
|
||||
#endif // SERIALGC
|
||||
|
||||
if (x->is_volatile() && os::is_MP()) __ membar_acquire();
|
||||
}
|
||||
|
||||
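The Unsafe.getObject code above folds away guards whose outcome is known at compile time. The sketch below restates those decisions as a small, self-contained function; Guards and plan_unsafe_get_object_guards are invented names, and the boolean inputs stand in for the LIRItem constant queries used in the real code.

// Sketch (not part of the changeset): which SATB guards are still needed at runtime.
#include <cstdio>

struct Guards {
  bool emit_stub;                  // gen_code_stub
  bool runtime_offset_check;       // gen_offset_check
  bool runtime_src_null_check;     // gen_source_check
};

Guards plan_unsafe_get_object_guards(bool off_is_constant, long off_value,
                                     long referent_offset,
                                     bool src_is_constant, bool src_is_null,
                                     bool src_is_array) {
  Guards g{true, true, true};                                // assume everything is needed
  if (off_is_constant) {
    if (off_value != referent_offset) g.emit_stub = false;   // can never be a referent load
    else                              g.runtime_offset_check = false;
  }
  if (g.emit_stub && src_is_array)    g.emit_stub = false;   // arrays are never References
  if (g.emit_stub && src_is_constant) {
    if (src_is_null) g.emit_stub = false;                    // null source: nothing to record
    else             g.runtime_src_null_check = false;       // known non-null source
  }
  return g;
}

int main() {
  // Constant offset equal to referent_offset, unknown source object:
  Guards g = plan_unsafe_get_object_guards(true, 16, 16, false, false, false);
  std::printf("stub=%d offset_check=%d src_check=%d\n",
              g.emit_stub, g.runtime_offset_check, g.runtime_src_null_check);
  return 0;
}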
@ -2759,6 +2942,10 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) {
    do_AttemptUpdate(x);
    break;

  case vmIntrinsics::_Reference_get:
    do_Reference_get(x);
    break;

  default: ShouldNotReachHere(); break;
  }
}

@ -246,6 +246,7 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
  void do_AttemptUpdate(Intrinsic* x);
  void do_NIOCheckIndex(Intrinsic* x);
  void do_FPIntrinsics(Intrinsic* x);
  void do_Reference_get(Intrinsic* x);

  void do_UnsafePrefetch(UnsafePrefetch* x, bool is_store);

@ -260,13 +261,14 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {

  // generic interface

  void pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info);
  void pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val, bool do_load, bool patch, CodeEmitInfo* info);
  void post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);

  // specific implementations
  // pre barriers

  void G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info);
  void G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                                         bool do_load, bool patch, CodeEmitInfo* info);

  // post barriers

@ -2196,11 +2196,12 @@ typeArrayHandle ClassFileParser::sort_methods(objArrayHandle methods,
                                              TRAPS) {
  typeArrayHandle nullHandle;
  int length = methods()->length();
  // If JVMTI original method ordering is enabled we have to
  // If JVMTI original method ordering or sharing is enabled we have to
  // remember the original class file ordering.
  // We temporarily use the vtable_index field in the methodOop to store the
  // class file index, so we can read in after calling qsort.
  if (JvmtiExport::can_maintain_original_method_order()) {
  // Put the method ordering in the shared archive.
  if (JvmtiExport::can_maintain_original_method_order() || DumpSharedSpaces) {
    for (int index = 0; index < length; index++) {
      methodOop m = methodOop(methods->obj_at(index));
      assert(!m->valid_vtable_index(), "vtable index should not be set");
@ -2214,8 +2215,9 @@ typeArrayHandle ClassFileParser::sort_methods(objArrayHandle methods,
                               methods_parameter_annotations(),
                               methods_default_annotations());

  // If JVMTI original method ordering is enabled construct int array remembering the original ordering
  if (JvmtiExport::can_maintain_original_method_order()) {
  // If JVMTI original method ordering or sharing is enabled construct int
  // array remembering the original ordering
  if (JvmtiExport::can_maintain_original_method_order() || DumpSharedSpaces) {
    typeArrayOop new_ordering = oopFactory::new_permanent_intArray(length, CHECK_(nullHandle));
    typeArrayHandle method_ordering(THREAD, new_ordering);
    for (int index = 0; index < length; index++) {

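The parser change above remembers the class-file order of methods before they are sorted, so the original ordering can be reconstructed for JVMTI and for the shared archive. A minimal model of that bookkeeping, with std::string standing in for methods, is sketched below; the container layout is invented for illustration.

// Sketch (not part of the changeset): record original indices before sorting.
#include <algorithm>
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

int main() {
  std::vector<std::string> methods = {"toString", "clone", "hashCode"};

  // Tag each method with its class-file index (the role the vtable_index field
  // plays temporarily in the real code), then sort by name.
  std::vector<std::pair<std::string, int> > tagged;
  for (int i = 0; i < (int)methods.size(); i++) tagged.push_back(std::make_pair(methods[i], i));
  std::sort(tagged.begin(), tagged.end());

  // method_ordering[i] = original class-file index of the i-th sorted method.
  std::vector<int> method_ordering;
  for (size_t i = 0; i < tagged.size(); i++) method_ordering.push_back(tagged[i].second);

  for (size_t i = 0; i < tagged.size(); i++)
    std::printf("%zu: %s (was %d)\n", i, tagged[i].first.c_str(), method_ordering[i]);
  return 0;
}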
@ -1255,6 +1255,16 @@ instanceKlassHandle SystemDictionary::load_shared_class(
      methodHandle m(THREAD, methodOop(methods->obj_at(index2)));
      m()->link_method(m, CHECK_(nh));
    }
    if (JvmtiExport::has_redefined_a_class()) {
      // Reinitialize vtable because RedefineClasses may have changed some
      // entries in this vtable for super classes so the CDS vtable might
      // point to old or obsolete entries. RedefineClasses doesn't fix up
      // vtables in the shared system dictionary, only the main one.
      // It also redefines the itable too so fix that too.
      ResourceMark rm(THREAD);
      ik->vtable()->initialize_vtable(false, CHECK_(nh));
      ik->itable()->initialize_itable(false, CHECK_(nh));
    }
  }

  if (TraceClassLoading) {

@ -678,6 +678,10 @@
  do_intrinsic(_checkIndex, java_nio_Buffer, checkIndex_name, int_int_signature, F_R) \
   do_name( checkIndex_name, "checkIndex") \
  \
  /* java/lang/ref/Reference */ \
  do_intrinsic(_Reference_get, java_lang_ref_Reference, get_name, void_object_signature, F_R) \
  \
  \
  do_class(sun_misc_AtomicLongCSImpl, "sun/misc/AtomicLongCSImpl") \
  do_intrinsic(_get_AtomicLong, sun_misc_AtomicLongCSImpl, get_name, void_long_signature, F_R) \
  /* (symbols get_name and void_long_signature defined above) */ \

@ -172,7 +172,7 @@ class ConcurrentG1Refine: public CHeapObj {

  // hash a given key (index of card_ptr) with the specified size
  static unsigned int hash(size_t key, size_t size) {
    return (unsigned int) key % size;
    return (unsigned int) (key % size);
  }

  // hash a given key (index of card_ptr)
@ -180,11 +180,11 @@ class ConcurrentG1Refine: public CHeapObj {
    return hash(key, _n_card_counts);
  }

  unsigned ptr_2_card_num(jbyte* card_ptr) {
    return (unsigned) (card_ptr - _ct_bot);
  unsigned int ptr_2_card_num(jbyte* card_ptr) {
    return (unsigned int) (card_ptr - _ct_bot);
  }

  jbyte* card_num_2_ptr(unsigned card_num) {
  jbyte* card_num_2_ptr(unsigned int card_num) {
    return (jbyte*) (_ct_bot + card_num);
  }

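The ConcurrentG1Refine hunk above makes the card-number conversions consistently use unsigned int. The self-contained sketch below shows the arithmetic involved: a card pointer and a card number convert back and forth through the card-table base, and counts hash by index modulo table size. The table sizes and variable names here are invented for the sketch.

// Sketch (not part of the changeset): card pointer <-> card number arithmetic.
#include <cassert>
#include <cstddef>
#include <cstdio>

typedef signed char jbyte;                    // matches the jbyte card entries

static jbyte  card_table[1024];
static jbyte* ct_bot = card_table;            // stands in for _ct_bot

static unsigned int ptr_2_card_num(jbyte* card_ptr) {
  return (unsigned int)(card_ptr - ct_bot);
}

static jbyte* card_num_2_ptr(unsigned int card_num) {
  return ct_bot + card_num;
}

static unsigned int hash_card(std::size_t key, std::size_t table_size) {
  return (unsigned int)(key % table_size);    // same shape as ConcurrentG1Refine::hash
}

int main() {
  jbyte* some_card = &card_table[37];
  unsigned int num = ptr_2_card_num(some_card);
  assert(card_num_2_ptr(num) == some_card);   // the two conversions are inverses
  std::printf("card %u hashes to bucket %u\n", num, hash_card(num, 256));
  return 0;
}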
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -47,7 +47,9 @@ G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap,
|
||||
|
||||
|
||||
void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
|
||||
assert(pre_val->is_oop_or_null(true), "Error");
|
||||
// Nulls should have been already filtered.
|
||||
assert(pre_val->is_oop(true), "Error");
|
||||
|
||||
if (!JavaThread::satb_mark_queue_set().is_active()) return;
|
||||
Thread* thr = Thread::current();
|
||||
if (thr->is_Java_thread()) {
|
||||
@ -59,20 +61,6 @@ void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
|
||||
}
|
||||
}
|
||||
|
||||
// When we know the current java thread:
|
||||
template <class T> void
|
||||
G1SATBCardTableModRefBS::write_ref_field_pre_static(T* field,
|
||||
oop new_val,
|
||||
JavaThread* jt) {
|
||||
if (!JavaThread::satb_mark_queue_set().is_active()) return;
|
||||
T heap_oop = oopDesc::load_heap_oop(field);
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop pre_val = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
assert(pre_val->is_oop(true /* ignore mark word */), "Error");
|
||||
jt->satb_mark_queue().enqueue(pre_val);
|
||||
}
|
||||
}
|
||||
|
||||
template <class T> void
|
||||
G1SATBCardTableModRefBS::write_ref_array_pre_work(T* dst, int count) {
|
||||
if (!JavaThread::satb_mark_queue_set().is_active()) return;
|
||||
|
@ -37,12 +37,11 @@ class DirtyCardQueueSet;
|
||||
// snapshot-at-the-beginning marking.
|
||||
|
||||
class G1SATBCardTableModRefBS: public CardTableModRefBSForCTRS {
|
||||
private:
|
||||
public:
|
||||
// Add "pre_val" to a set of objects that may have been disconnected from the
|
||||
// pre-marking object graph.
|
||||
static void enqueue(oop pre_val);
|
||||
|
||||
public:
|
||||
G1SATBCardTableModRefBS(MemRegion whole_heap,
|
||||
int max_covered_regions);
|
||||
|
||||
@ -61,10 +60,6 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
// When we know the current java thread:
|
||||
template <class T> static void write_ref_field_pre_static(T* field, oop newVal,
|
||||
JavaThread* jt);
|
||||
|
||||
// We export this to make it available in cases where the static
|
||||
// type of the barrier set is known. Note that it is non-virtual.
|
||||
template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {
|
||||
|
@ -89,13 +89,9 @@
|
||||
"The number of discovered reference objects to process before " \
|
||||
"draining concurrent marking work queues.") \
|
||||
\
|
||||
experimental(bool, G1UseConcMarkReferenceProcessing, false, \
|
||||
experimental(bool, G1UseConcMarkReferenceProcessing, true, \
|
||||
"If true, enable reference discovery during concurrent " \
|
||||
"marking and reference processing at the end of remark " \
|
||||
"(unsafe).") \
|
||||
\
|
||||
develop(bool, G1SATBBarrierPrintNullPreVals, false, \
|
||||
"If true, count frac of ptr writes with null pre-vals.") \
|
||||
"marking and reference processing at the end of remark.") \
|
||||
\
|
||||
product(intx, G1SATBBufferSize, 1*K, \
|
||||
"Number of entries in an SATB log buffer.") \
|
||||
@ -150,12 +146,6 @@
|
||||
develop(bool, G1PrintParCleanupStats, false, \
|
||||
"When true, print extra stats about parallel cleanup.") \
|
||||
\
|
||||
develop(bool, G1DisablePreBarrier, false, \
|
||||
"Disable generation of pre-barrier (i.e., marking barrier) ") \
|
||||
\
|
||||
develop(bool, G1DisablePostBarrier, false, \
|
||||
"Disable generation of post-barrier (i.e., RS barrier) ") \
|
||||
\
|
||||
product(intx, G1UpdateBufferSize, 256, \
|
||||
"Size of an update buffer") \
|
||||
\
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2007, 2010 Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2007, 2011 Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -36,7 +36,6 @@
|
||||
void CardTableModRefBS::par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
|
||||
DirtyCardToOopClosure* dcto_cl,
|
||||
MemRegionClosure* cl,
|
||||
bool clear,
|
||||
int n_threads) {
|
||||
if (n_threads > 0) {
|
||||
assert((n_threads == 1 && ParallelGCThreads == 0) ||
|
||||
@ -57,7 +56,7 @@ void CardTableModRefBS::par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
|
||||
|
||||
int stride = 0;
|
||||
while (!pst->is_task_claimed(/* reference */ stride)) {
|
||||
process_stride(sp, mr, stride, n_strides, dcto_cl, cl, clear,
|
||||
process_stride(sp, mr, stride, n_strides, dcto_cl, cl,
|
||||
lowest_non_clean,
|
||||
lowest_non_clean_base_chunk_index,
|
||||
lowest_non_clean_chunk_size);
|
||||
@ -83,7 +82,6 @@ process_stride(Space* sp,
|
||||
jint stride, int n_strides,
|
||||
DirtyCardToOopClosure* dcto_cl,
|
||||
MemRegionClosure* cl,
|
||||
bool clear,
|
||||
jbyte** lowest_non_clean,
|
||||
uintptr_t lowest_non_clean_base_chunk_index,
|
||||
size_t lowest_non_clean_chunk_size) {
|
||||
@ -129,7 +127,7 @@ process_stride(Space* sp,
|
||||
lowest_non_clean_base_chunk_index,
|
||||
lowest_non_clean_chunk_size);
|
||||
|
||||
non_clean_card_iterate_work(chunk_mr, cl, clear);
|
||||
non_clean_card_iterate_work(chunk_mr, cl);
|
||||
|
||||
// Find the next chunk of the stride.
|
||||
chunk_card_start += CardsPerStrideChunk * n_strides;
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -176,10 +176,6 @@ void PSOldGen::compact() {
|
||||
object_mark_sweep()->compact(ZapUnusedHeapArea);
|
||||
}
|
||||
|
||||
void PSOldGen::move_and_update(ParCompactionManager* cm) {
|
||||
PSParallelCompact::move_and_update(cm, PSParallelCompact::old_space_id);
|
||||
}
|
||||
|
||||
size_t PSOldGen::contiguous_available() const {
|
||||
return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -143,9 +143,6 @@ class PSOldGen : public CHeapObj {
|
||||
void adjust_pointers();
|
||||
void compact();
|
||||
|
||||
// Parallel old
|
||||
virtual void move_and_update(ParCompactionManager* cm);
|
||||
|
||||
// Size info
|
||||
size_t capacity_in_bytes() const { return object_space()->capacity_in_bytes(); }
|
||||
size_t used_in_bytes() const { return object_space()->used_in_bytes(); }
|
||||
|
@ -2104,11 +2104,7 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
|
||||
// klasses are used in the update of an object?
|
||||
compact_perm(vmthread_cm);
|
||||
|
||||
if (UseParallelOldGCCompacting) {
|
||||
compact();
|
||||
} else {
|
||||
compact_serial(vmthread_cm);
|
||||
}
|
||||
compact();
|
||||
|
||||
// Reset the mark bitmap, summary data, and do other bookkeeping. Must be
|
||||
// done before resizing.
|
||||
@ -2582,18 +2578,16 @@ void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
|
||||
// each thread?
|
||||
if (total_dense_prefix_regions > 0) {
|
||||
uint tasks_for_dense_prefix = 1;
|
||||
if (UseParallelDensePrefixUpdate) {
|
||||
if (total_dense_prefix_regions <=
|
||||
(parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
|
||||
// Don't over partition. This assumes that
|
||||
// PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
|
||||
// so there are not many regions to process.
|
||||
tasks_for_dense_prefix = parallel_gc_threads;
|
||||
} else {
|
||||
// Over partition
|
||||
tasks_for_dense_prefix = parallel_gc_threads *
|
||||
PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
|
||||
}
|
||||
if (total_dense_prefix_regions <=
|
||||
(parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
|
||||
// Don't over partition. This assumes that
|
||||
// PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
|
||||
// so there are not many regions to process.
|
||||
tasks_for_dense_prefix = parallel_gc_threads;
|
||||
} else {
|
||||
// Over partition
|
||||
tasks_for_dense_prefix = parallel_gc_threads *
|
||||
PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
|
||||
}
|
||||
size_t regions_per_thread = total_dense_prefix_regions /
|
||||
tasks_for_dense_prefix;
|
||||
@ -2733,21 +2727,6 @@ void PSParallelCompact::verify_complete(SpaceId space_id) {
|
||||
}
|
||||
#endif // #ifdef ASSERT
|
||||
|
||||
void PSParallelCompact::compact_serial(ParCompactionManager* cm) {
|
||||
EventMark m("5 compact serial");
|
||||
TraceTime tm("compact serial", print_phases(), true, gclog_or_tty);
|
||||
|
||||
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
|
||||
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
|
||||
|
||||
PSYoungGen* young_gen = heap->young_gen();
|
||||
PSOldGen* old_gen = heap->old_gen();
|
||||
|
||||
old_gen->start_array()->reset();
|
||||
old_gen->move_and_update(cm);
|
||||
young_gen->move_and_update(cm);
|
||||
}
|
||||
|
||||
void
|
||||
PSParallelCompact::follow_weak_klass_links() {
|
||||
// All klasses on the revisit stack are marked at this point.
|
||||
@ -3530,11 +3509,8 @@ PSParallelCompact::VerifyUpdateClosure::do_addr(HeapWord* addr, size_t words) {
|
||||
"Object liveness is wrong.");
|
||||
return ParMarkBitMap::incomplete;
|
||||
}
|
||||
assert(UseParallelOldGCDensePrefix ||
|
||||
(HeapMaximumCompactionInterval > 1) ||
|
||||
(MarkSweepAlwaysCompactCount > 1) ||
|
||||
(forwarding_ptr == new_pointer),
|
||||
"Calculation of new location is incorrect");
|
||||
assert(HeapMaximumCompactionInterval > 1 || MarkSweepAlwaysCompactCount > 1 ||
|
||||
forwarding_ptr == new_pointer, "new location is incorrect");
|
||||
return ParMarkBitMap::incomplete;
|
||||
}
|
||||
|
||||
|
@ -1027,9 +1027,6 @@ class PSParallelCompact : AllStatic {
|
||||
ParallelTaskTerminator* terminator_ptr,
|
||||
uint parallel_gc_threads);
|
||||
|
||||
// For debugging only - compacts the old gen serially
|
||||
static void compact_serial(ParCompactionManager* cm);
|
||||
|
||||
// If objects are left in eden after a collection, try to move the boundary
|
||||
// and absorb them into the old gen. Returns true if eden was emptied.
|
||||
static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -121,12 +121,6 @@ void PSPermGen::compute_new_size(size_t used_before_collection) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
void PSPermGen::move_and_update(ParCompactionManager* cm) {
|
||||
PSParallelCompact::move_and_update(cm, PSParallelCompact::perm_space_id);
|
||||
}
|
||||
|
||||
void PSPermGen::precompact() {
|
||||
// Reset start array first.
|
||||
_start_array.reset();
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -51,9 +51,6 @@ class PSPermGen : public PSOldGen {
|
||||
// MarkSweep code
|
||||
virtual void precompact();
|
||||
|
||||
// Parallel old
|
||||
virtual void move_and_update(ParCompactionManager* cm);
|
||||
|
||||
virtual const char* name() const { return "PSPermGen"; }
|
||||
};
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -792,12 +792,6 @@ void PSYoungGen::compact() {
|
||||
to_mark_sweep()->compact(false);
|
||||
}
|
||||
|
||||
void PSYoungGen::move_and_update(ParCompactionManager* cm) {
|
||||
PSParallelCompact::move_and_update(cm, PSParallelCompact::eden_space_id);
|
||||
PSParallelCompact::move_and_update(cm, PSParallelCompact::from_space_id);
|
||||
PSParallelCompact::move_and_update(cm, PSParallelCompact::to_space_id);
|
||||
}
|
||||
|
||||
void PSYoungGen::print() const { print_on(tty); }
|
||||
void PSYoungGen::print_on(outputStream* st) const {
|
||||
st->print(" %-15s", "PSYoungGen");
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -127,9 +127,6 @@ class PSYoungGen : public CHeapObj {
|
||||
void adjust_pointers();
|
||||
void compact();
|
||||
|
||||
// Parallel Old
|
||||
void move_and_update(ParCompactionManager* cm);
|
||||
|
||||
// Called during/after gc
|
||||
void swap_spaces();
|
||||
|
||||
|
@ -76,7 +76,7 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
|
||||
_beforeSweep = 0;
|
||||
_coalBirths = 0;
|
||||
_coalDeaths = 0;
|
||||
_splitBirths = split_birth? 1 : 0;
|
||||
_splitBirths = (split_birth ? 1 : 0);
|
||||
_splitDeaths = 0;
|
||||
_returnedBytes = 0;
|
||||
}
|
||||
|
@ -104,6 +104,7 @@ class AbstractInterpreter: AllStatic {
|
||||
java_lang_math_sqrt, // implementation of java.lang.Math.sqrt (x)
|
||||
java_lang_math_log, // implementation of java.lang.Math.log (x)
|
||||
java_lang_math_log10, // implementation of java.lang.Math.log10 (x)
|
||||
java_lang_ref_reference_get, // implementation of java.lang.ref.Reference.get()
|
||||
number_of_method_entries,
|
||||
invalid = -1
|
||||
};
|
||||
@ -140,7 +141,7 @@ class AbstractInterpreter: AllStatic {
|
||||
// Method activation
|
||||
static MethodKind method_kind(methodHandle m);
|
||||
static address entry_for_kind(MethodKind k) { assert(0 <= k && k < number_of_method_entries, "illegal kind"); return _entry_table[k]; }
|
||||
static address entry_for_method(methodHandle m) { return _entry_table[method_kind(m)]; }
|
||||
static address entry_for_method(methodHandle m) { return entry_for_kind(method_kind(m)); }
|
||||
|
||||
static void print_method_kind(MethodKind kind) PRODUCT_RETURN;
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -125,6 +125,7 @@ void CppInterpreterGenerator::generate_all() {
|
||||
method_entry(java_lang_math_sqrt );
|
||||
method_entry(java_lang_math_log );
|
||||
method_entry(java_lang_math_log10 );
|
||||
method_entry(java_lang_ref_reference_get);
|
||||
Interpreter::_native_entry_begin = Interpreter::code()->code_end();
|
||||
method_entry(native);
|
||||
method_entry(native_synchronized);
|
||||
|
@ -208,12 +208,6 @@ AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m)
|
||||
return empty;
|
||||
}
|
||||
|
||||
// Accessor method?
|
||||
if (m->is_accessor()) {
|
||||
assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1");
|
||||
return accessor;
|
||||
}
|
||||
|
||||
// Special intrinsic method?
|
||||
// Note: This test must come _after_ the test for native methods,
|
||||
// otherwise we will run into problems with JDK 1.2, see also
|
||||
@ -227,6 +221,15 @@ AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m)
|
||||
case vmIntrinsics::_dsqrt : return java_lang_math_sqrt ;
|
||||
case vmIntrinsics::_dlog : return java_lang_math_log ;
|
||||
case vmIntrinsics::_dlog10: return java_lang_math_log10;
|
||||
|
||||
case vmIntrinsics::_Reference_get:
|
||||
return java_lang_ref_reference_get;
|
||||
}
|
||||
|
||||
// Accessor method?
|
||||
if (m->is_accessor()) {
|
||||
assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1");
|
||||
return accessor;
|
||||
}
|
||||
|
||||
// Note: for now: zero locals for all non-empty methods
|
||||
|
@ -372,6 +372,7 @@ void TemplateInterpreterGenerator::generate_all() {
|
||||
method_entry(java_lang_math_sqrt )
|
||||
method_entry(java_lang_math_log )
|
||||
method_entry(java_lang_math_log10)
|
||||
method_entry(java_lang_ref_reference_get)
|
||||
|
||||
// all native method kinds (must be one contiguous block)
|
||||
Interpreter::_native_entry_begin = Interpreter::code()->code_end();
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -459,18 +459,17 @@ bool CardTableModRefBS::mark_card_deferred(size_t card_index) {
|
||||
void CardTableModRefBS::non_clean_card_iterate(Space* sp,
|
||||
MemRegion mr,
|
||||
DirtyCardToOopClosure* dcto_cl,
|
||||
MemRegionClosure* cl,
|
||||
bool clear) {
|
||||
MemRegionClosure* cl) {
|
||||
if (!mr.is_empty()) {
|
||||
int n_threads = SharedHeap::heap()->n_par_threads();
|
||||
if (n_threads > 0) {
|
||||
#ifndef SERIALGC
|
||||
par_non_clean_card_iterate_work(sp, mr, dcto_cl, cl, clear, n_threads);
|
||||
par_non_clean_card_iterate_work(sp, mr, dcto_cl, cl, n_threads);
|
||||
#else // SERIALGC
|
||||
fatal("Parallel gc not supported here.");
|
||||
#endif // SERIALGC
|
||||
} else {
|
||||
non_clean_card_iterate_work(mr, cl, clear);
|
||||
non_clean_card_iterate_work(mr, cl);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -481,10 +480,7 @@ void CardTableModRefBS::non_clean_card_iterate(Space* sp,
|
||||
// cards (and miss those marked precleaned). In that sense,
|
||||
// the name precleaned is currently somewhat of a misnomer.
|
||||
void CardTableModRefBS::non_clean_card_iterate_work(MemRegion mr,
|
||||
MemRegionClosure* cl,
|
||||
bool clear) {
|
||||
// Figure out whether we have to worry about parallelism.
|
||||
bool is_par = (SharedHeap::heap()->n_par_threads() > 1);
|
||||
MemRegionClosure* cl) {
|
||||
for (int i = 0; i < _cur_covered_regions; i++) {
|
||||
MemRegion mri = mr.intersection(_covered[i]);
|
||||
if (mri.word_size() > 0) {
|
||||
@ -506,22 +502,6 @@ void CardTableModRefBS::non_clean_card_iterate_work(MemRegion mr,
|
||||
MemRegion cur_cards(addr_for(cur_entry),
|
||||
non_clean_cards * card_size_in_words);
|
||||
MemRegion dirty_region = cur_cards.intersection(mri);
|
||||
if (clear) {
|
||||
for (size_t i = 0; i < non_clean_cards; i++) {
|
||||
// Clean the dirty cards (but leave the other non-clean
|
||||
// alone.) If parallel, do the cleaning atomically.
|
||||
jbyte cur_entry_val = cur_entry[i];
|
||||
if (card_is_dirty_wrt_gen_iter(cur_entry_val)) {
|
||||
if (is_par) {
|
||||
jbyte res = Atomic::cmpxchg(clean_card, &cur_entry[i], cur_entry_val);
|
||||
assert(res != clean_card,
|
||||
"Dirty card mysteriously cleaned");
|
||||
} else {
|
||||
cur_entry[i] = clean_card;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
cl->do_MemRegion(dirty_region);
|
||||
}
|
||||
cur_entry = next_entry;
|
||||
@ -530,22 +510,6 @@ void CardTableModRefBS::non_clean_card_iterate_work(MemRegion mr,
|
||||
}
|
||||
}
|
||||
|
||||
void CardTableModRefBS::mod_oop_in_space_iterate(Space* sp,
|
||||
OopClosure* cl,
|
||||
bool clear,
|
||||
bool before_save_marks) {
|
||||
// Note that dcto_cl is resource-allocated, so there is no
|
||||
// corresponding "delete".
|
||||
DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision());
|
||||
MemRegion used_mr;
|
||||
if (before_save_marks) {
|
||||
used_mr = sp->used_region_at_save_marks();
|
||||
} else {
|
||||
used_mr = sp->used_region();
|
||||
}
|
||||
non_clean_card_iterate(sp, used_mr, dcto_cl, dcto_cl, clear);
|
||||
}
|
||||
|
||||
void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
|
||||
assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
|
||||
assert((HeapWord*)align_size_up ((uintptr_t)mr.end(), HeapWordSize) == mr.end(), "Unaligned end" );
|
||||
@ -593,9 +557,8 @@ void CardTableModRefBS::dirty(MemRegion mr) {
|
||||
memset(first, dirty_card, last-first);
|
||||
}
|
||||
|
||||
// NOTES:
|
||||
// (1) Unlike mod_oop_in_space_iterate() above, dirty_card_iterate()
|
||||
// iterates over dirty cards ranges in increasing address order.
|
||||
// Unlike several other card table methods, dirty_card_iterate()
|
||||
// iterates over dirty cards ranges in increasing address order.
|
||||
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
|
||||
MemRegionClosure* cl) {
|
||||
for (int i = 0; i < _cur_covered_regions; i++) {
|
||||
@ -698,7 +661,7 @@ public:
|
||||
|
||||
void CardTableModRefBS::verify_clean_region(MemRegion mr) {
|
||||
GuaranteeNotModClosure blk(this);
|
||||
non_clean_card_iterate_work(mr, &blk, false);
|
||||
non_clean_card_iterate_work(mr, &blk);
|
||||
}
|
||||
|
||||
// To verify a MemRegion is entirely dirty this closure is passed to
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -171,17 +171,14 @@ class CardTableModRefBS: public ModRefBarrierSet {
|
||||
// mode if worker threads are available.
|
||||
void non_clean_card_iterate(Space* sp, MemRegion mr,
|
||||
DirtyCardToOopClosure* dcto_cl,
|
||||
MemRegionClosure* cl,
|
||||
bool clear);
|
||||
MemRegionClosure* cl);
|
||||
|
||||
// Utility function used to implement the other versions below.
|
||||
void non_clean_card_iterate_work(MemRegion mr, MemRegionClosure* cl,
|
||||
bool clear);
|
||||
void non_clean_card_iterate_work(MemRegion mr, MemRegionClosure* cl);
|
||||
|
||||
void par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
|
||||
DirtyCardToOopClosure* dcto_cl,
|
||||
MemRegionClosure* cl,
|
||||
bool clear,
|
||||
int n_threads);
|
||||
|
||||
// Dirty the bytes corresponding to "mr" (not all of which must be
|
||||
@ -241,7 +238,6 @@ class CardTableModRefBS: public ModRefBarrierSet {
|
||||
jint stride, int n_strides,
|
||||
DirtyCardToOopClosure* dcto_cl,
|
||||
MemRegionClosure* cl,
|
||||
bool clear,
|
||||
jbyte** lowest_non_clean,
|
||||
uintptr_t lowest_non_clean_base_chunk_index,
|
||||
size_t lowest_non_clean_chunk_size);
|
||||
@ -402,9 +398,6 @@ public:
|
||||
virtual void invalidate(MemRegion mr, bool whole_heap = false);
|
||||
void clear(MemRegion mr);
|
||||
void dirty(MemRegion mr);
|
||||
void mod_oop_in_space_iterate(Space* sp, OopClosure* cl,
|
||||
bool clear = false,
|
||||
bool before_save_marks = false);
|
||||
|
||||
// *** Card-table-RemSet-specific things.
|
||||
|
||||
@ -415,18 +408,15 @@ public:
|
||||
// *decreasing* address order. (This order aids with imprecise card
|
||||
// marking, where a dirty card may cause scanning, and summarization
|
||||
// marking, of objects that extend onto subsequent cards.)
|
||||
// If "clear" is true, the card is (conceptually) marked unmodified before
|
||||
// applying the closure.
|
||||
void mod_card_iterate(MemRegionClosure* cl, bool clear = false) {
|
||||
non_clean_card_iterate_work(_whole_heap, cl, clear);
|
||||
void mod_card_iterate(MemRegionClosure* cl) {
|
||||
non_clean_card_iterate_work(_whole_heap, cl);
|
||||
}
|
||||
|
||||
// Like the "mod_cards_iterate" above, except only invokes the closure
|
||||
// for cards within the MemRegion "mr" (which is required to be
|
||||
// card-aligned and sized.)
|
||||
void mod_card_iterate(MemRegion mr, MemRegionClosure* cl,
|
||||
bool clear = false) {
|
||||
non_clean_card_iterate_work(mr, cl, clear);
|
||||
void mod_card_iterate(MemRegion mr, MemRegionClosure* cl) {
|
||||
non_clean_card_iterate_work(mr, cl);
|
||||
}
|
||||
|
||||
static uintx ct_max_alignment_constraint();
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -247,7 +247,7 @@ void CardTableRS::younger_refs_in_space_iterate(Space* sp,
|
||||
ClearNoncleanCardWrapper clear_cl(dcto_cl, this);
|
||||
|
||||
_ct_bs->non_clean_card_iterate(sp, sp->used_region_at_save_marks(),
|
||||
dcto_cl, &clear_cl, false);
|
||||
dcto_cl, &clear_cl);
|
||||
}
|
||||
|
||||
void CardTableRS::clear_into_younger(Generation* gen, bool clear_perm) {
|
||||
|
@ -623,24 +623,48 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
// Itable indices are calculated based on methods array order
|
||||
// (see klassItable::compute_itable_index()). Must reinitialize
|
||||
// Vtable and Itable indices are calculated based on methods array
|
||||
// order (see klassItable::compute_itable_index()). Must reinitialize
|
||||
// after ALL methods of ALL classes have been reordered.
|
||||
// We assume that since checkconstraints is false, this method
|
||||
// cannot throw an exception. An exception here would be
|
||||
// problematic since this is the VMThread, not a JavaThread.
|
||||
|
||||
class ReinitializeItables: public ObjectClosure {
|
||||
class ReinitializeTables: public ObjectClosure {
|
||||
private:
|
||||
Thread* _thread;
|
||||
|
||||
public:
|
||||
ReinitializeItables(Thread* thread) : _thread(thread) {}
|
||||
ReinitializeTables(Thread* thread) : _thread(thread) {}
|
||||
|
||||
// Initialize super vtable first, check if already initialized to avoid
|
||||
// quadradic behavior. The vtable is cleared in remove_unshareable_info.
|
||||
void reinitialize_vtables(klassOop k) {
|
||||
if (k->blueprint()->oop_is_instanceKlass()) {
|
||||
instanceKlass* ik = instanceKlass::cast(k);
|
||||
if (ik->vtable()->is_initialized()) return;
|
||||
if (ik->super() != NULL) {
|
||||
reinitialize_vtables(ik->super());
|
||||
}
|
||||
ik->vtable()->initialize_vtable(false, _thread);
|
||||
}
|
||||
}
|
||||
|
||||
void do_object(oop obj) {
|
||||
if (obj->blueprint()->oop_is_instanceKlass()) {
|
||||
instanceKlass* ik = instanceKlass::cast((klassOop)obj);
|
||||
ResourceMark rm(_thread);
|
||||
ik->itable()->initialize_itable(false, _thread);
|
||||
reinitialize_vtables((klassOop)obj);
|
||||
#ifdef ASSERT
|
||||
ik->vtable()->verify(tty, true);
|
||||
#endif // ASSERT
|
||||
} else if (obj->blueprint()->oop_is_arrayKlass()) {
|
||||
// The vtable for array klasses are that of its super class,
|
||||
// ie. java.lang.Object.
|
||||
arrayKlass* ak = arrayKlass::cast((klassOop)obj);
|
||||
if (ak->vtable()->is_initialized()) return;
|
||||
ak->vtable()->initialize_vtable(false, _thread);
|
||||
}
|
||||
}
|
||||
};
|
||||
@ -1205,9 +1229,9 @@ public:
|
||||
gen->ro_space()->object_iterate(&sort);
|
||||
gen->rw_space()->object_iterate(&sort);
|
||||
|
||||
ReinitializeItables reinit_itables(THREAD);
|
||||
gen->ro_space()->object_iterate(&reinit_itables);
|
||||
gen->rw_space()->object_iterate(&reinit_itables);
|
||||
ReinitializeTables reinit_tables(THREAD);
|
||||
gen->ro_space()->object_iterate(&reinit_tables);
|
||||
gen->rw_space()->object_iterate(&reinit_tables);
|
||||
tty->print_cr("done. ");
|
||||
tty->cr();
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -88,15 +88,6 @@ public:
|
||||
assert(false, "can't call");
|
||||
}
|
||||
|
||||
// Invoke "cl->do_oop" on (the address of) every possibly-modifed
|
||||
// reference field in objects in "sp". If "clear" is "true", the oops
|
||||
// are no longer considered possibly modified after application of the
|
||||
// closure. If' "before_save_marks" is true, oops in objects allocated
|
||||
// after the last call to "save_marks" on "sp" will not be considered.
|
||||
virtual void mod_oop_in_space_iterate(Space* sp, OopClosure* cl,
|
||||
bool clear = false,
|
||||
bool before_save_marks = false) = 0;
|
||||
|
||||
// Causes all refs in "mr" to be assumed to be modified. If "whole_heap"
|
||||
// is true, the caller asserts that the entire heap is being invalidated,
|
||||
// which may admit an optimized implementation for some barriers.
|
||||
|
@ -401,6 +401,8 @@ class instanceKlass: public Klass {
|
||||
ReferenceType reference_type() const { return _reference_type; }
|
||||
void set_reference_type(ReferenceType t) { _reference_type = t; }
|
||||
|
||||
static int reference_type_offset_in_bytes() { return offset_of(instanceKlass, _reference_type); }
|
||||
|
||||
// find local field, returns true if found
|
||||
bool find_local_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const;
|
||||
// find field in direct superinterfaces, returns the interface in which the field is defined
|
||||
|
@ -690,7 +690,8 @@ void instanceKlassKlass::oop_verify_on(oop obj, outputStream* st) {
|
||||
guarantee(method_ordering->is_perm(), "should be in permspace");
|
||||
guarantee(method_ordering->is_typeArray(), "should be type array");
|
||||
int length = method_ordering->length();
|
||||
if (JvmtiExport::can_maintain_original_method_order()) {
|
||||
if (JvmtiExport::can_maintain_original_method_order() ||
|
||||
(UseSharedSpaces && length != 0)) {
|
||||
guarantee(length == methods->length(), "invalid method ordering length");
|
||||
jlong sum = 0;
|
||||
for (j = 0; j < length; j++) {
|
||||
|
@ -453,6 +453,14 @@ void Klass::remove_unshareable_info() {
|
||||
ik->unlink_class();
|
||||
}
|
||||
}
|
||||
// Clear the Java vtable if the oop has one.
|
||||
// The vtable isn't shareable because it's in the wrong order wrt the methods
|
||||
// once the method names get moved and resorted.
|
||||
klassVtable* vt = vtable();
|
||||
if (vt != NULL) {
|
||||
assert(oop_is_instance() || oop_is_array(), "nothing else has vtable");
|
||||
vt->clear_vtable();
|
||||
}
|
||||
set_subklass(NULL);
|
||||
set_next_sibling(NULL);
|
||||
}
|
||||
|
@ -645,6 +645,15 @@ void klassVtable::adjust_method_entries(methodOop* old_methods, methodOop* new_m
|
||||
}
|
||||
}
|
||||
|
||||
// CDS/RedefineClasses support - clear vtables so they can be reinitialized
|
||||
void klassVtable::clear_vtable() {
|
||||
for (int i = 0; i < _length; i++) table()[i].clear();
|
||||
}
|
||||
|
||||
bool klassVtable::is_initialized() {
|
||||
return _length == 0 || table()[0].method() != NULL;
|
||||
}
|
||||
|
||||
|
||||
// Garbage collection
|
||||
void klassVtable::oop_follow_contents() {
|
||||
|
@ -75,7 +75,15 @@ class klassVtable : public ResourceObj {
|
||||
|
||||
void initialize_vtable(bool checkconstraints, TRAPS); // initialize vtable of a new klass
|
||||
|
||||
// conputes vtable length (in words) and the number of miranda methods
|
||||
// CDS/RedefineClasses support - clear vtables so they can be reinitialized
|
||||
// at dump time. Clearing gives us an easy way to tell if the vtable has
|
||||
// already been reinitialized at dump time (see dump.cpp). Vtables can
|
||||
// be initialized at run time by RedefineClasses so dumping the right order
|
||||
// is necessary.
|
||||
void clear_vtable();
|
||||
bool is_initialized();
|
||||
|
||||
// computes vtable length (in words) and the number of miranda methods
|
||||
static void compute_vtable_size_and_num_mirandas(int &vtable_length, int &num_miranda_methods,
|
||||
klassOop super, objArrayOop methods,
|
||||
AccessFlags class_flags, Handle classloader,
|
||||
|
@ -629,7 +629,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
|
||||
initial_gvn()->transform_no_reclaim(top());
|
||||
|
||||
// Set up tf(), start(), and find a CallGenerator.
|
||||
CallGenerator* cg;
|
||||
CallGenerator* cg = NULL;
|
||||
if (is_osr_compilation()) {
|
||||
const TypeTuple *domain = StartOSRNode::osr_domain();
|
||||
const TypeTuple *range = TypeTuple::make_range(method()->signature());
|
||||
@ -644,9 +644,24 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
|
||||
StartNode* s = new (this, 2) StartNode(root(), tf()->domain());
|
||||
initial_gvn()->set_type_bottom(s);
|
||||
init_start(s);
|
||||
float past_uses = method()->interpreter_invocation_count();
|
||||
float expected_uses = past_uses;
|
||||
cg = CallGenerator::for_inline(method(), expected_uses);
|
||||
if (method()->intrinsic_id() == vmIntrinsics::_Reference_get && UseG1GC) {
|
||||
// With java.lang.ref.reference.get() we must go through the
|
||||
// intrinsic when G1 is enabled - even when get() is the root
|
||||
// method of the compile - so that, if necessary, the value in
|
||||
// the referent field of the reference object gets recorded by
|
||||
// the pre-barrier code.
|
||||
// Specifically, if G1 is enabled, the value in the referent
|
||||
// field is recorded by the G1 SATB pre barrier. This will
|
||||
// result in the referent being marked live and the reference
|
||||
// object removed from the list of discovered references during
|
||||
// reference processing.
|
||||
cg = find_intrinsic(method(), false);
|
||||
}
|
||||
if (cg == NULL) {
|
||||
float past_uses = method()->interpreter_invocation_count();
|
||||
float expected_uses = past_uses;
|
||||
cg = CallGenerator::for_inline(method(), expected_uses);
|
||||
}
|
||||
}
|
||||
if (failing()) return;
|
||||
if (cg == NULL) {
|
||||
@ -2041,6 +2056,52 @@ static bool oop_offset_is_sane(const TypeInstPtr* tp) {
|
||||
// Note that OffsetBot and OffsetTop are very negative.
|
||||
}
|
||||
|
||||
// Eliminate trivially redundant StoreCMs and accumulate their
|
||||
// precedence edges.
|
||||
static void eliminate_redundant_card_marks(Node* n) {
|
||||
assert(n->Opcode() == Op_StoreCM, "expected StoreCM");
|
||||
if (n->in(MemNode::Address)->outcnt() > 1) {
|
||||
// There are multiple users of the same address so it might be
|
||||
// possible to eliminate some of the StoreCMs
|
||||
Node* mem = n->in(MemNode::Memory);
|
||||
Node* adr = n->in(MemNode::Address);
|
||||
Node* val = n->in(MemNode::ValueIn);
|
||||
Node* prev = n;
|
||||
bool done = false;
|
||||
// Walk the chain of StoreCMs eliminating ones that match. As
|
||||
// long as it's a chain of single users then the optimization is
|
||||
// safe. Eliminating partially redundant StoreCMs would require
|
||||
// cloning copies down the other paths.
|
||||
while (mem->Opcode() == Op_StoreCM && mem->outcnt() == 1 && !done) {
|
||||
if (adr == mem->in(MemNode::Address) &&
|
||||
val == mem->in(MemNode::ValueIn)) {
|
||||
// redundant StoreCM
|
||||
if (mem->req() > MemNode::OopStore) {
|
||||
// Hasn't been processed by this code yet.
|
||||
n->add_prec(mem->in(MemNode::OopStore));
|
||||
} else {
|
||||
// Already converted to precedence edge
|
||||
for (uint i = mem->req(); i < mem->len(); i++) {
|
||||
// Accumulate any precedence edges
|
||||
if (mem->in(i) != NULL) {
|
||||
n->add_prec(mem->in(i));
|
||||
}
|
||||
}
|
||||
// Everything above this point has been processed.
|
||||
done = true;
|
||||
}
|
||||
// Eliminate the previous StoreCM
|
||||
prev->set_req(MemNode::Memory, mem->in(MemNode::Memory));
|
||||
assert(mem->outcnt() == 0, "should be dead");
|
||||
mem->disconnect_inputs(NULL);
|
||||
} else {
|
||||
prev = mem;
|
||||
}
|
||||
mem = prev->in(MemNode::Memory);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//------------------------------final_graph_reshaping_impl----------------------
// Implement items 1-5 from final_graph_reshaping below.
static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) {
@ -2167,9 +2228,19 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) {
frc.inc_float_count();
goto handle_mem;

case Op_StoreCM:
{
// Convert OopStore dependence into precedence edge
Node* prec = n->in(MemNode::OopStore);
n->del_req(MemNode::OopStore);
n->add_prec(prec);
eliminate_redundant_card_marks(n);
}

// fall through

case Op_StoreB:
case Op_StoreC:
case Op_StoreCM:
case Op_StorePConditional:
case Op_StoreI:
case Op_StoreL:
@ -1457,19 +1457,22 @@ Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
}


void GraphKit::pre_barrier(Node* ctl,
void GraphKit::pre_barrier(bool do_load,
Node* ctl,
Node* obj,
Node* adr,
uint adr_idx,
Node* val,
const TypeOopPtr* val_type,
Node* pre_val,
BasicType bt) {

BarrierSet* bs = Universe::heap()->barrier_set();
set_control(ctl);
switch (bs->kind()) {
case BarrierSet::G1SATBCT:
case BarrierSet::G1SATBCTLogging:
g1_write_barrier_pre(obj, adr, adr_idx, val, val_type, bt);
g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
break;

case BarrierSet::CardTableModRef:
@ -1532,7 +1535,11 @@ Node* GraphKit::store_oop(Node* ctl,
uint adr_idx = C->get_alias_index(adr_type);
assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );

pre_barrier(control(), obj, adr, adr_idx, val, val_type, bt);
pre_barrier(true /* do_load */,
control(), obj, adr, adr_idx, val, val_type,
NULL /* pre_val */,
bt);

Node* store = store_to_memory(control(), adr, val, bt, adr_idx);
post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
return store;
@ -3470,12 +3477,31 @@ void GraphKit::write_barrier_post(Node* oop_store,
}

// G1 pre/post barriers
void GraphKit::g1_write_barrier_pre(Node* obj,
void GraphKit::g1_write_barrier_pre(bool do_load,
Node* obj,
Node* adr,
uint alias_idx,
Node* val,
const TypeOopPtr* val_type,
Node* pre_val,
BasicType bt) {

// Some sanity checks
// Note: val is unused in this routine.

if (do_load) {
// We need to generate the load of the previous value
assert(obj != NULL, "must have a base");
assert(adr != NULL, "where are loading from?");
assert(pre_val == NULL, "loaded already?");
assert(val_type != NULL, "need a type");
} else {
// In this case both val_type and alias_idx are unused.
assert(pre_val != NULL, "must be loaded already");
assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
}
assert(bt == T_OBJECT, "or we shouldn't be here");

IdealKit ideal(this, true);

Node* tls = __ thread(); // ThreadLocalStorage
@ -3497,32 +3523,28 @@ void GraphKit::g1_write_barrier_pre(Node* obj,
PtrQueue::byte_offset_of_index());
const int buffer_offset = in_bytes(JavaThread::satb_mark_queue_offset() + // 652
PtrQueue::byte_offset_of_buf());

// Now the actual pointers into the thread

// set_control( ctl);

Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
Node* index_adr = __ AddP(no_base, tls, __ ConX(index_offset));

// Now some of the values

Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);

// if (!marking)
__ if_then(marking, BoolTest::ne, zero); {
Node* index = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);

const Type* t1 = adr->bottom_type();
const Type* t2 = val->bottom_type();

Node* orig = __ load(no_ctrl, adr, val_type, bt, alias_idx);
// if (orig != NULL)
__ if_then(orig, BoolTest::ne, null()); {
Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

if (do_load) {
// load original value
// alias_idx correct??
pre_val = __ load(no_ctrl, adr, val_type, bt, alias_idx);
}

// if (pre_val != NULL)
__ if_then(pre_val, BoolTest::ne, null()); {
Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

// is the queue for this thread full?
__ if_then(index, BoolTest::ne, zero, likely); {
@ -3536,10 +3558,9 @@ void GraphKit::g1_write_barrier_pre(Node* obj,
next_indexX = _gvn.transform( new (C, 2) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) );
#endif

// Now get the buffer location we will log the original value into and store it
// Now get the buffer location we will log the previous value into and store it
Node *log_addr = __ AddP(no_base, buffer, next_indexX);
__ store(__ ctrl(), log_addr, orig, T_OBJECT, Compile::AliasIdxRaw);

__ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw);
// update the index
__ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);

@ -3547,9 +3568,9 @@ void GraphKit::g1_write_barrier_pre(Node* obj,

// logging buffer is full, call the runtime
const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
__ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", orig, tls);
__ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", pre_val, tls);
} __ end_if(); // (!index)
} __ end_if(); // (orig != NULL)
} __ end_if(); // (pre_val != NULL)
} __ end_if(); // (!marking)

// Final sync IdealKit and GraphKit.
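The reworked barrier is driven in two ways elsewhere in this change; the two call shapes (both taken from hunks in this patch) are shown together here for reference:

// Ordinary oop store: the barrier loads the previous value itself.
pre_barrier(true /* do_load */,
            control(), obj, adr, adr_idx, val, val_type,
            NULL /* pre_val */,
            bt);

// Reference.get() / Unsafe read of the referent: the value has already been
// loaded, so it is handed in as pre_val and no load is generated.
pre_barrier(false /* do_load */,
            control(),
            NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
            result /* pre_val */,
            T_OBJECT);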
@ -544,8 +544,10 @@ class GraphKit : public Phase {
BasicType bt);

// For the few case where the barriers need special help
void pre_barrier(Node* ctl, Node* obj, Node* adr, uint adr_idx,
Node* val, const TypeOopPtr* val_type, BasicType bt);
void pre_barrier(bool do_load, Node* ctl,
Node* obj, Node* adr, uint adr_idx, Node* val, const TypeOopPtr* val_type,
Node* pre_val,
BasicType bt);

void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
Node* val, BasicType bt, bool use_precise);
@ -671,11 +673,13 @@ class GraphKit : public Phase {
Node* adr, uint adr_idx, Node* val, bool use_precise);

// G1 pre/post barriers
void g1_write_barrier_pre(Node* obj,
void g1_write_barrier_pre(bool do_load,
Node* obj,
Node* adr,
uint alias_idx,
Node* val,
const TypeOopPtr* val_type,
Node* pre_val,
BasicType bt);

void g1_write_barrier_post(Node* store,
@ -688,20 +688,22 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, Vect
}
ready_cnt[n->_idx] = local; // Count em up

// A few node types require changing a required edge to a precedence edge
// before allocation.
#ifdef ASSERT
if( UseConcMarkSweepGC || UseG1GC ) {
if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_StoreCM ) {
// Note: Required edges with an index greater than oper_input_base
// are not supported by the allocator.
// Note2: Can only depend on unmatched edge being last,
// can not depend on its absolute position.
Node *oop_store = n->in(n->req() - 1);
n->del_req(n->req() - 1);
n->add_prec(oop_store);
assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
// Check the precedence edges
for (uint prec = n->req(); prec < n->len(); prec++) {
Node* oop_store = n->in(prec);
if (oop_store != NULL) {
assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
}
}
}
}
#endif

// A few node types require changing a required edge to a precedence edge
// before allocation.
if( n->is_Mach() && n->req() > TypeFunc::Parms &&
(n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire ||
n->as_Mach()->ideal_Opcode() == Op_MemBarVolatile) ) {
@ -166,6 +166,10 @@ class LibraryCallKit : public GraphKit {
// This returns Type::AnyPtr, RawPtr, or OopPtr.
int classify_unsafe_addr(Node* &base, Node* &offset);
Node* make_unsafe_address(Node* base, Node* offset);
// Helper for inline_unsafe_access.
// Generates the guards that check whether the result of
// Unsafe.getObject should be recorded in an SATB log buffer.
void insert_g1_pre_barrier(Node* base_oop, Node* offset, Node* pre_val);
bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile);
bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static);
bool inline_unsafe_allocate();
@ -240,6 +244,8 @@ class LibraryCallKit : public GraphKit {
bool inline_numberOfTrailingZeros(vmIntrinsics::ID id);
bool inline_bitCount(vmIntrinsics::ID id);
bool inline_reverseBytes(vmIntrinsics::ID id);

bool inline_reference_get();
};


@ -336,6 +342,14 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
if (!UsePopCountInstruction) return NULL;
break;

case vmIntrinsics::_Reference_get:
// It is only when G1 is enabled that we absolutely
// need to use the intrinsic version of Reference.get()
// so that the value in the referent field, if necessary,
// can be registered by the pre-barrier code.
if (!UseG1GC) return NULL;
break;

default:
assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
@ -387,6 +401,7 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
tty->print_cr("Intrinsic %s", str);
}
#endif

if (kit.try_to_inline()) {
if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
CompileTask::print_inlining(kit.callee(), jvms->depth() - 1, kit.bci(), is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
@ -402,11 +417,19 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
}

if (PrintIntrinsics) {
tty->print("Did not inline intrinsic %s%s at bci:%d in",
if (jvms->has_method()) {
// Not a root compile.
tty->print("Did not inline intrinsic %s%s at bci:%d in",
vmIntrinsics::name_at(intrinsic_id()),
(is_virtual() ? " (virtual)" : ""), kit.bci());
kit.caller()->print_short_name(tty);
tty->print_cr(" (%d bytes)", kit.caller()->code_size());
} else {
// Root compile
tty->print("Did not generate intrinsic %s%s at bci:%d in",
vmIntrinsics::name_at(intrinsic_id()),
(is_virtual() ? " (virtual)" : ""), kit.bci());
kit.caller()->print_short_name(tty);
tty->print_cr(" (%d bytes)", kit.caller()->code_size());
}
}
C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
return NULL;
@ -418,6 +441,14 @@ bool LibraryCallKit::try_to_inline() {
const bool is_native_ptr = true;
const bool is_static = true;

if (!jvms()->has_method()) {
// Root JVMState has a null method.
assert(map()->memory()->Opcode() == Op_Parm, "");
// Insert the memory aliasing node
set_all_memory(reset_memory());
}
assert(merged_memory(), "");

switch (intrinsic_id()) {
case vmIntrinsics::_hashCode:
return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
@ -658,6 +689,9 @@ bool LibraryCallKit::try_to_inline() {
case vmIntrinsics::_getCallerClass:
return inline_native_Reflection_getCallerClass();

case vmIntrinsics::_Reference_get:
return inline_reference_get();

default:
// If you get here, it may be that someone has added a new intrinsic
// to the list in vmSymbols.hpp without implementing it here.
@ -2076,6 +2110,106 @@ bool LibraryCallKit::inline_reverseBytes(vmIntrinsics::ID id) {

const static BasicType T_ADDRESS_HOLDER = T_LONG;

// Helper that guards and inserts a G1 pre-barrier.
void LibraryCallKit::insert_g1_pre_barrier(Node* base_oop, Node* offset, Node* pre_val) {
assert(UseG1GC, "should not call this otherwise");

// We could be accessing the referent field of a reference object. If so, when G1
// is enabled, we need to log the value in the referent field in an SATB buffer.
// This routine performs some compile time filters and generates suitable
// runtime filters that guard the pre-barrier code.

// Some compile time checks.

// If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
const TypeX* otype = offset->find_intptr_t_type();
if (otype != NULL && otype->is_con() &&
otype->get_con() != java_lang_ref_Reference::referent_offset) {
// Constant offset but not the reference_offset so just return
return;
}

// We only need to generate the runtime guards for instances.
const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
if (btype != NULL) {
if (btype->isa_aryptr()) {
// Array type so nothing to do
return;
}

const TypeInstPtr* itype = btype->isa_instptr();
if (itype != NULL) {
// Can the klass of base_oop be statically determined
// to be _not_ a sub-class of Reference?
ciKlass* klass = itype->klass();
if (klass->is_subtype_of(env()->Reference_klass()) &&
!env()->Reference_klass()->is_subtype_of(klass)) {
return;
}
}
}

// The compile time filters did not reject base_oop/offset so
// we need to generate the following runtime filters
//
// if (offset == java_lang_ref_Reference::_reference_offset) {
// if (base != null) {
// if (klass(base)->reference_type() != REF_NONE)) {
// pre_barrier(_, pre_val, ...);
// }
// }
// }

float likely = PROB_LIKELY(0.999);
float unlikely = PROB_UNLIKELY(0.999);

IdealKit ideal(this);
#define __ ideal.

const int reference_type_offset = instanceKlass::reference_type_offset_in_bytes() +
sizeof(oopDesc);

Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);

__ if_then(offset, BoolTest::eq, referent_off, unlikely); {
__ if_then(base_oop, BoolTest::ne, null(), likely); {

// Update graphKit memory and control from IdealKit.
sync_kit(ideal);

Node* ref_klass_con = makecon(TypeKlassPtr::make(env()->Reference_klass()));
Node* is_instof = gen_instanceof(base_oop, ref_klass_con);

// Update IdealKit memory and control from graphKit.
__ sync_kit(this);

Node* one = __ ConI(1);

__ if_then(is_instof, BoolTest::eq, one, unlikely); {

// Update graphKit from IdeakKit.
sync_kit(ideal);

// Use the pre-barrier to record the value in the referent field
pre_barrier(false /* do_load */,
__ ctrl(),
NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
pre_val /* pre_val */,
T_OBJECT);

// Update IdealKit from graphKit.
__ sync_kit(this);

} __ end_if(); // _ref_type != ref_none
} __ end_if(); // base != NULL
} __ end_if(); // offset == referent_offset

// Final sync IdealKit and GraphKit.
final_sync(ideal);
#undef __
}


// Interpret Unsafe.fieldOffset cookies correctly:
extern jlong Unsafe_field_offset_to_byte_offset(jlong field_offset);

@ -2152,9 +2286,11 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
// Build address expression. See the code in inline_unsafe_prefetch.
Node *adr;
Node *heap_base_oop = top();
Node* offset = top();

if (!is_native_ptr) {
// The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
Node* offset = pop_pair();
offset = pop_pair();
// The base is either a Java object or a value produced by Unsafe.staticFieldBase
Node* base = pop();
// We currently rely on the cookies produced by Unsafe.xxxFieldOffset
@ -2195,6 +2331,13 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
// or Compile::must_alias will throw a diagnostic assert.)
bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);

// If we are reading the value of the referent field of a Reference
// object (either by using Unsafe directly or through reflection)
// then, if G1 is enabled, we need to record the referent in an
// SATB log buffer using the pre-barrier mechanism.
bool need_read_barrier = UseG1GC && !is_native_ptr && !is_store &&
offset != top() && heap_base_oop != top();

if (!is_store && type == T_OBJECT) {
// Attempt to infer a sharper value type from the offset and base type.
ciKlass* sharpened_klass = NULL;
@ -2278,8 +2421,13 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
case T_SHORT:
case T_INT:
case T_FLOAT:
push(p);
break;
case T_OBJECT:
push( p );
if (need_read_barrier) {
insert_g1_pre_barrier(heap_base_oop, offset, p);
}
push(p);
break;
case T_ADDRESS:
// Cast to an int type.
@ -2534,7 +2682,10 @@ bool LibraryCallKit::inline_unsafe_CAS(BasicType type) {
case T_OBJECT:
// reference stores need a store barrier.
// (They don't if CAS fails, but it isn't worth checking.)
pre_barrier(control(), base, adr, alias_idx, newval, value_type->make_oopptr(), T_OBJECT);
pre_barrier(true /* do_load*/,
control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
NULL /* pre_val*/,
T_OBJECT);
#ifdef _LP64
if (adr->bottom_type()->is_ptr_to_narrowoop()) {
Node *newval_enc = _gvn.transform(new (C, 2) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
@ -5235,3 +5386,44 @@ LibraryCallKit::generate_unchecked_arraycopy(const TypePtr* adr_type,
copyfunc_addr, copyfunc_name, adr_type,
src_start, dest_start, copy_length XTOP);
}

//----------------------------inline_reference_get----------------------------

bool LibraryCallKit::inline_reference_get() {
const int nargs = 1; // self

guarantee(java_lang_ref_Reference::referent_offset > 0,
"should have already been set");

int referent_offset = java_lang_ref_Reference::referent_offset;

// Restore the stack and pop off the argument
_sp += nargs;
Node *reference_obj = pop();

// Null check on self without removing any arguments.
_sp += nargs;
reference_obj = do_null_check(reference_obj, T_OBJECT);
_sp -= nargs;;

if (stopped()) return true;

Node *adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);

ciInstanceKlass* klass = env()->Object_klass();
const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);

Node* no_ctrl = NULL;
Node *result = make_load(no_ctrl, adr, object_type, T_OBJECT);

// Use the pre-barrier to record the value in the referent field
pre_barrier(false /* do_load */,
control(),
NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
result /* pre_val */,
T_OBJECT);

push(result);
return true;
}

@ -2159,9 +2159,12 @@ Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
Node* mem = in(MemNode::Memory);
Node* address = in(MemNode::Address);

// Back-to-back stores to same address? Fold em up.
// Generally unsafe if I have intervening uses...
if (mem->is_Store() && phase->eqv_uncast(mem->in(MemNode::Address), address)) {
// Back-to-back stores to same address? Fold em up. Generally
// unsafe if I have intervening uses... Also disallowed for StoreCM
// since they must follow each StoreP operation. Redundant StoreCMs
// are eliminated just before matching in final_graph_reshape.
if (mem->is_Store() && phase->eqv_uncast(mem->in(MemNode::Address), address) &&
mem->Opcode() != Op_StoreCM) {
// Looking at a dead closed cycle of memory?
assert(mem != mem->in(MemNode::Memory), "dead loop in StoreNode::Ideal");

@ -1354,15 +1354,20 @@ void Compile::Fill_buffer() {
// Check that oop-store precedes the card-mark
else if( mach->ideal_Opcode() == Op_StoreCM ) {
uint storeCM_idx = j;
Node *oop_store = mach->in(mach->_cnt); // First precedence edge
assert( oop_store != NULL, "storeCM expects a precedence edge");
uint i4;
for( i4 = 0; i4 < last_inst; ++i4 ) {
if( b->_nodes[i4] == oop_store ) break;
int count = 0;
for (uint prec = mach->req(); prec < mach->len(); prec++) {
Node *oop_store = mach->in(prec); // Precedence edge
if (oop_store == NULL) continue;
count++;
uint i4;
for( i4 = 0; i4 < last_inst; ++i4 ) {
if( b->_nodes[i4] == oop_store ) break;
}
// Note: This test can provide a false failure if other precedence
// edges have been added to the storeCMNode.
assert( i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store");
}
// Note: This test can provide a false failure if other precedence
// edges have been added to the storeCMNode.
assert( i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store");
assert(count > 0, "storeCM expects at least one precedence edge");
}
#endif

@ -29,6 +29,9 @@
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "interpreter/linkResolver.hpp"
#ifndef SERIALGC
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif // SERIALGC
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/oopFactory.hpp"
@ -1724,6 +1727,26 @@ JNI_ENTRY(jobject, jni_GetObjectField(JNIEnv *env, jobject obj, jfieldID fieldID
o = JvmtiExport::jni_GetField_probe(thread, obj, o, k, fieldID, false);
}
jobject ret = JNIHandles::make_local(env, o->obj_field(offset));
#ifndef SERIALGC
// If G1 is enabled and we are accessing the value of the referent
// field in a reference object then we need to register a non-null
// referent with the SATB barrier.
if (UseG1GC) {
bool needs_barrier = false;

if (ret != NULL &&
offset == java_lang_ref_Reference::referent_offset &&
instanceKlass::cast(k)->reference_type() != REF_NONE) {
assert(instanceKlass::cast(k)->is_subclass_of(SystemDictionary::Reference_klass()), "sanity");
needs_barrier = true;
}

if (needs_barrier) {
oop referent = JNIHandles::resolve(ret);
G1SATBCardTableModRefBS::enqueue(referent);
}
}
#endif // SERIALGC
DTRACE_PROBE1(hotspot_jni, GetObjectField__return, ret);
return ret;
JNI_END
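The Unsafe accessors further below apply the same runtime predicate as the JNI accessor above; condensed (restated from the code in this patch, not new logic):

bool needs_barrier = ret != NULL
                  && offset == java_lang_ref_Reference::referent_offset
                  && instanceKlass::cast(k)->reference_type() != REF_NONE;
if (needs_barrier) {
  G1SATBCardTableModRefBS::enqueue(JNIHandles::resolve(ret));
}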
@ -525,7 +525,7 @@ JvmtiEnv::AddToSystemClassLoaderSearch(const char* segment) {
ObjectLocker ol(loader, THREAD);

// need the path as java.lang.String
Handle path = java_lang_String::create_from_str(segment, THREAD);
Handle path = java_lang_String::create_from_platform_dependent_str(segment, THREAD);
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
return JVMTI_ERROR_INTERNAL;
@ -24,6 +24,9 @@

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#ifndef SERIALGC
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif // SERIALGC
#include "memory/allocation.inline.hpp"
#include "prims/jni.h"
#include "prims/jvm.h"
@ -193,7 +196,32 @@ UNSAFE_ENTRY(jobject, Unsafe_GetObject140(JNIEnv *env, jobject unsafe, jobject o
UnsafeWrapper("Unsafe_GetObject");
if (obj == NULL) THROW_0(vmSymbols::java_lang_NullPointerException());
GET_OOP_FIELD(obj, offset, v)
return JNIHandles::make_local(env, v);
jobject ret = JNIHandles::make_local(env, v);
#ifndef SERIALGC
// We could be accessing the referent field in a reference
// object. If G1 is enabled then we need to register a non-null
// referent with the SATB barrier.
if (UseG1GC) {
bool needs_barrier = false;

if (ret != NULL) {
if (offset == java_lang_ref_Reference::referent_offset) {
oop o = JNIHandles::resolve_non_null(obj);
klassOop k = o->klass();
if (instanceKlass::cast(k)->reference_type() != REF_NONE) {
assert(instanceKlass::cast(k)->is_subclass_of(SystemDictionary::Reference_klass()), "sanity");
needs_barrier = true;
}
}
}

if (needs_barrier) {
oop referent = JNIHandles::resolve(ret);
G1SATBCardTableModRefBS::enqueue(referent);
}
}
#endif // SERIALGC
return ret;
UNSAFE_END

UNSAFE_ENTRY(void, Unsafe_SetObject140(JNIEnv *env, jobject unsafe, jobject obj, jint offset, jobject x_h))
@ -226,7 +254,32 @@ UNSAFE_END
UNSAFE_ENTRY(jobject, Unsafe_GetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset))
UnsafeWrapper("Unsafe_GetObject");
GET_OOP_FIELD(obj, offset, v)
return JNIHandles::make_local(env, v);
jobject ret = JNIHandles::make_local(env, v);
#ifndef SERIALGC
// We could be accessing the referent field in a reference
// object. If G1 is enabled then we need to register non-null
// referent with the SATB barrier.
if (UseG1GC) {
bool needs_barrier = false;

if (ret != NULL) {
if (offset == java_lang_ref_Reference::referent_offset && obj != NULL) {
oop o = JNIHandles::resolve(obj);
klassOop k = o->klass();
if (instanceKlass::cast(k)->reference_type() != REF_NONE) {
assert(instanceKlass::cast(k)->is_subclass_of(SystemDictionary::Reference_klass()), "sanity");
needs_barrier = true;
}
}
}

if (needs_barrier) {
oop referent = JNIHandles::resolve(ret);
G1SATBCardTableModRefBS::enqueue(referent);
}
}
#endif // SERIALGC
return ret;
UNSAFE_END

UNSAFE_ENTRY(void, Unsafe_SetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h))
@ -244,6 +244,12 @@ static ObsoleteFlag obsolete_jvm_flags[] = {
{ "MaxLiveObjectEvacuationRatio",
JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) },
{ "ForceSharedSpaces", JDK_Version::jdk_update(6,25), JDK_Version::jdk(8) },
{ "UseParallelOldGCCompacting",
JDK_Version::jdk_update(6,27), JDK_Version::jdk(8) },
{ "UseParallelDensePrefixUpdate",
JDK_Version::jdk_update(6,27), JDK_Version::jdk(8) },
{ "UseParallelOldGCDensePrefix",
JDK_Version::jdk_update(6,27), JDK_Version::jdk(8) },
{ "AllowTransitionalJSR292", JDK_Version::jdk(7), JDK_Version::jdk(8) },
{ NULL, JDK_Version(0), JDK_Version(0) }
};
@ -801,26 +807,22 @@ bool Arguments::process_argument(const char* arg,

JDK_Version since = JDK_Version();

if (parse_argument(arg, origin)) {
// do nothing
} else if (is_newly_obsolete(arg, &since)) {
enum { bufsize = 256 };
char buffer[bufsize];
since.to_string(buffer, bufsize);
jio_fprintf(defaultStream::error_stream(),
"Warning: The flag %s has been EOL'd as of %s and will"
" be ignored\n", arg, buffer);
} else {
if (!ignore_unrecognized) {
jio_fprintf(defaultStream::error_stream(),
"Unrecognized VM option '%s'\n", arg);
// allow for commandline "commenting out" options like -XX:#+Verbose
if (strlen(arg) == 0 || arg[0] != '#') {
return false;
}
}
}
if (parse_argument(arg, origin) || ignore_unrecognized) {
return true;
}
return true;

const char * const argname = *arg == '+' || *arg == '-' ? arg + 1 : arg;
if (is_newly_obsolete(arg, &since)) {
char version[256];
since.to_string(version, sizeof(version));
warning("ignoring option %s; support was removed in %s", argname, version);
return true;
}

jio_fprintf(defaultStream::error_stream(),
"Unrecognized VM option '%s'\n", argname);
// allow for commandline "commenting out" options like -XX:#+Verbose
return arg[0] == '#';
}

bool Arguments::process_settings_file(const char* file_name, bool should_exist, jboolean ignore_unrecognized) {
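Restating the outcomes of the reworked process_argument() above (the unrecognized flag name is only an example; the obsolete one comes from the table at the top of this hunk):

// -XX:+UseParallelOldGCCompacting -> listed in obsolete_jvm_flags: prints
//     "ignoring option UseParallelOldGCCompacting; support was removed in <release>"
//     and returns true, so the option is silently dropped.
// -XX:#+Verbose                   -> the leading '#' comments the option out; returns true.
// -XX:+SomeUnknownFlag            -> "Unrecognized VM option 'SomeUnknownFlag'"; returns false
//     unless ignore_unrecognized was passed, in which case it returns true early.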
@ -1355,13 +1355,6 @@ class CommandLineFlags {
product(bool, UseParallelOldGC, false, \
"Use the Parallel Old garbage collector") \
\
product(bool, UseParallelOldGCCompacting, true, \
"In the Parallel Old garbage collector use parallel compaction") \
\
product(bool, UseParallelDensePrefixUpdate, true, \
"In the Parallel Old garbage collector use parallel dense" \
" prefix update") \
\
product(uintx, HeapMaximumCompactionInterval, 20, \
"How often should we maximally compact the heap (not allowing " \
"any dead space)") \
@ -1381,9 +1374,6 @@ class CommandLineFlags {
"The standard deviation used by the par compact dead wood" \
"limiter (a number between 0-100).") \
\
product(bool, UseParallelOldGCDensePrefix, true, \
"Use a dense prefix with the Parallel Old garbage collector") \
\
product(uintx, ParallelGCThreads, 0, \
"Number of parallel threads parallel gc will use") \
\
@ -4,3 +4,4 @@
^drop_included/
^webrev/
/nbproject/private/
^.hgtip
@ -113,3 +113,4 @@ d56b326ae0544fc16c3e0d0285876f3c82054db2 jdk7-b134
1759daa85d33800bd578853f9531f9de73f70fc7 jdk7-b136
1d87f7460cde7f8f30af668490f82b52b879bfd8 jdk7-b137
be3758943770a0a3dd4be6a1cb4063507c4d7062 jdk7-b138
28c7c0ed2444607829ba11ad827f8d52197a2830 jdk7-b139
@ -113,3 +113,4 @@ d5fc61f18043765705ef22b57a68c924ab2f1a5b jdk7-b135
c81d289c9a532d6e94af3c09d856a2a20529040f jdk7-b136
ccea3282991ce8b678e188cf32a8239f76ff3bfa jdk7-b137
cc956c8a8255583535597e9a63db23c510e9a063 jdk7-b138
c025078c8362076503bb83b8e4da14ba7b347940 jdk7-b139
@ -25,8 +25,8 @@

drops.master.copy.base=${drops.dir}

jaxws_src.bundle.name=jdk7-jaxws2_2_2-2010_12_14.zip
jaxws_src.bundle.md5.checksum=fee9ac72fabc96719eefc66ecaff4bc3
jaxws_src.bundle.name=jdk7-jaxws2_2_4-b01-2011_04_08.zip
jaxws_src.bundle.md5.checksum=9f35dd731c99ddb62db650aaf20e5bf4
jaxws_src.master.bundle.dir=${drops.master.copy.base}
jaxws_src.master.bundle.url.base=http://download.java.net/glassfish/components/jax-ws/openjdk/jdk7

@ -113,3 +113,4 @@ d8ced728159fbb2caa8b6adb477fd8efdbbdf179 jdk7-b135
aa13e7702cd9d8aca9aa38f1227f966990866944 jdk7-b136
29296ea6529a418037ccce95903249665ef31c11 jdk7-b137
60d3d55dcc9c31a30ced9caa6ef5c0dcd7db031d jdk7-b138
d80954a89b49fda47c0c5cace65a17f5a758b8bd jdk7-b139
@ -56,10 +56,6 @@ build: unpacker

vpath %.cpp $(SHARE_SRC)/native/$(PKGDIR)





ifeq ($(STANDALONE),true)
ZIPOBJDIR = $(OUTPUTDIR)/tmp/sun/java.util.zip/zip/$(OBJDIRNAME)

@ -131,8 +127,9 @@ prop:
pack200-tool:
$(call make-launcher, pack200, com.sun.java.util.jar.pack.Driver, , --pack)

# ignore mapfile for non-product binary
unpacker:
$(MAKE) $(UNPACK_EXE) STANDALONE=true LDMAPFLAGS_OPT= LDMAPFLAGS_DBG=
$(MAKE) $(UNPACK_EXE) STANDALONE=true LDMAPFLAGS_DBG=

ifeq ($(PLATFORM), windows)
IMVERSIONVALUE=$(JDK_MINOR_VERSION).$(JDK_MICRO_VERSION).$(JDK_UPDATE_VER).$(COOKED_BUILD_NUMBER)
@ -147,8 +144,14 @@ winres::
$(ECHO) "Resource files not required for Unix"
endif

# Mapfile-vers.gmk, does not copy over the mapfile-vers-unpack200, when
# the make utiliy is re-invoked, as in this case. In order to workaround
# this special case, the mapfile required for the unpack200 command, is
# explicitly copied over to the expected location.
$(UNPACK_EXE): $(UNPACK_EXE_FILES_o) updatefiles winres
$(prep-target)
$(RM) $(TEMPDIR)/mapfile-vers
$(CP) mapfile-vers-unpack200 $(TEMPDIR)/mapfile-vers
$(LINKER) $(LDDFLAGS) $(UNPACK_EXE_FILES_o) $(RES) $(LIBCXX) $(LDOUTPUT)$(TEMPDIR)/unpack200$(EXE_SUFFIX)
ifdef MT
$(MT) /manifest $(OBJDIR)/unpack200$(EXE_SUFFIX).manifest /outputresource:$(TEMPDIR)/unpack200$(EXE_SUFFIX);#1
31
jdk/make/com/sun/java/pack/mapfile-vers-unpack200
Normal file
@ -0,0 +1,31 @@
#
# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#

# Define library interface.

SUNWprivate_1.1 {
local:
*;
};
@ -52,8 +52,8 @@ ifeq ($(VARIANT), OPT)
endif

# If we are re-ordering functions in this solaris library, we need to make
# sure that -xF is added to the compile lines. This option is critical and
# enables the functions to be reordered.
# sure that -xF is added to the compile lines. This option is critical and
# enables the functions to be reordered.
ifdef FILES_reorder
CFLAGS_OPT += -xF
CXXFLAGS_OPT += -xF
@ -76,7 +76,6 @@ endif

endif # PLATFORM


ifeq ($(PLATFORM), linux)

ifeq ($(VARIANT), OPT)
@ -55,6 +55,11 @@ program_default_rule: all

program: $(ACTUAL_PROGRAM)

# reuse the mapfiles in the launcher's directory, the same should
# be applicable to the tool launchers as well.
FILES_m = $(BUILDDIR)/java/main/java/mapfile-$(ARCH)
include $(BUILDDIR)/common/Mapfile-vers.gmk

include $(JDK_TOPDIR)/make/common/Rules.gmk

ifdef NEVER_ACT_AS_SERVER_CLASS_MACHINE
@ -885,12 +885,18 @@ else
ABS_DB_PATH :=$(call FullPath,$(CLOSED_SHARE_SRC)/db)
DB_ZIP_LIST = $(shell $(LS) $(ABS_DB_PATH)/*.zip 2>/dev/null)

# Java DB image. Move the Java DB demo directory into the JDK's demo
# dir and in the process, rename it to db. Also remove index.html,
# since it presumes docs are co-located. Also remove register.html (no
# longer relevant).
initial-image-jdk-db: $(DB_ZIP_LIST)
$(MKDIR) -p $(JDK_IMAGE_DIR)/db
for d in $(DB_ZIP_LIST); do \
($(CD) $(JDK_IMAGE_DIR)/db && $(UNZIP) -o $$d); \
done

$(RM) -rf $(DEMODIR)/db
$(MV) $(JDK_IMAGE_DIR)/db/demo $(DEMODIR)/db
$(RM) $(JDK_IMAGE_DIR)/db/index.html $(JDK_IMAGE_DIR)/db/register.html
endif

# Standard jdk image
@ -53,7 +53,7 @@ DEV_DOCS_URL-7 = http://download.oracle.com/javase/7/docs/index.html
DEV_DOCS_URL = $(DEV_DOCS_URL-$(JDK_MINOR_VERSION))

# Url to Java Language Spec
JLS3_URL = http://java.sun.com/docs/books/jls/
#JLS3_URL = http://java.sun.com/docs/books/jls/

# Common Java trademark line
JAVA_TRADEMARK_LINE = Java is a trademark or registered trademark of \
@ -293,8 +293,8 @@ COREAPI_HEADER = \
<strong>Java$(TRADEMARK) Platform<br>Standard Ed. $(JDK_MINOR_VERSION)</strong>

# Java language specification cite
TAG_JLS3 = jls3:a:See <cite><a href="$(JLS3_URL)"> \
The Java Language Specification, Third Edition</a></cite>:
TAG_JLS = jls:a:See <cite> \
The Java™ Language Specification</cite>:

# Overview file for core apis
COREAPI_OVERVIEW = $(SHARE_SRC)/classes/overview-core.html
@ -329,7 +329,7 @@ $(COREAPI_OPTIONS_FILE): $(COREAPI_OVERVIEW)
$(call OptionPair,-tag,specdefault:X) ; \
$(call OptionPair,-tag,Note:X) ; \
$(call OptionPair,-tag,ToDo:X) ; \
$(call OptionPair,-tag,$(TAG_JLS3)) ; \
$(call OptionPair,-tag,$(TAG_JLS)) ; \
$(call OptionOnly,-splitIndex) ; \
$(call OptionPair,-overview,$(COREAPI_OVERVIEW)) ; \
$(call OptionPair,-doctitle,$(COREAPI_DOCTITLE)) ; \
@ -1081,6 +1081,7 @@ $(TREEAPI_OPTIONS_FILE):
$(call OptionPair,-doctitle,$(TREEAPI_DOCTITLE)) ; \
$(call OptionPair,-windowtitle,$(TREEAPI_WINDOWTITLE) $(DRAFT_WINTITLE));\
$(call OptionPair,-header,$(TREEAPI_HEADER)$(DRAFT_HEADER)) ; \
$(call OptionPair,-tag,$(TAG_JLS)) ; \
$(call OptionPair,-bottom,$(TREEAPI_BOTTOM)$(DRAFT_BOTTOM)) ; \
$(call OptionTrip,-group,$(TREEAPI_GROUPNAME),$(TREEAPI_REGEXP)); \
$(call OptionTrip,-linkoffline,$(TREEAPI2COREAPI),$(COREAPI_DOCSDIR)/); \
@ -189,7 +189,6 @@ JAVA_JAVA_java = \
java/util/ListResourceBundle.java \
sun/util/EmptyListResourceBundle.java \
java/util/Locale.java \
sun/util/locale/AsciiUtil.java \
sun/util/locale/BaseLocale.java \
sun/util/locale/Extension.java \
sun/util/locale/InternalLocaleBuilder.java \
@ -197,6 +196,7 @@ JAVA_JAVA_java = \
sun/util/locale/LocaleExtensions.java \
sun/util/locale/LocaleObjectCache.java \
sun/util/locale/LocaleSyntaxException.java \
sun/util/locale/LocaleUtils.java \
sun/util/locale/ParseStatus.java \
sun/util/locale/StringTokenIterator.java \
sun/util/locale/UnicodeLocaleExtension.java \
@ -61,5 +61,4 @@ OTHER_CPPFLAGS += -DLAUNCHER_NAME='"$(LAUNCHER_NAME)"'

ifeq ($(PLATFORM), solaris)
LDFLAGS += -R$(OPENWIN_LIB)
LDFLAGS += -M mapfile-$(ARCH)
endif
@ -1,5 +1,5 @@
#
# Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -27,7 +27,7 @@
# interested in declaring a version, simply scoping the file is sufficient.
#

{
SUNWprivate_1.1 {
global:
main; # Provides basic adb symbol offsets
environ; # Public symbols and required by Java run time
@ -1,5 +1,5 @@
#
# Copyright (c) 2004, 2005, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -27,7 +27,7 @@
# interested in declaring a version, simply scoping the file is sufficient.
#

{
SUNWprivate_1.1 {
global:
main; # Provides basic adb symbol offsets
environ; # Public symbols and required by Java run time
@ -1,5 +1,5 @@
#
# Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,7 @@
# interested in declaring a version, simply scoping the file is sufficient.
#

{
SUNWprivate_1.1 {
global:
main; # Provides basic adb symbol offsets
environ; # Public symbols and required by Java run time
@ -1,5 +1,5 @@
#
# Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,7 @@
# interested in declaring a version, simply scoping the file is sufficient.
#

{
SUNWprivate_1.1 {
global:
main; # Provides basic adb symbol offsets
environ; # Public symbols and required by Java run time
@ -58,7 +58,7 @@ endef
SIGNING_KEY_DIR = /security/ws/JCE-signing/src
SIGNING_KEYSTORE = $(SIGNING_KEY_DIR)/KeyStore.jks
SIGNING_PASSPHRASE = $(SIGNING_KEY_DIR)/passphrase.txt
SIGNING_ALIAS = jce_rsa
SIGNING_ALIAS = oracle_jce_rsa

#
# Defines for signing the various jar files.
@ -519,9 +519,8 @@ JDWP "Java(tm) Debug Wire Protocol"
"Both the JNI signature and the generic signature are "
"returned for each class. "
"Generic signatures are described in the signature attribute "
"section in the "
"<a href=\"http://java.sun.com/docs/books/vmspec\">
"Java Virtual Machine Specification, 3rd Edition.</a> "
"section in "
"<cite>The Java™ Virtual Machine Specification</cite>. "
"Since JDWP version 1.5."
(Out
)
@ -623,8 +622,8 @@ JDWP "Java(tm) Debug Wire Protocol"
(referenceType refType "The reference type ID.")
)
(Reply
(int modBits "Modifier bits as defined in the "
"<a href=\"http://java.sun.com/docs/books/vmspec/html/ClassFile.doc.html\">VM Specification</a>")
(int modBits "Modifier bits as defined in Chapter 4 of "
"<cite>The Java™ Virtual Machine Specification</cite>")
)
(ErrorSet
(Error INVALID_CLASS "refType is not the ID of a reference "
@ -651,8 +650,8 @@ JDWP "Java(tm) Debug Wire Protocol"
(int modBits "The modifier bit flags (also known as access flags) "
"which provide additional information on the "
"field declaration. Individual flag values are "
"defined in the "
"<a href=\"http://java.sun.com/docs/books/vmspec/html/ClassFile.doc.html\">VM Specification</a>."
"defined in Chapter 4 of "
"<cite>The Java™ Virtual Machine Specification</cite>. "
"In addition, The <code>0xf0000000</code> bit identifies "
"the field as synthetic, if the synthetic attribute "
"<a href=\"#JDWP_VirtualMachine_Capabilities\">capability</a> is available.")
@ -686,8 +685,8 @@ JDWP "Java(tm) Debug Wire Protocol"
(int modBits "The modifier bit flags (also known as access flags) "
"which provide additional information on the "
"method declaration. Individual flag values are "
"defined in the "
"<a href=\"http://java.sun.com/docs/books/vmspec/html/ClassFile.doc.html\">VM Specification</a>."
"defined in Chapter 4 of "
"<cite>The Java™ Virtual Machine Specification</cite>. "
"In addition, The <code>0xf0000000</code> bit identifies "
"the method as synthetic, if the synthetic attribute "
"<a href=\"#JDWP_VirtualMachine_Capabilities\">capability</a> is available.")
@ -773,8 +772,8 @@ JDWP "Java(tm) Debug Wire Protocol"
(Command Status=9
"Returns the current status of the reference type. The status "
"indicates the extent to which the reference type has been "
"initialized, as described in the "
"<a href=\"http://java.sun.com/docs/books/vmspec/html/Concepts.doc.html#16491\">VM specification</a>. "
"initialized, as described in section 2.1.6 of "
"<cite>The Java™ Virtual Machine Specification</cite>. "
"If the class is linked the PREPARED and VERIFIED bits in the returned status bits "
"will be set. If the class is initialized the INITIALIZED bit in the returned "
"status bits will be set. If an error occured during initialization then the "
@ -852,9 +851,8 @@ JDWP "Java(tm) Debug Wire Protocol"
"Returns the JNI signature of a reference type along with the "
"generic signature if there is one. "
"Generic signatures are described in the signature attribute "
"section in the "
"<a href=\"http://java.sun.com/docs/books/vmspec\">
"Java Virtual Machine Specification, 3rd Edition.</a> "
"section in "
"<cite>The Java™ Virtual Machine Specification</cite>. "
"Since JDWP version 1.5."
"<p>
(Out
@ -882,9 +880,8 @@ JDWP "Java(tm) Debug Wire Protocol"
"by the compiler. "
"Fields are returned in the order they occur in the class file. "
"Generic signatures are described in the signature attribute "
"section in the "
"<a href=\"http://java.sun.com/docs/books/vmspec\">
"Java Virtual Machine Specification, 3rd Edition.</a> "
"section in "
"<cite>The Java™ Virtual Machine Specification</cite>. "
"Since JDWP version 1.5."
(Out
(referenceType refType "The reference type ID.")
@ -900,8 +897,8 @@ JDWP "Java(tm) Debug Wire Protocol"
(int modBits "The modifier bit flags (also known as access flags) "
"which provide additional information on the "
"field declaration. Individual flag values are "
"defined in the "
"<a href=\"http://java.sun.com/docs/books/vmspec/html/ClassFile.doc.html\">VM Specification</a>."
"defined in Chapter 4 of "
"<cite>The Java™ Virtual Machine Specification</cite>. "
"In addition, The <code>0xf0000000</code> bit identifies "
"the field as synthetic, if the synthetic attribute "
"<a href=\"#JDWP_VirtualMachine_Capabilities\">capability</a> is available.")
@ -925,9 +922,8 @@ JDWP "Java(tm) Debug Wire Protocol"
"if present, and any synthetic methods created by the compiler. "
"Methods are returned in the order they occur in the class file. "
"Generic signatures are described in the signature attribute "
"section in the "
"<a href=\"http://java.sun.com/docs/books/vmspec\">
"Java Virtual Machine Specification, 3rd Edition.</a> "
"section in "
"<cite>The Java™ Virtual Machine Specification</cite>. "
"Since JDWP version 1.5."
(Out
(referenceType refType "The reference type ID.")
@ -943,8 +939,8 @@ JDWP "Java(tm) Debug Wire Protocol"
(int modBits "The modifier bit flags (also known as access flags) "
"which provide additional information on the "
"method declaration. Individual flag values are "
"defined in the "
"<a href=\"http://java.sun.com/docs/books/vmspec/html/ClassFile.doc.html\">VM Specification</a>."
"defined in Chapter 4 of "
"<cite>The Java™ Virtual Machine Specification</cite>. "
"In addition, The <code>0xf0000000</code> bit identifies "
"the method as synthetic, if the synthetic attribute "
"<a href=\"#JDWP_VirtualMachine_Capabilities\">capability</a> is available.")
@ -1006,8 +1002,8 @@ JDWP "Java(tm) Debug Wire Protocol"
)
(Command ConstantPool=18
"Return the raw bytes of the constant pool in the format of the "
"constant_pool item of the Class File Format in the "
"Java Virtual Machine Specification. "
"constant_pool item of the Class File Format in "
"<cite>The Java™ Virtual Machine Specification</cite>. "
"<p>Since JDWP version 1.6. Requires canGetConstantPool capability - see "
"<a href=\"#JDWP_VirtualMachine_CapabilitiesNew\">CapabilitiesNew</a>.""
(Out
@ -1016,7 +1012,8 @@ JDWP "Java(tm) Debug Wire Protocol"
(Reply
(int count "Total number of constant pool entries plus one. This "
"corresponds to the constant_pool_count item of the "
"Class File Format in the Java Virtual Machine Specification. ")
"Class File Format in "
"<cite>The Java™ Virtual Machine Specification</cite>. ")
(Repeat bytes
(byte cpbytes "Raw bytes of constant pool")
)
@ -1324,7 +1321,8 @@ JDWP "Java(tm) Debug Wire Protocol"
)
)
(Command Bytecodes=3
"Retrieve the method's bytecodes as defined in the JVM Specification."
"Retrieve the method's bytecodes as defined in "
"<cite>The Java™ Virtual Machine Specification</cite>. "
"Requires canGetBytecodes capability - see "
"<a href=\"#JDWP_VirtualMachine_CapabilitiesNew\">CapabilitiesNew</a>."
(Out
@ -1379,9 +1377,8 @@ JDWP "Java(tm) Debug Wire Protocol"
"instance methods, the \"this\" reference is included in the "
"table. Also, synthetic variables may be present. "
"Generic signatures are described in the signature attribute "
"section in the "
"<a href=\"http://java.sun.com/docs/books/vmspec\">
"Java Virtual Machine Specification, 3rd Edition.</a> "
"section in "
"<cite>The Java™ Virtual Machine Specification</cite>. "
"Since JDWP version 1.5."
(Out
(referenceType refType "The class.")
@ -1970,8 +1967,9 @@ JDWP "Java(tm) Debug Wire Protocol"
"<p>"
"The method which will return early is referred to as the "
"called method. The called method is the current method (as "
"defined by the Frames section in the Java Virtual Machine "
"Specification) for the specified thread at the time this command "
"defined by the Frames section in "
"<cite>The Java™ Virtual Machine Specification</cite>) "
"for the specified thread at the time this command "
"is received. "
"<p>"
"The specified thread must be suspended. "
@ -1,5 +1,5 @@
#
# Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -38,7 +38,7 @@ else
endif

SUBDIRS =
SUBDIRS_misc = nio scripting nbproject
SUBDIRS_misc = nio scripting nbproject forkjoin
SUBDIRS_enterprise = $(WEBSERVICES_SUBDIR)
SUBDIRS_management = jmx

41
jdk/make/mksample/forkjoin/Makefile
Normal file
@ -0,0 +1,41 @@
#
# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#

#
# Makefile for building all the samples under the forkjoin subdirectory.
#

BUILDDIR = ../..
PRODUCT = java
include $(BUILDDIR)/common/Defs.gmk

SUBDIRS = mergesort
include $(BUILDDIR)/common/Subdirs.gmk

all build clean clobber::
$(SUBDIRS-loop)

clobber clean ::
$(RM) -r $(SAMPLEDIR)/forkjoin
51
jdk/make/mksample/forkjoin/mergesort/Makefile
Normal file
51
jdk/make/mksample/forkjoin/mergesort/Makefile
Normal file
@ -0,0 +1,51 @@
#
# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#

#
# Makefile for the forkjoin/mergesort sample code
#

BUILDDIR = ../../..

PRODUCT = java

include $(BUILDDIR)/common/Defs.gmk

SAMPLE_SRC_DIR = $(SHARE_SRC)/sample/forkjoin/mergesort
SAMPLE_DST_DIR = $(SAMPLEDIR)/forkjoin/mergesort

SAMPLE_FILES = \
	$(SAMPLE_DST_DIR)/MergeDemo.java \
	$(SAMPLE_DST_DIR)/MergeSort.java

all build: $(SAMPLE_FILES)

$(SAMPLE_DST_DIR)/%: $(SAMPLE_SRC_DIR)/%
	$(install-file)

clean clobber:
	$(RM) -r $(SAMPLE_DST_DIR)

.PHONY: all build clean clobber
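The mergesort sample sources that the Makefile above installs (MergeDemo.java, MergeSort.java) are not part of this diff; only the rules that copy them into $(SAMPLEDIR) are shown. As a rough sketch of the kind of fork/join merge sort such a sample typically demonstrates -- the class name, threshold, and structure below are illustrative assumptions, not the actual JDK sample code:

import java.util.Arrays;
import java.util.Random;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.RecursiveAction;

// Illustrative sketch only -- not the JDK mergesort sample itself.
public class MergeSortSketch extends RecursiveAction {
    private static final int THRESHOLD = 1 << 13; // below this size, sort sequentially
    private final int[] array;
    private final int lo, hi;                      // sort the half-open range [lo, hi)

    MergeSortSketch(int[] array, int lo, int hi) {
        this.array = array;
        this.lo = lo;
        this.hi = hi;
    }

    @Override
    protected void compute() {
        if (hi - lo <= THRESHOLD) {
            Arrays.sort(array, lo, hi);            // small range: plain sequential sort
            return;
        }
        int mid = (lo + hi) >>> 1;
        invokeAll(new MergeSortSketch(array, lo, mid),   // fork both halves and wait
                  new MergeSortSketch(array, mid, hi));
        merge(mid);                                // merge the two sorted halves in place
    }

    private void merge(int mid) {
        int[] left = Arrays.copyOfRange(array, lo, mid);
        int i = 0, j = mid, k = lo;
        while (i < left.length && j < hi) {
            array[k++] = (left[i] <= array[j]) ? left[i++] : array[j++];
        }
        while (i < left.length) {                  // right side exhausted: copy the rest of left
            array[k++] = left[i++];
        }
    }

    public static void main(String[] args) {
        int[] data = new int[1000000];
        Random random = new Random(42);
        for (int i = 0; i < data.length; i++) {
            data[i] = random.nextInt();
        }
        new ForkJoinPool().invoke(new MergeSortSketch(data, 0, data.length));
        System.out.println("sorted: " + isSorted(data));
    }

    private static boolean isSorted(int[] a) {
        for (int i = 1; i < a.length; i++) {
            if (a[i - 1] > a[i]) return false;
        }
        return true;
    }
}

Splitting stops at a sequential threshold because task overhead dominates for small ranges; the real sample presumably chooses its own cutoff and demo harness.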
@ -1,5 +1,5 @@
#
# Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -31,7 +31,7 @@ BUILDDIR = ../..
PRODUCT = java
include $(BUILDDIR)/common/Defs.gmk

SUBDIRS = file multicast server
SUBDIRS = chatserver file multicast server
include $(BUILDDIR)/common/Subdirs.gmk

all build clean clobber::

jdk/make/mksample/nio/chatserver/Makefile (new file)
@ -0,0 +1,56 @@
#
# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#

#
# Makefile for the nio/chatserver sample code
#

BUILDDIR = ../../..

PRODUCT = java

include $(BUILDDIR)/common/Defs.gmk

SAMPLE_SRC_DIR = $(SHARE_SRC)/sample/nio/chatserver
SAMPLE_DST_DIR = $(SAMPLEDIR)/nio/chatserver

SAMPLE_FILES = \
	$(SAMPLE_DST_DIR)/ChatServer.java \
	$(SAMPLE_DST_DIR)/Client.java \
	$(SAMPLE_DST_DIR)/ClientReader.java \
	$(SAMPLE_DST_DIR)/DataReader.java \
	$(SAMPLE_DST_DIR)/MessageReader.java \
	$(SAMPLE_DST_DIR)/NameReader.java \
	$(SAMPLE_DST_DIR)/README.txt

all build: $(SAMPLE_FILES)

$(SAMPLE_DST_DIR)/%: $(SAMPLE_SRC_DIR)/%
	$(install-file)

clean clobber:
	$(RM) -r $(SAMPLE_DST_DIR)

.PHONY: all build clean clobber
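As with the mergesort sample, only the installing Makefile for the chatserver sample appears here; ChatServer.java, Client.java, and the reader classes listed in SAMPLE_FILES are not shown. The sketch below is an assumption about the general shape of such a sample -- an NIO.2 (Java 7) asynchronous accept/read loop that broadcasts received bytes to every connected client -- with all names and structure invented for illustration rather than taken from the actual sources:

import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.AsynchronousServerSocketChannel;
import java.nio.channels.AsynchronousSocketChannel;
import java.nio.channels.CompletionHandler;
import java.nio.charset.Charset;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;

// Illustrative sketch only -- not the JDK chatserver sample itself.
public class ChatServerSketch {
    private static final Charset UTF8 = Charset.forName("UTF-8");
    private final Set<AsynchronousSocketChannel> clients =
            new CopyOnWriteArraySet<AsynchronousSocketChannel>();

    public static void main(String[] args) throws Exception {
        new ChatServerSketch().start(5000);
        Thread.currentThread().join();   // block main; all I/O runs on the channel group's pool threads
    }

    void start(int port) throws IOException {
        final AsynchronousServerSocketChannel listener =
                AsynchronousServerSocketChannel.open().bind(new InetSocketAddress(port));
        listener.accept(null, new CompletionHandler<AsynchronousSocketChannel, Void>() {
            @Override
            public void completed(AsynchronousSocketChannel channel, Void attachment) {
                listener.accept(null, this);           // immediately accept the next client
                clients.add(channel);
                read(channel, ByteBuffer.allocate(512));
            }

            @Override
            public void failed(Throwable exc, Void attachment) {
                // listener closed or accept failed; nothing more to do in this sketch
            }
        });
    }

    private void read(final AsynchronousSocketChannel channel, final ByteBuffer buffer) {
        channel.read(buffer, null, new CompletionHandler<Integer, Void>() {
            @Override
            public void completed(Integer bytesRead, Void attachment) {
                if (bytesRead < 0) {                   // client disconnected
                    clients.remove(channel);
                    return;
                }
                buffer.flip();
                broadcast(UTF8.decode(buffer).toString());
                buffer.clear();
                read(channel, buffer);                 // queue the next read
            }

            @Override
            public void failed(Throwable exc, Void attachment) {
                clients.remove(channel);
            }
        });
    }

    private void broadcast(String message) {
        for (AsynchronousSocketChannel client : clients) {
            client.write(ByteBuffer.wrap(message.getBytes(UTF8)));  // fire-and-forget write
        }
    }
}

The fire-and-forget writes in broadcast() are a simplification; a real server would queue writes per client to avoid overlapping write operations on the same channel.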
Some files were not shown because too many files have changed in this diff.