Merge
commit 581e4116ec

@@ -58,3 +58,4 @@ a3242906c7747b5d9bcc3d118c7c3c69aa40f4b7 jdk7-b80
8403096d1fe7ff5318df9708cfec84a3fd3e1cf9 jdk7-b81
e1176f86805fe07fd9fb9da065dc51b47712ce76 jdk7-b82
6880a3af9addb41541e80ebe8cde6f79ec402a58 jdk7-b83
2f3ea057d1ad56cf3b269cdc4de2741411151982 jdk7-b84
@@ -58,3 +58,4 @@ ec0421b5703b677e2226cf4bf7ae4eaafd8061c5 jdk7-b79
e08a42a2a94d97ea8eedb187a94dbff822c8fbba jdk7-b81
1e8c1bfad1abb4b81407a0f2645e0fb85764ca48 jdk7-b82
fde0df7a2384f7fe33204a79678989807d9c2b98 jdk7-b83
68c8961a82e4a3ad2a67991e5d834192a81eb4cd jdk7-b84
@@ -80,3 +80,4 @@ ac59d4e6dae51ac5fc31a9a4940d1857f91161b1 hs16-b08
9ab385cb0c42997e16a7761ebcd25c90560a2714 hs15-b04
fafab5d5349c7c066d677538db67a1ee0fb33bd2 hs15-b05
3f370a32906eb5ba993fabd7b4279be7f31052b9 jdk7-b83
ffc8d176b84bcfb5ac21302b4feb3b0c0d69b97c jdk7-b84
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2009

HS_MAJOR_VER=17
HS_MINOR_VER=0
HS_BUILD_NUMBER=09
HS_BUILD_NUMBER=10

JDK_MAJOR_VER=1
JDK_MINOR_VER=7
@@ -2730,9 +2730,6 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
}

Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
__ lduw(counter_addr, tmp1);
__ add(tmp1, DataLayout::counter_increment, tmp1);
__ stw(tmp1, counter_addr);
Bytecodes::Code bc = method->java_code_at_bci(bci);
// Perform additional virtual call profiling for invokevirtual and
// invokeinterface bytecodes
@@ -2822,15 +2819,23 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
__ set(DataLayout::counter_increment, tmp1);
__ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
mdo_offset_bias);
if (i < (VirtualCallData::row_limit() - 1)) {
__ br(Assembler::always, false, Assembler::pt, update_done);
__ delayed()->nop();
}
__ br(Assembler::always, false, Assembler::pt, update_done);
__ delayed()->nop();
__ bind(next_test);
}
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
__ lduw(counter_addr, tmp1);
__ add(tmp1, DataLayout::counter_increment, tmp1);
__ stw(tmp1, counter_addr);

__ bind(update_done);
}
} else {
// Static call
__ lduw(counter_addr, tmp1);
__ add(tmp1, DataLayout::counter_increment, tmp1);
__ stw(tmp1, counter_addr);
}
}
@@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1733,7 +1733,7 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
brx(Assembler::zero, false, Assembler::pn, found_null);
delayed()->nop();
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polimorphic case.
// Increment total counter to indicate polymorphic case.
increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
ba(false, done);
delayed()->nop();
@@ -851,10 +851,10 @@ void AdapterGenerator::gen_c2i_adapter(
__ set(reg2offset(r_1) + extraspace + bias, ld_off);
#else
int ld_off = reg2offset(r_1) + extraspace + bias;
#endif // _LP64
#ifdef ASSERT
G1_forced = true;
#endif // ASSERT
#endif // _LP64
r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
else __ ldx(base, ld_off, G1_scratch);
@@ -865,9 +865,11 @@ void AdapterGenerator::gen_c2i_adapter(
if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
store_c2i_object(r, base, st_off);
} else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
#ifndef _LP64
if (TieredCompilation) {
assert(G1_forced || sig_bt[i] != T_LONG, "should not use register args for longs");
}
#endif // _LP64
store_c2i_long(r, base, st_off, r_2->is_stack());
} else {
store_c2i_int(r, base, st_off);
@@ -3279,7 +3279,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
__ bind(next_test);
}
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polimorphic case.
// Increment total counter to indicate polymorphic case.
__ addl(counter_addr, DataLayout::counter_increment);

__ bind(update_done);
@@ -233,7 +233,8 @@ inline intptr_t* frame::interpreter_frame_tos_address() const {
} else {
// sp() may have been extended or shrunk by an adapter. At least
// check that we don't fall behind the legal region.
assert(last_sp < (intptr_t*) interpreter_frame_monitor_begin(), "bad tos");
// For top deoptimized frame last_sp == interpreter_frame_monitor_end.
assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
return last_sp;
}
}
@@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1308,7 +1308,7 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
if (is_virtual_call) {
jccb(Assembler::zero, found_null);
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polimorphic case.
// Increment total counter to indicate polymorphic case.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
jmp(done);
bind(found_null);
@@ -1,5 +1,5 @@
/*
* Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1341,7 +1341,7 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
if (is_virtual_call) {
jccb(Assembler::zero, found_null);
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polimorphic case.
// Increment total counter to indicate polymorphic case.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
jmp(done);
bind(found_null);
@@ -3238,17 +3238,19 @@ void TemplateTable::_new() {
__ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
__ store_klass_gap(rax, rcx); // zero klass gap for compressed oops
__ store_klass(rax, rsi); // store klass last

{
SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
// Trigger dtrace event for fastpath
__ push(atos); // save the return value
__ call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
__ pop(atos); // restore the return value

}
__ jmp(done);
}

{
SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
// Trigger dtrace event for fastpath
__ push(atos); // save the return value
__ call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
__ pop(atos); // restore the return value
}

// slow case
__ bind(slow_case);
@@ -1,6 +1,6 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007, 2008, 2009 Red Hat, Inc.
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -145,7 +145,7 @@ void CppInterpreter::main_loop(int recurse, TRAPS) {
}
else if (istate->msg() == BytecodeInterpreter::return_from_method) {
// Copy the result into the caller's frame
result_slots = type2size[method->result_type()];
result_slots = type2size[result_type_of(method)];
assert(result_slots >= 0 && result_slots <= 2, "what?");
result = istate->stack() + result_slots;
break;
@@ -394,9 +394,10 @@ void CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {

// Push our result
if (!HAS_PENDING_EXCEPTION) {
stack->set_sp(stack->sp() - type2size[method->result_type()]);
BasicType type = result_type_of(method);
stack->set_sp(stack->sp() - type2size[type]);

switch (method->result_type()) {
switch (type) {
case T_VOID:
break;
@@ -707,6 +708,26 @@ int AbstractInterpreter::BasicType_as_index(BasicType type) {
return i;
}

BasicType CppInterpreter::result_type_of(methodOop method) {
BasicType t;
switch (method->result_index()) {
case 0 : t = T_BOOLEAN; break;
case 1 : t = T_CHAR;    break;
case 2 : t = T_BYTE;    break;
case 3 : t = T_SHORT;   break;
case 4 : t = T_INT;     break;
case 5 : t = T_LONG;    break;
case 6 : t = T_VOID;    break;
case 7 : t = T_FLOAT;   break;
case 8 : t = T_DOUBLE;  break;
case 9 : t = T_OBJECT;  break;
default: ShouldNotReachHere();
}
assert(AbstractInterpreter::BasicType_as_index(t) == method->result_index(),
"out of step with AbstractInterpreter::BasicType_as_index");
return t;
}

address InterpreterGenerator::generate_empty_entry() {
if (!UseFastEmptyMethods)
return NULL;
@@ -1,6 +1,6 @@
/*
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007, 2008 Red Hat, Inc.
* Copyright 2007, 2008, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,3 +41,7 @@
private:
// Stack overflow checks
static bool stack_overflow_imminent(JavaThread *thread);

private:
// Fast result type determination
static BasicType result_type_of(methodOop method);
@@ -1,6 +1,6 @@
/*
* Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007, 2008, 2009 Red Hat, Inc.
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,7 @@ define_pd_global(intx, PreInflateSpin, 10);

define_pd_global(intx, StackYellowPages, 2);
define_pd_global(intx, StackRedPages, 1);
define_pd_global(intx, StackShadowPages, 3 LP64_ONLY(+3) DEBUG_ONLY(+3));
define_pd_global(intx, StackShadowPages, 5 LP64_ONLY(+1) DEBUG_ONLY(+3));

define_pd_global(bool, RewriteBytecodes, true);
define_pd_global(bool, RewriteFrequentPairs, true);
@@ -1,6 +1,6 @@
/*
* Copyright 2003-2004 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007, 2008 Red Hat, Inc.
* Copyright 2007, 2008, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,13 @@
"stfd %0, 0(%2)\n"
: "=f"(tmp)
: "b"(src), "b"(dst));
#elif defined(S390) && !defined(_LP64)
double tmp;
asm volatile ("ld  %0, 0(%1)\n"
"std %0, 0(%2)\n"
: "=r"(tmp)
: "a"(src), "a"(dst));
#else
*(jlong *) dst = *(jlong *) src;
#endif // PPC && !_LP64
#endif
}
@@ -1,5 +1,5 @@
/*
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -730,11 +730,12 @@ void os::print_context(outputStream *st, void *context) {
st->print(", RSI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSI]);
st->print(", RDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDI]);
st->cr();
st->print(", R8=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R8]);
st->print(  "R8=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R8]);
st->print(", R9=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R9]);
st->print(", R10=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R10]);
st->print(", R11=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R11]);
st->print(", R12=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R12]);
st->cr();
st->print(  "R12=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R12]);
st->print(", R13=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R13]);
st->print(", R14=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R14]);
st->print(", R15=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R15]);
@@ -253,7 +253,8 @@ class IRScopeDebugInfo: public CompilationResourceObj {
// reexecute allowed only for the topmost frame
bool reexecute = topmost ? should_reexecute() : false;
bool is_method_handle_invoke = false;
recorder->describe_scope(pc_offset, scope()->method(), bci(), reexecute, is_method_handle_invoke, locvals, expvals, monvals);
bool return_oop = false; // This flag will be ignored since it used only for C2 with escape analysis.
recorder->describe_scope(pc_offset, scope()->method(), bci(), reexecute, is_method_handle_invoke, return_oop, locvals, expvals, monvals);
}
};
@@ -1075,6 +1075,7 @@ enum {
};


// Below length is the # elements copied.
template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
oopDesc* dst, T* dst_addr,
int length) {
@@ -1083,22 +1084,22 @@ template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
// barrier. The assert will fail if this is not the case.
// Note that we use the non-virtual inlineable variant of write_ref_array.
BarrierSet* bs = Universe::heap()->barrier_set();
assert(bs->has_write_ref_array_opt(),
"Barrier set must have ref array opt");
assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
if (src == dst) {
// same object, no check
bs->write_ref_array_pre(dst_addr, length);
Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
(HeapWord*)(dst_addr + length)));
bs->write_ref_array((HeapWord*)dst_addr, length);
return ac_ok;
} else {
klassOop bound = objArrayKlass::cast(dst->klass())->element_klass();
klassOop stype = objArrayKlass::cast(src->klass())->element_klass();
if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
// Elements are guaranteed to be subtypes, so no check necessary
bs->write_ref_array_pre(dst_addr, length);
Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
(HeapWord*)(dst_addr + length)));
bs->write_ref_array((HeapWord*)dst_addr, length);
return ac_ok;
}
}
@@ -1162,9 +1163,16 @@ JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))
#endif

if (num == 0) return;
Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
BarrierSet* bs = Universe::heap()->barrier_set();
bs->write_ref_array(MemRegion(dst, dst + num));
assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
if (UseCompressedOops) {
bs->write_ref_array_pre((narrowOop*)dst, num);
} else {
bs->write_ref_array_pre((oop*)dst, num);
}
Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
bs->write_ref_array(dst, num);
JRT_END
@@ -445,7 +445,8 @@ ciCallProfile ciMethod::call_profile_at_bci(int bci) {
(morphism == ciCallProfile::MorphismLimit && count == 0)) {
#ifdef ASSERT
if (count > 0) {
tty->print_cr("bci: %d", bci);
this->print_short_name(tty);
tty->print_cr(" @ bci:%d", bci);
this->print_codes();
assert(false, "this call site should not be polymorphic");
}
@@ -1121,10 +1121,23 @@ class BacktraceBuilder: public StackObj {
}

void flush() {
// The following appears to have been an optimization to save from
// doing a barrier for each individual store into the _methods array,
// but rather to do it for the entire array after the series of writes.
// That optimization seems to have been lost when compressed oops was
// implemented. However, the extra card-marks below was left in place,
// but is now redundant because the individual stores into the
// _methods array already execute the barrier code. CR 6918185 has
// been filed so the original code may be restored by deferring the
// barriers until after the entire sequence of stores, thus re-enabling
// the intent of the original optimization. In the meantime the redundant
// card mark below is now disabled.
if (_dirty && _methods != NULL) {
#if 0
BarrierSet* bs = Universe::heap()->barrier_set();
assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
bs->write_ref_array((HeapWord*)_methods->base(), _methods->length());
#endif
_dirty = false;
}
}
@@ -1168,9 +1181,7 @@ class BacktraceBuilder: public StackObj {
method = mhandle();
}

_methods->obj_at_put(_index, method);
// bad for UseCompressedOops
// *_methods->obj_at_addr(_index) = method;
_methods->obj_at_put(_index, method);
_bcis->ushort_at_put(_index, bci);
_index++;
_dirty = true;
@@ -457,7 +457,8 @@ void LoaderConstraintTable::merge_loader_constraints(
}


void LoaderConstraintTable::verify(Dictionary* dictionary) {
void LoaderConstraintTable::verify(Dictionary* dictionary,
PlaceholderTable* placeholders) {
Thread *thread = Thread::current();
for (int cindex = 0; cindex < _loader_constraint_size; cindex++) {
for (LoaderConstraintEntry* probe = bucket(cindex);
@@ -472,7 +473,23 @@ void LoaderConstraintTable::verify(Dictionary* dictionary) {
unsigned int d_hash = dictionary->compute_hash(name, loader);
int d_index = dictionary->hash_to_index(d_hash);
klassOop k = dictionary->find_class(d_index, d_hash, name, loader);
guarantee(k == probe->klass(), "klass should be in dictionary");
if (k != NULL) {
// We found the class in the system dictionary, so we should
// make sure that the klassOop matches what we already have.
guarantee(k == probe->klass(), "klass should be in dictionary");
} else {
// If we don't find the class in the system dictionary, it
// has to be in the placeholders table.
unsigned int p_hash = placeholders->compute_hash(name, loader);
int p_index = placeholders->hash_to_index(p_hash);
PlaceholderEntry* entry = placeholders->get_entry(p_index, p_hash,
name, loader);

// The instanceKlass might not be on the entry, so the only
// thing we can check here is whether we were successful in
// finding the class in the placeholders table.
guarantee(entry != NULL, "klass should be in the placeholders");
}
}
for (int n = 0; n< probe->num_loaders(); n++) {
guarantee(probe->loader(n)->is_oop_or_null(), "should be oop");
@@ -84,7 +84,7 @@ public:

void purge_loader_constraints(BoolObjectClosure* is_alive);

void verify(Dictionary* dictionary);
void verify(Dictionary* dictionary, PlaceholderTable* placeholders);
#ifndef PRODUCT
void print();
#endif
@@ -2573,7 +2573,7 @@ void SystemDictionary::verify() {

// Verify constraint table
guarantee(constraints() != NULL, "Verify of loader constraints failed");
constraints()->verify(dictionary());
constraints()->verify(dictionary(), placeholders());
}
@@ -282,6 +282,7 @@ void DebugInformationRecorder::describe_scope(int pc_offset,
int         bci,
bool        reexecute,
bool        is_method_handle_invoke,
bool        return_oop,
DebugToken* locals,
DebugToken* expressions,
DebugToken* monitors) {
@@ -296,6 +297,7 @@ void DebugInformationRecorder::describe_scope(int pc_offset,
// Record flags into pcDesc.
last_pd->set_should_reexecute(reexecute);
last_pd->set_is_method_handle_invoke(is_method_handle_invoke);
last_pd->set_return_oop(return_oop);

// serialize sender stream offest
stream()->write_int(sender_stream_offset);
@@ -89,6 +89,7 @@ class DebugInformationRecorder: public ResourceObj {
int         bci,
bool        reexecute,
bool        is_method_handle_invoke = false,
bool        return_oop = false,
DebugToken* locals      = NULL,
DebugToken* expressions = NULL,
DebugToken* monitors    = NULL);
@@ -988,7 +988,8 @@ ScopeDesc* nmethod::scope_desc_at(address pc) {
PcDesc* pd = pc_desc_at(pc);
guarantee(pd != NULL, "scope must be present");
return new ScopeDesc(this, pd->scope_decode_offset(),
pd->obj_decode_offset(), pd->should_reexecute());
pd->obj_decode_offset(), pd->should_reexecute(),
pd->return_oop());
}

@@ -2010,7 +2011,10 @@ address nmethod::continuation_for_implicit_exception(address pc) {
print_pcs();
}
#endif
guarantee(cont_offset != 0, "unhandled implicit exception in compiled code");
if (cont_offset == 0) {
// Let the normal error handling report the exception
return NULL;
}
return instructions_begin() + cont_offset;
}

@@ -2156,7 +2160,8 @@ void nmethod::verify_interrupt_point(address call_site) {
PcDesc* pd = pc_desc_at(ic->end_of_call());
assert(pd != NULL, "PcDesc must exist");
for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
pd->obj_decode_offset(), pd->should_reexecute());
pd->obj_decode_offset(), pd->should_reexecute(),
pd->return_oop());
!sd->is_top(); sd = sd->sender()) {
sd->verify();
}
@@ -2421,7 +2426,8 @@ ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
PcDesc* p = pc_desc_near(begin+1);
if (p != NULL && p->real_pc(this) <= end) {
return new ScopeDesc(this, p->scope_decode_offset(),
p->obj_decode_offset(), p->should_reexecute());
p->obj_decode_offset(), p->should_reexecute(),
p->return_oop());
}
return NULL;
}
@@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,8 @@ void PcDesc::print(nmethod* code) {
tty->print("  ");
sd->method()->print_short_name(tty);
tty->print("  @%d", sd->bci());
tty->print("  reexecute=%s", sd->should_reexecute()?"true":"false");
if (sd->should_reexecute())
tty->print("  reexecute=true");
tty->cr();
}
#endif
@@ -39,6 +39,7 @@ class PcDesc VALUE_OBJ_CLASS_SPEC {
struct {
unsigned int reexecute: 1;
unsigned int is_method_handle_invoke: 1;
unsigned int return_oop: 1;
} bits;
bool operator ==(const PcDescFlags& other) { return word == other.word; }
} _flags;
@@ -76,6 +77,9 @@ class PcDesc VALUE_OBJ_CLASS_SPEC {
bool     is_method_handle_invoke() const { return _flags.bits.is_method_handle_invoke; }
void set_is_method_handle_invoke(bool z) { _flags.bits.is_method_handle_invoke = z; }

bool     return_oop() const { return _flags.bits.return_oop; }
void set_return_oop(bool z) { _flags.bits.return_oop = z; }

// Returns the real pc
address real_pc(const nmethod* code) const;
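The pcDesc.hpp hunk above grows PcDesc's packed flag word by one bit (return_oop). As a standalone illustration of that pattern only (not HotSpot code; the names below are invented), a minimal sketch of a bitfield/word union whose flags compare as a single machine word, mirroring PcDescFlags' operator==:

#include <cstdint>

// Hypothetical stand-in for a packed flags word. Reading `word` after writing
// `bits` relies on union type punning, the same idiom the original uses.
union FrameFlags {
  uint32_t word;
  struct {
    uint32_t reexecute               : 1;
    uint32_t is_method_handle_invoke : 1;
    uint32_t return_oop              : 1;  // the newly added flag in the hunk
  } bits;

  FrameFlags() : word(0) {}
  bool operator==(const FrameFlags& other) const { return word == other.word; }
};

int main() {
  FrameFlags f;
  f.bits.return_oop = 1;      // set a single flag
  FrameFlags g;               // all flags clear
  return (f == g) ? 1 : 0;    // one word comparison covers every flag; returns 0
}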
@@ -26,19 +26,21 @@
# include "incls/_scopeDesc.cpp.incl"


ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute) {
ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute, bool return_oop) {
_code          = code;
_decode_offset = decode_offset;
_objects       = decode_object_values(obj_decode_offset);
_reexecute     = reexecute;
_return_oop    = return_oop;
decode_body();
}

ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, bool reexecute) {
ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, bool reexecute, bool return_oop) {
_code          = code;
_decode_offset = decode_offset;
_objects       = decode_object_values(DebugInformationRecorder::serialized_null);
_reexecute     = reexecute;
_return_oop    = return_oop;
decode_body();
}

@@ -48,6 +50,7 @@ ScopeDesc::ScopeDesc(const ScopeDesc* parent) {
_decode_offset = parent->_sender_decode_offset;
_objects       = parent->_objects;
_reexecute     = false; //reexecute only applies to the first scope
_return_oop    = false;
decode_body();
}

@@ -52,17 +52,18 @@ class SimpleScopeDesc : public StackObj {
class ScopeDesc : public ResourceObj {
public:
// Constructor
ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute);
ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute, bool return_oop);

// Calls above, giving default value of "serialized_null" to the
// "obj_decode_offset" argument.  (We don't use a default argument to
// avoid a .hpp-.hpp dependency.)
ScopeDesc(const nmethod* code, int decode_offset, bool reexecute);
ScopeDesc(const nmethod* code, int decode_offset, bool reexecute, bool return_oop);

// JVM state
methodHandle method()   const { return _method; }
int          bci()      const { return _bci;    }
bool should_reexecute() const { return _reexecute; }
bool return_oop()       const { return _return_oop; }

GrowableArray<ScopeValue*>*   locals();
GrowableArray<ScopeValue*>*   expressions();
@@ -88,6 +89,7 @@ class ScopeDesc : public ResourceObj {
methodHandle  _method;
int           _bci;
bool          _reexecute;
bool          _return_oop;

// Decoding offsets
int _decode_offset;

@@ -300,7 +300,23 @@ jbyte* ConcurrentG1Refine::cache_insert(jbyte* card_ptr, bool* defer) {
int count;
jbyte* cached_ptr = add_card_count(card_ptr, &count, defer);
assert(cached_ptr != NULL, "bad cached card ptr");
assert(!is_young_card(cached_ptr), "shouldn't get a card in young region");

if (is_young_card(cached_ptr)) {
// The region containing cached_ptr has been freed during a clean up
// pause, reallocated, and tagged as young.
assert(cached_ptr != card_ptr, "shouldn't be");

// We've just inserted a new old-gen card pointer into the card count
// cache and evicted the previous contents of that count slot.
// The evicted card pointer has been determined to be in a young region
// and so cannot be the newly inserted card pointer (that will be
// in an old region).
// The count for newly inserted card will be set to zero during the
// insertion, so we don't want to defer the cleaning of the newly
// inserted card pointer.
assert(*defer == false, "deferring non-hot card");
return NULL;
}

// The card pointer we obtained from card count cache is not hot
// so do not store it in the cache; return it for immediate
@@ -2505,6 +2505,7 @@ G1CollectedHeap* G1CollectedHeap::heap() {
}

void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
// always_do_update_barrier = false;
assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
// Call allocation profiler
AllocationProfiler::iterate_since_last_gc();
@@ -2518,6 +2519,7 @@ void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
// is set.
COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
"derived pointer present"));
// always_do_update_barrier = true;
}

void G1CollectedHeap::do_collection_pause() {
@@ -2644,6 +2646,13 @@ G1CollectedHeap::cleanup_surviving_young_words() {

// </NEW PREDICTION>

struct PrepareForRSScanningClosure : public HeapRegionClosure {
bool doHeapRegion(HeapRegion *r) {
r->rem_set()->set_iter_claimed(0);
return false;
}
};

void
G1CollectedHeap::do_collection_pause_at_safepoint() {
if (PrintHeapAtGC) {
@@ -2782,6 +2791,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
gclog_or_tty->print_cr("\nAfter pause, heap:");
print();
#endif
PrepareForRSScanningClosure prepare_for_rs_scan;
collection_set_iterate(&prepare_for_rs_scan);

setup_surviving_young_words();

@@ -3779,22 +3790,16 @@ oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
return obj;
}

template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee, bool skip_cset_test>
template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee>
template <class T>
void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee, skip_cset_test>
void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee>
::do_oop_work(T* p) {
oop obj = oopDesc::load_decode_heap_oop(p);
assert(barrier != G1BarrierRS || obj != NULL,
"Precondition: G1BarrierRS implies obj is nonNull");

// The only time we skip the cset test is when we're scanning
// references popped from the queue. And we only push on the queue
// references that we know point into the cset, so no point in
// checking again. But we'll leave an assert here for peace of mind.
assert(!skip_cset_test || _g1->obj_in_cs(obj), "invariant");

// here the null check is implicit in the cset_fast_test() test
if (skip_cset_test || _g1->in_cset_fast_test(obj)) {
if (_g1->in_cset_fast_test(obj)) {
#if G1_REM_SET_LOGGING
gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" "
"into CS.", p, (void*) obj);
@@ -3811,7 +3816,6 @@ void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee, skip_cset_tes
}
}

// When scanning moved objs, must look at all oops.
if (barrier == G1BarrierEvac && obj != NULL) {
_par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
}
@@ -3821,8 +3825,8 @@ void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee, skip_cset_tes
}
}

template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(oop* p);
template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(narrowOop* p);
template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p);

template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
assert(has_partial_array_mask(p), "invariant");
@@ -3894,11 +3898,11 @@ public:
assert(UseCompressedOops, "Error");
narrowOop* p = (narrowOop*) stolen_task;
assert(has_partial_array_mask(p) ||
_g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "Error");
_g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "Error");
pss->push_on_queue(p);
} else {
oop* p = (oop*) stolen_task;
assert(has_partial_array_mask(p) || _g1h->obj_in_cs(*p), "Error");
assert(has_partial_array_mask(p) || _g1h->is_in_g1_reserved(*p), "Error");
pss->push_on_queue(p);
}
continue;
@@ -3960,6 +3964,7 @@ public:
G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss);
G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss);
G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss);
G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);

G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss);
G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss);
@@ -3983,7 +3988,7 @@ public:
_g1h->g1_process_strong_roots(/* not collecting perm */ false,
SharedHeap::SO_AllClasses,
scan_root_cl,
&only_scan_heap_rs_cl,
&push_heap_rs_cl,
scan_so_cl,
scan_perm_cl,
i);
@@ -1623,7 +1623,7 @@ public:
template <class T> void push_on_queue(T* ref) {
assert(ref != NULL, "invariant");
assert(has_partial_array_mask(ref) ||
_g1h->obj_in_cs(oopDesc::load_decode_heap_oop(ref)), "invariant");
_g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(ref)), "invariant");
#ifdef ASSERT
if (has_partial_array_mask(ref)) {
oop p = clear_partial_array_mask(ref);
@@ -1644,9 +1644,9 @@ public:
assert((oop*)ref != NULL, "pop_local() returned true");
assert(UseCompressedOops || !ref.is_narrow(), "Error");
assert(has_partial_array_mask((oop*)ref) ||
_g1h->obj_in_cs(ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)ref)
: oopDesc::load_decode_heap_oop((oop*)ref)),
"invariant");
_g1h->is_in_g1_reserved(ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)ref)
: oopDesc::load_decode_heap_oop((oop*)ref)),
"invariant");
IF_G1_DETAILED_STATS(note_pop());
} else {
StarTask null_task;
@@ -1659,9 +1659,9 @@ public:
assert((oop*)new_ref != NULL, "pop() from a local non-empty stack");
assert(UseCompressedOops || !new_ref.is_narrow(), "Error");
assert(has_partial_array_mask((oop*)new_ref) ||
_g1h->obj_in_cs(new_ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)new_ref)
: oopDesc::load_decode_heap_oop((oop*)new_ref)),
"invariant");
_g1h->is_in_g1_reserved(new_ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)new_ref)
: oopDesc::load_decode_heap_oop((oop*)new_ref)),
"invariant");
ref = new_ref;
}

@@ -1825,12 +1825,12 @@ public:
assert(UseCompressedOops, "Error");
narrowOop* p = (narrowOop*)ref_to_scan;
assert(!has_partial_array_mask(p) &&
_g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "sanity");
_g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
deal_with_reference(p);
} else {
oop* p = (oop*)ref_to_scan;
assert((has_partial_array_mask(p) && _g1h->obj_in_cs(clear_partial_array_mask(p))) ||
_g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "sanity");
assert((has_partial_array_mask(p) && _g1h->is_in_g1_reserved(clear_partial_array_mask(p))) ||
_g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
deal_with_reference(p);
}
}
@@ -1844,12 +1844,12 @@ public:
assert(UseCompressedOops, "Error");
narrowOop* p = (narrowOop*)ref_to_scan;
assert(!has_partial_array_mask(p) &&
_g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "sanity");
_g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
deal_with_reference(p);
} else {
oop* p = (oop*)ref_to_scan;
assert((has_partial_array_mask(p) && _g1h->obj_in_cs(clear_partial_array_mask(p))) ||
_g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "sanity");
_g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
deal_with_reference(p);
}
}

@@ -205,6 +205,7 @@ G1CollectorPolicy::G1CollectorPolicy() :
// policy is created before the heap, we have to set this up here,
// so it's done as soon as possible.
HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
HeapRegionRemSet::setup_remset_size();

_recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
_prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
@@ -53,6 +53,15 @@ public:
bool apply_to_weak_ref_discovered_field() { return true; }
};

class G1ParPushHeapRSClosure : public G1ParClosureSuper {
public:
G1ParPushHeapRSClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
G1ParClosureSuper(g1, par_scan_state) { }
template <class T> void do_oop_nv(T* p);
virtual void do_oop(oop* p)          { do_oop_nv(p); }
virtual void do_oop(narrowOop* p)    { do_oop_nv(p); }
};

class G1ParScanClosure : public G1ParClosureSuper {
public:
G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
@@ -100,7 +109,7 @@ public:
};

template<bool do_gen_barrier, G1Barrier barrier,
bool do_mark_forwardee, bool skip_cset_test>
bool do_mark_forwardee>
class G1ParCopyClosure : public G1ParCopyHelper {
G1ParScanClosure _scanner;
template <class T> void do_oop_work(T* p);
@@ -116,12 +125,13 @@ public:
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

typedef G1ParCopyClosure<false, G1BarrierNone, false, false> G1ParScanExtRootClosure;
typedef G1ParCopyClosure<true,  G1BarrierNone, false, false> G1ParScanPermClosure;
typedef G1ParCopyClosure<false, G1BarrierRS,   false, false> G1ParScanHeapRSClosure;
typedef G1ParCopyClosure<false, G1BarrierNone, true,  false> G1ParScanAndMarkExtRootClosure;
typedef G1ParCopyClosure<true,  G1BarrierNone, true,  false> G1ParScanAndMarkPermClosure;
typedef G1ParCopyClosure<false, G1BarrierRS,   true,  false> G1ParScanAndMarkHeapRSClosure;
typedef G1ParCopyClosure<false, G1BarrierNone, false> G1ParScanExtRootClosure;
typedef G1ParCopyClosure<true,  G1BarrierNone, false> G1ParScanPermClosure;
typedef G1ParCopyClosure<false, G1BarrierRS,   false> G1ParScanHeapRSClosure;
typedef G1ParCopyClosure<false, G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
typedef G1ParCopyClosure<true,  G1BarrierNone, true> G1ParScanAndMarkPermClosure;
typedef G1ParCopyClosure<false, G1BarrierRS,   true> G1ParScanAndMarkHeapRSClosure;

// This is the only case when we set skip_cset_test. Basically, this
// closure is (should?) only be called directly while we're draining
// the overflow and task queues. In that case we know that the
@@ -132,7 +142,7 @@ typedef G1ParCopyClosure<false, G1BarrierRS, true, false> G1ParScanAndMarkHea
// We need a separate closure to handle references during evacuation
// failure processing, as we cannot asume that the reference already
// points into the collection set (like G1ParScanHeapEvacClosure does).
typedef G1ParCopyClosure<false, G1BarrierEvac, false, false> G1ParScanHeapEvacFailureClosure;
typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;

class FilterIntoCSClosure: public OopClosure {
G1CollectedHeap* _g1;

@@ -104,3 +104,16 @@ template <class T> inline void G1ParScanClosure::do_oop_nv(T* p) {
}
}
}

template <class T> inline void G1ParPushHeapRSClosure::do_oop_nv(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);

if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if (_g1->in_cset_fast_test(obj)) {
Prefetch::write(obj->mark_addr(), 0);
Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
_par_scan_state->push_on_queue(p);
}
}
}
@@ -155,8 +155,8 @@ class ScanRSClosure : public HeapRegionClosure {
G1BlockOffsetSharedArray* _bot_shared;
CardTableModRefBS *_ct_bs;
int _worker_i;
int _block_size;
bool _try_claimed;
size_t _min_skip_distance, _max_skip_distance;
public:
ScanRSClosure(OopsInHeapRegionClosure* oc, int worker_i) :
_oc(oc),
@@ -168,8 +168,7 @@ public:
_g1h = G1CollectedHeap::heap();
_bot_shared = _g1h->bot_shared();
_ct_bs = (CardTableModRefBS*) (_g1h->barrier_set());
_min_skip_distance = 16;
_max_skip_distance = 2 * _g1h->n_par_threads() * _min_skip_distance;
_block_size = MAX2<int>(G1RSetScanBlockSize, 1);
}

void set_try_claimed() { _try_claimed = true; }
@@ -225,12 +224,15 @@ public:
HeapRegionRemSetIterator* iter = _g1h->rem_set_iterator(_worker_i);
hrrs->init_iterator(iter);
size_t card_index;
size_t skip_distance = 0, current_card = 0, jump_to_card = 0;
while (iter->has_next(card_index)) {
if (current_card < jump_to_card) {
++current_card;
continue;

// We claim cards in block so as to recude the contention. The block size is determined by
// the G1RSetScanBlockSize parameter.
size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
for (size_t current_card = 0; iter->has_next(card_index); current_card++) {
if (current_card >= jump_to_card + _block_size) {
jump_to_card = hrrs->iter_claimed_next(_block_size);
}
if (current_card < jump_to_card) continue;
HeapWord* card_start = _g1h->bot_shared()->address_for_index(card_index);
#if 0
gclog_or_tty->print("Rem set iteration yielded card [" PTR_FORMAT ", " PTR_FORMAT ").\n",
@@ -247,22 +249,14 @@ public:

// If the card is dirty, then we will scan it during updateRS.
if (!card_region->in_collection_set() && !_ct_bs->is_card_dirty(card_index)) {
if (!_ct_bs->is_card_claimed(card_index) && _ct_bs->claim_card(card_index)) {
scanCard(card_index, card_region);
} else if (_try_claimed) {
if (jump_to_card == 0 || jump_to_card != current_card) {
// We did some useful work in the previous iteration.
// Decrease the distance.
skip_distance = MAX2(skip_distance >> 1, _min_skip_distance);
} else {
// Previous iteration resulted in a claim failure.
// Increase the distance.
skip_distance = MIN2(skip_distance << 1, _max_skip_distance);
}
jump_to_card = current_card + skip_distance;
}
// We make the card as "claimed" lazily (so races are possible but they're benign),
// which reduces the number of duplicate scans (the rsets of the regions in the cset
// can intersect).
if (!_ct_bs->is_card_claimed(card_index)) {
_ct_bs->set_card_claimed(card_index);
scanCard(card_index, card_region);
}
}
++current_card;
}
if (!_try_claimed) {
hrrs->set_iter_complete();
@@ -299,30 +293,18 @@ void HRInto_G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
double rs_time_start = os::elapsedTime();
HeapRegion *startRegion = calculateStartRegion(worker_i);

BufferingOopsInHeapRegionClosure boc(oc);
ScanRSClosure scanRScl(&boc, worker_i);
ScanRSClosure scanRScl(oc, worker_i);
_g1->collection_set_iterate_from(startRegion, &scanRScl);
scanRScl.set_try_claimed();
_g1->collection_set_iterate_from(startRegion, &scanRScl);

boc.done();
double closure_app_time_sec = boc.closure_app_seconds();
double scan_rs_time_sec = (os::elapsedTime() - rs_time_start) -
closure_app_time_sec;
double closure_app_time_ms = closure_app_time_sec * 1000.0;
double scan_rs_time_sec = os::elapsedTime() - rs_time_start;

assert( _cards_scanned != NULL, "invariant" );
_cards_scanned[worker_i] = scanRScl.cards_done();

_g1p->record_scan_rs_start_time(worker_i, rs_time_start * 1000.0);
_g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);

double scan_new_refs_time_ms = _g1p->get_scan_new_refs_time(worker_i);
if (scan_new_refs_time_ms > 0.0) {
closure_app_time_ms += scan_new_refs_time_ms;
}

_g1p->record_obj_copy_time(worker_i, closure_app_time_ms);
}

void HRInto_G1RemSet::updateRS(int worker_i) {
@@ -449,9 +431,8 @@ HRInto_G1RemSet::scanNewRefsRS_work(OopsInHeapRegionClosure* oc,
oc->do_oop(p);
}
}
_g1p->record_scan_new_refs_time(worker_i,
(os::elapsedTime() - scan_new_refs_start_sec)
* 1000.0);
double scan_new_refs_time_ms = (os::elapsedTime() - scan_new_refs_start_sec) * 1000.0;
_g1p->record_scan_new_refs_time(worker_i, scan_new_refs_time_ms);
}

void HRInto_G1RemSet::cleanupHRRS() {
@@ -207,8 +207,20 @@
develop(bool, G1PrintOopAppls, false,                                     \
"When true, print applications of closures to external locs.")           \
\
develop(intx, G1LogRSRegionEntries, 7,                                    \
"Log_2 of max number of regions for which we keep bitmaps.")             \
develop(intx, G1RSetRegionEntriesBase, 256,                               \
"Max number of regions in a fine-grain table per MB.")                   \
\
product(intx, G1RSetRegionEntries, 0,                                     \
"Max number of regions for which we keep bitmaps."                       \
"Will be set ergonomically by default")                                  \
\
develop(intx, G1RSetSparseRegionEntriesBase, 4,                           \
"Max number of entries per region in a sparse table "                    \
"per MB.")                                                                \
\
product(intx, G1RSetSparseRegionEntries, 0,                               \
"Max number of entries per region in a sparse table."                    \
"Will be set ergonomically by default.")                                 \
\
develop(bool, G1RecordHRRSOops, false,                                    \
"When true, record recent calls to rem set operations.")                 \
@@ -293,6 +305,10 @@
develop(bool, G1VerifyCTCleanup, false,                                   \
"Verify card table cleanup.")                                             \
\
product(uintx, G1RSetScanBlockSize, 64,                                   \
"Size of a work unit of cards claimed by a worker thread"                \
"during RSet scanning.")                                                  \
\
develop(bool, ReduceInitialCardMarksForG1, false,                         \
"When ReduceInitialCardMarks is true, this flag setting "                \
" controls whether G1 allows the RICM optimization")
@@ -33,11 +33,12 @@ enum G1Barrier {
};

template<bool do_gen_barrier, G1Barrier barrier,
bool do_mark_forwardee, bool skip_cset_test>
bool do_mark_forwardee>
class G1ParCopyClosure;
class G1ParScanClosure;
class G1ParPushHeapRSClosure;

typedef G1ParCopyClosure<false, G1BarrierEvac, false, true> G1ParScanHeapEvacClosure;
typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;

class FilterIntoCSClosure;
class FilterOutOfRegionClosure;
@@ -51,6 +52,7 @@ class FilterAndMarkInHeapRegionAndIntoCSClosure;
#define FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES(f) \
f(G1ParScanHeapEvacClosure,_nv)                         \
f(G1ParScanClosure,_nv)                                 \
f(G1ParPushHeapRSClosure,_nv)                           \
f(FilterIntoCSClosure,_nv)                              \
f(FilterOutOfRegionClosure,_nv)                         \
f(FilterInHeapRegionAndIntoCSClosure,_nv)               \
@@ -258,42 +258,6 @@ class PosParPRT: public PerRegionTable {
ReserveParTableExpansion = 1
};

void par_expand() {
int n = HeapRegionRemSet::num_par_rem_sets()-1;
if (n <= 0) return;
if (_par_tables == NULL) {
PerRegionTable* res =
(PerRegionTable*)
Atomic::cmpxchg_ptr((PerRegionTable*)ReserveParTableExpansion,
&_par_tables, NULL);
if (res != NULL) return;
// Otherwise, we reserved the right to do the expansion.

PerRegionTable** ptables = NEW_C_HEAP_ARRAY(PerRegionTable*, n);
for (int i = 0; i < n; i++) {
PerRegionTable* ptable = PerRegionTable::alloc(hr());
ptables[i] = ptable;
}
// Here we do not need an atomic.
_par_tables = ptables;
#if COUNT_PAR_EXPANDS
print_par_expand();
#endif
// We must put this table on the expanded list.
PosParPRT* exp_head = _par_expanded_list;
while (true) {
set_next_par_expanded(exp_head);
PosParPRT* res =
(PosParPRT*)
Atomic::cmpxchg_ptr(this, &_par_expanded_list, exp_head);
if (res == exp_head) return;
// Otherwise.
exp_head = res;
}
ShouldNotReachHere();
}
}

void par_contract() {
assert(_par_tables != NULL, "Precondition.");
int n = HeapRegionRemSet::num_par_rem_sets()-1;
@@ -391,13 +355,49 @@ public:
void set_next(PosParPRT* nxt) { _next = nxt; }
PosParPRT** next_addr() { return &_next; }

bool should_expand(int tid) {
return par_tables() == NULL && tid > 0 && hr()->is_gc_alloc_region();
}

void par_expand() {
int n = HeapRegionRemSet::num_par_rem_sets()-1;
if (n <= 0) return;
if (_par_tables == NULL) {
PerRegionTable* res =
(PerRegionTable*)
Atomic::cmpxchg_ptr((PerRegionTable*)ReserveParTableExpansion,
&_par_tables, NULL);
if (res != NULL) return;
// Otherwise, we reserved the right to do the expansion.

PerRegionTable** ptables = NEW_C_HEAP_ARRAY(PerRegionTable*, n);
for (int i = 0; i < n; i++) {
PerRegionTable* ptable = PerRegionTable::alloc(hr());
ptables[i] = ptable;
}
// Here we do not need an atomic.
_par_tables = ptables;
#if COUNT_PAR_EXPANDS
print_par_expand();
#endif
// We must put this table on the expanded list.
PosParPRT* exp_head = _par_expanded_list;
while (true) {
set_next_par_expanded(exp_head);
PosParPRT* res =
(PosParPRT*)
Atomic::cmpxchg_ptr(this, &_par_expanded_list, exp_head);
if (res == exp_head) return;
// Otherwise.
exp_head = res;
}
ShouldNotReachHere();
}
}

void add_reference(OopOrNarrowOopStar from, int tid) {
// Expand if necessary.
PerRegionTable** pt = par_tables();
if (par_tables() == NULL && tid > 0 && hr()->is_gc_alloc_region()) {
par_expand();
pt = par_tables();
}
if (pt != NULL) {
// We always have to assume that mods to table 0 are in parallel,
// because of the claiming scheme in parallel expansion.  A thread
@@ -505,12 +505,13 @@ OtherRegionsTable::OtherRegionsTable(HeapRegion* hr) :
typedef PosParPRT* PosParPRTPtr;
if (_max_fine_entries == 0) {
assert(_mod_max_fine_entries_mask == 0, "Both or none.");
_max_fine_entries = (size_t)(1 << G1LogRSRegionEntries);
size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
_max_fine_entries = (size_t)(1 << max_entries_log);
_mod_max_fine_entries_mask = _max_fine_entries - 1;
#if SAMPLE_FOR_EVICTION
assert(_fine_eviction_sample_size == 0
&& _fine_eviction_stride == 0, "All init at same time.");
_fine_eviction_sample_size = MAX2((size_t)4, (size_t)G1LogRSRegionEntries);
_fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
_fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
#endif
}
@@ -655,13 +656,6 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
#endif
}

// Otherwise, transfer from sparse to fine-grain.
CardIdx_t cards[SparsePRTEntry::CardsPerEntry];
if (G1HRRSUseSparseTable) {
bool res = _sparse_table.get_cards(from_hrs_ind, &cards[0]);
assert(res, "There should have been an entry");
}

if (_n_fine_entries == _max_fine_entries) {
prt = delete_region_table();
} else {
@@ -676,10 +670,12 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
_fine_grain_regions[ind] = prt;
_n_fine_entries++;

// Add in the cards from the sparse table.
if (G1HRRSUseSparseTable) {
for (int i = 0; i < SparsePRTEntry::CardsPerEntry; i++) {
CardIdx_t c = cards[i];
// Transfer from sparse to fine-grain.
SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrs_ind);
assert(sprt_entry != NULL, "There should have been an entry");
for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
CardIdx_t c = sprt_entry->card(i);
if (c != SparsePRTEntry::NullEntry) {
prt->add_card(c);
}
@@ -696,7 +692,21 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
// OtherRegionsTable for why this is OK.
assert(prt != NULL, "Inv");

prt->add_reference(from, tid);
if (prt->should_expand(tid)) {
MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
HeapRegion* prt_hr = prt->hr();
if (prt_hr == from_hr) {
// Make sure the table still corresponds to the same region
prt->par_expand();
prt->add_reference(from, tid);
}
// else: The table has been concurrently coarsened, evicted, and
// the table data structure re-used for another table. So, we
// don't need to add the reference any more given that the table
// has been coarsened and the whole region will be scanned anyway.
} else {
prt->add_reference(from, tid);
}
if (G1RecordHRRSOops) {
HeapRegionRemSet::record(hr(), from);
#if HRRS_VERBOSE
@@ -1070,6 +1080,19 @@ HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
{}


void HeapRegionRemSet::setup_remset_size() {
// Setup sparse and fine-grain tables sizes.
// table_size = base * (log(region_size / 1M) + 1)
int region_size_log_mb = MAX2((int)HeapRegion::LogOfHRGrainBytes - (int)LOG_M, 0);
if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
}
if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
}
guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
}

void HeapRegionRemSet::init_for_par_iteration() {
_iter_state = Unclaimed;
}
@ -1385,7 +1408,7 @@ void HeapRegionRemSet::test() {
|
||||
os::sleep(Thread::current(), (jlong)5000, false);
|
||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||
|
||||
// Run with "-XX:G1LogRSRegionEntries=2", so that 1 and 5 end up in same
|
||||
// Run with "-XX:G1LogRSetRegionEntries=2", so that 1 and 5 end up in same
|
||||
// hash bucket.
|
||||
HeapRegion* hr0 = g1h->region_at(0);
|
||||
HeapRegion* hr1 = g1h->region_at(1);
|
||||
|
@ -187,7 +187,8 @@ private:
|
||||
void clear_outgoing_entries();
|
||||
|
||||
enum ParIterState { Unclaimed, Claimed, Complete };
|
||||
ParIterState _iter_state;
|
||||
volatile ParIterState _iter_state;
|
||||
volatile jlong _iter_claimed;
|
||||
|
||||
// Unused unless G1RecordHRRSOops is true.
|
||||
|
||||
@ -209,6 +210,7 @@ public:
|
||||
HeapRegion* hr);
|
||||
|
||||
static int num_par_rem_sets();
|
||||
static void setup_remset_size();
|
||||
|
||||
HeapRegion* hr() const {
|
||||
return _other_regions.hr();
|
||||
@ -272,6 +274,19 @@ public:
|
||||
// Returns "true" iff the region's iteration is complete.
|
||||
bool iter_is_complete();
|
||||
|
||||
// Support for claiming blocks of cards during iteration
|
||||
void set_iter_claimed(size_t x) { _iter_claimed = (jlong)x; }
|
||||
size_t iter_claimed() const { return (size_t)_iter_claimed; }
|
||||
// Claim the next block of cards
|
||||
size_t iter_claimed_next(size_t step) {
|
||||
size_t current, next;
|
||||
do {
|
||||
current = iter_claimed();
|
||||
next = current + step;
|
||||
} while (Atomic::cmpxchg((jlong)next, &_iter_claimed, (jlong)current) != (jlong)current);
|
||||
return current;
|
||||
}
|
||||
|
||||
// Initialize the given iterator to iterate over this rem set.
|
||||
void init_iterator(HeapRegionRemSetIterator* iter) const;
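For illustration only: a minimal standalone sketch, outside the VM, of the claim-by-CAS pattern that iter_claimed_next() above implements, i.e. how parallel workers would carve a card range into blocks without locking. The block size and card count are arbitrary:

// claim_sketch.cpp -- same pattern as iter_claimed_next(), using std::atomic
#include <algorithm>
#include <atomic>
#include <cstdio>

static std::atomic<size_t> iter_claimed{0};

static size_t iter_claimed_next(size_t step) {
  size_t current, next;
  do {
    current = iter_claimed.load();
    next = current + step;
  } while (!iter_claimed.compare_exchange_weak(current, next));  // retry on races
  return current;  // start of the block this caller now owns
}

int main() {
  const size_t total_cards = 100, block = 32;
  for (;;) {
    size_t start = iter_claimed_next(block);
    if (start >= total_cards) break;               // everything already claimed
    size_t end = std::min(start + block, total_cards);
    printf("claimed cards [%zu, %zu)\n", start, end);
  }
  return 0;
}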
@ -27,7 +27,7 @@

#define SPARSE_PRT_VERBOSE 0

#define UNROLL_CARD_LOOPS 1
#define UNROLL_CARD_LOOPS 1

void SparsePRT::init_iterator(SparsePRTIter* sprt_iter) {
sprt_iter->init(this);
@ -36,27 +36,32 @@ void SparsePRT::init_iterator(SparsePRTIter* sprt_iter) {
void SparsePRTEntry::init(RegionIdx_t region_ind) {
_region_ind = region_ind;
_next_index = NullEntry;

#if UNROLL_CARD_LOOPS
assert(CardsPerEntry == 4, "Assumption. If changes, un-unroll.");
_cards[0] = NullEntry;
_cards[1] = NullEntry;
_cards[2] = NullEntry;
_cards[3] = NullEntry;
assert((cards_num() & (UnrollFactor - 1)) == 0, "Invalid number of cards in the entry");
for (int i = 0; i < cards_num(); i += UnrollFactor) {
_cards[i] = NullEntry;
_cards[i + 1] = NullEntry;
_cards[i + 2] = NullEntry;
_cards[i + 3] = NullEntry;
}
#else
for (int i = 0; i < CardsPerEntry; i++)
for (int i = 0; i < cards_num(); i++)
_cards[i] = NullEntry;
#endif
}

bool SparsePRTEntry::contains_card(CardIdx_t card_index) const {
#if UNROLL_CARD_LOOPS
assert(CardsPerEntry == 4, "Assumption. If changes, un-unroll.");
if (_cards[0] == card_index) return true;
if (_cards[1] == card_index) return true;
if (_cards[2] == card_index) return true;
if (_cards[3] == card_index) return true;
assert((cards_num() & (UnrollFactor - 1)) == 0, "Invalid number of cards in the entry");
for (int i = 0; i < cards_num(); i += UnrollFactor) {
if (_cards[i] == card_index ||
_cards[i + 1] == card_index ||
_cards[i + 2] == card_index ||
_cards[i + 3] == card_index) return true;
}
#else
for (int i = 0; i < CardsPerEntry; i++) {
for (int i = 0; i < cards_num(); i++) {
if (_cards[i] == card_index) return true;
}
#endif
@ -67,14 +72,16 @@ bool SparsePRTEntry::contains_card(CardIdx_t card_index) const {
int SparsePRTEntry::num_valid_cards() const {
int sum = 0;
#if UNROLL_CARD_LOOPS
assert(CardsPerEntry == 4, "Assumption. If changes, un-unroll.");
if (_cards[0] != NullEntry) sum++;
if (_cards[1] != NullEntry) sum++;
if (_cards[2] != NullEntry) sum++;
if (_cards[3] != NullEntry) sum++;
assert((cards_num() & (UnrollFactor - 1)) == 0, "Invalid number of cards in the entry");
for (int i = 0; i < cards_num(); i += UnrollFactor) {
sum += (_cards[i] != NullEntry);
sum += (_cards[i + 1] != NullEntry);
sum += (_cards[i + 2] != NullEntry);
sum += (_cards[i + 3] != NullEntry);
}
#else
for (int i = 0; i < CardsPerEntry; i++) {
if (_cards[i] != NulLEntry) sum++;
for (int i = 0; i < cards_num(); i++) {
sum += (_cards[i] != NullEntry);
}
#endif
// Otherwise, we're full.
@ -83,27 +90,27 @@ int SparsePRTEntry::num_valid_cards() const {

SparsePRTEntry::AddCardResult SparsePRTEntry::add_card(CardIdx_t card_index) {
#if UNROLL_CARD_LOOPS
assert(CardsPerEntry == 4, "Assumption. If changes, un-unroll.");
CardIdx_t c = _cards[0];
if (c == card_index) return found;
if (c == NullEntry) { _cards[0] = card_index; return added; }
c = _cards[1];
if (c == card_index) return found;
if (c == NullEntry) { _cards[1] = card_index; return added; }
c = _cards[2];
if (c == card_index) return found;
if (c == NullEntry) { _cards[2] = card_index; return added; }
c = _cards[3];
if (c == card_index) return found;
if (c == NullEntry) { _cards[3] = card_index; return added; }
assert((cards_num() & (UnrollFactor - 1)) == 0, "Invalid number of cards in the entry");
CardIdx_t c;
for (int i = 0; i < cards_num(); i += UnrollFactor) {
c = _cards[i];
if (c == card_index) return found;
if (c == NullEntry) { _cards[i] = card_index; return added; }
c = _cards[i + 1];
if (c == card_index) return found;
if (c == NullEntry) { _cards[i + 1] = card_index; return added; }
c = _cards[i + 2];
if (c == card_index) return found;
if (c == NullEntry) { _cards[i + 2] = card_index; return added; }
c = _cards[i + 3];
if (c == card_index) return found;
if (c == NullEntry) { _cards[i + 3] = card_index; return added; }
}
#else
for (int i = 0; i < CardsPerEntry; i++) {
for (int i = 0; i < cards_num(); i++) {
CardIdx_t c = _cards[i];
if (c == card_index) return found;
if (c == NullEntry) {
_cards[i] = card_index;
return added;
}
if (c == NullEntry) { _cards[i] = card_index; return added; }
}
#endif
// Otherwise, we're full.
@ -112,13 +119,15 @@ SparsePRTEntry::AddCardResult SparsePRTEntry::add_card(CardIdx_t card_index) {

void SparsePRTEntry::copy_cards(CardIdx_t* cards) const {
#if UNROLL_CARD_LOOPS
assert(CardsPerEntry == 4, "Assumption. If changes, un-unroll.");
cards[0] = _cards[0];
cards[1] = _cards[1];
cards[2] = _cards[2];
cards[3] = _cards[3];
assert((cards_num() & (UnrollFactor - 1)) == 0, "Invalid number of cards in the entry");
for (int i = 0; i < cards_num(); i += UnrollFactor) {
cards[i] = _cards[i];
cards[i + 1] = _cards[i + 1];
cards[i + 2] = _cards[i + 2];
cards[i + 3] = _cards[i + 3];
}
#else
for (int i = 0; i < CardsPerEntry; i++) {
for (int i = 0; i < cards_num(); i++) {
cards[i] = _cards[i];
}
#endif
@ -133,7 +142,7 @@ void SparsePRTEntry::copy_cards(SparsePRTEntry* e) const {
RSHashTable::RSHashTable(size_t capacity) :
_capacity(capacity), _capacity_mask(capacity-1),
_occupied_entries(0), _occupied_cards(0),
_entries(NEW_C_HEAP_ARRAY(SparsePRTEntry, capacity)),
_entries((SparsePRTEntry*)NEW_C_HEAP_ARRAY(char, SparsePRTEntry::size() * capacity)),
_buckets(NEW_C_HEAP_ARRAY(int, capacity)),
_free_list(NullEntry), _free_region(0)
{
@ -161,8 +170,8 @@ void RSHashTable::clear() {
"_capacity too large");

// This will put -1 == NullEntry in the key field of all entries.
memset(_entries, -1, _capacity * sizeof(SparsePRTEntry));
memset(_buckets, -1, _capacity * sizeof(int));
memset(_entries, NullEntry, _capacity * SparsePRTEntry::size());
memset(_buckets, NullEntry, _capacity * sizeof(int));
_free_list = NullEntry;
_free_region = 0;
}
@ -175,8 +184,8 @@ bool RSHashTable::add_card(RegionIdx_t region_ind, CardIdx_t card_index) {
if (res == SparsePRTEntry::added) _occupied_cards++;
#if SPARSE_PRT_VERBOSE
gclog_or_tty->print_cr(" after add_card[%d]: valid-cards = %d.",
pointer_delta(e, _entries, sizeof(SparsePRTEntry)),
e->num_valid_cards());
pointer_delta(e, _entries, SparsePRTEntry::size()),
e->num_valid_cards());
#endif
assert(e->num_valid_cards() > 0, "Postcondition");
return res != SparsePRTEntry::overflow;
@ -199,6 +208,22 @@ bool RSHashTable::get_cards(RegionIdx_t region_ind, CardIdx_t* cards) {
return true;
}

SparsePRTEntry* RSHashTable::get_entry(RegionIdx_t region_ind) {
int ind = (int) (region_ind & capacity_mask());
int cur_ind = _buckets[ind];
SparsePRTEntry* cur;
while (cur_ind != NullEntry &&
(cur = entry(cur_ind))->r_ind() != region_ind) {
cur_ind = cur->next_index();
}

if (cur_ind == NullEntry) return NULL;
// Otherwise...
assert(cur->r_ind() == region_ind, "Postcondition of loop + test above.");
assert(cur->num_valid_cards() > 0, "Inv");
return cur;
}

bool RSHashTable::delete_entry(RegionIdx_t region_ind) {
int ind = (int) (region_ind & capacity_mask());
int* prev_loc = &_buckets[ind];
@ -225,20 +250,8 @@ RSHashTable::entry_for_region_ind(RegionIdx_t region_ind) const {
int ind = (int) (region_ind & capacity_mask());
int cur_ind = _buckets[ind];
SparsePRTEntry* cur;
// XXX
// int k = 0;
while (cur_ind != NullEntry &&
(cur = entry(cur_ind))->r_ind() != region_ind) {
/*
k++;
if (k > 10) {
gclog_or_tty->print_cr("RSHashTable::entry_for_region_ind(%d): "
"k = %d, cur_ind = %d.", region_ind, k, cur_ind);
if (k >= 1000) {
while (1) ;
}
}
*/
cur_ind = cur->next_index();
}

@ -319,7 +332,7 @@ size_t /* RSHashTable:: */ RSHashTableIter::compute_card_ind(CardIdx_t ci) {
bool /* RSHashTable:: */ RSHashTableIter::has_next(size_t& card_index) {
_card_ind++;
CardIdx_t ci;
if (_card_ind < SparsePRTEntry::CardsPerEntry &&
if (_card_ind < SparsePRTEntry::cards_num() &&
((ci = _rsht->entry(_bl_ind)->card(_card_ind)) !=
SparsePRTEntry::NullEntry)) {
card_index = compute_card_ind(ci);
@ -359,7 +372,7 @@ bool RSHashTable::contains_card(RegionIdx_t region_index, CardIdx_t card_index)

size_t RSHashTable::mem_size() const {
return sizeof(this) +
capacity() * (sizeof(SparsePRTEntry) + sizeof(int));
capacity() * (SparsePRTEntry::size() + sizeof(int));
}

// ----------------------------------------------------------------------
@ -446,6 +459,10 @@ bool SparsePRT::get_cards(RegionIdx_t region_id, CardIdx_t* cards) {
return _next->get_cards(region_id, cards);
}

SparsePRTEntry* SparsePRT::get_entry(RegionIdx_t region_id) {
return _next->get_entry(region_id);
}

bool SparsePRT::delete_entry(RegionIdx_t region_id) {
return _next->delete_entry(region_id);
}

@ -32,21 +32,28 @@
// insertions only enqueue old versions for deletions, but do not delete
// old versions synchronously.

class SparsePRTEntry: public CHeapObj {
public:

enum SomePublicConstants {
CardsPerEntry = 4,
NullEntry = -1
NullEntry = -1,
UnrollFactor = 4
};

private:
RegionIdx_t _region_ind;
int _next_index;
CardIdx_t _cards[CardsPerEntry];

CardIdx_t _cards[1];
// WARNING: Don't put any data members beyond this line. Card array has, in fact, variable length.
// It should always be the last data member.
public:
// Returns the size of the entry, used for entry allocation.
static size_t size() { return sizeof(SparsePRTEntry) + sizeof(CardIdx_t) * (cards_num() - 1); }
// Returns the size of the card array.
static int cards_num() {
// The number of cards should be a multiple of 4, because that's our current
// unrolling factor.
static const int s = MAX2<int>(G1RSetSparseRegionEntries & ~(UnrollFactor - 1), UnrollFactor);
return s;
}

// Set the region_ind to the given value, and delete all cards.
inline void init(RegionIdx_t region_ind);
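For illustration only: a minimal standalone sketch of the variable-length-entry idiom the new SparsePRTEntry layout relies on (the classic "struct hack"): the card array is declared with one element, the real length is folded into size(), and entries are allocated and indexed by byte size rather than by sizeof. The field names and the fixed cards_num() of 8 below are assumptions, not the VM's real sizing:

// struct_hack_sketch.cpp
#include <cstdlib>
#include <cstring>

typedef int CardIdx_t;

struct Entry {
  int _region_ind;
  int _next_index;
  CardIdx_t _cards[1];                       // really cards_num() elements long
  static int cards_num() { return 8; }       // assumed; the VM derives this from a flag
  static size_t size() { return sizeof(Entry) + sizeof(CardIdx_t) * (cards_num() - 1); }
};

int main() {
  const size_t capacity = 16;
  char* raw = (char*)malloc(Entry::size() * capacity);    // like NEW_C_HEAP_ARRAY(char, ...)
  memset(raw, -1, Entry::size() * capacity);              // -1 == NullEntry in every slot
  Entry* third = (Entry*)(raw + Entry::size() * 2);       // what entry(i) computes above
  third->_cards[Entry::cards_num() - 1] = 42;             // last card slot is addressable
  free(raw);
  return 0;
}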
@ -134,12 +141,15 @@ public:
bool add_card(RegionIdx_t region_id, CardIdx_t card_index);

bool get_cards(RegionIdx_t region_id, CardIdx_t* cards);

bool delete_entry(RegionIdx_t region_id);

bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const;

void add_entry(SparsePRTEntry* e);

SparsePRTEntry* get_entry(RegionIdx_t region_id);

void clear();

size_t capacity() const { return _capacity; }
@ -148,7 +158,7 @@ public:
size_t occupied_cards() const { return _occupied_cards; }
size_t mem_size() const;

SparsePRTEntry* entry(int i) const { return &_entries[i]; }
SparsePRTEntry* entry(int i) const { return (SparsePRTEntry*)((char*)_entries + SparsePRTEntry::size() * i); }

void print();
};
@ -157,7 +167,7 @@ public:
class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
int _tbl_ind; // [-1, 0.._rsht->_capacity)
int _bl_ind; // [-1, 0.._rsht->_capacity)
short _card_ind; // [0..CardsPerEntry)
short _card_ind; // [0..SparsePRTEntry::cards_num())
RSHashTable* _rsht;
size_t _heap_bot_card_ind;

@ -176,7 +186,7 @@ public:
RSHashTableIter(size_t heap_bot_card_ind) :
_tbl_ind(RSHashTable::NullEntry),
_bl_ind(RSHashTable::NullEntry),
_card_ind((SparsePRTEntry::CardsPerEntry-1)),
_card_ind((SparsePRTEntry::cards_num() - 1)),
_rsht(NULL),
_heap_bot_card_ind(heap_bot_card_ind)
{}
@ -185,7 +195,7 @@ public:
_rsht = rsht;
_tbl_ind = -1; // So that first increment gets to 0.
_bl_ind = RSHashTable::NullEntry;
_card_ind = (SparsePRTEntry::CardsPerEntry-1);
_card_ind = (SparsePRTEntry::cards_num() - 1);
}

bool has_next(size_t& card_index);
@ -241,9 +251,13 @@ public:

// If the table hold an entry for "region_ind", Copies its
// cards into "cards", which must be an array of length at least
// "CardsPerEntry", and returns "true"; otherwise, returns "false".
// "SparePRTEntry::cards_num()", and returns "true"; otherwise,
// returns "false".
bool get_cards(RegionIdx_t region_ind, CardIdx_t* cards);

// Return the pointer to the entry associated with the given region.
SparsePRTEntry* get_entry(RegionIdx_t region_ind);

// If there is an entry for "region_ind", removes it and return "true";
// otherwise returns "false."
bool delete_entry(RegionIdx_t region_ind);

@ -175,6 +175,7 @@ arguments.cpp jvmtiExport.hpp
arguments.cpp management.hpp
arguments.cpp oop.inline.hpp
arguments.cpp os_<os_family>.inline.hpp
arguments.cpp referenceProcessor.hpp
arguments.cpp universe.inline.hpp
arguments.cpp vm_version_<arch>.hpp

@ -1483,6 +1484,7 @@ deoptimization.cpp thread.hpp
deoptimization.cpp vframe.hpp
deoptimization.cpp vframeArray.hpp
deoptimization.cpp vframe_hp.hpp
deoptimization.cpp vmreg_<arch>.inline.hpp
deoptimization.cpp xmlstream.hpp

deoptimization.hpp allocation.hpp
@ -2653,6 +2655,7 @@ loaderConstraints.cpp resourceArea.hpp
loaderConstraints.cpp safepoint.hpp

loaderConstraints.hpp dictionary.hpp
loaderConstraints.hpp placeholders.hpp
loaderConstraints.hpp hashtable.hpp

location.cpp debugInfo.hpp

@ -124,8 +124,6 @@ public:
// Below length is the # array elements being written
virtual void write_ref_array_pre( oop* dst, int length) {}
virtual void write_ref_array_pre(narrowOop* dst, int length) {}
// Below MemRegion mr is expected to be HeapWord-aligned
inline void write_ref_array(MemRegion mr);
// Below count is the # array elements being written, starting
// at the address "start", which may not necessarily be HeapWord-aligned
inline void write_ref_array(HeapWord* start, size_t count);

@ -42,16 +42,6 @@ void BarrierSet::write_ref_field(void* field, oop new_val) {
}
}

void BarrierSet::write_ref_array(MemRegion mr) {
assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start() , "Unaligned start");
assert((HeapWord*)align_size_up ((uintptr_t)mr.end(), HeapWordSize) == mr.end(), "Unaligned end" );
if (kind() == CardTableModRef) {
((CardTableModRefBS*)this)->inline_write_ref_array(mr);
} else {
write_ref_array_work(mr);
}
}

// count is number of array elements being written
void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
assert(count <= (size_t)max_intx, "count too large");
@ -61,12 +51,12 @@ void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
// strictly necessary for current uses, but a case of good hygiene and,
// if you will, aesthetics) and the second upward (this is essential for
// current uses) to a HeapWord boundary, so we mark all cards overlapping
// this write. In the event that this evolves in the future to calling a
// this write. If this evolves in the future to calling a
// logging barrier of narrow oop granularity, like the pre-barrier for G1
// (mentioned here merely by way of example), we will need to change this
// interface, much like the pre-barrier one above, so it is "exactly precise"
// (if i may be allowed the adverbial redundancy for emphasis) and does not
// include narrow oop slots not included in the original write interval.
// interface, so it is "exactly precise" (if i may be allowed the adverbial
// redundancy for emphasis) and does not include narrow oop slots not
// included in the original write interval.
HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize);
HeapWord* aligned_end = (HeapWord*)align_size_up ((uintptr_t)end, HeapWordSize);
// If compressed oops were not being used, these should already be aligned

@ -339,6 +339,16 @@ public:
return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
}

void set_card_claimed(size_t card_index) {
jbyte val = _byte_map[card_index];
if (val == clean_card_val()) {
val = (jbyte)claimed_card_val();
} else {
val |= (jbyte)claimed_card_val();
}
_byte_map[card_index] = val;
}

bool claim_card(size_t card_index);

bool is_card_clean(size_t card_index) {

@ -263,10 +263,13 @@ class ReferenceProcessor : public CHeapObj {
int parallel_gc_threads = 1,
bool mt_processing = false,
bool discovered_list_needs_barrier = false);

// RefDiscoveryPolicy values
enum {
enum DiscoveryPolicy {
ReferenceBasedDiscovery = 0,
ReferentBasedDiscovery = 1
ReferentBasedDiscovery = 1,
DiscoveryPolicyMin = ReferenceBasedDiscovery,
DiscoveryPolicyMax = ReferentBasedDiscovery
};

static void init_statics();
@ -1,5 +1,5 @@
/*
* Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -545,6 +545,10 @@ public:
return cell_offset(counter_cell_count);
}

void set_count(uint count) {
set_uint_at(count_off, count);
}

#ifndef PRODUCT
void print_data_on(outputStream* st);
#endif
@ -692,6 +696,23 @@ public:

void clear_row(uint row) {
assert(row < row_limit(), "oob");
// Clear total count - indicator of polymorphic call site.
// The site may look like as monomorphic after that but
// it allow to have more accurate profiling information because
// there was execution phase change since klasses were unloaded.
// If the site is still polymorphic then MDO will be updated
// to reflect it. But it could be the case that the site becomes
// only bimorphic. Then keeping total count not 0 will be wrong.
// Even if we use monomorphic (when it is not) for compilation
// we will only have trap, deoptimization and recompile again
// with updated MDO after executing method in Interpreter.
// An additional receiver will be recorded in the cleaned row
// during next call execution.
//
// Note: our profiling logic works with empty rows in any slot.
// We do sorting a profiling info (ciCallProfile) for compilation.
//
set_count(0);
set_receiver(row, NULL);
set_receiver_count(row, 0);
}

@ -1,5 +1,5 @@
/*
* Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -136,8 +136,10 @@ JVMState* DirectCallGenerator::generate(JVMState* jvms) {
}
// Mark the call node as virtual, sort of:
call->set_optimized_virtual(true);
if (method()->is_method_handle_invoke())
if (method()->is_method_handle_invoke()) {
call->set_method_handle_invoke(true);
kit.C->set_has_method_handle_invokes(true);
}
}
kit.set_arguments_for_java_call(call);
kit.set_edges_for_java_call(call, false, _separate_io_proj);
@ -194,6 +196,7 @@ JVMState* DynamicCallGenerator::generate(JVMState* jvms) {
call->set_optimized_virtual(true);
// Take extra care (in the presence of argument motion) not to trash the SP:
call->set_method_handle_invoke(true);
kit.C->set_has_method_handle_invokes(true);

// Pass the target MethodHandle as first argument and shift the
// other arguments.

@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -465,6 +465,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
_code_buffer("Compile::Fill_buffer"),
_orig_pc_slot(0),
_orig_pc_slot_offset_in_bytes(0),
_has_method_handle_invokes(false),
_node_bundling_limit(0),
_node_bundling_base(NULL),
_java_calls(0),
@ -759,6 +760,7 @@ Compile::Compile( ciEnv* ci_env,
_do_escape_analysis(false),
_failure_reason(NULL),
_code_buffer("Compile::Fill_buffer"),
_has_method_handle_invokes(false),
_node_bundling_limit(0),
_node_bundling_base(NULL),
_java_calls(0),

@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -166,6 +166,9 @@ class Compile : public Phase {
bool _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
#endif

// JSR 292
bool _has_method_handle_invokes; // True if this method has MethodHandle invokes.

// Compilation environment.
Arena _comp_arena; // Arena with lifetime equivalent to Compile
ciEnv* _env; // CI interface
@ -336,6 +339,10 @@ class Compile : public Phase {
void set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
#endif

// JSR 292
bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }

void begin_method() {
#ifndef PRODUCT
if (_printer) _printer->begin_method(this);
|
||||
/*
|
||||
* Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 1998-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -70,7 +70,7 @@ CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index,
|
||||
CompileLog* log = this->log();
|
||||
if (log != NULL) {
|
||||
int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
|
||||
int r2id = (profile.morphism() == 2)? log->identify(profile.receiver(1)):-1;
|
||||
int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1;
|
||||
log->begin_elem("call method='%d' count='%d' prof_factor='%g'",
|
||||
log->identify(call_method), site_count, prof_factor);
|
||||
if (call_is_virtual) log->print(" virtual='1'");
|
||||
|
@ -780,12 +780,20 @@ bool GraphKit::dead_locals_are_killed() {
|
||||
|
||||
// Helper function for enforcing certain bytecodes to reexecute if
|
||||
// deoptimization happens
|
||||
static bool should_reexecute_implied_by_bytecode(JVMState *jvms) {
|
||||
static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
|
||||
ciMethod* cur_method = jvms->method();
|
||||
int cur_bci = jvms->bci();
|
||||
if (cur_method != NULL && cur_bci != InvocationEntryBci) {
|
||||
Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
|
||||
return Interpreter::bytecode_should_reexecute(code);
|
||||
return Interpreter::bytecode_should_reexecute(code) ||
|
||||
is_anewarray && code == Bytecodes::_multianewarray;
|
||||
// Reexecute _multianewarray bytecode which was replaced with
|
||||
// sequence of [a]newarray. See Parse::do_multianewarray().
|
||||
//
|
||||
// Note: interpreter should not have it set since this optimization
|
||||
// is limited by dimensions and guarded by flag so in some cases
|
||||
// multianewarray() runtime calls will be generated and
|
||||
// the bytecode should not be reexecutes (stack will not be reset).
|
||||
} else
|
||||
return false;
|
||||
}
|
||||
@ -836,7 +844,7 @@ void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
|
||||
// For a known set of bytecodes, the interpreter should reexecute them if
|
||||
// deoptimization happens. We set the reexecute state for them here
|
||||
if (out_jvms->is_reexecute_undefined() && //don't change if already specified
|
||||
should_reexecute_implied_by_bytecode(out_jvms)) {
|
||||
should_reexecute_implied_by_bytecode(out_jvms, call->is_AllocateArray())) {
|
||||
out_jvms->set_should_reexecute(true); //NOTE: youngest_jvms not changed
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -47,7 +47,7 @@ Node *PhaseIdealLoop::split_thru_phi( Node *n, Node *region, int policy ) {
|
||||
int offset = t_oop->offset();
|
||||
phi = new (C,region->req()) PhiNode(region, type, NULL, iid, index, offset);
|
||||
} else {
|
||||
phi = new (C,region->req()) PhiNode(region, type);
|
||||
phi = PhiNode::make_blank(region, n);
|
||||
}
|
||||
uint old_unique = C->unique();
|
||||
for( uint i = 1; i < region->req(); i++ ) {
|
||||
|
@ -795,6 +795,7 @@ void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
|
||||
|
||||
int safepoint_pc_offset = current_offset;
|
||||
bool is_method_handle_invoke = false;
|
||||
bool return_oop = false;
|
||||
|
||||
// Add the safepoint in the DebugInfoRecorder
|
||||
if( !mach->is_MachCall() ) {
|
||||
@ -804,9 +805,18 @@ void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
|
||||
mcall = mach->as_MachCall();
|
||||
|
||||
// Is the call a MethodHandle call?
|
||||
if (mcall->is_MachCallJava())
|
||||
is_method_handle_invoke = mcall->as_MachCallJava()->_method_handle_invoke;
|
||||
if (mcall->is_MachCallJava()) {
|
||||
if (mcall->as_MachCallJava()->_method_handle_invoke) {
|
||||
assert(has_method_handle_invokes(), "must have been set during call generation");
|
||||
is_method_handle_invoke = true;
|
||||
}
|
||||
}
|
||||
|
||||
// Check if a call returns an object.
|
||||
if (mcall->return_value_is_used() &&
|
||||
mcall->tf()->range()->field_at(TypeFunc::Parms)->isa_ptr()) {
|
||||
return_oop = true;
|
||||
}
|
||||
safepoint_pc_offset += mcall->ret_addr_offset();
|
||||
debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
|
||||
}
|
||||
@ -919,7 +929,7 @@ void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
|
||||
assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
|
||||
assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
|
||||
// Now we can describe the scope.
|
||||
debug_info()->describe_scope(safepoint_pc_offset, scope_method, jvms->bci(), jvms->should_reexecute(), is_method_handle_invoke, locvals, expvals, monvals);
|
||||
debug_info()->describe_scope(safepoint_pc_offset, scope_method, jvms->bci(), jvms->should_reexecute(), is_method_handle_invoke, return_oop, locvals, expvals, monvals);
|
||||
} // End jvms loop
|
||||
|
||||
// Mark the end of the scope set.
|
||||
@ -1086,9 +1096,21 @@ void Compile::Fill_buffer() {
|
||||
deopt_handler_req += MAX_stubs_size; // add marginal slop for handler
|
||||
stub_req += MAX_stubs_size; // ensure per-stub margin
|
||||
code_req += MAX_inst_size; // ensure per-instruction margin
|
||||
|
||||
if (StressCodeBuffers)
|
||||
code_req = const_req = stub_req = exception_handler_req = deopt_handler_req = 0x10; // force expansion
|
||||
int total_req = code_req + pad_req + stub_req + exception_handler_req + deopt_handler_req + const_req;
|
||||
|
||||
int total_req =
|
||||
code_req +
|
||||
pad_req +
|
||||
stub_req +
|
||||
exception_handler_req +
|
||||
deopt_handler_req + // deopt handler
|
||||
const_req;
|
||||
|
||||
if (has_method_handle_invokes())
|
||||
total_req += deopt_handler_req; // deopt MH handler
|
||||
|
||||
CodeBuffer* cb = code_buffer();
|
||||
cb->initialize(total_req, locs_req);
|
||||
|
||||
@ -1430,10 +1452,13 @@ void Compile::Fill_buffer() {
|
||||
_code_offsets.set_value(CodeOffsets::Exceptions, emit_exception_handler(*cb));
|
||||
// Emit the deopt handler code.
|
||||
_code_offsets.set_value(CodeOffsets::Deopt, emit_deopt_handler(*cb));
|
||||
// Emit the MethodHandle deopt handler code. We can use the same
|
||||
// code as for the normal deopt handler, we just need a different
|
||||
// entry point address.
|
||||
_code_offsets.set_value(CodeOffsets::DeoptMH, emit_deopt_handler(*cb));
|
||||
|
||||
// Emit the MethodHandle deopt handler code (if required).
|
||||
if (has_method_handle_invokes()) {
|
||||
// We can use the same code as for the normal deopt handler, we
|
||||
// just need a different entry point address.
|
||||
_code_offsets.set_value(CodeOffsets::DeoptMH, emit_deopt_handler(*cb));
|
||||
}
|
||||
}
|
||||
|
||||
// One last check for failed CodeBuffer::expand:
|
||||
|
@ -824,7 +824,6 @@ bool Parse::can_rerun_bytecode() {
|
||||
case Bytecodes::_ddiv:
|
||||
case Bytecodes::_checkcast:
|
||||
case Bytecodes::_instanceof:
|
||||
case Bytecodes::_athrow:
|
||||
case Bytecodes::_anewarray:
|
||||
case Bytecodes::_newarray:
|
||||
case Bytecodes::_multianewarray:
|
||||
@ -834,6 +833,8 @@ bool Parse::can_rerun_bytecode() {
|
||||
return true;
|
||||
break;
|
||||
|
||||
// Don't rerun athrow since it's part of the exception path.
|
||||
case Bytecodes::_athrow:
|
||||
case Bytecodes::_invokestatic:
|
||||
case Bytecodes::_invokedynamic:
|
||||
case Bytecodes::_invokespecial:
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 1998-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -439,8 +439,18 @@ void Parse::do_multianewarray() {
|
||||
|
||||
// Can use multianewarray instead of [a]newarray if only one dimension,
|
||||
// or if all non-final dimensions are small constants.
|
||||
if (expand_count == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
|
||||
Node* obj = expand_multianewarray(array_klass, &length[0], ndimensions, ndimensions);
|
||||
if (ndimensions == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
|
||||
Node* obj = NULL;
|
||||
// Set the original stack and the reexecute bit for the interpreter
|
||||
// to reexecute the multianewarray bytecode if deoptimization happens.
|
||||
// Do it unconditionally even for one dimension multianewarray.
|
||||
// Note: the reexecute bit will be set in GraphKit::add_safepoint_edges()
|
||||
// when AllocateArray node for newarray is created.
|
||||
{ PreserveReexecuteState preexecs(this);
|
||||
_sp += ndimensions;
|
||||
// Pass 0 as nargs since uncommon trap code does not need to restore stack.
|
||||
obj = expand_multianewarray(array_klass, &length[0], ndimensions, 0);
|
||||
} //original reexecute and sp are set back here
|
||||
push(obj);
|
||||
return;
|
||||
}
|
||||
|
@ -708,7 +708,7 @@ JRT_LEAF(void, OptoRuntime::profile_receiver_type_C(DataLayout* data, oopDesc* r
|
||||
*(mdp + count_off) = DataLayout::counter_increment;
|
||||
} else {
|
||||
// Receiver did not match any saved receiver and there is no empty row for it.
|
||||
// Increment total counter to indicate polimorphic case.
|
||||
// Increment total counter to indicate polymorphic case.
|
||||
intptr_t* count_p = (intptr_t*)(((byte*)(data)) + in_bytes(CounterData::count_offset()));
|
||||
*count_p += DataLayout::counter_increment;
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 2009-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -1073,7 +1073,7 @@ void PhaseStringOpts::int_getChars(GraphKit& kit, Node* arg, Node* char_array, N
|
||||
kit.set_control(head);
|
||||
kit.set_memory(mem, char_adr_idx);
|
||||
|
||||
Node* q = __ DivI(kit.null(), i_phi, __ intcon(10));
|
||||
Node* q = __ DivI(NULL, i_phi, __ intcon(10));
|
||||
Node* r = __ SubI(i_phi, __ AddI(__ LShiftI(q, __ intcon(3)),
|
||||
__ LShiftI(q, __ intcon(1))));
|
||||
Node* m1 = __ SubI(charPos, __ intcon(1));
|
||||
@ -1270,14 +1270,15 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) {
|
||||
// length = length + (s.count - s.offset);
|
||||
RegionNode *r = new (C, 3) RegionNode(3);
|
||||
kit.gvn().set_type(r, Type::CONTROL);
|
||||
Node *phi = new (C, 3) PhiNode(r, type->join(TypeInstPtr::NOTNULL));
|
||||
Node *phi = new (C, 3) PhiNode(r, type);
|
||||
kit.gvn().set_type(phi, phi->bottom_type());
|
||||
Node* p = __ Bool(__ CmpP(arg, kit.null()), BoolTest::ne);
|
||||
IfNode* iff = kit.create_and_map_if(kit.control(), p, PROB_MIN, COUNT_UNKNOWN);
|
||||
Node* notnull = __ IfTrue(iff);
|
||||
Node* isnull = __ IfFalse(iff);
|
||||
kit.set_control(notnull); // set control for the cast_not_null
|
||||
r->init_req(1, notnull);
|
||||
phi->init_req(1, arg);
|
||||
phi->init_req(1, kit.cast_not_null(arg, false));
|
||||
r->init_req(2, isnull);
|
||||
phi->init_req(2, null_string);
|
||||
kit.set_control(r);
|
||||
|
@ -402,7 +402,7 @@ void JvmtiCodeBlobEvents::build_jvmti_addr_location_map(nmethod *nm,
|
||||
|
||||
address scopes_data = nm->scopes_data_begin();
|
||||
for( pcd = nm->scopes_pcs_begin(); pcd < nm->scopes_pcs_end(); ++pcd ) {
|
||||
ScopeDesc sc0(nm, pcd->scope_decode_offset(), pcd->should_reexecute());
|
||||
ScopeDesc sc0(nm, pcd->scope_decode_offset(), pcd->should_reexecute(), pcd->return_oop());
|
||||
ScopeDesc *sd = &sc0;
|
||||
while( !sd->is_top() ) { sd = sd->sender(); }
|
||||
int bci = sd->bci();
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -1487,6 +1487,20 @@ bool Arguments::created_by_java_launcher() {
|
||||
//===========================================================================================================
|
||||
// Parsing of main arguments
|
||||
|
||||
bool Arguments::verify_interval(uintx val, uintx min,
|
||||
uintx max, const char* name) {
|
||||
// Returns true iff value is in the inclusive interval [min..max]
|
||||
// false, otherwise.
|
||||
if (val >= min && val <= max) {
|
||||
return true;
|
||||
}
|
||||
jio_fprintf(defaultStream::error_stream(),
|
||||
"%s of " UINTX_FORMAT " is invalid; must be between " UINTX_FORMAT
|
||||
" and " UINTX_FORMAT "\n",
|
||||
name, val, min, max);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool Arguments::verify_percentage(uintx value, const char* name) {
|
||||
if (value <= 100) {
|
||||
return true;
|
||||
@ -1723,6 +1737,16 @@ bool Arguments::check_vm_args_consistency() {
|
||||
status = false;
|
||||
}
|
||||
|
||||
status = status && verify_interval(RefDiscoveryPolicy,
|
||||
ReferenceProcessor::DiscoveryPolicyMin,
|
||||
ReferenceProcessor::DiscoveryPolicyMax,
|
||||
"RefDiscoveryPolicy");
|
||||
|
||||
// Limit the lower bound of this flag to 1 as it is used in a division
|
||||
// expression.
|
||||
status = status && verify_interval(TLABWasteTargetPercent,
|
||||
1, 100, "TLABWasteTargetPercent");
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
@ -2500,6 +2524,9 @@ jint Arguments::finalize_vm_init_args(SysClassPath* scp_p, bool scp_assembly_req
|
||||
SOLARIS_ONLY(FLAG_SET_DEFAULT(UseISM, false));
|
||||
}
|
||||
|
||||
// Tiered compilation is undefined with C1.
|
||||
TieredCompilation = false;
|
||||
|
||||
#else
|
||||
if (!FLAG_IS_DEFAULT(OptoLoopAlignment) && FLAG_IS_DEFAULT(MaxLoopPad)) {
|
||||
FLAG_SET_DEFAULT(MaxLoopPad, OptoLoopAlignment-1);
|
||||
|
@ -336,6 +336,8 @@ class Arguments : AllStatic {
|
||||
static bool is_bad_option(const JavaVMOption* option, jboolean ignore) {
|
||||
return is_bad_option(option, ignore, NULL);
|
||||
}
|
||||
static bool verify_interval(uintx val, uintx min,
|
||||
uintx max, const char* name);
|
||||
static bool verify_percentage(uintx value, const char* name);
|
||||
static void describe_range_error(ArgsRange errcode);
|
||||
static ArgsRange check_memory_size(julong size, julong min_size);
|
||||
|
@ -145,6 +145,27 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
|
||||
if (EliminateAllocations) {
|
||||
assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames");
|
||||
GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();
|
||||
|
||||
// The flag return_oop() indicates call sites which return oop
|
||||
// in compiled code. Such sites include java method calls,
|
||||
// runtime calls (for example, used to allocate new objects/arrays
|
||||
// on slow code path) and any other calls generated in compiled code.
|
||||
// It is not guaranteed that we can get such information here only
|
||||
// by analyzing bytecode in deoptimized frames. This is why this flag
|
||||
// is set during method compilation (see Compile::Process_OopMap_Node()).
|
||||
bool save_oop_result = chunk->at(0)->scope()->return_oop();
|
||||
Handle return_value;
|
||||
if (save_oop_result) {
|
||||
// Reallocation may trigger GC. If deoptimization happened on return from
|
||||
// call which returns oop we need to save it since it is not in oopmap.
|
||||
oop result = deoptee.saved_oop_result(&map);
|
||||
assert(result == NULL || result->is_oop(), "must be oop");
|
||||
return_value = Handle(thread, result);
|
||||
assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
|
||||
if (TraceDeoptimization) {
|
||||
tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, result, thread);
|
||||
}
|
||||
}
|
||||
bool reallocated = false;
|
||||
if (objects != NULL) {
|
||||
JRT_BLOCK
|
||||
@ -158,9 +179,13 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
|
||||
ttyLocker ttyl;
|
||||
tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, thread);
|
||||
print_objects(objects);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
if (save_oop_result) {
|
||||
// Restore result.
|
||||
deoptee.set_saved_oop_result(&map, return_value());
|
||||
}
|
||||
}
|
||||
if (EliminateLocks) {
|
||||
#ifndef PRODUCT
|
||||
@ -913,21 +938,6 @@ vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, Re
|
||||
if (TraceDeoptimization) {
|
||||
ttyLocker ttyl;
|
||||
tty->print_cr(" Created vframeArray " INTPTR_FORMAT, array);
|
||||
if (Verbose) {
|
||||
int count = 0;
|
||||
// this used to leak deoptimizedVFrame like it was going out of style!!!
|
||||
for (int index = 0; index < array->frames(); index++ ) {
|
||||
vframeArrayElement* e = array->element(index);
|
||||
e->print(tty);
|
||||
|
||||
/*
|
||||
No printing yet.
|
||||
array->vframe_at(index)->print_activation(count++);
|
||||
// better as...
|
||||
array->print_activation_for(index, count++);
|
||||
*/
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif // PRODUCT
|
||||
|
||||
|
@ -606,12 +606,12 @@ void frame::interpreter_frame_print_on(outputStream* st) const {
|
||||
for (BasicObjectLock* current = interpreter_frame_monitor_end();
|
||||
current < interpreter_frame_monitor_begin();
|
||||
current = next_monitor_in_interpreter_frame(current)) {
|
||||
st->print_cr(" [ - obj ");
|
||||
st->print(" - obj [");
|
||||
current->obj()->print_value_on(st);
|
||||
st->cr();
|
||||
st->print_cr(" - lock ");
|
||||
st->print_cr("]");
|
||||
st->print(" - lock [");
|
||||
current->lock()->print_on(st);
|
||||
st->cr();
|
||||
st->print_cr("]");
|
||||
}
|
||||
// monitor
|
||||
st->print_cr(" - monitor[" INTPTR_FORMAT "]", interpreter_frame_monitor_begin());
|
||||
|
@ -607,7 +607,9 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
|
||||
_implicit_null_throws++;
|
||||
#endif
|
||||
target_pc = nm->continuation_for_implicit_exception(pc);
|
||||
guarantee(target_pc != 0, "must have a continuation point");
|
||||
// If there's an unexpected fault, target_pc might be NULL,
|
||||
// in which case we want to fall through into the normal
|
||||
// error handling code.
|
||||
}
|
||||
|
||||
break; // fall through
|
||||
@ -621,14 +623,15 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
|
||||
_implicit_div0_throws++;
|
||||
#endif
|
||||
target_pc = nm->continuation_for_implicit_exception(pc);
|
||||
guarantee(target_pc != 0, "must have a continuation point");
|
||||
// If there's an unexpected fault, target_pc might be NULL,
|
||||
// in which case we want to fall through into the normal
|
||||
// error handling code.
|
||||
break; // fall through
|
||||
}
|
||||
|
||||
default: ShouldNotReachHere();
|
||||
}
|
||||
|
||||
guarantee(target_pc != NULL, "must have computed destination PC for implicit exception");
|
||||
assert(exception_kind == IMPLICIT_NULL || exception_kind == IMPLICIT_DIVIDE_BY_ZERO, "wrong implicit exception kind");
|
||||
|
||||
// for AbortVMOnException flag
|
||||
@ -1944,7 +1947,7 @@ class AdapterHandlerTable : public BasicHashtable {
|
||||
|
||||
private:
|
||||
|
||||
#ifdef ASSERT
|
||||
#ifndef PRODUCT
|
||||
static int _lookups; // number of calls to lookup
|
||||
static int _buckets; // number of buckets checked
|
||||
static int _equals; // number of buckets checked with matching hash
|
||||
@ -1980,16 +1983,16 @@ class AdapterHandlerTable : public BasicHashtable {
|
||||
|
||||
// Find a entry with the same fingerprint if it exists
|
||||
AdapterHandlerEntry* lookup(int total_args_passed, BasicType* sig_bt) {
|
||||
debug_only(_lookups++);
|
||||
NOT_PRODUCT(_lookups++);
|
||||
AdapterFingerPrint fp(total_args_passed, sig_bt);
|
||||
unsigned int hash = fp.compute_hash();
|
||||
int index = hash_to_index(hash);
|
||||
for (AdapterHandlerEntry* e = bucket(index); e != NULL; e = e->next()) {
|
||||
debug_only(_buckets++);
|
||||
NOT_PRODUCT(_buckets++);
|
||||
if (e->hash() == hash) {
|
||||
debug_only(_equals++);
|
||||
NOT_PRODUCT(_equals++);
|
||||
if (fp.equals(e->fingerprint())) {
|
||||
#ifdef ASSERT
|
||||
#ifndef PRODUCT
|
||||
if (fp.is_compact()) _compact++;
|
||||
_hits++;
|
||||
#endif
|
||||
@ -2000,6 +2003,7 @@ class AdapterHandlerTable : public BasicHashtable {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
void print_statistics() {
|
||||
ResourceMark rm;
|
||||
int longest = 0;
|
||||
@ -2018,15 +2022,14 @@ class AdapterHandlerTable : public BasicHashtable {
|
||||
}
|
||||
tty->print_cr("AdapterHandlerTable: empty %d longest %d total %d average %f",
|
||||
empty, longest, total, total / (double)nonempty);
|
||||
#ifdef ASSERT
|
||||
tty->print_cr("AdapterHandlerTable: lookups %d buckets %d equals %d hits %d compact %d",
|
||||
_lookups, _buckets, _equals, _hits, _compact);
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
};
|
||||
|
||||
|
||||
#ifdef ASSERT
|
||||
#ifndef PRODUCT
|
||||
|
||||
int AdapterHandlerTable::_lookups;
|
||||
int AdapterHandlerTable::_buckets;
|
||||
|
@ -196,11 +196,19 @@ void stubRoutines_init2() { StubRoutines::initialize2(); }
|
||||
// Default versions of arraycopy functions
|
||||
//
|
||||
|
||||
static void gen_arraycopy_barrier_pre(oop* dest, size_t count) {
|
||||
assert(count != 0, "count should be non-zero");
|
||||
assert(count <= (size_t)max_intx, "count too large");
|
||||
BarrierSet* bs = Universe::heap()->barrier_set();
|
||||
assert(bs->has_write_ref_array_pre_opt(), "Must have pre-barrier opt");
|
||||
bs->write_ref_array_pre(dest, (int)count);
|
||||
}
|
||||
|
||||
static void gen_arraycopy_barrier(oop* dest, size_t count) {
|
||||
assert(count != 0, "count should be non-zero");
|
||||
BarrierSet* bs = Universe::heap()->barrier_set();
|
||||
assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
|
||||
bs->write_ref_array(MemRegion((HeapWord*)dest, (HeapWord*)(dest + count)));
|
||||
bs->write_ref_array((HeapWord*)dest, count);
|
||||
}
|
||||
|
||||
JRT_LEAF(void, StubRoutines::jbyte_copy(jbyte* src, jbyte* dest, size_t count))
|
||||
@ -240,6 +248,7 @@ JRT_LEAF(void, StubRoutines::oop_copy(oop* src, oop* dest, size_t count))
|
||||
SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy
|
||||
#endif // !PRODUCT
|
||||
assert(count != 0, "count should be non-zero");
|
||||
gen_arraycopy_barrier_pre(dest, count);
|
||||
Copy::conjoint_oops_atomic(src, dest, count);
|
||||
gen_arraycopy_barrier(dest, count);
|
||||
JRT_END
|
||||
@ -281,6 +290,7 @@ JRT_LEAF(void, StubRoutines::arrayof_oop_copy(HeapWord* src, HeapWord* dest, siz
|
||||
SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy
|
||||
#endif // !PRODUCT
|
||||
assert(count != 0, "count should be non-zero");
|
||||
gen_arraycopy_barrier_pre((oop *) dest, count);
|
||||
Copy::arrayof_conjoint_oops(src, dest, count);
|
||||
gen_arraycopy_barrier((oop *) dest, count);
|
||||
JRT_END
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -186,7 +186,7 @@ void vframeArrayElement::unpack_on_stack(int callee_parameters,
|
||||
int popframe_preserved_args_size_in_bytes = 0;
|
||||
int popframe_preserved_args_size_in_words = 0;
|
||||
if (is_top_frame) {
|
||||
JvmtiThreadState *state = thread->jvmti_thread_state();
|
||||
JvmtiThreadState *state = thread->jvmti_thread_state();
|
||||
if (JvmtiExport::can_pop_frame() &&
|
||||
(thread->has_pending_popframe() || thread->popframe_forcing_deopt_reexecution())) {
|
||||
if (thread->has_pending_popframe()) {
|
||||
@ -381,7 +381,6 @@ void vframeArrayElement::unpack_on_stack(int callee_parameters,
|
||||
RegisterMap map(thread);
|
||||
vframe* f = vframe::new_vframe(iframe(), &map, thread);
|
||||
f->print();
|
||||
iframe()->interpreter_frame_print_on(tty);
|
||||
|
||||
tty->print_cr("locals size %d", locals()->size());
|
||||
tty->print_cr("expression size %d", expressions()->size());
|
||||
@ -582,7 +581,7 @@ void vframeArray::print_on_2(outputStream* st) {
|
||||
}
|
||||
|
||||
void vframeArrayElement::print(outputStream* st) {
|
||||
st->print_cr(" - interpreter_frame -> sp: ", INTPTR_FORMAT, iframe()->sp());
|
||||
st->print_cr(" - interpreter_frame -> sp: " INTPTR_FORMAT, iframe()->sp());
|
||||
}
|
||||
|
||||
void vframeArray::print_value_on(outputStream* st) const {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 1998-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -426,11 +426,6 @@ void VMThread::loop() {
|
||||
// follow that also require a safepoint
|
||||
if (_cur_vm_operation->evaluate_at_safepoint()) {
|
||||
|
||||
if (PrintGCApplicationConcurrentTime) {
|
||||
gclog_or_tty->print_cr("Application time: %3.7f seconds",
|
||||
RuntimeService::last_application_time_sec());
|
||||
}
|
||||
|
||||
_vm_queue->set_drain_list(safepoint_ops); // ensure ops can be scanned
|
||||
|
||||
SafepointSynchronize::begin();
|
||||
@ -477,12 +472,6 @@ void VMThread::loop() {
|
||||
// Complete safepoint synchronization
|
||||
SafepointSynchronize::end();
|
||||
|
||||
if (PrintGCApplicationStoppedTime) {
|
||||
gclog_or_tty->print_cr("Total time for which application threads "
|
||||
"were stopped: %3.7f seconds",
|
||||
RuntimeService::last_safepoint_time_sec());
|
||||
}
|
||||
|
||||
} else { // not a safepoint operation
|
||||
if (TraceLongCompiles) {
|
||||
elapsedTimer t;
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -104,6 +104,13 @@ void RuntimeService::init() {
|
||||
|
||||
void RuntimeService::record_safepoint_begin() {
|
||||
HS_DTRACE_PROBE(hs_private, safepoint__begin);
|
||||
|
||||
// Print the time interval in which the app was executing
|
||||
if (PrintGCApplicationConcurrentTime) {
|
||||
gclog_or_tty->print_cr("Application time: %3.7f seconds",
|
||||
last_application_time_sec());
|
||||
}
|
||||
|
||||
// update the time stamp to begin recording safepoint time
|
||||
_safepoint_timer.update();
|
||||
if (UsePerfData) {
|
||||
@ -122,6 +129,15 @@ void RuntimeService::record_safepoint_synchronized() {
|
||||
|
||||
void RuntimeService::record_safepoint_end() {
|
||||
HS_DTRACE_PROBE(hs_private, safepoint__end);
|
||||
|
||||
// Print the time interval for which the app was stopped
|
||||
// during the current safepoint operation.
|
||||
if (PrintGCApplicationStoppedTime) {
|
||||
gclog_or_tty->print_cr("Total time for which application threads "
|
||||
"were stopped: %3.7f seconds",
|
||||
last_safepoint_time_sec());
|
||||
}
|
||||
|
||||
// update the time stamp to begin recording app time
|
||||
_app_timer.update();
|
||||
if (UsePerfData) {
|
||||
|
@ -139,6 +139,10 @@ const size_t M = K*K;
|
||||
const size_t G = M*K;
|
||||
const size_t HWperKB = K / sizeof(HeapWord);
|
||||
|
||||
const size_t LOG_K = 10;
|
||||
const size_t LOG_M = 2 * LOG_K;
|
||||
const size_t LOG_G = 2 * LOG_M;
|
||||
|
||||
const jint min_jint = (jint)1 << (sizeof(jint)*BitsPerByte-1); // 0x80000000 == smallest jint
|
||||
const jint max_jint = (juint)min_jint - 1; // 0x7FFFFFFF == largest jint
|
||||
|
||||
|
hotspot/test/compiler/6910605/Test.java
@ -0,0 +1,78 @@
/*
* Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/

/**
* @test
* @bug 6910605
* @summary C2: NullPointerException/ClassCaseException is thrown when C2 with DeoptimizeALot is used
*
* @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+DeoptimizeALot -Xbatch Test
*
* original test: nsk/coverage/runtime/runtime007
*/

import java.io.*;

public class Test {
public static int buf=0;

public static void main( String argv[] ) {
System.exit(run(argv, System.out)+95);
}

public static int run(String argv[],PrintStream out) {
int ret=0, retx=0, bad=0;

for( int i=0; (i < 100000) && (bad < 10) ; i++ ) {
retx = OptoRuntime_f2i_Type(out);
ret += retx;
if( retx !=0 ) {
out.println("i="+i);
bad++;
}
}
return ret==0 ? 0 : 2 ;
}

public static int OptoRuntime_f2i_Type(PrintStream out) {
int c1=2, c2=3, c3=4, c4=5, c5=6;
int j=0, k=0;
try {
int[][] iii=(int[][])(new int[c1][c2]);

for( j=0; j<c1; j++ ) {
for( k=0; k<c2; k++ ) {
iii[j][k]=(int)((float)(j+1)/(float)(k+1));
}
}
} catch (Throwable e) {
out.println("Unexpected exception " + e);
e.printStackTrace(out);
out.println("j="+j+", k="+k);
return 1;
}
return 0;
}

}
hotspot/test/compiler/6910618/Test.java (new file, 74 lines)
@ -0,0 +1,74 @@
/*
* Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/

/**
* @test
* @bug 6910605
* @summary C2: NullPointerException/ClassCastException is thrown when C2 with DeoptimizeALot is used
*
* @run main/othervm -Xmx64m -XX:+IgnoreUnrecognizedVMOptions -XX:+DeoptimizeALot -XX:+DoEscapeAnalysis -Xbatch -XX:InlineSmallCode=2000 Test
*
*/

/*
* Added InlineSmallCode=2000 to guarantee inlining of StringBuilder::append() so that the StringBuilder object can be scalar replaced.
*
* original test: gc/gctests/StringGC
*/

public class Test {
private final String toAdd = "0123456789abcdef";
private int maxLength;
private static final int numberOfThreads = 8;

private class StringAdder extends Thread {
private String s;

public void test() {
s = s + toAdd;
}
public void run() {
do {
test();
} while (s.length() < maxLength);
}
}

public void test() throws InterruptedException {
maxLength = toAdd.length() * 15000/ numberOfThreads;
StringAdder[] sa = new StringAdder[numberOfThreads];
for (int i = 0; i < numberOfThreads; i++) {
sa[i] = new StringAdder();
sa[i].start();
}
for (int i = 0; i < numberOfThreads; i++) {
sa[i].join();
}
}

public static void main(String[] args) throws InterruptedException {
Test t = new Test();
t.test();
}
}
@ -58,3 +58,4 @@ b1005c504358c18694c84e95fec16b28cdce7ae1 jdk7-b79
204e59d488cdaa9eafa8cb7164ea955b5a9d4a51 jdk7-b81
c876ad22e4bf9d3c6460080db7ace478e29a3ff9 jdk7-b82
309a0a7fc6ceb1c9fc3a85b3608e97ef8f7b0dfd jdk7-b83
32c0cf01d555747918529a6ff9e06b0090c7a474 jdk7-b84

@ -58,3 +58,4 @@ c08894f5b6e594b9b12993e256b96c1b38099632 jdk7-b79
f051045fe94a48fae1097f90cbd9227e6aae6b7e jdk7-b81
31573ae8eed15a6c170f3f0d1abd0b9109c0e086 jdk7-b82
371e3ded591d09112a9f231e37cb072781c486ac jdk7-b83
8bc02839eee4ef02cd1b50e87638874368a26535 jdk7-b84

@ -58,3 +58,4 @@ e6a5d095c356a547cf5b3c8885885aca5e91e09b jdk7-b77
10b993d417fcdb40480dad7032ac241f4b87f1af jdk7-b81
69ef657320ad5c35cfa12e4d8322d877e778f8b3 jdk7-b82
9027c6b9d7e2c9ca04a1add691b5b50d0f22b1aa jdk7-b83
7cb9388bb1a16365fa5118c5efa38b1cd58be40d jdk7-b84

@ -240,7 +240,12 @@ import_product:

all build:: sanity-all post-sanity-all

SUBDIRS = tools java javax org sun sunw com jpda mkdemo mksample launchers
SUBDIRS = tools java javax sun com
SUBDIRS_tools = launchers
SUBDIRS_misc = org sunw jpda mkdemo mksample

include $(BUILDDIR)/common/Subdirs.gmk

all build::
$(SUBDIRS-loop)

@ -32,6 +32,8 @@ PRODUCT = com
include $(BUILDDIR)/common/Defs.gmk

SUBDIRS = sun
include $(BUILDDIR)/common/Subdirs.gmk

all build clean clobber::
$(SUBDIRS-loop)

@ -38,10 +38,19 @@ ifndef OPENJDK
endif
endif

# jarsigner is part of JRE
SUBDIRS = java security net/ssl jarsigner

SUBDIRS_management = jmx
SUBDIRS_desktop = image inputmethods
SUBDIRS_enterprise = crypto/provider jndi \
org xml rowset net/httpserver
SUBDIRS_misc = $(SCRIPT_SUBDIR) tracing servicetag nio demo

# Omit mirror since it's built with the apt tool.
SUBDIRS = $(SCRIPT_SUBDIR) image security crypto/provider jndi jmx \
java inputmethods org xml rowset net/httpserver net/ssl demo \
tools jarsigner tracing servicetag nio
SUBDIRS_tools = tools

include $(BUILDDIR)/common/Subdirs.gmk

all build clean clobber::
$(SUBDIRS-loop)

@ -38,6 +38,8 @@ PRODUCT = sun
include $(BUILDDIR)/common/Defs.gmk

SUBDIRS = jvmti
include $(BUILDDIR)/common/Subdirs.gmk

all build clean clobber::
$(SUBDIRS-loop)

@ -38,6 +38,8 @@ PRODUCT = sun
include $(BUILDDIR)/common/Defs.gmk

SUBDIRS = hprof
include $(BUILDDIR)/common/Subdirs.gmk

all build clean clobber::
$(SUBDIRS-loop)

@ -32,6 +32,8 @@ PRODUCT = sun
include $(BUILDDIR)/common/Defs.gmk

SUBDIRS = indicim thaiim
include $(BUILDDIR)/common/Subdirs.gmk

all build clean clobber::
$(SUBDIRS-loop)

@ -31,7 +31,10 @@ BUILDDIR = ../../..
PRODUCT = plugin
include $(BUILDDIR)/common/Defs.gmk

SUBDIRS = browser pack
SUBDIRS = pack
SUBDIRS_misc = browser
include $(BUILDDIR)/common/Subdirs.gmk

all build clean clobber::
$(SUBDIRS-loop)

@ -32,6 +32,8 @@ PRODUCT = plugin
include $(BUILDDIR)/common/Defs.gmk

SUBDIRS = dom net
include $(BUILDDIR)/common/Subdirs.gmk

all build clean clobber::
$(SUBDIRS-loop)

@ -51,6 +51,8 @@ else
SUBDIRS = snmp
endif

include $(BUILDDIR)/common/Subdirs.gmk

all build:
$(SUBDIRS-loop)

@ -31,6 +31,8 @@ BUILDDIR = ../../..
include $(BUILDDIR)/common/Defs.gmk

SUBDIRS = toolkit cosnaming ldap rmi dns
include $(BUILDDIR)/common/Subdirs.gmk

all build clean clobber::
$(SUBDIRS-loop)

@ -31,6 +31,8 @@ BUILDDIR = ../../../..
include $(BUILDDIR)/common/Defs.gmk

SUBDIRS = registry
include $(BUILDDIR)/common/Subdirs.gmk

all build clean clobber::
$(SUBDIRS-loop)

@ -31,6 +31,8 @@ BUILDDIR = ../../..
include $(BUILDDIR)/common/Defs.gmk

SUBDIRS = sctp
include $(BUILDDIR)/common/Subdirs.gmk

all build clean clobber::
$(SUBDIRS-loop)

@ -32,6 +32,8 @@ PRODUCT = org
include $(BUILDDIR)/common/Defs.gmk

SUBDIRS = apache
include $(BUILDDIR)/common/Subdirs.gmk

all build clean clobber::
$(SUBDIRS-loop)

@ -31,6 +31,7 @@ BUILDDIR = ../../../..
include $(BUILDDIR)/common/Defs.gmk

SUBDIRS = xml
include $(BUILDDIR)/common/Subdirs.gmk

all build clean clobber::
$(SUBDIRS-loop)

@ -30,7 +30,10 @@
BUILDDIR = ../../..
include $(BUILDDIR)/common/Defs.gmk

SUBDIRS = jgss sasl auth auth/module
SUBDIRS = auth
SUBDIRS_misc = jgss sasl auth/module
include $(BUILDDIR)/common/Subdirs.gmk

all build clean clobber::
$(SUBDIRS-loop)

@ -35,6 +35,8 @@ include $(BUILDDIR)/common/Defs.gmk
AUTO_FILES_JAVA_DIRS = com/sun/tools/extcheck

SUBDIRS = attach
include $(BUILDDIR)/common/Subdirs.gmk

all build:
$(SUBDIRS-loop)
clean clobber::

@ -33,6 +33,8 @@ PRODUCT = sun
include $(BUILDDIR)/common/Defs.gmk

SUBDIRS = dtrace
include $(BUILDDIR)/common/Subdirs.gmk

all build:
$(SUBDIRS-loop)
clean clobber::

@ -576,17 +576,17 @@ endef
define install-file
$(prep-target)
$(CP) $< $@
$(install-module-file)
@$(install-module-file)
endef

define chmod-file
$(CHMOD) $1 $@
$(call chmod-module-file, $1)
@$(call chmod-module-file, $1)
endef

define install-sym-link
$(LN) -s $1 $@
$(call install-module-sym-link, $1)
@$(call install-module-sym-link, $1)
endef

#
@ -607,131 +607,9 @@ define java-vm-cleanup
if [ -w $(HOTSPOT_LOG_NAME) ] ; then $(RM) $(HOTSPOT_LOG_NAME); fi
endef

# Default make settings for processing SUBDIRS with clobber or clean names
SUBDIRS_MAKEFLAGS-clobber = INCREMENTAL_BUILD=false
SUBDIRS_MAKEFLAGS-clean = INCREMENTAL_BUILD=false

# Current directory
CURRENT_DIRECTORY := $(shell $(PWD))

# If no timing wanted, we need to define these as empty
ifdef NO_TIMING

TIMING_ID:=NA

define TIMING_start
t=0:0:0:0
endef

define TIMING_end
time_used=0
endef

else # NO_TIMING

# Default timing id
TIMING_ID:=$(shell $(BASENAME) $(CURRENT_DIRECTORY))

# Timing start (must be used in same shell, e.g. same command line)
# Defines the shell variable $1 to have the start time.
define TIMING_start
$1=`$(DATE) +%j:%H:%M:%S`
endef

# Timing end (must be used in same shell, e.g. same command line)
# Expects shell variable $1 to have been defined as the start time.
# Expects shell variable $2 to have timing id string
# Sets total_seconds shell variable as the total seconds used.
# Sets time_used shell variable to contain format "%dh%dm%ds"
define TIMING_end
begTime="$${$1}"; \
timing_id="$${$2}"; \
endTime=`$(DATE) +%j:%H:%M:%S`; \
d1=`$(ECHO) $${begTime} | $(CUT) -d':' -f1 | $(SED) -e 's@^0*@@'`; \
if [ "$${d1}" = "" ] ; then d1=0; fi; \
h1=`$(ECHO) $${begTime} | $(CUT) -d':' -f2 | $(SED) -e 's@^0*@@'`; \
if [ "$${h1}" = "" ] ; then h1=0; fi; \
m1=`$(ECHO) $${begTime} | $(CUT) -d':' -f3 | $(SED) -e 's@^0*@@'`; \
if [ "$${m1}" = "" ] ; then m1=0; fi; \
s1=`$(ECHO) $${begTime} | $(CUT) -d':' -f4 | $(SED) -e 's@^0*@@'`; \
if [ "$${s1}" = "" ] ; then s1=0; fi; \
d2=`$(ECHO) $${endTime} | $(CUT) -d':' -f1 | $(SED) -e 's@^0*@@'`; \
if [ "$${d2}" = "" ] ; then d2=0; fi; \
h2=`$(ECHO) $${endTime} | $(CUT) -d':' -f2 | $(SED) -e 's@^0*@@'`; \
if [ "$${h2}" = "" ] ; then h2=0; fi; \
m2=`$(ECHO) $${endTime} | $(CUT) -d':' -f3 | $(SED) -e 's@^0*@@'`; \
if [ "$${m2}" = "" ] ; then m2=0; fi; \
s2=`$(ECHO) $${endTime} | $(CUT) -d':' -f4 | $(SED) -e 's@^0*@@'`; \
if [ "$${s2}" = "" ] ; then s2=0; fi; \
t1_secs=`$(EXPR) $${d1} '*' 60 '*' 60 '*' 24 '+' $${h1} '*' 60 '*' 60 \
'+' $${m1} '*' 60 '+' $${s1}`; \
t2_secs=`$(EXPR) $${d2} '*' 60 '*' 60 '*' 24 '+' $${h2} '*' 60 '*' 60 \
'+' $${m2} '*' 60 '+' $${s2}`; \
total_seconds=`$(EXPR) $${t2_secs} '-' $${t1_secs}`; \
if [ "$${total_seconds}" -lt 0 ] ; then total_seconds=0; fi; \
t_hour=`$(EXPR) $${total_seconds} '/' '(' 60 '*' 60 ')'`h; \
t_min=`$(EXPR) '(' $${total_seconds} '%' '(' 60 '*' 60 ')' ')' '/' 60`m; \
t_sec=`$(EXPR) $${total_seconds} '%' 60`s; \
time_used=$${t_sec}; \
if [ "$${t_hour}" != "0h" ] ; then \
time_used=$${t_hour}$${t_min}$${t_sec}; \
elif [ "$${t_min}" != "0m" ] ; then \
time_used=$${t_min}$${t_sec}; \
else \
time_used=$${t_sec}; \
fi; \
$(PRINTF) " Timing: %05d seconds or %s for %s\n" \
$${total_seconds} $${time_used} $${timing_id}
endef

endif # NO_TIMING

# Given a SUBDIRS list, cd into them and make them
# SUBDIRS_MAKEFLAGS Make settings for a subdir make
# SUBDIRS_MAKEFLAGS-$@ Make settings specific to this target
define SUBDIRS-loop
@$(ECHO) "Begin Processing SUBDIRS: $(SUBDIRS)"
@for i in DUMMY $(SUBDIRS) ; do \
if [ "$$i" != "DUMMY" ] ; then \
$(ECHO) ">>>Recursively making "$$i" "$@" @ `$(DATE)` ..."; \
timing_id="$(TIMING_ID)-`$(BASENAME) $${i}`"; \
$(call TIMING_start,startTime); \
curDir=$(CURRENT_DIRECTORY); \
$(CD) $$i; $(MAKE) $@ TIMING_ID=$${timing_id} \
$(SUBDIRS_MAKEFLAGS) \
$(SUBDIRS_MAKEFLAGS-$@) \
FULL_VERSION=$(FULL_VERSION) \
RELEASE=$(RELEASE) || exit 1; \
$(CD) $${curDir}; \
$(call TIMING_end,startTime,timing_id); \
$(ECHO) "<<<Finished Recursively making "$$i" "$@" @ `$(DATE)`." ; \
fi ; \
done
@$(ECHO) "Done Processing SUBDIRS: $(SUBDIRS)"
endef

# Given a OTHERSUBDIRS list, cd into them and make them (extra loop define)
# OTHERSUBDIRS_MAKEFLAGS Make settings for a subdir make
define OTHERSUBDIRS-loop
@$(ECHO) "Begin Processing OTHERSUBDIRS: $(OTHERSUBDIRS)"
@for i in DUMMY $(OTHERSUBDIRS) ; do \
if [ "$$i" != "DUMMY" ] ; then \
$(ECHO) ">>>Recursively making "$$i" "$@" @ `$(DATE)` ..."; \
timing_id="$(TIMING_ID)-`$(BASENAME) $${i}`"; \
$(call TIMING_start,startTime); \
curDir=$(CURRENT_DIRECTORY); \
$(CD) $$i; $(MAKE) $@ TIMING_ID=$${timing_id} \
$(OTHERSUBDIRS_MAKEFLAGS) \
FULL_VERSION=$(FULL_VERSION) \
RELEASE=$(RELEASE) || exit 1; \
$(CD) $${curDir}; \
$(call TIMING_end,startTime,timing_id); \
$(ECHO) "<<<Finished Recursively making "$$i" "$@" @ `$(DATE)`." ; \
fi ; \
done
@$(ECHO) "Done Processing OTHERSUBDIRS: $(OTHERSUBDIRS)"
endef

#
# Create BYFILE OPT and DBG settings, if CFLAGS_OPT/foobar.o is set then it is
# used for this file, otherwise the default settings are used.
@ -819,7 +697,7 @@ endef

define install-import-file
$(install-importonly-file)
$(install-module-file)
@$(install-module-file)
endef

.PHONY: all build clean clobber

@ -71,7 +71,8 @@ sanity-base: pre-sanity \
sane-cacerts \
sane-ant_version \
sane-zip_version \
sane-msvcrt_path
sane-msvcrt_path \
sane-build_modules

# The rules sanity-* have a one-to-one correspondence with the major targets
# Each sanity-* rule should have no body to ensure that the post-sanity-* is the

jdk/make/common/Subdirs.gmk (new file, 204 lines)
@ -0,0 +1,204 @@
# Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Sun designates this
# particular file as subject to the "Classpath" exception as provided
# by Sun in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
# CA 95054 USA or visit www.sun.com if you need additional information or
# have any questions
#

#
# Generic makefile for building subdirectories.
#
# SUBDIRS variables to specify the subdirectories to build recursively.
# Makefile has to include Subdirs.gmk AFTER all SUBDIRS variables are
# defined.
#
# This file does not contain any rule.
#
# WARNING: This file is shared with other workspaces.
# So when it includes other files, it must use JDK_TOPDIR.

#
# SUBDIRS subdirs for the base module always get built
# SUBDIRS_<group> subdirs for the named group
#
# By default, subdirs specified in the SUBDIRS and all SUBDIRS_*
# variables will be built.
#
# BUILD_MODULES variable can be used to specify one or more groups
# to be built (BUILD_MODULES=all will build all groups).
#
# Variables of the currently supported groups are:
# SUBDIRS_desktop
# SUBDIRS_management
# SUBDIRS_enterprise
# SUBDIRS_misc
# SUBDIRS_tools
#
# Changes to the above list also need to update
# make/common/shared/Sanity.gmk. NOTE: this list is subject
# to change until the JDK 7 SE profiles/modules are finalized.
#
# Eventually we want to restructure the make directory
# according to these groupings (e.g. make/desktop/...) and
# the SUBDIRS_<group> variables would not be needed.
#
# To build the desktop and tools groups only, you can do:
# gnumake BUILD_MODULES="desktop tools" ...
#

# Iterate the subdirectories specified in $1.
# - cd into each subdir and make them

# Given a SUBDIRS* list (first argument), cd into them and make them
# SUBDIRS_MAKEFLAGS Make settings for a subdir make
# SUBDIRS_MAKEFLAGS-$@ Make settings specific to this target
#
define subdirs-group-loop
@$(ECHO) "Begin Processing SUBDIRS: $($1)"
@for i in DUMMY $($1) ; do \
if [ "$$i" != "DUMMY" ] ; then \
$(MAKE) -C $$i $@ $(SUBDIRS_MAKEFLAGS) $(SUBDIRS_MAKEFLAGS-$@) \
FULL_VERSION=$(FULL_VERSION) RELEASE=$(RELEASE) || exit 1; \
fi ; \
done
@$(ECHO) "Done Processing SUBDIRS: $($1)"
endef

# Given a OTHERSUBDIRS list, cd into them and make them (extra loop define)
# OTHERSUBDIRS_MAKEFLAGS Make settings for a subdir make
define OTHERSUBDIRS-loop
@$(ECHO) "Begin Processing OTHERSUBDIRS: $(OTHERSUBDIRS)"
@for i in DUMMY $(OTHERSUBDIRS) ; do \
if [ "$$i" != "DUMMY" ] ; then \
$(MAKE) -C $$i $@ $(OTHERSUBDIRS_MAKEFLAGS) \
FULL_VERSION=$(FULL_VERSION) RELEASE=$(RELEASE) || exit 1; \
fi ; \
done
@$(ECHO) "Done Processing OTHERSUBDIRS: $(OTHERSUBDIRS)"
endef

#
# Iterate the list specified in SUBDIRS_<group> only if
# SUBDIRS_<group> is set and <group> or "all" is specified
# in the BUILD_MODULES variable
#
ifdef SUBDIRS_desktop
ifneq (,$(findstring desktop, $(BUILD_MODULES)))
define subdirs-desktop-loop
@$(call subdirs-group-loop,SUBDIRS_desktop)
endef
else
define subdirs-desktop-loop
endef
endif
else
define subdirs-desktop-loop
endef
endif # SUBDIRS_desktop

ifdef SUBDIRS_enterprise
ifneq (,$(findstring enterprise, $(BUILD_MODULES)))
define subdirs-enterprise-loop
@$(call subdirs-group-loop,SUBDIRS_enterprise)
endef
else
define subdirs-enterprise-loop
endef
endif
else
define subdirs-enterprise-loop
endef
endif # SUBDIRS_enterprise

ifdef SUBDIRS_management
ifneq (,$(findstring management, $(BUILD_MODULES)))
define subdirs-management-loop
@$(call subdirs-group-loop,SUBDIRS_management)
endef
else
define subdirs-management-loop
endef
endif
else
define subdirs-management-loop
endef
endif # SUBDIRS_management

ifdef SUBDIRS_misc
ifneq (,$(findstring misc, $(BUILD_MODULES)))
define subdirs-misc-loop
@$(call subdirs-group-loop,SUBDIRS_misc)
endef
else
define subdirs-misc-loop
endef
endif
else
define subdirs-misc-loop
endef
endif # SUBDIRS_misc

ifdef SUBDIRS_tools
ifneq (,$(findstring tools, $(BUILD_MODULES)))
define subdirs-tools-loop
@$(call subdirs-group-loop,SUBDIRS_tools)
endef
else
define subdirs-tools-loop
endef
endif
else
define subdirs-tools-loop
endef
endif # SUBDIRS_tools

#
# If BUILD_MODULES is not set or it's set to "all",
# iterate all groups.
SUBDIRS_all = $(SUBDIRS) $(SUBDIRS_desktop) $(SUBDIRS_enterprise) \
$(SUBDIRS_management) $(SUBDIRS_misc) $(SUBDIRS_tools)

ifndef BUILD_MODULES
define SUBDIRS-loop
@$(call subdirs-group-loop,SUBDIRS_all)
endef

else

ifneq (,$(findstring all, $(BUILD_MODULES)))
define SUBDIRS-loop
@$(call subdirs-group-loop,SUBDIRS_all)
endef

else # BUILD_MODULES set
#
# Iterate SUBDIRS and the groups specified in BUILD_MODULES
#
define SUBDIRS-loop
@$(call subdirs-group-loop,SUBDIRS)
@$(subdirs-desktop-loop)
@$(subdirs-enterprise-loop)
@$(subdirs-management-loop)
@$(subdirs-misc-loop)
@$(subdirs-tools-loop)
endef

endif
endif # BUILD_MODULES
@ -218,7 +218,8 @@ include $(JDK_MAKE_SHARED_DIR)/Sanity-Settings.gmk
sane-zip_version \
sane-unzip_version \
sane-msvcrt_path \
sane-freetype
sane-freetype \
sane-build_modules

######################################################
# check for COPYRIGHT_YEAR variable
@ -844,6 +845,21 @@ else
sane-freetype:
endif

######################################################
# if specified, BUILD_MODULES must contain valid values.
######################################################
MODULES_REGEX="all|base|desktop|management|enterprise|misc|tools"
sane-build_modules:
ifdef BUILD_MODULES
@for m in $(BUILD_MODULES) ; do \
valid=`$(ECHO) $$m | $(EGREP) $(MODULES_REGEX)`; \
if [ "x$$valid" = "x" ] ; then \
$(ECHO) "ERROR: $$m set in the BUILD_MODULES variable is invalid.\n" \
"" >> $(ERROR_FILE); \
fi \
done
endif

######################################################
# CUPS_HEADERS_PATH must be valid
######################################################

@ -38,9 +38,13 @@ SUBDIRS += hpi version jvm redist verify fdlibm java sun_nio jli main zip

# Others
# Note: java_crw_demo java_hprof_demo are demos but must be delivered built in sdk
SUBDIRS += security npt java_crw_demo java_hprof_demo \
math awt util text applet net nio dyn \
sql rmi jar beans logging management instrument

SUBDIRS += security math util text net nio jar

SUBDIRS_desktop = awt applet beans
SUBDIRS_management = management
SUBDIRS_misc = npt java_crw_demo java_hprof_demo \
logging instrument dyn sql rmi

ifeq ($(PLATFORM), solaris)
@ -53,6 +57,8 @@ ifeq ($(PLATFORM), linux)
SUBDIRS += jexec
endif # PLATFORM

include $(BUILDDIR)/common/Subdirs.gmk

all build clean clobber::
$(SUBDIRS-loop)

@ -34,6 +34,8 @@ include $(BUILDDIR)/common/Defs.gmk
# Build the specified HPI implementations
#
SUBDIRS = $(HPIS)
include $(BUILDDIR)/common/Subdirs.gmk

all build clean clobber::
$(SUBDIRS-loop)

Some files were not shown because too many files have changed in this diff