commit 5071cda359
Merge

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -177,4 +177,6 @@ void AbstractInterpreter::layout_activation(Method* method,
   }
   *interpreter_frame->interpreter_frame_cache_addr() =
     method->constants()->cache();
+  *interpreter_frame->interpreter_frame_mirror_addr() =
+    method->method_holder()->java_mirror();
 }

@@ -331,7 +331,7 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
     length.load_item();

   }
-  if (needs_store_check) {
+  if (needs_store_check || x->check_boolean()) {
     value.load_item();
   } else {
     value.load_for_store(x->elt_type());
@@ -380,7 +380,8 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
     // Seems to be a precise
     post_barrier(LIR_OprFact::address(array_addr), value.result());
   } else {
-    __ move(value.result(), array_addr, null_check_info);
+    LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
+    __ move(result, array_addr, null_check_info);
   }
 }
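
The new check_boolean()/maybe_mask_boolean() path in the C1 change above enforces the JVMS rule that a value stored into a boolean[] element must be reduced to 0 or 1. A minimal C++ sketch of that masking (an editorial illustration, not code from this commit):

    #include <cstdint>

    // Only bit 0 of the incoming int survives a jboolean store.
    static inline uint8_t stored_boolean(int32_t value) {
      return (uint8_t)(value & 1);
    }
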
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -78,7 +78,9 @@
     interpreter_frame_last_sp_offset = interpreter_frame_sender_sp_offset - 1,
     interpreter_frame_method_offset = interpreter_frame_last_sp_offset - 1,
     interpreter_frame_mdp_offset = interpreter_frame_method_offset - 1,
-    interpreter_frame_cache_offset = interpreter_frame_mdp_offset - 1,
+    interpreter_frame_padding_offset = interpreter_frame_mdp_offset - 1,
+    interpreter_frame_mirror_offset = interpreter_frame_padding_offset - 1,
+    interpreter_frame_cache_offset = interpreter_frame_mirror_offset - 1,
     interpreter_frame_locals_offset = interpreter_frame_cache_offset - 1,
     interpreter_frame_bcp_offset = interpreter_frame_locals_offset - 1,
     interpreter_frame_initial_sp_offset = interpreter_frame_bcp_offset - 1,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -188,6 +188,12 @@ inline Method** frame::interpreter_frame_method_addr() const {
   return (Method**)addr_at(interpreter_frame_method_offset);
 }

+// Mirror
+
+inline oop* frame::interpreter_frame_mirror_addr() const {
+  return (oop*)addr_at(interpreter_frame_mirror_offset);
+}
+
 // top of expression stack
 inline intptr_t* frame::interpreter_frame_tos_address() const {
   intptr_t* last_sp = interpreter_frame_last_sp();
@@ -48,9 +48,9 @@ define_pd_global(intx, InlineFrequencyCount, 100);
 #define DEFAULT_STACK_SHADOW_PAGES (4 DEBUG_ONLY(+5))
 #define DEFAULT_STACK_RESERVED_PAGES (0)

-#define MIN_STACK_YELLOW_PAGES 1
-#define MIN_STACK_RED_PAGES 1
-#define MIN_STACK_SHADOW_PAGES 1
+#define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
+#define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
+#define MIN_STACK_SHADOW_PAGES DEFAULT_STACK_SHADOW_PAGES
 #define MIN_STACK_RESERVED_PAGES (0)

 define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES);
@@ -40,7 +40,43 @@
 #include "runtime/thread.inline.hpp"


 // Implementation of InterpreterMacroAssembler
+void InterpreterMacroAssembler::narrow(Register result) {
+
+  // Get method->_constMethod->_result_type
+  ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
+  ldr(rscratch1, Address(rscratch1, Method::const_offset()));
+  ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
+
+  Label done, notBool, notByte, notChar;
+
+  // common case first
+  cmpw(rscratch1, T_INT);
+  br(Assembler::EQ, done);
+
+  // mask integer result to narrower return type.
+  cmpw(rscratch1, T_BOOLEAN);
+  br(Assembler::NE, notBool);
+  andw(result, result, 0x1);
+  b(done);
+
+  bind(notBool);
+  cmpw(rscratch1, T_BYTE);
+  br(Assembler::NE, notByte);
+  sbfx(result, result, 0, 8);
+  b(done);
+
+  bind(notByte);
+  cmpw(rscratch1, T_CHAR);
+  br(Assembler::NE, notChar);
+  ubfx(result, result, 0, 16);  // truncate upper 16 bits
+  b(done);
+
+  bind(notChar);
+  sbfx(result, result, 0, 16);  // sign-extend short
+
+  // Nothing to do for T_INT
+  bind(done);
+}

 void InterpreterMacroAssembler::jump_to_entry(address entry) {
   assert(entry, "Entry must have been generated by now");
@@ -81,6 +117,7 @@ void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
     verify_oop(r0, state); break;
   case ltos: ldr(r0, val_addr); break;
   case btos: // fall through
+  case ztos: // fall through
   case ctos: // fall through
   case stos: // fall through
   case itos: ldrw(r0, val_addr); break;
@@ -314,6 +351,7 @@ void InterpreterMacroAssembler::pop(TosState state) {
   switch (state) {
   case atos: pop_ptr(); break;
   case btos:
+  case ztos:
   case ctos:
   case stos:
   case itos: pop_i(); break;
@@ -331,6 +369,7 @@ void InterpreterMacroAssembler::push(TosState state) {
   switch (state) {
   case atos: push_ptr(); break;
   case btos:
+  case ztos:
   case ctos:
   case stos:
   case itos: push_i(); break;
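
The narrow() helper added above masks the integer result in its register down to the method's declared return type before returning to a caller. A portable C++ model of the same truncation rules (editorial sketch, not part of this commit; the enum is a local stand-in for HotSpot's BasicType):

    #include <cstdint>

    enum NarrowType { NT_BOOLEAN, NT_BYTE, NT_CHAR, NT_SHORT, NT_INT };

    // Mirrors InterpreterMacroAssembler::narrow(): T_BOOLEAN keeps bit 0,
    // T_BYTE/T_SHORT sign-extend (sbfx), T_CHAR zero-extends (ubfx).
    int32_t narrow_result(int32_t result, NarrowType type) {
      switch (type) {
        case NT_BOOLEAN: return result & 0x1;      // andw(result, result, 0x1)
        case NT_BYTE:    return (int8_t)result;    // sbfx(result, result, 0, 8)
        case NT_CHAR:    return (uint16_t)result;  // ubfx(result, result, 0, 16)
        case NT_SHORT:   return (int16_t)result;   // sbfx(result, result, 0, 16)
        default:         return result;            // T_INT: nothing to do
      }
    }
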
@@ -245,6 +245,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
   void update_mdp_by_constant(Register mdp_in, int constant);
   void update_mdp_for_ret(Register return_bci);

+  // narrow int return value
+  void narrow(Register result);
+
   void profile_taken_branch(Register mdp, Register bumped_count);
   void profile_not_taken_branch(Register mdp);
   void profile_call(Register mdp);
@@ -3217,6 +3217,14 @@ void MacroAssembler::load_klass(Register dst, Register src) {
   }
 }

+void MacroAssembler::load_mirror(Register dst, Register method) {
+  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+  ldr(dst, Address(rmethod, Method::const_offset()));
+  ldr(dst, Address(dst, ConstMethod::constants_offset()));
+  ldr(dst, Address(dst, ConstantPool::pool_holder_offset_in_bytes()));
+  ldr(dst, Address(dst, mirror_offset));
+}
+
 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
   if (UseCompressedClassPointers) {
     ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
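
load_mirror() appears on every platform in this merge and always walks the same chain: Method* to ConstMethod* to ConstantPool* to the pool holder Klass* to the java mirror oop. A hedged C++ model with hypothetical minimal types (not HotSpot's real classes):

    // Each arrow is one dependent load, matching the four ldr instructions above.
    struct Klass        { void* java_mirror; };
    struct ConstantPool { Klass* pool_holder; };
    struct ConstMethod  { ConstantPool* constants; };
    struct Method       { ConstMethod* const_method; };

    static void* mirror_of(const Method* m) {
      return m->const_method->constants->pool_holder->java_mirror;
    }

Storing this mirror in the interpreter frame gives the GC a root that keeps the method's class, and therefore the Method* itself, alive across a collection.
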
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -770,6 +770,8 @@ public:
   void store_klass(Register dst, Register src);
   void cmp_klass(Register oop, Register trial_klass, Register tmp);

+  void load_mirror(Register dst, Register method);
+
   void load_heap_oop(Register dst, Address src);

   void load_heap_oop_not_null(Register dst, Address src);
@@ -1184,6 +1186,10 @@ public:
                         Register result, Register cnt1,
                         int elem_size, bool is_string);

+  void fill_words(Register base, Register cnt, Register value);
+  void zero_words(Register base, Register cnt);
+  void zero_words(Register base, u_int64_t cnt);
+
   void byte_array_inflate(Register src, Register dst, Register len,
                           FloatRegister vtmp1, FloatRegister vtmp2,
                           FloatRegister vtmp3, Register tmp4);
@@ -759,18 +759,13 @@ void TemplateInterpreterGenerator::lock_method() {

   // get synchronization object
   {
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
     Label done;
     __ ldrw(r0, access_flags);
     __ tst(r0, JVM_ACC_STATIC);
     // get receiver (assume this is frequent case)
     __ ldr(r0, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
     __ br(Assembler::EQ, done);
-    __ ldr(r0, Address(rmethod, Method::const_offset()));
-    __ ldr(r0, Address(r0, ConstMethod::constants_offset()));
-    __ ldr(r0, Address(r0,
-                       ConstantPool::pool_holder_offset_in_bytes()));
-    __ ldr(r0, Address(r0, mirror_offset));
+    __ load_mirror(r0, rmethod);

 #ifdef ASSERT
     {
@@ -807,16 +802,16 @@ void TemplateInterpreterGenerator::lock_method() {
 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
   // initialize fixed part of activation frame
   if (native_call) {
-    __ sub(esp, sp, 12 * wordSize);
+    __ sub(esp, sp, 14 * wordSize);
     __ mov(rbcp, zr);
-    __ stp(esp, zr, Address(__ pre(sp, -12 * wordSize)));
+    __ stp(esp, zr, Address(__ pre(sp, -14 * wordSize)));
     // add 2 zero-initialized slots for native calls
-    __ stp(zr, zr, Address(sp, 10 * wordSize));
+    __ stp(zr, zr, Address(sp, 12 * wordSize));
   } else {
-    __ sub(esp, sp, 10 * wordSize);
+    __ sub(esp, sp, 12 * wordSize);
     __ ldr(rscratch1, Address(rmethod, Method::const_offset()));    // get ConstMethod
     __ add(rbcp, rscratch1, in_bytes(ConstMethod::codes_offset())); // get codebase
-    __ stp(esp, rbcp, Address(__ pre(sp, -10 * wordSize)));
+    __ stp(esp, rbcp, Address(__ pre(sp, -12 * wordSize)));
   }

   if (ProfileInterpreter) {
@@ -825,22 +820,26 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
     __ cbz(rscratch1, method_data_continue);
     __ lea(rscratch1, Address(rscratch1, in_bytes(MethodData::data_offset())));
     __ bind(method_data_continue);
-    __ stp(rscratch1, rmethod, Address(sp, 4 * wordSize));  // save Method* and mdp (method data pointer)
+    __ stp(rscratch1, rmethod, Address(sp, 6 * wordSize));  // save Method* and mdp (method data pointer)
   } else {
-    __ stp(zr, rmethod, Address(sp, 4 * wordSize));         // save Method* (no mdp)
+    __ stp(zr, rmethod, Address(sp, 6 * wordSize));         // save Method* (no mdp)
   }

+  // Get mirror and store it in the frame as GC root for this Method*
+  __ load_mirror(rscratch1, rmethod);
+  __ stp(rscratch1, zr, Address(sp, 4 * wordSize));
+
   __ ldr(rcpool, Address(rmethod, Method::const_offset()));
   __ ldr(rcpool, Address(rcpool, ConstMethod::constants_offset()));
   __ ldr(rcpool, Address(rcpool, ConstantPool::cache_offset_in_bytes()));
   __ stp(rlocals, rcpool, Address(sp, 2 * wordSize));

-  __ stp(rfp, lr, Address(sp, 8 * wordSize));
-  __ lea(rfp, Address(sp, 8 * wordSize));
+  __ stp(rfp, lr, Address(sp, 10 * wordSize));
+  __ lea(rfp, Address(sp, 10 * wordSize));

   // set sender sp
   // leave last_sp as null
-  __ stp(zr, r13, Address(sp, 6 * wordSize));
+  __ stp(zr, r13, Address(sp, 8 * wordSize));

   // Move SP out of the way
   if (! native_call) {
@@ -1242,15 +1241,11 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
   // pass mirror handle if static call
   {
     Label L;
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
     __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
     __ tst(t, JVM_ACC_STATIC);
     __ br(Assembler::EQ, L);
     // get mirror
-    __ ldr(t, Address(rmethod, Method::const_offset()));
-    __ ldr(t, Address(t, ConstMethod::constants_offset()));
-    __ ldr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
-    __ ldr(t, Address(t, mirror_offset));
+    __ load_mirror(t, rmethod);
     // copy mirror into activation frame
     __ str(t, Address(rfp, frame::interpreter_frame_oop_temp_offset * wordSize));
     // pass handle to mirror
@@ -229,6 +229,7 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
     switch (bc) {
     case Bytecodes::_fast_aputfield:
     case Bytecodes::_fast_bputfield:
+    case Bytecodes::_fast_zputfield:
     case Bytecodes::_fast_cputfield:
     case Bytecodes::_fast_dputfield:
     case Bytecodes::_fast_fputfield:
@@ -1082,6 +1083,17 @@ void TemplateTable::bastore()
   // r1: index
   // r3: array
   index_check(r3, r1); // prefer index in r1

+  // Need to check whether array is boolean or byte
+  // since both types share the bastore bytecode.
+  __ load_klass(r2, r3);
+  __ ldrw(r2, Address(r2, Klass::layout_helper_offset()));
+  int diffbit_index = exact_log2(Klass::layout_helper_boolean_diffbit());
+  Label L_skip;
+  __ tbz(r2, diffbit_index, L_skip);
+  __ andw(r0, r0, 1);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
+  __ bind(L_skip);
+
   __ lea(rscratch1, Address(r3, r1, Address::uxtw(0)));
   __ strb(r0, Address(rscratch1,
                       arrayOopDesc::base_offset_in_bytes(T_BYTE)));
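
The bastore sequence above is the interpreter-side twin of the C1 check_boolean() change: bastore serves both byte[] and boolean[], and the two array klasses differ in exactly one bit of their layout_helper, so testing that bit selects whether the value is masked. A hedged C++ model of the decision (editorial sketch; layout_helper and diffbit are passed in rather than read from a real Klass):

    #include <cstdint>

    // Returns the byte value that bastore actually stores.
    static inline int8_t bastore_value(uint32_t layout_helper,
                                       uint32_t boolean_diffbit,
                                       int32_t value) {
      bool is_boolean_array = (layout_helper & boolean_diffbit) != 0;
      return is_boolean_array ? (int8_t)(value & 1)  // boolean[]: keep bit 0
                              : (int8_t)value;       // byte[]: low 8 bits
    }
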
@@ -2193,6 +2205,13 @@ void TemplateTable::_return(TosState state)
   if (_desc->bytecode() == Bytecodes::_return)
     __ membar(MacroAssembler::StoreStore);

+  // Narrow result if state is itos but result type is smaller.
+  // Need to narrow in the return bytecode rather than in generate_return_entry
+  // since compiled code callers expect the result to already be narrowed.
+  if (state == itos) {
+    __ narrow(r0);
+  }
+
   __ remove_activation(state);
   __ ret(lr);
 }
@@ -2386,7 +2405,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr

   const Address field(obj, off);

-  Label Done, notByte, notInt, notShort, notChar,
+  Label Done, notByte, notBool, notInt, notShort, notChar,
               notLong, notFloat, notObj, notDouble;

   // x86 uses a shift and mask or wings it with a shift plus assert
@@ -2409,6 +2428,20 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
   __ b(Done);

   __ bind(notByte);
+  __ cmp(flags, ztos);
+  __ br(Assembler::NE, notBool);
+
+  // ztos (same code as btos)
+  __ ldrsb(r0, field);
+  __ push(ztos);
+  // Rewrite bytecode to be faster
+  if (!is_static) {
+    // use btos rewriting, no truncating to t/f bit is needed for getfield.
+    patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
+  }
+  __ b(Done);
+
+  __ bind(notBool);
   __ cmp(flags, atos);
   __ br(Assembler::NE, notObj);
   // atos
@@ -2604,7 +2637,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
   // field address
   const Address field(obj, off);

-  Label notByte, notInt, notShort, notChar,
+  Label notByte, notBool, notInt, notShort, notChar,
         notLong, notFloat, notObj, notDouble;

   // x86 uses a shift and mask or wings it with a shift plus assert
@@ -2629,6 +2662,22 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
   }

   __ bind(notByte);
+  __ cmp(flags, ztos);
+  __ br(Assembler::NE, notBool);
+
+  // ztos
+  {
+    __ pop(ztos);
+    if (!is_static) pop_and_check_object(obj);
+    __ andw(r0, r0, 0x1);
+    __ strb(r0, field);
+    if (!is_static) {
+      patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no);
+    }
+    __ b(Done);
+  }
+
+  __ bind(notBool);
   __ cmp(flags, atos);
   __ br(Assembler::NE, notObj);

@@ -2783,6 +2832,7 @@ void TemplateTable::jvmti_post_fast_field_mod()
     switch (bytecode()) { // load values into the jvalue object
     case Bytecodes::_fast_aputfield: __ push_ptr(r0); break;
     case Bytecodes::_fast_bputfield: // fall through
+    case Bytecodes::_fast_zputfield: // fall through
     case Bytecodes::_fast_sputfield: // fall through
     case Bytecodes::_fast_cputfield: // fall through
     case Bytecodes::_fast_iputfield: __ push_i(r0); break;
@@ -2808,6 +2858,7 @@ void TemplateTable::jvmti_post_fast_field_mod()
     switch (bytecode()) { // restore tos values
     case Bytecodes::_fast_aputfield: __ pop_ptr(r0); break;
     case Bytecodes::_fast_bputfield: // fall through
+    case Bytecodes::_fast_zputfield: // fall through
     case Bytecodes::_fast_sputfield: // fall through
     case Bytecodes::_fast_cputfield: // fall through
     case Bytecodes::_fast_iputfield: __ pop_i(r0); break;
@@ -2863,6 +2914,9 @@ void TemplateTable::fast_storefield(TosState state)
   case Bytecodes::_fast_iputfield:
     __ strw(r0, field);
     break;
+  case Bytecodes::_fast_zputfield:
+    __ andw(r0, r0, 0x1);  // boolean is true if LSB is 1
+    // fall through to bputfield
   case Bytecodes::_fast_bputfield:
     __ strb(r0, field);
     break;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2015 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -152,6 +152,7 @@ void AbstractInterpreter::layout_activation(Method* method,
   intptr_t* top_frame_sp = is_top_frame ? sp : sp + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;

   interpreter_frame->interpreter_frame_set_method(method);
+  interpreter_frame->interpreter_frame_set_mirror(method->method_holder()->java_mirror());
   interpreter_frame->interpreter_frame_set_locals(locals_base);
   interpreter_frame->interpreter_frame_set_cpcache(method->constants()->cache());
   interpreter_frame->interpreter_frame_set_esp(esp);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -261,6 +261,7 @@
   uint64_t ijava_reserved2; // Inserted for alignment.
 #endif
   uint64_t method;
+  uint64_t mirror;
   uint64_t locals;
   uint64_t monitors;
   uint64_t cpoolCache;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -148,6 +148,11 @@ inline BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
 inline Method** frame::interpreter_frame_method_addr() const {
   return (Method**) &(get_ijava_state()->method);
 }

+inline oop* frame::interpreter_frame_mirror_addr() const {
+  return (oop*) &(get_ijava_state()->mirror);
+}
+
 inline ConstantPoolCache** frame::interpreter_frame_cpoolcache_addr() const {
   return (ConstantPoolCache**) &(get_ijava_state()->cpoolCache);
 }
@@ -3118,6 +3118,14 @@ void MacroAssembler::load_klass(Register dst, Register src) {
   }
 }

+void MacroAssembler::load_mirror(Register mirror, Register method) {
+  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+  ld(mirror, in_bytes(Method::const_offset()), method);
+  ld(mirror, in_bytes(ConstMethod::constants_offset()), mirror);
+  ld(mirror, ConstantPool::pool_holder_offset_in_bytes(), mirror);
+  ld(mirror, mirror_offset, mirror);
+}
+
 // Clear Array
 // Kills both input registers. tmp == R0 is allowed.
 void MacroAssembler::clear_memory_doubleword(Register base_ptr, Register cnt_dwords, Register tmp) {
@@ -647,6 +647,9 @@ class MacroAssembler: public Assembler {
   void load_klass(Register dst, Register src);
   void store_klass(Register dst_oop, Register klass, Register tmp = R0);
   void store_klass_gap(Register dst_oop, Register val = noreg); // Will store 0 if val not specified.

+  void load_mirror(Register mirror, Register method);
+
   static int instr_size_for_decode_klass_not_null();
   void decode_klass_not_null(Register dst, Register src = noreg);
   Register encode_klass_not_null(Register dst, Register src = noreg);
@@ -869,7 +869,6 @@ void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratc

   // Get synchronization object to Rscratch2.
   {
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
     Label Lstatic;
     Label Ldone;

@@ -881,10 +880,7 @@ void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratc
     __ b(Ldone);

     __ bind(Lstatic); // Static case: Lock the java mirror
-    __ ld(Robj_to_lock, in_bytes(Method::const_offset()), R19_method);
-    __ ld(Robj_to_lock, in_bytes(ConstMethod::constants_offset()), Robj_to_lock);
-    __ ld(Robj_to_lock, ConstantPool::pool_holder_offset_in_bytes(), Robj_to_lock);
-    __ ld(Robj_to_lock, mirror_offset, Robj_to_lock);
+    __ load_mirror(Robj_to_lock, R19_method);

     __ bind(Ldone);
     __ verify_oop(Robj_to_lock);
@@ -1049,10 +1045,14 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Regist
   __ addi(R26_monitor, R1_SP, - frame::ijava_state_size);
   __ addi(R15_esp, R26_monitor, - Interpreter::stackElementSize);

+  // Get mirror and store it in the frame as GC root for this Method*
+  __ load_mirror(R12_scratch2, R19_method);
+
   // Store values.
   // R15_esp, R14_bcp, R26_monitor, R28_mdx are saved at java calls
   // in InterpreterMacroAssembler::call_from_interpreter.
   __ std(R19_method, _ijava_state_neg(method), R1_SP);
+  __ std(R12_scratch2, _ijava_state_neg(mirror), R1_SP);
   __ std(R21_sender_SP, _ijava_state_neg(sender_sp), R1_SP);
   __ std(R27_constPoolCache, _ijava_state_neg(cpoolCache), R1_SP);
   __ std(R18_locals, _ijava_state_neg(locals), R1_SP);
@@ -1317,21 +1317,11 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
   __ testbitdi(CCR0, R0, access_flags, JVM_ACC_STATIC_BIT);
   __ bfalse(CCR0, method_is_not_static);

-  // constants = method->constants();
-  __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
-  __ ld(R11_scratch1, in_bytes(ConstMethod::constants_offset()), R11_scratch1);
-  // pool_holder = method->constants()->pool_holder();
-  __ ld(R11_scratch1/*pool_holder*/, ConstantPool::pool_holder_offset_in_bytes(),
-        R11_scratch1/*constants*/);
-
-  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-
-  // mirror = pool_holder->klass_part()->java_mirror();
-  __ ld(R0/*mirror*/, mirror_offset, R11_scratch1/*pool_holder*/);
+  __ load_mirror(R12_scratch2, R19_method);
   // state->_native_mirror = mirror;

   __ ld(R11_scratch1, 0, R1_SP);
-  __ std(R0/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1);
+  __ std(R12_scratch2/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1);
   // R4_ARG2 = &state->_oop_temp;
   __ addi(R4_ARG2, R11_scratch1, _ijava_state_neg(oop_tmp));
   BIND(method_is_not_static);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -281,11 +281,12 @@ void AbstractInterpreter::layout_activation(Method* method,
   // Llast_SP will be same as SP as there is no adapter space
   *interpreter_frame->register_addr(Llast_SP) = (intptr_t) interpreter_frame->sp() - STACK_BIAS;
   *interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache();
+  // save the mirror in the interpreter frame
+  *interpreter_frame->interpreter_frame_mirror_addr() = method->method_holder()->java_mirror();
 #ifdef FAST_DISPATCH
   *interpreter_frame->register_addr(IdispatchTables) = (intptr_t) Interpreter::dispatch_table();
 #endif


 #ifdef ASSERT
   BasicObjectLock* mp = (BasicObjectLock*)monitors;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -783,7 +783,7 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
   if (is_interpreted_frame()) {
     DESCRIBE_FP_OFFSET(interpreter_frame_d_scratch_fp);
     DESCRIBE_FP_OFFSET(interpreter_frame_l_scratch_fp);
-    DESCRIBE_FP_OFFSET(interpreter_frame_padding);
+    DESCRIBE_FP_OFFSET(interpreter_frame_mirror);
     DESCRIBE_FP_OFFSET(interpreter_frame_oop_temp);

     // esp, according to Lesp (e.g. not depending on bci), if seems valid
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -209,7 +209,8 @@
        // 2 words, also used to save float regs across calls to C
        interpreter_frame_d_scratch_fp_offset = -2,
        interpreter_frame_l_scratch_fp_offset = -4,
-       interpreter_frame_padding_offset = -5, // for native calls only
+       interpreter_frame_mirror_offset = -5, // keep interpreted method alive
+
        interpreter_frame_oop_temp_offset = -6, // for native calls only
        interpreter_frame_vm_locals_fp_offset = -6, // should be same as above, and should be zero mod 8
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -163,6 +163,10 @@ inline void frame::interpreter_frame_set_monitors(BasicObjectLock* monitors) {
   *interpreter_frame_monitors_addr() = monitors;
 }

+inline oop* frame::interpreter_frame_mirror_addr() const {
+  return (oop*)(fp() + interpreter_frame_mirror_offset);
+}
+
 // Constant pool cache

 // where LcpoolCache is saved:
@@ -3972,6 +3972,14 @@ void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_v
   card_table_write(bs->byte_map_base, tmp, store_addr);
 }

+void MacroAssembler::load_mirror(Register mirror, Register method) {
+  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+  ld_ptr(method, in_bytes(Method::const_offset()), mirror);
+  ld_ptr(mirror, in_bytes(ConstMethod::constants_offset()), mirror);
+  ld_ptr(mirror, ConstantPool::pool_holder_offset_in_bytes(), mirror);
+  ld_ptr(mirror, mirror_offset, mirror);
+}
+
 void MacroAssembler::load_klass(Register src_oop, Register klass) {
   // The number of bytes in this code is used by
   // MachCallDynamicJavaNode::ret_addr_offset()
@@ -1012,6 +1012,8 @@ public:
   inline void ldbool(const Address& a, Register d);
   inline void movbool( bool boolconst, Register d);

+  void load_mirror(Register mirror, Register method);
+
   // klass oop manipulations if compressed
   void load_klass(Register src_oop, Register klass);
   void store_klass(Register klass, Register dst_oop);
@@ -557,17 +557,12 @@ void TemplateInterpreterGenerator::lock_method() {

   // get synchronization object to O0
   { Label done;
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
     __ btst(JVM_ACC_STATIC, O0);
     __ br( Assembler::zero, true, Assembler::pt, done);
     __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case

-    __ ld_ptr( Lmethod, in_bytes(Method::const_offset()), O0);
-    __ ld_ptr( O0, in_bytes(ConstMethod::constants_offset()), O0);
-    __ ld_ptr( O0, ConstantPool::pool_holder_offset_in_bytes(), O0);
-
     // lock the mirror, not the Klass*
-    __ ld_ptr( O0, mirror_offset, O0);
+    __ load_mirror(O0, Lmethod);

 #ifdef ASSERT
     __ tst(O0);
@@ -881,6 +876,10 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
     __ add(Lbcp, in_bytes(ConstMethod::codes_offset()), Lbcp);
   }
   __ mov( G5_method, Lmethod); // set Lmethod
+  // Get mirror and store it in the frame as GC root for this Method*
+  Register mirror = LcpoolCache;
+  __ load_mirror(mirror, Lmethod);
+  __ st_ptr(mirror, FP, (frame::interpreter_frame_mirror_offset * wordSize) + STACK_BIAS);
   __ get_constant_pool_cache( LcpoolCache ); // set LcpoolCache
   __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
 #ifdef _LP64
@@ -1297,12 +1296,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
   // get native function entry point(O0 is a good temp until the very end)
   __ delayed()->ld_ptr(Lmethod, in_bytes(Method::native_function_offset()), O0);
   // for static methods insert the mirror argument
-  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-
-  __ ld_ptr(Lmethod, Method:: const_offset(), O1);
-  __ ld_ptr(O1, ConstMethod::constants_offset(), O1);
-  __ ld_ptr(O1, ConstantPool::pool_holder_offset_in_bytes(), O1);
-  __ ld_ptr(O1, mirror_offset, O1);
+  __ load_mirror(O1, Lmethod);
 #ifdef ASSERT
   if (!PrintSignatureHandlers) // do not dirty the output with this
     { Label L;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -113,6 +113,8 @@ void AbstractInterpreter::layout_activation(Method* method,
   }
   *interpreter_frame->interpreter_frame_cache_addr() =
     method->constants()->cache();
+  *interpreter_frame->interpreter_frame_mirror_addr() =
+    method->method_holder()->java_mirror();
 }

 #ifndef _LP64
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -640,6 +640,7 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
     DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
     DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
     DESCRIBE_FP_OFFSET(interpreter_frame_method);
+    DESCRIBE_FP_OFFSET(interpreter_frame_mirror);
     DESCRIBE_FP_OFFSET(interpreter_frame_mdp);
     DESCRIBE_FP_OFFSET(interpreter_frame_cache);
     DESCRIBE_FP_OFFSET(interpreter_frame_locals);
@@ -70,7 +70,8 @@
     // outgoing sp before a call to an invoked method
     interpreter_frame_last_sp_offset = interpreter_frame_sender_sp_offset - 1,
     interpreter_frame_method_offset = interpreter_frame_last_sp_offset - 1,
-    interpreter_frame_mdp_offset = interpreter_frame_method_offset - 1,
+    interpreter_frame_mirror_offset = interpreter_frame_method_offset - 1,
+    interpreter_frame_mdp_offset = interpreter_frame_mirror_offset - 1,
     interpreter_frame_cache_offset = interpreter_frame_mdp_offset - 1,
     interpreter_frame_locals_offset = interpreter_frame_cache_offset - 1,
     interpreter_frame_bcp_offset = interpreter_frame_locals_offset - 1,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -184,6 +184,12 @@ inline Method** frame::interpreter_frame_method_addr() const {
   return (Method**)addr_at(interpreter_frame_method_offset);
 }

+// Mirror
+
+inline oop* frame::interpreter_frame_mirror_addr() const {
+  return (oop*)addr_at(interpreter_frame_mirror_offset);
+}
+
 // top of expression stack
 inline intptr_t* frame::interpreter_frame_tos_address() const {
   intptr_t* last_sp = interpreter_frame_last_sp();
@@ -6705,6 +6705,14 @@ void MacroAssembler::restore_cpu_control_state_after_jni() {
 #endif // _LP64
 }

+void MacroAssembler::load_mirror(Register mirror, Register method) {
+  // get mirror
+  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+  movptr(mirror, Address(method, Method::const_offset()));
+  movptr(mirror, Address(mirror, ConstMethod::constants_offset()));
+  movptr(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes()));
+  movptr(mirror, Address(mirror, mirror_offset));
+}
+
 void MacroAssembler::load_klass(Register dst, Register src) {
 #ifdef _LP64
@@ -323,6 +323,8 @@ class MacroAssembler: public Assembler {
   void movbool(Address dst, Register src);
   void testbool(Register dst);

+  void load_mirror(Register mirror, Register method);
+
   // oop manipulations
   void load_klass(Register dst, Register src);
   void store_klass(Register dst, Register src);
@@ -608,18 +608,13 @@ void TemplateInterpreterGenerator::lock_method() {

   // get synchronization object
   {
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
     Label done;
     __ movl(rax, access_flags);
     __ testl(rax, JVM_ACC_STATIC);
     // get receiver (assume this is frequent case)
     __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
     __ jcc(Assembler::zero, done);
-    __ movptr(rax, Address(rbx, Method::const_offset()));
-    __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
-    __ movptr(rax, Address(rax,
-                           ConstantPool::pool_holder_offset_in_bytes()));
-    __ movptr(rax, Address(rax, mirror_offset));
+    __ load_mirror(rax, rbx);

 #ifdef ASSERT
     {
@@ -662,6 +657,9 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
   __ movptr(rbcp, Address(rbx, Method::const_offset()));    // get ConstMethod*
   __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
   __ push(rbx); // save Method*
+  // Get mirror and store it in the frame as GC root for this Method*
+  __ load_mirror(rdx, rbx);
+  __ push(rdx);
   if (ProfileInterpreter) {
     Label method_data_continue;
     __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
@@ -999,15 +997,11 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
   // pass mirror handle if static call
   {
     Label L;
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
     __ movl(t, Address(method, Method::access_flags_offset()));
     __ testl(t, JVM_ACC_STATIC);
     __ jcc(Assembler::zero, L);
     // get mirror
-    __ movptr(t, Address(method, Method::const_offset()));
-    __ movptr(t, Address(t, ConstMethod::constants_offset()));
-    __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
-    __ movptr(t, Address(t, mirror_offset));
+    __ load_mirror(t, method);
     // copy mirror into activation frame
     __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
               t);
@@ -49,6 +49,7 @@ const char *BytecodeInterpreter::name_of_field_at_address(address addr) {
   DO(_locals);
   DO(_constants);
   DO(_method);
+  DO(_mirror);
   DO(_mdx);
   DO(_stack);
   DO(_msg);
@@ -77,6 +78,7 @@ void BytecodeInterpreter::layout_interpreterState(interpreterState istate,
                                                   bool is_top_frame) {
   istate->set_locals(locals);
   istate->set_method(method);
+  istate->set_mirror(method->method_holder()->java_mirror());
   istate->set_self_link(istate);
   istate->set_prev_link(NULL);
   // thread will be set by a hacky repurposing of frame::patch_pc()
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -44,6 +44,9 @@
   inline void set_method(Method* new_method) {
     _method = new_method;
   }
+  inline void set_mirror(oop new_mirror) {
+    _mirror = new_mirror;
+  }
   inline interpreterState self_link() {
     return _self_link;
   }
@@ -755,6 +755,7 @@ InterpreterFrame *InterpreterFrame::build(Method* const method, TRAPS) {

   istate->set_locals(locals);
   istate->set_method(method);
+  istate->set_mirror(method->method_holder()->java_mirror());
   istate->set_self_link(istate);
   istate->set_prev_link(NULL);
   istate->set_thread(thread);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -115,6 +115,10 @@ inline Method** frame::interpreter_frame_method_addr() const {
   return &(get_interpreterState()->_method);
 }

+inline oop* frame::interpreter_frame_mirror_addr() const {
+  return &(get_interpreterState()->_mirror);
+}
+
 inline intptr_t* frame::interpreter_frame_mdp_addr() const {
   return (intptr_t*) &(get_interpreterState()->_mdx);
 }
@@ -4875,3 +4875,16 @@ bool os::start_debugging(char *buf, int buflen) {
   }
   return yes;
 }
+
+static inline time_t get_mtime(const char* filename) {
+  struct stat st;
+  int ret = os::stat(filename, &st);
+  assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
+  return st.st_mtime;
+}
+
+int os::compare_file_modified_times(const char* file1, const char* file2) {
+  time_t t1 = get_mtime(file1);
+  time_t t2 = get_mtime(file2);
+  return t1 - t2;
+}
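
A hedged usage note on the helper added above: the sign of the returned int orders the two files by modification time, and the time_t difference is narrowed to int on return. Plain-POSIX equivalent (editorial sketch; hypothetical error handling):

    #include <sys/stat.h>

    static int compare_mtimes(const char* f1, const char* f2) {
      struct stat s1, s2;
      if (stat(f1, &s1) != 0 || stat(f2, &s2) != 0) return 0;  // sketch: ignore errors
      return (int)(s1.st_mtime - s2.st_mtime);  // > 0: f1 newer, < 0: f2 newer
    }
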
@@ -2013,8 +2013,8 @@ void os::print_os_info(outputStream* st) {
 // their own specific XXX-release file as well as a redhat-release file.
 // Because of this the XXX-release file needs to be searched for before the
 // redhat-release file.
-// Since Red Hat has a lsb-release file that is not very descriptive the
-// search for redhat-release needs to be before lsb-release.
+// Since Red Hat and SuSE have an lsb-release file that is not very descriptive the
+// search for redhat-release / SuSE-release needs to be before lsb-release.
 // Since the lsb-release file is the new standard it needs to be searched
 // before the older style release files.
 // Searching system-release (Red Hat) and os-release (other Linuxes) are a
@@ -2031,8 +2031,8 @@ const char* distro_files[] = {
   "/etc/mandrake-release",
   "/etc/sun-release",
   "/etc/redhat-release",
-  "/etc/lsb-release",
   "/etc/SuSE-release",
+  "/etc/lsb-release",
   "/etc/turbolinux-release",
   "/etc/gentoo-release",
   "/etc/ltib-release",
@@ -2062,14 +2062,11 @@ void os::Linux::print_distro_info(outputStream* st) {
   st->cr();
 }

-static void parse_os_info(char* distro, size_t length, const char* file) {
-  FILE* fp = fopen(file, "r");
-  if (fp != NULL) {
-    char buf[256];
-    // get last line of the file.
-    while (fgets(buf, sizeof(buf), fp)) { }
-    // Edit out extra stuff in expected ubuntu format
-    if (strstr(buf, "DISTRIB_DESCRIPTION=") != NULL) {
+static void parse_os_info_helper(FILE* fp, char* distro, size_t length, bool get_first_line) {
+  char buf[256];
+  while (fgets(buf, sizeof(buf), fp)) {
+    // Edit out extra stuff in expected format
+    if (strstr(buf, "DISTRIB_DESCRIPTION=") != NULL || strstr(buf, "PRETTY_NAME=") != NULL) {
       char* ptr = strstr(buf, "\""); // the name is in quotes
       if (ptr != NULL) {
         ptr++; // go beyond first quote
@@ -2083,13 +2080,26 @@ static void parse_os_info(char* distro, size_t length, const char* file) {
         if (nl != NULL) *nl = '\0';
         strncpy(distro, ptr, length);
       }
-    } else {
-      // if not in expected Ubuntu format, print out whole line minus \n
+      return;
+    } else if (get_first_line) {
       char* nl = strchr(buf, '\n');
       if (nl != NULL) *nl = '\0';
       strncpy(distro, buf, length);
+      return;
     }
-    // close distro file
   }
+  // print last line and close
+  char* nl = strchr(buf, '\n');
+  if (nl != NULL) *nl = '\0';
+  strncpy(distro, buf, length);
+}
+
+static void parse_os_info(char* distro, size_t length, const char* file) {
+  FILE* fp = fopen(file, "r");
+  if (fp != NULL) {
+    // if suse format, print out first line
+    bool get_first_line = (strcmp(file, "/etc/SuSE-release") == 0);
+    parse_os_info_helper(fp, distro, length, get_first_line);
+    fclose(fp);
+  }
+}
@@ -3041,6 +3051,48 @@ static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
   return addr == MAP_FAILED ? NULL : addr;
 }

+// Allocate (using mmap, NO_RESERVE, with small pages) at either a given request address
+// (req_addr != NULL) or with a given alignment.
+// - bytes shall be a multiple of alignment.
+// - req_addr can be NULL. If not NULL, it must be a multiple of alignment.
+// - alignment sets the alignment at which memory shall be allocated.
+//   It must be a multiple of allocation granularity.
+// Returns address of memory or NULL. If req_addr was not NULL, will only return
+// req_addr or NULL.
+static char* anon_mmap_aligned(size_t bytes, size_t alignment, char* req_addr) {
+
+  size_t extra_size = bytes;
+  if (req_addr == NULL && alignment > 0) {
+    extra_size += alignment;
+  }
+
+  char* start = (char*) ::mmap(req_addr, extra_size, PROT_NONE,
+                               MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
+                               -1, 0);
+  if (start == MAP_FAILED) {
+    start = NULL;
+  } else {
+    if (req_addr != NULL) {
+      if (start != req_addr) {
+        ::munmap(start, extra_size);
+        start = NULL;
+      }
+    } else {
+      char* const start_aligned = (char*) align_ptr_up(start, alignment);
+      char* const end_aligned = start_aligned + bytes;
+      char* const end = start + extra_size;
+      if (start_aligned > start) {
+        ::munmap(start, start_aligned - start);
+      }
+      if (end_aligned < end) {
+        ::munmap(end_aligned, end - end_aligned);
+      }
+      start = start_aligned;
+    }
+  }
+  return start;
+}
+
 static int anon_munmap(char * addr, size_t size) {
   return ::munmap(addr, size) == 0;
 }
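
The over-reserve-then-trim trick above is the standard way to get alignment out of mmap(): reserve bytes + alignment, round the start address up, and munmap the unused head and tail. A hedged sketch of the rounding step (align_ptr_up() in HotSpot; alignment assumed to be a power of two):

    #include <cstdint>
    #include <cstddef>

    static inline char* align_up(char* p, size_t alignment) {
      uintptr_t v = (uintptr_t)p;
      return (char*)((v + alignment - 1) & ~(uintptr_t)(alignment - 1));
    }
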
@@ -3317,29 +3369,113 @@ void os::large_page_init() {
 #define SHM_HUGETLB 04000
 #endif

+#define shm_warning_format(format, ...)             \
+  do {                                              \
+    if (UseLargePages &&                            \
+        (!FLAG_IS_DEFAULT(UseLargePages) ||         \
+         !FLAG_IS_DEFAULT(UseSHM) ||                \
+         !FLAG_IS_DEFAULT(LargePageSizeInBytes))) { \
+      warning(format, __VA_ARGS__);                 \
+    }                                               \
+  } while (0)
+
+#define shm_warning(str) shm_warning_format("%s", str)
+
+#define shm_warning_with_errno(str)               \
+  do {                                            \
+    int err = errno;                              \
+    shm_warning_format(str " (error = %d)", err); \
+  } while (0)
+
+static char* shmat_with_alignment(int shmid, size_t bytes, size_t alignment) {
+  assert(is_size_aligned(bytes, alignment), "Must be divisible by the alignment");
+
+  if (!is_size_aligned(alignment, SHMLBA)) {
+    assert(false, "Code below assumes that alignment is at least SHMLBA aligned");
+    return NULL;
+  }
+
+  // To ensure that we get 'alignment' aligned memory from shmat,
+  // we pre-reserve aligned virtual memory and then attach to that.
+
+  char* pre_reserved_addr = anon_mmap_aligned(bytes, alignment, NULL);
+  if (pre_reserved_addr == NULL) {
+    // Couldn't pre-reserve aligned memory.
+    shm_warning("Failed to pre-reserve aligned memory for shmat.");
+    return NULL;
+  }
+
+  // SHM_REMAP is needed to allow shmat to map over an existing mapping.
+  char* addr = (char*)shmat(shmid, pre_reserved_addr, SHM_REMAP);
+
+  if ((intptr_t)addr == -1) {
+    int err = errno;
+    shm_warning_with_errno("Failed to attach shared memory.");
+
+    assert(err != EACCES, "Unexpected error");
+    assert(err != EIDRM, "Unexpected error");
+    assert(err != EINVAL, "Unexpected error");
+
+    // Since we don't know if the kernel unmapped the pre-reserved memory area
+    // we can't unmap it, since that would potentially unmap memory that was
+    // mapped from other threads.
+    return NULL;
+  }
+
+  return addr;
+}
+
+static char* shmat_at_address(int shmid, char* req_addr) {
+  if (!is_ptr_aligned(req_addr, SHMLBA)) {
+    assert(false, "Requested address needs to be SHMLBA aligned");
+    return NULL;
+  }
+
+  char* addr = (char*)shmat(shmid, req_addr, 0);
+
+  if ((intptr_t)addr == -1) {
+    shm_warning_with_errno("Failed to attach shared memory.");
+    return NULL;
+  }
+
+  return addr;
+}
+
+static char* shmat_large_pages(int shmid, size_t bytes, size_t alignment, char* req_addr) {
+  // If a req_addr has been provided, we assume that the caller has already aligned the address.
+  if (req_addr != NULL) {
+    assert(is_ptr_aligned(req_addr, os::large_page_size()), "Must be divisible by the large page size");
+    assert(is_ptr_aligned(req_addr, alignment), "Must be divisible by given alignment");
+    return shmat_at_address(shmid, req_addr);
+  }
+
+  // Since shmid has been setup with SHM_HUGETLB, shmat will automatically
+  // return large page size aligned memory addresses when req_addr == NULL.
+  // However, if the alignment is larger than the large page size, we have
+  // to manually ensure that the memory returned is 'alignment' aligned.
+  if (alignment > os::large_page_size()) {
+    assert(is_size_aligned(alignment, os::large_page_size()), "Must be divisible by the large page size");
+    return shmat_with_alignment(shmid, bytes, alignment);
+  } else {
+    return shmat_at_address(shmid, NULL);
+  }
+}
+
 char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment,
                                             char* req_addr, bool exec) {
   // "exec" is passed in but not used. Creating the shared image for
   // the code cache doesn't have an SHM_X executable permission to check.
   assert(UseLargePages && UseSHM, "only for SHM large pages");
+  assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
+  assert(is_ptr_aligned(req_addr, alignment), "Unaligned address");

-  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
+  if (!is_size_aligned(bytes, os::large_page_size())) {
     return NULL; // Fallback to small pages.
   }

-  key_t key = IPC_PRIVATE;
-  char *addr;
-
-  bool warn_on_failure = UseLargePages &&
-                        (!FLAG_IS_DEFAULT(UseLargePages) ||
-                         !FLAG_IS_DEFAULT(UseSHM) ||
-                         !FLAG_IS_DEFAULT(LargePageSizeInBytes));
-  char msg[128];
-
   // Create a large shared memory region to attach to based on size.
-  // Currently, size is the total size of the heap
-  int shmid = shmget(key, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
+  // Currently, size is the total size of the heap.
+  int shmid = shmget(IPC_PRIVATE, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
   if (shmid == -1) {
     // Possible reasons for shmget failure:
     // 1. shmmax is too small for Java heap.
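
Why the new helpers: shmat() has no way to request an arbitrary alignment, so shmat_with_alignment() first carves out an aligned PROT_NONE placeholder with anon_mmap_aligned() and then attaches over it with SHM_REMAP. A hedged, self-contained model of that idea (editorial sketch; Linux-specific, power-of-two alignment assumed, error handling reduced to NULL returns):

    #define _GNU_SOURCE  // for SHM_REMAP
    #include <stdint.h>
    #include <sys/mman.h>
    #include <sys/shm.h>

    static char* attach_aligned(int shmid, size_t bytes, size_t alignment) {
      // Over-reserve, then trim to an aligned start (same trick as anon_mmap_aligned()).
      char* raw = (char*)mmap(NULL, bytes + alignment, PROT_NONE,
                              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (raw == MAP_FAILED) return NULL;
      char* aligned = (char*)(((uintptr_t)raw + alignment - 1) & ~(uintptr_t)(alignment - 1));
      if (aligned > raw) munmap(raw, aligned - raw);                // unused head
      if (aligned < raw + alignment)
        munmap(aligned + bytes, (raw + alignment) - aligned);      // unused tail
      // SHM_REMAP lets shmat() replace the placeholder mapping in one step.
      char* addr = (char*)shmat(shmid, aligned, SHM_REMAP);
      return (addr == (char*)-1) ? NULL : addr;
    }
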
@@ -3355,16 +3491,12 @@ char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment,
     // they are so fragmented after a long run that they can't
     // coalesce into large pages. Try to reserve large pages when
     // the system is still "fresh".
-    if (warn_on_failure) {
-      jio_snprintf(msg, sizeof(msg), "Failed to reserve shared memory (errno = %d).", errno);
-      warning("%s", msg);
-    }
+    shm_warning_with_errno("Failed to reserve shared memory.");
     return NULL;
   }

-  // attach to the region
-  addr = (char*)shmat(shmid, req_addr, 0);
-  int err = errno;
+  // Attach to the region.
+  char* addr = shmat_large_pages(shmid, bytes, alignment, req_addr);

   // Remove shmid. If shmat() is successful, the actual shared memory segment
   // will be deleted when it's detached by shmdt() or when the process
@@ -3372,14 +3504,6 @@ char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment,
   // segment immediately.
   shmctl(shmid, IPC_RMID, NULL);

-  if ((intptr_t)addr == -1) {
-    if (warn_on_failure) {
-      jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err);
-      warning("%s", msg);
-    }
-    return NULL;
-  }
-
   return addr;
 }

@@ -3422,50 +3546,6 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes,
   return addr;
 }

-// Helper for os::Linux::reserve_memory_special_huge_tlbfs_mixed().
-// Allocate (using mmap, NO_RESERVE, with small pages) at either a given request address
-// (req_addr != NULL) or with a given alignment.
-// - bytes shall be a multiple of alignment.
-// - req_addr can be NULL. If not NULL, it must be a multiple of alignment.
-// - alignment sets the alignment at which memory shall be allocated.
-//   It must be a multiple of allocation granularity.
-// Returns address of memory or NULL. If req_addr was not NULL, will only return
-// req_addr or NULL.
-static char* anon_mmap_aligned(size_t bytes, size_t alignment, char* req_addr) {
-
-  size_t extra_size = bytes;
-  if (req_addr == NULL && alignment > 0) {
-    extra_size += alignment;
-  }
-
-  char* start = (char*) ::mmap(req_addr, extra_size, PROT_NONE,
-                               MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
-                               -1, 0);
-  if (start == MAP_FAILED) {
-    start = NULL;
-  } else {
-    if (req_addr != NULL) {
-      if (start != req_addr) {
-        ::munmap(start, extra_size);
-        start = NULL;
-      }
-    } else {
-      char* const start_aligned = (char*) align_ptr_up(start, alignment);
-      char* const end_aligned = start_aligned + bytes;
-      char* const end = start + extra_size;
-      if (start_aligned > start) {
-        ::munmap(start, start_aligned - start);
-      }
-      if (end_aligned < end) {
-        ::munmap(end_aligned, end - end_aligned);
-      }
-      start = start_aligned;
-    }
-  }
-  return start;
-
-}
-
 // Reserve memory using mmap(MAP_HUGETLB).
 // - bytes shall be a multiple of alignment.
 // - req_addr can be NULL. If not NULL, it must be a multiple of alignment.
@@ -181,8 +181,8 @@ int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
   return vsnprintf(buf, len, fmt, args);
 }

-int os::fileno(FILE* fp) {
-  return ::fileno(fp);
+int os::get_fileno(FILE* fp) {
+  return NOT_AIX(::)fileno(fp);
 }

 void os::Posix::print_load_average(outputStream* st) {
@@ -4602,7 +4602,7 @@ int os::ftruncate(int fd, jlong length) {
   return 0;
 }

-int os::fileno(FILE* fp) {
+int os::get_fileno(FILE* fp) {
   return _fileno(fp);
 }
@@ -24,6 +24,7 @@

 #include "precompiled.hpp"
 #include "gc/g1/g1CollectedHeap.hpp"
+#include "gc/g1/g1DefaultPolicy.hpp"
 #include "gc/g1/g1ParScanThreadState.hpp"
 #include "gc/g1/heapRegion.inline.hpp"

@@ -40,5 +41,5 @@ HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
 }

 G1Policy* G1CollectedHeap::create_g1_policy() {
-  return new G1Policy;
+  return new G1DefaultPolicy();
 }
@ -29,6 +29,7 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1DefaultPolicy.hpp"
#include "gc/g1/g1IHOPControl.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1Policy.hpp"
@ -42,7 +43,7 @@
#include "utilities/debug.hpp"
#include "utilities/pair.hpp"

G1Policy::G1Policy() :
G1DefaultPolicy::G1DefaultPolicy() :
  _predictor(G1ConfidencePercent / 100.0),
  _analytics(new G1Analytics(&_predictor)),
  _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
@ -63,13 +64,13 @@ G1Policy::G1Policy() :
  _max_survivor_regions(0),
  _survivors_age_table(true) { }

G1Policy::~G1Policy() {
G1DefaultPolicy::~G1DefaultPolicy() {
  delete _ihop_control;
}

G1CollectorState* G1Policy::collector_state() const { return _g1->collector_state(); }
G1CollectorState* G1DefaultPolicy::collector_state() const { return _g1->collector_state(); }

void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
void G1DefaultPolicy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
  _g1 = g1h;
  _collection_set = collection_set;

@ -88,14 +89,14 @@ void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
  _collection_set->start_incremental_building();
}

void G1Policy::note_gc_start() {
void G1DefaultPolicy::note_gc_start() {
  phase_times()->note_gc_start();
}

bool G1Policy::predict_will_fit(uint young_length,
                                double base_time_ms,
                                uint base_free_regions,
                                double target_pause_time_ms) const {
bool G1DefaultPolicy::predict_will_fit(uint young_length,
                                       double base_time_ms,
                                       uint base_free_regions,
                                       double target_pause_time_ms) const {
  if (young_length >= base_free_regions) {
    // end condition 1: not enough space for the young regions
    return false;
@ -134,7 +135,7 @@ bool G1Policy::predict_will_fit(uint young_length,
  return true;
}

void G1Policy::record_new_heap_size(uint new_number_of_regions) {
void G1DefaultPolicy::record_new_heap_size(uint new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
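The comment is cut off by the hunk boundary, but the arithmetic is visible above: the reserve is computed with a ceiling so that any strictly positive fraction of a region still reserves at least one whole region. A hedged sketch of the effect:

#include <math.h>

// Sketch: ceiling keeps a non-zero reserve factor from rounding down to zero.
unsigned reserve_regions_for(unsigned num_regions, double reserve_factor) {
  double reserve_regions_d = (double) num_regions * reserve_factor;
  return (unsigned) ceil(reserve_regions_d);  // e.g. 10 * 0.05 = 0.5 -> 1 region
}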
@ -146,7 +147,7 @@ void G1Policy::record_new_heap_size(uint new_number_of_regions) {
  _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
}

uint G1Policy::calculate_young_list_desired_min_length(uint base_min_length) const {
uint G1DefaultPolicy::calculate_young_list_desired_min_length(uint base_min_length) const {
  uint desired_min_length = 0;
  if (adaptive_young_list_length()) {
    if (_analytics->num_alloc_rate_ms() > 3) {
@ -163,30 +164,30 @@ uint G1Policy::calculate_young_list_desired_min_length(uint base_min_length) con
  return MAX2(_young_gen_sizer.min_desired_young_length(), desired_min_length);
}

uint G1Policy::calculate_young_list_desired_max_length() const {
uint G1DefaultPolicy::calculate_young_list_desired_max_length() const {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _young_gen_sizer.max_desired_young_length();
}

uint G1Policy::update_young_list_max_and_target_length() {
uint G1DefaultPolicy::update_young_list_max_and_target_length() {
  return update_young_list_max_and_target_length(_analytics->predict_rs_lengths());
}

uint G1Policy::update_young_list_max_and_target_length(size_t rs_lengths) {
uint G1DefaultPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
  uint unbounded_target_length = update_young_list_target_length(rs_lengths);
  update_max_gc_locker_expansion();
  return unbounded_target_length;
}

uint G1Policy::update_young_list_target_length(size_t rs_lengths) {
uint G1DefaultPolicy::update_young_list_target_length(size_t rs_lengths) {
  YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
  _young_list_target_length = young_lengths.first;
  return young_lengths.second;
}

G1Policy::YoungTargetLengths G1Policy::young_list_target_lengths(size_t rs_lengths) const {
G1DefaultPolicy::YoungTargetLengths G1DefaultPolicy::young_list_target_lengths(size_t rs_lengths) const {
  YoungTargetLengths result;

  // Calculate the absolute and desired min bounds first.
@ -252,10 +253,10 @@ G1Policy::YoungTargetLengths G1Policy::young_list_target_lengths(size_t rs_lengt
}

uint
G1Policy::calculate_young_list_target_length(size_t rs_lengths,
                                             uint base_min_length,
                                             uint desired_min_length,
                                             uint desired_max_length) const {
G1DefaultPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                    uint base_min_length,
                                                    uint desired_min_length,
                                                    uint desired_max_length) const {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(collector_state()->gcs_are_young(), "only call this for young GCs");

@ -355,7 +356,7 @@ G1Policy::calculate_young_list_target_length(size_t rs_lengths,
  return base_min_length + min_young_length;
}

double G1Policy::predict_survivor_regions_evac_time() const {
double G1DefaultPolicy::predict_survivor_regions_evac_time() const {
  double survivor_regions_evac_time = 0.0;
  for (HeapRegion* r = _g1->young_list()->first_survivor_region();
       r != NULL && r != _g1->young_list()->last_survivor_region()->get_next_young_region();
@ -365,7 +366,7 @@ double G1Policy::predict_survivor_regions_evac_time() const {
  return survivor_regions_evac_time;
}

void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_lengths) {
void G1DefaultPolicy::revise_young_list_target_length_if_necessary(size_t rs_lengths) {
  guarantee( adaptive_young_list_length(), "should not call this otherwise" );

  if (rs_lengths > _rs_lengths_prediction) {
@ -377,25 +378,25 @@ void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_lengths) {
  }
}

void G1Policy::update_rs_lengths_prediction() {
void G1DefaultPolicy::update_rs_lengths_prediction() {
  update_rs_lengths_prediction(_analytics->predict_rs_lengths());
}

void G1Policy::update_rs_lengths_prediction(size_t prediction) {
void G1DefaultPolicy::update_rs_lengths_prediction(size_t prediction) {
  if (collector_state()->gcs_are_young() && adaptive_young_list_length()) {
    _rs_lengths_prediction = prediction;
  }
}

#ifndef PRODUCT
bool G1Policy::verify_young_ages() {
bool G1DefaultPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return
    verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}

bool G1Policy::verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group) {
bool G1DefaultPolicy::verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group) {
  guarantee( surv_rate_group != NULL, "pre-condition" );

  const char* name = surv_rate_group->name();
@ -431,13 +432,13 @@ bool G1Policy::verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_grou
}
#endif // PRODUCT

void G1Policy::record_full_collection_start() {
void G1DefaultPolicy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  collector_state()->set_full_collection(true);
}

void G1Policy::record_full_collection_end() {
void G1DefaultPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
@ -472,7 +473,7 @@ void G1Policy::record_full_collection_end() {
  record_pause(FullGC, _full_collection_start_sec, end_sec);
}

void G1Policy::record_collection_pause_start(double start_time_sec) {
void G1DefaultPolicy::record_collection_pause_start(double start_time_sec) {
  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start. so, no point is calculating this
  // every time we calculate / recalculate the target young length.
@ -497,18 +498,18 @@ void G1Policy::record_collection_pause_start(double start_time_sec) {
  assert( verify_young_ages(), "region age verification" );
}

void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
void G1DefaultPolicy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
  collector_state()->set_during_marking(true);
  assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
  collector_state()->set_during_initial_mark_pause(false);
}

void G1Policy::record_concurrent_mark_remark_start() {
void G1DefaultPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  collector_state()->set_during_marking(false);
}

void G1Policy::record_concurrent_mark_remark_end() {
void G1DefaultPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
  _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
@ -517,11 +518,11 @@ void G1Policy::record_concurrent_mark_remark_end() {
  record_pause(Remark, _mark_remark_start_sec, end_time_sec);
}

void G1Policy::record_concurrent_mark_cleanup_start() {
void G1DefaultPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void G1Policy::record_concurrent_mark_cleanup_completed() {
void G1DefaultPolicy::record_concurrent_mark_cleanup_completed() {
  bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
                                                              "skip last young-only gc");
  collector_state()->set_last_young_gc(should_continue_with_reclaim);
@ -532,22 +533,22 @@ void G1Policy::record_concurrent_mark_cleanup_completed() {
  collector_state()->set_in_marking_window(false);
}

double G1Policy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
double G1DefaultPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
  return phase_times()->average_time_ms(phase);
}

double G1Policy::young_other_time_ms() const {
double G1DefaultPolicy::young_other_time_ms() const {
  return phase_times()->young_cset_choice_time_ms() +
         phase_times()->young_free_cset_time_ms();
}

double G1Policy::non_young_other_time_ms() const {
double G1DefaultPolicy::non_young_other_time_ms() const {
  return phase_times()->non_young_cset_choice_time_ms() +
         phase_times()->non_young_free_cset_time_ms();
}

double G1Policy::other_time_ms(double pause_time_ms) const {
double G1DefaultPolicy::other_time_ms(double pause_time_ms) const {
  return pause_time_ms -
         average_time_ms(G1GCPhaseTimes::UpdateRS) -
         average_time_ms(G1GCPhaseTimes::ScanRS) -
@ -555,19 +556,19 @@ double G1Policy::other_time_ms(double pause_time_ms) const {
         average_time_ms(G1GCPhaseTimes::Termination);
}

double G1Policy::constant_other_time_ms(double pause_time_ms) const {
double G1DefaultPolicy::constant_other_time_ms(double pause_time_ms) const {
  return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
}

CollectionSetChooser* G1Policy::cset_chooser() const {
CollectionSetChooser* G1DefaultPolicy::cset_chooser() const {
  return _collection_set->cset_chooser();
}

bool G1Policy::about_to_start_mixed_phase() const {
bool G1DefaultPolicy::about_to_start_mixed_phase() const {
  return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc();
}

bool G1Policy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
bool G1DefaultPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (about_to_start_mixed_phase()) {
    return false;
  }
@ -592,7 +593,7 @@ bool G1Policy::need_to_start_conc_mark(const char* source, size_t alloc_word_siz
// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001

void G1Policy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
void G1DefaultPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
  double end_time_sec = os::elapsedTime();

  size_t cur_used_bytes = _g1->used();
@ -778,7 +779,7 @@ void G1Policy::record_collection_pause_end(double pause_time_ms, size_t cards_sc
  cset_chooser()->verify();
}

G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor){
G1IHOPControl* G1DefaultPolicy::create_ihop_control(const G1Predictions* predictor){
  if (G1UseAdaptiveIHOP) {
    return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
                                     predictor,
@ -789,7 +790,7 @@ G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor){
  }
}

void G1Policy::update_ihop_prediction(double mutator_time_s,
void G1DefaultPolicy::update_ihop_prediction(double mutator_time_s,
                                             size_t mutator_alloc_bytes,
                                             size_t young_gen_size) {
  // Always try to update IHOP prediction. Even evacuation failures give information
@ -827,15 +828,15 @@ void G1Policy::update_ihop_prediction(double mutator_time_s,
  }
}

void G1Policy::report_ihop_statistics() {
void G1DefaultPolicy::report_ihop_statistics() {
  _ihop_control->print();
}

void G1Policy::print_phases() {
void G1DefaultPolicy::print_phases() {
  phase_times()->print();
}

double G1Policy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
double G1DefaultPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
  TruncatedSeq* seq = surv_rate_group->get_seq(age);
  guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age);
  double pred = _predictor.get_new_prediction(seq);
@ -845,15 +846,15 @@ double G1Policy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) c
  return pred;
}

double G1Policy::predict_yg_surv_rate(int age) const {
double G1DefaultPolicy::predict_yg_surv_rate(int age) const {
  return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
}

double G1Policy::accum_yg_surv_rate_pred(int age) const {
double G1DefaultPolicy::accum_yg_surv_rate_pred(int age) const {
  return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
}

double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards,
double G1DefaultPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
                                                     size_t scanned_cards) const {
  return
    _analytics->predict_rs_update_time_ms(pending_cards) +
@ -861,13 +862,13 @@ double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards,
    _analytics->predict_constant_other_time_ms();
}

double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards) const {
double G1DefaultPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const {
  size_t rs_length = _analytics->predict_rs_lengths() + _analytics->predict_rs_length_diff();
  size_t card_num = _analytics->predict_card_num(rs_length, collector_state()->gcs_are_young());
  return predict_base_elapsed_time_ms(pending_cards, card_num);
}

size_t G1Policy::predict_bytes_to_copy(HeapRegion* hr) const {
size_t G1DefaultPolicy::predict_bytes_to_copy(HeapRegion* hr) const {
  size_t bytes_to_copy;
  if (hr->is_marked())
    bytes_to_copy = hr->max_live_bytes();
@ -880,7 +881,7 @@ size_t G1Policy::predict_bytes_to_copy(HeapRegion* hr) const {
  return bytes_to_copy;
}

double G1Policy::predict_region_elapsed_time_ms(HeapRegion* hr,
double G1DefaultPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
                                                       bool for_young_gc) const {
  size_t rs_length = hr->rem_set()->occupied();
  // Predicting the number of cards is based on which type of GC
@ -903,30 +904,30 @@ double G1Policy::predict_region_elapsed_time_ms(HeapRegion* hr,
}

void G1Policy::print_yg_surv_rate_info() const {
void G1DefaultPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
  _short_lived_surv_rate_group->print_surv_rate_summary();
  // add this call for any other surv rate groups
#endif // PRODUCT
}

bool G1Policy::is_young_list_full() const {
bool G1DefaultPolicy::is_young_list_full() const {
  uint young_list_length = _g1->young_list()->length();
  uint young_list_target_length = _young_list_target_length;
  return young_list_length >= young_list_target_length;
}

bool G1Policy::can_expand_young_list() const {
bool G1DefaultPolicy::can_expand_young_list() const {
  uint young_list_length = _g1->young_list()->length();
  uint young_list_max_length = _young_list_max_length;
  return young_list_length < young_list_max_length;
}

bool G1Policy::adaptive_young_list_length() const {
bool G1DefaultPolicy::adaptive_young_list_length() const {
  return _young_gen_sizer.adaptive_young_list_length();
}

void G1Policy::update_max_gc_locker_expansion() {
void G1DefaultPolicy::update_max_gc_locker_expansion() {
  uint expansion_region_num = 0;
  if (GCLockerEdenExpansionPercent > 0) {
    double perc = (double) GCLockerEdenExpansionPercent / 100.0;
@ -942,7 +943,7 @@ void G1Policy::update_max_gc_locker_expansion() {
}

// Calculates survivor space parameters.
void G1Policy::update_survivors_policy() {
void G1DefaultPolicy::update_survivors_policy() {
  double max_survivor_regions_d =
    (double) _young_list_target_length / (double) SurvivorRatio;
  // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
@ -953,7 +954,7 @@ void G1Policy::update_survivors_policy() {
                                    HeapRegion::GrainWords * _max_survivor_regions, _policy_counters);
}

bool G1Policy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
bool G1DefaultPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
  // We actually check whether we are marking here and not if we are in a
  // reclamation phase. This means that we will schedule a concurrent mark
  // even while we are still in the process of reclaiming memory.
@ -968,12 +969,12 @@ bool G1Policy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
  }
}

void G1Policy::initiate_conc_mark() {
void G1DefaultPolicy::initiate_conc_mark() {
  collector_state()->set_during_initial_mark_pause(true);
  collector_state()->set_initiate_conc_mark_if_possible(false);
}

void G1Policy::decide_on_conc_mark_initiation() {
void G1DefaultPolicy::decide_on_conc_mark_initiation() {
  // We are about to decide on whether this pause will be an
  // initial-mark pause.

@ -1019,7 +1020,7 @@ void G1Policy::decide_on_conc_mark_initiation() {
  }
}

void G1Policy::record_concurrent_mark_cleanup_end() {
void G1DefaultPolicy::record_concurrent_mark_cleanup_end() {
  cset_chooser()->rebuild(_g1->workers(), _g1->num_regions());

  double end_sec = os::elapsedTime();
@ -1030,7 +1031,7 @@ void G1Policy::record_concurrent_mark_cleanup_end() {
  record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
}

double G1Policy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
double G1DefaultPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
  // Returns the given amount of reclaimable bytes (that represents
  // the amount of reclaimable space still to be collected) as a
  // percentage of the current heap capacity.
@ -1038,7 +1039,7 @@ double G1Policy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
  return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
}
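The computation is a plain percentage of heap capacity; for example, with 256 MB still reclaimable in a 4 GB heap it evaluates to 268435456 * 100.0 / 4294967296 = 6.25 percent. A standalone restatement of the same formula, free of G1 state:

#include <stddef.h>

// Same arithmetic as reclaimable_bytes_perc() above.
double reclaimable_perc_sketch(size_t reclaimable_bytes, size_t capacity_bytes) {
  return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
}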

void G1Policy::maybe_start_marking() {
void G1DefaultPolicy::maybe_start_marking() {
  if (need_to_start_conc_mark("end of GC")) {
    // Note: this might have already been set, if during the last
    // pause we decided to start a cycle but at the beginning of
@ -1047,7 +1048,7 @@ void G1Policy::maybe_start_marking() {
  }
}

G1Policy::PauseKind G1Policy::young_gc_pause_kind() const {
G1DefaultPolicy::PauseKind G1DefaultPolicy::young_gc_pause_kind() const {
  assert(!collector_state()->full_collection(), "must be");
  if (collector_state()->during_initial_mark_pause()) {
    assert(collector_state()->last_gc_was_young(), "must be");
@ -1069,7 +1070,7 @@ G1Policy::PauseKind G1Policy::young_gc_pause_kind() const {
  }
}

void G1Policy::record_pause(PauseKind kind, double start, double end) {
void G1DefaultPolicy::record_pause(PauseKind kind, double start, double end) {
  // Manage the MMU tracker. For some reason it ignores Full GCs.
  if (kind != FullGC) {
    _mmu_tracker->add_pause(start, end);
@ -1096,11 +1097,11 @@ void G1Policy::record_pause(PauseKind kind, double start, double end) {
  }
}

void G1Policy::abort_time_to_mixed_tracking() {
void G1DefaultPolicy::abort_time_to_mixed_tracking() {
  _initial_mark_to_mixed.reset();
}

bool G1Policy::next_gc_should_be_mixed(const char* true_action_str,
bool G1DefaultPolicy::next_gc_should_be_mixed(const char* true_action_str,
                                              const char* false_action_str) const {
  if (cset_chooser()->is_empty()) {
    log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
@ -1121,7 +1122,7 @@ bool G1Policy::next_gc_should_be_mixed(const char* true_action_str,
  return true;
}

uint G1Policy::calc_min_old_cset_length() const {
uint G1DefaultPolicy::calc_min_old_cset_length() const {
  // The min old CSet region bound is based on the maximum desired
  // number of mixed GCs after a cycle. I.e., even if some old regions
  // look expensive, we should add them to the CSet anyway to make
@ -1142,7 +1143,7 @@ uint G1Policy::calc_min_old_cset_length() const {
  return (uint) result;
}

uint G1Policy::calc_max_old_cset_length() const {
uint G1DefaultPolicy::calc_max_old_cset_length() const {
  // The max old CSet region bound is based on the threshold expressed
  // as a percentage of the heap size. I.e., it should bound the
  // number of old regions added to the CSet irrespective of how many
@ -1159,7 +1160,7 @@ uint G1Policy::calc_max_old_cset_length() const {
  return (uint) result;
}

void G1Policy::finalize_collection_set(double target_pause_time_ms) {
void G1DefaultPolicy::finalize_collection_set(double target_pause_time_ms) {
  double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms);
  _collection_set->finalize_old_part(time_remaining_ms);
}
hotspot/src/share/vm/gc/g1/g1DefaultPolicy.hpp (new file)
@ -0,0 +1,440 @@
/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1DEFAULTPOLICY_HPP
#define SHARE_VM_GC_G1_G1DEFAULTPOLICY_HPP

#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1InCSetState.hpp"
#include "gc/g1/g1InitialMarkToMixedTimeTracker.hpp"
#include "gc/g1/g1MMUTracker.hpp"
#include "gc/g1/g1Predictions.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1YoungGenSizer.hpp"
#include "gc/shared/gcCause.hpp"
#include "utilities/pair.hpp"

// A G1Policy makes policy decisions that determine the
// characteristics of the collector. Examples include:
// * choice of collection set.
// * when to collect.

class HeapRegion;
class G1CollectionSet;
class CollectionSetChooser;
class G1IHOPControl;
class G1Analytics;
class G1YoungGenSizer;
class GCPolicyCounters;

class G1DefaultPolicy: public G1Policy {
 private:

  static G1IHOPControl* create_ihop_control(const G1Predictions* predictor);
  // Update the IHOP control with necessary statistics.
  void update_ihop_prediction(double mutator_time_s,
                              size_t mutator_alloc_bytes,
                              size_t young_gen_size);
  void report_ihop_statistics();

  G1Predictions _predictor;
  G1Analytics* _analytics;
  G1MMUTracker* _mmu_tracker;
  G1IHOPControl* _ihop_control;

  GCPolicyCounters* _policy_counters;

  double _full_collection_start_sec;

  uint _young_list_target_length;
  uint _young_list_fixed_length;

  // The max number of regions we can extend the eden by while the GC
  // locker is active. This should be >= _young_list_target_length;
  uint _young_list_max_length;

  // SurvRateGroups below must be initialized after the predictor because they
  // indirectly use it through this object passed to their constructor.
  SurvRateGroup* _short_lived_surv_rate_group;
  SurvRateGroup* _survivor_surv_rate_group;

  double _reserve_factor;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  uint _reserve_regions;

  G1YoungGenSizer _young_gen_sizer;

  uint _free_regions_at_end_of_collection;

  size_t _max_rs_lengths;

  size_t _rs_lengths_prediction;

#ifndef PRODUCT
  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
#endif // PRODUCT

  size_t _pending_cards;

  // The amount of allocated bytes in old gen during the last mutator and the following
  // young GC phase.
  size_t _bytes_allocated_in_old_since_last_gc;

  G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;
 public:
  const G1Predictions& predictor() const { return _predictor; }
  const G1Analytics* analytics() const { return const_cast<const G1Analytics*>(_analytics); }

  // Add the given number of bytes to the total number of allocated bytes in the old gen.
  void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }

  // Accessors

  void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
    hr->set_eden();
    hr->install_surv_rate_group(_short_lived_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

  void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
    assert(hr->is_survivor(), "pre-condition");
    hr->install_surv_rate_group(_survivor_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

#ifndef PRODUCT
  bool verify_young_ages();
#endif // PRODUCT

  void record_max_rs_lengths(size_t rs_lengths) {
    _max_rs_lengths = rs_lengths;
  }

  double predict_base_elapsed_time_ms(size_t pending_cards) const;
  double predict_base_elapsed_time_ms(size_t pending_cards,
                                      size_t scanned_cards) const;
  size_t predict_bytes_to_copy(HeapRegion* hr) const;
  double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc) const;

  double predict_survivor_regions_evac_time() const;

  bool should_update_surv_rate_group_predictors() {
    return collector_state()->last_gc_was_young() && !collector_state()->in_marking_window();
  }

  void cset_regions_freed() {
    bool update = should_update_surv_rate_group_predictors();

    _short_lived_surv_rate_group->all_surviving_words_recorded(update);
    _survivor_surv_rate_group->all_surviving_words_recorded(update);
  }

  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }

  const G1MMUTracker* mmu_tracker() const {
    return _mmu_tracker;
  }

  double max_pause_time_ms() const {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

  // Returns an estimate of the survival rate of the region at yg-age
  // "yg_age".
  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const;

  double predict_yg_surv_rate(int age) const;

  double accum_yg_surv_rate_pred(int age) const;

 protected:
  G1CollectionSet* _collection_set;
  virtual double average_time_ms(G1GCPhaseTimes::GCParPhases phase) const;
  virtual double other_time_ms(double pause_time_ms) const;

  double young_other_time_ms() const;
  double non_young_other_time_ms() const;
  double constant_other_time_ms(double pause_time_ms) const;

  CollectionSetChooser* cset_chooser() const;
 private:

  // The number of bytes copied during the GC.
  size_t _bytes_copied_during_gc;

  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1;

  G1GCPhaseTimes* _phase_times;

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
  double _mark_remark_start_sec;
  double _mark_cleanup_start_sec;

  // Updates the internal young list maximum and target lengths. Returns the
  // unbounded young list target length.
  uint update_young_list_max_and_target_length();
  uint update_young_list_max_and_target_length(size_t rs_lengths);

  // Update the young list target length either by setting it to the
  // desired fixed value or by calculating it using G1's pause
  // prediction model. If no rs_lengths parameter is passed, predict
  // the RS lengths using the prediction model, otherwise use the
  // given rs_lengths as the prediction.
  // Returns the unbounded young list target length.
  uint update_young_list_target_length(size_t rs_lengths);

  // Calculate and return the minimum desired young list target
  // length. This is the minimum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_min_length(uint base_min_length) const;

  // Calculate and return the maximum desired young list target
  // length. This is the maximum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_max_length() const;

  // Calculate and return the maximum young list target length that
  // can fit into the pause time goal. The parameters are: rs_lengths
  // represent the prediction of how large the young RSet lengths will
  // be, base_min_length is the already existing number of regions in
  // the young list, min_length and max_length are the desired min and
  // max young list length according to the user's inputs.
  uint calculate_young_list_target_length(size_t rs_lengths,
                                          uint base_min_length,
                                          uint desired_min_length,
                                          uint desired_max_length) const;

  // Result of the bounded_young_list_target_length() method, containing both the
  // bounded as well as the unbounded young list target lengths in this order.
  typedef Pair<uint, uint, StackObj> YoungTargetLengths;
  YoungTargetLengths young_list_target_lengths(size_t rs_lengths) const;

  void update_rs_lengths_prediction();
  void update_rs_lengths_prediction(size_t prediction);

  // Check whether a given young length (young_length) fits into the
  // given target pause time and whether the prediction for the amount
  // of objects to be copied for the given length will fit into the
  // given free space (expressed by base_free_regions). It is used by
  // calculate_young_list_target_length().
  bool predict_will_fit(uint young_length, double base_time_ms,
                        uint base_free_regions, double target_pause_time_ms) const;

 public:
  size_t pending_cards() const { return _pending_cards; }

  // Calculate the minimum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_min_old_cset_length() const;

  // Calculate the maximum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_max_old_cset_length() const;

  // Returns the given amount of uncollected reclaimable space
  // as a percentage of the current heap capacity.
  double reclaimable_bytes_perc(size_t reclaimable_bytes) const;

 private:
  // Sets up marking if proper conditions are met.
  void maybe_start_marking();

  // The kind of STW pause.
  enum PauseKind {
    FullGC,
    YoungOnlyGC,
    MixedGC,
    LastYoungGC,
    InitialMarkGC,
    Cleanup,
    Remark
  };

  // Calculate PauseKind from internal state.
  PauseKind young_gc_pause_kind() const;
  // Record the given STW pause with the given start and end times (in s).
  void record_pause(PauseKind kind, double start, double end);
  // Indicate that we aborted marking before doing any mixed GCs.
  void abort_time_to_mixed_tracking();
 public:

  G1DefaultPolicy();

  virtual ~G1DefaultPolicy();

  G1CollectorState* collector_state() const;

  G1GCPhaseTimes* phase_times() const { return _phase_times; }

  // Check the current value of the young list RSet lengths and
  // compare it against the last prediction. If the current value is
  // higher, recalculate the young list target length prediction.
  void revise_young_list_target_length_if_necessary(size_t rs_lengths);

  // This should be called after the heap is resized.
  void record_new_heap_size(uint new_number_of_regions);

  void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set);

  virtual void note_gc_start();

  bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);

  bool about_to_start_mixed_phase() const;

  // Record the start and end of an evacuation pause.
  void record_collection_pause_start(double start_time_sec);
  void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);

  // Record the start and end of a full collection.
  void record_full_collection_start();
  void record_full_collection_end();

  // Must currently be called while the world is stopped.
  void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);

  // Record start and end of remark.
  void record_concurrent_mark_remark_start();
  void record_concurrent_mark_remark_end();

  // Record start, end, and completion of cleanup.
  void record_concurrent_mark_cleanup_start();
  void record_concurrent_mark_cleanup_end();
  void record_concurrent_mark_cleanup_completed();

  virtual void print_phases();

  // Record how much space we copied during a GC. This is typically
  // called when a GC alloc region is being retired.
  void record_bytes_copied_during_gc(size_t bytes) {
    _bytes_copied_during_gc += bytes;
  }

  // The amount of space we copied during a GC.
  size_t bytes_copied_during_gc() const {
    return _bytes_copied_during_gc;
  }

  // Determine whether there are candidate regions so that the
  // next GC should be mixed. The two action strings are used
  // in the ergo output when the method returns true or false.
  bool next_gc_should_be_mixed(const char* true_action_str,
                               const char* false_action_str) const;

  virtual void finalize_collection_set(double target_pause_time_ms);
 private:
  // Set the state to start a concurrent marking cycle and clear
  // _initiate_conc_mark_if_possible because it has now been
  // acted on.
  void initiate_conc_mark();

 public:
  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
  // progress or not is stable.
  bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() to so that the pause does
  // the initial-mark work and start a marking cycle.
  void decide_on_conc_mark_initiation();

  // Print stats on young survival ratio
  void print_yg_surv_rate_info() const;

  void finished_recalculating_age_indexes(bool is_survivors) {
    if (is_survivors) {
      _survivor_surv_rate_group->finished_recalculating_age_indexes();
    } else {
      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
    }
  }

  size_t young_list_target_length() const { return _young_list_target_length; }

  bool is_young_list_full() const;

  bool can_expand_young_list() const;

  uint young_list_max_length() const {
    return _young_list_max_length;
  }

  bool adaptive_young_list_length() const;

  virtual bool should_process_references() const {
    return true;
  }

 private:
  //
  // Survivor regions policy.
  //

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum amount of survivors regions.
  uint _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  uint _max_survivor_regions;

  AgeTable _survivors_age_table;

 public:
  uint tenuring_threshold() const { return _tenuring_threshold; }

  uint max_survivor_regions() {
    return _max_survivor_regions;
  }

  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }

  void record_age_table(AgeTable* age_table) {
    _survivors_age_table.merge(age_table);
  }

  void update_max_gc_locker_expansion();

  // Calculates survivor space parameters.
  void update_survivors_policy();
};

#endif // SHARE_VM_GC_G1_G1DEFAULTPOLICY_HPP
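The header diff that follows is the other half of the split: every concrete member of G1Policy is either moved into G1DefaultPolicy above or replaced by a pure-virtual declaration. The payoff is that an alternative policy only has to implement that surface. A hypothetical example (the class name here is invented purely for illustration):

// Sketch: derive from the concrete default and override a single decision.
// should_process_references() is one of the virtuals in the new interface.
class G1NoRefProcPolicy : public G1DefaultPolicy {
 public:
  virtual bool should_process_references() const {
    return false;  // opt this policy out of reference processing
  }
};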
@ -46,321 +46,106 @@ class CollectionSetChooser;
|
||||
class G1IHOPControl;
|
||||
class G1Analytics;
|
||||
class G1YoungGenSizer;
|
||||
class GCPolicyCounters;
|
||||
|
||||
class G1Policy: public CHeapObj<mtGC> {
|
||||
private:
|
||||
|
||||
static G1IHOPControl* create_ihop_control(const G1Predictions* predictor);
|
||||
// Update the IHOP control with necessary statistics.
|
||||
void update_ihop_prediction(double mutator_time_s,
|
||||
size_t mutator_alloc_bytes,
|
||||
size_t young_gen_size);
|
||||
void report_ihop_statistics();
|
||||
|
||||
G1Predictions _predictor;
|
||||
G1Analytics* _analytics;
|
||||
G1MMUTracker* _mmu_tracker;
|
||||
G1IHOPControl* _ihop_control;
|
||||
|
||||
GCPolicyCounters* _policy_counters;
|
||||
|
||||
double _full_collection_start_sec;
|
||||
|
||||
uint _young_list_target_length;
|
||||
uint _young_list_fixed_length;
|
||||
|
||||
// The max number of regions we can extend the eden by while the GC
|
||||
// locker is active. This should be >= _young_list_target_length;
|
||||
uint _young_list_max_length;
|
||||
|
||||
// SurvRateGroups below must be initialized after the predictor because they
|
||||
// indirectly use it through this object passed to their constructor.
|
||||
SurvRateGroup* _short_lived_surv_rate_group;
|
||||
SurvRateGroup* _survivor_surv_rate_group;
|
||||
|
||||
double _reserve_factor;
|
||||
// This will be set when the heap is expanded
|
||||
// for the first time during initialization.
|
||||
uint _reserve_regions;
|
||||
|
||||
G1YoungGenSizer _young_gen_sizer;
|
||||
|
||||
uint _free_regions_at_end_of_collection;
|
||||
|
||||
size_t _max_rs_lengths;
|
||||
|
||||
size_t _rs_lengths_prediction;
|
||||
|
||||
#ifndef PRODUCT
|
||||
bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
|
||||
#endif // PRODUCT
|
||||
|
||||
size_t _pending_cards;
|
||||
|
||||
// The amount of allocated bytes in old gen during the last mutator and the following
|
||||
// young GC phase.
|
||||
size_t _bytes_allocated_in_old_since_last_gc;
|
||||
|
||||
G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;
|
||||
public:
|
||||
const G1Predictions& predictor() const { return _predictor; }
|
||||
const G1Analytics* analytics() const { return const_cast<const G1Analytics*>(_analytics); }
|
||||
virtual const G1Predictions& predictor() const = 0;
|
||||
virtual const G1Analytics* analytics() const = 0;
|
||||
|
||||
// Add the given number of bytes to the total number of allocated bytes in the old gen.
|
||||
void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }
|
||||
virtual void add_bytes_allocated_in_old_since_last_gc(size_t bytes) = 0;
|
||||
|
||||
// Accessors
|
||||
|
||||
void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
|
||||
hr->set_eden();
|
||||
hr->install_surv_rate_group(_short_lived_surv_rate_group);
|
||||
hr->set_young_index_in_cset(young_index_in_cset);
|
||||
}
|
||||
virtual void set_region_eden(HeapRegion* hr, int young_index_in_cset) = 0;
|
||||
virtual void set_region_survivor(HeapRegion* hr, int young_index_in_cset) = 0;
|
||||
|
||||
void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
|
||||
assert(hr->is_survivor(), "pre-condition");
|
||||
hr->install_surv_rate_group(_survivor_surv_rate_group);
|
||||
hr->set_young_index_in_cset(young_index_in_cset);
|
||||
}
|
||||
virtual void record_max_rs_lengths(size_t rs_lengths) = 0;
|
||||
|
||||
#ifndef PRODUCT
|
||||
bool verify_young_ages();
|
||||
#endif // PRODUCT
|
||||
virtual double predict_base_elapsed_time_ms(size_t pending_cards) const = 0;
|
||||
virtual double predict_base_elapsed_time_ms(size_t pending_cards,
|
||||
size_t scanned_cards) const = 0;
|
||||
|
||||
void record_max_rs_lengths(size_t rs_lengths) {
|
||||
_max_rs_lengths = rs_lengths;
|
||||
}
|
||||
virtual double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc) const = 0;
|
||||
|
||||
virtual void cset_regions_freed() = 0;
|
||||
|
||||
double predict_base_elapsed_time_ms(size_t pending_cards) const;
|
||||
double predict_base_elapsed_time_ms(size_t pending_cards,
|
||||
size_t scanned_cards) const;
|
||||
size_t predict_bytes_to_copy(HeapRegion* hr) const;
|
||||
double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc) const;
|
||||
virtual G1MMUTracker* mmu_tracker() = 0;
|
||||
|
||||
double predict_survivor_regions_evac_time() const;
|
||||
virtual const G1MMUTracker* mmu_tracker() const = 0;
|
||||
|
||||
bool should_update_surv_rate_group_predictors() {
|
||||
return collector_state()->last_gc_was_young() && !collector_state()->in_marking_window();
|
||||
}
|
||||
virtual double max_pause_time_ms() const = 0;
|
||||
|
||||
void cset_regions_freed() {
|
||||
bool update = should_update_surv_rate_group_predictors();
|
||||
|
||||
_short_lived_surv_rate_group->all_surviving_words_recorded(update);
|
||||
_survivor_surv_rate_group->all_surviving_words_recorded(update);
|
||||
}
|
||||
|
||||
G1MMUTracker* mmu_tracker() {
|
||||
return _mmu_tracker;
|
||||
}
|
||||
|
||||
const G1MMUTracker* mmu_tracker() const {
|
||||
return _mmu_tracker;
|
||||
}
|
||||
|
||||
double max_pause_time_ms() const {
|
||||
return _mmu_tracker->max_gc_time() * 1000.0;
|
||||
}
|
||||
|
||||
// Returns an estimate of the survival rate of the region at yg-age
|
||||
// "yg_age".
|
||||
double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const;
|
||||
|
||||
double predict_yg_surv_rate(int age) const;
|
||||
|
||||
double accum_yg_surv_rate_pred(int age) const;
|
||||
|
||||
protected:
|
||||
G1CollectionSet* _collection_set;
|
||||
virtual double average_time_ms(G1GCPhaseTimes::GCParPhases phase) const;
|
||||
virtual double other_time_ms(double pause_time_ms) const;
|
||||
|
||||
double young_other_time_ms() const;
|
||||
double non_young_other_time_ms() const;
|
||||
double constant_other_time_ms(double pause_time_ms) const;
|
||||
|
||||
CollectionSetChooser* cset_chooser() const;
|
||||
private:
|
||||
|
||||
// The number of bytes copied during the GC.
|
||||
size_t _bytes_copied_during_gc;
|
||||
|
||||
// Stash a pointer to the g1 heap.
|
||||
G1CollectedHeap* _g1;
|
||||
|
||||
G1GCPhaseTimes* _phase_times;
|
||||
|
||||
// This set of variables tracks the collector efficiency, in order to
|
||||
// determine whether we should initiate a new marking.
|
||||
double _mark_remark_start_sec;
|
||||
double _mark_cleanup_start_sec;
|
||||
|
||||
// Updates the internal young list maximum and target lengths. Returns the
|
||||
// unbounded young list target length.
|
||||
uint update_young_list_max_and_target_length();
|
||||
uint update_young_list_max_and_target_length(size_t rs_lengths);
|
||||
|
||||
// Update the young list target length either by setting it to the
|
||||
// desired fixed value or by calculating it using G1's pause
|
||||
// prediction model. If no rs_lengths parameter is passed, predict
|
||||
// the RS lengths using the prediction model, otherwise use the
|
||||
// given rs_lengths as the prediction.
|
||||
// Returns the unbounded young list target length.
|
||||
uint update_young_list_target_length(size_t rs_lengths);
|
||||
|
||||
// Calculate and return the minimum desired young list target
|
||||
// length. This is the minimum desired young list length according
|
||||
// to the user's inputs.
|
||||
uint calculate_young_list_desired_min_length(uint base_min_length) const;
|
||||
|
||||
// Calculate and return the maximum desired young list target
|
||||
// length. This is the maximum desired young list length according
|
||||
// to the user's inputs.
|
||||
uint calculate_young_list_desired_max_length() const;
|
||||
|
||||
// Calculate and return the maximum young list target length that
|
||||
// can fit into the pause time goal. The parameters are: rs_lengths
|
||||
// represent the prediction of how large the young RSet lengths will
|
||||
// be, base_min_length is the already existing number of regions in
|
||||
// the young list, min_length and max_length are the desired min and
|
||||
// max young list length according to the user's inputs.
|
||||
uint calculate_young_list_target_length(size_t rs_lengths,
|
||||
uint base_min_length,
|
||||
uint desired_min_length,
|
||||
uint desired_max_length) const;
|
||||
|
||||
// Result of the bounded_young_list_target_length() method, containing both the
|
||||
// bounded as well as the unbounded young list target lengths in this order.
|
||||
typedef Pair<uint, uint, StackObj> YoungTargetLengths;
|
||||
YoungTargetLengths young_list_target_lengths(size_t rs_lengths) const;
|
||||
|
||||
void update_rs_lengths_prediction();
|
||||
void update_rs_lengths_prediction(size_t prediction);
|
||||
|
||||
// Check whether a given young length (young_length) fits into the
|
||||
// given target pause time and whether the prediction for the amount
|
||||
// of objects to be copied for the given length will fit into the
|
||||
// given free space (expressed by base_free_regions). It is used by
|
||||
// calculate_young_list_target_length().
|
||||
bool predict_will_fit(uint young_length, double base_time_ms,
|
||||
uint base_free_regions, double target_pause_time_ms) const;
|
||||
|
||||
public:
|
||||
size_t pending_cards() const { return _pending_cards; }
|
||||
virtual size_t pending_cards() const = 0;
|
||||
|
||||
// Calculate the minimum number of old regions we'll add to the CSet
|
||||
// during a mixed GC.
|
||||
uint calc_min_old_cset_length() const;
|
||||
virtual uint calc_min_old_cset_length() const = 0;
|
||||
|
||||
// Calculate the maximum number of old regions we'll add to the CSet
|
||||
// during a mixed GC.
|
||||
uint calc_max_old_cset_length() const;
|
||||
virtual uint calc_max_old_cset_length() const = 0;
|
||||
|
||||
// Returns the given amount of uncollected reclaimable space
|
||||
// as a percentage of the current heap capacity.
|
||||
double reclaimable_bytes_perc(size_t reclaimable_bytes) const;
|
||||
virtual double reclaimable_bytes_perc(size_t reclaimable_bytes) const = 0;
|
||||
|
||||
private:
|
||||
// Sets up marking if proper conditions are met.
|
||||
void maybe_start_marking();
|
||||
virtual ~G1Policy() {}
|
||||
|
||||
// The kind of STW pause.
|
||||
enum PauseKind {
|
||||
FullGC,
|
||||
YoungOnlyGC,
|
||||
MixedGC,
|
||||
LastYoungGC,
|
||||
InitialMarkGC,
|
||||
Cleanup,
|
||||
Remark
|
||||
};
|
||||
virtual G1CollectorState* collector_state() const = 0;
|
||||
|
||||
// Calculate PauseKind from internal state.
|
||||
PauseKind young_gc_pause_kind() const;
|
||||
// Record the given STW pause with the given start and end times (in s).
|
||||
void record_pause(PauseKind kind, double start, double end);
|
||||
// Indicate that we aborted marking before doing any mixed GCs.
|
||||
void abort_time_to_mixed_tracking();
|
||||
public:
|
||||
|
||||
G1Policy();
|
||||
|
||||
virtual ~G1Policy();
|
||||
|
||||
G1CollectorState* collector_state() const;
|
||||
|
||||
G1GCPhaseTimes* phase_times() const { return _phase_times; }
|
||||
virtual G1GCPhaseTimes* phase_times() const = 0;
|
||||
|
||||
// Check the current value of the young list RSet lengths and
|
||||
// compare it against the last prediction. If the current value is
|
||||
// higher, recalculate the young list target length prediction.
|
||||
void revise_young_list_target_length_if_necessary(size_t rs_lengths);
|
||||
virtual void revise_young_list_target_length_if_necessary(size_t rs_lengths) = 0;
|
||||
|
||||
// This should be called after the heap is resized.
|
||||
void record_new_heap_size(uint new_number_of_regions);
|
||||
virtual void record_new_heap_size(uint new_number_of_regions) = 0;
|
||||
|
||||
void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set);
|
||||
virtual void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) = 0;
|
||||
|
||||
virtual void note_gc_start();
|
||||
virtual void note_gc_start() = 0;
bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
bool about_to_start_mixed_phase() const;
virtual bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0) = 0;

// Record the start and end of an evacuation pause.
void record_collection_pause_start(double start_time_sec);
void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);
virtual void record_collection_pause_start(double start_time_sec) = 0;
virtual void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) = 0;

// Record the start and end of a full collection.
void record_full_collection_start();
void record_full_collection_end();
virtual void record_full_collection_start() = 0;
virtual void record_full_collection_end() = 0;

// Must currently be called while the world is stopped.
void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);
virtual void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) = 0;

// Record start and end of remark.
void record_concurrent_mark_remark_start();
void record_concurrent_mark_remark_end();
virtual void record_concurrent_mark_remark_start() = 0;
virtual void record_concurrent_mark_remark_end() = 0;

// Record start, end, and completion of cleanup.
void record_concurrent_mark_cleanup_start();
void record_concurrent_mark_cleanup_end();
void record_concurrent_mark_cleanup_completed();
virtual void record_concurrent_mark_cleanup_start() = 0;
virtual void record_concurrent_mark_cleanup_end() = 0;
virtual void record_concurrent_mark_cleanup_completed() = 0;

virtual void print_phases();
virtual void print_phases() = 0;

// Record how much space we copied during a GC. This is typically
// called when a GC alloc region is being retired.
void record_bytes_copied_during_gc(size_t bytes) {
_bytes_copied_during_gc += bytes;
}
virtual void record_bytes_copied_during_gc(size_t bytes) = 0;

// The amount of space we copied during a GC.
size_t bytes_copied_during_gc() const {
return _bytes_copied_during_gc;
}
virtual size_t bytes_copied_during_gc() const = 0;

// Determine whether there are candidate regions so that the
// next GC should be mixed. The two action strings are used
// in the ergo output when the method returns true or false.
bool next_gc_should_be_mixed(const char* true_action_str,
const char* false_action_str) const;
virtual void finalize_collection_set(double target_pause_time_ms);
virtual void finalize_collection_set(double target_pause_time_ms) = 0;

private:
// Set the state to start a concurrent marking cycle and clear
// _initiate_conc_mark_if_possible because it has now been
// acted on.
void initiate_conc_mark();

public:
// This sets the initiate_conc_mark_if_possible() flag to start a
// new cycle, as long as we are not already in one. It's best if it
// is called during a safepoint when the test whether a cycle is in
// progress or not is stable.
bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);
virtual bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) = 0;

// This is called at the very beginning of an evacuation pause (it
// has to be the first thing that the pause does). If
@ -368,72 +153,33 @@ public:
// marking thread has completed its work during the previous cycle,
// it will set during_initial_mark_pause() so that the pause does
// the initial-mark work and starts a marking cycle.
void decide_on_conc_mark_initiation();
virtual void decide_on_conc_mark_initiation() = 0;

// Print stats on young survival ratio
void print_yg_surv_rate_info() const;
virtual void print_yg_surv_rate_info() const = 0;

void finished_recalculating_age_indexes(bool is_survivors) {
if (is_survivors) {
_survivor_surv_rate_group->finished_recalculating_age_indexes();
} else {
_short_lived_surv_rate_group->finished_recalculating_age_indexes();
}
}
virtual void finished_recalculating_age_indexes(bool is_survivors) = 0;

size_t young_list_target_length() const { return _young_list_target_length; }
virtual size_t young_list_target_length() const = 0;

bool is_young_list_full() const;
virtual bool is_young_list_full() const = 0;

bool can_expand_young_list() const;
virtual bool can_expand_young_list() const = 0;

uint young_list_max_length() const {
return _young_list_max_length;
}
virtual uint young_list_max_length() const = 0;

bool adaptive_young_list_length() const;
virtual bool adaptive_young_list_length() const = 0;

virtual bool should_process_references() const {
return true;
}
virtual bool should_process_references() const = 0;

private:
//
// Survivor regions policy.
//
virtual uint tenuring_threshold() const = 0;
virtual uint max_survivor_regions() = 0;

// Current tenuring threshold, set to 0 if the collector reaches the
// maximum number of survivor regions.
uint _tenuring_threshold;
virtual void note_start_adding_survivor_regions() = 0;

// The limit on the number of regions allocated for survivors.
uint _max_survivor_regions;
virtual void note_stop_adding_survivor_regions() = 0;

AgeTable _survivors_age_table;

public:
uint tenuring_threshold() const { return _tenuring_threshold; }

uint max_survivor_regions() {
return _max_survivor_regions;
}

void note_start_adding_survivor_regions() {
_survivor_surv_rate_group->start_adding_regions();
}

void note_stop_adding_survivor_regions() {
_survivor_surv_rate_group->stop_adding_regions();
}

void record_age_table(AgeTable* age_table) {
_survivors_age_table.merge(age_table);
}

void update_max_gc_locker_expansion();

// Calculates survivor space parameters.
void update_survivors_policy();
virtual void record_age_table(AgeTable* age_table) = 0;
};

#endif // SHARE_VM_GC_G1_G1POLICY_HPP
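The hunks above all follow one pattern: a concrete G1 policy method (sometimes with an inline body) is replaced by a pure-virtual declaration, extracting an abstract policy interface that a concrete policy class then implements. A minimal sketch of that pattern, with hypothetical names rather than HotSpot source:

    #include <cstddef>

    // Abstract interface: callers program against pure-virtual hooks only.
    class GCPolicy {
    public:
      virtual void record_bytes_copied_during_gc(size_t bytes) = 0;
      virtual size_t bytes_copied_during_gc() const = 0;
      virtual ~GCPolicy() {}
    };

    // Concrete policy: owns the counter the interface only promises.
    class DefaultGCPolicy : public GCPolicy {
      size_t _bytes_copied_during_gc;
    public:
      DefaultGCPolicy() : _bytes_copied_during_gc(0) {}
      virtual void record_bytes_copied_during_gc(size_t bytes) {
        _bytes_copied_during_gc += bytes;
      }
      virtual size_t bytes_copied_during_gc() const {
        return _bytes_copied_during_gc;
      }
    };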
@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,7 +42,6 @@ public:

CLDClosure* weak_clds() { return &_closures._clds; }
CLDClosure* strong_clds() { return &_closures._clds; }
CLDClosure* thread_root_clds() { return NULL; }
CLDClosure* second_pass_weak_clds() { return NULL; }

CodeBlobClosure* strong_codeblobs() { return &_closures._codeblobs; }
@ -89,7 +88,6 @@ public:

// If MarkWeak is G1MarkFromRoot then all CLDs are processed by the weak and strong variants;
// return a NULL closure for the following specialized versions in that case.
CLDClosure* thread_root_clds() { return null_if<G1MarkFromRoot>(&_strong._clds); }
CLDClosure* second_pass_weak_clds() { return null_if<G1MarkFromRoot>(&_weak._clds); }

CodeBlobClosure* strong_codeblobs() { return &_strong._codeblobs; }
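The null_if<G1MarkFromRoot>() calls above implement the comment's rule: when the enclosing class is instantiated with MarkWeak == G1MarkFromRoot, the specialized accessors hand back NULL instead of the real closure, so the CLDs are not visited twice. A plausible shape for such a helper, assuming it is a member of a class templated over G1Mark MarkWeak; this is a hypothetical sketch, not the actual HotSpot definition:

    // Assumed member of a template <G1Mark MarkWeak> class: returns NULL when
    // the instantiation matches the cutoff mark, otherwise the closure itself.
    template <G1Mark Cutoff, typename T>
    T* null_if(T* t) {
      return (MarkWeak == Cutoff) ? NULL : t;
    }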
@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,9 +41,6 @@ public:
virtual CLDClosure* weak_clds() = 0;
virtual CLDClosure* strong_clds() = 0;

// Applied to the CLDs reachable from the thread stacks.
virtual CLDClosure* thread_root_clds() = 0;

// Applied to code blobs reachable as strong roots.
virtual CodeBlobClosure* strong_codeblobs() = 0;
};

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -152,7 +152,6 @@ public:

CLDClosure* weak_clds() { return NULL; }
CLDClosure* strong_clds() { return _clds; }
CLDClosure* thread_root_clds() { return _clds; }

CodeBlobClosure* strong_codeblobs() { return _blobs; }
};
@ -184,9 +183,6 @@ public:
// system.
CLDClosure* weak_clds() { return _clds; }
CLDClosure* strong_clds() { return _clds; }
// We don't want to visit CLDs more than once, so we return NULL for the
// thread root CLDs.
CLDClosure* thread_root_clds() { return NULL; }

// We don't want to visit code blobs more than once, so we return NULL for the
// strong case and walk the entire code cache as a separate step.
@ -211,7 +207,6 @@ void G1RootProcessor::process_all_roots(OopClosure* oops,
void G1RootProcessor::process_java_roots(G1RootClosures* closures,
G1GCPhaseTimes* phase_times,
uint worker_i) {
assert(closures->thread_root_clds() == NULL || closures->weak_clds() == NULL, "There is overlap between those, only one may be set");
// Iterating over the CLDG and the Threads is done early to allow us to
// first process the strong CLDs and nmethods and then, after a barrier,
// let the thread process the weak CLDs and nmethods.
@ -227,7 +222,6 @@ void G1RootProcessor::process_java_roots(G1RootClosures* closures,
bool is_par = n_workers() > 1;
Threads::possibly_parallel_oops_do(is_par,
closures->strong_oops(),
closures->thread_root_clds(),
closures->strong_codeblobs());
}
}
@ -58,19 +58,16 @@ void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
ParCompactionManager::gc_thread_compaction_manager(which);

ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
CLDToOopClosure mark_and_push_from_clds(&mark_and_push_closure, true);
MarkingCodeBlobClosure mark_and_push_in_blobs(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);

if (_java_thread != NULL)
_java_thread->oops_do(
&mark_and_push_closure,
&mark_and_push_from_clds,
&mark_and_push_in_blobs);

if (_vm_thread != NULL)
_vm_thread->oops_do(
&mark_and_push_closure,
&mark_and_push_from_clds,
&mark_and_push_in_blobs);

// Do the real work
@ -99,8 +96,7 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
{
ResourceMark rm;
MarkingCodeBlobClosure each_active_code_blob(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);
CLDToOopClosure mark_and_push_from_cld(&mark_and_push_closure);
Threads::oops_do(&mark_and_push_closure, &mark_and_push_from_cld, &each_active_code_blob);
Threads::oops_do(&mark_and_push_closure, &each_active_code_blob);
}
break;

@ -505,9 +505,8 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
ParallelScavengeHeap::ParStrongRootsScope psrs;
Universe::oops_do(mark_and_push_closure());
JNIHandles::oops_do(mark_and_push_closure()); // Global (strong) JNI handles
CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
ObjectSynchronizer::oops_do(mark_and_push_closure());
FlatProfiler::oops_do(mark_and_push_closure());
Management::oops_do(mark_and_push_closure());
@ -597,8 +596,7 @@ void PSMarkSweep::mark_sweep_phase3() {
// General strong roots.
Universe::oops_do(adjust_pointer_closure());
JNIHandles::oops_do(adjust_pointer_closure()); // Global (strong) JNI handles
CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
Threads::oops_do(adjust_pointer_closure(), NULL);
ObjectSynchronizer::oops_do(adjust_pointer_closure());
FlatProfiler::oops_do(adjust_pointer_closure());
Management::oops_do(adjust_pointer_closure());

@ -2148,8 +2148,7 @@ void PSParallelCompact::adjust_roots(ParCompactionManager* cm) {
// General strong roots.
Universe::oops_do(&oop_closure);
JNIHandles::oops_do(&oop_closure); // Global (strong) JNI handles
CLDToOopClosure adjust_from_cld(&oop_closure);
Threads::oops_do(&oop_closure, &adjust_from_cld, NULL);
Threads::oops_do(&oop_closure, NULL);
ObjectSynchronizer::oops_do(&oop_closure);
FlatProfiler::oops_do(&oop_closure);
Management::oops_do(&oop_closure);

@ -65,8 +65,7 @@ void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {
case threads:
{
ResourceMark rm;
CLDClosure* cld_closure = NULL; // Not needed. All CLDs are already visited.
Threads::oops_do(&roots_closure, cld_closure, NULL);
Threads::oops_do(&roots_closure, NULL);
}
break;

@ -122,14 +121,13 @@ void ThreadRootsTask::do_it(GCTaskManager* manager, uint which) {

PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
PSScavengeRootsClosure roots_closure(pm);
CLDClosure* roots_from_clds = NULL; // Not needed. All CLDs are already visited.
MarkingCodeBlobClosure roots_in_blobs(&roots_closure, CodeBlobToOopClosure::FixRelocations);

if (_java_thread != NULL)
_java_thread->oops_do(&roots_closure, roots_from_clds, &roots_in_blobs);
_java_thread->oops_do(&roots_closure, &roots_in_blobs);

if (_vm_thread != NULL)
_vm_thread->oops_do(&roots_closure, roots_from_clds, &roots_in_blobs);
_vm_thread->oops_do(&roots_closure, &roots_in_blobs);

// Do the real work
pm->drain_stacks(false);

@ -582,14 +582,11 @@ void GenCollectedHeap::process_roots(StrongRootsScope* scope,
ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
}

// Some CLDs contained in the thread frames should be considered strong.
// Don't process them if they will be processed during the ClassLoaderDataGraph phase.
CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
// Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;

bool is_par = scope->n_threads() > 1;
Threads::possibly_parallel_oops_do(is_par, strong_roots, roots_from_clds_p, roots_from_code_p);
Threads::possibly_parallel_oops_do(is_par, strong_roots, roots_from_code_p);

if (!_process_strong_tasks->is_task_claimed(GCH_PS_Universe_oops_do)) {
Universe::oops_do(strong_roots);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -110,6 +110,7 @@ private:
intptr_t* _locals; // local variable pointer
ConstantPoolCache* _constants; // constant pool cache
Method* _method; // method being executed
oop _mirror; // mirror to klass containing method
DataLayout* _mdx; // compiler profiling data for current bytecode
intptr_t* _stack; // expression stack
messages _msg; // frame manager <-> interpreter message

@ -252,7 +252,7 @@ bool LogFileOutput::initialize(const char* options, outputStream* errstream) {

if (_file_count == 0 && is_regular_file(_file_name)) {
log_trace(logging)("Truncating log file");
os::ftruncate(os::fileno(_stream), 0);
os::ftruncate(os::get_fileno(_stream), 0);
}

return true;

@ -762,7 +762,7 @@ JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_m
guarantee(false, "wrong number of expression stack elements during deopt");
}
VerifyOopClosure verify;
iframe->oops_interpreted_do(&verify, NULL, &rm, false);
iframe->oops_interpreted_do(&verify, &rm, false);
callee_size_of_parameters = mh->size_of_parameters();
callee_max_locals = mh->max_locals();
is_top_frame = false;

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -396,6 +396,11 @@ void frame::interpreter_frame_set_method(Method* method) {
*interpreter_frame_method_addr() = method;
}

void frame::interpreter_frame_set_mirror(oop mirror) {
assert(is_interpreted_frame(), "interpreted frame expected");
*interpreter_frame_mirror_addr() = mirror;
}
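interpreter_frame_set_mirror() fills the mirror slot that the enlarged interpreter frame layout reserves. Once the mirror oop sits in the frame, plain oop iteration over the frame reaches the method's klass, which is what lets the CLDClosure plumbing below be deleted. A self-contained sketch of the idea with stand-in types (not HotSpot's frame or OopClosure):

    // Stand-in for OopClosure: visits one oop slot at a time.
    struct OopVisitor {
      virtual void do_oop(void** p) = 0;
      virtual ~OopVisitor() {}
    };

    struct InterpFrameSketch {
      void* _mirror;                         // extra slot holding the class mirror
      void** mirror_addr() { return &_mirror; }
      void set_mirror(void* m) { *mirror_addr() = m; }
      // Frame scanning now visits the mirror slot like any other oop slot,
      // keeping the method's klass alive through its mirror during GC.
      void oops_do(OopVisitor* v) { v->do_oop(mirror_addr()); }
    };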
jint frame::interpreter_frame_bci() const {
assert(is_interpreted_frame(), "interpreted frame expected");
address bcp = interpreter_frame_bcp();
@ -852,8 +857,7 @@ oop* frame::interpreter_callee_receiver_addr(Symbol* signature) {
}

void frame::oops_interpreted_do(OopClosure* f, CLDClosure* cld_f,
const RegisterMap* map, bool query_oop_map_cache) {
void frame::oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache) {
assert(is_interpreted_frame(), "Not an interpreted frame");
assert(map != NULL, "map must be set");
Thread *thread = Thread::current();
@ -879,21 +883,16 @@ void frame::oops_interpreted_do(OopClosure* f, CLDClosure* cld_f,
current->oops_do(f);
}

// process fixed part
if (cld_f != NULL) {
// The method pointer in the frame might be the only path to the method's
// klass, and the klass needs to be kept alive while executing. The GCs
// don't trace through method pointers, so typically in similar situations
// the mirror or the class loader of the klass are installed as a GC root.
// To minimize the overhead of doing that here, we ask the GC to pass down a
// closure that knows how to keep klasses alive given a ClassLoaderData.
cld_f->do_cld(m->method_holder()->class_loader_data());
}

if (m->is_native() PPC32_ONLY(&& m->is_static())) {
if (m->is_native()) {
f->do_oop(interpreter_frame_temp_oop_addr());
}

// The method pointer in the frame might be the only path to the method's
// klass, and the klass needs to be kept alive while executing. The GCs
// don't trace through method pointers, so the mirror of the method's klass
// is installed as a GC root.
f->do_oop(interpreter_frame_mirror_addr());

int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();

Symbol* signature = NULL;
@ -1093,7 +1092,7 @@ void frame::oops_entry_do(OopClosure* f, const RegisterMap* map) {
}

void frame::oops_do_internal(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) {
void frame::oops_do_internal(OopClosure* f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) {
#ifndef PRODUCT
// simulate GC crash here to dump java thread in error report
if (CrashGCForDumpingJavaThread) {
@ -1102,7 +1101,7 @@ void frame::oops_do_internal(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure*
}
#endif
if (is_interpreted_frame()) {
oops_interpreted_do(f, cld_f, map, use_interpreter_oop_map_cache);
oops_interpreted_do(f, map, use_interpreter_oop_map_cache);
} else if (is_entry_frame()) {
oops_entry_do(f, map);
} else if (CodeCache::contains(pc())) {
@ -1147,7 +1146,7 @@ void frame::verify(const RegisterMap* map) {
#if defined(COMPILER2) || INCLUDE_JVMCI
assert(DerivedPointerTable::is_empty(), "must be empty before verify");
#endif
oops_do_internal(&VerifyOopClosure::verify_oop, NULL, NULL, (RegisterMap*)map, false);
oops_do_internal(&VerifyOopClosure::verify_oop, NULL, (RegisterMap*)map, false);
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -320,6 +320,9 @@ class frame VALUE_OBJ_CLASS_SPEC {
void interpreter_frame_set_method(Method* method);
Method** interpreter_frame_method_addr() const;
ConstantPoolCache** interpreter_frame_cache_addr() const;
oop* interpreter_frame_mirror_addr() const;

void interpreter_frame_set_mirror(oop mirror);

public:
// Entry frames
@ -386,19 +389,19 @@ class frame VALUE_OBJ_CLASS_SPEC {

// Oops-do's
void oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix, const RegisterMap* reg_map, OopClosure* f);
void oops_interpreted_do(OopClosure* f, CLDClosure* cld_f, const RegisterMap* map, bool query_oop_map_cache = true);
void oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache = true);

private:
void oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f);

// Iteration of oops
void oops_do_internal(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache);
void oops_do_internal(OopClosure* f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache);
void oops_entry_do(OopClosure* f, const RegisterMap* map);
void oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map);
int adjust_offset(Method* method, int index); // helper for above fn
public:
// Memory management
void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map) { oops_do_internal(f, cld_f, cf, map, true); }
void oops_do(OopClosure* f, CodeBlobClosure* cf, RegisterMap* map) { oops_do_internal(f, cf, map, true); }
void nmethods_do(CodeBlobClosure* cf);

// RedefineClasses support for finding live interpreted methods on the stack

@ -520,7 +520,7 @@ class os: AllStatic {
static int ftruncate(int fd, jlong length);
static int fsync(int fd);
static int available(int fd, jlong *bytes);
static int fileno(FILE* fp);
static int get_fileno(FILE* fp);

static int compare_file_modified_times(const char* file1, const char* file2);
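Renaming os::fileno to os::get_fileno sidesteps a name clash: POSIX allows fileno to be defined as a macro, and such a definition would mangle a method named os::fileno at call sites like the LogFileOutput one above. The platform-specific bodies are not part of these hunks; on POSIX the wrapper would plausibly just delegate, as in this hypothetical sketch:

    #include <cstdio>

    // Hypothetical POSIX-side body (an assumption, not shown in this diff):
    // forward to the C library's fileno() from the non-colliding name.
    static int os_get_fileno(FILE* fp) {
      return fileno(fp);
    }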
@ -785,7 +785,7 @@ bool Thread::claim_oops_do_par_case(int strong_roots_parity) {
return false;
}

void Thread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
void Thread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
active_handles()->oops_do(f);
// Do oop for ThreadShadow
f->do_oop((oop*)&_pending_exception);
@ -2758,7 +2758,7 @@ class RememberProcessedThread: public StackObj {
}
};

void JavaThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
void JavaThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
// Verify that the deferred card marks have been flushed.
assert(deferred_card_mark().is_empty(), "Should be empty during GC");

@ -2766,7 +2766,7 @@ void JavaThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf)
// since there may be more than one thread using each ThreadProfiler.

// Traverse the GCHandles
Thread::oops_do(f, cld_f, cf);
Thread::oops_do(f, cf);

JVMCI_ONLY(f->do_oop((oop*)&_pending_failed_speculation);)

@ -2796,7 +2796,7 @@ void JavaThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf)

// Traverse the execution stack
for (StackFrameStream fst(this); !fst.is_done(); fst.next()) {
fst.current()->oops_do(f, cld_f, cf, fst.register_map());
fst.current()->oops_do(f, cf, fst.register_map());
}
}

@ -2946,7 +2946,7 @@ static void frame_verify(frame* f, const RegisterMap *map) { f->verify(map); }

void JavaThread::verify() {
// Verify oops in the thread.
oops_do(&VerifyOopClosure::verify_oop, NULL, NULL);
oops_do(&VerifyOopClosure::verify_oop, NULL);

// Verify the stack frames.
frames_do(frame_verify);
@ -3186,7 +3186,7 @@ class PrintAndVerifyOopClosure: public OopClosure {
static void oops_print(frame* f, const RegisterMap *map) {
PrintAndVerifyOopClosure print;
f->print_value();
f->oops_do(&print, NULL, NULL, (RegisterMap*)map);
f->oops_do(&print, NULL, (RegisterMap*)map);
}

// Print out all the locations that contain oops and whether they are
@ -3303,8 +3303,8 @@ CodeCacheSweeperThread::CodeCacheSweeperThread()
_scanned_nmethod = NULL;
}

void CodeCacheSweeperThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
JavaThread::oops_do(f, cld_f, cf);
void CodeCacheSweeperThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
JavaThread::oops_do(f, cf);
if (_scanned_nmethod != NULL && cf != NULL) {
// Safepoints can occur when the sweeper is scanning an nmethod so
// process it here to make sure it isn't unloaded in the middle of
@ -4291,11 +4291,11 @@ bool Threads::includes(JavaThread* p) {
// uses the Threads_lock to guarantee this property. It also makes sure that
// all threads get blocked when exiting or starting).

void Threads::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
void Threads::oops_do(OopClosure* f, CodeBlobClosure* cf) {
ALL_JAVA_THREADS(p) {
p->oops_do(f, cld_f, cf);
p->oops_do(f, cf);
}
VMThread::vm_thread()->oops_do(f, cld_f, cf);
VMThread::vm_thread()->oops_do(f, cf);
}

void Threads::change_thread_claim_parity() {
@ -4318,16 +4318,16 @@ void Threads::assert_all_threads_claimed() {
}
#endif // ASSERT

void Threads::possibly_parallel_oops_do(bool is_par, OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
void Threads::possibly_parallel_oops_do(bool is_par, OopClosure* f, CodeBlobClosure* cf) {
int cp = Threads::thread_claim_parity();
ALL_JAVA_THREADS(p) {
if (p->claim_oops_do(is_par, cp)) {
p->oops_do(f, cld_f, cf);
p->oops_do(f, cf);
}
}
VMThread* vmt = VMThread::vm_thread();
if (vmt->claim_oops_do(is_par, cp)) {
vmt->oops_do(f, cld_f, cf);
vmt->oops_do(f, cf);
}
}
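After the signature change, thread root scanning takes just the oop closure plus an optional code-blob closure. A minimal caller-side sketch, assuming HotSpot's OopClosure interface and Threads API are in scope via the usual includes; the counting closure itself is hypothetical:

    // Hypothetical closure that counts the thread-root oops it is shown.
    class CountOopsClosure : public OopClosure {
      size_t _count;
    public:
      CountOopsClosure() : _count(0) {}
      virtual void do_oop(oop* p)       { _count++; }
      virtual void do_oop(narrowOop* p) { _count++; }
      size_t count() const { return _count; }
    };

    // At a safepoint, for example:
    //   CountOopsClosure cl;
    //   Threads::oops_do(&cl, NULL);   // no CLDClosure argument anymore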
@ -482,10 +482,9 @@ class Thread: public ThreadShadow {

// GC support
// Apply "f->do_oop" to all root oops in "this".
// Apply "cld_f->do_cld" to CLDs that are otherwise not kept alive.
// Used by JavaThread::oops_do.
// Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
virtual void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
virtual void oops_do(OopClosure* f, CodeBlobClosure* cf);

// Handles the parallel case for the method below.
private:
@ -1642,7 +1641,7 @@ class JavaThread: public Thread {
void frames_do(void f(frame*, const RegisterMap*));

// Memory operations
void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
void oops_do(OopClosure* f, CodeBlobClosure* cf);

// Sweeper operations
virtual void nmethods_do(CodeBlobClosure* cf);
@ -1995,7 +1994,7 @@ class CodeCacheSweeperThread : public JavaThread {
bool is_Code_cache_sweeper_thread() const { return true; }

// Prevent GC from unloading _scanned_nmethod
void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
void oops_do(OopClosure* f, CodeBlobClosure* cf);
void nmethods_do(CodeBlobClosure* cf);
};

@ -2122,9 +2121,9 @@ class Threads: AllStatic {

// Apply "f->do_oop" to all root oops in all threads.
// This version may only be called by sequential code.
static void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
static void oops_do(OopClosure* f, CodeBlobClosure* cf);
// This version may be called by sequential or parallel code.
static void possibly_parallel_oops_do(bool is_par, OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
static void possibly_parallel_oops_do(bool is_par, OopClosure* f, CodeBlobClosure* cf);
// This creates a list of GCTasks, one per thread.
static void create_thread_roots_tasks(GCTaskQueue* q);
// This creates a list of GCTasks, one per thread, for marking objects.

@ -655,8 +655,8 @@ void VMThread::execute(VM_Operation* op) {
}

void VMThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
Thread::oops_do(f, cld_f, cf);
void VMThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
Thread::oops_do(f, cf);
_vm_queue->oops_do(f);
}

@ -688,5 +688,5 @@ void VMOperationQueue::verify_queue(int prio) {
#endif

void VMThread::verify() {
oops_do(&VerifyOopClosure::verify_oop, NULL, NULL);
oops_do(&VerifyOopClosure::verify_oop, NULL);
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -126,7 +126,7 @@ class VMThread: public NamedThread {
static VMThread* vm_thread() { return _vm_thread; }

// GC support
void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
void oops_do(OopClosure* f, CodeBlobClosure* cf);

void verify();

@ -353,7 +353,7 @@ hotspot_fast_runtime = \
runtime/ \
-runtime/ErrorHandling/ErrorHandler.java \
-runtime/RedefineObject/TestRedefineObject.java \
-runtime/8003720/Test8003720.java \
-runtime/MirrorFrame/Test8003720.java \
-runtime/Metaspace/FragmentMetaspace.java \
-runtime/Metaspace/FragmentMetaspaceSimple.java \
-runtime/Thread/TestThreadDumpMonitorContention.java \

@ -1,96 +0,0 @@
/*
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/

/*
* @summary Simple jar builder
* Input: jarName className1 className2 ...
* do not specify extensions, just the names
* E.g. prot_domain ProtDomainA ProtDomainB
* Output: A jar containing compiled classes, placed in a test classes folder
*/

import jdk.test.lib.*;

import java.io.File;
import java.util.ArrayList;
import sun.tools.jar.Main;

// Using JarBuilder requires that all to-be-jarred classes should be placed
// in the current working directory, aka "."
public class BasicJarBuilder {
private static final String classDir = System.getProperty("test.classes");

public static void build(boolean classesInWorkDir, String jarName,
String ...classNames) throws Exception {

if (classesInWorkDir) {
createSimpleJar(".", classDir + File.separator + jarName + ".jar", classNames);
} else {
build(jarName, classNames);
}
}

public static void build(String jarName, String ...classNames) throws Exception {
createSimpleJar(classDir, classDir + File.separator + jarName + ".jar",
classNames);
}

private static void createSimpleJar(String jarclassDir, String jarName,
String[] classNames) throws Exception {
ArrayList<String> args = new ArrayList<String>();
args.add("cf");
args.add(jarName);
addClassArgs(args, jarclassDir, classNames);
createJar(args);
}

private static void addClassArgs(ArrayList<String> args, String jarclassDir,
String[] classNames) {

for (String name : classNames) {
args.add("-C");
args.add(jarclassDir);
args.add(name + ".class");
}
}

private static void createJar(ArrayList<String> args) {
Main jarTool = new Main(System.out, System.err, "jar");
if (!jarTool.run(args.toArray(new String[1]))) {
throw new RuntimeException("jar operation failed");
}
}

// Get full path to the test jar
public static String getTestJar(String jar) {
File dir = new File(System.getProperty("test.classes", "."));
File jarFile = new File(dir, jar);
if (!jarFile.exists()) {
throw new RuntimeException("Cannot find " + jarFile.getPath());
}
if (!jarFile.isFile()) {
throw new RuntimeException("Not a regular file: " + jarFile.getPath());
}
return jarFile.getPath();
}
}
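Going by the class's own summary, a test that has compiled ProtDomainA and ProtDomainB into the test classes directory would call BasicJarBuilder.build("prot_domain", "ProtDomainA", "ProtDomainB") and then locate the result with BasicJarBuilder.getTestJar("prot_domain.jar").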