commit 80971e6c20
Author: Rachel Protacio
Date:   2016-06-03 16:19:53 +00:00
28 changed files with 563 additions and 168 deletions

View File

@@ -3,7 +3,7 @@ The GNU General Public License (GPL)
 Version 2, June 1991
 Copyright (C) 1989, 1991 Free Software Foundation, Inc.
-59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 Everyone is permitted to copy and distribute verbatim copies of this license
 document, but changing it is not allowed.

@@ -287,8 +287,8 @@ pointer to where the full notice is found.
 more details.
 You should have received a copy of the GNU General Public License along
-with this program; if not, write to the Free Software Foundation, Inc., 59
-Temple Place, Suite 330, Boston, MA 02111-1307 USA
+with this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 Also add information on how to contact you by electronic and paper mail.

View File

@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -1530,6 +1530,10 @@ class Assembler : public AbstractAssembler {
   inline void ld(  Register d, int si16, Register s1);
   inline void ldu( Register d, int si16, Register s1);
+
+  // For convenience. Load pointer into d from b+s1.
+  inline void ld_ptr(Register d, int b, Register s1);
+  DEBUG_ONLY(inline void ld_ptr(Register d, ByteSize b, Register s1);)

   // PPC 1, section 3.3.3 Fixed-Point Store Instructions
   inline void stwx( Register d, Register s1, Register s2);
   inline void stw(  Register d, int si16, Register s1);

@@ -2194,7 +2198,8 @@ class Assembler : public AbstractAssembler {
   void add( Register d, RegisterOrConstant roc, Register s1);
   void subf(Register d, RegisterOrConstant roc, Register s1);
   void cmpd(ConditionRegister d, RegisterOrConstant roc, Register s1);
+  // Load pointer d from s1+roc.
+  void ld_ptr(Register d, RegisterOrConstant roc, Register s1 = noreg) { ld(d, roc, s1); }

   // Emit several instructions to load a 64 bit constant. This issues a fixed
   // instruction pattern so that the constant can be patched later on.

View File

@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -328,6 +328,9 @@ inline void Assembler::ld( Register d, int si16, Register s1) { emit_int32(
 inline void Assembler::ldx( Register d, Register s1, Register s2) { emit_int32(LDX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::ldu( Register d, int si16, Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LDU_OPCODE | rt(d) | ds(si16) | rta0mem(s1));}
+inline void Assembler::ld_ptr(Register d, int b, Register s1) { ld(d, b, s1); }
+DEBUG_ONLY(inline void Assembler::ld_ptr(Register d, ByteSize b, Register s1) { ld(d, in_bytes(b), s1); })

 // PPC 1, section 3.3.3 Fixed-Point Store Instructions
 inline void Assembler::stwx( Register d, Register s1, Register s2) { emit_int32(STWX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::stw( Register d, int si16, Register s1) { emit_int32(STW_OPCODE | rs(d) | d1(si16) | ra0mem(s1));}

View File

@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -1242,7 +1242,7 @@ void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
 void LIR_Assembler::return_op(LIR_Opr result) {
-  const Register return_pc    = R11;
+  const Register return_pc    = R31;  // Must survive C-call to enable_stack_reserved_zone().
   const Register polling_page = R12;

   // Pop the stack before the safepoint code.

@@ -1265,6 +1265,10 @@ void LIR_Assembler::return_op(LIR_Opr result) {
   // Move return pc to LR.
   __ mtlr(return_pc);
+
+  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
+    __ reserved_stack_check(return_pc);
+  }

   // We need to mark the code position where the load from the safepoint
   // polling page was emitted as relocInfo::poll_return_type here.
   __ relocate(relocInfo::poll_return_type);

View File

@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -52,4 +52,6 @@ const bool CCallingConventionRequiresIntsAsLongs = true;
 #define INCLUDE_RTM_OPT 1
 #endif
+
+#define SUPPORT_RESERVED_STACK_AREA

 #endif // CPU_PPC_VM_GLOBALDEFINITIONS_PPC_HPP

View File

@@ -43,7 +43,7 @@ define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs pas
 #define DEFAULT_STACK_YELLOW_PAGES    (6)
 #define DEFAULT_STACK_RED_PAGES       (1)
 #define DEFAULT_STACK_SHADOW_PAGES    (6 DEBUG_ONLY(+2))
-#define DEFAULT_STACK_RESERVED_PAGES  (0)
+#define DEFAULT_STACK_RESERVED_PAGES  (1)

 #define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
 #define MIN_STACK_RED_PAGES    DEFAULT_STACK_RED_PAGES

View File

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -480,6 +480,7 @@ void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass, Register
 void InterpreterMacroAssembler::generate_stack_overflow_check_with_compare_and_throw(Register Rmem_frame_size, Register Rscratch1) {
   Label done;
+  BLOCK_COMMENT("stack_overflow_check_with_compare_and_throw {");
   sub(Rmem_frame_size, R1_SP, Rmem_frame_size);
   ld(Rscratch1, thread_(stack_overflow_limit));
   cmpld(CCR0/*is_stack_overflow*/, Rmem_frame_size, Rscratch1);

@@ -501,6 +502,7 @@ void InterpreterMacroAssembler::generate_stack_overflow_check_with_compare_and_t
   align(32, 12);
   bind(done);
+  BLOCK_COMMENT("} stack_overflow_check_with_compare_and_throw");
 }

 // Separate these two to allow for delay slot in middle.

@@ -805,16 +807,41 @@ void InterpreterMacroAssembler::narrow(Register result) {
 void InterpreterMacroAssembler::remove_activation(TosState state,
                                                   bool throw_monitor_exception,
                                                   bool install_monitor_exception) {
+  BLOCK_COMMENT("remove_activation {");
   unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);

   // Save result (push state before jvmti call and pop it afterwards) and notify jvmti.
   notify_method_exit(false, state, NotifyJVMTI, true);

+  BLOCK_COMMENT("reserved_stack_check:");
+  if (StackReservedPages > 0) {
+    // Test if reserved zone needs to be enabled.
+    Label no_reserved_zone_enabling;
+
+    // Compare frame pointers. There is no good stack pointer, as with stack
+    // frame compression we can get different SPs when we do calls. A subsequent
+    // call could have a smaller SP, so that this compare succeeds for an
+    // inner call of the method annotated with ReservedStack.
+    ld_ptr(R0, JavaThread::reserved_stack_activation_offset(), R16_thread);
+    ld_ptr(R11_scratch1, _abi(callers_sp), R1_SP);  // Load frame pointer.
+    cmpld(CCR0, R11_scratch1, R0);
+    blt_predict_taken(CCR0, no_reserved_zone_enabling);
+
+    // Enable reserved zone again, throw stack overflow exception.
+    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), R16_thread);
+    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_delayed_StackOverflowError));
+
+    should_not_reach_here();
+
+    bind(no_reserved_zone_enabling);
+  }
+
   verify_oop(R17_tos, state);
   verify_thread();

   merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
   mtlr(R0);
+  BLOCK_COMMENT("} remove_activation");
 }

 // Lock object

View File

@@ -1400,6 +1400,28 @@ address MacroAssembler::get_stack_bang_address(int instruction, void *ucontext)
 #endif
 }

+void MacroAssembler::reserved_stack_check(Register return_pc) {
+  // Test if reserved zone needs to be enabled.
+  Label no_reserved_zone_enabling;
+
+  ld_ptr(R0, JavaThread::reserved_stack_activation_offset(), R16_thread);
+  cmpld(CCR0, R1_SP, R0);
+  blt_predict_taken(CCR0, no_reserved_zone_enabling);
+
+  // Enable reserved zone again, throw stack overflow exception.
+  push_frame_reg_args(0, R0);
+  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), R16_thread);
+  pop_frame();
+  mtlr(return_pc);
+  load_const_optimized(R0, StubRoutines::throw_delayed_StackOverflowError_entry());
+  mtctr(R0);
+  bctr();
+
+  should_not_reach_here();
+
+  bind(no_reserved_zone_enabling);
+}
+
 // CmpxchgX sets condition register to cmpX(current, compare).
 void MacroAssembler::cmpxchgw(ConditionRegister flag, Register dest_current_value,
                               Register compare_value, Register exchange_value,
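
Note: a minimal, runnable C++ model of the watermark test emitted above; ToyThread and needs_zone_reenable are illustrative names, not HotSpot code. Stacks grow downward, so an inner call has a smaller SP than the frame that set the watermark (the interpreter variant compares the caller's SP instead of R1_SP); the reserved zone is only re-enabled, and the delayed StackOverflowError only thrown, once the owning activation is being removed:

    #include <cstdint>
    #include <cstdio>

    struct ToyThread {
      // Watermark set when a @ReservedStackAccess activation is found; frames
      // below it may still use the reserved zone.
      uintptr_t reserved_stack_activation;
    };

    // Mirrors cmpld(CCR0, R1_SP, R0) + blt: skip re-enabling while SP is still
    // below the watermark (an inner frame); act once the owner returns.
    static bool needs_zone_reenable(const ToyThread& t, uintptr_t sp) {
      return sp >= t.reserved_stack_activation;
    }

    int main() {
      ToyThread t = { 0x7000 };
      printf("%d\n", needs_zone_reenable(t, 0x6ff0)); // 0: inner frame, fall through
      printf("%d\n", needs_zone_reenable(t, 0x7010)); // 1: re-enable zone, throw delayed SOE
      return 0;
    }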

View File

@@ -411,6 +411,10 @@ class MacroAssembler: public Assembler {
   // stdux, return the banged address. Otherwise, return 0.
   static address get_stack_bang_address(int instruction, void* ucontext);

+  // Check for reserved stack access in method being exited. If the reserved
+  // stack area was accessed, protect it again and throw StackOverflowError.
+  void reserved_stack_check(Register return_pc);
+
   // Atomics
   // CmpxchgX sets condition register to cmpX(current, compare).
   // (flag == ne) => (dest_current_value != compare_value), (!swapped)

View File

@@ -1432,7 +1432,7 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
   const bool method_needs_polling = do_polling() && C->is_method_compilation();
   const bool method_is_frameless  = false /* TODO: PPC port C->is_frameless_method()*/;
-  const Register return_pc        = R11;
+  const Register return_pc        = R31;  // Must survive C-call to enable_stack_reserved_zone().
   const Register polling_page     = R12;

   if (!method_is_frameless) {

@@ -1456,6 +1456,10 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
     __ addi(R1_SP, R1_SP, (int)framesize);
   }

+  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
+    __ reserved_stack_check(return_pc);
+  }
+
   if (method_needs_polling) {
     // We need to mark the code position where the load from the safepoint
     // polling page was emitted as relocInfo::poll_return_type here.

View File

@@ -3082,6 +3082,9 @@ class StubGenerator: public StubCodeGenerator {
     StubRoutines::_throw_StackOverflowError_entry =
       generate_throw_exception("StackOverflowError throw_exception",
                                CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
+    StubRoutines::_throw_delayed_StackOverflowError_entry =
+      generate_throw_exception("delayed StackOverflowError throw_exception",
+                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError), false);

     // CRC32 Intrinsics.
     if (UseCRC32Intrinsics) {

View File

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2013, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2013, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -128,6 +128,8 @@ class Aix {
   // Set PC into context. Needed for continuation after signal.
   static void ucontext_set_pc(ucontext_t* uc, address pc);

+  static bool get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr);
+
   // This boolean allows users to forward their own non-matching signals
   // to JVM_handle_aix_signal, harmlessly.
   static bool signal_handlers_are_installed;

View File

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2014 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -24,7 +24,7 @@
  */

 // no precompiled headers
-#include "assembler_ppc.inline.hpp"
+#include "asm/assembler.inline.hpp"
 #include "classfile/classLoader.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"

@@ -145,6 +145,41 @@ frame os::fetch_frame_from_context(const void* ucVoid) {
   return fr;
 }

+bool os::Aix::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) {
+  address pc = (address) os::Aix::ucontext_get_pc(uc);
+  if (Interpreter::contains(pc)) {
+    // Interpreter performs stack banging after the fixed frame header has
+    // been generated while the compilers perform it before. To maintain
+    // semantic consistency between interpreted and compiled frames, the
+    // method returns the Java sender of the current frame.
+    *fr = os::fetch_frame_from_context(uc);
+    if (!fr->is_first_java_frame()) {
+      assert(fr->safe_for_sender(thread), "Safety check");
+      *fr = fr->java_sender();
+    }
+  } else {
+    // More complex code with compiled code.
+    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
+    CodeBlob* cb = CodeCache::find_blob(pc);
+    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
+      // Not sure where the pc points to, fall back to default
+      // stack overflow handling. In compiled code, we bang before
+      // the frame is complete.
+      return false;
+    } else {
+      intptr_t* sp = os::Aix::ucontext_get_sp(uc);
+      *fr = frame(sp, (address)*sp);
+      if (!fr->is_java_frame()) {
+        assert(fr->safe_for_sender(thread), "Safety check");
+        assert(!fr->is_first_frame(), "Safety check");
+        *fr = fr->java_sender();
+      }
+    }
+  }
+  assert(fr->is_java_frame(), "Safety check");
+  return true;
+}
+
 frame os::get_sender_for_C_frame(frame* fr) {
   if (*fr->sp() == NULL) {
     // fr is the last C frame

@@ -246,14 +281,32 @@ JVM_handle_aix_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrec
         // to continue with yellow zone disabled, but that doesn't buy us much and prevents
         // hs_err_pid files.
         if (thread->in_stack_yellow_reserved_zone(addr)) {
-          thread->disable_stack_yellow_reserved_zone();
           if (thread->thread_state() == _thread_in_Java) {
+            if (thread->in_stack_reserved_zone(addr)) {
+              frame fr;
+              if (os::Aix::get_frame_at_stack_banging_point(thread, uc, &fr)) {
+                assert(fr.is_java_frame(), "Must be a Java frame");
+                frame activation =
+                  SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
+                if (activation.sp() != NULL) {
+                  thread->disable_stack_reserved_zone();
+                  if (activation.is_interpreted_frame()) {
+                    thread->set_reserved_stack_activation((address)activation.fp());
+                  } else {
+                    thread->set_reserved_stack_activation((address)activation.unextended_sp());
+                  }
+                  return 1;
+                }
+              }
+            }
+
             // Throw a stack overflow exception.
             // Guard pages will be reenabled while unwinding the stack.
+            thread->disable_stack_yellow_reserved_zone();
             stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
             goto run_stub;
           } else {
             // Thread was in the vm or native code. Return and try to finish.
+            thread->disable_stack_yellow_reserved_zone();
             return 1;
           }
         } else if (thread->in_stack_red_zone(addr)) {
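
Note: a sketch of the decision the rewritten handler makes on a yellow-zone hit, as runnable C++ with hypothetical names (handle_yellow_zone_hit is my paraphrase, not a HotSpot function). Only a fault in the reserved sub-zone, while executing Java code, with a @ReservedStackAccess activation found on the stack, lets execution continue; every other case still ends in StackOverflowError:

    #include <cstdio>

    enum Outcome { CONTINUE_EXECUTION, THROW_STACK_OVERFLOW };

    static Outcome handle_yellow_zone_hit(bool in_java,
                                          bool in_reserved_zone,
                                          bool annotated_activation_found) {
      if (in_java && in_reserved_zone && annotated_activation_found) {
        // disable_stack_reserved_zone(); set_reserved_stack_activation(...)
        return CONTINUE_EXECUTION;            // handler returns 1
      }
      // disable_stack_yellow_reserved_zone(); guard pages restored on unwind
      return THROW_STACK_OVERFLOW;
    }

    int main() {
      printf("%d\n", handle_yellow_zone_hit(true, true, true));   // 0: continue
      printf("%d\n", handle_yellow_zone_hit(true, true, false));  // 1: throw
      return 0;
    }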

View File

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -24,7 +24,7 @@
  */

 // no precompiled headers
-#include "assembler_ppc.inline.hpp"
+#include "asm/assembler.inline.hpp"
 #include "classfile/classLoader.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"

@@ -157,6 +157,42 @@ frame os::fetch_frame_from_context(const void* ucVoid) {
   return frame(sp, epc.pc());
 }

+bool os::Linux::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) {
+  address pc = (address) os::Linux::ucontext_get_pc(uc);
+  if (Interpreter::contains(pc)) {
+    // Interpreter performs stack banging after the fixed frame header has
+    // been generated while the compilers perform it before. To maintain
+    // semantic consistency between interpreted and compiled frames, the
+    // method returns the Java sender of the current frame.
+    *fr = os::fetch_frame_from_context(uc);
+    if (!fr->is_first_java_frame()) {
+      assert(fr->safe_for_sender(thread), "Safety check");
+      *fr = fr->java_sender();
+    }
+  } else {
+    // More complex code with compiled code.
+    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
+    CodeBlob* cb = CodeCache::find_blob(pc);
+    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
+      // Not sure where the pc points to, fall back to default
+      // stack overflow handling. In compiled code, we bang before
+      // the frame is complete.
+      return false;
+    } else {
+      intptr_t* fp = os::Linux::ucontext_get_fp(uc);
+      intptr_t* sp = os::Linux::ucontext_get_sp(uc);
+      *fr = frame(sp, (address)*sp);
+      if (!fr->is_java_frame()) {
+        assert(fr->safe_for_sender(thread), "Safety check");
+        assert(!fr->is_first_frame(), "Safety check");
+        *fr = fr->java_sender();
+      }
+    }
+  }
+  assert(fr->is_java_frame(), "Safety check");
+  return true;
+}
+
 frame os::get_sender_for_C_frame(frame* fr) {
   if (*fr->sp() == 0) {
     // fr is the last C frame

@@ -243,13 +279,31 @@ JVM_handle_linux_signal(int sig,
       if (thread->on_local_stack(addr)) {
         // stack overflow
         if (thread->in_stack_yellow_reserved_zone(addr)) {
-          thread->disable_stack_yellow_reserved_zone();
           if (thread->thread_state() == _thread_in_Java) {
+            if (thread->in_stack_reserved_zone(addr)) {
+              frame fr;
+              if (os::Linux::get_frame_at_stack_banging_point(thread, uc, &fr)) {
+                assert(fr.is_java_frame(), "Must be a Java frame");
+                frame activation =
+                  SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
+                if (activation.sp() != NULL) {
+                  thread->disable_stack_reserved_zone();
+                  if (activation.is_interpreted_frame()) {
+                    thread->set_reserved_stack_activation((address)activation.fp());
+                  } else {
+                    thread->set_reserved_stack_activation((address)activation.unextended_sp());
+                  }
+                  return 1;
+                }
+              }
+            }
+
             // Throw a stack overflow exception.
             // Guard pages will be reenabled while unwinding the stack.
+            thread->disable_stack_yellow_reserved_zone();
             stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
           } else {
             // Thread was in the vm or native code. Return and try to finish.
+            thread->disable_stack_yellow_reserved_zone();
             return 1;
           }
         } else if (thread->in_stack_red_zone(addr)) {

View File

@@ -67,6 +67,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/jniHandles.hpp"
 #include "runtime/mutex.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/synchronizer.hpp"
 #include "utilities/growableArray.hpp"

@@ -76,6 +77,11 @@
 #include "trace/tracing.hpp"
 #endif

+// helper function to avoid in-line casts
+template <typename T> static T* load_ptr_acquire(T* volatile *p) {
+  return static_cast<T*>(OrderAccess::load_ptr_acquire(p));
+}
+
 ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;

 ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) :

@@ -147,20 +153,23 @@ void ClassLoaderData::Dependencies::oops_do(OopClosure* f) {
 }

 void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
-  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_ptr_acquire
+  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
     klass_closure->do_klass(k);
     assert(k != k->next_link(), "no loops!");
   }
 }

 void ClassLoaderData::classes_do(void f(Klass * const)) {
+  assert_locked_or_safepoint(_metaspace_lock);
   for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
     f(k);
   }
 }

 void ClassLoaderData::methods_do(void f(Method*)) {
-  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_ptr_acquire
+  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k->is_instance_klass()) {
       InstanceKlass::cast(k)->methods_do(f);
     }

@@ -179,7 +188,8 @@ void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
 }

 void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
-  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_ptr_acquire
+  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k->is_instance_klass()) {
       f(InstanceKlass::cast(k));
     }

@@ -188,6 +198,7 @@ void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
 }

 void ClassLoaderData::modules_do(void f(ModuleEntry*)) {
+  assert_locked_or_safepoint(Module_lock);
   if (_modules != NULL) {
     for (int i = 0; i < _modules->table_size(); i++) {
       for (ModuleEntry* entry = _modules->bucket(i);

@@ -200,9 +211,11 @@ void ClassLoaderData::modules_do(void f(ModuleEntry*)) {
 }

 void ClassLoaderData::packages_do(void f(PackageEntry*)) {
-  if (_packages != NULL) {
-    for (int i = 0; i < _packages->table_size(); i++) {
-      for (PackageEntry* entry = _packages->bucket(i);
+  // Lock-free access requires load_ptr_acquire
+  PackageEntryTable* packages = load_ptr_acquire(&_packages);
+  if (packages != NULL) {
+    for (int i = 0; i < packages->table_size(); i++) {
+      for (PackageEntry* entry = packages->bucket(i);
                                entry != NULL;
                                entry = entry->next()) {
         f(entry);

@@ -325,10 +338,9 @@ void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {
     MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
     Klass* old_value = _klasses;
     k->set_next_link(old_value);
-    // Make sure linked class is stable, since the class list is walked without a lock
-    OrderAccess::storestore();
-    // link the new item into the list
-    _klasses = k;
+    // Link the new item into the list, making sure the linked class is stable
+    // since the list can be walked without a lock
+    OrderAccess::release_store_ptr(&_klasses, k);
   }

   if (publicize && k->class_loader_data() != NULL) {

@@ -343,11 +355,10 @@ void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {
   }
 }

-// This is called by InstanceKlass::deallocate_contents() to remove the
-// scratch_class for redefine classes. We need a lock because there it may not
-// be called at a safepoint if there's an error.
+// Remove a klass from the _klasses list for scratch_class during redefinition
+// or parsed class in the case of an error.
 void ClassLoaderData::remove_class(Klass* scratch_class) {
-  MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
+  assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
   Klass* prev = NULL;
   for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
     if (k == scratch_class) {

@@ -390,42 +401,46 @@ void ClassLoaderData::unload() {
 PackageEntryTable* ClassLoaderData::packages() {
   // Lazily create the package entry table at first request.
-  if (_packages == NULL) {
+  // Lock-free access requires load_ptr_acquire.
+  PackageEntryTable* packages = load_ptr_acquire(&_packages);
+  if (packages == NULL) {
     MutexLockerEx m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
     // Check if _packages got allocated while we were waiting for this lock.
-    if (_packages == NULL) {
-      _packages = new PackageEntryTable(PackageEntryTable::_packagetable_entry_size);
+    if ((packages = _packages) == NULL) {
+      packages = new PackageEntryTable(PackageEntryTable::_packagetable_entry_size);
+      // Ensure _packages is stable, since it is examined without a lock
+      OrderAccess::release_store_ptr(&_packages, packages);
     }
   }
-  return _packages;
+  return packages;
 }

 ModuleEntryTable* ClassLoaderData::modules() {
   // Lazily create the module entry table at first request.
-  if (_modules == NULL) {
+  // Lock-free access requires load_ptr_acquire.
+  ModuleEntryTable* modules = load_ptr_acquire(&_modules);
+  if (modules == NULL) {
     MutexLocker m1(Module_lock);
-    // Check again if _modules has been allocated while we were getting this lock.
-    if (_modules != NULL) {
-      return _modules;
-    }
-
-    ModuleEntryTable* temp_table = new ModuleEntryTable(ModuleEntryTable::_moduletable_entry_size);
-    // Each loader has one unnamed module entry. Create it before
-    // any classes, loaded by this loader, are defined in case
-    // they end up being defined in loader's unnamed module.
-    temp_table->create_unnamed_module(this);
-
-    {
-      MutexLockerEx m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
-      // Ensure _modules is stable, since it is examined without a lock
-      OrderAccess::storestore();
-      _modules = temp_table;
+    // Check if _modules got allocated while we were waiting for this lock.
+    if ((modules = _modules) == NULL) {
+      modules = new ModuleEntryTable(ModuleEntryTable::_moduletable_entry_size);
+      // Each loader has one unnamed module entry. Create it before
+      // any classes, loaded by this loader, are defined in case
+      // they end up being defined in loader's unnamed module.
+      modules->create_unnamed_module(this);
+
+      {
+        MutexLockerEx m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
+        // Ensure _modules is stable, since it is examined without a lock
+        OrderAccess::release_store_ptr(&_modules, modules);
+      }
     }
   }
-  return _modules;
+  return modules;
 }

 oop ClassLoaderData::keep_alive_object() const {
+  assert_locked_or_safepoint(_metaspace_lock);
   assert(!keep_alive(), "Don't use with CLDs that are artificially kept alive");
   return is_anonymous() ? _klasses->java_mirror() : class_loader();
 }

@@ -499,30 +514,33 @@ Metaspace* ClassLoaderData::metaspace_non_null() {
   // to create smaller arena for Reflection class loaders also.
   // The reason for the delayed allocation is because some class loaders are
   // simply for delegating with no metadata of their own.
-  if (_metaspace == NULL) {
-    MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
-    // Check again if metaspace has been allocated while we were getting this lock.
-    if (_metaspace != NULL) {
-      return _metaspace;
-    }
-    if (this == the_null_class_loader_data()) {
-      assert (class_loader() == NULL, "Must be");
-      set_metaspace(new Metaspace(_metaspace_lock, Metaspace::BootMetaspaceType));
-    } else if (is_anonymous()) {
-      if (class_loader() != NULL) {
-        log_trace(class, loader, data)("is_anonymous: %s", class_loader()->klass()->internal_name());
-      }
-      set_metaspace(new Metaspace(_metaspace_lock, Metaspace::AnonymousMetaspaceType));
-    } else if (class_loader()->is_a(SystemDictionary::reflect_DelegatingClassLoader_klass())) {
-      if (class_loader() != NULL) {
-        log_trace(class, loader, data)("is_reflection: %s", class_loader()->klass()->internal_name());
-      }
-      set_metaspace(new Metaspace(_metaspace_lock, Metaspace::ReflectionMetaspaceType));
-    } else {
-      set_metaspace(new Metaspace(_metaspace_lock, Metaspace::StandardMetaspaceType));
+  // Lock-free access requires load_ptr_acquire.
+  Metaspace* metaspace = load_ptr_acquire(&_metaspace);
+  if (metaspace == NULL) {
+    MutexLockerEx ml(_metaspace_lock, Mutex::_no_safepoint_check_flag);
+    // Check if _metaspace got allocated while we were waiting for this lock.
+    if ((metaspace = _metaspace) == NULL) {
+      if (this == the_null_class_loader_data()) {
+        assert (class_loader() == NULL, "Must be");
+        metaspace = new Metaspace(_metaspace_lock, Metaspace::BootMetaspaceType);
+      } else if (is_anonymous()) {
+        if (class_loader() != NULL) {
+          log_trace(class, loader, data)("is_anonymous: %s", class_loader()->klass()->internal_name());
+        }
+        metaspace = new Metaspace(_metaspace_lock, Metaspace::AnonymousMetaspaceType);
+      } else if (class_loader()->is_a(SystemDictionary::reflect_DelegatingClassLoader_klass())) {
+        if (class_loader() != NULL) {
+          log_trace(class, loader, data)("is_reflection: %s", class_loader()->klass()->internal_name());
+        }
+        metaspace = new Metaspace(_metaspace_lock, Metaspace::ReflectionMetaspaceType);
+      } else {
+        metaspace = new Metaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
+      }
+      // Ensure _metaspace is stable, since it is examined without a lock
+      OrderAccess::release_store_ptr(&_metaspace, metaspace);
     }
   }
-  return _metaspace;
+  return metaspace;
 }

 JNIHandleBlock* ClassLoaderData::handles() const { return _handles; }

@@ -638,6 +656,7 @@ void ClassLoaderData::dump(outputStream * const out) {
 #endif // PRODUCT

 void ClassLoaderData::verify() {
+  assert_locked_or_safepoint(_metaspace_lock);
   oop cl = class_loader();

   guarantee(this == class_loader_data(cl) || is_anonymous(), "Must be the same");

@@ -656,7 +675,8 @@ void ClassLoaderData::verify() {
 }

 bool ClassLoaderData::contains_klass(Klass* klass) {
-  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_ptr_acquire
+  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k == klass) return true;
   }
   return false;

@@ -1046,6 +1066,7 @@ ClassLoaderDataGraphKlassIteratorAtomic::ClassLoaderDataGraphKlassIteratorAtomic
   // Find the first klass in the CLDG.
   while (cld != NULL) {
+    assert_locked_or_safepoint(cld->metaspace_lock());
     klass = cld->_klasses;
     if (klass != NULL) {
       _next_klass = klass;

@@ -1063,6 +1084,7 @@ Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass_in_cldg(Klass* klass)
   // No more klasses in the current CLD. Time to find a new CLD.
   ClassLoaderData* cld = klass->class_loader_data();
+  assert_locked_or_safepoint(cld->metaspace_lock());
   while (next == NULL) {
     cld = cld->next();
     if (cld == NULL) {
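
Note: the hunks above repeatedly apply one idiom, double-checked locking with acquire/release publication. Below is a minimal, runnable C++11 model of it, with std::atomic and std::mutex standing in for HotSpot's OrderAccess and MutexLockerEx (Table and get_table are illustrative names, not HotSpot code). The same idiom reappears further down for InstanceKlass::_oop_map_cache:

    #include <atomic>
    #include <cstdio>
    #include <mutex>

    struct Table { int buckets; };

    static std::atomic<Table*> g_table(nullptr);
    static std::mutex g_lock;

    Table* get_table() {
      // Fast path: load-acquire pairs with the store-release below, so a reader
      // that sees the pointer also sees the fully constructed Table.
      Table* t = g_table.load(std::memory_order_acquire);
      if (t == nullptr) {
        std::lock_guard<std::mutex> guard(g_lock);
        // Re-check under the lock, as in "if ((packages = _packages) == NULL)".
        t = g_table.load(std::memory_order_relaxed);
        if (t == nullptr) {
          t = new Table{509};
          g_table.store(t, std::memory_order_release);  // publish
        }
      }
      return t;
    }

    int main() {
      printf("%d\n", get_table()->buckets);  // 509
      return 0;
    }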

View File

@@ -171,8 +171,8 @@ class ClassLoaderData : public CHeapObj<mtClass> {
   Dependencies _dependencies; // holds dependencies from this class loader
                               // data to others.

-  Metaspace * _metaspace;  // Meta-space where meta-data defined by the
+  Metaspace * volatile _metaspace;  // Meta-space where meta-data defined by the
                            // classes in the class loader are allocated.
   Mutex* _metaspace_lock;  // Locks the metaspace for allocations and setup.
   bool _unloading;         // true if this class loader goes away
   bool _is_anonymous;      // if this CLD is for an anonymous class

@@ -186,9 +186,9 @@ class ClassLoaderData : public CHeapObj<mtClass> {
   JNIHandleBlock* _handles; // Handles to constant pool arrays, Modules, etc, which
                             // have the same life cycle of the corresponding ClassLoader.

-  Klass* _klasses;  // The classes defined by the class loader.
-  PackageEntryTable* _packages;  // The packages defined by the class loader.
-  ModuleEntryTable* _modules;  // The modules defined by the class loader.
+  Klass* volatile _klasses;  // The classes defined by the class loader.
+  PackageEntryTable* volatile _packages;  // The packages defined by the class loader.
+  ModuleEntryTable* volatile _modules;  // The modules defined by the class loader.

   // These method IDs are created for the class loader and set to NULL when the
   // class loader is unloaded. They are rarely freed, only for redefine classes

@@ -216,8 +216,6 @@ class ClassLoaderData : public CHeapObj<mtClass> {
   ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies);
   ~ClassLoaderData();

-  void set_metaspace(Metaspace* m) { _metaspace = m; }
-
   JNIHandleBlock* handles() const;
   void set_handles(JNIHandleBlock* handles);

View File

@@ -966,20 +966,18 @@ void LinkResolver::resolve_static_call(CallInfo& result,
   methodHandle resolved_method = linktime_resolve_static_method(link_info, CHECK);

   // The resolved class can change as a result of this resolution.
-  KlassHandle resolved_klass = KlassHandle(THREAD, resolved_method->method_holder());
+  KlassHandle resolved_klass(THREAD, resolved_method->method_holder());

-  Method* save_resolved_method = resolved_method();
   // Initialize klass (this should only happen if everything is ok)
   if (initialize_class && resolved_klass->should_be_initialized()) {
     resolved_klass->initialize(CHECK);
-    // Use updated LinkInfo (to reresolve with resolved_klass as method_holder?)
+    // Use updated LinkInfo to reresolve with resolved method holder
     LinkInfo new_info(resolved_klass, link_info.name(), link_info.signature(),
                       link_info.current_klass(),
                       link_info.check_access() ? LinkInfo::needs_access_check : LinkInfo::skip_access_check);
     resolved_method = linktime_resolve_static_method(new_info, CHECK);
   }

-  assert(save_resolved_method == resolved_method(), "does this change?");
   // setup result
   result.set_static(resolved_klass, resolved_method, CHECK);
 }

View File

@@ -1104,21 +1104,21 @@ void InstanceKlass::call_class_initializer_impl(instanceKlassHandle this_k, TRAP
 void InstanceKlass::mask_for(const methodHandle& method, int bci,
                              InterpreterOopMap* entry_for) {
-  // Dirty read, then double-check under a lock.
-  if (_oop_map_cache == NULL) {
-    // Otherwise, allocate a new one.
+  // Lazily create the _oop_map_cache at first request
+  // Lock-free access requires load_ptr_acquire.
+  OopMapCache* oop_map_cache =
+    static_cast<OopMapCache*>(OrderAccess::load_ptr_acquire(&_oop_map_cache));
+  if (oop_map_cache == NULL) {
     MutexLocker x(OopMapCacheAlloc_lock);
-    // First time use. Allocate a cache in C heap
-    if (_oop_map_cache == NULL) {
-      // Release stores from OopMapCache constructor before assignment
-      // to _oop_map_cache. C++ compilers on ppc do not emit the
-      // required memory barrier only because of the volatile
-      // qualifier of _oop_map_cache.
-      OrderAccess::release_store_ptr(&_oop_map_cache, new OopMapCache());
+    // Check if _oop_map_cache was allocated while we were waiting for this lock
+    if ((oop_map_cache = _oop_map_cache) == NULL) {
+      oop_map_cache = new OopMapCache();
+      // Ensure _oop_map_cache is stable, since it is examined without a lock
+      OrderAccess::release_store_ptr(&_oop_map_cache, oop_map_cache);
     }
   }
-  // _oop_map_cache is constant after init; lookup below does is own locking.
-  _oop_map_cache->lookup(method, bci, entry_for);
+  // _oop_map_cache is constant after init; lookup below does its own locking.
+  oop_map_cache->lookup(method, bci, entry_for);
 }

View File

@@ -23,7 +23,10 @@
  */

 #include "precompiled.hpp"
+#include "runtime/atomic.inline.hpp"
+#include "runtime/os.hpp"
 #include "runtime/threadCritical.hpp"
+#include "services/memTracker.hpp"
 #include "services/virtualMemoryTracker.hpp"

 size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
@@ -52,46 +55,41 @@ bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const
   if (all_committed()) return true;

   CommittedMemoryRegion committed_rgn(addr, size, stack);
-  LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.find_node(committed_rgn);
-  if (node != NULL) {
+  LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.head();
+
+  while (node != NULL) {
     CommittedMemoryRegion* rgn = node->data();
     if (rgn->same_region(addr, size)) {
       return true;
     }

     if (rgn->adjacent_to(addr, size)) {
-      // check if the next region covers this committed region,
-      // the regions may not be merged due to different call stacks
-      LinkedListNode<CommittedMemoryRegion>* next =
-        node->next();
-      if (next != NULL && next->data()->contain_region(addr, size)) {
-        if (next->data()->same_region(addr, size)) {
-          next->data()->set_call_stack(stack);
-        }
-        return true;
-      }
-      if (rgn->call_stack()->equals(stack)) {
+      // special case to expand prior region if there is no next region
+      LinkedListNode<CommittedMemoryRegion>* next = node->next();
+      if (next == NULL && rgn->call_stack()->equals(stack)) {
         VirtualMemorySummary::record_uncommitted_memory(rgn->size(), flag());
         // the two adjacent regions have the same call stack, merge them
         rgn->expand_region(addr, size);
         VirtualMemorySummary::record_committed_memory(rgn->size(), flag());
         return true;
       }
-      VirtualMemorySummary::record_committed_memory(size, flag());
-      if (rgn->base() > addr) {
-        return _committed_regions.insert_before(committed_rgn, node) != NULL;
-      } else {
-        return _committed_regions.insert_after(committed_rgn, node) != NULL;
-      }
     }
-    assert(rgn->contain_region(addr, size), "Must cover this region");
-    return true;
-  } else {
-    // New committed region
-    VirtualMemorySummary::record_committed_memory(size, flag());
-    return add_committed_region(committed_rgn);
+
+    if (rgn->overlap_region(addr, size)) {
+      // Clear a space for this region in the case it overlaps with any regions.
+      remove_uncommitted_region(addr, size);
+      break;  // commit below
+    }
+    if (rgn->end() >= addr + size) {
+      break;
+    }
+    node = node->next();
   }
+
+  // New committed region
+  VirtualMemorySummary::record_committed_memory(size, flag());
+  return add_committed_region(committed_rgn);
 }

 void ReservedMemoryRegion::set_all_committed(bool b) {
   if (all_committed() != b) {
@@ -175,48 +173,52 @@ bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
       }
     }
   } else {
-    // we have to walk whole list to remove the committed regions in
-    // specified range
-    LinkedListNode<CommittedMemoryRegion>* head =
-      _committed_regions.head();
-    LinkedListNode<CommittedMemoryRegion>* prev = NULL;
-    VirtualMemoryRegion uncommitted_rgn(addr, sz);
-
-    while (head != NULL && !uncommitted_rgn.is_empty()) {
-      CommittedMemoryRegion* crgn = head->data();
-      // this committed region overlaps to region to uncommit
-      if (crgn->overlap_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
-        if (crgn->same_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
-          // find matched region, remove the node will do
-          VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag());
-          _committed_regions.remove_after(prev);
-          return true;
-        } else if (crgn->contain_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
-          // this committed region contains whole uncommitted region
-          VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag());
-          return remove_uncommitted_region(head, uncommitted_rgn.base(), uncommitted_rgn.size());
-        } else if (uncommitted_rgn.contain_region(crgn->base(), crgn->size())) {
-          // this committed region has been uncommitted
-          size_t exclude_size = crgn->end() - uncommitted_rgn.base();
-          uncommitted_rgn.exclude_region(uncommitted_rgn.base(), exclude_size);
-          VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
-          LinkedListNode<CommittedMemoryRegion>* tmp = head;
-          head = head->next();
-          _committed_regions.remove_after(prev);
-          continue;
-        } else if (crgn->contain_address(uncommitted_rgn.base())) {
-          size_t toUncommitted = crgn->end() - uncommitted_rgn.base();
-          crgn->exclude_region(uncommitted_rgn.base(), toUncommitted);
-          uncommitted_rgn.exclude_region(uncommitted_rgn.base(), toUncommitted);
-          VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag());
-        } else if (uncommitted_rgn.contain_address(crgn->base())) {
-          size_t toUncommitted = uncommitted_rgn.end() - crgn->base();
-          crgn->exclude_region(crgn->base(), toUncommitted);
-          uncommitted_rgn.exclude_region(uncommitted_rgn.end() - toUncommitted,
-                                         toUncommitted);
-          VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag());
-        }
-      }
+    CommittedMemoryRegion del_rgn(addr, sz, *call_stack());
+    address end = addr + sz;
+
+    LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
+    LinkedListNode<CommittedMemoryRegion>* prev = NULL;
+    CommittedMemoryRegion* crgn;
+
+    while (head != NULL) {
+      crgn = head->data();
+
+      if (crgn->same_region(addr, sz)) {
+        VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
+        _committed_regions.remove_after(prev);
+        return true;
+      }
+
+      // del_rgn contains crgn
+      if (del_rgn.contain_region(crgn->base(), crgn->size())) {
+        VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
+        head = head->next();
+        _committed_regions.remove_after(prev);
+        continue;  // don't update head or prev
+      }
+
+      // Found addr in the current crgn. There are 2 subcases:
+      if (crgn->contain_address(addr)) {
+        // (1) Found addr+size in current crgn as well. (del_rgn is contained in crgn)
+        if (crgn->contain_address(end - 1)) {
+          VirtualMemorySummary::record_uncommitted_memory(sz, flag());
+          return remove_uncommitted_region(head, addr, sz);  // done!
+        } else {
+          // (2) Did not find del_rgn's end in crgn.
+          size_t size = crgn->end() - del_rgn.base();
+          crgn->exclude_region(addr, size);
+          VirtualMemorySummary::record_uncommitted_memory(size, flag());
+        }
+      } else if (crgn->contain_address(end - 1)) {
+        // Found del_rgn's end, but not its base addr.
+        size_t size = del_rgn.end() - crgn->base();
+        crgn->exclude_region(crgn->base(), size);
+        VirtualMemorySummary::record_uncommitted_memory(size, flag());
+        return true;  // should be done if the list is sorted properly!
+      }
+
       prev = head;
       head = head->next();
     }
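
Note: the subcase arithmetic above is easy to check on paper. A small, runnable model of subcase (2), a deletion range that starts inside a committed region and runs past its end, using half-open [base, base+size) intervals (Rgn is an illustrative type, not NMT code):

    #include <cstdio>

    struct Rgn {
      long base, size;
      long end() const { return base + size; }
      bool contain_address(long a) const { return a >= base && a < end(); }
    };

    int main() {
      Rgn crgn = {100, 50};            // committed [100, 150)
      long addr = 130, sz = 40;        // uncommit  [130, 170)
      long end = addr + sz;

      if (crgn.contain_address(addr) && !crgn.contain_address(end - 1)) {
        // Subcase (2): trim the tail, as crgn->exclude_region(addr, size) does.
        long size = crgn.end() - addr;  // 20 bytes of crgn are uncommitted
        crgn.size -= size;
        printf("crgn trimmed to [%ld, %ld)\n", crgn.base, crgn.end());  // [100, 130)
      }
      return 0;
    }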
@@ -386,7 +388,8 @@ bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
   assert(reserved_rgn != NULL, "No reserved region");
   assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
-  return reserved_rgn->add_committed_region(addr, size, stack);
+  bool result = reserved_rgn->add_committed_region(addr, size, stack);
+  return result;
 }

 bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {

@@ -398,7 +401,8 @@ bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size)
   ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
   assert(reserved_rgn != NULL, "No reserved region");
   assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
-  return reserved_rgn->remove_uncommitted_region(addr, size);
+  bool result = reserved_rgn->remove_uncommitted_region(addr, size);
+  return result;
 }

 bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {

@@ -488,5 +492,3 @@ bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel
   return true;
 }
-
-

View File

@@ -261,8 +261,7 @@ class CommittedMemoryRegion : public VirtualMemoryRegion {
     VirtualMemoryRegion(addr, size), _stack(stack) { }

   inline int compare(const CommittedMemoryRegion& rgn) const {
-    if (overlap_region(rgn.base(), rgn.size()) ||
-        adjacent_to  (rgn.base(), rgn.size())) {
+    if (overlap_region(rgn.base(), rgn.size())) {
       return 0;
     } else {
       if (base() == rgn.base()) {

View File

@@ -259,6 +259,11 @@ template <class E, ResourceObj::allocation_type T = ResourceObj::C_HEAP,
   virtual bool remove(LinkedListNode<E>* node) {
     LinkedListNode<E>* p = this->head();
+    if (p == node) {
+      this->set_head(p->next());
+      delete_node(node);
+      return true;
+    }
     while (p != NULL && p->next() != node) {
       p = p->next();
     }
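
Note: the bug this hunk fixes is that remove() scanned only via p->next(), so the head node itself could never match. A self-contained, runnable C++ model of the corrected logic (Node and remove_node are illustrative names, not the HotSpot template):

    #include <cassert>

    struct Node { int val; Node* next; };

    static bool remove_node(Node** head, Node* node) {
      Node* p = *head;
      if (p == node) {            // the newly added head special case
        *head = p->next;
        delete node;
        return true;
      }
      while (p != nullptr && p->next != node) {
        p = p->next;
      }
      if (p == nullptr) return false;
      p->next = node->next;
      delete node;
      return true;
    }

    int main() {
      Node* b = new Node{2, nullptr};
      Node* a = new Node{1, b};
      Node* head = a;
      assert(remove_node(&head, a));  // removing the head node now succeeds
      assert(head == b);
      delete b;
      return 0;
    }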

View File

@@ -28,6 +28,7 @@
  * @requires vm.gc=="G1" | vm.gc=="null"
  * @requires vm.opt.FlightRecorder != true
  * @requires vm.opt.ExplicitGCInvokesConcurrent != true
+ * @requires vm.opt.MaxGCPauseMillis == "null"
  * @library /testlibrary /test/lib /
  * @modules java.base/jdk.internal.misc
  * @modules java.management

View File

@@ -25,6 +25,7 @@
  * @test TestLogging
  * @summary Check that a mixed GC is reflected in the gc logs
  * @requires vm.gc=="G1" | vm.gc=="null"
+ * @requires vm.opt.MaxGCPauseMillis == "null"
  * @library /testlibrary /test/lib
  * @modules java.base/jdk.internal.misc
  * @modules java.management
* @modules java.management * @modules java.management

View File

@@ -33,6 +33,7 @@ import sun.hotspot.WhiteBox;
  * @key stress
  * @requires vm.gc=="G1" | vm.gc=="null"
  * @requires os.maxMemory > 2G
+ * @requires vm.opt.MaxGCPauseMillis == "null"
  *
  * @summary Stress G1 Remembered Set using multiple threads
  * @modules java.base/jdk.internal.misc

View File

@@ -30,6 +30,7 @@ import sun.hotspot.WhiteBox;
  * @bug 8146984 8147087
  * @requires vm.gc=="G1" | vm.gc=="null"
  * @requires os.maxMemory > 3G
+ * @requires vm.opt.MaxGCPauseMillis == "null"
  *
  * @summary Stress G1 Remembered Set by creating a lot of cross region links
  * @modules java.base/jdk.internal.misc

View File

@@ -0,0 +1,144 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @summary Test commits of overlapping regions of memory.
* @key nmt jcmd
* @library /testlibrary /test/lib
* @modules java.base/jdk.internal.misc
* java.management
* @build CommitOverlappingRegions
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail CommitOverlappingRegions
*/
import jdk.test.lib.*;
import sun.hotspot.WhiteBox;
public class CommitOverlappingRegions {
public static WhiteBox wb = WhiteBox.getWhiteBox();
public static void main(String args[]) throws Exception {
OutputAnalyzer output;
long size = 32 * 1024;
long addr = wb.NMTReserveMemory(8*size);
String pid = Long.toString(ProcessTools.getProcessId());
ProcessBuilder pb = new ProcessBuilder();
pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail"});
System.out.println("Address is " + Long.toHexString(addr));
// Start: . . . . . . . .
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB, committed=0KB)");
// Committing: * * * . . . . .
// Region: * * * . . . . .
// Expected Total: 3 x 32KB = 96KB
wb.NMTCommitMemory(addr + 0*size, 3*size);
// Committing: . . . . * * * .
// Region: * * * . * * * .
// Expected Total: 6 x 32KB = 192KB
wb.NMTCommitMemory(addr + 4*size, 3*size);
// Check output after first 2 commits.
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB, committed=192KB)");
// Committing: . . * * * . . .
// Region: * * * * * * * .
// Expected Total: 7 x 32KB = 224KB
wb.NMTCommitMemory(addr + 2*size, 3*size);
// Check output after overlapping commit.
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB, committed=224KB)");
// Uncommitting: * * * * * * * *
// Region: . . . . . . . .
// Expected Total: 0 x 32KB = 0KB
wb.NMTUncommitMemory(addr + 0*size, 8*size);
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB, committed=0KB)");
// Committing: * * . . . . . .
// Region: * * . . . . . .
// Expected Total: 2 x 32KB = 64KB
wb.NMTCommitMemory(addr + 0*size, 2*size);
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB, committed=64KB)");
// Committing: . * * * . . . .
// Region: * * * * . . . .
// Expected Total: 4 x 32KB = 128KB
wb.NMTCommitMemory(addr + 1*size, 3*size);
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB, committed=128KB)");
// Uncommitting: * * * . . . . .
// Region: . . . * . . . .
// Expected Total: 1 x 32KB = 32KB
wb.NMTUncommitMemory(addr + 0*size, 3*size);
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB, committed=32KB)");
// Committing: . . . * * . . .
// Region: . . . * * . . .
// Expected Total: 2 x 32KB = 64KB
wb.NMTCommitMemory(addr + 3*size, 2*size);
System.out.println("Address is " + Long.toHexString(addr + 3*size));
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB, committed=64KB)");
// Committing: . . . . * * . .
// Region: . . . * * * . .
// Expected Total: 3 x 32KB = 96KB
wb.NMTCommitMemory(addr + 4*size, 2*size);
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB, committed=96KB)");
// Committing: . . . . . * * .
// Region: . . . * * * * .
// Expected Total: 4 x 32KB = 128KB
wb.NMTCommitMemory(addr + 5*size, 2*size);
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB, committed=128KB)");
// Committing: . . . . . . * *
// Region: . . . * * * * *
// Expected Total: 5 x 32KB = 160KB
wb.NMTCommitMemory(addr + 6*size, 2*size);
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB, committed=160KB)");
// Uncommitting: * * * * * * * *
// Region: . . . . . . . .
// Expected Total: 0 x 32KB = 0KB
wb.NMTUncommitMemory(addr + 0*size, 8*size);
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB, committed=0KB)");
}
}

View File

@@ -27,6 +27,7 @@
  * @modules java.base/jdk.internal.misc
  * @modules java.base/jdk.internal.vm.annotation
  * @build jdk.test.lib.*
+ * @run main/othervm -Xint ReservedStackTest
  * @run main/othervm -XX:-Inline -XX:CompileCommand=exclude,java/util/concurrent/locks/AbstractOwnableSynchronizer.setExclusiveOwnerThread ReservedStackTest
  */
@@ -196,9 +197,12 @@ public class ReservedStackTest {
         System.out.println("Test started execution at frame = " + (counter - deframe));
         String result = test.getResult();
         // The feature is not fully implemented on all platforms,
-        // corruptions are still possible
-        boolean supportedPlatform = Platform.isSolaris() || Platform.isOSX()
-            || (Platform.isLinux() && (Platform.isX86() || Platform.isX64()));
+        // corruptions are still possible.
+        boolean supportedPlatform =
+            Platform.isAix() ||
+            (Platform.isLinux() && (Platform.isPPC() || Platform.isX64() || Platform.isX86())) ||
+            Platform.isOSX() ||
+            Platform.isSolaris();
         if (supportedPlatform && !result.contains("PASSED")) {
             System.out.println(result);
             throw new Error(result);

View File

@@ -0,0 +1,36 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test ReservedStackTestCompiler
* @summary Run ReservedStackTest with dedicated compilers C1 and C2.
* @requires vm.flavor == "server"
* @library /testlibrary
* @modules java.base/jdk.internal.misc
* @modules java.base/jdk.internal.vm.annotation
* @build jdk.test.lib.* ReservedStackTest
* @run main/othervm -XX:+TieredCompilation -XX:TieredStopAtLevel=1 -XX:-Inline -XX:CompileCommand=exclude,java/util/concurrent/locks/AbstractOwnableSynchronizer.setExclusiveOwnerThread ReservedStackTest
* @run main/othervm -XX:-TieredCompilation -XX:-Inline -XX:CompileCommand=exclude,java/util/concurrent/locks/AbstractOwnableSynchronizer.setExclusiveOwnerThread ReservedStackTest
*/
// Intentionally left blank. Just runs ReservedStackTest with @requires annotation.