8046936: JEP 270: Reserved Stack Areas for Critical Sections

Reviewed-by: acorn, dcubed
Author: Frederic Parain 2015-12-11 09:07:07 -08:00
parent f6440f7fb1
commit ef800bd53f
69 changed files with 987 additions and 64 deletions
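JEP 270 adds a reserved stack area just above the yellow and red guard zones. When the stack banging of a method annotated with @ReservedStackAccess reaches that area, the VM temporarily grants access to the reserved pages so the critical section can run to completion, and a delayed StackOverflowError is thrown once the annotated activation has exited. A minimal sketch of the Java-level contract, assuming a JDK build where jdk.internal.vm.annotation.ReservedStackAccess is accessible (the VM honors the annotation only for privileged code; the class below is hypothetical):

import jdk.internal.vm.annotation.ReservedStackAccess;
import java.util.concurrent.locks.ReentrantLock;

class CriticalSection {
    private final ReentrantLock lock = new ReentrantLock();
    private int state; // invariant: only mutated while the lock is held

    // With the StackReservedPages flag added by this commit set above zero,
    // a stack bang in this method that reaches the reserved zone lets the
    // method finish; the delayed StackOverflowError is delivered on the way
    // out, not mid-update.
    @ReservedStackAccess
    void update() {
        lock.lock();
        try {
            state++;
        } finally {
            lock.unlock();
        }
    }
}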

View File

@@ -58,14 +58,17 @@ define_pd_global(intx, InlineFrequencyCount, 100);
 #define DEFAULT_STACK_YELLOW_PAGES (2)
 #define DEFAULT_STACK_RED_PAGES (1)
 #define DEFAULT_STACK_SHADOW_PAGES (4 DEBUG_ONLY(+5))
+#define DEFAULT_STACK_RESERVED_PAGES (0)
 
 #define MIN_STACK_YELLOW_PAGES 1
 #define MIN_STACK_RED_PAGES 1
 #define MIN_STACK_SHADOW_PAGES 1
+#define MIN_STACK_RESERVED_PAGES (0)
 
 define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES);
 define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES);
 define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES);
+define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES);
 
 define_pd_global(bool, RewriteBytecodes, true);
 define_pd_global(bool, RewriteFrequentPairs, true);

View File

@@ -1453,6 +1453,9 @@ void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
 
 void LIR_Assembler::return_op(LIR_Opr result) {
+  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
+    __ reserved_stack_check();
+  }
   // the poll may need a register so just pick one that isn't the return register
 #if defined(TIERED) && !defined(_LP64)
   if (result->type_field() == LIR_OprDesc::long_type) {

View File

@@ -632,7 +632,7 @@ bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
 
   // stack frames shouldn't be much larger than max_stack elements
-  if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
+  if (fp() - unextended_sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
     return false;
   }

View File

@@ -54,4 +54,8 @@ const int StackAlignmentInBytes = (2*wordSize);
 #endif
 #endif
+
+#if defined(SOLARIS)
+#define SUPPORT_RESERVED_STACK_AREA
+#endif
 
 #endif // CPU_SPARC_VM_GLOBALDEFINITIONS_SPARC_HPP

View File

@@ -54,6 +54,7 @@ define_pd_global(intx, InlineSmallCode, 1500);
 #define DEFAULT_STACK_YELLOW_PAGES (2)
 #define DEFAULT_STACK_RED_PAGES (1)
+#define DEFAULT_STACK_RESERVED_PAGES (SOLARIS_ONLY(1) NOT_SOLARIS(0))
 
 #ifdef _LP64
 // Stack slots are 2X larger in LP64 than in the 32 bit VM.
@@ -69,10 +70,12 @@ define_pd_global(intx, VMThreadStackSize, 512);
 #define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
 #define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
 #define MIN_STACK_SHADOW_PAGES DEFAULT_STACK_SHADOW_PAGES
+#define MIN_STACK_RESERVED_PAGES (0)
 
 define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES);
 define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES);
 define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES);
+define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES);
 
 define_pd_global(bool, RewriteBytecodes, true);
 define_pd_global(bool, RewriteFrequentPairs, true);

View File

@@ -1140,6 +1140,19 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
   // save result (push state before jvmti call and pop it afterwards) and notify jvmti
   notify_method_exit(false, state, NotifyJVMTI);
 
+  if (StackReservedPages > 0) {
+    // testing if Stack Reserved Area needs to be re-enabled
+    Label no_reserved_zone_enabling;
+
+    ld_ptr(G2_thread, JavaThread::reserved_stack_activation_offset(), G3_scratch);
+    cmp_and_brx_short(SP, G3_scratch, Assembler::lessUnsigned, Assembler::pt, no_reserved_zone_enabling);
+
+    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), G2_thread);
+    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_delayed_StackOverflowError), G2_thread);
+    should_not_reach_here();
+
+    bind(no_reserved_zone_enabling);
+  }
+
   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
   verify_thread();

View File

@@ -3601,6 +3601,24 @@ void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
   }
 }
 
+void MacroAssembler::reserved_stack_check() {
+  // testing if reserved zone needs to be enabled
+  Label no_reserved_zone_enabling;
+
+  ld_ptr(G2_thread, JavaThread::reserved_stack_activation_offset(), G4_scratch);
+  cmp_and_brx_short(SP, G4_scratch, Assembler::lessUnsigned, Assembler::pt, no_reserved_zone_enabling);
+
+  call_VM_leaf(L0, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), G2_thread);
+
+  AddressLiteral stub(StubRoutines::throw_delayed_StackOverflowError_entry());
+  jump_to(stub, G4_scratch);
+  delayed()->restore();
+
+  should_not_reach_here();
+
+  bind(no_reserved_zone_enabling);
+}
+
 ///////////////////////////////////////////////////////////////////////////////////
 #if INCLUDE_ALL_GCS

View File

@@ -1422,6 +1422,9 @@ public:
   // stack overflow + shadow pages. Clobbers tsp and scratch registers.
   void bang_stack_size(Register Rsize, Register Rtsp, Register Rscratch);
 
+  // Check for reserved stack access in method being exited (for JIT)
+  void reserved_stack_check();
+
   virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset);
 
   void verify_tlab();

View File

@@ -1294,6 +1294,10 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
 
   __ verify_thread();
 
+  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
+    __ reserved_stack_check();
+  }
+
   // If this does safepoint polling, then do it here
   if(do_polling() && ra_->C->is_method_compilation()) {
     AddressLiteral polling_page(os::get_polling_page());

View File

@@ -5355,7 +5355,12 @@ class StubGenerator: public StubCodeGenerator {
 #endif  // COMPILER2 !=> _LP64
 
     // Build this early so it's available for the interpreter.
-    StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
+    StubRoutines::_throw_StackOverflowError_entry =
+            generate_throw_exception("StackOverflowError throw_exception",
+                                     CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
+    StubRoutines::_throw_delayed_StackOverflowError_entry =
+            generate_throw_exception("delayed StackOverflowError throw_exception",
+                                     CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError));
 
     if (UseCRC32Intrinsics) {
       // set table address before stub generation which use it

View File

@@ -518,6 +518,10 @@ void LIR_Assembler::return_op(LIR_Opr result) {
   // Pop the stack before the safepoint code
   __ remove_frame(initial_frame_size_in_bytes());
 
+  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
+    __ reserved_stack_check();
+  }
+
   bool result_is_oop = result->is_valid() ? result->is_oop() : false;
 
   // Note: we do not need to round double result; float result has the right precision

View File

@@ -57,4 +57,8 @@ const int StackAlignmentInBytes = 16;
 #define INCLUDE_RTM_OPT 1
 #endif
 #endif
+
+#if defined(LINUX) || defined(SOLARIS) || defined(__APPLE__)
+#define SUPPORT_RESERVED_STACK_AREA
+#endif
 
 #endif // CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP

View File

@@ -57,9 +57,11 @@ define_pd_global(intx, InlineSmallCode, 1000);
 #define DEFAULT_STACK_YELLOW_PAGES (NOT_WINDOWS(2) WINDOWS_ONLY(3))
 #define DEFAULT_STACK_RED_PAGES (1)
+#define DEFAULT_STACK_RESERVED_PAGES (NOT_WINDOWS(1) WINDOWS_ONLY(0))
 
 #define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
 #define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
+#define MIN_STACK_RESERVED_PAGES (0)
 
 #ifdef AMD64
 // Very large C++ stack frames using solaris-amd64 optimized builds
@@ -76,6 +78,7 @@ define_pd_global(intx, InlineSmallCode, 1000);
 define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES);
 define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES);
 define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES);
+define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES);
 define_pd_global(bool, RewriteBytecodes, true);
 define_pd_global(bool, RewriteFrequentPairs, true);

View File

@@ -1023,6 +1023,25 @@ void InterpreterMacroAssembler::remove_activation(
   // get sender sp
   movptr(rbx,
          Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
+  if (StackReservedPages > 0) {
+    // testing if reserved zone needs to be re-enabled
+    Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
+    Label no_reserved_zone_enabling;
+
+    NOT_LP64(get_thread(rthread);)
+
+    cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset()));
+    jcc(Assembler::lessEqual, no_reserved_zone_enabling);
+
+    call_VM_leaf(
+      CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
+    push(rthread);
+    call_VM(noreg, CAST_FROM_FN_PTR(address,
+                   InterpreterRuntime::throw_delayed_StackOverflowError));
+    should_not_reach_here();
+
+    bind(no_reserved_zone_enabling);
+  }
   leave();                           // remove frame anchor
   pop(ret_addr);                     // get return address
   mov(rsp, rbx);                     // set sp to sender sp
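The SPARC and x86 interpreter hunks above implement the same exit protocol: compare the sender SP against JavaThread::reserved_stack_activation, and once the activation that was granted reserved-zone access unwinds past that watermark, re-protect the zone (SharedRuntime::enable_stack_reserved_zone) and throw the delayed StackOverflowError. A sketch of the observable effect, loosely modeled on the ReserveStackTest regression test that accompanies JEP 270 (the class name is illustrative; the annotated lock paths live in java.util.concurrent, not in this hotspot change):

import java.util.concurrent.locks.ReentrantLock;

public class ReservedStackDemo {
    static final ReentrantLock LOCK = new ReentrantLock();

    static void recurse() {
        LOCK.lock();       // lock/unlock paths are @ReservedStackAccess-
        try {              // annotated in the JDK as part of JEP 270
            recurse();     // eventually bangs into the guard zones
        } finally {
            LOCK.unlock(); // must complete even while unwinding
        }
    }

    public static void main(String[] args) {
        try {
            recurse();
        } catch (StackOverflowError expected) {
            // With reserved stack areas, the overflow cannot tear
            // lock()/unlock() apart, so the lock ends up fully released.
            System.out.println("lock consistent: " + !LOCK.isLocked());
        }
    }
}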

View File

@@ -1067,6 +1067,22 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
   }
 }
 
+void MacroAssembler::reserved_stack_check() {
+  // testing if reserved zone needs to be enabled
+  Label no_reserved_zone_enabling;
+  Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
+  NOT_LP64(get_thread(rsi);)
+
+  cmpptr(rsp, Address(thread, JavaThread::reserved_stack_activation_offset()));
+  jcc(Assembler::below, no_reserved_zone_enabling);
+
+  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), thread);
+  jump(RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
+  should_not_reach_here();
+
+  bind(no_reserved_zone_enabling);
+}
+
 int MacroAssembler::biased_locking_enter(Register lock_reg,
                                          Register obj_reg,
                                          Register swap_reg,

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -641,6 +641,9 @@ class MacroAssembler: public Assembler {
   // stack overflow + shadow pages. Also, clobbers tmp
   void bang_stack_size(Register size, Register tmp);
 
+  // Check for reserved stack access in method being exited (for JIT)
+  void reserved_stack_check();
+
   virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                 Register tmp,
                                                 int offset);

View File

@@ -3290,7 +3290,10 @@ class StubGenerator: public StubCodeGenerator {
         CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
 
     // Build this early so it's available for the interpreter
-    StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
+    StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception",
+                                                                             CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
+    StubRoutines::_throw_delayed_StackOverflowError_entry = generate_throw_exception("delayed StackOverflowError throw_exception",
+                                                                                     CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError));
 
     if (UseCRC32Intrinsics) {
       // set table address before stub generation which use it

View File

@@ -4410,6 +4410,11 @@ class StubGenerator: public StubCodeGenerator {
                                CAST_FROM_FN_PTR(address,
                                                 SharedRuntime::
                                                 throw_StackOverflowError));
+    StubRoutines::_throw_delayed_StackOverflowError_entry =
+      generate_throw_exception("delayed StackOverflowError throw_exception",
+                               CAST_FROM_FN_PTR(address,
+                                                SharedRuntime::
+                                                throw_delayed_StackOverflowError));
     if (UseCRC32Intrinsics) {
       // set table address before stub generation which use it
       StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;

View File

@@ -541,8 +541,8 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
   __ subptr(rax, stack_size);
 
   // Use the maximum number of pages we might bang.
-  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
-                        (StackRedPages+StackYellowPages);
+  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages+StackReservedPages) ? StackShadowPages :
+                        (StackRedPages+StackYellowPages+StackReservedPages);
 
   // add in the red and yellow zone sizes
   __ addptr(rax, max_pages * page_size);

View File

@@ -670,17 +670,16 @@ void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
 void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
   Compile *C = ra_->C;
+  MacroAssembler _masm(&cbuf);
 
   if (C->max_vector_size() > 16) {
     // Clear upper bits of YMM registers when current compiled code uses
     // wide vectors to avoid AVX <-> SSE transition penalty during call.
-    MacroAssembler masm(&cbuf);
-    masm.vzeroupper();
+    _masm.vzeroupper();
   }
   // If method set FPU control word, restore to standard control word
   if (C->in_24_bit_fp_mode()) {
-    MacroAssembler masm(&cbuf);
-    masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
+    _masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
   }
 
   int framesize = C->frame_size_in_bytes();
@@ -702,6 +701,10 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
   emit_opcode(cbuf, 0x58 | EBP_enc);
 
+  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
+    __ reserved_stack_check();
+  }
+
   if (do_polling() && C->is_method_compilation()) {
     cbuf.relocate(cbuf.insts_end(), relocInfo::poll_return_type, 0);
     emit_opcode(cbuf,0x85);
@@ -729,6 +732,7 @@ uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
   } else {
     size += framesize ? 3 : 0;
   }
+  size += 64; // added to support ReservedStackAccess
 
   return size;
 }

View File

@@ -953,10 +953,11 @@ void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
 {
   Compile* C = ra_->C;
+  MacroAssembler _masm(&cbuf);
 
   if (C->max_vector_size() > 16) {
     // Clear upper bits of YMM registers when current compiled code uses
     // wide vectors to avoid AVX <-> SSE transition penalty during call.
-    MacroAssembler _masm(&cbuf);
     __ vzeroupper();
   }
 
@@ -984,6 +985,10 @@ void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
   // popq rbp
   emit_opcode(cbuf, 0x58 | RBP_enc);
 
+  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
+    __ reserved_stack_check();
+  }
+
   if (do_polling() && C->is_method_compilation()) {
     MacroAssembler _masm(&cbuf);
     AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type);

View File

@@ -48,14 +48,17 @@ define_pd_global(intx, InlineSmallCode, 1000 );
 #define DEFAULT_STACK_YELLOW_PAGES (2)
 #define DEFAULT_STACK_RED_PAGES (1)
 #define DEFAULT_STACK_SHADOW_PAGES (5 LP64_ONLY(+1) DEBUG_ONLY(+3))
+#define DEFAULT_STACK_RESERVED_PAGES (0)
 
 #define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
 #define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
 #define MIN_STACK_SHADOW_PAGES DEFAULT_STACK_SHADOW_PAGES
+#define MIN_STACK_RESERVED_PAGES (0)
 
 define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES);
 define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES);
 define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES);
+define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES);
 
 define_pd_global(bool, RewriteBytecodes, true);
 define_pd_global(bool, RewriteFrequentPairs, true);

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -170,7 +170,7 @@ final class HotSpotResolvedJavaMethodImpl extends HotSpotMethod implements HotSp
      * @return flags of this method
      */
     private int getFlags() {
-        return UNSAFE.getByte(metaspaceMethod + config().methodFlagsOffset);
+        return UNSAFE.getShort(metaspaceMethod + config().methodFlagsOffset);
     }
 
     /**

View File

@@ -1244,7 +1244,7 @@ public class HotSpotVMConfig {
     @HotSpotVMField(name = "Method::_access_flags", type = "AccessFlags", get = HotSpotVMField.Type.OFFSET) @Stable public int methodAccessFlagsOffset;
     @HotSpotVMField(name = "Method::_constMethod", type = "ConstMethod*", get = HotSpotVMField.Type.OFFSET) @Stable public int methodConstMethodOffset;
     @HotSpotVMField(name = "Method::_intrinsic_id", type = "u2", get = HotSpotVMField.Type.OFFSET) @Stable public int methodIntrinsicIdOffset;
-    @HotSpotVMField(name = "Method::_flags", type = "u1", get = HotSpotVMField.Type.OFFSET) @Stable public int methodFlagsOffset;
+    @HotSpotVMField(name = "Method::_flags", type = "u2", get = HotSpotVMField.Type.OFFSET) @Stable public int methodFlagsOffset;
     @HotSpotVMField(name = "Method::_vtable_index", type = "int", get = HotSpotVMField.Type.OFFSET) @Stable public int methodVtableIndexOffset;
     @HotSpotVMConstant(name = "Method::_jfr_towrite") @Stable public int methodFlagsJfrTowrite;

View File

@@ -3497,7 +3497,7 @@ jint os::init_2(void) {
   // Add in 2*BytesPerWord times page size to account for VM stack during
   // class initialization depending on 32 or 64 bit VM.
   os::Bsd::min_stack_allowed = MAX2(os::Bsd::min_stack_allowed,
-            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
+            (size_t)(StackReservedPages+StackYellowPages+StackRedPages+StackShadowPages+
                      2*BytesPerWord COMPILER2_PRESENT(+1)) * Bsd::page_size());
 
   size_t threadStackSizeInBytes = ThreadStackSize * K;

View File

@@ -99,6 +99,8 @@ class Bsd {
   static ExtendedPC fetch_frame_from_ucontext(Thread* thread, ucontext_t* uc,
                                               intptr_t** ret_sp, intptr_t** ret_fp);
 
+  static bool get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr);
+
   // This boolean allows users to forward their own non-matching signals
   // to JVM_handle_bsd_signal, harmlessly.
   static bool signal_handlers_are_installed;

View File

@@ -1862,7 +1862,7 @@ void * os::Linux::dll_load_in_vmthread(const char *filename, char *ebuf,
     while (jt) {
       if (!jt->stack_guard_zone_unused() &&     // Stack not yet fully initialized
-          jt->stack_yellow_zone_enabled()) {    // No pending stack overflow exceptions
+          jt->stack_guards_enabled()) {         // No pending stack overflow exceptions
         if (!os::guard_memory((char *) jt->stack_red_zone_base() - jt->stack_red_zone_size(),
                               jt->stack_yellow_zone_size() + jt->stack_red_zone_size())) {
           warning("Attempt to reguard stack yellow zone failed.");
@@ -4603,6 +4603,11 @@ void os::init(void) {
   if (vm_page_size() > (int)Linux::vm_default_page_size()) {
     StackYellowPages = 1;
     StackRedPages = 1;
+#if defined(IA32) || defined(IA64)
+    StackReservedPages = 1;
+#else
+    StackReservedPages = 0;
+#endif
     StackShadowPages = round_to((StackShadowPages*Linux::vm_default_page_size()), vm_page_size()) / vm_page_size();
   }
 
@@ -4664,7 +4669,7 @@ jint os::init_2(void) {
   // Add in 2*BytesPerWord times page size to account for VM stack during
   // class initialization depending on 32 or 64 bit VM.
   os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed,
-            (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Linux::page_size() +
+            (size_t)(StackReservedPages+StackYellowPages+StackRedPages+StackShadowPages) * Linux::page_size() +
             (2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::vm_default_page_size());
 
   size_t threadStackSizeInBytes = ThreadStackSize * K;

View File

@@ -136,6 +136,8 @@ class Linux {
   static ExtendedPC fetch_frame_from_ucontext(Thread* thread, ucontext_t* uc,
                                               intptr_t** ret_sp, intptr_t** ret_fp);
 
+  static bool get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr);
+
   // This boolean allows users to forward their own non-matching signals
   // to JVM_handle_linux_signal, harmlessly.
   static bool signal_handlers_are_installed;

View File

@@ -4382,6 +4382,7 @@ void os::init(void) {
   if (vm_page_size() > 8*K) {
     StackYellowPages = 1;
     StackRedPages = 1;
+    StackReservedPages = 1;
     StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
   }
 }
@@ -4438,7 +4439,7 @@ jint os::init_2(void) {
   // Add in 2*BytesPerWord times page size to account for VM stack during
   // class initialization depending on 32 or 64 bit VM.
   os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
-            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
+            (size_t)(StackReservedPages+StackYellowPages+StackRedPages+StackShadowPages+
                      2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
 
   size_t threadStackSizeInBytes = ThreadStackSize * K;

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -150,6 +150,8 @@ class Solaris {
   static ExtendedPC fetch_frame_from_ucontext(Thread* thread, ucontext_t* uc,
                                               intptr_t** ret_sp, intptr_t** ret_fp);
 
+  static bool get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr);
+
   static void hotspot_sigmask(Thread* thread);
 
   // SR_handler

View File

@@ -2374,6 +2374,39 @@ static inline void report_error(Thread* t, DWORD exception_code,
   // somewhere where we can find it in the minidump.
 }
 
+bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
+        struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
+  PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
+  address addr = (address) exceptionRecord->ExceptionInformation[1];
+  if (Interpreter::contains(pc)) {
+    *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
+    if (!fr->is_first_java_frame()) {
+      assert(fr->safe_for_sender(thread), "Safety check");
+      *fr = fr->java_sender();
+    }
+  } else {
+    // more complex code with compiled code
+    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
+    CodeBlob* cb = CodeCache::find_blob(pc);
+    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
+      // Not sure where the pc points to, fallback to default
+      // stack overflow handling
+      return false;
+    } else {
+      *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
+      // in compiled code, the stack banging is performed just after the return pc
+      // has been pushed on the stack
+      *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
+      if (!fr->is_java_frame()) {
+        assert(fr->safe_for_sender(thread), "Safety check");
+        *fr = fr->java_sender();
+      }
+    }
+  }
+  assert(fr->is_java_frame(), "Safety check");
+  return true;
+}
+
 //-----------------------------------------------------------------------------
 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
@@ -2550,7 +2583,16 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
               SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
       }
 #endif
-      if (thread->stack_yellow_zone_enabled()) {
+      if (thread->stack_guards_enabled()) {
+        if (_thread_in_Java) {
+          frame fr;
+          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
+          address addr = (address) exceptionRecord->ExceptionInformation[1];
+          if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
+            assert(fr.is_java_frame(), "Must be a Java frame");
+            SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
+          }
+        }
        // Yellow zone violation.  The o/s has unprotected the first yellow
        // zone page for us.  Note:  must call disable_stack_yellow_zone to
        // update the enabled status, even if the zone contains only one page.
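This Windows path mirrors the POSIX handlers further down: when a guard-zone fault arrives from Java code, the VM reconstructs the frame at the stack-banging point and SharedRuntime::look_for_reserved_stack_annotated_method walks the sender chain for an activation of an annotated method; without one, normal stack-overflow handling proceeds. A contrasting sketch, with hypothetical names, of a critical section that gets no such protection:

class UnprotectedSection {
    private int deposits;
    private int balance;

    synchronized void deposit(int amount) { // not @ReservedStackAccess
        deposits++;        // a StackOverflowError between these two
        balance += amount; // writes leaves the invariant broken
    }
}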

View File

@@ -110,6 +110,10 @@ class win32 {
   // Default stack size for the current process.
   static size_t default_stack_size() { return _default_stack_size; }
 
+  static bool get_frame_at_stack_banging_point(JavaThread* thread,
+                          struct _EXCEPTION_POINTERS* exceptionInfo,
+                          address pc, frame* fr);
+
 #ifndef _WIN64
   // A wrapper to install a structured exception handler for fast JNI accesors.
   static address fast_jni_accessor_wrapper(BasicType);

View File

@@ -325,6 +325,7 @@ intptr_t* os::Bsd::ucontext_get_fp(ucontext_t * uc) {
 // os::Solaris::fetch_frame_from_ucontext() tries to skip nested signal
 // frames. Currently we don't do that on Bsd, so it's the same as
 // os::fetch_frame_from_context().
+// This method is also used for stack overflow signal handling.
 ExtendedPC os::Bsd::fetch_frame_from_ucontext(Thread* thread,
   ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {
 
@@ -362,6 +363,48 @@ frame os::fetch_frame_from_context(void* ucVoid) {
   return frame(sp, fp, epc.pc());
 }
 
+frame os::fetch_frame_from_ucontext(Thread* thread, void* ucVoid) {
+  intptr_t* sp;
+  intptr_t* fp;
+  ExtendedPC epc = os::Bsd::fetch_frame_from_ucontext(thread, (ucontext_t*)ucVoid, &sp, &fp);
+  return frame(sp, fp, epc.pc());
+}
+
+bool os::Bsd::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) {
+  address pc = (address) os::Bsd::ucontext_get_pc(uc);
+  if (Interpreter::contains(pc)) {
+    // interpreter performs stack banging after the fixed frame header has
+    // been generated while the compilers perform it before. To maintain
+    // semantic consistency between interpreted and compiled frames, the
+    // method returns the Java sender of the current frame.
+    *fr = os::fetch_frame_from_ucontext(thread, uc);
+    if (!fr->is_first_java_frame()) {
+      assert(fr->safe_for_sender(thread), "Safety check");
+      *fr = fr->java_sender();
+    }
+  } else {
+    // more complex code with compiled code
+    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
+    CodeBlob* cb = CodeCache::find_blob(pc);
+    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
+      // Not sure where the pc points to, fallback to default
+      // stack overflow handling
+      return false;
+    } else {
+      *fr = os::fetch_frame_from_ucontext(thread, uc);
+      // in compiled code, the stack banging is performed just after the return pc
+      // has been pushed on the stack
+      *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
+      if (!fr->is_java_frame()) {
+        assert(fr->safe_for_sender(thread), "Safety check");
+        *fr = fr->java_sender();
+      }
+    }
+  }
+  assert(fr->is_java_frame(), "Safety check");
+  return true;
+}
+
 // By default, gcc always save frame pointer (%ebp/%rbp) on stack. It may get
 // turned off by -fomit-frame-pointer,
 frame os::get_sender_for_C_frame(frame* fr) {
@@ -479,13 +522,31 @@ JVM_handle_bsd_signal(int sig,
         addr >= thread->stack_base() - thread->stack_size()) {
       // stack overflow
       if (thread->in_stack_yellow_zone(addr)) {
-        thread->disable_stack_yellow_zone();
         if (thread->thread_state() == _thread_in_Java) {
+          if (thread->in_stack_reserved_zone(addr)) {
+            frame fr;
+            if (os::Bsd::get_frame_at_stack_banging_point(thread, uc, &fr)) {
+              assert(fr.is_java_frame(), "Must be a Java frame");
+              frame activation = SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
+              if (activation.sp() != NULL) {
+                thread->disable_stack_reserved_zone();
+                if (activation.is_interpreted_frame()) {
+                  thread->set_reserved_stack_activation((address)(
+                    activation.fp() + frame::interpreter_frame_initial_sp_offset));
+                } else {
+                  thread->set_reserved_stack_activation((address)activation.unextended_sp());
+                }
+                return 1;
+              }
+            }
+          }
          // Throw a stack overflow exception.  Guard pages will be reenabled
          // while unwinding the stack.
+          thread->disable_stack_yellow_zone();
          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
        } else {
          // Thread was in the vm or native code.  Return and try to finish.
+          thread->disable_stack_yellow_zone();
          return 1;
        }
      } else if (thread->in_stack_red_zone(addr)) {

View File

@@ -138,6 +138,7 @@ intptr_t* os::Linux::ucontext_get_fp(ucontext_t * uc) {
 // os::Solaris::fetch_frame_from_ucontext() tries to skip nested signal
 // frames. Currently we don't do that on Linux, so it's the same as
 // os::fetch_frame_from_context().
+// This method is also used for stack overflow signal handling.
 ExtendedPC os::Linux::fetch_frame_from_ucontext(Thread* thread,
   ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {
 
@@ -175,6 +176,50 @@ frame os::fetch_frame_from_context(void* ucVoid) {
   return frame(sp, fp, epc.pc());
 }
 
+frame os::fetch_frame_from_ucontext(Thread* thread, void* ucVoid) {
+  intptr_t* sp;
+  intptr_t* fp;
+  ExtendedPC epc = os::Linux::fetch_frame_from_ucontext(thread, (ucontext_t*)ucVoid, &sp, &fp);
+  return frame(sp, fp, epc.pc());
+}
+
+bool os::Linux::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) {
+  address pc = (address) os::Linux::ucontext_get_pc(uc);
+  if (Interpreter::contains(pc)) {
+    // interpreter performs stack banging after the fixed frame header has
+    // been generated while the compilers perform it before. To maintain
+    // semantic consistency between interpreted and compiled frames, the
+    // method returns the Java sender of the current frame.
+    *fr = os::fetch_frame_from_ucontext(thread, uc);
+    if (!fr->is_first_java_frame()) {
+      assert(fr->safe_for_sender(thread), "Safety check");
+      *fr = fr->java_sender();
+    }
+  } else {
+    // more complex code with compiled code
+    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
+    CodeBlob* cb = CodeCache::find_blob(pc);
+    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
+      // Not sure where the pc points to, fallback to default
+      // stack overflow handling
+      return false;
+    } else {
+      // in compiled code, the stack banging is performed just after the return pc
+      // has been pushed on the stack
+      intptr_t* fp = os::Linux::ucontext_get_fp(uc);
+      intptr_t* sp = os::Linux::ucontext_get_sp(uc);
+      *fr = frame(sp + 1, fp, (address)*sp);
+      if (!fr->is_java_frame()) {
+        assert(fr->safe_for_sender(thread), "Safety check");
+        assert(!fr->is_first_frame(), "Safety check");
+        *fr = fr->java_sender();
+      }
+    }
+  }
+  assert(fr->is_java_frame(), "Safety check");
+  return true;
+}
+
 // By default, gcc always save frame pointer (%ebp/%rbp) on stack. It may get
 // turned off by -fomit-frame-pointer,
 frame os::get_sender_for_C_frame(frame* fr) {
@@ -305,13 +350,32 @@ JVM_handle_linux_signal(int sig,
         addr >= thread->stack_base() - thread->stack_size()) {
       // stack overflow
       if (thread->in_stack_yellow_zone(addr)) {
-        thread->disable_stack_yellow_zone();
         if (thread->thread_state() == _thread_in_Java) {
+          if (thread->in_stack_reserved_zone(addr)) {
+            frame fr;
+            if (os::Linux::get_frame_at_stack_banging_point(thread, uc, &fr)) {
+              assert(fr.is_java_frame(), "Must be a Java frame");
+              frame activation =
+                SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
+              if (activation.sp() != NULL) {
+                thread->disable_stack_reserved_zone();
+                if (activation.is_interpreted_frame()) {
+                  thread->set_reserved_stack_activation((address)(
+                    activation.fp() + frame::interpreter_frame_initial_sp_offset));
+                } else {
+                  thread->set_reserved_stack_activation((address)activation.unextended_sp());
+                }
+                return 1;
+              }
+            }
+          }
          // Throw a stack overflow exception.  Guard pages will be reenabled
          // while unwinding the stack.
+          thread->disable_stack_yellow_zone();
          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
        } else {
          // Thread was in the vm or native code.  Return and try to finish.
+          thread->disable_stack_yellow_zone();
          return 1;
        }
      } else if (thread->in_stack_red_zone(addr)) {
@@ -868,7 +932,7 @@ void os::workaround_expand_exec_shield_cs_limit() {
    * we don't have much control or understanding of the address space, just let it slide.
    */
   char* hint = (char*) (Linux::initial_thread_stack_bottom() -
-                        ((StackYellowPages + StackRedPages + 1) * page_size));
+                        ((StackReservedPages + StackYellowPages + StackRedPages + 1) * page_size));
   char* codebuf = os::attempt_reserve_memory_at(page_size, hint);
   if ( (codebuf == NULL) || (!os::commit_memory(codebuf, page_size, true)) ) {
     return; // No matter, we tried, best effort.

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -213,6 +213,7 @@ address os::Solaris::ucontext_get_pc(ucontext_t *uc) {
 //
 // The difference between this and os::fetch_frame_from_context() is that
 // here we try to skip nested signal frames.
+// This method is also used for stack overflow signal handling.
 ExtendedPC os::Solaris::fetch_frame_from_ucontext(Thread* thread,
   ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {
 
@@ -252,6 +253,41 @@ frame os::fetch_frame_from_context(void* ucVoid) {
   return frame(sp, frame::unpatchable, epc.pc());
 }
 
+frame os::fetch_frame_from_ucontext(Thread* thread, void* ucVoid) {
+  intptr_t* sp;
+  ExtendedPC epc = os::Solaris::fetch_frame_from_ucontext(thread, (ucontext_t*)ucVoid, &sp, NULL);
+  return frame(sp, frame::unpatchable, epc.pc());
+}
+
+bool os::Solaris::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) {
+  address pc = (address) os::Solaris::ucontext_get_pc(uc);
+  if (Interpreter::contains(pc)) {
+    *fr = os::fetch_frame_from_ucontext(thread, uc);
+    if (!fr->is_first_java_frame()) {
+      assert(fr->safe_for_sender(thread), "Safety check");
+      *fr = fr->java_sender();
+    }
+  } else {
+    // more complex code with compiled code
+    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
+    CodeBlob* cb = CodeCache::find_blob(pc);
+    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
+      // Not sure where the pc points to, fallback to default
+      // stack overflow handling
+      return false;
+    } else {
+      *fr = os::fetch_frame_from_ucontext(thread, uc);
+      *fr = frame(fr->sender_sp(), frame::unpatchable, fr->sender_pc());
+      if (!fr->is_java_frame()) {
+        assert(fr->safe_for_sender(thread), "Safety check");
+        *fr = fr->java_sender();
+      }
+    }
+  }
+  assert(fr->is_java_frame(), "Safety check");
+  return true;
+}
+
 frame os::get_sender_for_C_frame(frame* fr) {
   return frame(fr->sender_sp(), frame::unpatchable, fr->sender_pc());
 }
@@ -367,17 +403,32 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
     if (sig == SIGSEGV && info->si_code == SEGV_ACCERR) {
       address addr = (address) info->si_addr;
       if (thread->in_stack_yellow_zone(addr)) {
-        thread->disable_stack_yellow_zone();
         // Sometimes the register windows are not properly flushed.
         if(uc->uc_mcontext.gwins != NULL) {
          ::handle_unflushed_register_windows(uc->uc_mcontext.gwins);
        }
        if (thread->thread_state() == _thread_in_Java) {
+          if (thread->in_stack_reserved_zone(addr)) {
+            frame fr;
+            if (os::Solaris::get_frame_at_stack_banging_point(thread, uc, &fr)) {
+              assert(fr.is_java_frame(), "Must be a Java frame");
+              frame activation = SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
+              if (activation.sp() != NULL) {
+                thread->disable_stack_reserved_zone();
+                RegisterMap map(thread);
+                int frame_size = activation.frame_size(&map);
+                thread->set_reserved_stack_activation((address)(((address)activation.sp()) - STACK_BIAS));
+                return true;
+              }
+            }
+          }
          // Throw a stack overflow exception.  Guard pages will be reenabled
          // while unwinding the stack.
+          thread->disable_stack_yellow_zone();
          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
        } else {
          // Thread was in the vm or native code.  Return and try to finish.
+          thread->disable_stack_yellow_zone();
          return true;
        }
      } else if (thread->in_stack_red_zone(addr)) {

View File

@@ -198,6 +198,7 @@ address os::Solaris::ucontext_get_pc(ucontext_t *uc) {
 //
 // The difference between this and os::fetch_frame_from_context() is that
 // here we try to skip nested signal frames.
+// This method is also used for stack overflow signal handling.
 ExtendedPC os::Solaris::fetch_frame_from_ucontext(Thread* thread,
   ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {
 
@@ -236,6 +237,49 @@ frame os::fetch_frame_from_context(void* ucVoid) {
   return frame(sp, fp, epc.pc());
 }
 
+frame os::fetch_frame_from_ucontext(Thread* thread, void* ucVoid) {
+  intptr_t* sp;
+  intptr_t* fp;
+  ExtendedPC epc = os::Solaris::fetch_frame_from_ucontext(thread, (ucontext_t*)ucVoid, &sp, &fp);
+  return frame(sp, fp, epc.pc());
+}
+
+bool os::Solaris::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) {
+  address pc = (address) os::Solaris::ucontext_get_pc(uc);
+  if (Interpreter::contains(pc)) {
+    // interpreter performs stack banging after the fixed frame header has
+    // been generated while the compilers perform it before. To maintain
+    // semantic consistency between interpreted and compiled frames, the
+    // method returns the Java sender of the current frame.
+    *fr = os::fetch_frame_from_ucontext(thread, uc);
+    if (!fr->is_first_java_frame()) {
+      assert(fr->safe_for_sender(thread), "Safety check");
+      *fr = fr->java_sender();
+    }
+  } else {
+    // more complex code with compiled code
+    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
+    CodeBlob* cb = CodeCache::find_blob(pc);
+    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
+      // Not sure where the pc points to, fallback to default
+      // stack overflow handling
+      return false;
+    } else {
+      // in compiled code, the stack banging is performed just after the return pc
+      // has been pushed on the stack
+      intptr_t* fp = os::Solaris::ucontext_get_fp(uc);
+      intptr_t* sp = os::Solaris::ucontext_get_sp(uc);
+      *fr = frame(sp + 1, fp, (address)*sp);
+      if (!fr->is_java_frame()) {
+        assert(fr->safe_for_sender(thread), "Safety check");
+        *fr = fr->java_sender();
+      }
+    }
+  }
+  assert(fr->is_java_frame(), "Safety check");
+  return true;
+}
+
 frame os::get_sender_for_C_frame(frame* fr) {
   return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
 }
@@ -422,13 +466,31 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
     if (sig == SIGSEGV && info->si_code == SEGV_ACCERR) {
       address addr = (address) info->si_addr;
       if (thread->in_stack_yellow_zone(addr)) {
-        thread->disable_stack_yellow_zone();
        if (thread->thread_state() == _thread_in_Java) {
+          if (thread->in_stack_reserved_zone(addr)) {
+            frame fr;
+            if (os::Solaris::get_frame_at_stack_banging_point(thread, uc, &fr)) {
+              assert(fr.is_java_frame(), "Must be Java frame");
+              frame activation = SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
+              if (activation.sp() != NULL) {
+                thread->disable_stack_reserved_zone();
+                if (activation.is_interpreted_frame()) {
+                  thread->set_reserved_stack_activation((address)(
+                    activation.fp() + frame::interpreter_frame_initial_sp_offset));
+                } else {
+                  thread->set_reserved_stack_activation((address)activation.unextended_sp());
+                }
+                return true;
+              }
+            }
+          }
          // Throw a stack overflow exception.  Guard pages will be reenabled
          // while unwinding the stack.
+          thread->disable_stack_yellow_zone();
          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
        } else {
          // Thread was in the vm or native code.  Return and try to finish.
+          thread->disable_stack_yellow_zone();
          return true;
        }
      } else if (thread->in_stack_red_zone(addr)) {

View File

@@ -551,6 +551,7 @@ Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* metho
 , _would_profile(false)
 , _has_unsafe_access(false)
 , _has_method_handle_invokes(false)
+, _has_reserved_stack_access(method->has_reserved_stack_access())
 , _bailout_msg(NULL)
 , _exception_info_list(NULL)
 , _allocator(NULL)

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -81,6 +81,7 @@ class Compilation: public StackObj {
   bool _has_unsafe_access;
   bool _would_profile;
   bool _has_method_handle_invokes;  // True if this method has MethodHandle invokes.
+  bool _has_reserved_stack_access;
   const char* _bailout_msg;
   ExceptionInfoList* _exception_info_list;
   ExceptionHandlerTable _exception_handler_table;
@@ -171,6 +172,9 @@ class Compilation: public StackObj {
   bool     has_method_handle_invokes() const { return _has_method_handle_invokes;     }
   void set_has_method_handle_invokes(bool z) {        _has_method_handle_invokes = z; }
 
+  bool     has_reserved_stack_access() const { return _has_reserved_stack_access; }
+  void set_has_reserved_stack_access(bool z) { _has_reserved_stack_access = z; }
+
   DebugInformationRecorder* debug_info_recorder() const; // = _env->debug_info();
   Dependencies* dependency_recorder() const; // = _env->dependencies()
   ImplicitExceptionTable* implicit_exception_table()     { return &_implicit_exception_table; }

View File

@ -3322,7 +3322,13 @@ bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, Bytecodes::Co
// method handle invokes // method handle invokes
if (callee->is_method_handle_intrinsic()) { if (callee->is_method_handle_intrinsic()) {
return try_method_handle_inline(callee); if (try_method_handle_inline(callee)) {
if (callee->has_reserved_stack_access()) {
compilation()->set_has_reserved_stack_access(true);
}
return true;
}
return false;
} }
// handle intrinsics // handle intrinsics
@ -3330,6 +3336,9 @@ bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, Bytecodes::Co
(CheckIntrinsics ? callee->intrinsic_candidate() : true)) { (CheckIntrinsics ? callee->intrinsic_candidate() : true)) {
if (try_inline_intrinsics(callee)) { if (try_inline_intrinsics(callee)) {
print_inlining(callee, "intrinsic"); print_inlining(callee, "intrinsic");
if (callee->has_reserved_stack_access()) {
compilation()->set_has_reserved_stack_access(true);
}
return true; return true;
} }
// try normal inlining // try normal inlining
@@ -3346,8 +3355,12 @@ bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, Bytecodes::Co
if (bc == Bytecodes::_illegal) { if (bc == Bytecodes::_illegal) {
bc = code(); bc = code();
} }
if (try_inline_full(callee, holder_known, bc, receiver)) if (try_inline_full(callee, holder_known, bc, receiver)) {
if (callee->has_reserved_stack_access()) {
compilation()->set_has_reserved_stack_access(true);
}
return true; return true;
}
// Entire compilation could fail during try_inline_full call. // Entire compilation could fail during try_inline_full call.
// In that case printing inlining decision info is useless. // In that case printing inlining decision info is useless.
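These three hunks all do the same thing for C1: whenever an inlining path succeeds, the callee's reserved-stack attribute is folded into the enclosing compilation. A minimal Java sketch of why that is needed (the class and method names are invented for illustration, and compiling against the internal annotation may require exporting jdk.internal.vm.annotation):

import jdk.internal.vm.annotation.ReservedStackAccess;

public class InlinePropagation {
    // Small enough that the JIT will usually inline it into hotCaller().
    @ReservedStackAccess
    private static int criticalLeaf(int x) {
        return x + 1;
    }

    // Once criticalLeaf() is inlined, no separate compiled frame exists for
    // it, so the reserved-stack check emitted on method return has to be
    // generated in the code of hotCaller(); hence the propagation above.
    static int hotCaller(int x) {
        return criticalLeaf(x);
    }

    public static void main(String[] args) {
        int s = 0;
        for (int i = 0; i < 1_000_000; i++) {
            s = hotCaller(s); // warm up so hotCaller() gets compiled
        }
        System.out.println(s);
    }
}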

View File

@@ -502,7 +502,7 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* t
// Check the stack guard pages and reenable them if necessary and there is // Check the stack guard pages and reenable them if necessary and there is
// enough space on the stack to do so. Use fast exceptions only if the guard // enough space on the stack to do so. Use fast exceptions only if the guard
// pages are enabled. // pages are enabled.
bool guard_pages_enabled = thread->stack_yellow_zone_enabled(); bool guard_pages_enabled = thread->stack_guards_enabled();
if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack(); if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
if (JvmtiExport::can_post_on_exceptions()) { if (JvmtiExport::can_post_on_exceptions()) {

View File

@@ -91,6 +91,7 @@ ciMethod::ciMethod(methodHandle h_m, ciInstanceKlass* holder) :
_balanced_monitors = !_uses_monitors || h_m()->access_flags().is_monitor_matching(); _balanced_monitors = !_uses_monitors || h_m()->access_flags().is_monitor_matching();
_is_c1_compilable = !h_m()->is_not_c1_compilable(); _is_c1_compilable = !h_m()->is_not_c1_compilable();
_is_c2_compilable = !h_m()->is_not_c2_compilable(); _is_c2_compilable = !h_m()->is_not_c2_compilable();
_has_reserved_stack_access = h_m()->has_reserved_stack_access();
// Lazy fields, filled in on demand. Require allocation. // Lazy fields, filled in on demand. Require allocation.
_code = NULL; _code = NULL;
_exception_handlers = NULL; _exception_handlers = NULL;

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@@ -81,6 +81,7 @@ class ciMethod : public ciMetadata {
bool _is_c1_compilable; bool _is_c1_compilable;
bool _is_c2_compilable; bool _is_c2_compilable;
bool _can_be_statically_bound; bool _can_be_statically_bound;
bool _has_reserved_stack_access;
// Lazy fields, filled in on demand // Lazy fields, filled in on demand
address _code; address _code;
@@ -316,6 +317,7 @@ class ciMethod : public ciMetadata {
bool is_accessor () const; bool is_accessor () const;
bool is_initializer () const; bool is_initializer () const;
bool can_be_statically_bound() const { return _can_be_statically_bound; } bool can_be_statically_bound() const { return _can_be_statically_bound; }
bool has_reserved_stack_access() const { return _has_reserved_stack_access; }
bool is_boxing_method() const; bool is_boxing_method() const;
bool is_unboxing_method() const; bool is_unboxing_method() const;

View File

@@ -946,6 +946,7 @@ public:
_method_HotSpotIntrinsicCandidate, _method_HotSpotIntrinsicCandidate,
_jdk_internal_vm_annotation_Contended, _jdk_internal_vm_annotation_Contended,
_field_Stable, _field_Stable,
_jdk_internal_vm_annotation_ReservedStackAccess,
_annotation_LIMIT _annotation_LIMIT
}; };
const Location _location; const Location _location;
@@ -2016,6 +2017,11 @@ AnnotationCollector::annotation_index(const ClassLoaderData* loader_data,
} }
return _jdk_internal_vm_annotation_Contended; return _jdk_internal_vm_annotation_Contended;
} }
case vmSymbols::VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_ReservedStackAccess_signature): {
if (_location != _in_method) break; // only allow for methods
if (RestrictReservedStack && !privileged) break; // honor privileges
return _jdk_internal_vm_annotation_ReservedStackAccess;
}
default: { default: {
break; break;
} }
@@ -2051,6 +2057,8 @@ void MethodAnnotationCollector::apply_to(methodHandle m) {
m->set_hidden(true); m->set_hidden(true);
if (has_annotation(_method_HotSpotIntrinsicCandidate) && !m->is_synthetic()) if (has_annotation(_method_HotSpotIntrinsicCandidate) && !m->is_synthetic())
m->set_intrinsic_candidate(true); m->set_intrinsic_candidate(true);
if (has_annotation(_jdk_internal_vm_annotation_ReservedStackAccess))
m->set_has_reserved_stack_access(true);
} }
void ClassFileParser::ClassAnnotationCollector::apply_to(InstanceKlass* ik) { void ClassFileParser::ClassAnnotationCollector::apply_to(InstanceKlass* ik) {
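For reference, this is the pattern the parser logic above is gating: a minimal, hypothetical use of the annotation. It is only recognized on methods (the _in_method check), and with the default -XX:+RestrictReservedStack it is silently ignored unless the holder class is privileged, so an application class like this one would have to be launched with -XX:-RestrictReservedStack:

import jdk.internal.vm.annotation.ReservedStackAccess;

public class ReservedDemo {
    private int state;

    // Recognized at class file parsing time; when the checks above pass,
    // MethodAnnotationCollector::apply_to() sets the method's
    // has_reserved_stack_access flag.
    @ReservedStackAccess
    synchronized void criticalUpdate() {
        state++; // critical section that must not be torn by a StackOverflowError
    }

    public static void main(String[] args) {
        new ReservedDemo().criticalUpdate();
    }
}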

View File

@@ -212,6 +212,7 @@
template(java_util_concurrent_atomic_AtomicLongFieldUpdater_LockedUpdater, "java/util/concurrent/atomic/AtomicLongFieldUpdater$LockedUpdater") \ template(java_util_concurrent_atomic_AtomicLongFieldUpdater_LockedUpdater, "java/util/concurrent/atomic/AtomicLongFieldUpdater$LockedUpdater") \
template(java_util_concurrent_atomic_AtomicReferenceFieldUpdater_Impl, "java/util/concurrent/atomic/AtomicReferenceFieldUpdater$AtomicReferenceFieldUpdaterImpl") \ template(java_util_concurrent_atomic_AtomicReferenceFieldUpdater_Impl, "java/util/concurrent/atomic/AtomicReferenceFieldUpdater$AtomicReferenceFieldUpdaterImpl") \
template(jdk_internal_vm_annotation_Contended_signature, "Ljdk/internal/vm/annotation/Contended;") \ template(jdk_internal_vm_annotation_Contended_signature, "Ljdk/internal/vm/annotation/Contended;") \
template(jdk_internal_vm_annotation_ReservedStackAccess_signature, "Ljdk/internal/vm/annotation/ReservedStackAccess;") \
\ \
/* class symbols needed by intrinsics */ \ /* class symbols needed by intrinsics */ \
VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, template, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE) \ VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, template, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE) \

View File

@@ -314,6 +314,27 @@ IRT_ENTRY(void, InterpreterRuntime::throw_StackOverflowError(JavaThread* thread)
THROW_HANDLE(exception); THROW_HANDLE(exception);
IRT_END IRT_END
IRT_ENTRY(address, InterpreterRuntime::check_ReservedStackAccess_annotated_methods(JavaThread* thread))
frame fr = thread->last_frame();
assert(fr.is_java_frame(), "Must be a Java frame");
frame activation = SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
if (activation.sp() != NULL) {
thread->disable_stack_reserved_zone();
thread->set_reserved_stack_activation((address)activation.unextended_sp());
}
return (address)activation.sp();
IRT_END
IRT_ENTRY(void, InterpreterRuntime::throw_delayed_StackOverflowError(JavaThread* thread))
Handle exception = get_preinitialized_exception(
SystemDictionary::StackOverflowError_klass(),
CHECK);
java_lang_Throwable::set_message(exception(),
Universe::delayed_stack_overflow_error_message());
// Increment counter for hs_err file reporting
Atomic::inc(&Exceptions::_stack_overflow_errors);
THROW_HANDLE(exception);
IRT_END
IRT_ENTRY(void, InterpreterRuntime::create_exception(JavaThread* thread, char* name, char* message)) IRT_ENTRY(void, InterpreterRuntime::create_exception(JavaThread* thread, char* name, char* message))
// lookup exception klass // lookup exception klass

View File

@@ -91,10 +91,13 @@ class InterpreterRuntime: AllStatic {
// Quicken instance-of and check-cast bytecodes // Quicken instance-of and check-cast bytecodes
static void quicken_io_cc(JavaThread* thread); static void quicken_io_cc(JavaThread* thread);
static address check_ReservedStackAccess_annotated_methods(JavaThread* thread);
// Exceptions thrown by the interpreter // Exceptions thrown by the interpreter
static void throw_AbstractMethodError(JavaThread* thread); static void throw_AbstractMethodError(JavaThread* thread);
static void throw_IncompatibleClassChangeError(JavaThread* thread); static void throw_IncompatibleClassChangeError(JavaThread* thread);
static void throw_StackOverflowError(JavaThread* thread); static void throw_StackOverflowError(JavaThread* thread);
static void throw_delayed_StackOverflowError(JavaThread* thread);
static void throw_ArrayIndexOutOfBoundsException(JavaThread* thread, char* name, jint index); static void throw_ArrayIndexOutOfBoundsException(JavaThread* thread, char* name, jint index);
static void throw_ClassCastException(JavaThread* thread, oopDesc* obj); static void throw_ClassCastException(JavaThread* thread, oopDesc* obj);
static void create_exception(JavaThread* thread, char* name, char* message); static void create_exception(JavaThread* thread, char* name, char* message);

View File

@@ -248,7 +248,7 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* t
// Check the stack guard pages and reenable them if necessary and there is // Check the stack guard pages and reenable them if necessary and there is
// enough space on the stack to do so. Use fast exceptions only if the guard // enough space on the stack to do so. Use fast exceptions only if the guard
// pages are enabled. // pages are enabled.
bool guard_pages_enabled = thread->stack_yellow_zone_enabled(); bool guard_pages_enabled = thread->stack_guards_enabled();
if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack(); if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
if (JvmtiExport::can_post_on_exceptions()) { if (JvmtiExport::can_post_on_exceptions()) {

View File

@@ -125,6 +125,7 @@ oop Universe::_out_of_memory_error_class_metaspace = NULL;
oop Universe::_out_of_memory_error_array_size = NULL; oop Universe::_out_of_memory_error_array_size = NULL;
oop Universe::_out_of_memory_error_gc_overhead_limit = NULL; oop Universe::_out_of_memory_error_gc_overhead_limit = NULL;
oop Universe::_out_of_memory_error_realloc_objects = NULL; oop Universe::_out_of_memory_error_realloc_objects = NULL;
oop Universe::_delayed_stack_overflow_error_message = NULL;
objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL; objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL;
volatile jint Universe::_preallocated_out_of_memory_error_avail_count = 0; volatile jint Universe::_preallocated_out_of_memory_error_avail_count = 0;
bool Universe::_verify_in_progress = false; bool Universe::_verify_in_progress = false;
@@ -200,6 +201,7 @@ void Universe::oops_do(OopClosure* f, bool do_all) {
f->do_oop((oop*)&_out_of_memory_error_array_size); f->do_oop((oop*)&_out_of_memory_error_array_size);
f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit); f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit);
f->do_oop((oop*)&_out_of_memory_error_realloc_objects); f->do_oop((oop*)&_out_of_memory_error_realloc_objects);
f->do_oop((oop*)&_delayed_stack_overflow_error_message);
f->do_oop((oop*)&_preallocated_out_of_memory_error_array); f->do_oop((oop*)&_preallocated_out_of_memory_error_array);
f->do_oop((oop*)&_null_ptr_exception_instance); f->do_oop((oop*)&_null_ptr_exception_instance);
f->do_oop((oop*)&_arithmetic_exception_instance); f->do_oop((oop*)&_arithmetic_exception_instance);
@@ -909,6 +911,12 @@ bool universe_post_init() {
k_h->allocate_instance(CHECK_false); k_h->allocate_instance(CHECK_false);
Universe::_out_of_memory_error_realloc_objects = k_h->allocate_instance(CHECK_false); Universe::_out_of_memory_error_realloc_objects = k_h->allocate_instance(CHECK_false);
// Setup preallocated cause message for delayed StackOverflowError
if (StackReservedPages > 0) {
Universe::_delayed_stack_overflow_error_message =
java_lang_String::create_oop_from_str("Delayed StackOverflowError due to ReservedStackAccess annotated method", CHECK_false);
}
// Setup preallocated NullPointerException // Setup preallocated NullPointerException
// (this is currently used for a cheap & dirty solution in compiler exception handling) // (this is currently used for a cheap & dirty solution in compiler exception handling)
k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_NullPointerException(), true, CHECK_false); k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_NullPointerException(), true, CHECK_false);
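A speculative sketch of the user-visible effect of the preallocated message a few lines above; whether the delayed path actually triggers depends on where stack banging detects the overflow, so treat this as illustrative only (all names here are invented):

public class DelayedSoeObserver {
    @jdk.internal.vm.annotation.ReservedStackAccess
    static void critical(int depth) {
        if (depth > 0) critical(depth - 1); // may run into the reserved area
    }

    public static void main(String[] args) {
        try {
            critical(1_000_000);
        } catch (StackOverflowError soe) {
            // If the reserved area was used, the message should be the
            // preallocated string installed in universe_post_init():
            // "Delayed StackOverflowError due to ReservedStackAccess annotated method"
            System.out.println(soe.getMessage());
        }
    }
}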

View File

@@ -159,6 +159,9 @@ class Universe: AllStatic {
static oop _out_of_memory_error_gc_overhead_limit; static oop _out_of_memory_error_gc_overhead_limit;
static oop _out_of_memory_error_realloc_objects; static oop _out_of_memory_error_realloc_objects;
// preallocated cause message for delayed StackOverflowError
static oop _delayed_stack_overflow_error_message;
static Array<int>* _the_empty_int_array; // Canonicalized int array static Array<int>* _the_empty_int_array; // Canonicalized int array
static Array<u2>* _the_empty_short_array; // Canonicalized short array static Array<u2>* _the_empty_short_array; // Canonicalized short array
static Array<Klass*>* _the_empty_klass_array; // Canonicalized klass obj array static Array<Klass*>* _the_empty_klass_array; // Canonicalized klass obj array
@@ -339,6 +342,7 @@ class Universe: AllStatic {
static oop out_of_memory_error_array_size() { return gen_out_of_memory_error(_out_of_memory_error_array_size); } static oop out_of_memory_error_array_size() { return gen_out_of_memory_error(_out_of_memory_error_array_size); }
static oop out_of_memory_error_gc_overhead_limit() { return gen_out_of_memory_error(_out_of_memory_error_gc_overhead_limit); } static oop out_of_memory_error_gc_overhead_limit() { return gen_out_of_memory_error(_out_of_memory_error_gc_overhead_limit); }
static oop out_of_memory_error_realloc_objects() { return gen_out_of_memory_error(_out_of_memory_error_realloc_objects); } static oop out_of_memory_error_realloc_objects() { return gen_out_of_memory_error(_out_of_memory_error_realloc_objects); }
static oop delayed_stack_overflow_error_message() { return _delayed_stack_overflow_error_message; }
// Accessors needed for fast allocation // Accessors needed for fast allocation
static Klass** boolArrayKlassObj_addr() { return &_boolArrayKlassObj; } static Klass** boolArrayKlassObj_addr() { return &_boolArrayKlassObj; }

View File

@@ -82,9 +82,10 @@ class Method : public Metadata {
_hidden = 1 << 4, _hidden = 1 << 4,
_has_injected_profile = 1 << 5, _has_injected_profile = 1 << 5,
_running_emcp = 1 << 6, _running_emcp = 1 << 6,
_intrinsic_candidate = 1 << 7 _intrinsic_candidate = 1 << 7,
_reserved_stack_access = 1 << 8
}; };
mutable u1 _flags; mutable u2 _flags;
#ifndef PRODUCT #ifndef PRODUCT
int _compiled_invocation_count; // Number of nmethod invocations so far (for perf. debugging) int _compiled_invocation_count; // Number of nmethod invocations so far (for perf. debugging)
@@ -835,6 +836,14 @@ class Method : public Metadata {
_flags = x ? (_flags | _has_injected_profile) : (_flags & ~_has_injected_profile); _flags = x ? (_flags | _has_injected_profile) : (_flags & ~_has_injected_profile);
} }
bool has_reserved_stack_access() {
return (_flags & _reserved_stack_access) != 0;
}
void set_has_reserved_stack_access(bool x) {
_flags = x ? (_flags | _reserved_stack_access) : (_flags & ~_reserved_stack_access);
}
ConstMethod::MethodType method_type() const { ConstMethod::MethodType method_type() const {
return _constMethod->method_type(); return _constMethod->method_type();
} }
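The widening of _flags from u1 to u2 is forced by the new bit: 1 << 8 no longer fits in a single byte, since bit 7 was the last one a u1 could hold. A minimal Java rendering of the same get/set pattern, with the constant names mirroring the enum above but the class itself invented:

public class MethodFlags {
    static final int INTRINSIC_CANDIDATE   = 1 << 7; // last bit that fit in a u1
    static final int RESERVED_STACK_ACCESS = 1 << 8; // why u1 became u2

    private char flags; // 16 bits, the Java analogue of HotSpot's u2

    boolean hasReservedStackAccess() {
        return (flags & RESERVED_STACK_ACCESS) != 0;
    }

    void setHasReservedStackAccess(boolean x) {
        flags = (char) (x ? (flags | RESERVED_STACK_ACCESS)
                          : (flags & ~RESERVED_STACK_ACCESS));
    }

    public static void main(String[] args) {
        MethodFlags f = new MethodFlags();
        f.setHasReservedStackAccess(true);
        System.out.println(f.hasReservedStackAccess()); // prints: true
    }
}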

View File

@@ -672,7 +672,8 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
_print_inlining_idx(0), _print_inlining_idx(0),
_print_inlining_output(NULL), _print_inlining_output(NULL),
_interpreter_frame_size(0), _interpreter_frame_size(0),
_max_node_limit(MaxNodeLimit) { _max_node_limit(MaxNodeLimit),
_has_reserved_stack_access(target->has_reserved_stack_access()) {
C = this; C = this;
#ifndef PRODUCT #ifndef PRODUCT
if (_printer != NULL) { if (_printer != NULL) {

View File

@@ -364,6 +364,7 @@ class Compile : public Phase {
bool _has_unsafe_access; // True if the method _may_ produce faults in unsafe loads or stores. bool _has_unsafe_access; // True if the method _may_ produce faults in unsafe loads or stores.
bool _has_stringbuilder; // True StringBuffers or StringBuilders are allocated bool _has_stringbuilder; // True StringBuffers or StringBuilders are allocated
bool _has_boxed_value; // True if a boxed object is allocated bool _has_boxed_value; // True if a boxed object is allocated
bool _has_reserved_stack_access; // True if the method or an inlined method is annotated with ReservedStackAccess
int _max_vector_size; // Maximum size of generated vectors int _max_vector_size; // Maximum size of generated vectors
uint _trap_hist[trapHistLength]; // Cumulative traps uint _trap_hist[trapHistLength]; // Cumulative traps
bool _trap_can_recompile; // Have we emitted a recompiling trap? bool _trap_can_recompile; // Have we emitted a recompiling trap?
@@ -637,6 +638,8 @@ class Compile : public Phase {
void set_has_stringbuilder(bool z) { _has_stringbuilder = z; } void set_has_stringbuilder(bool z) { _has_stringbuilder = z; }
bool has_boxed_value() const { return _has_boxed_value; } bool has_boxed_value() const { return _has_boxed_value; }
void set_has_boxed_value(bool z) { _has_boxed_value = z; } void set_has_boxed_value(bool z) { _has_boxed_value = z; }
bool has_reserved_stack_access() const { return _has_reserved_stack_access; }
void set_has_reserved_stack_access(bool z) { _has_reserved_stack_access = z; }
int max_vector_size() const { return _max_vector_size; } int max_vector_size() const { return _max_vector_size; }
void set_max_vector_size(int s) { _max_vector_size = s; } void set_max_vector_size(int s) { _max_vector_size = s; }
void set_trap_count(uint r, uint c) { assert(r < trapHistLength, "oob"); _trap_hist[r] = c; } void set_trap_count(uint r, uint c) { assert(r < trapHistLength, "oob"); _trap_hist[r] = c; }

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@@ -415,6 +415,10 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
_est_switch_depth = 0; _est_switch_depth = 0;
#endif #endif
if (parse_method->has_reserved_stack_access()) {
C->set_has_reserved_stack_access(true);
}
_tf = TypeFunc::make(method()); _tf = TypeFunc::make(method());
_iter.reset_to_method(method()); _iter.reset_to_method(method());
_flow = method()->get_flow_analysis(); _flow = method()->get_flow_analysis();

View File

@@ -2426,6 +2426,12 @@ bool Arguments::check_vm_args_consistency() {
warning("The VM option CICompilerCountPerCPU overrides CICompilerCount."); warning("The VM option CICompilerCountPerCPU overrides CICompilerCount.");
} }
#ifndef SUPPORT_RESERVED_STACK_AREA
if (StackReservedPages != 0) {
FLAG_SET_CMDLINE(intx, StackReservedPages, 0);
warning("Reserved Stack Area not supported on this platform");
}
#endif
return status; return status;
} }

View File

@@ -1431,7 +1431,7 @@ void Deoptimization::load_class_by_index(const constantPoolHandle& constant_pool
// stack bang causes a stack overflow we crash. // stack bang causes a stack overflow we crash.
assert(THREAD->is_Java_thread(), "only a java thread can be here"); assert(THREAD->is_Java_thread(), "only a java thread can be here");
JavaThread* thread = (JavaThread*)THREAD; JavaThread* thread = (JavaThread*)THREAD;
bool guard_pages_enabled = thread->stack_yellow_zone_enabled(); bool guard_pages_enabled = thread->stack_guards_enabled();
if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack(); if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
assert(guard_pages_enabled, "stack banging in uncommon trap blob may cause crash"); assert(guard_pages_enabled, "stack banging in uncommon trap blob may cause crash");
} }

View File

@@ -3438,6 +3438,13 @@ public:
"Number of red zone (unrecoverable overflows) pages") \ "Number of red zone (unrecoverable overflows) pages") \
range(MIN_STACK_RED_PAGES, (DEFAULT_STACK_RED_PAGES+2)) \ range(MIN_STACK_RED_PAGES, (DEFAULT_STACK_RED_PAGES+2)) \
\ \
product_pd(intx, StackReservedPages, \
"Number of reserved zone (reserved to annotated methods) pages") \
range(MIN_STACK_RESERVED_PAGES, (DEFAULT_STACK_RESERVED_PAGES+10))\
\
product(bool, RestrictReservedStack, true, \
"Restrict @ReservedStackAccess to trusted classes") \
\
/* greater stack shadow pages can't generate instruction to bang stack */ \ /* greater stack shadow pages can't generate instruction to bang stack */ \
product_pd(intx, StackShadowPages, \ product_pd(intx, StackShadowPages, \
"Number of shadow zone (for overflow checking) pages " \ "Number of shadow zone (for overflow checking) pages " \

View File

@@ -371,9 +371,9 @@ void JavaCalls::call_helper(JavaValue* result, const methodHandle& method, JavaC
// Find receiver // Find receiver
Handle receiver = (!method->is_static()) ? args->receiver() : Handle(); Handle receiver = (!method->is_static()) ? args->receiver() : Handle();
// When we reenter Java, we need to reenable the yellow zone which // When we reenter Java, we need to reenable the reserved/yellow zone which
// might already be disabled when we are in VM. // might already be disabled when we are in VM.
if (thread->stack_yellow_zone_disabled()) { if (!thread->stack_guards_enabled()) {
thread->reguard_stack(); thread->reguard_stack();
} }

View File

@@ -1386,8 +1386,9 @@ bool os::stack_shadow_pages_available(Thread *thread, const methodHandle& method
// respectively. // respectively.
const int framesize_in_bytes = const int framesize_in_bytes =
Interpreter::size_top_interpreter_activation(method()) * wordSize; Interpreter::size_top_interpreter_activation(method()) * wordSize;
int reserved_area = ((StackShadowPages + StackRedPages + StackYellowPages) int reserved_area = ((StackShadowPages + StackRedPages + StackYellowPages
* vm_page_size()) + framesize_in_bytes; + StackReservedPages) * vm_page_size())
+ framesize_in_bytes;
// The very lower end of the stack // The very lower end of the stack
address stack_limit = thread->stack_base() - thread->stack_size(); address stack_limit = thread->stack_base() - thread->stack_size();
return (sp > (stack_limit + reserved_area)); return (sp > (stack_limit + reserved_area));

View File

@@ -473,6 +473,7 @@ class os: AllStatic {
static ExtendedPC fetch_frame_from_context(void* ucVoid, intptr_t** sp, intptr_t** fp); static ExtendedPC fetch_frame_from_context(void* ucVoid, intptr_t** sp, intptr_t** fp);
static frame fetch_frame_from_context(void* ucVoid); static frame fetch_frame_from_context(void* ucVoid);
static frame fetch_frame_from_ucontext(Thread* thread, void* ucVoid);
static ExtendedPC get_thread_pc(Thread *thread); static ExtendedPC get_thread_pc(Thread *thread);
static void breakpoint(); static void breakpoint();

View File

@@ -57,6 +57,7 @@
#include "runtime/stubRoutines.hpp" #include "runtime/stubRoutines.hpp"
#include "runtime/vframe.hpp" #include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp" #include "runtime/vframeArray.hpp"
#include "trace/tracing.hpp"
#include "utilities/copy.hpp" #include "utilities/copy.hpp"
#include "utilities/dtrace.hpp" #include "utilities/dtrace.hpp"
#include "utilities/events.hpp" #include "utilities/events.hpp"
@@ -487,8 +488,11 @@ address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thre
// unguarded. Reguard the stack otherwise if we return to the // unguarded. Reguard the stack otherwise if we return to the
// deopt blob and the stack bang causes a stack overflow we // deopt blob and the stack bang causes a stack overflow we
// crash. // crash.
bool guard_pages_enabled = thread->stack_yellow_zone_enabled(); bool guard_pages_enabled = thread->stack_guards_enabled();
if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack(); if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
if (thread->reserved_stack_activation() != thread->stack_base()) {
thread->set_reserved_stack_activation(thread->stack_base());
}
assert(guard_pages_enabled, "stack banging in deopt blob may cause crash"); assert(guard_pages_enabled, "stack banging in deopt blob may cause crash");
return SharedRuntime::deopt_blob()->unpack_with_exception(); return SharedRuntime::deopt_blob()->unpack_with_exception();
} else { } else {
@@ -761,10 +765,23 @@ JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* th
JRT_END JRT_END
JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* thread)) JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* thread))
throw_StackOverflowError_common(thread, false);
JRT_END
JRT_ENTRY(void, SharedRuntime::throw_delayed_StackOverflowError(JavaThread* thread))
throw_StackOverflowError_common(thread, true);
JRT_END
void SharedRuntime::throw_StackOverflowError_common(JavaThread* thread, bool delayed) {
// We avoid using the normal exception construction in this case because // We avoid using the normal exception construction in this case because
// it performs an upcall to Java, and we're already out of stack space. // it performs an upcall to Java, and we're already out of stack space.
Thread* THREAD = thread;
Klass* k = SystemDictionary::StackOverflowError_klass(); Klass* k = SystemDictionary::StackOverflowError_klass();
oop exception_oop = InstanceKlass::cast(k)->allocate_instance(CHECK); oop exception_oop = InstanceKlass::cast(k)->allocate_instance(CHECK);
if (delayed) {
java_lang_Throwable::set_message(exception_oop,
Universe::delayed_stack_overflow_error_message());
}
Handle exception (thread, exception_oop); Handle exception (thread, exception_oop);
if (StackTraceInThrowable) { if (StackTraceInThrowable) {
java_lang_Throwable::fill_in_stack_trace(exception); java_lang_Throwable::fill_in_stack_trace(exception);
@@ -772,7 +789,7 @@ JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* thread))
// Increment counter for hs_err file reporting // Increment counter for hs_err file reporting
Atomic::inc(&Exceptions::_stack_overflow_errors); Atomic::inc(&Exceptions::_stack_overflow_errors);
throw_and_post_jvmti_exception(thread, exception); throw_and_post_jvmti_exception(thread, exception);
JRT_END }
#if INCLUDE_JVMCI #if INCLUDE_JVMCI
address SharedRuntime::deoptimize_for_implicit_exception(JavaThread* thread, address pc, nmethod* nm, int deopt_reason) { address SharedRuntime::deoptimize_for_implicit_exception(JavaThread* thread, address pc, nmethod* nm, int deopt_reason) {
@@ -2934,3 +2951,68 @@ void AdapterHandlerLibrary::print_statistics() {
} }
#endif /* PRODUCT */ #endif /* PRODUCT */
JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* thread))
assert(thread->is_Java_thread(), "Only Java threads have a stack reserved zone");
thread->enable_stack_reserved_zone();
thread->set_reserved_stack_activation(thread->stack_base());
JRT_END
frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* thread, frame fr) {
frame activation;
int decode_offset = 0;
nmethod* nm = NULL;
frame prv_fr = fr;
int count = 1;
assert(fr.is_java_frame(), "Must start on Java frame");
while (!fr.is_first_frame()) {
Method* method = NULL;
// Compiled java method case.
if (decode_offset != 0) {
DebugInfoReadStream stream(nm, decode_offset);
decode_offset = stream.read_int();
method = (Method*)nm->metadata_at(stream.read_int());
} else {
if (fr.is_first_java_frame()) break;
address pc = fr.pc();
prv_fr = fr;
if (fr.is_interpreted_frame()) {
method = fr.interpreter_frame_method();
fr = fr.java_sender();
} else {
CodeBlob* cb = fr.cb();
fr = fr.java_sender();
if (cb == NULL || !cb->is_nmethod()) {
continue;
}
nm = (nmethod*)cb;
if (nm->method()->is_native()) {
method = nm->method();
} else {
PcDesc* pd = nm->pc_desc_at(pc);
assert(pd != NULL, "PcDesc must not be NULL");
decode_offset = pd->scope_decode_offset();
// if decode_offset is not equal to 0, it will execute the
// "compiled java method case" at the beginning of the loop.
continue;
}
}
}
if (method->has_reserved_stack_access()) {
ResourceMark rm(thread);
activation = prv_fr;
warning("Potentially dangerous stack overflow in "
"ReservedStackAccess annotated method %s [%d]",
method->name_and_sig_as_C_string(), count++);
EventReservedStackActivation event;
if (event.should_commit()) {
event.set_method(method);
event.commit();
}
}
}
return activation;
}

View File

@@ -201,6 +201,8 @@ class SharedRuntime: AllStatic {
static void throw_NullPointerException(JavaThread* thread); static void throw_NullPointerException(JavaThread* thread);
static void throw_NullPointerException_at_call(JavaThread* thread); static void throw_NullPointerException_at_call(JavaThread* thread);
static void throw_StackOverflowError(JavaThread* thread); static void throw_StackOverflowError(JavaThread* thread);
static void throw_delayed_StackOverflowError(JavaThread* thread);
static void throw_StackOverflowError_common(JavaThread* thread, bool delayed);
static address continuation_for_implicit_exception(JavaThread* thread, static address continuation_for_implicit_exception(JavaThread* thread,
address faulting_pc, address faulting_pc,
ImplicitExceptionKind exception_kind); ImplicitExceptionKind exception_kind);
@@ -208,6 +210,9 @@ class SharedRuntime: AllStatic {
static address deoptimize_for_implicit_exception(JavaThread* thread, address pc, nmethod* nm, int deopt_reason); static address deoptimize_for_implicit_exception(JavaThread* thread, address pc, nmethod* nm, int deopt_reason);
#endif #endif
static void enable_stack_reserved_zone(JavaThread* thread);
static frame look_for_reserved_stack_annotated_method(JavaThread* thread, frame fr);
// Shared stub locations // Shared stub locations
static address get_poll_stub(address pc); static address get_poll_stub(address pc);

View File

@@ -54,6 +54,7 @@ address StubRoutines::_throw_AbstractMethodError_entry = NULL;
address StubRoutines::_throw_IncompatibleClassChangeError_entry = NULL; address StubRoutines::_throw_IncompatibleClassChangeError_entry = NULL;
address StubRoutines::_throw_NullPointerException_at_call_entry = NULL; address StubRoutines::_throw_NullPointerException_at_call_entry = NULL;
address StubRoutines::_throw_StackOverflowError_entry = NULL; address StubRoutines::_throw_StackOverflowError_entry = NULL;
address StubRoutines::_throw_delayed_StackOverflowError_entry = NULL;
address StubRoutines::_handler_for_unsafe_access_entry = NULL; address StubRoutines::_handler_for_unsafe_access_entry = NULL;
jint StubRoutines::_verify_oop_count = 0; jint StubRoutines::_verify_oop_count = 0;
address StubRoutines::_verify_oop_subroutine_entry = NULL; address StubRoutines::_verify_oop_subroutine_entry = NULL;

View File

@@ -111,6 +111,7 @@ class StubRoutines: AllStatic {
static address _throw_IncompatibleClassChangeError_entry; static address _throw_IncompatibleClassChangeError_entry;
static address _throw_NullPointerException_at_call_entry; static address _throw_NullPointerException_at_call_entry;
static address _throw_StackOverflowError_entry; static address _throw_StackOverflowError_entry;
static address _throw_delayed_StackOverflowError_entry;
static address _handler_for_unsafe_access_entry; static address _handler_for_unsafe_access_entry;
static address _atomic_xchg_entry; static address _atomic_xchg_entry;
@@ -275,6 +276,7 @@ class StubRoutines: AllStatic {
static address throw_IncompatibleClassChangeError_entry(){ return _throw_IncompatibleClassChangeError_entry; } static address throw_IncompatibleClassChangeError_entry(){ return _throw_IncompatibleClassChangeError_entry; }
static address throw_NullPointerException_at_call_entry(){ return _throw_NullPointerException_at_call_entry; } static address throw_NullPointerException_at_call_entry(){ return _throw_NullPointerException_at_call_entry; }
static address throw_StackOverflowError_entry() { return _throw_StackOverflowError_entry; } static address throw_StackOverflowError_entry() { return _throw_StackOverflowError_entry; }
static address throw_delayed_StackOverflowError_entry() { return _throw_delayed_StackOverflowError_entry; }
// Exceptions during unsafe access - should throw Java exception rather // Exceptions during unsafe access - should throw Java exception rather
// than crash. // than crash.

View File

@@ -307,6 +307,7 @@ void Thread::record_stack_base_and_size() {
set_stack_size(os::current_stack_size()); set_stack_size(os::current_stack_size());
if (is_Java_thread()) { if (is_Java_thread()) {
((JavaThread*) this)->set_stack_overflow_limit(); ((JavaThread*) this)->set_stack_overflow_limit();
((JavaThread*) this)->set_reserved_stack_activation(stack_base());
} }
// CR 7190089: on Solaris, primordial thread's stack is adjusted // CR 7190089: on Solaris, primordial thread's stack is adjusted
// in initialize_thread(). Without the adjustment, stack size is // in initialize_thread(). Without the adjustment, stack size is
@@ -908,7 +909,7 @@ bool Thread::is_in_stack(address adr) const {
bool Thread::is_in_usable_stack(address adr) const { bool Thread::is_in_usable_stack(address adr) const {
size_t stack_guard_size = os::uses_stack_guard_pages() ? (StackYellowPages + StackRedPages) * os::vm_page_size() : 0; size_t stack_guard_size = os::uses_stack_guard_pages() ? (StackReservedPages + StackYellowPages + StackRedPages) * os::vm_page_size() : 0;
size_t usable_stack_size = _stack_size - stack_guard_size; size_t usable_stack_size = _stack_size - stack_guard_size;
return ((adr < stack_base()) && (adr >= stack_base() - usable_stack_size)); return ((adr < stack_base()) && (adr >= stack_base() - usable_stack_size));
@@ -1460,6 +1461,7 @@ void JavaThread::initialize() {
_jvmci_counters = NULL; _jvmci_counters = NULL;
} }
#endif // INCLUDE_JVMCI #endif // INCLUDE_JVMCI
_reserved_stack_activation = NULL; // stack base not known yet
(void)const_cast<oop&>(_exception_oop = oop(NULL)); (void)const_cast<oop&>(_exception_oop = oop(NULL));
_exception_pc = 0; _exception_pc = 0;
_exception_handler_pc = 0; _exception_handler_pc = 0;
@@ -1532,7 +1534,8 @@ JavaThread::JavaThread(bool is_attaching_via_jni) :
} }
bool JavaThread::reguard_stack(address cur_sp) { bool JavaThread::reguard_stack(address cur_sp) {
if (_stack_guard_state != stack_guard_yellow_disabled) { if (_stack_guard_state != stack_guard_yellow_disabled
&& _stack_guard_state != stack_guard_reserved_disabled) {
return true; // Stack already guarded or guard pages not needed. return true; // Stack already guarded or guard pages not needed.
} }
@@ -1549,8 +1552,15 @@ bool JavaThread::reguard_stack(address cur_sp) {
// some exception code in c1, c2 or the interpreter isn't unwinding // some exception code in c1, c2 or the interpreter isn't unwinding
// when it should. // when it should.
guarantee(cur_sp > stack_yellow_zone_base(), "not enough space to reguard - increase StackShadowPages"); guarantee(cur_sp > stack_yellow_zone_base(), "not enough space to reguard - increase StackShadowPages");
if (_stack_guard_state == stack_guard_yellow_disabled) {
enable_stack_yellow_zone(); enable_stack_yellow_zone();
if (reserved_stack_activation() != stack_base()) {
set_reserved_stack_activation(stack_base());
}
} else if (_stack_guard_state == stack_guard_reserved_disabled) {
set_reserved_stack_activation(stack_base());
enable_stack_reserved_zone();
}
return true; return true;
} }
@@ -2473,7 +2483,7 @@ void JavaThread::java_resume() {
void JavaThread::create_stack_guard_pages() { void JavaThread::create_stack_guard_pages() {
if (! os::uses_stack_guard_pages() || _stack_guard_state != stack_guard_unused) return; if (! os::uses_stack_guard_pages() || _stack_guard_state != stack_guard_unused) return;
address low_addr = stack_base() - stack_size(); address low_addr = stack_base() - stack_size();
size_t len = (StackYellowPages + StackRedPages) * os::vm_page_size(); size_t len = (StackReservedPages + StackYellowPages + StackRedPages) * os::vm_page_size();
int allocate = os::allocate_stack_guard_pages(); int allocate = os::allocate_stack_guard_pages();
// warning("Guarding at " PTR_FORMAT " for len " SIZE_FORMAT "\n", low_addr, len); // warning("Guarding at " PTR_FORMAT " for len " SIZE_FORMAT "\n", low_addr, len);
@@ -2497,7 +2507,7 @@ void JavaThread::remove_stack_guard_pages() {
assert(Thread::current() == this, "from different thread"); assert(Thread::current() == this, "from different thread");
if (_stack_guard_state == stack_guard_unused) return; if (_stack_guard_state == stack_guard_unused) return;
address low_addr = stack_base() - stack_size(); address low_addr = stack_base() - stack_size();
size_t len = (StackYellowPages + StackRedPages) * os::vm_page_size(); size_t len = (StackReservedPages + StackYellowPages + StackRedPages) * os::vm_page_size();
if (os::allocate_stack_guard_pages()) { if (os::allocate_stack_guard_pages()) {
if (os::remove_stack_guard_pages((char *) low_addr, len)) { if (os::remove_stack_guard_pages((char *) low_addr, len)) {
@@ -2515,6 +2525,44 @@
} }
} }
void JavaThread::enable_stack_reserved_zone() {
assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
assert(_stack_guard_state != stack_guard_enabled, "already enabled");
// The base notation is from the stack's point of view, growing downward.
// We need to adjust it to work correctly with guard_memory()
address base = stack_reserved_zone_base() - stack_reserved_zone_size();
guarantee(base < stack_base(),"Error calculating stack reserved zone");
guarantee(base < os::current_stack_pointer(),"Error calculating stack reserved zone");
if (os::guard_memory((char *) base, stack_reserved_zone_size())) {
_stack_guard_state = stack_guard_enabled;
} else {
warning("Attempt to guard stack reserved zone failed.");
}
enable_register_stack_guard();
}
void JavaThread::disable_stack_reserved_zone() {
assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
assert(_stack_guard_state != stack_guard_reserved_disabled, "already disabled");
// Simply return if called for a thread that does not use guard pages.
if (_stack_guard_state == stack_guard_unused) return;
// The base notation is from the stack's point of view, growing downward.
// We need to adjust it to work correctly with guard_memory()
address base = stack_reserved_zone_base() - stack_reserved_zone_size();
if (os::unguard_memory((char *)base, stack_reserved_zone_size())) {
_stack_guard_state = stack_guard_reserved_disabled;
} else {
warning("Attempt to unguard stack reserved zone failed.");
}
disable_register_stack_guard();
}
void JavaThread::enable_stack_yellow_zone() { void JavaThread::enable_stack_yellow_zone() {
assert(_stack_guard_state != stack_guard_unused, "must be using guard pages."); assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
assert(_stack_guard_state != stack_guard_enabled, "already enabled"); assert(_stack_guard_state != stack_guard_enabled, "already enabled");

View File

@@ -909,6 +909,7 @@ class JavaThread: public Thread {
// State of the stack guard pages for this thread. // State of the stack guard pages for this thread.
enum StackGuardState { enum StackGuardState {
stack_guard_unused, // not needed stack_guard_unused, // not needed
stack_guard_reserved_disabled,
stack_guard_yellow_disabled,// disabled (temporarily) after stack overflow stack_guard_yellow_disabled,// disabled (temporarily) after stack overflow
stack_guard_enabled // enabled stack_guard_enabled // enabled
}; };
@@ -957,6 +958,7 @@ class JavaThread: public Thread {
// Precompute the limit of the stack as used in stack overflow checks. // Precompute the limit of the stack as used in stack overflow checks.
// We load it from here to simplify the stack overflow check in assembly. // We load it from here to simplify the stack overflow check in assembly.
address _stack_overflow_limit; address _stack_overflow_limit;
address _reserved_stack_activation;
// Compiler exception handling (NOTE: The _exception_oop is *NOT* the same as _pending_exception. It is // Compiler exception handling (NOTE: The _exception_oop is *NOT* the same as _pending_exception. It is
// used to temp. parsing values into and out of the runtime system during exception handling for compiled // used to temp. parsing values into and out of the runtime system during exception handling for compiled
@@ -1343,18 +1345,25 @@ class JavaThread: public Thread {
// Stack overflow support // Stack overflow support
inline size_t stack_available(address cur_sp); inline size_t stack_available(address cur_sp);
address stack_reserved_zone_base() {
return stack_yellow_zone_base(); }
size_t stack_reserved_zone_size() {
return StackReservedPages * os::vm_page_size(); }
address stack_yellow_zone_base() { address stack_yellow_zone_base() {
return (address)(stack_base() - return (address)(stack_base() -
(stack_size() - (stack_size() -
(stack_red_zone_size() + stack_yellow_zone_size()))); (stack_red_zone_size() + stack_yellow_zone_size())));
} }
size_t stack_yellow_zone_size() { size_t stack_yellow_zone_size() {
return StackYellowPages * os::vm_page_size(); return StackYellowPages * os::vm_page_size() + stack_reserved_zone_size();
} }
address stack_red_zone_base() { address stack_red_zone_base() {
return (address)(stack_base() - (stack_size() - stack_red_zone_size())); return (address)(stack_base() - (stack_size() - stack_red_zone_size()));
} }
size_t stack_red_zone_size() { return StackRedPages * os::vm_page_size(); } size_t stack_red_zone_size() { return StackRedPages * os::vm_page_size(); }
bool in_stack_reserved_zone(address a) {
return (a <= stack_reserved_zone_base()) && (a >= (address)((intptr_t)stack_reserved_zone_base() - stack_reserved_zone_size()));
}
bool in_stack_yellow_zone(address a) { bool in_stack_yellow_zone(address a) {
return (a <= stack_yellow_zone_base()) && (a >= stack_red_zone_base()); return (a <= stack_yellow_zone_base()) && (a >= stack_red_zone_base());
} }
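Worked example of the new geometry: the reserved zone is the topmost slice of the enlarged yellow zone, so stack_reserved_zone_base() coincides with the yellow zone base and in_stack_reserved_zone() covers the reserved pages directly below it. A small Java sketch of the arithmetic, using made-up values (4 KiB pages, a 1 MiB stack, 1 red / 2 yellow / 1 reserved page):

public class ZoneLayout {
    public static void main(String[] args) {
        long page      = 4096;            // os::vm_page_size(), assumed
        long stackBase = 0x7f0000100000L; // the stack grows down from here
        long stackSize = 1L << 20;        // 1 MiB, made up

        long redSize      = 1 * page;                // StackRedPages
        long reservedSize = 1 * page;                // StackReservedPages
        long yellowSize   = 2 * page + reservedSize; // note: now includes reserved

        long redBase      = stackBase - (stackSize - redSize);
        long yellowBase   = stackBase - (stackSize - (redSize + yellowSize));
        long reservedBase = yellowBase; // stack_reserved_zone_base()

        System.out.printf("red zone:      [%#x, %#x)%n", redBase - redSize, redBase);
        System.out.printf("yellow zone:   [%#x, %#x)%n", redBase, yellowBase);
        System.out.printf("reserved zone: [%#x, %#x)%n",
                          reservedBase - reservedSize, reservedBase);
    }
}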
@@ -1366,6 +1375,8 @@ class JavaThread: public Thread {
void create_stack_guard_pages(); void create_stack_guard_pages();
void remove_stack_guard_pages(); void remove_stack_guard_pages();
void enable_stack_reserved_zone();
void disable_stack_reserved_zone();
void enable_stack_yellow_zone(); void enable_stack_yellow_zone();
void disable_stack_yellow_zone(); void disable_stack_yellow_zone();
void enable_stack_red_zone(); void enable_stack_red_zone();
@@ -1373,7 +1384,16 @@ class JavaThread: public Thread {
inline bool stack_guard_zone_unused(); inline bool stack_guard_zone_unused();
inline bool stack_yellow_zone_disabled(); inline bool stack_yellow_zone_disabled();
inline bool stack_yellow_zone_enabled(); inline bool stack_reserved_zone_disabled();
inline bool stack_guards_enabled();
address reserved_stack_activation() const { return _reserved_stack_activation; }
void set_reserved_stack_activation(address addr) {
assert(_reserved_stack_activation == stack_base()
|| _reserved_stack_activation == NULL
|| addr == stack_base(), "Must not be set twice");
_reserved_stack_activation = addr;
}
// Attempt to reguard the stack after a stack overflow may have occurred. // Attempt to reguard the stack after a stack overflow may have occurred.
// Returns true if (a) guard pages are not needed on this thread, (b) the // Returns true if (a) guard pages are not needed on this thread, (b) the
@@ -1390,6 +1410,7 @@ class JavaThread: public Thread {
void set_stack_overflow_limit() { void set_stack_overflow_limit() {
_stack_overflow_limit = _stack_base - _stack_size + _stack_overflow_limit = _stack_base - _stack_size +
((StackShadowPages + ((StackShadowPages +
StackReservedPages +
StackYellowPages + StackYellowPages +
StackRedPages) * os::vm_page_size()); StackRedPages) * os::vm_page_size());
} }
@@ -1439,6 +1460,7 @@ class JavaThread: public Thread {
static ByteSize stack_overflow_limit_offset() { return byte_offset_of(JavaThread, _stack_overflow_limit); } static ByteSize stack_overflow_limit_offset() { return byte_offset_of(JavaThread, _stack_overflow_limit); }
static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); } static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); }
static ByteSize stack_guard_state_offset() { return byte_offset_of(JavaThread, _stack_guard_state); } static ByteSize stack_guard_state_offset() { return byte_offset_of(JavaThread, _stack_guard_state); }
static ByteSize reserved_stack_activation_offset() { return byte_offset_of(JavaThread, _reserved_stack_activation); }
static ByteSize suspend_flags_offset() { return byte_offset_of(JavaThread, _suspend_flags); } static ByteSize suspend_flags_offset() { return byte_offset_of(JavaThread, _suspend_flags); }
static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); } static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@@ -130,6 +130,10 @@ inline bool JavaThread::stack_yellow_zone_disabled() {
return _stack_guard_state == stack_guard_yellow_disabled; return _stack_guard_state == stack_guard_yellow_disabled;
} }
inline bool JavaThread::stack_reserved_zone_disabled() {
return _stack_guard_state == stack_guard_reserved_disabled;
}
inline size_t JavaThread::stack_available(address cur_sp) { inline size_t JavaThread::stack_available(address cur_sp) {
// This code assumes java stacks grow down // This code assumes java stacks grow down
address low_addr; // Limit on the address for deepest stack depth address low_addr; // Limit on the address for deepest stack depth
@@ -141,7 +145,7 @@ inline size_t JavaThread::stack_available(address cur_sp) {
return cur_sp > low_addr ? cur_sp - low_addr : 0; return cur_sp > low_addr ? cur_sp - low_addr : 0;
} }
inline bool JavaThread::stack_yellow_zone_enabled() { inline bool JavaThread::stack_guards_enabled() {
#ifdef ASSERT #ifdef ASSERT
if (os::uses_stack_guard_pages()) { if (os::uses_stack_guard_pages()) {
assert(_stack_guard_state != stack_guard_unused, "guard pages must be in use"); assert(_stack_guard_state != stack_guard_unused, "guard pages must be in use");

View File

@@ -396,7 +396,7 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
nonstatic_field(Method, _access_flags, AccessFlags) \ nonstatic_field(Method, _access_flags, AccessFlags) \
nonstatic_field(Method, _vtable_index, int) \ nonstatic_field(Method, _vtable_index, int) \
nonstatic_field(Method, _intrinsic_id, u2) \ nonstatic_field(Method, _intrinsic_id, u2) \
nonstatic_field(Method, _flags, u1) \ nonstatic_field(Method, _flags, u2) \
nonproduct_nonstatic_field(Method, _compiled_invocation_count, int) \ nonproduct_nonstatic_field(Method, _compiled_invocation_count, int) \
volatile_nonstatic_field(Method, _code, nmethod*) \ volatile_nonstatic_field(Method, _code, nmethod*) \
nonstatic_field(Method, _i2i_entry, address) \ nonstatic_field(Method, _i2i_entry, address) \

View File

@@ -1,6 +1,6 @@
<?xml version="1.0" encoding="utf-8"?> <?xml version="1.0" encoding="utf-8"?>
<!-- <!--
Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
This code is free software; you can redistribute it and/or modify it This code is free software; you can redistribute it and/or modify it
@@ -109,6 +109,11 @@ Declares a structure type that can be used in other events.
<value type="ADDRESS" field="address" label="Monitor Address" description="Address of object waited on" relation="JAVA_MONITOR_ADDRESS"/> <value type="ADDRESS" field="address" label="Monitor Address" description="Address of object waited on" relation="JAVA_MONITOR_ADDRESS"/>
</event> </event>
<event id="ReservedStackActivation" path="java/reserved_stack_activation" label="Reserved Stack Activation" description="Activation of Reserved Stack Area caused by stack overflow with ReservedStackAccess annotated method in call stack"
has_thread="true" has_stacktrace="true" is_instant="true">
<value type="METHOD" field="method" label="Java Method"/>
</event>
<event id="ClassLoad" path="vm/class/load" label="Class Load" <event id="ClassLoad" path="vm/class/load" label="Class Load"
has_thread="true" has_stacktrace="true" is_instant="false"> has_thread="true" has_stacktrace="true" is_instant="false">
<value type="CLASS" field="loadedClass" label="Loaded Class"/> <value type="CLASS" field="loadedClass" label="Loaded Class"/>

View File

@@ -0,0 +1,235 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test ReservedStackTest
* @run main/othervm -XX:-Inline -XX:CompileCommand=exclude,java/util/concurrent/locks/AbstractOwnableSynchronizer.setExclusiveOwnerThread ReservedStackTest
*/
/* The exclusion of java.util.concurrent.locks.AbstractOwnableSynchronizer.setExclusiveOwnerThread()
* from the compilable methods is required to ensure that the test will be able
* to trigger a StackOverflowError on the right method.
*/
/*
* Notes about this test:
* This test tries to reproduce a rare but nasty corruption bug that
* occurs when a StackOverflowError is thrown in some critical sections
* of the ReentrantLock implementation.
*
* Here's the critical section where a corruption could occur
* (from java.util.concurrent.locks.ReentrantLock.java)
*
* final void lock() {
* if (compareAndSetState(0, 1))
* setExclusiveOwnerThread(Thread.currentThread());
* else
* acquire(1);
* }
*
* The corruption occurs when the compareAndSetState(0, 1)
* successfully updates the status of the lock but the method
* fails to set the owner because of a stack overflow.
* HotSpot checks for stack overflow on method invocations.
* The test must trigger a stack overflow either when
* Thread.currentThread() or setExclusiveOwnerThread() is
* invoked.
*
* The test starts with a recursive invocation loop until a
* first StackOverflowError is thrown; the Error is caught
* and a few dozen frames are exited. Now the thread has
* little free space on its execution stack and will try
* to trigger a stack overflow in the critical section.
* The test has a huge array of ReentrantLock instances.
* The thread invokes a recursive method which, at each
* of its invocations, tries to acquire the next lock
* in the array. The execution continues until a
* StackOverflowError is thrown or the end of the array
* is reached.
* If no StackOverflowError has been thrown, the test
* is not conclusive (recommendation: increase the size
* of the ReentrantLock array).
* The status of all ReentrantLock instances in the array is then checked;
* if a corruption is detected, the test fails, otherwise
* the test passes.
*
* To have a chance that the stack overflow occurs on one
* of the two targeted method invocations, the test is
* repeated in different threads. Each Java thread has a
* random size area allocated at the beginning of its
* stack to prevent false sharing. The test relies on this
* to have different stack alignments when it hits the targeted
* methods (the test could have been written with a native
* method with alloca, but using different Java threads makes
* the test 100% Java).
*
* One additional trick is required to ensure that the stack
* overflow will occur on the Thread.currentThread() getter
* or the setExclusiveOwnerThread() setter.
*
* Potential stack overflows are detected by stack banging,
* at method invocation time.
* In interpreted code, the stack banging performed for the
* lock() method goes further than the stack banging performed
* for the getter or the setter method, so the potential stack
* overflow is detected before entering the critical section.
* In compiled code, the getter and the setter are in-lined,
* so the stack banging is only performed before entering the
* critical section.
* In order to have a stack banging that goes further for the
* getter/setter methods than for the lock() method, the test
* exploits the property that interpreter frames are (much)
* bigger than compiled code frames. When the test is run,
* a compiler option disables the compilation of the
* setExclusiveOwnerThread() method.
*
*/
import java.util.concurrent.locks.ReentrantLock;
public class ReservedStackTest {
private static boolean isWindows() {
return System.getProperty("os.name").toLowerCase().startsWith("win");
}
static class ReentrantLockTest {
private ReentrantLock lockArray[];
// Frame sizes vary a lot between interpreted code and compiled code
// so the lock array has to be big enough to cover all cases.
// If the test fails with the message "Not conclusive test", try
// increasing the LOCK_ARRAY_SIZE value.
private static final int LOCK_ARRAY_SIZE = 8192;
private boolean stackOverflowErrorReceived;
StackOverflowError soe = null;
private int index = -1;
public void initialize() {
lockArray = new ReentrantLock[LOCK_ARRAY_SIZE];
for (int i = 0; i < LOCK_ARRAY_SIZE; i++) {
lockArray[i] = new ReentrantLock();
}
stackOverflowErrorReceived = false;
}
public String getResult() {
if (!stackOverflowErrorReceived) {
return "ERROR: Not conclusive test: no StackOverflowError received";
}
for (int i = 0; i < LOCK_ARRAY_SIZE; i++) {
if (lockArray[i].isLocked()) {
if (!lockArray[i].isHeldByCurrentThread()) {
StringBuilder s = new StringBuilder();
s.append("FAILED: ReentrantLock ");
s.append(i);
s.append(" looks corrupted");
return s.toString();
}
}
}
return "PASSED";
}
public void run() {
try {
lockAndCall(0);
} catch (StackOverflowError e) {
soe = e;
stackOverflowErrorReceived = true;
}
}
private void lockAndCall(int i) {
index = i;
if (i < LOCK_ARRAY_SIZE) {
lockArray[i].lock();
lockAndCall(i + 1);
}
}
}
static class RunWithSOEContext implements Runnable {
int counter;
int deframe;
int decounter;
int setupSOEFrame;
int testStartFrame;
ReentrantLockTest test;
public RunWithSOEContext(ReentrantLockTest test, int deframe) {
this.test = test;
this.deframe = deframe;
}
@Override
@jdk.internal.vm.annotation.ReservedStackAccess
public void run() {
counter = 0;
decounter = deframe;
test.initialize();
recursiveCall();
System.out.println("Framework got StackOverflowError at frame = " + counter);
System.out.println("Test started execution at frame = " + (counter - deframe));
String result = test.getResult();
System.out.println(result);
// The feature is not fully implemented on Windows platforms,
// so corruptions are still possible
if (!isWindows() && !result.contains("PASSED")) {
System.exit(-1);
}
}
void recursiveCall() {
// Unused local variables to increase the frame size
long l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15, l16, l17, l18, l19;
long l20, l21, l22, l23, l24, l25, l26, l27, l28, l30, l31, l32, l33, l34, l35, l36, l37;
counter++;
try {
recursiveCall();
} catch (StackOverflowError e) {
}
decounter--;
if (decounter == 0) {
setupSOEFrame = counter;
testStartFrame = counter - deframe;
test.run();
}
}
}
public static void main(String[] args) {
for (int i = 0; i < 1000; i++) {
// Each iteration has to be executed by a new thread. The test
// relies on the random size area pushed by the VM at the beginning
// of the stack of each Java thread it creates.
Thread thread = new Thread(new RunWithSOEContext(new ReentrantLockTest(), 256));
thread.start();
try {
thread.join();
} catch (InterruptedException ex) { }
}
}
}