8253717: Relocate stack overflow code out of thread.hpp/cpp
Reviewed-by: rehn, dcubed, dholmes, stuefe
Parent: 782d45bdec
Commit: 6bc493188b
Changed files:
  src/hotspot/cpu/{aarch64, arm, ppc, s390, zero}
  src/hotspot/cpu/x86:
      interp_masm_x86.cpp, macroAssembler_x86.cpp, sharedRuntime_x86_32.cpp,
      sharedRuntime_x86_64.cpp, templateInterpreterGenerator_x86.cpp
  src/hotspot/os
  src/hotspot/os_cpu/{aix_ppc, bsd_x86, linux_aarch64, linux_arm, linux_ppc,
      linux_s390, linux_x86, linux_zero}
  src/hotspot/share/{asm, c1, interpreter, jvmci, opto, prims, runtime}
  src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot
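
The pattern applied throughout the diffs below: the static stack zone sizes move
from JavaThread to a new StackOverflow class, and per-thread guard-zone state is
reached through a new stack_overflow_state() accessor. A minimal illustrative
sketch (not part of the commit) of the two call-site rewrites:

    // Zone sizes: JavaThread::stack_shadow_zone_size()
    //   becomes   StackOverflow::stack_shadow_zone_size()

    // Guard-zone state queries and updates go through the per-thread object:
    StackOverflow* overflow_state = thread->stack_overflow_state();
    if (overflow_state->in_stack_red_zone(addr)) {
      overflow_state->disable_stack_red_zone();
    }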
@@ -4392,7 +4392,7 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
   // was post-decremented.) Skip this address by starting at i=1, and
   // touch a few more pages below. N.B. It is important to touch all
   // the way down to and including i=StackShadowPages.
-  for (int i = 0; i < (int)(JavaThread::stack_shadow_zone_size() / os::vm_page_size()) - 1; i++) {
+  for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / os::vm_page_size()) - 1; i++) {
     // this could be any sized move but this is can be a debugging crumb
     // so the bigger the better.
     lea(tmp, Address(tmp, -os::vm_page_size()));
@@ -1524,7 +1524,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 
   // Generate stack overflow check
   if (UseStackBanging) {
-    __ bang_stack_with_offset(JavaThread::stack_shadow_zone_size());
+    __ bang_stack_with_offset(StackOverflow::stack_shadow_zone_size());
   } else {
     Unimplemented();
   }
@@ -1893,7 +1893,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   Label reguard;
   Label reguard_done;
   __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
-  __ cmpw(rscratch1, JavaThread::stack_guard_yellow_reserved_disabled);
+  __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
   __ br(Assembler::EQ, reguard);
   __ bind(reguard_done);
 
@@ -1120,7 +1120,7 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
   // an interpreter frame with greater than a page of locals, so each page
   // needs to be checked. Only true for non-native.
   if (UseStackBanging) {
-    const int n_shadow_pages = JavaThread::stack_shadow_zone_size() / os::vm_page_size();
+    const int n_shadow_pages = StackOverflow::stack_shadow_zone_size() / os::vm_page_size();
     const int start_page = native_call ? n_shadow_pages : 1;
     const int page_size = os::vm_page_size();
     for (int pages = start_page; pages <= n_shadow_pages ; pages++) {
@@ -1445,7 +1445,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
   Label no_reguard;
   __ lea(rscratch1, Address(rthread, in_bytes(JavaThread::stack_guard_state_offset())));
   __ ldrw(rscratch1, Address(rscratch1));
-  __ cmp(rscratch1, (u1)JavaThread::stack_guard_yellow_reserved_disabled);
+  __ cmp(rscratch1, (u1)StackOverflow::stack_guard_yellow_reserved_disabled);
   __ br(Assembler::NE, no_reguard);
 
   __ pusha(); // XXX only save smashed registers
@@ -994,7 +994,7 @@ void MacroAssembler::arm_stack_overflow_check(int frame_size_in_bytes, Register
   if (UseStackBanging) {
     const int page_size = os::vm_page_size();
 
-    sub_slow(tmp, SP, JavaThread::stack_shadow_zone_size());
+    sub_slow(tmp, SP, StackOverflow::stack_shadow_zone_size());
     strb(R0, Address(tmp));
     for (; frame_size_in_bytes >= page_size; frame_size_in_bytes -= 0xff0) {
       strb(R0, Address(tmp, -0xff0, pre_indexed));
@@ -1007,7 +1007,7 @@ void MacroAssembler::arm_stack_overflow_check(Register Rsize, Register tmp) {
     Label loop;
 
     mov(tmp, SP);
-    add_slow(Rsize, Rsize, JavaThread::stack_shadow_zone_size() - os::vm_page_size());
+    add_slow(Rsize, Rsize, StackOverflow::stack_shadow_zone_size() - os::vm_page_size());
     bind(loop);
     subs(Rsize, Rsize, 0xff0);
     strb(R0, Address(tmp, -0xff0, pre_indexed));
@@ -1238,7 +1238,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   __ ldr_s32(R2, Address(Rthread, JavaThread::stack_guard_state_offset()));
   __ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));
 
-  __ cmp(R2, JavaThread::stack_guard_yellow_reserved_disabled);
+  __ cmp(R2, StackOverflow::stack_guard_yellow_reserved_disabled);
   __ b(reguard, eq);
   __ bind(reguard_done);
 
@@ -485,10 +485,10 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
   const int overhead_size = (frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset)*wordSize + entry_size;
 
   // Pages reserved for VM runtime calls and subsequent Java calls.
-  const int reserved_pages = JavaThread::stack_shadow_zone_size();
+  const int reserved_pages = StackOverflow::stack_shadow_zone_size();
 
   // Thread::stack_size() includes guard pages, and they should not be touched.
-  const int guard_pages = JavaThread::stack_guard_zone_size();
+  const int guard_pages = StackOverflow::stack_guard_zone_size();
 
   __ ldr(R0, Address(Rthread, Thread::stack_base_offset()));
   __ ldr(R1, Address(Rthread, Thread::stack_size_offset()));
@@ -1016,7 +1016,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
   // reguard stack if StackOverflow exception happened while in native.
   {
     __ ldr_u32(Rtemp, Address(Rthread, JavaThread::stack_guard_state_offset()));
-    __ cmp_32(Rtemp, JavaThread::stack_guard_yellow_reserved_disabled);
+    __ cmp_32(Rtemp, StackOverflow::stack_guard_yellow_reserved_disabled);
     __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::none, eq);
 #if R9_IS_SCRATCHED
     __ restore_method();
@@ -1427,7 +1427,7 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
     // insert the code of generate_stack_overflow_check(), see
     // assembler.cpp for some illuminative comments.
     const int page_size = os::vm_page_size();
-    int bang_end = JavaThread::stack_shadow_zone_size();
+    int bang_end = StackOverflow::stack_shadow_zone_size();
 
     // This is how far the previous frame's stack banging extended.
     const int bang_end_safe = bang_end;
@@ -2507,7 +2507,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
 
   Label no_reguard;
   __ lwz(r_temp_1, thread_(stack_guard_state));
-  __ cmpwi(CCR0, r_temp_1, JavaThread::stack_guard_yellow_reserved_disabled);
+  __ cmpwi(CCR0, r_temp_1, StackOverflow::stack_guard_yellow_reserved_disabled);
   __ bne(CCR0, no_reguard);
 
   save_native_result(masm, ret_type, workspace_slot_offset);
@@ -1186,7 +1186,7 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
   // needs to be checked. Only true for non-native.
   if (UseStackBanging) {
     const int page_size = os::vm_page_size();
-    const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
+    const int n_shadow_pages = ((int)StackOverflow::stack_shadow_zone_size()) / page_size;
     const int start_page = native_call ? n_shadow_pages : 1;
     BLOCK_COMMENT("bang_stack_shadow_pages:");
     for (int pages = start_page; pages <= n_shadow_pages; pages++) {
@@ -2209,8 +2209,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
 
   Label no_reguard;
 
-  __ z_cli(Address(Z_thread, JavaThread::stack_guard_state_offset() + in_ByteSize(sizeof(JavaThread::StackGuardState) - 1)),
-           JavaThread::stack_guard_yellow_reserved_disabled);
+  __ z_cli(Address(Z_thread, JavaThread::stack_guard_state_offset() + in_ByteSize(sizeof(StackOverflow::StackGuardState) - 1)),
+           StackOverflow::stack_guard_yellow_reserved_disabled);
 
   __ z_bre(no_reguard);
 
@@ -2067,7 +2067,7 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
   // needs to be checked. Only true for non-native. For native, we only bang the last page.
   if (UseStackBanging) {
     const int page_size = os::vm_page_size();
-    const int n_shadow_pages = (int)(JavaThread::stack_shadow_zone_size()/page_size);
+    const int n_shadow_pages = (int)(StackOverflow::stack_shadow_zone_size()/page_size);
     const int start_page_num = native_call ? n_shadow_pages : 1;
     for (int pages = start_page_num; pages <= n_shadow_pages; pages++) {
       __ bang_stack_with_offset(pages*page_size);
@@ -1130,7 +1130,7 @@ void InterpreterMacroAssembler::remove_activation(
 
     NOT_LP64(get_thread(rthread);)
 
-    cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_enabled);
+    cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_enabled);
     jcc(Assembler::equal, no_reserved_zone_enabling);
 
     cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset()));
@@ -1058,7 +1058,7 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
   // was post-decremented.) Skip this address by starting at i=1, and
   // touch a few more pages below. N.B. It is important to touch all
   // the way down including all pages in the shadow zone.
-  for (int i = 1; i < ((int)JavaThread::stack_shadow_zone_size() / os::vm_page_size()); i++) {
+  for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / os::vm_page_size()); i++) {
     // this could be any sized move but this is can be a debugging crumb
     // so the bigger the better.
     movptr(Address(tmp, (-i*os::vm_page_size())), size );
@@ -1868,7 +1868,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   // Generate stack overflow check
 
   if (UseStackBanging) {
-    __ bang_stack_with_offset((int)JavaThread::stack_shadow_zone_size());
+    __ bang_stack_with_offset((int)StackOverflow::stack_shadow_zone_size());
   } else {
     // need a 5 byte instruction to allow MT safe patching to non-entrant
     __ fat_nop();
@@ -2279,7 +2279,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 
   Label reguard;
   Label reguard_done;
-  __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_reserved_disabled);
+  __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
   __ jcc(Assembler::equal, reguard);
 
   // slow path reguard re-enters here
@@ -2174,7 +2174,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   // Generate stack overflow check
 
   if (UseStackBanging) {
-    __ bang_stack_with_offset((int)JavaThread::stack_shadow_zone_size());
+    __ bang_stack_with_offset((int)StackOverflow::stack_shadow_zone_size());
   } else {
     // need a 5 byte instruction to allow MT safe patching to non-entrant
     __ fat_nop();
@@ -2638,7 +2638,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 
   Label reguard;
   Label reguard_done;
-  __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_reserved_disabled);
+  __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
   __ jcc(Assembler::equal, reguard);
   __ bind(reguard_done);
 
@@ -771,7 +771,7 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
   // needs to be checked. Only true for non-native.
   if (UseStackBanging) {
     const int page_size = os::vm_page_size();
-    const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
+    const int n_shadow_pages = ((int)StackOverflow::stack_shadow_zone_size()) / page_size;
     const int start_page = native_call ? n_shadow_pages : 1;
     for (int pages = start_page; pages <= n_shadow_pages; pages++) {
       __ bang_stack_with_offset(pages*page_size);
@@ -1180,7 +1180,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
   {
     Label no_reguard;
     __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()),
-            JavaThread::stack_guard_yellow_reserved_disabled);
+            StackOverflow::stack_guard_yellow_reserved_disabled);
     __ jcc(Assembler::notEqual, no_reguard);
 
     __ pusha(); // XXX only save smashed registers
@@ -35,7 +35,7 @@
 // Inlined causes circular inclusion with thread.hpp
 ZeroStack::ZeroStack()
   : _base(NULL), _top(NULL), _sp(NULL) {
-  _shadow_pages_size = JavaThread::stack_shadow_zone_size();
+  _shadow_pages_size = StackOverflow::stack_shadow_zone_size();
 }
 
 int ZeroStack::suggest_size(Thread *thread) const {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -49,7 +49,7 @@ inline void ZeroStack::overflow_check(int required_words, TRAPS) {
 inline int ZeroStack::abi_stack_available(Thread *thread) const {
   guarantee(Thread::current() == thread, "should run in the same thread");
   int stack_used = thread->stack_base() - (address) &stack_used
-    + (JavaThread::stack_guard_zone_size() + JavaThread::stack_shadow_zone_size());
+    + (StackOverflow::stack_guard_zone_size() + StackOverflow::stack_shadow_zone_size());
   int stack_free = thread->stack_size() - stack_used;
   return stack_free;
 }
@@ -935,9 +935,10 @@ bool os::create_attached_thread(JavaThread* thread) {
   // enabling yellow zone first will crash JVM on SuSE Linux), so there
   // is no gap between the last two virtual memory regions.
 
-  address addr = thread->stack_reserved_zone_base();
+  StackOverflow* overflow_state = thread->stack_overflow_state();
+  address addr = overflow_state->stack_reserved_zone_base();
   assert(addr != NULL, "initialization problem?");
-  assert(thread->stack_available(addr) > 0, "stack guard should not be enabled");
+  assert(overflow_state->stack_available(addr) > 0, "stack guard should not be enabled");
 
   osthread->set_expanding_stack();
   os::Linux::manually_expand_stack(thread, addr);
@@ -1931,9 +1932,10 @@ void * os::Linux::dll_load_in_vmthread(const char *filename, char *ebuf,
 
   if (!_stack_is_executable) {
     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
-      if (!jt->stack_guard_zone_unused() &&   // Stack not yet fully initialized
-          jt->stack_guards_enabled()) {       // No pending stack overflow exceptions
-        if (!os::guard_memory((char *)jt->stack_end(), jt->stack_guard_zone_size())) {
+      StackOverflow* overflow_state = jt->stack_overflow_state();
+      if (!overflow_state->stack_guard_zone_unused() &&   // Stack not yet fully initialized
+          overflow_state->stack_guards_enabled()) {       // No pending stack overflow exceptions
+        if (!os::guard_memory((char *)jt->stack_end(), overflow_state->stack_guard_zone_size())) {
           warning("Attempt to reguard stack yellow zone failed.");
         }
       }
@@ -5314,7 +5316,7 @@ bool os::start_debugging(char *buf, int buflen) {
 //    |                        |\
 //    |    HotSpot Guard Pages | - red, yellow and reserved pages
 //    |                        |/
-//    +------------------------+ JavaThread::stack_reserved_zone_base()
+//    +------------------------+ StackOverflow::stack_reserved_zone_base()
 //    |                        |\
 //    |      Normal Stack      | -
 //    |                        |/
@@ -800,8 +800,8 @@ jint os::Posix::set_minimum_stack_sizes() {
   size_t os_min_stack_allowed = PTHREAD_STACK_MIN;
 
   _java_thread_min_stack_allowed = _java_thread_min_stack_allowed +
-                                   JavaThread::stack_guard_zone_size() +
-                                   JavaThread::stack_shadow_zone_size();
+                                   StackOverflow::stack_guard_zone_size() +
+                                   StackOverflow::stack_shadow_zone_size();
 
   _java_thread_min_stack_allowed = align_up(_java_thread_min_stack_allowed, vm_page_size());
   _java_thread_min_stack_allowed = MAX2(_java_thread_min_stack_allowed, os_min_stack_allowed);
@@ -824,8 +824,8 @@ jint os::Posix::set_minimum_stack_sizes() {
 
   // Reminder: a compiler thread is a Java thread.
   _compiler_thread_min_stack_allowed = _compiler_thread_min_stack_allowed +
-                                       JavaThread::stack_guard_zone_size() +
-                                       JavaThread::stack_shadow_zone_size();
+                                       StackOverflow::stack_guard_zone_size() +
+                                       StackOverflow::stack_shadow_zone_size();
 
   _compiler_thread_min_stack_allowed = align_up(_compiler_thread_min_stack_allowed, vm_page_size());
   _compiler_thread_min_stack_allowed = MAX2(_compiler_thread_min_stack_allowed, os_min_stack_allowed);
@@ -2473,7 +2473,8 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
 
   // Handle potential stack overflows up front.
   if (exception_code == EXCEPTION_STACK_OVERFLOW) {
-    if (thread->stack_guards_enabled()) {
+    StackOverflow* overflow_state = thread->stack_overflow_state();
+    if (overflow_state->stack_guards_enabled()) {
       if (in_java) {
         frame fr;
         if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
@@ -2485,14 +2486,14 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
         // zone page for us. Note: must call disable_stack_yellow_zone to
         // update the enabled status, even if the zone contains only one page.
         assert(!in_vm, "Undersized StackShadowPages");
-        thread->disable_stack_yellow_reserved_zone();
+        overflow_state->disable_stack_yellow_reserved_zone();
         // If not in java code, return and hope for the best.
         return in_java
             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
             : EXCEPTION_CONTINUE_EXECUTION;
       } else {
         // Fatal red zone violation.
-        thread->disable_stack_red_zone();
+        overflow_state->disable_stack_red_zone();
         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
 #if !defined(USE_VECTORED_EXCEPTION_HANDLING)
         report_error(t, exception_code, pc, exception_record,
@@ -4091,8 +4092,8 @@ jint os::init_2(void) {
   // Add in 4*BytesPerWord 4K pages to account for VM stack during
   // class initialization depending on 32 or 64 bit VM.
   size_t min_stack_allowed =
-          (size_t)(JavaThread::stack_guard_zone_size() +
-                   JavaThread::stack_shadow_zone_size() +
+          (size_t)(StackOverflow::stack_guard_zone_size() +
+                   StackOverflow::stack_shadow_zone_size() +
                    (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
 
   min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,7 +53,7 @@ inline void os::map_stack_shadow_pages(address sp) {
   // the OS may not map an intervening page into our space
   // and may fault on a memory access to interior of our frame.
   const int page_size = os::win32::vm_page_size();
-  const size_t n_pages = JavaThread::stack_shadow_zone_size() / page_size;
+  const size_t n_pages = StackOverflow::stack_shadow_zone_size() / page_size;
   for (size_t pages = 1; pages <= n_pages; pages++) {
     sp -= page_size;
     *sp = 0;
@@ -267,26 +267,28 @@ JVM_handle_aix_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrec
     // Handle ALL stack overflow variations here
     if (sig == SIGSEGV && thread->is_in_full_stack(addr)) {
       // stack overflow
+      StackOverflow* overflow_state = thread->stack_overflow_state();
+
       //
       // If we are in a yellow zone and we are inside java, we disable the yellow zone and
       // throw a stack overflow exception.
       // If we are in native code or VM C code, we report-and-die. The original coding tried
       // to continue with yellow zone disabled, but that doesn't buy us much and prevents
       // hs_err_pid files.
-      if (thread->in_stack_yellow_reserved_zone(addr)) {
+      if (overflow_state->in_stack_yellow_reserved_zone(addr)) {
         if (thread->thread_state() == _thread_in_Java) {
-          if (thread->in_stack_reserved_zone(addr)) {
+          if (overflow_state->in_stack_reserved_zone(addr)) {
             frame fr;
             if (os::Aix::get_frame_at_stack_banging_point(thread, uc, &fr)) {
              assert(fr.is_java_frame(), "Must be a Javac frame");
              frame activation =
                SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
              if (activation.sp() != NULL) {
-               thread->disable_stack_reserved_zone();
+               overflow_state->disable_stack_reserved_zone();
                if (activation.is_interpreted_frame()) {
-                 thread->set_reserved_stack_activation((address)activation.fp());
+                 overflow_state->set_reserved_stack_activation((address)activation.fp());
                } else {
-                 thread->set_reserved_stack_activation((address)activation.unextended_sp());
+                 overflow_state->set_reserved_stack_activation((address)activation.unextended_sp());
                }
                return 1;
              }
@@ -294,18 +296,18 @@ JVM_handle_aix_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrec
         }
         // Throw a stack overflow exception.
         // Guard pages will be reenabled while unwinding the stack.
-        thread->disable_stack_yellow_reserved_zone();
+        overflow_state->disable_stack_yellow_reserved_zone();
         stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
         goto run_stub;
       } else {
         // Thread was in the vm or native code. Return and try to finish.
-        thread->disable_stack_yellow_reserved_zone();
+        overflow_state->disable_stack_yellow_reserved_zone();
         return 1;
       }
-    } else if (thread->in_stack_red_zone(addr)) {
+    } else if (overflow_state->in_stack_red_zone(addr)) {
       // Fatal red zone violation. Disable the guard pages and fall through
       // to handle_unexpected_exception way down below.
-      thread->disable_stack_red_zone();
+      overflow_state->disable_stack_red_zone();
       tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
       goto report_and_die;
     } else {
@@ -495,20 +495,21 @@ JVM_handle_bsd_signal(int sig,
       // check if fault address is within thread stack
       if (thread->is_in_full_stack(addr)) {
         // stack overflow
-        if (thread->in_stack_yellow_reserved_zone(addr)) {
+        StackOverflow* overflow_state = thread->stack_overflow_state();
+        if (overflow_state->in_stack_yellow_reserved_zone(addr)) {
           if (thread->thread_state() == _thread_in_Java) {
-            if (thread->in_stack_reserved_zone(addr)) {
+            if (overflow_state->in_stack_reserved_zone(addr)) {
               frame fr;
               if (os::Bsd::get_frame_at_stack_banging_point(thread, uc, &fr)) {
                 assert(fr.is_java_frame(), "Must be a Java frame");
                 frame activation = SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
                 if (activation.sp() != NULL) {
-                  thread->disable_stack_reserved_zone();
+                  overflow_state->disable_stack_reserved_zone();
                   if (activation.is_interpreted_frame()) {
-                    thread->set_reserved_stack_activation((address)(
+                    overflow_state->set_reserved_stack_activation((address)(
                       activation.fp() + frame::interpreter_frame_initial_sp_offset));
                   } else {
-                    thread->set_reserved_stack_activation((address)activation.unextended_sp());
+                    overflow_state->set_reserved_stack_activation((address)activation.unextended_sp());
                   }
                   return 1;
                 }
@@ -516,17 +517,17 @@ JVM_handle_bsd_signal(int sig,
           }
           // Throw a stack overflow exception. Guard pages will be reenabled
           // while unwinding the stack.
-          thread->disable_stack_yellow_reserved_zone();
+          overflow_state->disable_stack_yellow_reserved_zone();
           stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
         } else {
           // Thread was in the vm or native code. Return and try to finish.
-          thread->disable_stack_yellow_reserved_zone();
+          overflow_state->disable_stack_yellow_reserved_zone();
           return 1;
         }
-      } else if (thread->in_stack_red_zone(addr)) {
+      } else if (overflow_state->in_stack_red_zone(addr)) {
        // Fatal red zone violation. Disable the guard pages and fall through
        // to handle_unexpected_exception way down below.
-       thread->disable_stack_red_zone();
+       overflow_state->disable_stack_red_zone();
        tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
       }
     }
@@ -856,7 +857,7 @@ size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
 //    |                        |\
 //    |    HotSpot Guard Pages | - red, yellow and reserved pages
 //    |                        |/
-//    +------------------------+ JavaThread::stack_reserved_zone_base()
+//    +------------------------+ StackOverflow::stack_reserved_zone_base()
 //    |                        |\
 //    |      Normal Stack      | -
 //    |                        |/
@@ -279,22 +279,23 @@ JVM_handle_linux_signal(int sig,
   if (sig == SIGSEGV) {
     // check if fault address is within thread stack
     if (thread->is_in_full_stack(addr)) {
+      StackOverflow* overflow_state = thread->stack_overflow_state();
       // stack overflow
-      if (thread->in_stack_yellow_reserved_zone(addr)) {
+      if (overflow_state->in_stack_yellow_reserved_zone(addr)) {
        if (thread->thread_state() == _thread_in_Java) {
-          if (thread->in_stack_reserved_zone(addr)) {
+          if (overflow_state->in_stack_reserved_zone(addr)) {
            frame fr;
            if (os::Linux::get_frame_at_stack_banging_point(thread, uc, &fr)) {
              assert(fr.is_java_frame(), "Must be a Java frame");
              frame activation =
                SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
              if (activation.sp() != NULL) {
-               thread->disable_stack_reserved_zone();
+               overflow_state->disable_stack_reserved_zone();
               if (activation.is_interpreted_frame()) {
-                 thread->set_reserved_stack_activation((address)(
+                 overflow_state->set_reserved_stack_activation((address)(
                    activation.fp() + frame::interpreter_frame_initial_sp_offset));
               } else {
-                 thread->set_reserved_stack_activation((address)activation.unextended_sp());
+                 overflow_state->set_reserved_stack_activation((address)activation.unextended_sp());
               }
               return 1;
              }
@@ -302,17 +303,17 @@ JVM_handle_linux_signal(int sig,
         }
         // Throw a stack overflow exception. Guard pages will be reenabled
         // while unwinding the stack.
-        thread->disable_stack_yellow_reserved_zone();
+        overflow_state->disable_stack_yellow_reserved_zone();
         stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
       } else {
         // Thread was in the vm or native code. Return and try to finish.
-        thread->disable_stack_yellow_reserved_zone();
+        overflow_state->disable_stack_yellow_reserved_zone();
         return 1;
       }
-    } else if (thread->in_stack_red_zone(addr)) {
+    } else if (overflow_state->in_stack_red_zone(addr)) {
       // Fatal red zone violation. Disable the guard pages and fall through
       // to handle_unexpected_exception way down below.
-      thread->disable_stack_red_zone();
+      overflow_state->disable_stack_red_zone();
       tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
 
       // This is a likely cause, but hard to verify. Let's just print
@@ -323,8 +323,9 @@ extern "C" int JVM_handle_linux_signal(int sig, siginfo_t* info,
     // check if fault address is within thread stack
     if (thread->is_in_full_stack(addr)) {
       // stack overflow
-      if (thread->in_stack_yellow_reserved_zone(addr)) {
-        thread->disable_stack_yellow_reserved_zone();
+      StackOverflow* overflow_state = thread->stack_overflow_state();
+      if (overflow_state->in_stack_yellow_reserved_zone(addr)) {
+        overflow_state->disable_stack_yellow_reserved_zone();
         if (thread->thread_state() == _thread_in_Java) {
           // Throw a stack overflow exception. Guard pages will be reenabled
           // while unwinding the stack.
@@ -333,10 +334,10 @@ extern "C" int JVM_handle_linux_signal(int sig, siginfo_t* info,
           // Thread was in the vm or native code. Return and try to finish.
           return 1;
         }
-      } else if (thread->in_stack_red_zone(addr)) {
+      } else if (overflow_state->in_stack_red_zone(addr)) {
         // Fatal red zone violation. Disable the guard pages and fall through
         // to handle_unexpected_exception way down below.
-        thread->disable_stack_red_zone();
+        overflow_state->disable_stack_red_zone();
         tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
       } else {
         // Accessing stack address below sp may cause SEGV if current
@@ -327,20 +327,21 @@ JVM_handle_linux_signal(int sig,
     // Check if fault address is within thread stack.
     if (thread->is_in_full_stack(addr)) {
       // stack overflow
-      if (thread->in_stack_yellow_reserved_zone(addr)) {
+      StackOverflow* overflow_state = thread->stack_overflow_state();
+      if (overflow_state->in_stack_yellow_reserved_zone(addr)) {
        if (thread->thread_state() == _thread_in_Java) {
-          if (thread->in_stack_reserved_zone(addr)) {
+          if (overflow_state->in_stack_reserved_zone(addr)) {
            frame fr;
            if (os::Linux::get_frame_at_stack_banging_point(thread, uc, &fr)) {
              assert(fr.is_java_frame(), "Must be a Javac frame");
              frame activation =
                SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
              if (activation.sp() != NULL) {
-               thread->disable_stack_reserved_zone();
+               overflow_state->disable_stack_reserved_zone();
               if (activation.is_interpreted_frame()) {
-                 thread->set_reserved_stack_activation((address)activation.fp());
+                 overflow_state->set_reserved_stack_activation((address)activation.fp());
               } else {
-                 thread->set_reserved_stack_activation((address)activation.unextended_sp());
+                 overflow_state->set_reserved_stack_activation((address)activation.unextended_sp());
               }
               return 1;
              }
@@ -348,17 +349,17 @@ JVM_handle_linux_signal(int sig,
         }
         // Throw a stack overflow exception.
         // Guard pages will be reenabled while unwinding the stack.
-        thread->disable_stack_yellow_reserved_zone();
+        overflow_state->disable_stack_yellow_reserved_zone();
         stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
       } else {
         // Thread was in the vm or native code. Return and try to finish.
-        thread->disable_stack_yellow_reserved_zone();
+        overflow_state->disable_stack_yellow_reserved_zone();
         return 1;
       }
-    } else if (thread->in_stack_red_zone(addr)) {
+    } else if (overflow_state->in_stack_red_zone(addr)) {
       // Fatal red zone violation. Disable the guard pages and fall through
       // to handle_unexpected_exception way down below.
-      thread->disable_stack_red_zone();
+      overflow_state->disable_stack_red_zone();
       tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
 
       // This is a likely cause, but hard to verify. Let's just print
@@ -323,20 +323,21 @@ JVM_handle_linux_signal(int sig,
     // Check if fault address is within thread stack.
     if (thread->is_in_full_stack(addr)) {
       // stack overflow
-      if (thread->in_stack_yellow_reserved_zone(addr)) {
+      StackOverflow* overflow_state = thread->stack_overflow_state();
+      if (overflow_state->in_stack_yellow_reserved_zone(addr)) {
        if (thread->thread_state() == _thread_in_Java) {
-          if (thread->in_stack_reserved_zone(addr)) {
+          if (overflow_state->in_stack_reserved_zone(addr)) {
            frame fr;
            if (os::Linux::get_frame_at_stack_banging_point(thread, uc, &fr)) {
              assert(fr.is_java_frame(), "Must be a Javac frame");
              frame activation =
                SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
              if (activation.sp() != NULL) {
-               thread->disable_stack_reserved_zone();
+               overflow_state->disable_stack_reserved_zone();
               if (activation.is_interpreted_frame()) {
-                 thread->set_reserved_stack_activation((address)activation.fp());
+                 overflow_state->set_reserved_stack_activation((address)activation.fp());
               } else {
-                 thread->set_reserved_stack_activation((address)activation.unextended_sp());
+                 overflow_state->set_reserved_stack_activation((address)activation.unextended_sp());
               }
               return 1;
              }
@@ -344,17 +345,17 @@ JVM_handle_linux_signal(int sig,
         }
         // Throw a stack overflow exception.
         // Guard pages will be reenabled while unwinding the stack.
-        thread->disable_stack_yellow_reserved_zone();
+        overflow_state->disable_stack_yellow_reserved_zone();
         stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
       } else {
         // Thread was in the vm or native code. Return and try to finish.
-        thread->disable_stack_yellow_reserved_zone();
+        overflow_state->disable_stack_yellow_reserved_zone();
         return 1;
       }
-    } else if (thread->in_stack_red_zone(addr)) {
+    } else if (overflow_state->in_stack_red_zone(addr)) {
       // Fatal red zone violation. Disable the guard pages and fall through
       // to handle_unexpected_exception way down below.
-      thread->disable_stack_red_zone();
+      overflow_state->disable_stack_red_zone();
       tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
 
       // This is a likely cause, but hard to verify. Let's just print
@@ -324,21 +324,22 @@ JVM_handle_linux_signal(int sig,
     // check if fault address is within thread stack
     if (thread->is_in_full_stack(addr)) {
       // stack overflow
-      if (thread->in_stack_yellow_reserved_zone(addr)) {
+      StackOverflow* overflow_state = thread->stack_overflow_state();
+      if (overflow_state->in_stack_yellow_reserved_zone(addr)) {
        if (thread->thread_state() == _thread_in_Java) {
-          if (thread->in_stack_reserved_zone(addr)) {
+          if (overflow_state->in_stack_reserved_zone(addr)) {
            frame fr;
            if (os::Linux::get_frame_at_stack_banging_point(thread, uc, &fr)) {
              assert(fr.is_java_frame(), "Must be a Java frame");
              frame activation =
                SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
              if (activation.sp() != NULL) {
-               thread->disable_stack_reserved_zone();
+               overflow_state->disable_stack_reserved_zone();
               if (activation.is_interpreted_frame()) {
-                 thread->set_reserved_stack_activation((address)(
+                 overflow_state->set_reserved_stack_activation((address)(
                    activation.fp() + frame::interpreter_frame_initial_sp_offset));
               } else {
-                 thread->set_reserved_stack_activation((address)activation.unextended_sp());
+                 overflow_state->set_reserved_stack_activation((address)activation.unextended_sp());
               }
               return 1;
              }
@@ -346,17 +347,17 @@ JVM_handle_linux_signal(int sig,
         }
         // Throw a stack overflow exception. Guard pages will be reenabled
         // while unwinding the stack.
-        thread->disable_stack_yellow_reserved_zone();
+        overflow_state->disable_stack_yellow_reserved_zone();
         stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
       } else {
         // Thread was in the vm or native code. Return and try to finish.
-        thread->disable_stack_yellow_reserved_zone();
+        overflow_state->disable_stack_yellow_reserved_zone();
         return 1;
       }
-    } else if (thread->in_stack_red_zone(addr)) {
+    } else if (overflow_state->in_stack_red_zone(addr)) {
       // Fatal red zone violation. Disable the guard pages and fall through
       // to handle_unexpected_exception way down below.
-      thread->disable_stack_red_zone();
+      overflow_state->disable_stack_red_zone();
       tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
 
       // This is a likely cause, but hard to verify. Let's just print
@@ -168,13 +168,14 @@ JVM_handle_linux_signal(int sig,
 
   // check if fault address is within thread stack
   if (thread->is_in_full_stack(addr)) {
+    StackOverflow* overflow_state = thread->stack_overflow_state();
    // stack overflow
-    if (thread->in_stack_yellow_reserved_zone(addr)) {
-      thread->disable_stack_yellow_reserved_zone();
+    if (overflow_state->in_stack_yellow_reserved_zone(addr)) {
+      overflow_state->disable_stack_yellow_reserved_zone();
      ShouldNotCallThis();
    }
-    else if (thread->in_stack_red_zone(addr)) {
-      thread->disable_stack_red_zone();
+    else if (overflow_state->in_stack_red_zone(addr)) {
+      overflow_state->disable_stack_red_zone();
      ShouldNotCallThis();
    }
    else {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -135,7 +135,7 @@ void AbstractAssembler::generate_stack_overflow_check(int frame_size_in_bytes) {
   // is greater than a page.
 
   const int page_size = os::vm_page_size();
-  int bang_end = (int)JavaThread::stack_shadow_zone_size();
+  int bang_end = (int)StackOverflow::stack_shadow_zone_size();
 
   // This is how far the previous frame's stack banging extended.
   const int bang_end_safe = bang_end;
@@ -527,8 +527,7 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* t
   // Check the stack guard pages and reenable them if necessary and there is
   // enough space on the stack to do so. Use fast exceptions only if the guard
   // pages are enabled.
-  bool guard_pages_enabled = thread->stack_guards_enabled();
-  if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
+  bool guard_pages_enabled = thread->stack_overflow_state()->reguard_stack_if_needed();
 
   if (JvmtiExport::can_post_on_exceptions()) {
     // To ensure correct notification of exception catches and throws
@@ -548,7 +548,7 @@ JRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThrea
 
   address continuation = NULL;
   address handler_pc = NULL;
-  if (handler_bci < 0 || !thread->reguard_stack((address) &continuation)) {
+  if (handler_bci < 0 || !thread->stack_overflow_state()->reguard_stack((address) &continuation)) {
     // Forward exception to callee (leaving bci/bcp untouched) because (a) no
     // handler in this method, or (b) after a stack overflow there is not yet
     // enough stack space available to reprotect the stack.
@@ -267,8 +267,7 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* t
   // Check the stack guard pages and reenable them if necessary and there is
   // enough space on the stack to do so. Use fast exceptions only if the guard
   // pages are enabled.
-  bool guard_pages_enabled = thread->stack_guards_enabled();
-  if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
+  bool guard_pages_enabled = thread->stack_overflow_state()->reguard_stack_if_needed();
 
   if (JvmtiExport::can_post_on_exceptions()) {
     // To ensure correct notification of exception catches and throws
@@ -180,7 +180,7 @@
   nonstatic_field(JavaThread, _jvmci_counters, jlong*) \
   nonstatic_field(JavaThread, _should_post_on_exceptions_flag, int) \
   nonstatic_field(JavaThread, _jni_environment, JNIEnv) \
-  nonstatic_field(JavaThread, _reserved_stack_activation, address) \
+  nonstatic_field(JavaThread, _stack_overflow_state._reserved_stack_activation, address) \
   \
   static_field(java_lang_Class, _klass_offset, int) \
   static_field(java_lang_Class, _array_klass_offset, int) \
@@ -1344,7 +1344,7 @@ JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* t
   // otherwise, forcibly unwind the frame.
   //
   // 4826555: use default current sp for reguard_stack instead of &nm: it's more accurate.
-  bool force_unwind = !thread->reguard_stack();
+  bool force_unwind = !thread->stack_overflow_state()->reguard_stack();
   bool deopting = false;
   if (nm->is_deopt_pc(pc)) {
     deopting = true;
@@ -3964,7 +3964,7 @@ static jint attach_current_thread(JavaVM *vm, void **penv, void *_args, bool dae
     return JNI_ERR;
   }
   // Enable stack overflow checks
-  thread->create_stack_guard_pages();
+  thread->stack_overflow_state()->create_stack_guard_pages();
 
   thread->initialize_tlab();
 
@@ -3683,7 +3683,7 @@ jclass find_class_from_class_loader(JNIEnv* env, Symbol* name, jboolean init,
 JVM_ENTRY(jobject, JVM_InvokeMethod(JNIEnv *env, jobject method, jobject obj, jobjectArray args0))
   JVMWrapper("JVM_InvokeMethod");
   Handle method_handle;
-  if (thread->stack_available((address) &method_handle) >= JVMInvokeMethodSlack) {
+  if (thread->stack_overflow_state()->stack_available((address) &method_handle) >= JVMInvokeMethodSlack) {
     method_handle = Handle(THREAD, JNIHandles::resolve(method));
     Handle receiver(THREAD, JNIHandles::resolve(obj));
     objArrayHandle args(THREAD, objArrayOop(JNIHandles::resolve(args0)));
@@ -1685,7 +1685,8 @@ WB_ENTRY(jlong, WB_GetThreadStackSize(JNIEnv* env, jobject o))
 WB_END
 
 WB_ENTRY(jlong, WB_GetThreadRemainingStackSize(JNIEnv* env, jobject o))
-  return (jlong) thread->stack_available(os::current_stack_pointer()) - (jlong)JavaThread::stack_shadow_zone_size();
+  return (jlong) thread->stack_overflow_state()->stack_available(
+           os::current_stack_pointer()) - (jlong)StackOverflow::stack_shadow_zone_size();
 WB_END
 
 
@@ -1689,8 +1689,7 @@ void Deoptimization::load_class_by_index(const constantPoolHandle& constant_pool
     // stack otherwise if we return to the uncommon trap blob and the
     // stack bang causes a stack overflow we crash.
     JavaThread* jt = THREAD->as_Java_thread();
-    bool guard_pages_enabled = jt->stack_guards_enabled();
-    if (!guard_pages_enabled) guard_pages_enabled = jt->reguard_stack();
+    bool guard_pages_enabled = jt->stack_overflow_state()->reguard_stack_if_needed();
     assert(guard_pages_enabled, "stack banging in uncommon trap blob may cause crash");
   }
   return;
@@ -169,8 +169,8 @@ class ThreadInVMfromJava : public ThreadStateTransition {
     trans_from_java(_thread_in_vm);
   }
   ~ThreadInVMfromJava() {
-    if (_thread->stack_yellow_reserved_zone_disabled()) {
-      _thread->enable_stack_yellow_reserved_zone();
+    if (_thread->stack_overflow_state()->stack_yellow_reserved_zone_disabled()) {
+      _thread->stack_overflow_state()->enable_stack_yellow_reserved_zone();
     }
     trans(_thread_in_vm, _thread_in_Java);
     // Check for pending. async. exceptions or suspends.
@@ -309,8 +309,8 @@ class ThreadInVMfromJavaNoAsyncException : public ThreadStateTransition {
     trans_from_java(_thread_in_vm);
   }
   ~ThreadInVMfromJavaNoAsyncException() {
-    if (_thread->stack_yellow_reserved_zone_disabled()) {
-      _thread->enable_stack_yellow_reserved_zone();
+    if (_thread->stack_overflow_state()->stack_yellow_reserved_zone_disabled()) {
+      _thread->stack_overflow_state()->enable_stack_yellow_reserved_zone();
    }
    trans(_thread_in_vm, _thread_in_Java);
    // NOTE: We do not check for pending. async. exceptions.
@@ -378,9 +378,7 @@ void JavaCalls::call_helper(JavaValue* result, const methodHandle& method, JavaC
 
   // When we reenter Java, we need to reenable the reserved/yellow zone which
   // might already be disabled when we are in VM.
-  if (!thread->stack_guards_enabled()) {
-    thread->reguard_stack();
-  }
+  thread->stack_overflow_state()->reguard_stack_if_needed();
 
   // Check that there are shadow pages available before changing thread state
   // to Java. Calculate current_stack_pointer here to make sure
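
Note: this hunk, like the exception_handler_for_pc_helper and load_class_by_index
hunks above, collapses the old two-step idiom (query stack_guards_enabled(), then
call reguard_stack()) into reguard_stack_if_needed(). The helper's definition is
not part of this diff (it lands in the new stackOverflow.hpp); a minimal sketch
inferred from the replaced call sites, offered as an assumption rather than the
verbatim source:

    // Assumed shape, reconstructed from the old call-site idiom:
    //   bool enabled = stack_guards_enabled();
    //   if (!enabled) enabled = reguard_stack();
    bool StackOverflow::reguard_stack_if_needed() {
      return stack_guards_enabled() ? true : reguard_stack();
    }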
@@ -454,13 +454,7 @@ void os::init_before_ergo() {
   // decisions depending on large page support and the calculated large page size.
   large_page_init();
 
-  // We need to adapt the configured number of stack protection pages given
-  // in 4K pages to the actual os page size. We must do this before setting
-  // up minimal stack sizes etc. in os::init_2().
-  JavaThread::set_stack_red_zone_size     (align_up(StackRedPages      * 4 * K, vm_page_size()));
-  JavaThread::set_stack_yellow_zone_size  (align_up(StackYellowPages   * 4 * K, vm_page_size()));
-  JavaThread::set_stack_reserved_zone_size(align_up(StackReservedPages * 4 * K, vm_page_size()));
-  JavaThread::set_stack_shadow_zone_size  (align_up(StackShadowPages   * 4 * K, vm_page_size()));
+  StackOverflow::initialize_stack_zone_sizes();
 
   // VM version initialization identifies some characteristics of the
   // platform that are used during ergonomic decisions.
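
Note: the four zone-size assignments deleted here, together with their
explanatory comment, reappear essentially verbatim inside
StackOverflow::initialize_stack_zone_sizes() in the new stackOverflow.cpp
shown further below.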
@@ -1376,7 +1370,7 @@ bool os::stack_shadow_pages_available(Thread *thread, const methodHandle& method
                    Interpreter::size_top_interpreter_activation(method()) * wordSize;
 
   address limit = thread->as_Java_thread()->stack_end() +
-                  (JavaThread::stack_guard_zone_size() + JavaThread::stack_shadow_zone_size());
+                  (StackOverflow::stack_guard_zone_size() + StackOverflow::stack_shadow_zone_size());
 
   return sp > (limit + framesize_in_bytes);
 }
@@ -480,10 +480,10 @@ address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thre
       // unguarded. Reguard the stack otherwise if we return to the
       // deopt blob and the stack bang causes a stack overflow we
       // crash.
-      bool guard_pages_enabled = thread->stack_guards_enabled();
-      if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
-      if (thread->reserved_stack_activation() != thread->stack_base()) {
-        thread->set_reserved_stack_activation(thread->stack_base());
+      StackOverflow* overflow_state = thread->stack_overflow_state();
+      bool guard_pages_enabled = overflow_state->reguard_stack_if_needed();
+      if (overflow_state->reserved_stack_activation() != thread->stack_base()) {
+        overflow_state->set_reserved_stack_activation(thread->stack_base());
       }
       assert(guard_pages_enabled, "stack banging in deopt blob may cause crash");
       return SharedRuntime::deopt_blob()->unpack_with_exception();
@@ -2065,7 +2065,7 @@ char* SharedRuntime::generate_class_cast_message(
 }
 
 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
-  (void) JavaThread::current()->reguard_stack();
+  (void) JavaThread::current()->stack_overflow_state()->reguard_stack();
 JRT_END
 
 void SharedRuntime::monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* thread) {
@@ -3158,10 +3158,9 @@ void AdapterHandlerLibrary::print_statistics() {
 #endif /* PRODUCT */
 
 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* thread))
-  if (thread->stack_reserved_zone_disabled()) {
-    thread->enable_stack_reserved_zone();
-  }
-  thread->set_reserved_stack_activation(thread->stack_base());
+  StackOverflow* overflow_state = thread->stack_overflow_state();
+  overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
+  overflow_state->set_reserved_stack_activation(thread->stack_base());
 JRT_END
 
 frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* thread, frame fr) {
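
Note: the explicit disabled-state test moves into the callee here.
StackOverflow::enable_stack_reserved_zone() now takes a check_if_disabled flag
(see its definition in the new stackOverflow.cpp below), so this JRT leaf no
longer probes the guard state itself.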
src/hotspot/share/runtime/stackOverflow.cpp (new file, 274 lines)
@@ -0,0 +1,274 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "logging/log.hpp"
+#include "runtime/os.inline.hpp"
+#include "runtime/stackOverflow.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/align.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+size_t StackOverflow::_stack_red_zone_size = 0;
+size_t StackOverflow::_stack_yellow_zone_size = 0;
+size_t StackOverflow::_stack_reserved_zone_size = 0;
+size_t StackOverflow::_stack_shadow_zone_size = 0;
+
+void StackOverflow::initialize_stack_zone_sizes() {
+  // Stack zone sizes must be page aligned.
+  size_t page_size = os::vm_page_size();
+
+  // We need to adapt the configured number of stack protection pages given
+  // in 4K pages to the actual os page size. We must do this before setting
+  // up minimal stack sizes etc. in os::init_2().
+  size_t alignment = 4*K;
+
+  assert(_stack_red_zone_size == 0, "This should be called only once.");
+  _stack_red_zone_size = align_up(StackRedPages * alignment, page_size);
+
+  assert(_stack_yellow_zone_size == 0, "This should be called only once.");
+  _stack_yellow_zone_size = align_up(StackYellowPages * alignment, page_size);
+
+  assert(_stack_reserved_zone_size == 0, "This should be called only once.");
+  _stack_reserved_zone_size = align_up(StackReservedPages * alignment, page_size);
+
+  // The shadow area is not allocated or protected, so
+  // it needs not be page aligned.
+  // But the stack bang currently assumes that it is a
+  // multiple of page size. This guarantees that the bang
+  // loop touches all pages in the shadow zone.
+  // This can be guaranteed differently, as well. E.g., if
+  // the page size is a multiple of 4K, banging in 4K steps
+  // suffices to touch all pages. (Some pages are banged
+  // several times, though.)
+  assert(_stack_shadow_zone_size == 0, "This should be called only once.");
+  _stack_shadow_zone_size = align_up(StackShadowPages * alignment, page_size);
+}
+
+bool StackOverflow::stack_guards_enabled() const {
+#ifdef ASSERT
+  if (os::uses_stack_guard_pages() &&
+      !(DisablePrimordialThreadGuardPages && os::is_primordial_thread())) {
+    assert(_stack_guard_state != stack_guard_unused, "guard pages must be in use");
+  }
+#endif
+  return _stack_guard_state == stack_guard_enabled;
+}
+
+void StackOverflow::create_stack_guard_pages() {
+  if (!os::uses_stack_guard_pages() ||
+      _stack_guard_state != stack_guard_unused ||
+      (DisablePrimordialThreadGuardPages && os::is_primordial_thread())) {
+    log_info(os, thread)("Stack guard page creation for thread "
+                         UINTX_FORMAT " disabled", os::current_thread_id());
+    return;
+  }
+  address low_addr = stack_end();
+  size_t len = stack_guard_zone_size();
+
+  assert(is_aligned(low_addr, os::vm_page_size()), "Stack base should be the start of a page");
+  assert(is_aligned(len, os::vm_page_size()), "Stack size should be a multiple of page size");
+
+  int must_commit = os::must_commit_stack_guard_pages();
+  // warning("Guarding at " PTR_FORMAT " for len " SIZE_FORMAT "\n", low_addr, len);
+
+  if (must_commit && !os::create_stack_guard_pages((char *) low_addr, len)) {
+    log_warning(os, thread)("Attempt to allocate stack guard pages failed.");
+    return;
+  }
+
+  if (os::guard_memory((char *) low_addr, len)) {
+    _stack_guard_state = stack_guard_enabled;
+  } else {
+    log_warning(os, thread)("Attempt to protect stack guard pages failed ("
+      PTR_FORMAT "-" PTR_FORMAT ").", p2i(low_addr), p2i(low_addr + len));
+    if (os::uncommit_memory((char *) low_addr, len)) {
+      log_warning(os, thread)("Attempt to deallocate stack guard pages failed.");
+    }
+    return;
+  }
+
+  log_debug(os, thread)("Thread " UINTX_FORMAT " stack guard pages activated: "
+    PTR_FORMAT "-" PTR_FORMAT ".",
+    os::current_thread_id(), p2i(low_addr), p2i(low_addr + len));
+}
+
+void StackOverflow::remove_stack_guard_pages() {
+  if (_stack_guard_state == stack_guard_unused) return;
+  address low_addr = stack_end();
+  size_t len = stack_guard_zone_size();
+
+  if (os::must_commit_stack_guard_pages()) {
+    if (os::remove_stack_guard_pages((char *) low_addr, len)) {
+      _stack_guard_state = stack_guard_unused;
+    } else {
+      log_warning(os, thread)("Attempt to deallocate stack guard pages failed ("
+        PTR_FORMAT "-" PTR_FORMAT ").", p2i(low_addr), p2i(low_addr + len));
+      return;
+    }
+  } else {
+    if (_stack_guard_state == stack_guard_unused) return;
+    if (os::unguard_memory((char *) low_addr, len)) {
+      _stack_guard_state = stack_guard_unused;
+    } else {
+      log_warning(os, thread)("Attempt to unprotect stack guard pages failed ("
+        PTR_FORMAT "-" PTR_FORMAT ").", p2i(low_addr), p2i(low_addr + len));
+      return;
+    }
+  }
+
+  log_debug(os, thread)("Thread " UINTX_FORMAT " stack guard pages removed: "
+    PTR_FORMAT "-" PTR_FORMAT ".",
+    os::current_thread_id(), p2i(low_addr), p2i(low_addr + len));
+}
+
void StackOverflow::enable_stack_reserved_zone(bool check_if_disabled) {
  if (check_if_disabled && _stack_guard_state == stack_guard_enabled) {
    return;
  }
  assert(_stack_guard_state == stack_guard_reserved_disabled, "inconsistent state");

  // The base notation is from the stack's point of view, growing downward.
  // We need to adjust it to work correctly with guard_memory().
  address base = stack_reserved_zone_base() - stack_reserved_zone_size();

  guarantee(base < stack_base(), "Error calculating stack reserved zone");
  guarantee(base < os::current_stack_pointer(), "Error calculating stack reserved zone");

  if (os::guard_memory((char *) base, stack_reserved_zone_size())) {
    _stack_guard_state = stack_guard_enabled;
  } else {
    warning("Attempt to guard stack reserved zone failed.");
  }
}

void StackOverflow::disable_stack_reserved_zone() {
  assert(_stack_guard_state == stack_guard_enabled, "inconsistent state");

  // Simply return if called for a thread that does not use guard pages.
  if (_stack_guard_state != stack_guard_enabled) return;

  // The base notation is from the stack's point of view, growing downward.
  // We need to adjust it to work correctly with guard_memory().
  address base = stack_reserved_zone_base() - stack_reserved_zone_size();

  if (os::unguard_memory((char *) base, stack_reserved_zone_size())) {
    _stack_guard_state = stack_guard_reserved_disabled;
  } else {
    warning("Attempt to unguard stack reserved zone failed.");
  }
}
void StackOverflow::enable_stack_yellow_reserved_zone() {
  assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
  assert(_stack_guard_state != stack_guard_enabled, "already enabled");

  // The base notation is from the stack's point of view, growing downward.
  // We need to adjust it to work correctly with guard_memory().
  address base = stack_red_zone_base();

  guarantee(base < stack_base(), "Error calculating stack yellow zone");
  guarantee(base < os::current_stack_pointer(), "Error calculating stack yellow zone");

  if (os::guard_memory((char *) base, stack_yellow_reserved_zone_size())) {
    _stack_guard_state = stack_guard_enabled;
  } else {
    warning("Attempt to guard stack yellow zone failed.");
  }
}

void StackOverflow::disable_stack_yellow_reserved_zone() {
  assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
  assert(_stack_guard_state != stack_guard_yellow_reserved_disabled, "already disabled");

  // Simply return if called for a thread that does not use guard pages.
  if (_stack_guard_state == stack_guard_unused) return;

  // The base notation is from the stack's point of view, growing downward.
  // We need to adjust it to work correctly with guard_memory().
  address base = stack_red_zone_base();

  if (os::unguard_memory((char *) base, stack_yellow_reserved_zone_size())) {
    _stack_guard_state = stack_guard_yellow_reserved_disabled;
  } else {
    warning("Attempt to unguard stack yellow zone failed.");
  }
}

void StackOverflow::enable_stack_red_zone() {
  // The base notation is from the stack's point of view, growing downward.
  // We need to adjust it to work correctly with guard_memory().
  assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
  address base = stack_red_zone_base() - stack_red_zone_size();

  guarantee(base < stack_base(), "Error calculating stack red zone");
  guarantee(base < os::current_stack_pointer(), "Error calculating stack red zone");

  if (!os::guard_memory((char *) base, stack_red_zone_size())) {
    warning("Attempt to guard stack red zone failed.");
  }
}

void StackOverflow::disable_stack_red_zone() {
  // The base notation is from the stack's point of view, growing downward.
  // We need to adjust it to work correctly with guard_memory().
  assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
  address base = stack_red_zone_base() - stack_red_zone_size();
  if (!os::unguard_memory((char *) base, stack_red_zone_size())) {
    warning("Attempt to unguard stack red zone failed.");
  }
}
bool StackOverflow::reguard_stack(address cur_sp) {
  if (_stack_guard_state != stack_guard_yellow_reserved_disabled &&
      _stack_guard_state != stack_guard_reserved_disabled) {
    return true; // Stack already guarded or guard pages not needed.
  }

  // Java code never executes within the yellow zone: the latter is only
  // there to provoke an exception during stack banging. If Java code
  // is executing there, either StackShadowPages should be larger, or
  // some exception code in c1, c2 or the interpreter isn't unwinding
  // when it should.
  guarantee(cur_sp > stack_reserved_zone_base(),
            "not enough space to reguard - increase StackShadowPages");
  if (_stack_guard_state == stack_guard_yellow_reserved_disabled) {
    enable_stack_yellow_reserved_zone();
    if (reserved_stack_activation() != stack_base()) {
      set_reserved_stack_activation(stack_base());
    }
  } else if (_stack_guard_state == stack_guard_reserved_disabled) {
    set_reserved_stack_activation(stack_base());
    enable_stack_reserved_zone();
  }
  return true;
}

bool StackOverflow::reguard_stack(void) {
  return reguard_stack(os::current_stack_pointer());
}

bool StackOverflow::reguard_stack_if_needed() {
  return !stack_guards_enabled() ? reguard_stack() : true;
}
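
For orientation (an aside, not part of the patch): os::guard_memory() and os::unguard_memory() are thin wrappers over the platform's page-protection primitive. A minimal sketch of the POSIX shape these wrappers are assumed to take; the real implementations live in the per-platform os files:

#include <sys/mman.h>

// Sketch only: protect a page-aligned region so any access faults, letting
// the signal handler classify the fault by comparing the faulting address
// against the zone bases computed by StackOverflow.
static bool guard_region(char* base, size_t len) {
  return ::mprotect(base, len, PROT_NONE) == 0;
}

static bool unguard_region(char* base, size_t len) {
  return ::mprotect(base, len, PROT_READ | PROT_WRITE) == 0;
}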
src/hotspot/share/runtime/stackOverflow.hpp (new file, 239 lines)
@ -0,0 +1,239 @@
/*
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_STACKOVERFLOW_HPP
#define SHARE_RUNTIME_STACKOVERFLOW_HPP

#include "utilities/align.hpp"
#include "utilities/debug.hpp"

class JavaThread;
// StackOverflow handling is encapsulated in this class. This class contains state variables
// for each JavaThread that are used to detect stack overflow through explicit checks or through
// checks in the signal handler when stack banging into guard pages causes a trap.
// The state variables also record whether guard pages are enabled or disabled.
class StackOverflow {
  friend class JVMCIVMStructs;
  friend class JavaThread;
 public:
  // State of the stack guard pages for the containing thread.
  enum StackGuardState {
    stack_guard_unused,                   // not needed
    stack_guard_reserved_disabled,
    stack_guard_yellow_reserved_disabled, // disabled (temporarily) after stack overflow
    stack_guard_enabled                   // enabled
  };

  StackOverflow() :
    _stack_guard_state(stack_guard_unused),
    _stack_overflow_limit(nullptr),
    _reserved_stack_activation(nullptr), // stack base not known yet
    _stack_base(nullptr), _stack_end(nullptr) {}

  // Initialization after thread is started.
  void initialize(address base, address end) {
    _stack_base = base;
    _stack_end = end;
    set_stack_overflow_limit();
    set_reserved_stack_activation(base);
  }
 private:

  StackGuardState _stack_guard_state;

  // Precompute the limit of the stack as used in stack overflow checks.
  // We load it from here to simplify the stack overflow check in assembly.
  address _stack_overflow_limit;
  address _reserved_stack_activation;

  // Support for stack overflow handling, copied down from thread.
  address _stack_base;
  address _stack_end;

  address stack_end() const { return _stack_end; }
  address stack_base() const { assert(_stack_base != nullptr, "Sanity check"); return _stack_base; }

  // Stack overflow support
  //
  //  (low addresses)
  //
  //  --  <-- stack_end()                   ---
  //  |                                      |
  //  |  red zone                            |
  //  |                                      |
  //  --  <-- stack_red_zone_base()          |
  //  |                                      |
  //  |                                     guard
  //  |  yellow zone                        zone
  //  |                                      |
  //  |                                      |
  //  --  <-- stack_yellow_zone_base()       |
  //  |                                      |
  //  |                                      |
  //  |  reserved zone                       |
  //  |                                      |
  //  --  <-- stack_reserved_zone_base()    ---      ---
  //                                                 /|\  shadow     <--  stack_overflow_limit() (somewhere in here)
  //                                                  |   zone
  //                                                 \|/  size
  //  some untouched memory                          ---
  //
  //
  //  --
  //  |
  //  |  shadow zone
  //  |
  //  --
  //  x    frame n
  //  --
  //  x    frame n-1
  //  x
  //  --
  //  ...
  //
  //  --
  //  x    frame 0
  //  --  <-- stack_base()
  //
  //  (high addresses)
  //
 private:
  // These values are derived from flags StackRedPages, StackYellowPages,
  // StackReservedPages and StackShadowPages.
  static size_t _stack_red_zone_size;
  static size_t _stack_yellow_zone_size;
  static size_t _stack_reserved_zone_size;
  static size_t _stack_shadow_zone_size;

 public:
  static void initialize_stack_zone_sizes();

  static size_t stack_red_zone_size() {
    assert(_stack_red_zone_size > 0, "Don't call this before the field is initialized.");
    return _stack_red_zone_size;
  }

  address stack_red_zone_base() const {
    return (address)(stack_end() + stack_red_zone_size());
  }

  bool in_stack_red_zone(address a) const {
    return a <= stack_red_zone_base() && a >= stack_end();
  }

  static size_t stack_yellow_zone_size() {
    assert(_stack_yellow_zone_size > 0, "Don't call this before the field is initialized.");
    return _stack_yellow_zone_size;
  }

  static size_t stack_reserved_zone_size() {
    // _stack_reserved_zone_size may be 0. This indicates the feature is off.
    return _stack_reserved_zone_size;
  }

  address stack_reserved_zone_base() const {
    return (address)(stack_end() +
                     (stack_red_zone_size() + stack_yellow_zone_size() + stack_reserved_zone_size()));
  }
  bool in_stack_reserved_zone(address a) const {
    return (a <= stack_reserved_zone_base()) &&
           (a >= (address)((intptr_t)stack_reserved_zone_base() - stack_reserved_zone_size()));
  }

  static size_t stack_yellow_reserved_zone_size() {
    return _stack_yellow_zone_size + _stack_reserved_zone_size;
  }
  bool in_stack_yellow_reserved_zone(address a) const {
    return (a <= stack_reserved_zone_base()) && (a >= stack_red_zone_base());
  }

  // Size of red + yellow + reserved zones.
  static size_t stack_guard_zone_size() {
    return stack_red_zone_size() + stack_yellow_reserved_zone_size();
  }

  static size_t stack_shadow_zone_size() {
    assert(_stack_shadow_zone_size > 0, "Don't call this before the field is initialized.");
    return _stack_shadow_zone_size;
  }
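
  // Worked example (hypothetical sizes, for illustration only; the real
  // values come from initialize_stack_zone_sizes() and the Stack*Pages
  // flags): with 4 KiB pages, StackRedPages = 1, StackYellowPages = 2 and
  // StackReservedPages = 1:
  //   stack_red_zone_size()      =  4 KiB
  //   stack_yellow_zone_size()   =  8 KiB
  //   stack_reserved_zone_size() =  4 KiB
  //   stack_guard_zone_size()    =  4 + 8 + 4 = 16 KiB
  // so stack_reserved_zone_base() = stack_end() + 16 KiB, and any address
  // between stack_end() and that base falls in one of the guard zones.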

  void create_stack_guard_pages();
  void remove_stack_guard_pages();

  void enable_stack_reserved_zone(bool check_if_disabled = false);
  void disable_stack_reserved_zone();
  void enable_stack_yellow_reserved_zone();
  void disable_stack_yellow_reserved_zone();
  void enable_stack_red_zone();
  void disable_stack_red_zone();

  bool stack_guard_zone_unused() const { return _stack_guard_state == stack_guard_unused; }

  bool stack_yellow_reserved_zone_disabled() const {
    return _stack_guard_state == stack_guard_yellow_reserved_disabled;
  }

  size_t stack_available(address cur_sp) const {
    // This code assumes Java stacks grow down.
    address low_addr; // Limit on the address for deepest stack depth
    if (_stack_guard_state == stack_guard_unused) {
      low_addr = stack_end();
    } else {
      low_addr = stack_reserved_zone_base();
    }
    return cur_sp > low_addr ? cur_sp - low_addr : 0;
  }

  bool stack_guards_enabled() const;

  address reserved_stack_activation() const { return _reserved_stack_activation; }
  void set_reserved_stack_activation(address addr) {
    assert(_reserved_stack_activation == stack_base()
           || _reserved_stack_activation == nullptr
           || addr == stack_base(), "Must not be set twice");
    _reserved_stack_activation = addr;
  }

  // Attempt to reguard the stack after a stack overflow may have occurred.
  // Returns true if (a) guard pages are not needed on this thread, (b) the
  // pages are already guarded, or (c) the pages were successfully reguarded.
  // Returns false if there is not enough stack space to reguard the pages, in
  // which case the caller should unwind a frame and try again. The argument
  // should be the caller's (approximate) sp.
  bool reguard_stack(address cur_sp);
  // Similar to above, but check whether the current stack pointer is out of
  // the guard area and reguard if possible.
  bool reguard_stack(void);
  bool reguard_stack_if_needed(void);

  void set_stack_overflow_limit() {
    _stack_overflow_limit =
      stack_end() + MAX2(stack_guard_zone_size(), stack_shadow_zone_size());
  }
};

#endif // SHARE_RUNTIME_STACKOVERFLOW_HPP
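
The precomputed _stack_overflow_limit is what keeps the common case cheap: on method entry, generated code only compares the prospective stack pointer against this one thread-local field. A hedged C++ rendering of that test (a sketch of the emitted compare-and-branch, not actual HotSpot code):

typedef unsigned char* address; // stand-in for HotSpot's 'address' typedef

// Sketch: the fast-path overflow test is a single comparison.
inline bool frame_fits(address sp_after_frame, address stack_overflow_limit) {
  // Above the limit: safe to proceed. At or below: take the slow path,
  // which bangs the stack so a guard-zone trap or explicit check fires.
  return sp_after_frame > stack_overflow_limit;
}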
@ -352,8 +352,7 @@ void Thread::record_stack_base_and_size() {

  // Set stack limits after thread is initialized.
  if (is_Java_thread()) {
    as_Java_thread()->set_stack_overflow_limit();
    as_Java_thread()->set_reserved_stack_activation(stack_base());
    as_Java_thread()->stack_overflow_state()->initialize(stack_base(), stack_end());
  }
}

@ -1662,87 +1661,96 @@ bool JavaThread::resize_all_jvmci_counters(int new_size) {

// A JavaThread is a normal Java thread

void JavaThread::initialize() {
JavaThread::JavaThread() :
  // Initialize fields

  set_saved_exception_pc(NULL);
  _anchor.clear();
  set_entry_point(NULL);
  set_jni_functions(jni_functions());
  set_callee_target(NULL);
  set_vm_result(NULL);
  set_vm_result_2(NULL);
  set_vframe_array_head(NULL);
  set_vframe_array_last(NULL);
  set_deferred_locals(NULL);
  set_deopt_mark(NULL);
  set_deopt_compiled_method(NULL);
  set_monitor_chunks(NULL);
  _on_thread_list = false;
  _thread_state = _thread_new;
  _terminated = _not_terminated;
  _suspend_equivalent = false;
  _in_deopt_handler = 0;
  _doing_unsafe_access = false;
  _stack_guard_state = stack_guard_unused;
  _on_thread_list(false),
  DEBUG_ONLY(_java_call_counter(0) COMMA)
  _entry_point(nullptr),
  _deopt_mark(nullptr),
  _deopt_nmethod(nullptr),
  _vframe_array_head(nullptr),
  _vframe_array_last(nullptr),
  _deferred_locals_updates(nullptr),
  _callee_target(nullptr),
  _vm_result(nullptr),
  _vm_result_2(nullptr),

  _monitor_chunks(nullptr),
  _special_runtime_exit_condition(_no_async_condition),
  _pending_async_exception(nullptr),

  _thread_state(_thread_new),
  _saved_exception_pc(nullptr),

  _terminated(_not_terminated),
  _suspend_equivalent(false),
  _in_deopt_handler(0),
  _doing_unsafe_access(false),
  _do_not_unlock_if_synchronized(false),
  _jni_attach_state(_not_attaching_via_jni),
#if INCLUDE_JVMCI
  _pending_monitorenter = false;
  _pending_deoptimization = -1;
  _pending_failed_speculation = 0;
  _pending_transfer_to_interpreter = false;
  _in_retryable_allocation = false;
  _jvmci._alternate_call_target = NULL;
  assert(_jvmci._implicit_exception_pc == NULL, "must be");
  _jvmci_counters = NULL;
  _pending_deoptimization(-1),
  _pending_monitorenter(false),
  _pending_transfer_to_interpreter(false),
  _in_retryable_allocation(false),
  _pending_failed_speculation(0),
  _jvmci{nullptr},
  _jvmci_counters(nullptr),
#endif // INCLUDE_JVMCI

  _exception_oop(oop()),
  _exception_pc(0),
  _exception_handler_pc(0),
  _is_method_handle_return(0),

  _jni_active_critical(0),
  _pending_jni_exception_check_fn(nullptr),
  _depth_first_number(0),

  // JVMTI PopFrame support
  _popframe_condition(popframe_inactive),
  _frames_to_pop_failed_realloc(0),

  _handshake(this),

  _popframe_preserved_args(nullptr),
  _popframe_preserved_args_size(0),

  _jvmti_thread_state(nullptr),
  _interp_only_mode(0),
  _should_post_on_exceptions_flag(JNI_FALSE),
  _thread_stat(new ThreadStatistics()),

  _parker(Parker::Allocate(this)),
  _cached_monitor_info(nullptr),

  _class_to_be_initialized(nullptr),

  _SleepEvent(ParkEvent::Allocate(this))
{

  set_jni_functions(jni_functions());

#if INCLUDE_JVMCI
  assert(_jvmci._implicit_exception_pc == nullptr, "must be");
  if (JVMCICounterSize > 0) {
    resize_counters(0, (int) JVMCICounterSize);
  }
#endif // INCLUDE_JVMCI
  _reserved_stack_activation = NULL; // stack base not known yet
  set_exception_oop(oop());
  _exception_pc = 0;
  _exception_handler_pc = 0;
  _is_method_handle_return = 0;
  _jvmti_thread_state = NULL;
  _should_post_on_exceptions_flag = JNI_FALSE;
  _interp_only_mode = 0;
  _special_runtime_exit_condition = _no_async_condition;
  _pending_async_exception = NULL;
  _thread_stat = NULL;
  _thread_stat = new ThreadStatistics();
  _jni_active_critical = 0;
  _pending_jni_exception_check_fn = NULL;
  _do_not_unlock_if_synchronized = false;
  _cached_monitor_info = NULL;
  _parker = Parker::Allocate(this);
  _SleepEvent = ParkEvent::Allocate(this);

  // Setup safepoint state info for this thread
  ThreadSafepointState::create(this);

  debug_only(_java_call_counter = 0);

  // JVMTI PopFrame support
  _popframe_condition = popframe_inactive;
  _popframe_preserved_args = NULL;
  _popframe_preserved_args_size = 0;
  _frames_to_pop_failed_realloc = 0;

  SafepointMechanism::initialize_header(this);

  _class_to_be_initialized = NULL;

  pd_initialize();
  assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
}

JavaThread::JavaThread(bool is_attaching_via_jni) :
                       Thread(), _handshake(this) {
  initialize();
JavaThread::JavaThread(bool is_attaching_via_jni) : JavaThread() {
  if (is_attaching_via_jni) {
    _jni_attach_state = _attaching_via_jni;
  } else {
    _jni_attach_state = _not_attaching_via_jni;
  }
  assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
}

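The constructor rewrite above is the standard C++11 delegating-constructor pattern; a minimal standalone illustration of the shape (toy types, nothing HotSpot-specific):

struct Worker {
  int  _state;
  bool _attached;
  Worker() : _state(0), _attached(false) {}    // all common init in one list
  explicit Worker(bool attached) : Worker() {  // delegate, then specialize
    _attached = attached;
  }
};

Every field is initialized exactly once, in declaration order, and the specialized constructors can no longer forget to call a separate initialize() helper.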
@ -1803,35 +1811,6 @@ bool JavaThread::is_interrupted(bool clear_interrupted) {
  return interrupted;
}

bool JavaThread::reguard_stack(address cur_sp) {
  if (_stack_guard_state != stack_guard_yellow_reserved_disabled
      && _stack_guard_state != stack_guard_reserved_disabled) {
    return true; // Stack already guarded or guard pages not needed.
  }

  // Java code never executes within the yellow zone: the latter is only
  // there to provoke an exception during stack banging. If java code
  // is executing there, either StackShadowPages should be larger, or
  // some exception code in c1, c2 or the interpreter isn't unwinding
  // when it should.
  guarantee(cur_sp > stack_reserved_zone_base(),
            "not enough space to reguard - increase StackShadowPages");
  if (_stack_guard_state == stack_guard_yellow_reserved_disabled) {
    enable_stack_yellow_reserved_zone();
    if (reserved_stack_activation() != stack_base()) {
      set_reserved_stack_activation(stack_base());
    }
  } else if (_stack_guard_state == stack_guard_reserved_disabled) {
    set_reserved_stack_activation(stack_base());
    enable_stack_reserved_zone();
  }
  return true;
}

bool JavaThread::reguard_stack(void) {
  return reguard_stack(os::current_stack_pointer());
}

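With the JavaThread::reguard_stack() overloads gone, callers reach the same logic through the accessor. A hedged sketch (hypothetical helper name, not a HotSpot entry point) of how a runtime slow path would reguard after handling an overflow:

// Sketch: reguard once the handler has unwound far enough.
static void reguard_after_overflow(JavaThread* current) {
  bool ok = current->stack_overflow_state()->reguard_stack_if_needed();
  assert(ok, "caller must have unwound enough stack to reguard");
}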
void JavaThread::block_if_vm_exited() {
  if (_terminated == _vm_exited) {
    // _vm_exited is set at safepoint, and Threads_lock is never released
@ -1849,9 +1828,7 @@ void JavaThread::block_if_vm_exited() {
static void compiler_thread_entry(JavaThread* thread, TRAPS);
static void sweeper_thread_entry(JavaThread* thread, TRAPS);

JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) :
                       Thread(), _handshake(this) {
  initialize();
JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) : JavaThread() {
  _jni_attach_state = _not_attaching_via_jni;
  set_entry_point(entry_point);
  // Create the native thread itself.
@ -1935,11 +1912,11 @@ void JavaThread::pre_run() {
// which defines the actual logic for that kind of thread.
void JavaThread::run() {
  // initialize thread-local alloc buffer related fields
  this->initialize_tlab();
  initialize_tlab();

  this->create_stack_guard_pages();
  _stack_overflow_state.create_stack_guard_pages();

  this->cache_global_variables();
  cache_global_variables();

  // Thread is now sufficiently initialized to be handled by the safepoint code as being
  // in the VM. Change thread state from _thread_new to _thread_in_vm
@ -1956,7 +1933,7 @@ void JavaThread::run() {

  // This operation might block. We call that after all safepoint checks for a new thread has
  // been completed.
  this->set_active_handles(JNIHandleBlock::allocate_block());
  set_active_handles(JNIHandleBlock::allocate_block());

  if (JvmtiExport::should_post_thread_life()) {
    JvmtiExport::post_thread_start(this);
@ -2175,7 +2152,7 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
  }

  // These have to be removed while this is still a valid thread.
  remove_stack_guard_pages();
  _stack_overflow_state.remove_stack_guard_pages();

  if (UseTLAB) {
    tlab().retire();
@ -2234,7 +2211,7 @@ void JavaThread::cleanup_failed_attach_current_thread(bool is_daemon) {
  }

  // These have to be removed while this is still a valid thread.
  remove_stack_guard_pages();
  _stack_overflow_state.remove_stack_guard_pages();

  if (UseTLAB) {
    tlab().retire();
@ -2697,184 +2674,6 @@ void JavaThread::java_resume() {
  }
}

size_t JavaThread::_stack_red_zone_size = 0;
size_t JavaThread::_stack_yellow_zone_size = 0;
size_t JavaThread::_stack_reserved_zone_size = 0;
size_t JavaThread::_stack_shadow_zone_size = 0;

void JavaThread::create_stack_guard_pages() {
  if (!os::uses_stack_guard_pages() ||
      _stack_guard_state != stack_guard_unused ||
      (DisablePrimordialThreadGuardPages && os::is_primordial_thread())) {
    log_info(os, thread)("Stack guard page creation for thread "
                         UINTX_FORMAT " disabled", os::current_thread_id());
    return;
  }
  address low_addr = stack_end();
  size_t len = stack_guard_zone_size();

  assert(is_aligned(low_addr, os::vm_page_size()), "Stack base should be the start of a page");
  assert(is_aligned(len, os::vm_page_size()), "Stack size should be a multiple of page size");

  int must_commit = os::must_commit_stack_guard_pages();
  // warning("Guarding at " PTR_FORMAT " for len " SIZE_FORMAT "\n", low_addr, len);

  if (must_commit && !os::create_stack_guard_pages((char *) low_addr, len)) {
    log_warning(os, thread)("Attempt to allocate stack guard pages failed.");
    return;
  }

  if (os::guard_memory((char *) low_addr, len)) {
    _stack_guard_state = stack_guard_enabled;
  } else {
    log_warning(os, thread)("Attempt to protect stack guard pages failed ("
      PTR_FORMAT "-" PTR_FORMAT ").", p2i(low_addr), p2i(low_addr + len));
    if (os::uncommit_memory((char *) low_addr, len)) {
      log_warning(os, thread)("Attempt to deallocate stack guard pages failed.");
    }
    return;
  }

  log_debug(os, thread)("Thread " UINTX_FORMAT " stack guard pages activated: "
    PTR_FORMAT "-" PTR_FORMAT ".",
    os::current_thread_id(), p2i(low_addr), p2i(low_addr + len));
}

void JavaThread::remove_stack_guard_pages() {
  assert(Thread::current() == this, "from different thread");
  if (_stack_guard_state == stack_guard_unused) return;
  address low_addr = stack_end();
  size_t len = stack_guard_zone_size();

  if (os::must_commit_stack_guard_pages()) {
    if (os::remove_stack_guard_pages((char *) low_addr, len)) {
      _stack_guard_state = stack_guard_unused;
    } else {
      log_warning(os, thread)("Attempt to deallocate stack guard pages failed ("
        PTR_FORMAT "-" PTR_FORMAT ").", p2i(low_addr), p2i(low_addr + len));
      return;
    }
  } else {
    if (_stack_guard_state == stack_guard_unused) return;
    if (os::unguard_memory((char *) low_addr, len)) {
      _stack_guard_state = stack_guard_unused;
    } else {
      log_warning(os, thread)("Attempt to unprotect stack guard pages failed ("
        PTR_FORMAT "-" PTR_FORMAT ").", p2i(low_addr), p2i(low_addr + len));
      return;
    }
  }

  log_debug(os, thread)("Thread " UINTX_FORMAT " stack guard pages removed: "
    PTR_FORMAT "-" PTR_FORMAT ".",
    os::current_thread_id(), p2i(low_addr), p2i(low_addr + len));
}

void JavaThread::enable_stack_reserved_zone() {
  assert(_stack_guard_state == stack_guard_reserved_disabled, "inconsistent state");

  // The base notation is from the stack's point of view, growing downward.
  // We need to adjust it to work correctly with guard_memory()
  address base = stack_reserved_zone_base() - stack_reserved_zone_size();

  guarantee(base < stack_base(), "Error calculating stack reserved zone");
  guarantee(base < os::current_stack_pointer(), "Error calculating stack reserved zone");

  if (os::guard_memory((char *) base, stack_reserved_zone_size())) {
    _stack_guard_state = stack_guard_enabled;
  } else {
    warning("Attempt to guard stack reserved zone failed.");
  }
}

void JavaThread::disable_stack_reserved_zone() {
  assert(_stack_guard_state == stack_guard_enabled, "inconsistent state");

  // Simply return if called for a thread that does not use guard pages.
  if (_stack_guard_state != stack_guard_enabled) return;

  // The base notation is from the stack's point of view, growing downward.
  // We need to adjust it to work correctly with guard_memory()
  address base = stack_reserved_zone_base() - stack_reserved_zone_size();

  if (os::unguard_memory((char *)base, stack_reserved_zone_size())) {
    _stack_guard_state = stack_guard_reserved_disabled;
  } else {
    warning("Attempt to unguard stack reserved zone failed.");
  }
}

void JavaThread::enable_stack_yellow_reserved_zone() {
  assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
  assert(_stack_guard_state != stack_guard_enabled, "already enabled");

  // The base notation is from the stacks point of view, growing downward.
  // We need to adjust it to work correctly with guard_memory()
  address base = stack_red_zone_base();

  guarantee(base < stack_base(), "Error calculating stack yellow zone");
  guarantee(base < os::current_stack_pointer(), "Error calculating stack yellow zone");

  if (os::guard_memory((char *) base, stack_yellow_reserved_zone_size())) {
    _stack_guard_state = stack_guard_enabled;
  } else {
    warning("Attempt to guard stack yellow zone failed.");
  }
}

void JavaThread::disable_stack_yellow_reserved_zone() {
  assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
  assert(_stack_guard_state != stack_guard_yellow_reserved_disabled, "already disabled");

  // Simply return if called for a thread that does not use guard pages.
  if (_stack_guard_state == stack_guard_unused) return;

  // The base notation is from the stacks point of view, growing downward.
  // We need to adjust it to work correctly with guard_memory()
  address base = stack_red_zone_base();

  if (os::unguard_memory((char *)base, stack_yellow_reserved_zone_size())) {
    _stack_guard_state = stack_guard_yellow_reserved_disabled;
  } else {
    warning("Attempt to unguard stack yellow zone failed.");
  }
}

void JavaThread::enable_stack_red_zone() {
  // The base notation is from the stacks point of view, growing downward.
  // We need to adjust it to work correctly with guard_memory()
  assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
  address base = stack_red_zone_base() - stack_red_zone_size();

  guarantee(base < stack_base(), "Error calculating stack red zone");
  guarantee(base < os::current_stack_pointer(), "Error calculating stack red zone");

  if (!os::guard_memory((char *) base, stack_red_zone_size())) {
    warning("Attempt to guard stack red zone failed.");
  }
}

void JavaThread::disable_stack_red_zone() {
  // The base notation is from the stacks point of view, growing downward.
  // We need to adjust it to work correctly with guard_memory()
  assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
  address base = stack_red_zone_base() - stack_red_zone_size();
  if (!os::unguard_memory((char *)base, stack_red_zone_size())) {
    warning("Attempt to unguard stack red zone failed.");
  }
}
void JavaThread::frames_do(void f(frame*, const RegisterMap* map)) {
  // ignore is there is no stack
  if (!has_last_Java_frame()) return;
  // traverse the stack frames. Starts from top frame.
  for (StackFrameStream fst(this); !fst.is_done(); fst.next()) {
    frame* fr = fst.current();
    f(fr, fst.register_map());
  }
}


#ifndef PRODUCT
// Deoptimization
// Function for testing deoptimization
@ -3148,8 +2947,19 @@ void JavaThread::print_on_error(outputStream* st, char *buf, int buflen) const {
  return;
}


// Verification

void JavaThread::frames_do(void f(frame*, const RegisterMap* map)) {
  // ignore if there is no stack
  if (!has_last_Java_frame()) return;
  // traverse the stack frames. Starts from top frame.
  for (StackFrameStream fst(this); !fst.is_done(); fst.next()) {
    frame* fr = fst.current();
    f(fr, fst.register_map());
  }
}

static void frame_verify(frame* f, const RegisterMap *map) { f->verify(map); }

void JavaThread::verify() {
@ -3894,7 +3704,7 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {

  // Enable guard page *after* os::create_main_thread(), otherwise it would
  // crash Linux VM, see notes in os_linux.cpp.
  main_thread->create_stack_guard_pages();
  main_thread->stack_overflow_state()->create_stack_guard_pages();

  // Initialize Java-Level synchronization subsystem
  ObjectMonitor::Initialize();

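The thread.cpp changes above are mechanical: every former JavaThread-level guard-page call is routed through the new member object. Representative before/after pairs, for reference:

thread->create_stack_guard_pages();                            // before
thread->stack_overflow_state()->create_stack_guard_pages();    // after

thread->remove_stack_guard_pages();                            // before
thread->stack_overflow_state()->remove_stack_guard_pages();    // after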
@ -41,6 +41,7 @@
#include "runtime/osThread.hpp"
#include "runtime/park.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/stackOverflow.hpp"
#include "runtime/threadHeapSampler.hpp"
#include "runtime/threadLocalStorage.hpp"
#include "runtime/threadStatisticalInfo.hpp"
@ -1134,16 +1135,6 @@ class JavaThread: public Thread {
  // of _attaching_via_jni and transitions to _attached_via_jni.
  volatile JNIAttachStates _jni_attach_state;

 public:
  // State of the stack guard pages for this thread.
  enum StackGuardState {
    stack_guard_unused,                   // not needed
    stack_guard_reserved_disabled,
    stack_guard_yellow_reserved_disabled, // disabled (temporarily) after stack overflow
    stack_guard_enabled                   // enabled
  };

 private:

#if INCLUDE_JVMCI
  // The _pending_* fields below are used to communicate extra information
@ -1191,12 +1182,7 @@ class JavaThread: public Thread {
 private:
#endif // INCLUDE_JVMCI

  StackGuardState _stack_guard_state;

  // Precompute the limit of the stack as used in stack overflow checks.
  // We load it from here to simplify the stack overflow check in assembly.
  address _stack_overflow_limit;
  address _reserved_stack_activation;
  StackOverflow _stack_overflow_state;

  // Compiler exception handling (NOTE: The _exception_oop is *NOT* the same as _pending_exception. It is
  // used to temp. parsing values into and out of the runtime system during exception handling for compiled
@ -1230,11 +1216,11 @@ class JavaThread: public Thread {
  friend class ThreadWaitTransition;
  friend class VM_Exit;

  void initialize();                             // Initialized the instance variables

 public:
  // Constructor
  JavaThread(bool is_attaching_via_jni = false); // for main thread and JNI attached threads
  JavaThread();                                  // delegating constructor
  JavaThread(bool is_attaching_via_jni);         // for main thread and JNI attached threads
  JavaThread(ThreadFunction entry_point, size_t stack_size = 0);
  ~JavaThread();

@ -1243,6 +1229,8 @@ class JavaThread: public Thread {
  void verify_not_published();
#endif // ASSERT

  StackOverflow* stack_overflow_state() { return &_stack_overflow_state; }

  //JNI functiontable getter/setter for JVMTI jni function table interception API.
  void set_jni_functions(struct JNINativeInterface_* functionTable) {
    _jni_environment.functions = functionTable;
@ -1288,7 +1276,6 @@ class JavaThread: public Thread {
  void set_saved_exception_pc(address pc) { _saved_exception_pc = pc; }
  address saved_exception_pc() { return _saved_exception_pc; }

  ThreadFunction entry_point() const { return _entry_point; }

  // Allocates a new Java level thread object for this thread. thread_name may be NULL.
@ -1574,186 +1561,11 @@ class JavaThread: public Thread {
    set_exception_pc(NULL);
  }

  // Stack overflow support
  //
  //  (small addresses)
  //
  //  --  <-- stack_end()                   ---
  //  |                                      |
  //  |  red pages                           |
  //  |                                      |
  //  --  <-- stack_red_zone_base()          |
  //  |                                      |
  //  |                                     guard
  //  |  yellow pages                       zone
  //  |                                      |
  //  |                                      |
  //  --  <-- stack_yellow_zone_base()       |
  //  |                                      |
  //  |                                      |
  //  |  reserved pages                      |
  //  |                                      |
  //  --  <-- stack_reserved_zone_base()    ---      ---
  //                                                 /|\  shadow     <--  stack_overflow_limit() (somewhere in here)
  //                                                  |   zone
  //                                                 \|/  size
  //  some untouched memory                          ---
  //
  //
  //  --
  //  |
  //  |  shadow zone
  //  |
  //  --
  //  x    frame n
  //  --
  //  x    frame n-1
  //  x
  //  --
  //  ...
  //
  //  --
  //  x    frame 0
  //  --  <-- stack_base()
  //
  //  (large addresses)
  //

 private:
  // These values are derived from flags StackRedPages, StackYellowPages,
  // StackReservedPages and StackShadowPages. The zone size is determined
  // ergonomically if page_size > 4K.
  static size_t _stack_red_zone_size;
  static size_t _stack_yellow_zone_size;
  static size_t _stack_reserved_zone_size;
  static size_t _stack_shadow_zone_size;
 public:
  inline size_t stack_available(address cur_sp);

  static size_t stack_red_zone_size() {
    assert(_stack_red_zone_size > 0, "Don't call this before the field is initialized.");
    return _stack_red_zone_size;
  }
  static void set_stack_red_zone_size(size_t s) {
    assert(is_aligned(s, os::vm_page_size()),
           "We can not protect if the red zone size is not page aligned.");
    assert(_stack_red_zone_size == 0, "This should be called only once.");
    _stack_red_zone_size = s;
  }
  address stack_red_zone_base() {
    return (address)(stack_end() + stack_red_zone_size());
  }
  bool in_stack_red_zone(address a) {
    return a <= stack_red_zone_base() && a >= stack_end();
  }

  static size_t stack_yellow_zone_size() {
    assert(_stack_yellow_zone_size > 0, "Don't call this before the field is initialized.");
    return _stack_yellow_zone_size;
  }
  static void set_stack_yellow_zone_size(size_t s) {
    assert(is_aligned(s, os::vm_page_size()),
           "We can not protect if the yellow zone size is not page aligned.");
    assert(_stack_yellow_zone_size == 0, "This should be called only once.");
    _stack_yellow_zone_size = s;
  }

  static size_t stack_reserved_zone_size() {
    // _stack_reserved_zone_size may be 0. This indicates the feature is off.
    return _stack_reserved_zone_size;
  }
  static void set_stack_reserved_zone_size(size_t s) {
    assert(is_aligned(s, os::vm_page_size()),
           "We can not protect if the reserved zone size is not page aligned.");
    assert(_stack_reserved_zone_size == 0, "This should be called only once.");
    _stack_reserved_zone_size = s;
  }
  address stack_reserved_zone_base() const {
    return (address)(stack_end() +
                     (stack_red_zone_size() + stack_yellow_zone_size() + stack_reserved_zone_size()));
  }
  bool in_stack_reserved_zone(address a) {
    return (a <= stack_reserved_zone_base()) &&
           (a >= (address)((intptr_t)stack_reserved_zone_base() - stack_reserved_zone_size()));
  }

  static size_t stack_yellow_reserved_zone_size() {
    return _stack_yellow_zone_size + _stack_reserved_zone_size;
  }
  bool in_stack_yellow_reserved_zone(address a) {
    return (a <= stack_reserved_zone_base()) && (a >= stack_red_zone_base());
  }

  // Size of red + yellow + reserved zones.
  static size_t stack_guard_zone_size() {
    return stack_red_zone_size() + stack_yellow_reserved_zone_size();
  }

  static size_t stack_shadow_zone_size() {
    assert(_stack_shadow_zone_size > 0, "Don't call this before the field is initialized.");
    return _stack_shadow_zone_size;
  }
  static void set_stack_shadow_zone_size(size_t s) {
    // The shadow area is not allocated or protected, so
    // it needs not be page aligned.
    // But the stack bang currently assumes that it is a
    // multiple of page size. This guarantees that the bang
    // loop touches all pages in the shadow zone.
    // This can be guaranteed differently, as well. E.g., if
    // the page size is a multiple of 4K, banging in 4K steps
    // suffices to touch all pages. (Some pages are banged
    // several times, though.)
    assert(is_aligned(s, os::vm_page_size()),
           "Stack bang assumes multiple of page size.");
    assert(_stack_shadow_zone_size == 0, "This should be called only once.");
    _stack_shadow_zone_size = s;
  }

  void create_stack_guard_pages();
  void remove_stack_guard_pages();

  void enable_stack_reserved_zone();
  void disable_stack_reserved_zone();
  void enable_stack_yellow_reserved_zone();
  void disable_stack_yellow_reserved_zone();
  void enable_stack_red_zone();
  void disable_stack_red_zone();

  inline bool stack_guard_zone_unused();
  inline bool stack_yellow_reserved_zone_disabled();
  inline bool stack_reserved_zone_disabled();
  inline bool stack_guards_enabled();

  address reserved_stack_activation() const { return _reserved_stack_activation; }
  void set_reserved_stack_activation(address addr) {
    assert(_reserved_stack_activation == stack_base()
           || _reserved_stack_activation == NULL
           || addr == stack_base(), "Must not be set twice");
    _reserved_stack_activation = addr;
  }

  // Attempt to reguard the stack after a stack overflow may have occurred.
  // Returns true if (a) guard pages are not needed on this thread, (b) the
  // pages are already guarded, or (c) the pages were successfully reguarded.
  // Returns false if there is not enough stack space to reguard the pages, in
  // which case the caller should unwind a frame and try again. The argument
  // should be the caller's (approximate) sp.
  bool reguard_stack(address cur_sp);
  // Similar to above but see if current stackpoint is out of the guard area
  // and reguard if possible.
  bool reguard_stack(void);

  address stack_overflow_limit() { return _stack_overflow_limit; }
  void set_stack_overflow_limit() {
    _stack_overflow_limit =
      stack_end() + MAX2(JavaThread::stack_guard_zone_size(), JavaThread::stack_shadow_zone_size());
  }

  // Check if address is in the usable part of the stack (excludes protected
  // guard pages). Can be applied to any thread and is an approximation for
  // using is_in_live_stack when the query has to happen from another thread.
  bool is_in_usable_stack(address adr) const {
    return is_in_stack_range_incl(adr, stack_reserved_zone_base());
    return is_in_stack_range_incl(adr, _stack_overflow_state.stack_reserved_zone_base());
  }

  // Misc. accessors/mutators
@ -1793,10 +1605,19 @@ class JavaThread: public Thread {
  static ByteSize exception_oop_offset()           { return byte_offset_of(JavaThread, _exception_oop); }
  static ByteSize exception_pc_offset()            { return byte_offset_of(JavaThread, _exception_pc); }
  static ByteSize exception_handler_pc_offset()    { return byte_offset_of(JavaThread, _exception_handler_pc); }
  static ByteSize stack_overflow_limit_offset()    { return byte_offset_of(JavaThread, _stack_overflow_limit); }
  static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); }
  static ByteSize stack_guard_state_offset()       { return byte_offset_of(JavaThread, _stack_guard_state); }
  static ByteSize reserved_stack_activation_offset() { return byte_offset_of(JavaThread, _reserved_stack_activation); }

  // StackOverflow offsets
  static ByteSize stack_overflow_limit_offset() {
    return byte_offset_of(JavaThread, _stack_overflow_state._stack_overflow_limit);
  }
  static ByteSize stack_guard_state_offset() {
    return byte_offset_of(JavaThread, _stack_overflow_state._stack_guard_state);
  }
  static ByteSize reserved_stack_activation_offset() {
    return byte_offset_of(JavaThread, _stack_overflow_state._reserved_stack_activation);
  }

  static ByteSize suspend_flags_offset()           { return byte_offset_of(JavaThread, _suspend_flags); }

  static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
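The ByteSize accessors keep assembly stubs working unchanged: a nested field is still reached as thread base plus a byte offset; only the offset expression moved. A hedged sketch of how such an offset is consumed ('thread' is an assumed JavaThread*):

// Sketch: resolving the relocated field through the published offset.
char* base = (char*) thread;
address limit = *(address*) (base + in_bytes(JavaThread::stack_overflow_limit_offset()));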
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -166,39 +166,6 @@ inline void JavaThread::set_done_attaching_via_jni() {
  OrderAccess::fence();
}

inline bool JavaThread::stack_guard_zone_unused() {
  return _stack_guard_state == stack_guard_unused;
}

inline bool JavaThread::stack_yellow_reserved_zone_disabled() {
  return _stack_guard_state == stack_guard_yellow_reserved_disabled;
}

inline bool JavaThread::stack_reserved_zone_disabled() {
  return _stack_guard_state == stack_guard_reserved_disabled;
}

inline size_t JavaThread::stack_available(address cur_sp) {
  // This code assumes java stacks grow down
  address low_addr; // Limit on the address for deepest stack depth
  if (_stack_guard_state == stack_guard_unused) {
    low_addr = stack_end();
  } else {
    low_addr = stack_reserved_zone_base();
  }
  return cur_sp > low_addr ? cur_sp - low_addr : 0;
}

inline bool JavaThread::stack_guards_enabled() {
#ifdef ASSERT
  if (os::uses_stack_guard_pages() &&
      !(DisablePrimordialThreadGuardPages && os::is_primordial_thread())) {
    assert(_stack_guard_state != stack_guard_unused, "guard pages must be in use");
  }
#endif
  return _stack_guard_state == stack_guard_enabled;
}

// The release make sure this store is done after storing the handshake
// operation or global state
inline void JavaThread::set_polling_page_release(void* poll_value) {
@ -395,7 +395,7 @@ public class GraalHotSpotVMConfig extends GraalHotSpotVMConfigAccess {
    public final int threadObjectResultOffset = getFieldOffset("JavaThread::_vm_result", Integer.class, "oop");
    public final int jvmciCountersThreadOffset = getFieldOffset("JavaThread::_jvmci_counters", Integer.class, "jlong*");
    public final int doingUnsafeAccessOffset = getFieldOffset("JavaThread::_doing_unsafe_access", Integer.class, "bool", Integer.MAX_VALUE, JVMCI || JDK >= 14);
    public final int javaThreadReservedStackActivationOffset = JDK <= 8 ? 0 : getFieldOffset("JavaThread::_reserved_stack_activation", Integer.class, "address"); // JDK-8046936
    public final int javaThreadReservedStackActivationOffset = JDK <= 8 ? 0 : getFieldOffset("JavaThread::_stack_overflow_state._reserved_stack_activation", Integer.class, "address"); // JDK-8046936
    public final int jniEnvironmentOffset = getFieldOffset("JavaThread::_jni_environment", Integer.class, "JNIEnv", Integer.MIN_VALUE, JVMCI || JDK >= 14);

    public boolean requiresReservedStackCheck(List<ResolvedJavaMethod> methods) {
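A note on the Graal change: the VM exposes nested members to JVMCI under dotted paths, which is why only the lookup string changes here. Sketch (an assumption about the matching VM-side declaration, not quoted from this patch):

// Assumed vmStructs shape for the relocated field:
//   nonstatic_field(JavaThread, _stack_overflow_state._reserved_stack_activation, address)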