8329332: Remove CompiledMethod and CodeBlobLayout classes

Reviewed-by: vlivanov, stefank
Vladimir Kozlov 2024-04-04 19:48:48 +00:00
parent 28216aa971
commit 83eba863fe
118 changed files with 2131 additions and 2597 deletions
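The change is largely mechanical: CompiledMethod is folded into nmethod, so CodeBlob queries and casts move to their nmethod counterparts (is_compiled() becomes is_nmethod(), as_compiled_method_or_null() becomes as_nmethod_or_null(), CompiledMethod::get_deopt_original_pc() becomes nmethod::get_deopt_original_pc(), and so on), while CodeBlobLayout gives way to explicit size/offset constructor parameters on CodeBlob. Below is a minimal sketch of the resulting call-site shape; find_deopt_handler() is a hypothetical helper, not part of this patch, but every CodeBlob/nmethod call it uses appears in the hunks that follow.

// Illustrative sketch only; find_deopt_handler() is a made-up name.
#include "code/codeCache.hpp"
#include "code/nmethod.hpp"

static address find_deopt_handler(address pc) {
  CodeBlob* cb = CodeCache::find_blob(pc);
  // Formerly cb->as_compiled_method_or_null(); CompiledMethod* no longer exists.
  nmethod* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr;
  if (nm == nullptr || !nm->insts_contains_inclusive(pc)) {
    return nullptr; // pc is not inside compiled Java code
  }
  // Formerly reached through CompiledMethod; now directly on nmethod.
  return nm->is_method_handle_return(pc) ? nm->deopt_mh_handler_begin()
                                         : nm->deopt_handler_begin();
}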

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,7 +35,7 @@
inline frame ContinuationEntry::to_frame() const {
static CodeBlob* cb = CodeCache::find_blob_fast(entry_pc());
assert(cb != nullptr, "");
assert(cb->as_compiled_method()->method()->is_continuation_enter_intrinsic(), "");
assert(cb->as_nmethod()->method()->is_continuation_enter_intrinsic(), "");
return frame(entry_sp(), entry_sp(), entry_fp(), entry_pc(), cb);
}

View File

@ -222,7 +222,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
return false;
}
CompiledMethod* nm = sender_blob->as_compiled_method_or_null();
nmethod* nm = sender_blob->as_nmethod_or_null();
if (nm != nullptr) {
if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
nm->method()->is_method_handle_intrinsic()) {
@ -234,7 +234,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// because the return address counts against the callee's frame.
if (sender_blob->frame_size() <= 0) {
assert(!sender_blob->is_compiled(), "should count return address at least");
assert(!sender_blob->is_nmethod(), "should count return address at least");
return false;
}
@ -243,7 +243,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// should not be anything but the call stub (already covered), the interpreter (already covered)
// or an nmethod.
if (!sender_blob->is_compiled()) {
if (!sender_blob->is_nmethod()) {
return false;
}
@ -297,7 +297,7 @@ void frame::patch_pc(Thread* thread, address pc) {
DEBUG_ONLY(address old_pc = _pc;)
*pc_addr = signed_pc;
_pc = pc; // must be set before call to get_deopt_original_pc
address original_pc = CompiledMethod::get_deopt_original_pc(this);
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != nullptr) {
assert(original_pc == old_pc, "expected original PC to be stored before patching");
_deopt_state = is_deoptimized;
@ -426,7 +426,7 @@ frame frame::sender_for_upcall_stub_frame(RegisterMap* map) const {
// Verifies the calculated original PC of a deoptimization PC for the
// given unextended SP.
#ifdef ASSERT
void frame::verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp) {
void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp) {
frame fr;
// This is ugly but it's better than to change {get,set}_original_pc
@ -449,12 +449,12 @@ void frame::adjust_unextended_sp() {
// returning to any of these call sites.
if (_cb != nullptr) {
CompiledMethod* sender_cm = _cb->as_compiled_method_or_null();
if (sender_cm != nullptr) {
nmethod* sender_nm = _cb->as_nmethod_or_null();
if (sender_nm != nullptr) {
// If the sender PC is a deoptimization point, get the original PC.
if (sender_cm->is_deopt_entry(_pc) ||
sender_cm->is_deopt_mh_entry(_pc)) {
verify_deopt_original_pc(sender_cm, _unextended_sp);
if (sender_nm->is_deopt_entry(_pc) ||
sender_nm->is_deopt_mh_entry(_pc)) {
verify_deopt_original_pc(sender_nm, _unextended_sp);
}
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -153,7 +153,7 @@
#ifdef ASSERT
// Used in frame::sender_for_{interpreter,compiled}_frame
static void verify_deopt_original_pc( CompiledMethod* nm, intptr_t* unextended_sp);
static void verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp);
#endif
public:

View File

@ -71,11 +71,11 @@ inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
inline void frame::setup(address pc) {
adjust_unextended_sp();
address original_pc = CompiledMethod::get_deopt_original_pc(this);
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != nullptr) {
_pc = original_pc;
_deopt_state = is_deoptimized;
assert(_cb == nullptr || _cb->as_compiled_method()->insts_contains_inclusive(_pc),
assert(_cb == nullptr || _cb->as_nmethod()->insts_contains_inclusive(_pc),
"original PC must be in the main code section of the compiled method (or must be immediately following it)");
} else {
if (_cb == SharedRuntime::deopt_blob()) {
@ -178,7 +178,7 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {
_cb = CodeCache::find_blob(_pc);
adjust_unextended_sp();
address original_pc = CompiledMethod::get_deopt_original_pc(this);
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != nullptr) {
_pc = original_pc;
_deopt_state = is_deoptimized;
@ -240,8 +240,8 @@ inline int frame::frame_size() const {
}
inline int frame::compiled_frame_stack_argsize() const {
assert(cb()->is_compiled(), "");
return (cb()->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
assert(cb()->is_nmethod(), "");
return (cb()->as_nmethod()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
}
inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const {
@ -417,7 +417,7 @@ inline frame frame::sender_for_compiled_frame(RegisterMap* map) const {
// Tell GC to use argument oopmaps for some runtime stubs that need it.
// For C1, the runtime stub might not have oop maps, so set this flag
// outside of update_register_map.
if (!_cb->is_compiled()) { // compiled frames do not use callee-saved registers
if (!_cb->is_nmethod()) { // compiled frames do not use callee-saved registers
map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
if (oop_map() != nullptr) {
_oop_map->update_register_map(this, map);

View File

@ -867,7 +867,7 @@ static bool is_always_within_branch_range(Address entry) {
// Non-compiled methods stay forever in CodeCache.
// We check whether the longest possible branch is within the branch range.
assert(CodeCache::find_blob(target) != nullptr &&
!CodeCache::find_blob(target)->is_compiled(),
!CodeCache::find_blob(target)->is_nmethod(),
"runtime call of compiled method");
const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;
const address left_longest_branch_start = CodeCache::low_bound();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/compiledMethod.hpp"
#include "code/nmethod.hpp"
#include "code/relocInfo.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/oop.inline.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,7 +35,7 @@ template <ChunkFrames frame_kind>
inline bool StackChunkFrameStream<frame_kind>::is_in_frame(void* p0) const {
assert(!is_done(), "");
intptr_t* p = (intptr_t*)p0;
int argsize = is_compiled() ? (_cb->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord : 0;
int argsize = is_compiled() ? (_cb->as_nmethod()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord : 0;
int frame_size = _cb->frame_size() + argsize;
return p == sp() - frame::sender_sp_offset || ((p - unextended_sp()) >= 0 && (p - unextended_sp()) < frame_size);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -80,7 +80,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// ok. adapter blobs never have a frame complete and are never ok.
if (!_cb->is_frame_complete_at(_pc)) {
if (_cb->is_compiled() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
return false;
}
}
@ -179,7 +179,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// because the return address counts against the callee's frame.
if (sender_blob->frame_size() <= 0) {
assert(!sender_blob->is_compiled(), "should count return address at least");
assert(!sender_blob->is_nmethod(), "should count return address at least");
return false;
}
@ -188,7 +188,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// should not be anything but the call stub (already covered), the interpreter (already covered)
// or an nmethod.
if (!sender_blob->is_compiled()) {
if (!sender_blob->is_nmethod()) {
return false;
}
@ -229,7 +229,7 @@ void frame::patch_pc(Thread* thread, address pc) {
DEBUG_ONLY(address old_pc = _pc;)
*pc_addr = pc;
_pc = pc; // must be set before call to get_deopt_original_pc
address original_pc = CompiledMethod::get_deopt_original_pc(this);
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != nullptr) {
assert(original_pc == old_pc, "expected original PC to be stored before patching");
_deopt_state = is_deoptimized;
@ -332,7 +332,7 @@ bool frame::upcall_stub_frame_is_first() const {
// given unextended SP. The unextended SP might also be the saved SP
// for MethodHandle call sites.
#ifdef ASSERT
void frame::verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp, bool is_method_handle_return) {
void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return) {
frame fr;
// This is ugly but it's better than to change {get,set}_original_pc
@ -357,19 +357,19 @@ void frame::adjust_unextended_sp() {
// simplest way to tell whether we are returning to such a call site
// is as follows:
CompiledMethod* sender_cm = (_cb == nullptr) ? nullptr : _cb->as_compiled_method_or_null();
if (sender_cm != nullptr) {
nmethod* sender_nm = (_cb == nullptr) ? nullptr : _cb->as_nmethod_or_null();
if (sender_nm != nullptr) {
// If the sender PC is a deoptimization point, get the original
// PC. For MethodHandle call site the unextended_sp is stored in
// saved_fp.
if (sender_cm->is_deopt_mh_entry(_pc)) {
DEBUG_ONLY(verify_deopt_mh_original_pc(sender_cm, _fp));
if (sender_nm->is_deopt_mh_entry(_pc)) {
DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, _fp));
_unextended_sp = _fp;
}
else if (sender_cm->is_deopt_entry(_pc)) {
DEBUG_ONLY(verify_deopt_original_pc(sender_cm, _unextended_sp));
else if (sender_nm->is_deopt_entry(_pc)) {
DEBUG_ONLY(verify_deopt_original_pc(sender_nm, _unextended_sp));
}
else if (sender_cm->is_method_handle_return(_pc)) {
else if (sender_nm->is_method_handle_return(_pc)) {
_unextended_sp = _fp;
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -93,8 +93,8 @@
#ifdef ASSERT
// Used in frame::sender_for_{interpreter,compiled}_frame
static void verify_deopt_original_pc( CompiledMethod* nm, intptr_t* unextended_sp, bool is_method_handle_return = false);
static void verify_deopt_mh_original_pc(CompiledMethod* nm, intptr_t* unextended_sp) {
static void verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return = false);
static void verify_deopt_mh_original_pc(nmethod* nm, intptr_t* unextended_sp) {
verify_deopt_original_pc(nm, unextended_sp, true);
}
#endif

View File

@ -58,10 +58,10 @@ inline void frame::init(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, add
adjust_unextended_sp();
DEBUG_ONLY(_frame_index = -1;)
address original_pc = CompiledMethod::get_deopt_original_pc(this);
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != nullptr) {
_pc = original_pc;
assert(_cb->as_compiled_method()->insts_contains_inclusive(_pc),
assert(_cb->as_nmethod()->insts_contains_inclusive(_pc),
"original PC must be in the main code section of the the compiled method (or must be immediately following it)");
_deopt_state = is_deoptimized;
} else {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,7 +35,7 @@
inline frame ContinuationEntry::to_frame() const {
static CodeBlob* cb = CodeCache::find_blob_fast(entry_pc());
assert(cb != nullptr, "");
assert(cb->as_compiled_method()->method()->is_continuation_enter_intrinsic(), "");
assert(cb->as_nmethod()->method()->is_continuation_enter_intrinsic(), "");
return frame(entry_sp(), entry_pc(), entry_sp(), entry_fp(), cb);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -90,7 +90,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// so we just assume they are OK.
// Adapter blobs never have a complete frame and are never OK
if (!_cb->is_frame_complete_at(_pc)) {
if (_cb->is_compiled() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
return false;
}
}
@ -280,7 +280,7 @@ void frame::patch_pc(Thread* thread, address pc) {
DEBUG_ONLY(address old_pc = _pc;)
own_abi()->lr = (uint64_t)pc;
_pc = pc; // must be set before call to get_deopt_original_pc
address original_pc = CompiledMethod::get_deopt_original_pc(this);
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != nullptr) {
assert(original_pc == old_pc, "expected original PC to be stored before patching");
_deopt_state = is_deoptimized;
@ -288,7 +288,7 @@ void frame::patch_pc(Thread* thread, address pc) {
} else {
_deopt_state = not_deoptimized;
}
assert(!is_compiled_frame() || !_cb->as_compiled_method()->is_deopt_entry(_pc), "must be");
assert(!is_compiled_frame() || !_cb->as_nmethod()->is_deopt_entry(_pc), "must be");
#ifdef ASSERT
{

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -61,11 +61,11 @@ inline void frame::setup(kind knd) {
}
}
address original_pc = CompiledMethod::get_deopt_original_pc(this);
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != nullptr) {
_pc = original_pc;
_deopt_state = is_deoptimized;
assert(_cb == nullptr || _cb->as_compiled_method()->insts_contains_inclusive(_pc),
assert(_cb == nullptr || _cb->as_nmethod()->insts_contains_inclusive(_pc),
"original PC must be in the main code section of the compiled method (or must be immediately following it)");
} else {
if (_cb == SharedRuntime::deopt_blob()) {
@ -329,7 +329,7 @@ inline frame frame::sender_for_compiled_frame(RegisterMap *map) const {
// Tell GC to use argument oopmaps for some runtime stubs that need it.
// For C1, the runtime stub might not have oop maps, so set this flag
// outside of update_register_map.
if (!_cb->is_compiled()) { // compiled frames do not use callee-saved registers
if (!_cb->is_nmethod()) { // compiled frames do not use callee-saved registers
map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
if (oop_map() != nullptr) {
_oop_map->update_register_map(this, map);
@ -368,8 +368,8 @@ inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
}
inline int frame::compiled_frame_stack_argsize() const {
assert(cb()->is_compiled(), "");
return (cb()->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
assert(cb()->is_nmethod(), "");
return (cb()->as_nmethod()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
}
inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -460,7 +460,7 @@ void NativeDeoptInstruction::verify() {
bool NativeDeoptInstruction::is_deopt_at(address code_pos) {
if (!Assembler::is_illtrap(code_pos)) return false;
CodeBlob* cb = CodeCache::find_blob(code_pos);
if (cb == nullptr || !cb->is_compiled()) return false;
if (cb == nullptr || !cb->is_nmethod()) return false;
nmethod *nm = (nmethod *)cb;
// see NativeInstruction::is_sigill_not_entrant_at()
return nm->verified_entry_point() != code_pos;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,7 +35,7 @@ inline bool StackChunkFrameStream<frame_kind>::is_in_frame(void* p0) const {
assert(!is_done(), "");
assert(is_compiled(), "");
intptr_t* p = (intptr_t*)p0;
int argsize = (_cb->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
int argsize = (_cb->as_nmethod()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
int frame_size = _cb->frame_size() + (argsize > 0 ? argsize + frame::metadata_words_at_top : 0);
return (p - unextended_sp()) >= 0 && (p - unextended_sp()) < frame_size;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,7 +35,7 @@
inline frame ContinuationEntry::to_frame() const {
static CodeBlob* cb = CodeCache::find_blob_fast(entry_pc());
assert(cb != nullptr, "");
assert(cb->as_compiled_method()->method()->is_continuation_enter_intrinsic(), "");
assert(cb->as_nmethod()->method()->is_continuation_enter_intrinsic(), "");
return frame(entry_sp(), entry_sp(), entry_fp(), entry_pc(), cb);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -211,7 +211,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
return thread->is_in_stack_range_excl(jcw, (address)sender.fp());
}
CompiledMethod* nm = sender_blob->as_compiled_method_or_null();
nmethod* nm = sender_blob->as_nmethod_or_null();
if (nm != nullptr) {
if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
nm->method()->is_method_handle_intrinsic()) {
@ -222,7 +222,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// If the frame size is 0 something (or less) is bad because every nmethod has a non-zero frame size
// because the return address counts against the callee's frame.
if (sender_blob->frame_size() <= 0) {
assert(!sender_blob->is_compiled(), "should count return address at least");
assert(!sender_blob->is_nmethod(), "should count return address at least");
return false;
}
@ -230,7 +230,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// code cache (current frame) is called by an entity within the code cache that entity
// should not be anything but the call stub (already covered), the interpreter (already covered)
// or an nmethod.
if (!sender_blob->is_compiled()) {
if (!sender_blob->is_nmethod()) {
return false;
}
@ -273,7 +273,7 @@ void frame::patch_pc(Thread* thread, address pc) {
DEBUG_ONLY(address old_pc = _pc;)
*pc_addr = pc;
_pc = pc; // must be set before call to get_deopt_original_pc
address original_pc = CompiledMethod::get_deopt_original_pc(this);
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != nullptr) {
assert(original_pc == old_pc, "expected original PC to be stored before patching");
_deopt_state = is_deoptimized;
@ -399,7 +399,7 @@ frame frame::sender_for_upcall_stub_frame(RegisterMap* map) const {
// Verifies the calculated original PC of a deoptimization PC for the
// given unextended SP.
#ifdef ASSERT
void frame::verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp) {
void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp) {
frame fr;
// This is ugly but it's better than to change {get,set}_original_pc
@ -423,12 +423,12 @@ void frame::adjust_unextended_sp() {
// returning to any of these call sites.
if (_cb != nullptr) {
CompiledMethod* sender_cm = _cb->as_compiled_method_or_null();
if (sender_cm != nullptr) {
nmethod* sender_nm = _cb->as_nmethod_or_null();
if (sender_nm != nullptr) {
// If the sender PC is a deoptimization point, get the original PC.
if (sender_cm->is_deopt_entry(_pc) ||
sender_cm->is_deopt_mh_entry(_pc)) {
verify_deopt_original_pc(sender_cm, _unextended_sp);
if (sender_nm->is_deopt_entry(_pc) ||
sender_nm->is_deopt_mh_entry(_pc)) {
verify_deopt_original_pc(sender_nm, _unextended_sp);
}
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -186,7 +186,7 @@
#ifdef ASSERT
// Used in frame::sender_for_{interpreter,compiled}_frame
static void verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp);
static void verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp);
#endif
public:

View File

@ -69,11 +69,11 @@ inline void frame::init(intptr_t* ptr_sp, intptr_t* ptr_fp, address pc) {
inline void frame::setup(address pc) {
adjust_unextended_sp();
address original_pc = CompiledMethod::get_deopt_original_pc(this);
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != nullptr) {
_pc = original_pc;
_deopt_state = is_deoptimized;
assert(_cb == nullptr || _cb->as_compiled_method()->insts_contains_inclusive(_pc),
assert(_cb == nullptr || _cb->as_nmethod()->insts_contains_inclusive(_pc),
"original PC must be in the main code section of the compiled method (or must be immediately following it)");
} else {
if (_cb == SharedRuntime::deopt_blob()) {
@ -170,7 +170,7 @@ inline frame::frame(intptr_t* ptr_sp, intptr_t* ptr_fp) {
_cb = CodeCache::find_blob(_pc);
adjust_unextended_sp();
address original_pc = CompiledMethod::get_deopt_original_pc(this);
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != nullptr) {
_pc = original_pc;
_deopt_state = is_deoptimized;
@ -231,8 +231,8 @@ inline int frame::frame_size() const {
}
inline int frame::compiled_frame_stack_argsize() const {
assert(cb()->is_compiled(), "");
return (cb()->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
assert(cb()->is_nmethod(), "");
return (cb()->as_nmethod()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
}
inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const {
@ -413,7 +413,7 @@ frame frame::sender_for_compiled_frame(RegisterMap* map) const {
// Tell GC to use argument oopmaps for some runtime stubs that need it.
// For C1, the runtime stub might not have oop maps, so set this flag
// outside of update_register_map.
if (!_cb->is_compiled()) { // compiled frames do not use callee-saved registers
if (!_cb->is_nmethod()) { // compiled frames do not use callee-saved registers
map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
if (oop_map() != nullptr) {
_oop_map->update_register_map(this, map);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,7 +34,7 @@ template <ChunkFrames frame_kind>
inline bool StackChunkFrameStream<frame_kind>::is_in_frame(void* p0) const {
assert(!is_done(), "");
intptr_t* p = (intptr_t*)p0;
int argsize = is_compiled() ? (_cb->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord : 0;
int argsize = is_compiled() ? (_cb->as_nmethod()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord : 0;
int frame_size = _cb->frame_size() + argsize;
return p == sp() - 2 || ((p - unextended_sp()) >= 0 && (p - unextended_sp()) < frame_size);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -267,7 +267,7 @@ void frame::patch_pc(Thread* thread, address pc) {
DEBUG_ONLY(address old_pc = _pc;)
own_abi()->return_pc = (uint64_t)pc;
_pc = pc; // must be set before call to get_deopt_original_pc
address original_pc = CompiledMethod::get_deopt_original_pc(this);
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != nullptr) {
// assert(original_pc == _pc, "expected original to be stored before patching");
_deopt_state = is_deoptimized;
@ -275,7 +275,7 @@ void frame::patch_pc(Thread* thread, address pc) {
} else {
_deopt_state = not_deoptimized;
}
assert(!is_compiled_frame() || !_cb->as_compiled_method()->is_deopt_entry(_pc), "must be");
assert(!is_compiled_frame() || !_cb->as_nmethod()->is_deopt_entry(_pc), "must be");
#ifdef ASSERT
{

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -56,11 +56,11 @@ inline void frame::setup() {
assert(_on_heap || (is_aligned(_sp, alignment_in_bytes) && is_aligned(_fp, alignment_in_bytes)),
"invalid alignment sp:" PTR_FORMAT " unextended_sp:" PTR_FORMAT " fp:" PTR_FORMAT, p2i(_sp), p2i(_unextended_sp), p2i(_fp));
address original_pc = CompiledMethod::get_deopt_original_pc(this);
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != nullptr) {
_pc = original_pc;
_deopt_state = is_deoptimized;
assert(_cb == nullptr || _cb->as_compiled_method()->insts_contains_inclusive(_pc),
assert(_cb == nullptr || _cb->as_nmethod()->insts_contains_inclusive(_pc),
"original PC must be in the main code section of the compiled method (or must be immediately following it)");
} else {
if (_cb == SharedRuntime::deopt_blob()) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -540,7 +540,7 @@ void NativeMovConstReg::set_narrow_klass(intptr_t data) {
ICache::invalidate_range(start, range);
}
void NativeMovConstReg::set_pcrel_addr(intptr_t newTarget, CompiledMethod *passed_nm /* = nullptr */) {
void NativeMovConstReg::set_pcrel_addr(intptr_t newTarget, nmethod *passed_nm /* = nullptr */) {
address next_address;
address loc = addr_at(0);
@ -565,7 +565,7 @@ void NativeMovConstReg::set_pcrel_addr(intptr_t newTarget, CompiledMethod *passe
}
}
void NativeMovConstReg::set_pcrel_data(intptr_t newData, CompiledMethod *passed_nm /* = nullptr */) {
void NativeMovConstReg::set_pcrel_data(intptr_t newData, nmethod *passed_nm /* = nullptr */) {
address next_address;
address loc = addr_at(0);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -486,8 +486,8 @@ class NativeMovConstReg: public NativeInstruction {
// Patch narrow oop constant in code stream.
void set_narrow_oop(intptr_t data);
void set_narrow_klass(intptr_t data);
void set_pcrel_addr(intptr_t addr, CompiledMethod *nm = nullptr);
void set_pcrel_data(intptr_t data, CompiledMethod *nm = nullptr);
void set_pcrel_addr(intptr_t addr, nmethod *nm = nullptr);
void set_pcrel_data(intptr_t data, nmethod *nm = nullptr);
void verify();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,7 +35,7 @@
inline frame ContinuationEntry::to_frame() const {
static CodeBlob* cb = CodeCache::find_blob_fast(entry_pc());
assert(cb != nullptr, "");
assert(cb->as_compiled_method()->method()->is_continuation_enter_intrinsic(), "");
assert(cb->as_nmethod()->method()->is_continuation_enter_intrinsic(), "");
return frame(entry_sp(), entry_sp(), entry_fp(), entry_pc(), cb);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -95,7 +95,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// ok. adapter blobs never have a frame complete and are never ok.
if (!_cb->is_frame_complete_at(_pc)) {
if (_cb->is_compiled() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
return false;
}
}
@ -213,7 +213,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
return false;
}
CompiledMethod* nm = sender_blob->as_compiled_method_or_null();
nmethod* nm = sender_blob->as_nmethod_or_null();
if (nm != nullptr) {
if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
nm->method()->is_method_handle_intrinsic()) {
@ -225,7 +225,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// because the return address counts against the callee's frame.
if (sender_blob->frame_size() <= 0) {
assert(!sender_blob->is_compiled(), "should count return address at least");
assert(!sender_blob->is_nmethod(), "should count return address at least");
return false;
}
@ -234,7 +234,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// should not be anything but the call stub (already covered), the interpreter (already covered)
// or an nmethod.
if (!sender_blob->is_compiled()) {
if (!sender_blob->is_nmethod()) {
return false;
}
@ -283,7 +283,7 @@ void frame::patch_pc(Thread* thread, address pc) {
DEBUG_ONLY(address old_pc = _pc;)
*pc_addr = pc;
_pc = pc; // must be set before call to get_deopt_original_pc
address original_pc = CompiledMethod::get_deopt_original_pc(this);
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != nullptr) {
assert(original_pc == old_pc, "expected original PC to be stored before patching");
_deopt_state = is_deoptimized;
@ -291,7 +291,7 @@ void frame::patch_pc(Thread* thread, address pc) {
} else {
_deopt_state = not_deoptimized;
}
assert(!is_compiled_frame() || !_cb->as_compiled_method()->is_deopt_entry(_pc), "must be");
assert(!is_compiled_frame() || !_cb->as_nmethod()->is_deopt_entry(_pc), "must be");
#ifdef ASSERT
{
@ -415,7 +415,7 @@ frame frame::sender_for_upcall_stub_frame(RegisterMap* map) const {
// Verifies the calculated original PC of a deoptimization PC for the
// given unextended SP.
#ifdef ASSERT
void frame::verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp) {
void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp) {
frame fr;
// This is ugly but it's better than to change {get,set}_original_pc
@ -438,12 +438,12 @@ void frame::adjust_unextended_sp() {
// returning to any of these call sites.
if (_cb != nullptr) {
CompiledMethod* sender_cm = _cb->as_compiled_method_or_null();
if (sender_cm != nullptr) {
nmethod* sender_nm = _cb->as_nmethod_or_null();
if (sender_nm != nullptr) {
// If the sender PC is a deoptimization point, get the original PC.
if (sender_cm->is_deopt_entry(_pc) ||
sender_cm->is_deopt_mh_entry(_pc)) {
verify_deopt_original_pc(sender_cm, _unextended_sp);
if (sender_nm->is_deopt_entry(_pc) ||
sender_nm->is_deopt_mh_entry(_pc)) {
verify_deopt_original_pc(sender_nm, _unextended_sp);
}
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -146,7 +146,7 @@
#ifdef ASSERT
// Used in frame::sender_for_{interpreter,compiled}_frame
static void verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp);
static void verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp);
#endif
public:

View File

@ -66,11 +66,11 @@ inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
inline void frame::setup(address pc) {
adjust_unextended_sp();
address original_pc = CompiledMethod::get_deopt_original_pc(this);
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != nullptr) {
_pc = original_pc;
_deopt_state = is_deoptimized;
assert(_cb == nullptr || _cb->as_compiled_method()->insts_contains_inclusive(_pc),
assert(_cb == nullptr || _cb->as_nmethod()->insts_contains_inclusive(_pc),
"original PC must be in the main code section of the compiled method (or must be immediately following it)");
} else {
if (_cb == SharedRuntime::deopt_blob()) {
@ -164,7 +164,7 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {
_cb = CodeCache::find_blob(_pc);
adjust_unextended_sp();
address original_pc = CompiledMethod::get_deopt_original_pc(this);
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != nullptr) {
_pc = original_pc;
_deopt_state = is_deoptimized;
@ -226,8 +226,8 @@ inline int frame::frame_size() const {
}
inline int frame::compiled_frame_stack_argsize() const {
assert(cb()->is_compiled(), "");
return (cb()->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
assert(cb()->is_nmethod(), "");
return (cb()->as_nmethod()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
}
inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const {
@ -397,7 +397,7 @@ inline frame frame::sender_for_compiled_frame(RegisterMap* map) const {
// Tell GC to use argument oopmaps for some runtime stubs that need it.
// For C1, the runtime stub might not have oop maps, so set this flag
// outside of update_register_map.
if (!_cb->is_compiled()) { // compiled frames do not use callee-saved registers
if (!_cb->is_nmethod()) { // compiled frames do not use callee-saved registers
map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
if (oop_map() != nullptr) {
_oop_map->update_register_map(this, map);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,7 +34,7 @@ template <ChunkFrames frame_kind>
inline bool StackChunkFrameStream<frame_kind>::is_in_frame(void* p0) const {
assert(!is_done(), "");
intptr_t* p = (intptr_t*)p0;
int argsize = is_compiled() ? (_cb->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord : 0;
int argsize = is_compiled() ? (_cb->as_nmethod()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord : 0;
int frame_size = _cb->frame_size() + argsize;
return p == sp() - frame::sender_sp_offset || ((p - unextended_sp()) >= 0 && (p - unextended_sp()) < frame_size);
}

View File

@ -24,8 +24,8 @@
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledMethod.hpp"
#include "code/nativeInst.hpp"
#include "code/nmethod.hpp"
#include "jvm.h"
#include "logging/log.hpp"
#include "os_posix.hpp"
@ -613,17 +613,17 @@ int JVM_HANDLE_XXX_SIGNAL(int sig, siginfo_t* info,
if (!signal_was_handled && pc != nullptr && os::is_readable_pointer(pc)) {
if (NativeDeoptInstruction::is_deopt_at(pc)) {
CodeBlob* cb = CodeCache::find_blob(pc);
if (cb != nullptr && cb->is_compiled()) {
if (cb != nullptr && cb->is_nmethod()) {
MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, t);) // can call PcDescCache::add_pc_desc
CompiledMethod* cm = cb->as_compiled_method();
assert(cm->insts_contains_inclusive(pc), "");
address deopt = cm->is_method_handle_return(pc) ?
cm->deopt_mh_handler_begin() :
cm->deopt_handler_begin();
nmethod* nm = cb->as_nmethod();
assert(nm->insts_contains_inclusive(pc), "");
address deopt = nm->is_method_handle_return(pc) ?
nm->deopt_mh_handler_begin() :
nm->deopt_handler_begin();
assert(deopt != nullptr, "");
frame fr = os::fetch_frame_from_context(uc);
cm->set_original_pc(&fr, pc);
nm->set_original_pc(&fr, pc);
os::Posix::ucontext_set_pc(uc, deopt);
signal_was_handled = true;

View File

@ -2781,10 +2781,10 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
CompiledMethod* nm = nullptr;
nmethod* nm = nullptr;
if (in_java) {
CodeBlob* cb = CodeCache::find_blob(pc);
nm = (cb != nullptr) ? cb->as_compiled_method_or_null() : nullptr;
nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr;
}
bool is_unsafe_arraycopy = (in_native || in_java) && UnsafeCopyMemory::contains_pc(pc);
@ -2833,14 +2833,14 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
// If it is, patch return address to be deopt handler.
if (NativeDeoptInstruction::is_deopt_at(pc)) {
CodeBlob* cb = CodeCache::find_blob(pc);
if (cb != nullptr && cb->is_compiled()) {
CompiledMethod* cm = cb->as_compiled_method();
if (cb != nullptr && cb->is_nmethod()) {
nmethod* nm = cb->as_nmethod();
frame fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
address deopt = cm->is_method_handle_return(pc) ?
cm->deopt_mh_handler_begin() :
cm->deopt_handler_begin();
assert(cm->insts_contains_inclusive(pc), "");
cm->set_original_pc(&fr, pc);
address deopt = nm->is_method_handle_return(pc) ?
nm->deopt_mh_handler_begin() :
nm->deopt_handler_begin();
assert(nm->insts_contains_inclusive(pc), "");
nm->set_original_pc(&fr, pc);
// Set pc to handler
exceptionInfo->ContextRecord->PC_NAME = (DWORD64)deopt;
return EXCEPTION_CONTINUE_EXECUTION;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -236,7 +236,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
((NativeInstruction*)pc)->is_safepoint_poll() &&
CodeCache::contains((void*) pc) &&
((cb = CodeCache::find_blob(pc)) != nullptr) &&
cb->is_compiled()) {
cb->is_nmethod()) {
if (TraceTraps) {
tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (%s)", p2i(pc),
USE_POLL_BIT_ONLY ? "SIGTRAP" : "SIGSEGV");
@ -249,7 +249,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
((NativeInstruction*)pc)->is_safepoint_poll_return() &&
CodeCache::contains((void*) pc) &&
((cb = CodeCache::find_blob(pc)) != nullptr) &&
cb->is_compiled()) {
cb->is_nmethod()) {
if (TraceTraps) {
tty->print_cr("trap: safepoint_poll at return at " INTPTR_FORMAT " (nmethod)", p2i(pc));
}
@ -339,7 +339,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
// BugId 4454115: A read from a MappedByteBuffer can fault here if the
// underlying file has been truncated. Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob(pc);
CompiledMethod* nm = cb ? cb->as_compiled_method_or_null() : nullptr;
nmethod* nm = cb ? cb->as_nmethod_or_null() : nullptr;
bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
address next_pc = pc + 4;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -256,7 +256,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
// here if the underlying file has been truncated.
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob(pc);
CompiledMethod* nm = (cb != nullptr) ? cb->as_compiled_method_or_null() : nullptr;
nmethod* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr;
bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
address next_pc = pc + NativeCall::instruction_size;

View File

@ -440,7 +440,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
// here if the underlying file has been truncated.
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob(pc);
CompiledMethod* nm = (cb != nullptr) ? cb->as_compiled_method_or_null() : nullptr;
nmethod* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr;
bool is_unsafe_arraycopy = thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc);
if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
address next_pc = Assembler::locate_next_instruction(pc);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -239,7 +239,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
// here if the underlying file has been truncated.
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob(pc);
CompiledMethod* nm = (cb != nullptr) ? cb->as_compiled_method_or_null() : nullptr;
nmethod* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr;
bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
address next_pc = pc + NativeCall::instruction_size;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -323,7 +323,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
// here if the underlying file has been truncated.
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob(pc);
CompiledMethod* nm = (cb != nullptr) ? cb->as_compiled_method_or_null() : nullptr;
nmethod* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr;
if ((nm != nullptr && nm->has_unsafe_access()) || (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc))) {
unsafe_access = true;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -263,7 +263,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
((NativeInstruction*)pc)->is_safepoint_poll() &&
CodeCache::contains((void*) pc) &&
((cb = CodeCache::find_blob(pc)) != nullptr) &&
cb->is_compiled()) {
cb->is_nmethod()) {
if (TraceTraps) {
tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (%s)", p2i(pc),
USE_POLL_BIT_ONLY ? "SIGTRAP" : "SIGSEGV");
@ -275,7 +275,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
((NativeInstruction*)pc)->is_safepoint_poll_return() &&
CodeCache::contains((void*) pc) &&
((cb = CodeCache::find_blob(pc)) != nullptr) &&
cb->is_compiled()) {
cb->is_nmethod()) {
if (TraceTraps) {
tty->print_cr("trap: safepoint_poll at return at " INTPTR_FORMAT " (nmethod)", p2i(pc));
}
@ -354,7 +354,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
// BugId 4454115: A read from a MappedByteBuffer can fault here if the
// underlying file has been truncated. Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob(pc);
CompiledMethod* nm = (cb != nullptr) ? cb->as_compiled_method_or_null() : nullptr;
nmethod* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr;
bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
address next_pc = pc + 4;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -229,7 +229,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
// here if the underlying file has been truncated.
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob(pc);
CompiledMethod* nm = (cb != nullptr) ? cb->as_compiled_method_or_null() : nullptr;
nmethod* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr;
bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
address next_pc = Assembler::locate_next_instruction(pc);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -309,7 +309,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
// BugId 4454115: A read from a MappedByteBuffer can fault here if the
// underlying file has been truncated. Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob(pc);
CompiledMethod* nm = (cb != nullptr) ? cb->as_compiled_method_or_null() : nullptr;
nmethod* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr;
if (nm != nullptr && nm->has_unsafe_access()) {
// We don't really need a stub here! Just set the pending exception and
// continue at the next instruction after the faulting read. Returning

View File

@ -259,7 +259,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
// here if the underlying file has been truncated.
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob(pc);
CompiledMethod* nm = (cb != nullptr) ? cb->as_compiled_method_or_null() : nullptr;
nmethod* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr;
bool is_unsafe_arraycopy = thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc);
if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
address next_pc = Assembler::locate_next_instruction(pc);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1142,7 +1142,7 @@ void ciEnv::register_method(ciMethod* target,
if (entry_bci == InvocationEntryBci) {
// If there is an old version we're done with it
CompiledMethod* old = method->code();
nmethod* old = method->code();
if (TraceMethodReplacement && old != nullptr) {
ResourceMark rm;
char *method_name = method->name_and_sig_as_C_string();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1129,7 +1129,7 @@ int ciMethod::code_size_for_inlining() {
int ciMethod::inline_instructions_size() {
if (_inline_instructions_size == -1) {
GUARDED_VM_ENTRY(
CompiledMethod* code = get_Method()->code();
nmethod* code = get_Method()->code();
if (code != nullptr && (code->comp_level() == CompLevel_full_optimization)) {
int isize = code->insts_end() - code->verified_entry_point() - code->skipped_instructions_size();
_inline_instructions_size = isize > 0 ? isize : 0;
@ -1145,7 +1145,7 @@ int ciMethod::inline_instructions_size() {
// ciMethod::log_nmethod_identity
void ciMethod::log_nmethod_identity(xmlStream* log) {
GUARDED_VM_ENTRY(
CompiledMethod* code = get_Method()->code();
nmethod* code = get_Method()->code();
if (code != nullptr) {
code->log_identity(log);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -804,7 +804,7 @@ class CompileReplay : public StackObj {
}
}
// Make sure the existence of a prior compile doesn't stop this one
CompiledMethod* nm = (entry_bci != InvocationEntryBci) ? method->lookup_osr_nmethod_for(entry_bci, comp_level, true) : method->code();
nmethod* nm = (entry_bci != InvocationEntryBci) ? method->lookup_osr_nmethod_for(entry_bci, comp_level, true) : method->code();
if (nm != nullptr) {
nm->make_not_entrant();
}

View File

@ -2417,7 +2417,7 @@ static void print_stack_element_to_stream(outputStream* st, Handle mirror, int m
// Neither sourcename nor linenumber
buf_off += os::snprintf_checked(buf + buf_off, buf_size - buf_off, "Unknown Source)");
}
CompiledMethod* nm = method->code();
nmethod* nm = method->code();
if (WizardMode && nm != nullptr) {
os::snprintf_checked(buf + buf_off, buf_size - buf_off, "(nmethod " INTPTR_FORMAT ")", (intptr_t)nm);
}
@ -2543,7 +2543,7 @@ void java_lang_Throwable::fill_in_stack_trace(Handle throwable, const methodHand
RegisterMap::ProcessFrames::skip,
RegisterMap::WalkContinuation::include);
int decode_offset = 0;
CompiledMethod* nm = nullptr;
nmethod* nm = nullptr;
bool skip_fillInStackTrace_check = false;
bool skip_throwableInit_check = false;
bool skip_hidden = !ShowHiddenFrames;
@ -2587,10 +2587,10 @@ void java_lang_Throwable::fill_in_stack_trace(Handle throwable, const methodHand
// HMMM QQQ might be nice to have frame return nm as null if cb is non-null
// but non nmethod
fr = fr.sender(&map);
if (cb == nullptr || !cb->is_compiled()) {
if (cb == nullptr || !cb->is_nmethod()) {
continue;
}
nm = cb->as_compiled_method();
nm = cb->as_nmethod();
assert(nm->method() != nullptr, "must be");
if (nm->method()->is_native()) {
method = nm->method();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,7 +29,7 @@
CompiledICProtectionBehaviour* CompiledICProtectionBehaviour::_current = nullptr;
bool DefaultICProtectionBehaviour::lock(CompiledMethod* method) {
bool DefaultICProtectionBehaviour::lock(nmethod* method) {
if (is_safe(method)) {
return false;
}
@ -37,10 +37,10 @@ bool DefaultICProtectionBehaviour::lock(CompiledMethod* method) {
return true;
}
void DefaultICProtectionBehaviour::unlock(CompiledMethod* method) {
void DefaultICProtectionBehaviour::unlock(nmethod* method) {
CompiledIC_lock->unlock();
}
bool DefaultICProtectionBehaviour::is_safe(CompiledMethod* method) {
bool DefaultICProtectionBehaviour::is_safe(nmethod* method) {
return SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->owned_by_self();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,24 +27,24 @@
#include "memory/allocation.hpp"
class CompiledMethod;
class nmethod;
class CompiledICProtectionBehaviour {
static CompiledICProtectionBehaviour* _current;
public:
virtual bool lock(CompiledMethod* method) = 0;
virtual void unlock(CompiledMethod* method) = 0;
virtual bool is_safe(CompiledMethod* method) = 0;
virtual bool lock(nmethod* method) = 0;
virtual void unlock(nmethod* method) = 0;
virtual bool is_safe(nmethod* method) = 0;
static CompiledICProtectionBehaviour* current() { return _current; }
static void set_current(CompiledICProtectionBehaviour* current) { _current = current; }
};
class DefaultICProtectionBehaviour: public CompiledICProtectionBehaviour, public CHeapObj<mtInternal> {
virtual bool lock(CompiledMethod* method);
virtual void unlock(CompiledMethod* method);
virtual bool is_safe(CompiledMethod* method);
virtual bool lock(nmethod* method);
virtual void unlock(nmethod* method);
virtual bool is_safe(nmethod* method);
};
#endif // SHARE_CODE_CODEBEHAVIOURS_HPP
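
Because the protection-behaviour interface above now takes nmethod* throughout, any other implementation has to adopt the same signatures. A hypothetical no-op variant, shown only to illustrate the updated virtuals (it is not part of the patch):

// Hypothetical illustration: matches the nmethod*-based virtuals declared above.
class NoOpICProtectionBehaviour : public CompiledICProtectionBehaviour, public CHeapObj<mtInternal> {
  virtual bool lock(nmethod* method)    { return false; } // nothing to acquire
  virtual void unlock(nmethod* method)  { }
  virtual bool is_safe(nmethod* method) { return true; }  // callers may patch freely
};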

View File

@ -54,9 +54,6 @@
#include "c1/c1_Runtime1.hpp"
#endif
const char* CodeBlob::compiler_name() const {
return compilertype2name(_type);
}
unsigned int CodeBlob::align_code_offset(int offset) {
// align the size to CodeEntryAlignment
@ -64,7 +61,6 @@ unsigned int CodeBlob::align_code_offset(int offset) {
return align_up(offset + header_size, CodeEntryAlignment) - header_size;
}
// This must be consistent with the CodeBlob constructor's layout actions.
unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
unsigned int size = header_size;
@ -77,99 +73,79 @@ unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
return size;
}
CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled) :
_code_begin(layout.code_begin()),
_code_end(layout.code_end()),
_content_begin(layout.content_begin()),
_data_end(layout.data_end()),
_relocation_begin(layout.relocation_begin()),
_relocation_end(layout.relocation_end()),
#ifdef ASSERT
void CodeBlob::verify_parameters() {
assert(is_aligned(_size, oopSize), "unaligned size");
assert(is_aligned(_header_size, oopSize), "unaligned size");
assert(is_aligned(_relocation_size, oopSize), "unaligned size");
assert(_data_offset <= size(), "codeBlob is too small");
assert(code_end() == content_end(), "must be the same - see code_end()");
#ifdef COMPILER1
// probably wrong for tiered
assert(frame_size() >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
}
#endif
CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, int header_size, int relocation_size,
int content_offset, int code_offset, int frame_complete_offset, int data_offset,
int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments) :
_oop_maps(oop_maps),
_name(name),
_size(layout.size()),
_header_size(layout.header_size()),
_size(size),
_header_size(header_size),
_relocation_size(relocation_size),
_content_offset(content_offset),
_code_offset(code_offset),
_frame_complete_offset(frame_complete_offset),
_data_offset(layout.data_offset()),
_data_offset(data_offset),
_frame_size(frame_size),
_caller_must_gc_arguments(caller_must_gc_arguments),
_is_compiled(compiled),
_type(type)
S390_ONLY(_ctable_offset(0) COMMA)
_kind(kind),
_caller_must_gc_arguments(caller_must_gc_arguments)
{
assert(is_aligned(layout.size(), oopSize), "unaligned size");
assert(is_aligned(layout.header_size(), oopSize), "unaligned size");
assert(is_aligned(layout.relocation_size(), oopSize), "unaligned size");
assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");
#ifdef COMPILER1
// probably wrong for tiered
assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
S390_ONLY(_ctable_offset = 0;) // avoid uninitialized fields
DEBUG_ONLY( verify_parameters(); )
}
CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, CodeBuffer* cb /*UNUSED*/, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled) :
_code_begin(layout.code_begin()),
_code_end(layout.code_end()),
_content_begin(layout.content_begin()),
_data_end(layout.data_end()),
_relocation_begin(layout.relocation_begin()),
_relocation_end(layout.relocation_end()),
CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, int header_size,
int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
_oop_maps(nullptr), // will be set by set_oop_maps() call
_name(name),
_size(layout.size()),
_header_size(layout.header_size()),
_size(size),
_header_size(header_size),
_relocation_size(align_up(cb->total_relocation_size(), oopSize)),
_content_offset(CodeBlob::align_code_offset(_header_size + _relocation_size)),
_code_offset(_content_offset + cb->total_offset_of(cb->insts())),
_frame_complete_offset(frame_complete_offset),
_data_offset(layout.data_offset()),
_data_offset(_content_offset + align_up(cb->total_content_size(), oopSize)),
_frame_size(frame_size),
_caller_must_gc_arguments(caller_must_gc_arguments),
_is_compiled(compiled),
_type(type)
S390_ONLY(_ctable_offset(0) COMMA)
_kind(kind),
_caller_must_gc_arguments(caller_must_gc_arguments)
{
assert(is_aligned(_size, oopSize), "unaligned size");
assert(is_aligned(_header_size, oopSize), "unaligned size");
assert(_data_offset <= _size, "codeBlob is too small");
assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");
DEBUG_ONLY( verify_parameters(); )
set_oop_maps(oop_maps);
#ifdef COMPILER1
// probably wrong for tiered
assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
S390_ONLY(_ctable_offset = 0;) // avoid uninitialized fields
}
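
Note: with CodeBlobLayout gone, this constructor derives the region offsets directly from the CodeBuffer and stores them in the blob. A worked example with made-up sizes (oopSize == 8 is assumed; the exact result of align_code_offset() depends on CodeEntryAlignment, so 256 is only illustrative):

//   _header_size     = 160
//   _relocation_size = align_up(cb->total_relocation_size() /* say 52 */, 8)  = 56
//   _content_offset  = align_code_offset(160 + 56)                            = 256
//   _code_offset     = 256 + cb->total_offset_of(cb->insts())
//   _data_offset     = 256 + align_up(cb->total_content_size(), 8)
// giving the layout [header | relocation | content (consts, insts, stubs) | data].
// Every boundary accessor in codeBlob.hpp (later in this patch) then reduces to an
// offset from the blob itself, e.g.
//   code_begin() == (address) this + _code_offset
//   data_end()   == (address) this + _size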
// Creates a simple CodeBlob. Sets up the size of the different regions.
RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size)
: CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, locs_size, size), frame_complete, 0, nullptr, false /* caller_must_gc_arguments */)
// Simple CodeBlob used for simple BufferBlob.
CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, int header_size) :
_oop_maps(nullptr),
_name(name),
_size(size),
_header_size(header_size),
_relocation_size(0),
_content_offset(CodeBlob::align_code_offset(header_size)),
_code_offset(_content_offset),
_frame_complete_offset(CodeOffsets::frame_never_safe),
_data_offset(size),
_frame_size(0),
S390_ONLY(_ctable_offset(0) COMMA)
_kind(kind),
_caller_must_gc_arguments(false)
{
assert(is_aligned(locs_size, oopSize), "unaligned size");
}
// Creates a RuntimeBlob from a CodeBuffer
// and copy code and relocation info.
RuntimeBlob::RuntimeBlob(
const char* name,
CodeBuffer* cb,
int header_size,
int size,
int frame_complete,
int frame_size,
OopMapSet* oop_maps,
bool caller_must_gc_arguments
) : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
cb->copy_code_and_locs_to(this);
}
void RuntimeBlob::free(RuntimeBlob* blob) {
assert(blob != nullptr, "caller must check for nullptr");
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
blob->purge(true /* free_code_cache_data */, true /* unregister_nmethod */);
{
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CodeCache::free(blob);
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
assert(is_aligned(size, oopSize), "unaligned size");
assert(is_aligned(header_size, oopSize), "unaligned size");
}
void CodeBlob::purge(bool free_code_cache_data, bool unregister_nmethod) {
@ -191,6 +167,46 @@ void CodeBlob::set_oop_maps(OopMapSet* p) {
}
}
const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) const {
assert(_oop_maps != nullptr, "nope");
return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
}
void CodeBlob::print_code_on(outputStream* st) {
ResourceMark m;
Disassembler::decode(this, st);
}
//-----------------------------------------------------------------------------------------
// Creates a RuntimeBlob from a CodeBuffer and copy code and relocation info.
RuntimeBlob::RuntimeBlob(
const char* name,
CodeBlobKind kind,
CodeBuffer* cb,
int size,
int header_size,
int frame_complete,
int frame_size,
OopMapSet* oop_maps,
bool caller_must_gc_arguments)
: CodeBlob(name, kind, cb, size, header_size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{
cb->copy_code_and_locs_to(this);
}
void RuntimeBlob::free(RuntimeBlob* blob) {
assert(blob != nullptr, "caller must check for nullptr");
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
blob->purge(true /* free_code_cache_data */, true /* unregister_nmethod */);
{
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CodeCache::free(blob);
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
}
void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) {
// Do not hold the CodeCache lock during name formatting.
assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");
@ -230,22 +246,11 @@ void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const cha
MemoryService::track_code_cache_memory_usage();
}
const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) const {
assert(_oop_maps != nullptr, "nope");
return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
}
void CodeBlob::print_code_on(outputStream* st) {
ResourceMark m;
Disassembler::decode(this, st);
}
//----------------------------------------------------------------------------------------------------
// Implementation of BufferBlob
BufferBlob::BufferBlob(const char* name, int size)
: RuntimeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0)
BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, int size)
: RuntimeBlob(name, kind, size, sizeof(BufferBlob))
{}
BufferBlob* BufferBlob::create(const char* name, uint buffer_size) {
@ -259,7 +264,7 @@ BufferBlob* BufferBlob::create(const char* name, uint buffer_size) {
assert(name != nullptr, "must provide a name");
{
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
blob = new (size) BufferBlob(name, size);
blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, size);
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
@ -268,10 +273,11 @@ BufferBlob* BufferBlob::create(const char* name, uint buffer_size) {
}
BufferBlob::BufferBlob(const char* name, int size, CodeBuffer* cb)
: RuntimeBlob(name, cb, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, 0, nullptr)
BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size)
: RuntimeBlob(name, kind, cb, size, sizeof(BufferBlob), CodeOffsets::frame_never_safe, 0, nullptr)
{}
// Used by gtest
BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
@ -280,7 +286,7 @@ BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
assert(name != nullptr, "must provide a name");
{
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
blob = new (size) BufferBlob(name, size, cb);
blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, cb, size);
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
@ -301,7 +307,7 @@ void BufferBlob::free(BufferBlob *blob) {
// Implementation of AdapterBlob
AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) :
BufferBlob("I2C/C2I adapters", size, cb) {
BufferBlob("I2C/C2I adapters", CodeBlobKind::Adapter, cb, size) {
CodeCache::commit(this);
}
@ -322,6 +328,9 @@ AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
return blob;
}
//----------------------------------------------------------------------------------------------------
// Implementation of VtableBlob
void* VtableBlob::operator new(size_t s, unsigned size) throw() {
// Handling of allocation failure stops compilation and prints a bunch of
// stuff, which requires unlocking the CodeCache_lock, so that the Compile_lock
@ -333,7 +342,7 @@ void* VtableBlob::operator new(size_t s, unsigned size) throw() {
}
VtableBlob::VtableBlob(const char* name, int size) :
BufferBlob(name, size) {
BufferBlob(name, CodeBlobKind::Vtable, size) {
}
VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
@ -404,7 +413,8 @@ RuntimeStub::RuntimeStub(
OopMapSet* oop_maps,
bool caller_must_gc_arguments
)
: RuntimeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
: RuntimeBlob(name, CodeBlobKind::Runtime_Stub, cb, size, sizeof(RuntimeStub),
frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{
}
@ -460,7 +470,8 @@ DeoptimizationBlob::DeoptimizationBlob(
int unpack_with_reexecution_offset,
int frame_size
)
: SingletonBlob("DeoptimizationBlob", cb, sizeof(DeoptimizationBlob), size, frame_size, oop_maps)
: SingletonBlob("DeoptimizationBlob", CodeBlobKind::Deoptimization, cb,
size, sizeof(DeoptimizationBlob), frame_size, oop_maps)
{
_unpack_offset = unpack_offset;
_unpack_with_exception = unpack_with_exception_offset;
@ -509,7 +520,8 @@ UncommonTrapBlob::UncommonTrapBlob(
OopMapSet* oop_maps,
int frame_size
)
: SingletonBlob("UncommonTrapBlob", cb, sizeof(UncommonTrapBlob), size, frame_size, oop_maps)
: SingletonBlob("UncommonTrapBlob", CodeBlobKind::Uncommon_Trap, cb,
size, sizeof(UncommonTrapBlob), frame_size, oop_maps)
{}
@ -545,7 +557,8 @@ ExceptionBlob::ExceptionBlob(
OopMapSet* oop_maps,
int frame_size
)
: SingletonBlob("ExceptionBlob", cb, sizeof(ExceptionBlob), size, frame_size, oop_maps)
: SingletonBlob("ExceptionBlob", CodeBlobKind::Exception, cb,
size, sizeof(ExceptionBlob), frame_size, oop_maps)
{}
@ -580,7 +593,8 @@ SafepointBlob::SafepointBlob(
OopMapSet* oop_maps,
int frame_size
)
: SingletonBlob("SafepointBlob", cb, sizeof(SafepointBlob), size, frame_size, oop_maps)
: SingletonBlob("SafepointBlob", CodeBlobKind::Safepoint, cb,
size, sizeof(SafepointBlob), frame_size, oop_maps)
{}
@ -602,6 +616,61 @@ SafepointBlob* SafepointBlob::create(
return blob;
}
//----------------------------------------------------------------------------------------------------
// Implementation of UpcallStub
UpcallStub::UpcallStub(const char* name, CodeBuffer* cb, int size, jobject receiver, ByteSize frame_data_offset) :
RuntimeBlob(name, CodeBlobKind::Upcall, cb, size, sizeof(UpcallStub),
CodeOffsets::frame_never_safe, 0 /* no frame size */,
/* oop maps = */ nullptr, /* caller must gc arguments = */ false),
_receiver(receiver),
_frame_data_offset(frame_data_offset)
{
CodeCache::commit(this);
}
void* UpcallStub::operator new(size_t s, unsigned size) throw() {
return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}
UpcallStub* UpcallStub::create(const char* name, CodeBuffer* cb, jobject receiver, ByteSize frame_data_offset) {
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
UpcallStub* blob = nullptr;
unsigned int size = CodeBlob::allocation_size(cb, sizeof(UpcallStub));
{
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
blob = new (size) UpcallStub(name, cb, size, receiver, frame_data_offset);
}
if (blob == nullptr) {
return nullptr; // caller must handle this
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
trace_new_stub(blob, "UpcallStub");
return blob;
}
void UpcallStub::oops_do(OopClosure* f, const frame& frame) {
frame_data_for_frame(frame)->old_handles->oops_do(f);
}
JavaFrameAnchor* UpcallStub::jfa_for_frame(const frame& frame) const {
return &frame_data_for_frame(frame)->jfa;
}
void UpcallStub::free(UpcallStub* blob) {
assert(blob != nullptr, "caller must check for nullptr");
JNIHandles::destroy_global(blob->receiver());
RuntimeBlob::free(blob);
}
void UpcallStub::preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) {
ShouldNotReachHere(); // caller should never have to gc arguments
}
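
Note: UpcallStub itself is unchanged apart from the RuntimeBlob base call now passing CodeBlobKind::Upcall; creation and failure handling keep their shape. A sketch of the call site (hypothetical helper; in HotSpot the real caller is the foreign-function upcall linker):

#include "asm/codeBuffer.hpp"
#include "code/codeBlob.hpp"

static UpcallStub* make_upcall_stub_sketch(CodeBuffer* cb, jobject receiver, ByteSize frame_data_offset) {
  UpcallStub* stub = UpcallStub::create("upcall stub example", cb, receiver, frame_data_offset);
  if (stub == nullptr) {
    // CodeCache allocation failed: create() returns nullptr and the caller must bail out.
    return nullptr;
  }
  // receiver is a JNI global handle; UpcallStub::free() releases it via JNIHandles::destroy_global().
  return stub;
}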
//----------------------------------------------------------------------------------------------------
// Verification and printing
@ -678,10 +747,6 @@ void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const
print_on(st);
}
void RuntimeBlob::verify() {
ShouldNotReachHere();
}
void BufferBlob::verify() {
// unimplemented
}
@ -730,60 +795,6 @@ void DeoptimizationBlob::print_value_on(outputStream* st) const {
st->print_cr("Deoptimization (frame not available)");
}
// Implementation of UpcallStub
UpcallStub::UpcallStub(const char* name, CodeBuffer* cb, int size, jobject receiver, ByteSize frame_data_offset) :
RuntimeBlob(name, cb, sizeof(UpcallStub), size, CodeOffsets::frame_never_safe, 0 /* no frame size */,
/* oop maps = */ nullptr, /* caller must gc arguments = */ false),
_receiver(receiver),
_frame_data_offset(frame_data_offset) {
CodeCache::commit(this);
}
void* UpcallStub::operator new(size_t s, unsigned size) throw() {
return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}
UpcallStub* UpcallStub::create(const char* name, CodeBuffer* cb, jobject receiver, ByteSize frame_data_offset) {
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
UpcallStub* blob = nullptr;
unsigned int size = CodeBlob::allocation_size(cb, sizeof(UpcallStub));
{
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
blob = new (size) UpcallStub(name, cb, size, receiver, frame_data_offset);
}
if (blob == nullptr) {
return nullptr; // caller must handle this
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
trace_new_stub(blob, "UpcallStub");
return blob;
}
void UpcallStub::oops_do(OopClosure* f, const frame& frame) {
frame_data_for_frame(frame)->old_handles->oops_do(f);
}
JavaFrameAnchor* UpcallStub::jfa_for_frame(const frame& frame) const {
return &frame_data_for_frame(frame)->jfa;
}
void UpcallStub::free(UpcallStub* blob) {
assert(blob != nullptr, "caller must check for nullptr");
JNIHandles::destroy_global(blob->receiver());
RuntimeBlob::free(blob);
}
void UpcallStub::preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) {
ShouldNotReachHere(); // caller should never have to gc arguments
}
// Misc.
void UpcallStub::verify() {
// unimplemented
}


@ -52,8 +52,7 @@ enum class CodeBlobType {
// CodeBlob - superclass for all entries in the CodeCache.
//
// Subtypes are:
// CompiledMethod : Compiled Java methods (include method that calls to native code)
// nmethod : JIT Compiled Java methods
// nmethod : JIT Compiled Java methods
// RuntimeBlob : Non-compiled method code; generated glue code
// BufferBlob : Used for non-relocatable code such as interpreter, stubroutines, etc.
// AdapterBlob : Used to hold C2I/I2C adapters
@ -75,8 +74,22 @@ enum class CodeBlobType {
// - instruction space
// - data space
enum class CodeBlobKind : u1 {
None,
Nmethod,
Buffer,
Adapter,
Vtable,
MH_Adapter,
Runtime_Stub,
Deoptimization,
Exception,
Safepoint,
Uncommon_Trap,
Upcall,
Number_Of_Kinds
};
class CodeBlobLayout;
class UpcallStub; // for as_upcall_stub()
class RuntimeStub; // for as_runtime_stub()
class JavaFrameAnchor; // for UpcallStub::jfa_for_frame
@ -87,23 +100,15 @@ class CodeBlob {
friend class CodeCacheDumper;
protected:
// order fields from large to small to minimize padding between fields
address _code_begin;
address _code_end;
address _content_begin; // address to where content region begins (this includes consts, insts, stubs)
// address _content_end - not required, for all CodeBlobs _code_end == _content_end for now
address _data_end;
address _relocation_begin;
address _relocation_end;
ImmutableOopMapSet* _oop_maps; // OopMap for this CodeBlob
const char* _name;
S390_ONLY(int _ctable_offset;)
int _size; // total size of CodeBlob in bytes
int _header_size; // size of header (depends on subclass)
int _relocation_size; // size of relocation
int _content_offset; // offset to where content region begins (this includes consts, insts, stubs)
int _code_offset; // offset to where instructions region begins (this includes insts, stubs)
int _frame_complete_offset; // instruction offsets in [0.._frame_complete_offset) have
// not finished setting up their frame. Beware of pc's in
// that range. There is a similar range(s) on returns
@ -111,28 +116,32 @@ protected:
int _data_offset; // offset to where data region begins
int _frame_size; // size of stack frame in words (NOT slots. On x64 these are 64bit words)
bool _caller_must_gc_arguments;
S390_ONLY(int _ctable_offset;)
bool _is_compiled;
const CompilerType _type; // CompilerType
CodeBlobKind _kind; // Kind of this code blob
bool _caller_must_gc_arguments;
#ifndef PRODUCT
AsmRemarks _asm_remarks;
DbgStrings _dbg_strings;
#endif // not PRODUCT
CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset,
int frame_size, ImmutableOopMapSet* oop_maps,
bool caller_must_gc_arguments, bool compiled = false);
CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset,
int frame_size, OopMapSet* oop_maps,
bool caller_must_gc_arguments, bool compiled = false);
DEBUG_ONLY( void verify_parameters() );
CodeBlob(const char* name, CodeBlobKind kind, int size, int header_size, int relocation_size,
int content_offset, int code_offset, int data_offset, int frame_complete_offset,
int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, int header_size,
int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);
// Simple CodeBlob used for simple BufferBlob.
CodeBlob(const char* name, CodeBlobKind kind, int size, int header_size);
void operator delete(void* p) { }
public:
// Only used by unit test.
CodeBlob() : _type(compiler_none) {}
virtual ~CodeBlob() {
assert(_oop_maps == nullptr, "Not flushed");
@ -146,44 +155,42 @@ public:
virtual void purge(bool free_code_cache_data, bool unregister_nmethod);
// Typing
virtual bool is_buffer_blob() const { return false; }
virtual bool is_nmethod() const { return false; }
virtual bool is_runtime_stub() const { return false; }
virtual bool is_deoptimization_stub() const { return false; }
virtual bool is_uncommon_trap_stub() const { return false; }
virtual bool is_exception_stub() const { return false; }
virtual bool is_safepoint_stub() const { return false; }
virtual bool is_adapter_blob() const { return false; }
virtual bool is_vtable_blob() const { return false; }
virtual bool is_method_handles_adapter_blob() const { return false; }
virtual bool is_upcall_stub() const { return false; }
bool is_compiled() const { return _is_compiled; }
const bool* is_compiled_addr() const { return &_is_compiled; }
inline bool is_compiled_by_c1() const { return _type == compiler_c1; };
inline bool is_compiled_by_c2() const { return _type == compiler_c2; };
inline bool is_compiled_by_jvmci() const { return _type == compiler_jvmci; };
const char* compiler_name() const;
CompilerType compiler_type() const { return _type; }
bool is_nmethod() const { return _kind == CodeBlobKind::Nmethod; }
bool is_buffer_blob() const { return _kind == CodeBlobKind::Buffer; }
bool is_runtime_stub() const { return _kind == CodeBlobKind::Runtime_Stub; }
bool is_deoptimization_stub() const { return _kind == CodeBlobKind::Deoptimization; }
bool is_uncommon_trap_stub() const { return _kind == CodeBlobKind::Uncommon_Trap; }
bool is_exception_stub() const { return _kind == CodeBlobKind::Exception; }
bool is_safepoint_stub() const { return _kind == CodeBlobKind::Safepoint; }
bool is_adapter_blob() const { return _kind == CodeBlobKind::Adapter; }
bool is_vtable_blob() const { return _kind == CodeBlobKind::Vtable; }
bool is_method_handles_adapter_blob() const { return _kind == CodeBlobKind::MH_Adapter; }
bool is_upcall_stub() const { return _kind == CodeBlobKind::Upcall; }
// Casting
nmethod* as_nmethod_or_null() { return is_nmethod() ? (nmethod*) this : nullptr; }
nmethod* as_nmethod() { assert(is_nmethod(), "must be nmethod"); return (nmethod*) this; }
CompiledMethod* as_compiled_method_or_null() { return is_compiled() ? (CompiledMethod*) this : nullptr; }
CompiledMethod* as_compiled_method() { assert(is_compiled(), "must be compiled"); return (CompiledMethod*) this; }
CodeBlob* as_codeblob_or_null() const { return (CodeBlob*) this; }
UpcallStub* as_upcall_stub() const { assert(is_upcall_stub(), "must be upcall stub"); return (UpcallStub*) this; }
RuntimeStub* as_runtime_stub() const { assert(is_runtime_stub(), "must be runtime blob"); return (RuntimeStub*) this; }
nmethod* as_nmethod_or_null() { return is_nmethod() ? (nmethod*) this : nullptr; }
nmethod* as_nmethod() { assert(is_nmethod(), "must be nmethod"); return (nmethod*) this; }
CodeBlob* as_codeblob_or_null() const { return (CodeBlob*) this; }
UpcallStub* as_upcall_stub() const { assert(is_upcall_stub(), "must be upcall stub"); return (UpcallStub*) this; }
RuntimeStub* as_runtime_stub() const { assert(is_runtime_stub(), "must be runtime blob"); return (RuntimeStub*) this; }
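
Note: with is_compiled(), compiler_type() and the CompiledMethod casts removed, callers now key off the blob kind with a single nullptr check. A minimal sketch of the resulting caller pattern (the helper name is made up; the accessors are the ones declared above):

#include "code/codeBlob.hpp"
#include "code/nmethod.hpp"

static Method* method_if_nmethod(CodeBlob* cb) {
  nmethod* nm = cb->as_nmethod_or_null();  // nullptr for buffers, adapters, runtime stubs, upcall stubs, ...
  return (nm != nullptr) ? nm->method() : nullptr;
}

This is the same shape the updated call sites in this patch take, e.g. the perf-map writer and the CodeBlobIterator filter in codeCache.cpp/.hpp.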
// Boundaries
address header_begin() const { return (address) this; }
relocInfo* relocation_begin() const { return (relocInfo*) _relocation_begin; };
relocInfo* relocation_end() const { return (relocInfo*) _relocation_end; }
address content_begin() const { return _content_begin; }
address content_end() const { return _code_end; } // _code_end == _content_end is true for all types of blobs for now, it is also checked in the constructor
address code_begin() const { return _code_begin; }
address code_end() const { return _code_end; }
address data_end() const { return _data_end; }
address header_begin() const { return (address) this; }
address header_end() const { return ((address) this) + _header_size; }
relocInfo* relocation_begin() const { return (relocInfo*) header_end(); }
relocInfo* relocation_end() const { return (relocInfo*)(header_end() + _relocation_size); }
address content_begin() const { return (address) header_begin() + _content_offset; }
address content_end() const { return (address) header_begin() + _data_offset; }
address code_begin() const { return (address) header_begin() + _code_offset; }
// code_end == content_end is true for all types of blobs for now, it is also checked in the constructor
address code_end() const { return (address) header_begin() + _data_offset; }
address data_begin() const { return (address) header_begin() + _data_offset; }
address data_end() const { return (address) header_begin() + _size; }
// Offsets
int content_offset() const { return _content_offset; }
int code_offset() const { return _code_offset; }
int data_offset() const { return _data_offset; }
// This field holds the beginning of the const section in the old code buffer.
// It is needed to fix relocations of pc-relative loads when resizing the
@ -192,17 +199,16 @@ public:
void set_ctable_begin(address ctable) { S390_ONLY(_ctable_offset = ctable - header_begin();) }
// Sizes
int size() const { return _size; }
int header_size() const { return _header_size; }
int relocation_size() const { return pointer_delta_as_int((address) relocation_end(), (address) relocation_begin()); }
int content_size() const { return pointer_delta_as_int(content_end(), content_begin()); }
int code_size() const { return pointer_delta_as_int(code_end(), code_begin()); }
int size() const { return _size; }
int header_size() const { return _header_size; }
int relocation_size() const { return pointer_delta_as_int((address) relocation_end(), (address) relocation_begin()); }
int content_size() const { return pointer_delta_as_int(content_end(), content_begin()); }
int code_size() const { return pointer_delta_as_int(code_end(), code_begin()); }
// Only used from CodeCache::free_unused_tail() after the Interpreter blob was trimmed
void adjust_size(size_t used) {
_size = (int)used;
_data_offset = (int)used;
_code_end = (address)this + used;
_data_end = (address)this + used;
}
// Containment
@ -213,8 +219,6 @@ public:
code_contains(addr) && addr >= code_begin() + _frame_complete_offset; }
int frame_complete_offset() const { return _frame_complete_offset; }
virtual bool is_not_entrant() const { return false; }
// OopMap for frame
ImmutableOopMapSet* oop_maps() const { return _oop_maps; }
void set_oop_maps(OopMapSet* p);
@ -260,97 +264,8 @@ public:
#endif
};
class CodeBlobLayout : public StackObj {
private:
int _size;
int _header_size;
int _relocation_size;
int _content_offset;
int _code_offset;
int _data_offset;
address _code_begin;
address _code_end;
address _content_begin;
address _content_end;
address _data_end;
address _relocation_begin;
address _relocation_end;
public:
CodeBlobLayout(address code_begin, address code_end, address content_begin, address content_end, address data_end, address relocation_begin, address relocation_end) :
_size(0),
_header_size(0),
_relocation_size(0),
_content_offset(0),
_code_offset(0),
_data_offset(0),
_code_begin(code_begin),
_code_end(code_end),
_content_begin(content_begin),
_content_end(content_end),
_data_end(data_end),
_relocation_begin(relocation_begin),
_relocation_end(relocation_end)
{
}
CodeBlobLayout(const address start, int size, int header_size, int relocation_size, int data_offset) :
_size(size),
_header_size(header_size),
_relocation_size(relocation_size),
_content_offset(CodeBlob::align_code_offset(_header_size + _relocation_size)),
_code_offset(_content_offset),
_data_offset(data_offset)
{
assert(is_aligned(_relocation_size, oopSize), "unaligned size");
_code_begin = (address) start + _code_offset;
_code_end = (address) start + _data_offset;
_content_begin = (address) start + _content_offset;
_content_end = (address) start + _data_offset;
_data_end = (address) start + _size;
_relocation_begin = (address) start + _header_size;
_relocation_end = _relocation_begin + _relocation_size;
}
CodeBlobLayout(const address start, int size, int header_size, const CodeBuffer* cb) :
_size(size),
_header_size(header_size),
_relocation_size(align_up(cb->total_relocation_size(), oopSize)),
_content_offset(CodeBlob::align_code_offset(_header_size + _relocation_size)),
_code_offset(_content_offset + cb->total_offset_of(cb->insts())),
_data_offset(_content_offset + align_up(cb->total_content_size(), oopSize))
{
assert(is_aligned(_relocation_size, oopSize), "unaligned size");
_code_begin = (address) start + _code_offset;
_code_end = (address) start + _data_offset;
_content_begin = (address) start + _content_offset;
_content_end = (address) start + _data_offset;
_data_end = (address) start + _size;
_relocation_begin = (address) start + _header_size;
_relocation_end = _relocation_begin + _relocation_size;
}
int size() const { return _size; }
int header_size() const { return _header_size; }
int relocation_size() const { return _relocation_size; }
int content_offset() const { return _content_offset; }
int code_offset() const { return _code_offset; }
int data_offset() const { return _data_offset; }
address code_begin() const { return _code_begin; }
address code_end() const { return _code_end; }
address data_end() const { return _data_end; }
address relocation_begin() const { return _relocation_begin; }
address relocation_end() const { return _relocation_end; }
address content_begin() const { return _content_begin; }
address content_end() const { return _content_end; }
};
//----------------------------------------------------------------------------------------------------
// RuntimeBlob: used for non-compiled method code (adapters, stubs, blobs)
class RuntimeBlob : public CodeBlob {
friend class VMStructs;
@ -358,16 +273,19 @@ class RuntimeBlob : public CodeBlob {
// Creation
// a) simple CodeBlob
// frame_complete is the offset from the beginning of the instructions
// to where the frame setup (from stackwalk viewpoint) is complete.
RuntimeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size);
RuntimeBlob(const char* name, CodeBlobKind kind, int size, int header_size)
: CodeBlob(name, kind, size, header_size)
{}
// b) full CodeBlob
// frame_complete is the offset from the beginning of the instructions
// to where the frame setup (from stackwalk viewpoint) is complete.
RuntimeBlob(
const char* name,
CodeBlobKind kind,
CodeBuffer* cb,
int header_size,
int size,
int header_size,
int frame_complete,
int frame_size,
OopMapSet* oop_maps,
@ -376,15 +294,6 @@ class RuntimeBlob : public CodeBlob {
static void free(RuntimeBlob* blob);
void verify();
// OopMap for frame
virtual void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { ShouldNotReachHere(); }
// Debugging
virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
virtual void print_value_on(outputStream* st) const { CodeBlob::print_value_on(st); }
// Deal with Disassembler, VTune, Forte, JvmtiExport, MemoryService.
static void trace_new_stub(RuntimeBlob* blob, const char* name1, const char* name2 = "");
};
@ -403,8 +312,8 @@ class BufferBlob: public RuntimeBlob {
private:
// Creation support
BufferBlob(const char* name, int size);
BufferBlob(const char* name, int size, CodeBuffer* cb);
BufferBlob(const char* name, CodeBlobKind kind, int size);
BufferBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size);
void* operator new(size_t s, unsigned size) throw();
@ -415,15 +324,12 @@ class BufferBlob: public RuntimeBlob {
static void free(BufferBlob* buf);
// Typing
virtual bool is_buffer_blob() const { return true; }
// GC/Verification support
void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { /* nothing to do */ }
void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) override { /* nothing to do */ }
void verify();
void print_on(outputStream* st) const;
void print_value_on(outputStream* st) const;
void verify() override;
void print_on(outputStream* st) const override;
void print_value_on(outputStream* st) const override;
};
@ -437,9 +343,6 @@ private:
public:
// Creation
static AdapterBlob* create(CodeBuffer* cb);
// Typing
virtual bool is_adapter_blob() const { return true; }
};
//---------------------------------------------------------------------------------------------------
@ -452,9 +355,6 @@ private:
public:
// Creation
static VtableBlob* create(const char* name, int buffer_size);
// Typing
virtual bool is_vtable_blob() const { return true; }
};
//----------------------------------------------------------------------------------------------------
@ -462,14 +362,11 @@ public:
class MethodHandlesAdapterBlob: public BufferBlob {
private:
MethodHandlesAdapterBlob(int size): BufferBlob("MethodHandles adapters", size) {}
MethodHandlesAdapterBlob(int size): BufferBlob("MethodHandles adapters", CodeBlobKind::MH_Adapter, size) {}
public:
// Creation
static MethodHandlesAdapterBlob* create(int buffer_size);
// Typing
virtual bool is_method_handles_adapter_blob() const { return true; }
};
@ -506,17 +403,14 @@ class RuntimeStub: public RuntimeBlob {
static void free(RuntimeStub* stub) { RuntimeBlob::free(stub); }
// Typing
bool is_runtime_stub() const { return true; }
address entry_point() const { return code_begin(); }
// GC/Verification support
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* nothing to do */ }
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) override { /* nothing to do */ }
void verify();
void print_on(outputStream* st) const;
void print_value_on(outputStream* st) const;
void verify() override;
void print_on(outputStream* st) const override;
void print_value_on(outputStream* st) const override;
};
@ -531,23 +425,24 @@ class SingletonBlob: public RuntimeBlob {
public:
SingletonBlob(
const char* name,
CodeBuffer* cb,
int header_size,
int size,
int frame_size,
OopMapSet* oop_maps
const char* name,
CodeBlobKind kind,
CodeBuffer* cb,
int size,
int header_size,
int frame_size,
OopMapSet* oop_maps
)
: RuntimeBlob(name, cb, header_size, size, CodeOffsets::frame_never_safe, frame_size, oop_maps)
: RuntimeBlob(name, kind, cb, size, header_size, CodeOffsets::frame_never_safe, frame_size, oop_maps)
{};
address entry_point() { return code_begin(); }
// GC/Verification support
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* nothing to do */ }
void verify(); // does nothing
void print_on(outputStream* st) const;
void print_value_on(outputStream* st) const;
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) override { /* nothing to do */ }
void verify() override; // does nothing
void print_on(outputStream* st) const override;
void print_value_on(outputStream* st) const override;
};
@ -592,14 +487,8 @@ class DeoptimizationBlob: public SingletonBlob {
int frame_size
);
// Typing
bool is_deoptimization_stub() const { return true; }
// GC for args
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* Nothing to do */ }
// Printing
void print_value_on(outputStream* st) const;
void print_value_on(outputStream* st) const override;
address unpack() const { return code_begin() + _unpack_offset; }
address unpack_with_exception() const { return code_begin() + _unpack_with_exception; }
@ -656,12 +545,6 @@ class UncommonTrapBlob: public SingletonBlob {
OopMapSet* oop_maps,
int frame_size
);
// GC for args
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* nothing to do */ }
// Typing
bool is_uncommon_trap_stub() const { return true; }
};
@ -686,12 +569,6 @@ class ExceptionBlob: public SingletonBlob {
OopMapSet* oop_maps,
int frame_size
);
// GC for args
void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { /* nothing to do */ }
// Typing
bool is_exception_stub() const { return true; }
};
#endif // COMPILER2
@ -717,12 +594,6 @@ class SafepointBlob: public SingletonBlob {
OopMapSet* oop_maps,
int frame_size
);
// GC for args
void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { /* nothing to do */ }
// Typing
bool is_safepoint_stub() const { return true; }
};
//----------------------------------------------------------------------------------------------------
@ -759,17 +630,14 @@ class UpcallStub: public RuntimeBlob {
JavaFrameAnchor* jfa_for_frame(const frame& frame) const;
// Typing
virtual bool is_upcall_stub() const override { return true; }
// GC/Verification support
void oops_do(OopClosure* f, const frame& frame);
virtual void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) override;
virtual void verify() override;
void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) override;
void verify() override;
// Misc.
virtual void print_on(outputStream* st) const override;
virtual void print_value_on(outputStream* st) const override;
void print_on(outputStream* st) const override;
void print_value_on(outputStream* st) const override;
};
#endif // SHARE_CODE_CODEBLOB_HPP


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -161,7 +161,6 @@ class CodeBlob_sizes {
// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)
#define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
// Iterate over all CodeBlobs (cb) on the given CodeHeap
@ -174,7 +173,6 @@ ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
// Initialize arrays of CodeHeap subsets
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
@ -424,9 +422,6 @@ void CodeCache::add_heap(CodeHeap* heap) {
_heaps->insert_sorted<code_heap_compare>(heap);
CodeBlobType type = heap->code_blob_type();
if (code_blob_type_accepts_compiled(type)) {
_compiled_heaps->insert_sorted<code_heap_compare>(heap);
}
if (code_blob_type_accepts_nmethod(type)) {
_nmethod_heaps->insert_sorted<code_heap_compare>(heap);
}
@ -669,8 +664,8 @@ CodeBlob* CodeCache::find_blob(void* start) {
nmethod* CodeCache::find_nmethod(void* start) {
CodeBlob* cb = find_blob(start);
assert(cb->is_nmethod(), "did not find an nmethod");
return (nmethod*)cb;
assert(cb != nullptr, "did not find an nmethod");
return cb->as_nmethod();
}
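
Note: find_nmethod() now checks that the lookup succeeded and funnels the cast through as_nmethod(); it also absorbs the callers of CodeCache::find_compiled(), which is deleted further down. A minimal sketch of the usual call pattern (hypothetical helper; return_pc stands for a return address known to lie inside compiled code):

#include "code/codeCache.hpp"
#include "code/nmethod.hpp"

static bool destination_in_callers_stub(address return_pc, address destination) {
  nmethod* caller = CodeCache::find_nmethod(return_pc); // asserts, in debug builds, that this really is an nmethod
  return caller->stub_contains(destination);            // same pattern as CompiledDirectCall::is_call_to_interpreted() below
}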
void CodeCache::blobs_do(void f(CodeBlob* nm)) {
@ -882,7 +877,7 @@ void CodeCache::arm_all_nmethods() {
// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(bool unloading_occurred) {
assert_locked_or_safepoint(CodeCache_lock);
CompiledMethodIterator iter(CompiledMethodIterator::all_blobs);
NMethodIterator iter(NMethodIterator::all_blobs);
while(iter.next()) {
iter.method()->do_unloading(unloading_occurred);
}
@ -1011,7 +1006,7 @@ int CodeCache::nmethod_count(CodeBlobType code_blob_type) {
int CodeCache::nmethod_count() {
int count = 0;
FOR_ALL_NMETHOD_HEAPS(heap) {
for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap) {
count += (*heap)->nmethod_count();
}
return count;
@ -1178,7 +1173,7 @@ bool CodeCache::has_nmethods_with_dependencies() {
void CodeCache::clear_inline_caches() {
assert_locked_or_safepoint(CodeCache_lock);
CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading);
NMethodIterator iter(NMethodIterator::only_not_unloading);
while(iter.next()) {
iter.method()->clear_inline_caches();
}
@ -1271,38 +1266,32 @@ void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassD
#endif
}
CompiledMethod* CodeCache::find_compiled(void* start) {
CodeBlob *cb = find_blob(start);
assert(cb == nullptr || cb->is_compiled(), "did not find an compiled_method");
return (CompiledMethod*)cb;
}
#if INCLUDE_JVMTI
// RedefineClasses support for saving nmethods that are dependent on "old" methods.
// We don't really expect this table to grow very large. If it does, it can become a hashtable.
static GrowableArray<CompiledMethod*>* old_compiled_method_table = nullptr;
static GrowableArray<nmethod*>* old_nmethod_table = nullptr;
static void add_to_old_table(CompiledMethod* c) {
if (old_compiled_method_table == nullptr) {
old_compiled_method_table = new (mtCode) GrowableArray<CompiledMethod*>(100, mtCode);
static void add_to_old_table(nmethod* c) {
if (old_nmethod_table == nullptr) {
old_nmethod_table = new (mtCode) GrowableArray<nmethod*>(100, mtCode);
}
old_compiled_method_table->push(c);
old_nmethod_table->push(c);
}
static void reset_old_method_table() {
if (old_compiled_method_table != nullptr) {
delete old_compiled_method_table;
old_compiled_method_table = nullptr;
if (old_nmethod_table != nullptr) {
delete old_nmethod_table;
old_nmethod_table = nullptr;
}
}
// Remove this method when flushed.
void CodeCache::unregister_old_nmethod(CompiledMethod* c) {
void CodeCache::unregister_old_nmethod(nmethod* c) {
assert_lock_strong(CodeCache_lock);
if (old_compiled_method_table != nullptr) {
int index = old_compiled_method_table->find(c);
if (old_nmethod_table != nullptr) {
int index = old_nmethod_table->find(c);
if (index != -1) {
old_compiled_method_table->delete_at(index);
old_nmethod_table->delete_at(index);
}
}
}
@ -1310,13 +1299,13 @@ void CodeCache::unregister_old_nmethod(CompiledMethod* c) {
void CodeCache::old_nmethods_do(MetadataClosure* f) {
// Walk old method table and mark those on stack.
int length = 0;
if (old_compiled_method_table != nullptr) {
length = old_compiled_method_table->length();
if (old_nmethod_table != nullptr) {
length = old_nmethod_table->length();
for (int i = 0; i < length; i++) {
// Walk all methods saved on the last pass. Concurrent class unloading may
// also be looking at this method's metadata, so don't delete it yet if
// it is marked as unloaded.
old_compiled_method_table->at(i)->metadata_do(f);
old_nmethod_table->at(i)->metadata_do(f);
}
}
log_debug(redefine, class, nmethod)("Walked %d nmethods for mark_on_stack", length);
@ -1329,9 +1318,9 @@ void CodeCache::mark_dependents_for_evol_deoptimization(DeoptimizationScope* deo
// So delete old method table and create a new one.
reset_old_method_table();
CompiledMethodIterator iter(CompiledMethodIterator::all_blobs);
NMethodIterator iter(NMethodIterator::all_blobs);
while(iter.next()) {
CompiledMethod* nm = iter.method();
nmethod* nm = iter.method();
// Walk all alive nmethods to check for old Methods.
// This includes methods whose inline caches point to old methods, so
// inline cache clearing is unnecessary.
@ -1344,9 +1333,9 @@ void CodeCache::mark_dependents_for_evol_deoptimization(DeoptimizationScope* deo
void CodeCache::mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
CompiledMethodIterator iter(CompiledMethodIterator::all_blobs);
NMethodIterator iter(NMethodIterator::all_blobs);
while(iter.next()) {
CompiledMethod* nm = iter.method();
nmethod* nm = iter.method();
if (!nm->method()->is_method_handle_intrinsic()) {
if (nm->can_be_deoptimized()) {
deopt_scope->mark(nm);
@ -1365,9 +1354,9 @@ void CodeCache::mark_directives_matches(bool top_only) {
Thread *thread = Thread::current();
HandleMark hm(thread);
CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading);
NMethodIterator iter(NMethodIterator::only_not_unloading);
while(iter.next()) {
CompiledMethod* nm = iter.method();
nmethod* nm = iter.method();
methodHandle mh(thread, nm->method());
if (DirectivesStack::hasMatchingDirectives(mh, top_only)) {
ResourceMark rm;
@ -1383,9 +1372,9 @@ void CodeCache::recompile_marked_directives_matches() {
// Try the max level and let the directives be applied during the compilation.
int comp_level = CompilationPolicy::highest_compile_level();
RelaxedCompiledMethodIterator iter(RelaxedCompiledMethodIterator::only_not_unloading);
RelaxedNMethodIterator iter(RelaxedNMethodIterator::only_not_unloading);
while(iter.next()) {
CompiledMethod* nm = iter.method();
nmethod* nm = iter.method();
methodHandle mh(thread, nm->method());
if (mh->has_matching_directives()) {
ResourceMark rm;
@ -1424,9 +1413,9 @@ void CodeCache::recompile_marked_directives_matches() {
// Mark methods for deopt (if safe or possible).
void CodeCache::mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope) {
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading);
NMethodIterator iter(NMethodIterator::only_not_unloading);
while(iter.next()) {
CompiledMethod* nm = iter.method();
nmethod* nm = iter.method();
if (!nm->is_native_method()) {
deopt_scope->mark(nm);
}
@ -1436,9 +1425,9 @@ void CodeCache::mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_
void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee) {
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading);
NMethodIterator iter(NMethodIterator::only_not_unloading);
while(iter.next()) {
CompiledMethod* nm = iter.method();
nmethod* nm = iter.method();
if (nm->is_dependent_on_method(dependee)) {
deopt_scope->mark(nm);
}
@ -1446,9 +1435,9 @@ void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method
}
void CodeCache::make_marked_nmethods_deoptimized() {
RelaxedCompiledMethodIterator iter(RelaxedCompiledMethodIterator::only_not_unloading);
RelaxedNMethodIterator iter(RelaxedNMethodIterator::only_not_unloading);
while(iter.next()) {
CompiledMethod* nm = iter.method();
nmethod* nm = iter.method();
if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) {
nm->make_not_entrant();
nm->make_deoptimized();
@ -1849,15 +1838,15 @@ void CodeCache::print_summary(outputStream* st, bool detailed) {
void CodeCache::print_codelist(outputStream* st) {
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading);
NMethodIterator iter(NMethodIterator::only_not_unloading);
while (iter.next()) {
CompiledMethod* cm = iter.method();
nmethod* nm = iter.method();
ResourceMark rm;
char* method_name = cm->method()->name_and_sig_as_C_string();
char* method_name = nm->method()->name_and_sig_as_C_string();
st->print_cr("%d %d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
cm->compile_id(), cm->comp_level(), cm->get_state(),
nm->compile_id(), nm->comp_level(), nm->get_state(),
method_name,
(intptr_t)cm->header_begin(), (intptr_t)cm->code_begin(), (intptr_t)cm->code_end());
(intptr_t)nm->header_begin(), (intptr_t)nm->code_begin(), (intptr_t)nm->code_end());
}
}
@ -1897,8 +1886,8 @@ void CodeCache::write_perf_map(const char* filename) {
CodeBlob *cb = iter.method();
ResourceMark rm;
const char* method_name =
cb->is_compiled() ? cb->as_compiled_method()->method()->external_name()
: cb->name();
cb->is_nmethod() ? cb->as_nmethod()->method()->external_name()
: cb->name();
fs.print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %s",
(intptr_t)cb->code_begin(), (intptr_t)cb->code_size(),
method_name);


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -83,14 +83,13 @@ class DeoptimizationScope;
class CodeCache : AllStatic {
friend class VMStructs;
friend class JVMCIVMStructs;
template <class T, class Filter, bool is_compiled_method> friend class CodeBlobIterator;
template <class T, class Filter, bool is_relaxed> friend class CodeBlobIterator;
friend class WhiteBox;
friend class CodeCacheLoader;
friend class ShenandoahParallelCodeHeapIterator;
private:
// CodeHeaps of the cache
static GrowableArray<CodeHeap*>* _heaps;
static GrowableArray<CodeHeap*>* _compiled_heaps;
static GrowableArray<CodeHeap*>* _nmethod_heaps;
static GrowableArray<CodeHeap*>* _allocable_heaps;
@ -144,7 +143,6 @@ class CodeCache : AllStatic {
static void add_heap(CodeHeap* heap);
static const GrowableArray<CodeHeap*>* heaps() { return _heaps; }
static const GrowableArray<CodeHeap*>* compiled_heaps() { return _compiled_heaps; }
static const GrowableArray<CodeHeap*>* nmethod_heaps() { return _nmethod_heaps; }
// Allocation/administration
@ -165,7 +163,6 @@ class CodeCache : AllStatic {
static CodeBlob* find_blob_and_oopmap(void* start, int& slot); // Returns the CodeBlob containing the given address
static int find_oopmap_slot_fast(void* start); // Returns a fast oopmap slot if there is any; -1 otherwise
static nmethod* find_nmethod(void* start); // Returns the nmethod containing the given address
static CompiledMethod* find_compiled(void* start);
static int blob_count(); // Returns the total number of CodeBlobs in the cache
static int blob_count(CodeBlobType code_blob_type);
@ -258,14 +255,9 @@ class CodeCache : AllStatic {
// Returns true if an own CodeHeap for the given CodeBlobType is available
static bool heap_available(CodeBlobType code_blob_type);
// Returns the CodeBlobType for the given CompiledMethod
static CodeBlobType get_code_blob_type(CompiledMethod* cm) {
return get_code_heap(cm)->code_blob_type();
}
static bool code_blob_type_accepts_compiled(CodeBlobType code_blob_type) {
bool result = code_blob_type == CodeBlobType::All || code_blob_type <= CodeBlobType::MethodProfiled;
return result;
// Returns the CodeBlobType for the given nmethod
static CodeBlobType get_code_blob_type(nmethod* nm) {
return get_code_heap(nm)->code_blob_type();
}
static bool code_blob_type_accepts_nmethod(CodeBlobType type) {
@ -315,7 +307,7 @@ class CodeCache : AllStatic {
static void mark_dependents_for_evol_deoptimization(DeoptimizationScope* deopt_scope);
static void mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope);
static void old_nmethods_do(MetadataClosure* f) NOT_JVMTI_RETURN;
static void unregister_old_nmethod(CompiledMethod* c) NOT_JVMTI_RETURN;
static void unregister_old_nmethod(nmethod* c) NOT_JVMTI_RETURN;
// Support for fullspeed debugging
static void mark_dependents_on_method_for_breakpoint(const methodHandle& dependee);
@ -369,8 +361,8 @@ template <class T, class Filter, bool is_relaxed> class CodeBlobIterator : publi
// Filter is_unloading as required
if (_only_not_unloading) {
CompiledMethod* cm = _code_blob->as_compiled_method_or_null();
if (cm != nullptr && cm->is_unloading()) {
nmethod* nm = _code_blob->as_nmethod_or_null();
if (nm != nullptr && nm->is_unloading()) {
continue;
}
}
@ -442,12 +434,6 @@ private:
}
};
struct CompiledMethodFilter {
static bool apply(CodeBlob* cb) { return cb->is_compiled(); }
static const GrowableArray<CodeHeap*>* heaps() { return CodeCache::compiled_heaps(); }
};
struct NMethodFilter {
static bool apply(CodeBlob* cb) { return cb->is_nmethod(); }
static const GrowableArray<CodeHeap*>* heaps() { return CodeCache::nmethod_heaps(); }
@ -458,9 +444,8 @@ struct AllCodeBlobsFilter {
static const GrowableArray<CodeHeap*>* heaps() { return CodeCache::heaps(); }
};
typedef CodeBlobIterator<CompiledMethod, CompiledMethodFilter, false /* is_relaxed */> CompiledMethodIterator;
typedef CodeBlobIterator<CompiledMethod, CompiledMethodFilter, true /* is_relaxed */> RelaxedCompiledMethodIterator;
typedef CodeBlobIterator<nmethod, NMethodFilter, false /* is_relaxed */> NMethodIterator;
typedef CodeBlobIterator<nmethod, NMethodFilter, true /* is_relaxed */> RelaxedNMethodIterator;
typedef CodeBlobIterator<CodeBlob, AllCodeBlobsFilter, false /* is_relaxed */> AllCodeBlobsIterator;
#endif // SHARE_CODE_CODECACHE_HPP
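
Note: the CompiledMethodFilter and the CompiledMethodIterator/RelaxedCompiledMethodIterator typedefs are gone; every walk in this patch now uses the nmethod flavour. The loop, condensed from the updated CodeCache walkers above:

#include "code/codeCache.hpp"
#include "code/nmethod.hpp"
#include "runtime/mutexLocker.hpp"

static void clear_all_inline_caches_sketch() {
  assert_locked_or_safepoint(CodeCache_lock);                // as in CodeCache::clear_inline_caches()
  NMethodIterator iter(NMethodIterator::only_not_unloading); // use ::all_blobs to also visit unloading nmethods
  while (iter.next()) {
    nmethod* nm = iter.method();
    nm->clear_inline_caches();
  }
}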


@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -44,8 +44,8 @@ class CodeHeapState : public CHeapObj<mtCode> {
enum blobType {
noType = 0, // must be! due to initialization by memset to zero
// The nMethod_* values correspond to the CompiledMethod enum values.
// We can't use the CompiledMethod values 1:1 because we depend on noType == 0.
// The nMethod_* values correspond to the nmethod enum values.
// We can't use the nmethod values 1:1 because we depend on noType == 0.
nMethod_inconstruction, // under construction. Very soon, the type will transition to "in_use".
// can't be observed while holding Compile_lock and CodeCache_lock simultaneously.
// left in here for completeness (and to document we spent a thought).


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,7 +44,7 @@
// Every time a compiled IC is changed or its type is being accessed,
// either the CompiledIC_lock must be set or we must be at a safe point.
CompiledICLocker::CompiledICLocker(CompiledMethod* method)
CompiledICLocker::CompiledICLocker(nmethod* method)
: _method(method),
_behaviour(CompiledICProtectionBehaviour::current()),
_locked(_behaviour->lock(_method)) {
@ -56,15 +56,15 @@ CompiledICLocker::~CompiledICLocker() {
}
}
bool CompiledICLocker::is_safe(CompiledMethod* method) {
bool CompiledICLocker::is_safe(nmethod* method) {
return CompiledICProtectionBehaviour::current()->is_safe(method);
}
bool CompiledICLocker::is_safe(address code) {
CodeBlob* cb = CodeCache::find_blob(code);
assert(cb != nullptr && cb->is_compiled(), "must be compiled");
CompiledMethod* cm = cb->as_compiled_method();
return CompiledICProtectionBehaviour::current()->is_safe(cm);
assert(cb != nullptr && cb->is_nmethod(), "must be compiled");
nmethod* nm = cb->as_nmethod();
return CompiledICProtectionBehaviour::current()->is_safe(nm);
}
CompiledICData::CompiledICData()
@ -167,12 +167,12 @@ CompiledIC::CompiledIC(RelocIterator* iter)
assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
}
CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr) {
CompiledIC* CompiledIC_before(nmethod* nm, address return_addr) {
address call_site = nativeCall_before(return_addr)->instruction_address();
return CompiledIC_at(nm, call_site);
}
CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site) {
CompiledIC* CompiledIC_at(nmethod* nm, address call_site) {
RelocIterator iter(nm, call_site, call_site + 1);
iter.next();
return CompiledIC_at(&iter);
@ -180,8 +180,8 @@ CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site) {
CompiledIC* CompiledIC_at(Relocation* call_reloc) {
address call_site = call_reloc->addr();
CompiledMethod* cm = CodeCache::find_blob(call_reloc->addr())->as_compiled_method();
return CompiledIC_at(cm, call_site);
nmethod* nm = CodeCache::find_blob(call_reloc->addr())->as_nmethod();
return CompiledIC_at(nm, call_site);
}
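
Note: the CompiledIC conversion helpers (CompiledIC_before / CompiledIC_at) now take an nmethod*, matching the CompiledICLocker they are used under. A minimal sketch of a call site (hypothetical helper; nm and call_site stand for a real nmethod and the address of a virtual-call instruction inside it):

#include "code/compiledIC.hpp"
#include "code/nmethod.hpp"

static void clean_ic_at(nmethod* nm, address call_site) {
  CompiledICLocker ml(nm);                       // locker constructor now takes nmethod*, see compiledIC.hpp below
  CompiledIC* ic = CompiledIC_at(nm, call_site); // or CompiledIC_before(nm, return_address)
  ic->set_to_clean();                            // e.g. as part of inline-cache cleaning
}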
CompiledIC* CompiledIC_at(RelocIterator* reloc_iter) {
@ -204,7 +204,7 @@ void CompiledIC::set_to_clean() {
void CompiledIC::set_to_monomorphic() {
assert(data()->is_initialized(), "must be initialized");
Method* method = data()->speculated_method();
CompiledMethod* code = method->code();
nmethod* code = method->code();
address entry;
bool to_compiled = code != nullptr && code->is_in_use() && !code->is_unloading();
@ -321,7 +321,7 @@ void CompiledIC::verify() {
// ----------------------------------------------------------------------------
void CompiledDirectCall::set_to_clean() {
// in_use is unused but needed to match template function in CompiledMethod
// in_use is unused but needed to match template function in nmethod
assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call");
// Reset call site
RelocIterator iter((nmethod*)nullptr, instruction_address(), instruction_address() + 1);
@ -343,8 +343,8 @@ void CompiledDirectCall::set_to_clean() {
}
void CompiledDirectCall::set(const methodHandle& callee_method) {
CompiledMethod* code = callee_method->code();
CompiledMethod* caller = CodeCache::find_compiled(instruction_address());
nmethod* code = callee_method->code();
nmethod* caller = CodeCache::find_nmethod(instruction_address());
bool to_interp_cont_enter = caller->method()->is_continuation_enter_intrinsic() &&
ContinuationEntry::is_interpreted_call(instruction_address());
@ -377,14 +377,14 @@ bool CompiledDirectCall::is_clean() const {
bool CompiledDirectCall::is_call_to_interpreted() const {
// It is a call to interpreted code if it calls a stub. Hence, the destination
// must be in the stub part of the nmethod that contains the call
CompiledMethod* cm = CodeCache::find_compiled(instruction_address());
return cm->stub_contains(destination());
nmethod* nm = CodeCache::find_nmethod(instruction_address());
return nm->stub_contains(destination());
}
bool CompiledDirectCall::is_call_to_compiled() const {
CompiledMethod* caller = CodeCache::find_compiled(instruction_address());
nmethod* caller = CodeCache::find_nmethod(instruction_address());
CodeBlob* dest_cb = CodeCache::find_blob(destination());
return !caller->stub_contains(destination()) && dest_cb->is_compiled();
return !caller->stub_contains(destination()) && dest_cb->is_nmethod();
}
address CompiledDirectCall::find_stub_for(address instruction) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,18 +39,18 @@
//
class CompiledIC;
class CompiledICProtectionBehaviour;
class CompiledMethod;
class nmethod;
class CompiledICLocker: public StackObj {
CompiledMethod* _method;
nmethod* _method;
CompiledICProtectionBehaviour* _behaviour;
bool _locked;
NoSafepointVerifier _nsv;
public:
CompiledICLocker(CompiledMethod* method);
CompiledICLocker(nmethod* method);
~CompiledICLocker();
static bool is_safe(CompiledMethod* method);
static bool is_safe(nmethod* method);
static bool is_safe(address code);
};
@ -98,7 +98,7 @@ class CompiledICData : public CHeapObj<mtCode> {
class CompiledIC: public ResourceObj {
private:
CompiledMethod* _method;
nmethod* _method;
CompiledICData* _data;
NativeCall* _call;
@ -114,8 +114,8 @@ private:
public:
// conversion (machine PC to CompiledIC*)
friend CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr);
friend CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site);
friend CompiledIC* CompiledIC_before(nmethod* nm, address return_addr);
friend CompiledIC* CompiledIC_at(nmethod* nm, address call_site);
friend CompiledIC* CompiledIC_at(Relocation* call_site);
friend CompiledIC* CompiledIC_at(RelocIterator* reloc_iter);
@ -146,8 +146,8 @@ public:
void verify() PRODUCT_RETURN;
};
CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr);
CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site);
CompiledIC* CompiledIC_before(nmethod* nm, address return_addr);
CompiledIC* CompiledIC_at(nmethod* nm, address call_site);
CompiledIC* CompiledIC_at(Relocation* call_site);
CompiledIC* CompiledIC_at(RelocIterator* reloc_iter);

View File

@ -1,647 +0,0 @@
/*
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "oops/weakHandle.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sharedRuntime.hpp"
CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout,
int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps,
bool caller_must_gc_arguments, bool compiled)
: CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments, compiled),
_deoptimization_status(not_marked),
_deoptimization_generation(0),
_method(method),
_gc_data(nullptr)
{
init_defaults();
}
CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size,
int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size,
OopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled)
: CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb,
frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments, compiled),
_deoptimization_status(not_marked),
_deoptimization_generation(0),
_method(method),
_gc_data(nullptr)
{
init_defaults();
}
void CompiledMethod::init_defaults() {
{ // avoid uninitialized fields, even for short time periods
_scopes_data_begin = nullptr;
_deopt_handler_begin = nullptr;
_deopt_mh_handler_begin = nullptr;
_exception_cache = nullptr;
}
_has_unsafe_access = 0;
_has_method_handle_invokes = 0;
_has_wide_vectors = 0;
_has_monitors = 0;
}
bool CompiledMethod::is_method_handle_return(address return_pc) {
if (!has_method_handle_invokes()) return false;
PcDesc* pd = pc_desc_at(return_pc);
if (pd == nullptr)
return false;
return pd->is_method_handle_invoke();
}
// Returns a string version of the method state.
const char* CompiledMethod::state() const {
int state = get_state();
switch (state) {
case not_installed:
return "not installed";
case in_use:
return "in use";
case not_used:
return "not_used";
case not_entrant:
return "not_entrant";
default:
fatal("unexpected method state: %d", state);
return nullptr;
}
}
//-----------------------------------------------------------------------------
void CompiledMethod::set_deoptimized_done() {
ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
if (_deoptimization_status != deoptimize_done) { // can't go backwards
Atomic::store(&_deoptimization_status, deoptimize_done);
}
}
//-----------------------------------------------------------------------------
ExceptionCache* CompiledMethod::exception_cache_acquire() const {
return Atomic::load_acquire(&_exception_cache);
}
void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
assert(new_entry != nullptr,"Must be non null");
assert(new_entry->next() == nullptr, "Must be null");
for (;;) {
ExceptionCache *ec = exception_cache();
if (ec != nullptr) {
Klass* ex_klass = ec->exception_type();
if (!ex_klass->is_loader_alive()) {
// We must guarantee that entries are not inserted with new next pointer
// edges to ExceptionCache entries with dead klasses, due to bad interactions
// with concurrent ExceptionCache cleanup. Therefore, the inserts roll
// the head pointer forward to the first live ExceptionCache, so that the new
// next pointers always point at live ExceptionCaches, that are not removed due
// to concurrent ExceptionCache cleanup.
ExceptionCache* next = ec->next();
if (Atomic::cmpxchg(&_exception_cache, ec, next) == ec) {
CodeCache::release_exception_cache(ec);
}
continue;
}
ec = exception_cache();
if (ec != nullptr) {
new_entry->set_next(ec);
}
}
if (Atomic::cmpxchg(&_exception_cache, ec, new_entry) == ec) {
return;
}
}
}
void CompiledMethod::clean_exception_cache() {
// For each nmethod, only a single thread may call this cleanup function
// at the same time, whether called in STW cleanup or concurrent cleanup.
// Note that if the GC is processing exception cache cleaning in a concurrent phase,
// then a single writer may contend with cleaning up the head pointer to the
// first ExceptionCache node that has a Klass* that is alive. That is fine,
// as long as there is no concurrent cleanup of next pointers from concurrent writers.
// And the concurrent writers do not clean up next pointers, only the head.
// Also note that concurrent readers will walk through Klass* pointers that are not
// alive. That does not cause ABA problems, because Klass* is deleted after
// a handshake with all threads, after all stale ExceptionCaches have been
// unlinked. That is also when the CodeCache::exception_cache_purge_list()
// is deleted, with all ExceptionCache entries that were cleaned concurrently.
// That similarly implies that CAS operations on ExceptionCache entries do not
// suffer from ABA problems as unlinking and deletion is separated by a global
// handshake operation.
ExceptionCache* prev = nullptr;
ExceptionCache* curr = exception_cache_acquire();
while (curr != nullptr) {
ExceptionCache* next = curr->next();
if (!curr->exception_type()->is_loader_alive()) {
if (prev == nullptr) {
// Try to clean head; this is contended by concurrent inserts, that
// both lazily clean the head, and insert entries at the head. If
// the CAS fails, the operation is restarted.
if (Atomic::cmpxchg(&_exception_cache, curr, next) != curr) {
prev = nullptr;
curr = exception_cache_acquire();
continue;
}
} else {
// It is impossible during cleanup to connect the next pointer to
// an ExceptionCache that has not been published before a safepoint
// prior to the cleanup. Therefore, release is not required.
prev->set_next(next);
}
// prev stays the same.
CodeCache::release_exception_cache(curr);
} else {
prev = curr;
}
curr = next;
}
}
// public method for accessing the exception cache
// These are the public access methods.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
// We never grab a lock to read the exception cache, so we may
// have false negatives. This is okay, as it can only happen during
// the first few exception lookups for a given nmethod.
ExceptionCache* ec = exception_cache_acquire();
while (ec != nullptr) {
address ret_val;
if ((ret_val = ec->match(exception,pc)) != nullptr) {
return ret_val;
}
ec = ec->next();
}
return nullptr;
}
void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
// There are potential race conditions during exception cache updates, so we
// must own the ExceptionCache_lock before doing ANY modifications. Because
// we don't lock during reads, it is possible to have several threads attempt
// to update the cache with the same data. We need to check for already inserted
// copies of the current data before adding it.
MutexLocker ml(ExceptionCache_lock);
ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);
if (target_entry == nullptr || !target_entry->add_address_and_handler(pc,handler)) {
target_entry = new ExceptionCache(exception,pc,handler);
add_exception_cache_entry(target_entry);
}
}
// private method for handling exception cache
// These methods are private, and used to manipulate the exception cache
// directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
ExceptionCache* ec = exception_cache_acquire();
while (ec != nullptr) {
if (ec->match_exception_with_space(exception)) {
return ec;
}
ec = ec->next();
}
return nullptr;
}
//-------------end of code for ExceptionCache--------------
bool CompiledMethod::is_at_poll_return(address pc) {
RelocIterator iter(this, pc, pc+1);
while (iter.next()) {
if (iter.type() == relocInfo::poll_return_type)
return true;
}
return false;
}
bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
RelocIterator iter(this, pc, pc+1);
while (iter.next()) {
relocInfo::relocType t = iter.type();
if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
return true;
}
return false;
}
void CompiledMethod::verify_oop_relocations() {
// Ensure that the code matches the current oop values
RelocIterator iter(this, nullptr, nullptr);
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
oop_Relocation* reloc = iter.oop_reloc();
if (!reloc->oop_is_immediate()) {
reloc->verify_oop_relocation();
}
}
}
}
ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
PcDesc* pd = pc_desc_at(pc);
guarantee(pd != nullptr, "scope must be present");
return new ScopeDesc(this, pd);
}
ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
PcDesc* pd = pc_desc_near(pc);
guarantee(pd != nullptr, "scope must be present");
return new ScopeDesc(this, pd);
}
address CompiledMethod::oops_reloc_begin() const {
// If the method is not entrant then a JMP is plastered over the
// first few bytes. If an oop in the old code was there, that oop
// should not get GC'd. Skip the first few bytes of oops on
// not-entrant methods.
if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
code_begin() + frame_complete_offset() >
verified_entry_point() + NativeJump::instruction_size)
{
// If we have a frame_complete_offset after the native jump, then there
// is no point trying to look for oops before that. This is a requirement
// for being allowed to scan oops concurrently.
return code_begin() + frame_complete_offset();
}
// It is not safe to read oops concurrently using entry barriers, if their
// location depends on whether the nmethod is entrant or not.
// assert(BarrierSet::barrier_set()->barrier_set_nmethod() == nullptr, "Not safe oop scan");
address low_boundary = verified_entry_point();
if (!is_in_use() && is_nmethod()) {
low_boundary += NativeJump::instruction_size;
// %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
// This means that the low_boundary is going to be a little too high.
// This shouldn't matter, since oops of non-entrant methods are never used.
// In fact, why are we bothering to look at oops in a non-entrant method??
}
return low_boundary;
}
// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
if (method() == nullptr) {
return;
}
// handle the case of an anchor explicitly set in continuation code that doesn't have a callee
JavaThread* thread = reg_map->thread();
if (thread->has_last_Java_frame() && fr.sp() == thread->last_Java_sp()) {
return;
}
if (!method()->is_native()) {
address pc = fr.pc();
bool has_receiver, has_appendix;
Symbol* signature;
// The method attached by JIT-compilers should be used, if present.
// Bytecode can be inaccurate in such case.
Method* callee = attached_method_before_pc(pc);
if (callee != nullptr) {
has_receiver = !(callee->access_flags().is_static());
has_appendix = false;
signature = callee->signature();
} else {
SimpleScopeDesc ssd(this, pc);
Bytecode_invoke call(methodHandle(Thread::current(), ssd.method()), ssd.bci());
has_receiver = call.has_receiver();
has_appendix = call.has_appendix();
signature = call.signature();
}
fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
} else if (method()->is_continuation_enter_intrinsic()) {
// This method only calls Continuation.enter()
Symbol* signature = vmSymbols::continuationEnter_signature();
fr.oops_compiled_arguments_do(signature, false, false, reg_map, f);
}
}
Method* CompiledMethod::attached_method(address call_instr) {
assert(code_contains(call_instr), "not part of the nmethod");
RelocIterator iter(this, call_instr, call_instr + 1);
while (iter.next()) {
if (iter.addr() == call_instr) {
switch(iter.type()) {
case relocInfo::static_call_type: return iter.static_call_reloc()->method_value();
case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
case relocInfo::virtual_call_type: return iter.virtual_call_reloc()->method_value();
default: break;
}
}
}
return nullptr; // not found
}
Method* CompiledMethod::attached_method_before_pc(address pc) {
if (NativeCall::is_call_before(pc)) {
NativeCall* ncall = nativeCall_before(pc);
return attached_method(ncall->instruction_address());
}
return nullptr; // not a call
}
void CompiledMethod::clear_inline_caches() {
assert(SafepointSynchronize::is_at_safepoint(), "clearing of IC's only allowed at safepoint");
RelocIterator iter(this);
while (iter.next()) {
iter.reloc()->clear_inline_cache();
}
}
#ifdef ASSERT
// Check class_loader is alive for this bit of metadata.
class CheckClass : public MetadataClosure {
void do_metadata(Metadata* md) {
Klass* klass = nullptr;
if (md->is_klass()) {
klass = ((Klass*)md);
} else if (md->is_method()) {
klass = ((Method*)md)->method_holder();
} else if (md->is_methodData()) {
klass = ((MethodData*)md)->method()->method_holder();
} else {
md->print();
ShouldNotReachHere();
}
assert(klass->is_loader_alive(), "must be alive");
}
};
#endif // ASSERT
static void clean_ic_if_metadata_is_dead(CompiledIC *ic) {
ic->clean_metadata();
}
// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
template <typename CallsiteT>
static void clean_if_nmethod_is_unloaded(CallsiteT* callsite, CompiledMethod* from,
bool clean_all) {
CodeBlob* cb = CodeCache::find_blob(callsite->destination());
if (!cb->is_compiled()) {
return;
}
CompiledMethod* cm = cb->as_compiled_method();
if (clean_all || !cm->is_in_use() || cm->is_unloading() || cm->method()->code() != cm) {
callsite->set_to_clean();
}
}
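
The template above applies one cleaning policy to every call-site flavour (CompiledIC, CompiledDirectCall) without requiring a shared base class: the template only needs the few members it actually calls to exist. A minimal stand-alone sketch of that duck-typed shape, with hypothetical site types and a plain flag in place of the code-cache liveness check:

#include <iostream>

// Hypothetical stand-ins for the two call-site kinds: unrelated types that
// merely share the members the template touches.
struct InlineCacheSite {
  bool dead_target;
  bool target_is_dead() const { return dead_target; }
  void set_to_clean() { dead_target = false; std::cout << "inline cache cleaned\n"; }
};

struct DirectCallSite {
  bool dead_target;
  bool target_is_dead() const { return dead_target; }
  void set_to_clean() { dead_target = false; std::cout << "direct call cleaned\n"; }
};

// Same policy for both site kinds, resolved at compile time.
template <typename CallsiteT>
void clean_if_target_is_dead(CallsiteT* site, bool clean_all) {
  if (clean_all || site->target_is_dead()) {
    site->set_to_clean();
  }
}

int main() {
  InlineCacheSite ic{true};
  DirectCallSite dc{false};
  clean_if_target_is_dead(&ic, false);  // cleaned: stale target
  clean_if_target_is_dead(&dc, true);   // cleaned: clean_all forces it
  return 0;
}

The real function additionally looks up the destination in the code cache to decide liveness; the sketch collapses that decision into a flag.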
// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Can be called either in parallel by G1 currently or after all
// nmethods are unloaded. Return postponed=true in the parallel case for
// inline caches found that point to nmethods that are not yet visited during
// the do_unloading walk.
void CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
ResourceMark rm;
// Exception cache only needs to be called if unloading occurred
if (unloading_occurred) {
clean_exception_cache();
}
cleanup_inline_caches_impl(unloading_occurred, false);
#ifdef ASSERT
// Check that the metadata embedded in the nmethod is alive
CheckClass check_class;
metadata_do(&check_class);
#endif
}
void CompiledMethod::run_nmethod_entry_barrier() {
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm != nullptr) {
// We want to keep an invariant that nmethods found through iterations of a Thread's
// nmethods found in safepoints have gone through an entry barrier and are not armed.
// By calling this nmethod entry barrier, it plays along and acts
// like any other nmethod found on the stack of a thread (fewer surprises).
nmethod* nm = as_nmethod_or_null();
if (nm != nullptr && bs_nm->is_armed(nm)) {
bool alive = bs_nm->nmethod_entry_barrier(nm);
assert(alive, "should be alive");
}
}
}
// Only called by whitebox test
void CompiledMethod::cleanup_inline_caches_whitebox() {
assert_locked_or_safepoint(CodeCache_lock);
CompiledICLocker ic_locker(this);
cleanup_inline_caches_impl(false /* unloading_occurred */, true /* clean_all */);
}
address* CompiledMethod::orig_pc_addr(const frame* fr) {
return (address*) ((address)fr->unextended_sp() + orig_pc_offset());
}
// Called to clean up after class unloading for live nmethods
void CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
assert(CompiledICLocker::is_safe(this), "mt unsafe call");
ResourceMark rm;
// Find all calls in an nmethod and clear the ones that point to bad nmethods.
RelocIterator iter(this, oops_reloc_begin());
bool is_in_static_stub = false;
while(iter.next()) {
switch (iter.type()) {
case relocInfo::virtual_call_type:
if (unloading_occurred) {
// If class unloading occurred we first clear ICs where the cached metadata
// is referring to an unloaded klass or method.
clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
}
clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all);
break;
case relocInfo::opt_virtual_call_type:
case relocInfo::static_call_type:
clean_if_nmethod_is_unloaded(CompiledDirectCall::at(iter.reloc()), this, clean_all);
break;
case relocInfo::static_stub_type: {
is_in_static_stub = true;
break;
}
case relocInfo::metadata_type: {
// Only the metadata relocations contained in static/opt virtual call stubs
// contains the Method* passed to c2i adapters. It is the only metadata
// relocation that needs to be walked, as it is the one metadata relocation
// that violates the invariant that all metadata relocations have an oop
// in the compiled method (due to deferred resolution and code patching).
// This causes dead metadata to remain in compiled methods that are not
// unloading. Unless these slippery metadata relocations of the static
// stubs are at least cleared, subsequent class redefinition operations
// will access potentially free memory, and JavaThread execution
// concurrent to class unloading may call c2i adapters with dead methods.
if (!is_in_static_stub) {
// The first metadata relocation after a static stub relocation is the
// metadata relocation of the static stub used to pass the Method* to
// c2i adapters.
continue;
}
is_in_static_stub = false;
if (is_unloading()) {
// If the nmethod itself is dying, then it may point at dead metadata.
// Nobody should follow that metadata; it is strictly unsafe.
continue;
}
metadata_Relocation* r = iter.metadata_reloc();
Metadata* md = r->metadata_value();
if (md != nullptr && md->is_method()) {
Method* method = static_cast<Method*>(md);
if (!method->method_holder()->is_loader_alive()) {
Atomic::store(r->metadata_addr(), (Method*)nullptr);
if (!r->metadata_is_immediate()) {
r->fix_metadata_relocation();
}
}
}
break;
}
default:
break;
}
}
}
address CompiledMethod::continuation_for_implicit_exception(address pc, bool for_div0_check) {
// Exception happened outside inline-cache check code => we are inside
// an active nmethod => use cpc to determine a return address
int exception_offset = int(pc - code_begin());
int cont_offset = ImplicitExceptionTable(this).continuation_offset( exception_offset );
#ifdef ASSERT
if (cont_offset == 0) {
Thread* thread = Thread::current();
ResourceMark rm(thread);
CodeBlob* cb = CodeCache::find_blob(pc);
assert(cb != nullptr && cb == this, "");
// Keep tty output consistent. To avoid ttyLocker, we buffer in stream, and print all at once.
stringStream ss;
ss.print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
print_on(&ss);
method()->print_codes_on(&ss);
print_code_on(&ss);
print_pcs_on(&ss);
tty->print("%s", ss.as_string()); // print all at once
}
#endif
if (cont_offset == 0) {
// Let the normal error handling report the exception
return nullptr;
}
if (cont_offset == exception_offset) {
#if INCLUDE_JVMCI
Deoptimization::DeoptReason deopt_reason = for_div0_check ? Deoptimization::Reason_div0_check : Deoptimization::Reason_null_check;
JavaThread *thread = JavaThread::current();
thread->set_jvmci_implicit_exception_pc(pc);
thread->set_pending_deoptimization(Deoptimization::make_trap_request(deopt_reason,
Deoptimization::Action_reinterpret));
return (SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
#else
ShouldNotReachHere();
#endif
}
return code_begin() + cont_offset;
}
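
continuation_for_implicit_exception turns a faulting pc back into an offset, looks that offset up in the ImplicitExceptionTable emitted at compile time, and resumes at code_begin() plus the recorded continuation offset (0 meaning "no entry"). A stand-alone sketch of that offset-pair table, with hypothetical names and a std::vector in place of the packed encoding:

#include <cstdint>
#include <vector>

// One entry per instruction that may raise an implicit null/div0 exception:
// where the fault happens, and where execution should continue.
struct ImplicitEntry {
  uint32_t exec_offset;
  uint32_t cont_offset;
};

class ImplicitTable {
  std::vector<ImplicitEntry> _entries;
public:
  void append(uint32_t exec_off, uint32_t cont_off) {
    _entries.push_back({exec_off, cont_off});
  }
  // Returns 0 when there is no entry, mirroring continuation_offset() above.
  uint32_t continuation_offset(uint32_t exec_off) const {
    for (const ImplicitEntry& e : _entries) {
      if (e.exec_offset == exec_off) {
        return e.cont_offset;
      }
    }
    return 0;
  }
};

// The caller-side logic, minus the JVMCI deoptimization special case taken
// when the continuation offset equals the exception offset.
const unsigned char* continuation_for(const unsigned char* code_begin,
                                      const unsigned char* pc,
                                      const ImplicitTable& table) {
  uint32_t exception_offset = static_cast<uint32_t>(pc - code_begin);
  uint32_t cont_offset = table.continuation_offset(exception_offset);
  return cont_offset == 0 ? nullptr : code_begin + cont_offset;
}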
class HasEvolDependency : public MetadataClosure {
bool _has_evol_dependency;
public:
HasEvolDependency() : _has_evol_dependency(false) {}
void do_metadata(Metadata* md) {
if (md->is_method()) {
Method* method = (Method*)md;
if (method->is_old()) {
_has_evol_dependency = true;
}
}
}
bool has_evol_dependency() const { return _has_evol_dependency; }
};
bool CompiledMethod::has_evol_metadata() {
// Check the metadata in relocIter and CompiledIC and also deoptimize
// any nmethod that has reference to old methods.
HasEvolDependency check_evol;
metadata_do(&check_evol);
if (check_evol.has_evol_dependency() && log_is_enabled(Debug, redefine, class, nmethod)) {
ResourceMark rm;
log_debug(redefine, class, nmethod)
("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on in nmethod metadata",
_method->method_holder()->external_name(),
_method->name()->as_C_string(),
_method->signature()->as_C_string(),
compile_id());
}
return check_evol.has_evol_dependency();
}
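
has_evol_metadata uses the closure idiom that appears throughout this file: metadata_do walks every Metadata* embedded in the compiled code and hands each one to a small visitor object that accumulates the answer. A self-contained sketch of that shape, with a hypothetical Metadata hierarchy rather than HotSpot's:

#include <vector>

struct Metadata {
  virtual bool is_old_method() const { return false; }
  virtual ~Metadata() = default;
};
struct OldMethod : Metadata {
  bool is_old_method() const override { return true; }
};

struct MetadataClosure {
  virtual void do_metadata(Metadata* md) = 0;
  virtual ~MetadataClosure() = default;
};

// The nmethod side: feed every embedded Metadata* to the closure.
void metadata_do(const std::vector<Metadata*>& embedded, MetadataClosure* f) {
  for (Metadata* md : embedded) {
    f->do_metadata(md);
  }
}

// The closure side: latch a single flag, as HasEvolDependency does above.
struct HasOldMethod : MetadataClosure {
  bool found = false;
  void do_metadata(Metadata* md) override {
    if (md->is_old_method()) {
      found = true;
    }
  }
};

// Usage: HasOldMethod check; metadata_do(embedded, &check); then read check.found.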

View File

@ -1,415 +0,0 @@
/*
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_CODE_COMPILEDMETHOD_HPP
#define SHARE_CODE_COMPILEDMETHOD_HPP
#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"
#include "oops/method.hpp"
class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;
class CompiledDirectCall;
class NativeCallWrapper;
class ScopeDesc;
class CompiledIC;
class MetadataClosure;
// This class is used internally by nmethods, to cache
// exception/pc/handler information.
class ExceptionCache : public CHeapObj<mtCode> {
friend class VMStructs;
private:
enum { cache_size = 16 };
Klass* _exception_type;
address _pc[cache_size];
address _handler[cache_size];
volatile int _count;
ExceptionCache* volatile _next;
ExceptionCache* _purge_list_next;
inline address pc_at(int index);
void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
inline address handler_at(int index);
void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
inline int count();
// increment_count is only called under lock, but there may be concurrent readers.
void increment_count();
public:
ExceptionCache(Handle exception, address pc, address handler);
Klass* exception_type() { return _exception_type; }
ExceptionCache* next();
void set_next(ExceptionCache *ec);
ExceptionCache* purge_list_next() { return _purge_list_next; }
void set_purge_list_next(ExceptionCache *ec) { _purge_list_next = ec; }
address match(Handle exception, address pc);
bool match_exception_with_space(Handle exception) ;
address test_address(address addr);
bool add_address_and_handler(address addr, address handler) ;
};
class nmethod;
// cache pc descs found in earlier inquiries
class PcDescCache {
friend class VMStructs;
private:
enum { cache_size = 4 };
// The array elements MUST be volatile! Several threads may modify
// and read from the cache concurrently. find_pc_desc_internal has
// returned wrong results. C++ compiler (namely xlC12) may duplicate
// C++ field accesses if the elements are not volatile.
typedef PcDesc* PcDescPtr;
volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
public:
PcDescCache() { debug_only(_pc_descs[0] = nullptr); }
void reset_to(PcDesc* initial_pc_desc);
PcDesc* find_pc_desc(int pc_offset, bool approximate);
void add_pc_desc(PcDesc* pc_desc);
PcDesc* last_pc_desc() { return _pc_descs[0]; }
};
class PcDescSearch {
private:
address _code_begin;
PcDesc* _lower;
PcDesc* _upper;
public:
PcDescSearch(address code, PcDesc* lower, PcDesc* upper) :
_code_begin(code), _lower(lower), _upper(upper)
{
}
address code_begin() const { return _code_begin; }
PcDesc* scopes_pcs_begin() const { return _lower; }
PcDesc* scopes_pcs_end() const { return _upper; }
};
class PcDescContainer {
private:
PcDescCache _pc_desc_cache;
public:
PcDescContainer() {}
PcDesc* find_pc_desc_internal(address pc, bool approximate, const PcDescSearch& search);
void reset_to(PcDesc* initial_pc_desc) { _pc_desc_cache.reset_to(initial_pc_desc); }
PcDesc* find_pc_desc(address pc, bool approximate, const PcDescSearch& search) {
address base_address = search.code_begin();
PcDesc* desc = _pc_desc_cache.last_pc_desc();
if (desc != nullptr && desc->pc_offset() == pc - base_address) {
return desc;
}
return find_pc_desc_internal(pc, approximate, search);
}
};
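
find_pc_desc above is a cache-then-search pattern: a tiny cache of recent PcDesc hits is consulted first, and only a miss falls through to the search over the scopes_pcs section. A single-threaded, stand-alone sketch of the same idea, with hypothetical names, a sorted std::vector standing in for the PcDesc array, and without the volatile multi-slot cache the real PcDescCache needs for concurrent readers:

#include <algorithm>
#include <vector>

struct Desc {
  int pc_offset;   // key: offset of the instruction from code_begin()
  int scope_id;    // payload
};

class DescContainer {
  std::vector<Desc> _descs;     // sorted by pc_offset
  const Desc* _last = nullptr;  // one-entry cache of the previous answer
public:
  explicit DescContainer(std::vector<Desc> descs) : _descs(std::move(descs)) {}

  const Desc* find(int pc_offset) {
    if (_last != nullptr && _last->pc_offset == pc_offset) {
      return _last;  // fast path: same query as last time
    }
    auto it = std::lower_bound(
        _descs.begin(), _descs.end(), pc_offset,
        [](const Desc& d, int off) { return d.pc_offset < off; });
    if (it == _descs.end() || it->pc_offset != pc_offset) {
      return nullptr;  // exact matches only; no 'approximate' mode here
    }
    _last = &*it;      // remember the hit for the next lookup
    return _last;
  }
};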
class CompiledMethod : public CodeBlob {
friend class VMStructs;
friend class DeoptimizationScope;
void init_defaults();
protected:
enum DeoptimizationStatus : u1 {
not_marked,
deoptimize,
deoptimize_noupdate,
deoptimize_done
};
volatile DeoptimizationStatus _deoptimization_status; // Used for stack deoptimization
// Used to track in which deoptimize handshake this method will be deoptimized.
uint64_t _deoptimization_generation;
// set during construction
unsigned int _has_unsafe_access:1; // May fault due to unsafe access.
unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
unsigned int _has_wide_vectors:1; // Preserve wide vectors at safepoints
unsigned int _has_monitors:1; // Fastpath monitor detection for continuations
Method* _method;
address _scopes_data_begin;
// All deoptees will resume execution at the location described by
// this address.
address _deopt_handler_begin;
// All deoptees at a MethodHandle call site will resume execution
// at the location described by this address.
address _deopt_mh_handler_begin;
PcDescContainer _pc_desc_container;
ExceptionCache * volatile _exception_cache;
void* _gc_data;
virtual void purge(bool free_code_cache_data, bool unregister_nmethod) = 0;
private:
DeoptimizationStatus deoptimization_status() const {
return Atomic::load(&_deoptimization_status);
}
protected:
CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled);
CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled);
public:
// Only used by unit test.
CompiledMethod() {}
template<typename T>
T* gc_data() const { return reinterpret_cast<T*>(_gc_data); }
template<typename T>
void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); }
bool has_unsafe_access() const { return _has_unsafe_access; }
void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
bool has_monitors() const { return _has_monitors; }
void set_has_monitors(bool z) { _has_monitors = z; }
bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
bool has_wide_vectors() const { return _has_wide_vectors; }
void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
// allowed to advance state
in_use = 0, // executable nmethod
not_used = 1, // not entrant, but revivable
not_entrant = 2, // marked for deoptimization but activations may still exist
};
virtual bool is_in_use() const = 0;
virtual int comp_level() const = 0;
virtual int compile_id() const = 0;
virtual address verified_entry_point() const = 0;
virtual void log_identity(xmlStream* log) const = 0;
virtual void log_state_change() const = 0;
virtual bool make_not_used() = 0;
virtual bool make_not_entrant() = 0;
virtual bool make_entrant() = 0;
virtual address entry_point() const = 0;
virtual bool is_osr_method() const = 0;
virtual int osr_entry_bci() const = 0;
Method* method() const { return _method; }
virtual void print_pcs_on(outputStream* st) = 0;
bool is_native_method() const { return _method != nullptr && _method->is_native(); }
bool is_java_method() const { return _method != nullptr && !_method->is_native(); }
// ScopeDesc retrieval operation
PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); }
// pc_desc_near returns the first PcDesc at or after the given pc.
PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }
// ScopeDesc for an instruction
ScopeDesc* scope_desc_at(address pc);
ScopeDesc* scope_desc_near(address pc);
bool is_at_poll_return(address pc);
bool is_at_poll_or_poll_return(address pc);
bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
void set_deoptimized_done();
virtual void make_deoptimized() { assert(false, "not supported"); };
bool update_recompile_counts() const {
// Update recompile counts when either the update is explicitly requested (deoptimize)
// or the nmethod is not marked for deoptimization at all (not_marked).
// The latter happens during uncommon traps when deoptimized nmethod is made not entrant.
DeoptimizationStatus status = deoptimization_status();
return status != deoptimize_noupdate && status != deoptimize_done;
}
// tells whether frames described by this nmethod can be deoptimized
// note: native wrappers cannot be deoptimized.
bool can_be_deoptimized() const { return is_java_method(); }
virtual oop oop_at(int index) const = 0;
virtual Metadata* metadata_at(int index) const = 0;
address scopes_data_begin() const { return _scopes_data_begin; }
virtual address scopes_data_end() const = 0;
int scopes_data_size() const { return int(scopes_data_end() - scopes_data_begin()); }
virtual PcDesc* scopes_pcs_begin() const = 0;
virtual PcDesc* scopes_pcs_end() const = 0;
int scopes_pcs_size() const { return int((intptr_t) scopes_pcs_end() - (intptr_t) scopes_pcs_begin()); }
address insts_begin() const { return code_begin(); }
address insts_end() const { return stub_begin(); }
// Returns true if a given address is in the 'insts' section. The method
// insts_contains_inclusive() is end-inclusive.
bool insts_contains(address addr) const { return insts_begin() <= addr && addr < insts_end(); }
bool insts_contains_inclusive(address addr) const { return insts_begin() <= addr && addr <= insts_end(); }
int insts_size() const { return int(insts_end() - insts_begin()); }
virtual address consts_begin() const = 0;
virtual address consts_end() const = 0;
bool consts_contains(address addr) const { return consts_begin() <= addr && addr < consts_end(); }
int consts_size() const { return int(consts_end() - consts_begin()); }
virtual int skipped_instructions_size() const = 0;
virtual address stub_begin() const = 0;
virtual address stub_end() const = 0;
bool stub_contains(address addr) const { return stub_begin() <= addr && addr < stub_end(); }
int stub_size() const { return int(stub_end() - stub_begin()); }
virtual address handler_table_begin() const = 0;
virtual address handler_table_end() const = 0;
bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
int handler_table_size() const { return int(handler_table_end() - handler_table_begin()); }
virtual address exception_begin() const = 0;
virtual address nul_chk_table_begin() const = 0;
virtual address nul_chk_table_end() const = 0;
bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
int nul_chk_table_size() const { return int(nul_chk_table_end() - nul_chk_table_begin()); }
virtual oop* oop_addr_at(int index) const = 0;
virtual Metadata** metadata_addr_at(int index) const = 0;
protected:
// Exception cache support
// Note: _exception_cache may be read and cleaned concurrently.
ExceptionCache* exception_cache() const { return _exception_cache; }
ExceptionCache* exception_cache_acquire() const;
void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; }
public:
address handler_for_exception_and_pc(Handle exception, address pc);
void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
void clean_exception_cache();
void add_exception_cache_entry(ExceptionCache* new_entry);
ExceptionCache* exception_cache_entry_for_exception(Handle exception);
// MethodHandle
bool is_method_handle_return(address return_pc);
address deopt_mh_handler_begin() const { return _deopt_mh_handler_begin; }
address deopt_handler_begin() const { return _deopt_handler_begin; }
address* deopt_handler_begin_addr() { return &_deopt_handler_begin; }
// Deopt
// Return true if the PC is one we would expect if the frame is being deopted.
inline bool is_deopt_pc(address pc);
inline bool is_deopt_mh_entry(address pc);
inline bool is_deopt_entry(address pc);
// Accessor/mutator for the original pc of a frame before a frame was deopted.
address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
virtual int orig_pc_offset() = 0;
private:
address* orig_pc_addr(const frame* fr);
public:
virtual const char* compile_kind() const = 0;
virtual int get_state() const = 0;
const char* state() const;
bool inlinecache_check_contains(address addr) const {
return (addr >= code_begin() && addr < verified_entry_point());
}
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f);
// implicit exceptions support
address continuation_for_implicit_div0_exception(address pc) { return continuation_for_implicit_exception(pc, true); }
address continuation_for_implicit_null_exception(address pc) { return continuation_for_implicit_exception(pc, false); }
static address get_deopt_original_pc(const frame* fr);
// Inline cache support for class unloading and nmethod unloading
private:
void cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all);
address continuation_for_implicit_exception(address pc, bool for_div0_check);
public:
// Serial version used by whitebox test
void cleanup_inline_caches_whitebox();
virtual void clear_inline_caches();
// Execute nmethod barrier code, as if entering through nmethod call.
void run_nmethod_entry_barrier();
void verify_oop_relocations();
bool has_evol_metadata();
// Fast breakpoint support. Tells if this compiled method is
// dependent on the given method. Returns true if this nmethod
// corresponds to the given method as well.
virtual bool is_dependent_on_method(Method* dependee) = 0;
virtual address call_instruction_address(address pc) const = 0;
Method* attached_method(address call_pc);
Method* attached_method_before_pc(address pc);
virtual void metadata_do(MetadataClosure* f) = 0;
// GC support
protected:
address oops_reloc_begin() const;
public:
// GC unloading support
// Cleans unloaded klasses and unloaded nmethods in inline caches
virtual bool is_unloading() = 0;
void unload_nmethod_caches(bool class_unloading_occurred);
virtual void do_unloading(bool unloading_occurred) = 0;
private:
PcDesc* find_pc_desc(address pc, bool approximate) {
return _pc_desc_container.find_pc_desc(pc, approximate, PcDescSearch(code_begin(), scopes_pcs_begin(), scopes_pcs_end()));
}
};
#endif // SHARE_CODE_COMPILEDMETHOD_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -53,15 +53,9 @@ void DebugInfoWriteStream::write_metadata(Metadata* h) {
}
oop DebugInfoReadStream::read_oop() {
nmethod* nm = const_cast<CompiledMethod*>(code())->as_nmethod_or_null();
oop o;
if (nm != nullptr) {
// Despite these oops being found inside nmethods that are on-stack,
// they are not kept alive by all GCs (e.g. G1 and Shenandoah).
o = nm->oop_at_phantom(read_int());
} else {
o = code()->oop_at(read_int());
}
// Despite these oops being found inside nmethods that are on-stack,
// they are not kept alive by all GCs (e.g. G1 and Shenandoah).
oop o = code()->oop_at_phantom(read_int());
assert(oopDesc::is_oop_or_null(o), "oop only");
return o;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -372,11 +372,11 @@ class MonitorValue: public ResourceObj {
class DebugInfoReadStream : public CompressedReadStream {
private:
const CompiledMethod* _code;
const CompiledMethod* code() const { return _code; }
const nmethod* _code;
const nmethod* code() const { return _code; }
GrowableArray<ScopeValue*>* _obj_pool;
public:
DebugInfoReadStream(const CompiledMethod* code, int offset, GrowableArray<ScopeValue*>* obj_pool = nullptr) :
DebugInfoReadStream(const nmethod* code, int offset, GrowableArray<ScopeValue*>* obj_pool = nullptr) :
CompressedReadStream(code->scopes_data_begin(), offset) {
_code = code;
_obj_pool = obj_pool;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -175,7 +175,7 @@ void DebugInformationRecorder::add_non_safepoint(int pc_offset) {
void DebugInformationRecorder::add_new_pc_offset(int pc_offset) {
assert(_pcs_length == 0 || last_pc()->pc_offset() < pc_offset,
"must specify a new, larger pc offset");
"must specify a new, larger pc offset: %d >= %d", last_pc()->pc_offset(), pc_offset);
// add the pcdesc
if (_pcs_length == _pcs_size) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -65,9 +65,9 @@ ExceptionHandlerTable::ExceptionHandlerTable(int initial_size) {
}
ExceptionHandlerTable::ExceptionHandlerTable(const CompiledMethod* cm) {
_table = (HandlerTableEntry*)cm->handler_table_begin();
_length = cm->handler_table_size() / sizeof(HandlerTableEntry);
ExceptionHandlerTable::ExceptionHandlerTable(const nmethod* nm) {
_table = (HandlerTableEntry*)nm->handler_table_begin();
_length = nm->handler_table_size() / sizeof(HandlerTableEntry);
_size = 0; // no space allocated by ExceptionHandlerTable!
}
@ -98,9 +98,9 @@ void ExceptionHandlerTable::add_subtable(
}
void ExceptionHandlerTable::copy_to(CompiledMethod* cm) {
assert(size_in_bytes() == cm->handler_table_size(), "size of space allocated in compiled method incorrect");
copy_bytes_to(cm->handler_table_begin());
void ExceptionHandlerTable::copy_to(nmethod* nm) {
assert(size_in_bytes() == nm->handler_table_size(), "size of space allocated in compiled method incorrect");
copy_bytes_to(nm->handler_table_begin());
}
void ExceptionHandlerTable::copy_bytes_to(address addr) {
@ -215,7 +215,7 @@ void ImplicitExceptionTable::print(address base) const {
}
}
ImplicitExceptionTable::ImplicitExceptionTable(const CompiledMethod* nm) {
ImplicitExceptionTable::ImplicitExceptionTable(const nmethod* nm) {
if (nm->nul_chk_table_size() == 0) {
_len = 0;
_data = nullptr;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -99,7 +99,7 @@ class ExceptionHandlerTable {
ExceptionHandlerTable(int initial_size = 8);
// (run-time) construction from nmethod
ExceptionHandlerTable(const CompiledMethod* nm);
ExceptionHandlerTable(const nmethod* nm);
// (compile-time) add entries
void add_subtable(
@ -116,7 +116,7 @@ class ExceptionHandlerTable {
// nmethod support
int size_in_bytes() const { return align_up(_length * (int)sizeof(HandlerTableEntry), oopSize); }
void copy_to(CompiledMethod* nm);
void copy_to(nmethod* nm);
void copy_bytes_to(address addr);
// lookup
@ -150,7 +150,7 @@ class ImplicitExceptionTable {
public:
ImplicitExceptionTable( ) : _size(0), _len(0), _data(0) { }
// (run-time) construction from nmethod
ImplicitExceptionTable( const CompiledMethod *nm );
ImplicitExceptionTable(const nmethod *nm);
void set_size( uint size );
void append( uint exec_off, uint cont_off );

View File

@ -26,10 +26,9 @@
#include "asm/assembler.inline.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/dependencies.hpp"
#include "code/nativeInst.hpp"
#include "code/nmethod.hpp"
#include "code/nmethod.inline.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compilationLog.hpp"
@ -44,7 +43,7 @@
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "jvm.h"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
@ -98,9 +97,9 @@
Symbol* name = m->name(); \
Symbol* signature = m->signature(); \
HOTSPOT_COMPILED_METHOD_UNLOAD( \
(char *) klass_name->bytes(), klass_name->utf8_length(), \
(char *) name->bytes(), name->utf8_length(), \
(char *) signature->bytes(), signature->utf8_length()); \
(char *) klass_name->bytes(), klass_name->utf8_length(), \
(char *) name->bytes(), name->utf8_length(), \
(char *) signature->bytes(), signature->utf8_length()); \
} \
}
@ -138,6 +137,9 @@ struct java_nmethod_stats_struct {
uint oops_size;
uint metadata_size;
uint size_gt_32k;
int size_max;
void note_nmethod(nmethod* nm) {
nmethod_count += 1;
total_size += nm->size();
@ -156,27 +158,33 @@ struct java_nmethod_stats_struct {
speculations_size += nm->speculations_size();
jvmci_data_size += nm->jvmci_data_size();
#endif
int short_pos_max = ((1<<15) - 1);
if (nm->size() > short_pos_max) size_gt_32k++;
if (nm->size() > size_max) size_max = nm->size();
}
void print_nmethod_stats(const char* name) {
if (nmethod_count == 0) return;
tty->print_cr("Statistics for %u bytecoded nmethods for %s:", nmethod_count, name);
if (total_size != 0) tty->print_cr(" total in heap = %u", total_size);
if (nmethod_count != 0) tty->print_cr(" header = " SIZE_FORMAT, nmethod_count * sizeof(nmethod));
if (relocation_size != 0) tty->print_cr(" relocation = %u", relocation_size);
if (consts_size != 0) tty->print_cr(" constants = %u", consts_size);
if (insts_size != 0) tty->print_cr(" main code = %u", insts_size);
if (stub_size != 0) tty->print_cr(" stub code = %u", stub_size);
if (oops_size != 0) tty->print_cr(" oops = %u", oops_size);
if (metadata_size != 0) tty->print_cr(" metadata = %u", metadata_size);
if (scopes_data_size != 0) tty->print_cr(" scopes data = %u", scopes_data_size);
if (scopes_pcs_size != 0) tty->print_cr(" scopes pcs = %u", scopes_pcs_size);
if (dependencies_size != 0) tty->print_cr(" dependencies = %u", dependencies_size);
if (handler_table_size != 0) tty->print_cr(" handler table = %u", handler_table_size);
if (nul_chk_table_size != 0) tty->print_cr(" nul chk table = %u", nul_chk_table_size);
if (total_size != 0) tty->print_cr(" total in heap = %u (100%%)", total_size);
uint header_size = (uint)(nmethod_count * sizeof(nmethod));
if (nmethod_count != 0) tty->print_cr(" header = %u (%f%%)", header_size, (header_size * 100.0f)/total_size);
if (relocation_size != 0) tty->print_cr(" relocation = %u (%f%%)", relocation_size, (relocation_size * 100.0f)/total_size);
if (consts_size != 0) tty->print_cr(" constants = %u (%f%%)", consts_size, (consts_size * 100.0f)/total_size);
if (insts_size != 0) tty->print_cr(" main code = %u (%f%%)", insts_size, (insts_size * 100.0f)/total_size);
if (stub_size != 0) tty->print_cr(" stub code = %u (%f%%)", stub_size, (stub_size * 100.0f)/total_size);
if (oops_size != 0) tty->print_cr(" oops = %u (%f%%)", oops_size, (oops_size * 100.0f)/total_size);
if (metadata_size != 0) tty->print_cr(" metadata = %u (%f%%)", metadata_size, (metadata_size * 100.0f)/total_size);
if (scopes_data_size != 0) tty->print_cr(" scopes data = %u (%f%%)", scopes_data_size, (scopes_data_size * 100.0f)/total_size);
if (scopes_pcs_size != 0) tty->print_cr(" scopes pcs = %u (%f%%)", scopes_pcs_size, (scopes_pcs_size * 100.0f)/total_size);
if (dependencies_size != 0) tty->print_cr(" dependencies = %u (%f%%)", dependencies_size, (dependencies_size * 100.0f)/total_size);
if (handler_table_size != 0) tty->print_cr(" handler table = %u (%f%%)", handler_table_size, (handler_table_size * 100.0f)/total_size);
if (nul_chk_table_size != 0) tty->print_cr(" nul chk table = %u (%f%%)", nul_chk_table_size, (nul_chk_table_size * 100.0f)/total_size);
#if INCLUDE_JVMCI
if (speculations_size != 0) tty->print_cr(" speculations = %u", speculations_size);
if (jvmci_data_size != 0) tty->print_cr(" JVMCI data = %u", jvmci_data_size);
if (speculations_size != 0) tty->print_cr(" speculations = %u (%f%%)", speculations_size, (speculations_size * 100.0f)/total_size);
if (jvmci_data_size != 0) tty->print_cr(" JVMCI data = %u (%f%%)", jvmci_data_size, (jvmci_data_size * 100.0f)/total_size);
#endif
if (size_gt_32k != 0) tty->print_cr(" size > 32k = %u", size_gt_32k);
if (size_max != 0) tty->print_cr(" max size = %d", size_max);
}
};
@ -417,6 +425,558 @@ static int adjust_pcs_size(int pcs_size) {
return nsize;
}
bool nmethod::is_method_handle_return(address return_pc) {
if (!has_method_handle_invokes()) return false;
PcDesc* pd = pc_desc_at(return_pc);
if (pd == nullptr)
return false;
return pd->is_method_handle_invoke();
}
// Returns a string version of the method state.
const char* nmethod::state() const {
int state = get_state();
switch (state) {
case not_installed:
return "not installed";
case in_use:
return "in use";
case not_entrant:
return "not_entrant";
default:
fatal("unexpected method state: %d", state);
return nullptr;
}
}
void nmethod::set_deoptimized_done() {
ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
if (_deoptimization_status != deoptimize_done) { // can't go backwards
Atomic::store(&_deoptimization_status, deoptimize_done);
}
}
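
set_deoptimized_done advances a small state machine that only moves forward (not_marked to deoptimize/deoptimize_noupdate to deoptimize_done): the lock serializes writers and the comparison keeps the status from regressing. A simplified mirror of that pattern, with hypothetical names, std::mutex in place of CompiledMethod_lock, and without the "already holding the lock" case that ConditionalMutexLocker handles:

#include <atomic>
#include <mutex>

enum DeoptStatus : unsigned char {
  not_marked, deoptimize, deoptimize_noupdate, deoptimize_done
};

std::mutex deopt_lock;                          // stands in for CompiledMethod_lock
std::atomic<DeoptStatus> deopt_status{not_marked};

// Record that deoptimization of this method's frames is finished; the check
// guarantees the status never moves backwards once it reaches the final state.
void mark_deoptimized_done() {
  std::lock_guard<std::mutex> guard(deopt_lock);
  if (deopt_status.load(std::memory_order_relaxed) != deoptimize_done) {
    deopt_status.store(deoptimize_done, std::memory_order_relaxed);
  }
}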
ExceptionCache* nmethod::exception_cache_acquire() const {
return Atomic::load_acquire(&_exception_cache);
}
void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
assert(new_entry != nullptr,"Must be non null");
assert(new_entry->next() == nullptr, "Must be null");
for (;;) {
ExceptionCache *ec = exception_cache();
if (ec != nullptr) {
Klass* ex_klass = ec->exception_type();
if (!ex_klass->is_loader_alive()) {
// We must guarantee that entries are not inserted with new next pointer
// edges to ExceptionCache entries with dead klasses, due to bad interactions
// with concurrent ExceptionCache cleanup. Therefore, the inserts roll
// the head pointer forward to the first live ExceptionCache, so that the new
// next pointers always point at live ExceptionCaches, that are not removed due
// to concurrent ExceptionCache cleanup.
ExceptionCache* next = ec->next();
if (Atomic::cmpxchg(&_exception_cache, ec, next) == ec) {
CodeCache::release_exception_cache(ec);
}
continue;
}
ec = exception_cache();
if (ec != nullptr) {
new_entry->set_next(ec);
}
}
if (Atomic::cmpxchg(&_exception_cache, ec, new_entry) == ec) {
return;
}
}
}
void nmethod::clean_exception_cache() {
// For each nmethod, only a single thread may call this cleanup function
// at the same time, whether called in STW cleanup or concurrent cleanup.
// Note that if the GC is processing exception cache cleaning in a concurrent phase,
// then a single writer may contend with cleaning up the head pointer to the
// first ExceptionCache node that has a Klass* that is alive. That is fine,
// as long as there is no concurrent cleanup of next pointers from concurrent writers.
// And the concurrent writers do not clean up next pointers, only the head.
// Also note that concurrent readers will walk through Klass* pointers that are not
// alive. That does not cause ABA problems, because Klass* is deleted after
// a handshake with all threads, after all stale ExceptionCaches have been
// unlinked. That is also when the CodeCache::exception_cache_purge_list()
// is deleted, with all ExceptionCache entries that were cleaned concurrently.
// That similarly implies that CAS operations on ExceptionCache entries do not
// suffer from ABA problems as unlinking and deletion is separated by a global
// handshake operation.
ExceptionCache* prev = nullptr;
ExceptionCache* curr = exception_cache_acquire();
while (curr != nullptr) {
ExceptionCache* next = curr->next();
if (!curr->exception_type()->is_loader_alive()) {
if (prev == nullptr) {
// Try to clean head; this is contended by concurrent inserts, that
// both lazily clean the head, and insert entries at the head. If
// the CAS fails, the operation is restarted.
if (Atomic::cmpxchg(&_exception_cache, curr, next) != curr) {
prev = nullptr;
curr = exception_cache_acquire();
continue;
}
} else {
// It is impossible during cleanup to connect the next pointer to
// an ExceptionCache that has not been published before a safepoint
// prior to the cleanup. Therefore, release is not required.
prev->set_next(next);
}
// prev stays the same.
CodeCache::release_exception_cache(curr);
} else {
prev = curr;
}
curr = next;
}
}
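
The comments in add_exception_cache_entry and clean_exception_cache describe a specific concurrency split: many inserters may CAS new entries onto the head (rolling the head past dead entries so a new next pointer never links to something the cleaner might free), while at most one cleaner unlinks dead entries and needs a CAS only at the contended head. A stripped-down, stand-alone sketch of that split, with a plain alive flag standing in for Klass liveness and std::atomic in place of HotSpot's Atomic:

#include <atomic>

struct Entry {
  bool alive;            // stands in for exception_type()->is_loader_alive()
  Entry* next = nullptr;
};

std::atomic<Entry*> list_head{nullptr};

// Insert path (many threads): never publish a next pointer to a dead entry;
// instead lazily roll the head forward and retry.
void insert(Entry* e) {
  for (;;) {
    Entry* head = list_head.load(std::memory_order_acquire);
    if (head != nullptr && !head->alive) {
      Entry* next = head->next;
      list_head.compare_exchange_strong(head, next);  // lazy head cleanup
      continue;                                       // re-read the head
    }
    e->next = head;
    if (list_head.compare_exchange_strong(head, e)) {
      return;
    }
  }
}

// Cleanup path (single thread at a time): only the head unlink races with
// inserters, so only that unlink needs a CAS; interior unlinks are plain stores.
void clean() {
  Entry* prev = nullptr;
  Entry* curr = list_head.load(std::memory_order_acquire);
  while (curr != nullptr) {
    Entry* next = curr->next;
    if (!curr->alive) {
      if (prev == nullptr) {
        if (!list_head.compare_exchange_strong(curr, next)) {
          prev = nullptr;                              // lost the race: restart
          curr = list_head.load(std::memory_order_acquire);
          continue;
        }
      } else {
        prev->next = next;
      }
      // The real code defers freeing curr to a purge list released after a
      // global handshake; the sketch leaves memory reclamation out entirely.
    } else {
      prev = curr;
    }
    curr = next;
  }
}

That deferred reclamation is what makes the lock-free reads in handler_for_exception_and_pc safe to run concurrently with cleaning.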
// public method for accessing the exception cache
// These are the public access methods.
address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {
// We never grab a lock to read the exception cache, so we may
// have false negatives. This is okay, as it can only happen during
// the first few exception lookups for a given nmethod.
ExceptionCache* ec = exception_cache_acquire();
while (ec != nullptr) {
address ret_val;
if ((ret_val = ec->match(exception,pc)) != nullptr) {
return ret_val;
}
ec = ec->next();
}
return nullptr;
}
void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
// There are potential race conditions during exception cache updates, so we
// must own the ExceptionCache_lock before doing ANY modifications. Because
// we don't lock during reads, it is possible to have several threads attempt
// to update the cache with the same data. We need to check for already inserted
// copies of the current data before adding it.
MutexLocker ml(ExceptionCache_lock);
ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);
if (target_entry == nullptr || !target_entry->add_address_and_handler(pc,handler)) {
target_entry = new ExceptionCache(exception,pc,handler);
add_exception_cache_entry(target_entry);
}
}
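// The two methods above are normally used as a lookup-then-fill pair on the exception
// path. A minimal sketch of the caller pattern (names abbreviated, not the exact
// runtime call sites):
//
//   address handler = nm->handler_for_exception_and_pc(exception, pc);
//   if (handler == nullptr) {
//     handler = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, ...);
//     nm->add_handler_for_exception_and_pc(exception, pc, handler);
//   }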
// Private methods used to manipulate the exception cache directly.
ExceptionCache* nmethod::exception_cache_entry_for_exception(Handle exception) {
ExceptionCache* ec = exception_cache_acquire();
while (ec != nullptr) {
if (ec->match_exception_with_space(exception)) {
return ec;
}
ec = ec->next();
}
return nullptr;
}
bool nmethod::is_at_poll_return(address pc) {
RelocIterator iter(this, pc, pc+1);
while (iter.next()) {
if (iter.type() == relocInfo::poll_return_type)
return true;
}
return false;
}
bool nmethod::is_at_poll_or_poll_return(address pc) {
RelocIterator iter(this, pc, pc+1);
while (iter.next()) {
relocInfo::relocType t = iter.type();
if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
return true;
}
return false;
}
void nmethod::verify_oop_relocations() {
// Ensure that the code matches the current oop values
RelocIterator iter(this, nullptr, nullptr);
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
oop_Relocation* reloc = iter.oop_reloc();
if (!reloc->oop_is_immediate()) {
reloc->verify_oop_relocation();
}
}
}
}
ScopeDesc* nmethod::scope_desc_at(address pc) {
PcDesc* pd = pc_desc_at(pc);
guarantee(pd != nullptr, "scope must be present");
return new ScopeDesc(this, pd);
}
ScopeDesc* nmethod::scope_desc_near(address pc) {
PcDesc* pd = pc_desc_near(pc);
guarantee(pd != nullptr, "scope must be present");
return new ScopeDesc(this, pd);
}
address nmethod::oops_reloc_begin() const {
// If the method is not entrant then a JMP is plastered over the
// first few bytes. If an oop in the old code was there, that oop
// should not get GC'd. Skip the first few bytes of oops on
// not-entrant methods.
if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
code_begin() + frame_complete_offset() >
verified_entry_point() + NativeJump::instruction_size)
{
// If we have a frame_complete_offset after the native jump, then there
// is no point trying to look for oops before that. This is a requirement
// for being allowed to scan oops concurrently.
return code_begin() + frame_complete_offset();
}
// It is not safe to read oops concurrently using entry barriers if their
// location depends on whether the nmethod is entrant or not.
// assert(BarrierSet::barrier_set()->barrier_set_nmethod() == nullptr, "Not safe oop scan");
address low_boundary = verified_entry_point();
if (!is_in_use()) {
low_boundary += NativeJump::instruction_size;
// %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
// This means that the low_boundary is going to be a little too high.
// This shouldn't matter, since oops of non-entrant methods are never used.
// In fact, why are we bothering to look at oops in a non-entrant method??
}
return low_boundary;
}
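// Example for oops_reloc_begin() above: on x86, NativeJump::instruction_size is 5, so
// for a not-entrant nmethod without an early frame_complete_offset the first 5 bytes
// after verified_entry_point() are skipped, because the not-entrant jump has been
// patched over them and any oop bytes there are no longer meaningful.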
// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke
void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
if (method() == nullptr) {
return;
}
// handle the case of an anchor explicitly set in continuation code that doesn't have a callee
JavaThread* thread = reg_map->thread();
if (thread->has_last_Java_frame() && fr.sp() == thread->last_Java_sp()) {
return;
}
if (!method()->is_native()) {
address pc = fr.pc();
bool has_receiver, has_appendix;
Symbol* signature;
// The method attached by JIT-compilers should be used, if present.
// Bytecode can be inaccurate in that case.
Method* callee = attached_method_before_pc(pc);
if (callee != nullptr) {
has_receiver = !(callee->access_flags().is_static());
has_appendix = false;
signature = callee->signature();
} else {
SimpleScopeDesc ssd(this, pc);
Bytecode_invoke call(methodHandle(Thread::current(), ssd.method()), ssd.bci());
has_receiver = call.has_receiver();
has_appendix = call.has_appendix();
signature = call.signature();
}
fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
} else if (method()->is_continuation_enter_intrinsic()) {
// This method only calls Continuation.enter()
Symbol* signature = vmSymbols::continuationEnter_signature();
fr.oops_compiled_arguments_do(signature, false, false, reg_map, f);
}
}
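// Illustration for preserve_callee_argument_oops() above (hypothetical call site): for
// an invokevirtual of String.equals(Object), the signature "(Ljava/lang/Object;)Z"
// together with has_receiver == true makes oops_compiled_arguments_do() visit the
// receiver slot and the single Object argument slot of the outgoing arguments;
// primitive arguments contribute no oop locations.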
Method* nmethod::attached_method(address call_instr) {
assert(code_contains(call_instr), "not part of the nmethod");
RelocIterator iter(this, call_instr, call_instr + 1);
while (iter.next()) {
if (iter.addr() == call_instr) {
switch(iter.type()) {
case relocInfo::static_call_type: return iter.static_call_reloc()->method_value();
case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
case relocInfo::virtual_call_type: return iter.virtual_call_reloc()->method_value();
default: break;
}
}
}
return nullptr; // not found
}
Method* nmethod::attached_method_before_pc(address pc) {
if (NativeCall::is_call_before(pc)) {
NativeCall* ncall = nativeCall_before(pc);
return attached_method(ncall->instruction_address());
}
return nullptr; // not a call
}
void nmethod::clear_inline_caches() {
assert(SafepointSynchronize::is_at_safepoint(), "clearing of IC's only allowed at safepoint");
RelocIterator iter(this);
while (iter.next()) {
iter.reloc()->clear_inline_cache();
}
}
#ifdef ASSERT
// Check class_loader is alive for this bit of metadata.
class CheckClass : public MetadataClosure {
void do_metadata(Metadata* md) {
Klass* klass = nullptr;
if (md->is_klass()) {
klass = ((Klass*)md);
} else if (md->is_method()) {
klass = ((Method*)md)->method_holder();
} else if (md->is_methodData()) {
klass = ((MethodData*)md)->method()->method_holder();
} else {
md->print();
ShouldNotReachHere();
}
assert(klass->is_loader_alive(), "must be alive");
}
};
#endif // ASSERT
static void clean_ic_if_metadata_is_dead(CompiledIC *ic) {
ic->clean_metadata();
}
// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
template <typename CallsiteT>
static void clean_if_nmethod_is_unloaded(CallsiteT* callsite, nmethod* from,
bool clean_all) {
CodeBlob* cb = CodeCache::find_blob(callsite->destination());
if (!cb->is_nmethod()) {
return;
}
nmethod* nm = cb->as_nmethod();
if (clean_all || !nm->is_in_use() || nm->is_unloading() || nm->method()->code() != nm) {
callsite->set_to_clean();
}
}
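// clean_if_nmethod_is_unloaded() is instantiated below with CompiledIC (virtual call
// sites) and CompiledDirectCall (static and opt-virtual call sites); the only contract
// it relies on is that CallsiteT provides destination() and set_to_clean().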
// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Can be called either in parallel by G1 currently or after all
// nmethods are unloaded. Return postponed=true in the parallel case for
// inline caches found that point to nmethods that are not yet visited during
// the do_unloading walk.
void nmethod::unload_nmethod_caches(bool unloading_occurred) {
ResourceMark rm;
// Exception cache only needs to be called if unloading occurred
if (unloading_occurred) {
clean_exception_cache();
}
cleanup_inline_caches_impl(unloading_occurred, false);
#ifdef ASSERT
// Check that the metadata embedded in the nmethod is alive
CheckClass check_class;
metadata_do(&check_class);
#endif
}
void nmethod::run_nmethod_entry_barrier() {
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm != nullptr) {
// We want to keep the invariant that nmethods found through iteration of a Thread's
// nmethods at safepoints have gone through an entry barrier and are not armed.
// By calling this nmethod entry barrier, this nmethod plays along and acts
// like any other nmethod found on the stack of a thread (fewer surprises).
nmethod* nm = this;
if (bs_nm->is_armed(nm)) {
bool alive = bs_nm->nmethod_entry_barrier(nm);
assert(alive, "should be alive");
}
}
}
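// A minimal usage sketch for run_nmethod_entry_barrier() above, assuming a hypothetical
// caller that wants to touch this nmethod's oops outside the regular stack-walking paths:
//
//   nm->run_nmethod_entry_barrier(); // disarm first, like a thread entering the nmethod
//   nm->oops_do(&closure);           // now observes the same invariants as stack walks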
// Only called by whitebox test
void nmethod::cleanup_inline_caches_whitebox() {
assert_locked_or_safepoint(CodeCache_lock);
CompiledICLocker ic_locker(this);
cleanup_inline_caches_impl(false /* unloading_occurred */, true /* clean_all */);
}
address* nmethod::orig_pc_addr(const frame* fr) {
return (address*) ((address)fr->unextended_sp() + orig_pc_offset());
}
// Called to clean up after class unloading for live nmethods
void nmethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
assert(CompiledICLocker::is_safe(this), "mt unsafe call");
ResourceMark rm;
// Find all calls in an nmethod and clear the ones that point to bad nmethods.
RelocIterator iter(this, oops_reloc_begin());
bool is_in_static_stub = false;
while(iter.next()) {
switch (iter.type()) {
case relocInfo::virtual_call_type:
if (unloading_occurred) {
// If class unloading occurred we first clear ICs where the cached metadata
// is referring to an unloaded klass or method.
clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
}
clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all);
break;
case relocInfo::opt_virtual_call_type:
case relocInfo::static_call_type:
clean_if_nmethod_is_unloaded(CompiledDirectCall::at(iter.reloc()), this, clean_all);
break;
case relocInfo::static_stub_type: {
is_in_static_stub = true;
break;
}
case relocInfo::metadata_type: {
// Only the metadata relocations contained in static/opt virtual call stubs
// contain the Method* passed to c2i adapters. It is the only metadata
// relocation that needs to be walked, as it is the one metadata relocation
// that violates the invariant that all metadata relocations have an oop
// in the compiled method (due to deferred resolution and code patching).
// This causes dead metadata to remain in compiled methods that are not
// unloading. Unless these slippery metadata relocations of the static
// stubs are at least cleared, subsequent class redefinition operations
// will access potentially free memory, and JavaThread execution
// concurrent to class unloading may call c2i adapters with dead methods.
if (!is_in_static_stub) {
// The first metadata relocation after a static stub relocation is the
// metadata relocation of the static stub used to pass the Method* to
// c2i adapters.
continue;
}
is_in_static_stub = false;
if (is_unloading()) {
// If the nmethod itself is dying, then it may point at dead metadata.
// Nobody should follow that metadata; it is strictly unsafe.
continue;
}
metadata_Relocation* r = iter.metadata_reloc();
Metadata* md = r->metadata_value();
if (md != nullptr && md->is_method()) {
Method* method = static_cast<Method*>(md);
if (!method->method_holder()->is_loader_alive()) {
Atomic::store(r->metadata_addr(), (Method*)nullptr);
if (!r->metadata_is_immediate()) {
r->fix_metadata_relocation();
}
}
}
break;
}
default:
break;
}
}
}
address nmethod::continuation_for_implicit_exception(address pc, bool for_div0_check) {
// Exception happened outside inline-cache check code => we are inside
// an active nmethod => use cpc to determine a return address
int exception_offset = int(pc - code_begin());
int cont_offset = ImplicitExceptionTable(this).continuation_offset( exception_offset );
#ifdef ASSERT
if (cont_offset == 0) {
Thread* thread = Thread::current();
ResourceMark rm(thread);
CodeBlob* cb = CodeCache::find_blob(pc);
assert(cb != nullptr && cb == this, "");
// Keep tty output consistent. To avoid ttyLocker, we buffer in stream, and print all at once.
stringStream ss;
ss.print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
print_on(&ss);
method()->print_codes_on(&ss);
print_code_on(&ss);
print_pcs_on(&ss);
tty->print("%s", ss.as_string()); // print all at once
}
#endif
if (cont_offset == 0) {
// Let the normal error handling report the exception
return nullptr;
}
if (cont_offset == exception_offset) {
#if INCLUDE_JVMCI
Deoptimization::DeoptReason deopt_reason = for_div0_check ? Deoptimization::Reason_div0_check : Deoptimization::Reason_null_check;
JavaThread *thread = JavaThread::current();
thread->set_jvmci_implicit_exception_pc(pc);
thread->set_pending_deoptimization(Deoptimization::make_trap_request(deopt_reason,
Deoptimization::Action_reinterpret));
return (SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
#else
ShouldNotReachHere();
#endif
}
return code_begin() + cont_offset;
}
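// Worked example for continuation_for_implicit_exception() above (hypothetical
// offsets): a fault at pc == code_begin() + 0x40 gives exception_offset == 0x40; if the
// ImplicitExceptionTable maps 0x40 -> 0x7c, execution continues at code_begin() + 0x7c,
// the code emitted to handle the implied exception.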
class HasEvolDependency : public MetadataClosure {
bool _has_evol_dependency;
public:
HasEvolDependency() : _has_evol_dependency(false) {}
void do_metadata(Metadata* md) {
if (md->is_method()) {
Method* method = (Method*)md;
if (method->is_old()) {
_has_evol_dependency = true;
}
}
}
bool has_evol_dependency() const { return _has_evol_dependency; }
};
bool nmethod::has_evol_metadata() {
// Check the metadata in relocIter and CompiledIC and also deoptimize
// any nmethod that has reference to old methods.
HasEvolDependency check_evol;
metadata_do(&check_evol);
if (check_evol.has_evol_dependency() && log_is_enabled(Debug, redefine, class, nmethod)) {
ResourceMark rm;
log_debug(redefine, class, nmethod)
("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on in nmethod metadata",
_method->method_holder()->external_name(),
_method->name()->as_C_string(),
_method->signature()->as_C_string(),
compile_id());
}
return check_evol.has_evol_dependency();
}
int nmethod::total_size() const {
return
@ -440,16 +1000,28 @@ const char* nmethod::compile_kind() const {
return nullptr;
}
const char* nmethod::compiler_name() const {
return compilertype2name(_compiler_type);
}
// Fill in default values for various flag fields
void nmethod::init_defaults() {
// avoid uninitialized fields, even for short time periods
_exception_cache = nullptr;
_has_unsafe_access = 0;
_has_method_handle_invokes = 0;
_has_wide_vectors = 0;
_has_monitors = 0;
_state = not_installed;
_has_flushed_dependencies = 0;
_load_reported = false; // jvmti state
_oops_do_mark_link = nullptr;
_osr_link = nullptr;
#if INCLUDE_RTM_OPT
_rtm_state = NoRTM;
#endif
}
@ -639,18 +1211,19 @@ nmethod::nmethod(
ByteSize basic_lock_owner_sp_offset,
ByteSize basic_lock_sp_offset,
OopMapSet* oop_maps )
: CompiledMethod(method, "native nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true),
: CodeBlob("native nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod),
offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
_deoptimization_generation(0),
_method(method),
_gc_data(nullptr),
_compiled_ic_data(nullptr),
_is_unlinked(false),
_native_receiver_sp_offset(basic_lock_owner_sp_offset),
_native_basic_lock_sp_offset(basic_lock_sp_offset),
_is_unloading_state(0)
_is_unloading_state(0),
_deoptimization_status(not_marked)
{
{
int scopes_data_offset = 0;
int deoptimize_offset = 0;
int deoptimize_mh_offset = 0;
debug_only(NoSafepointVerifier nsv;)
assert_locked_or_safepoint(CodeCache_lock);
@ -661,14 +1234,16 @@ nmethod::nmethod(
// values something that will never match a pc like the nmethod vtable entry
_exception_offset = 0;
_orig_pc_offset = 0;
_deopt_handler_offset = 0;
_deopt_mh_handler_offset = 0;
_gc_epoch = CodeCache::gc_epoch();
_consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts());
_stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs());
_oops_offset = data_offset();
_metadata_offset = _oops_offset + align_up(code_buffer->total_oop_size(), oopSize);
scopes_data_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize);
_scopes_pcs_offset = scopes_data_offset;
_scopes_data_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize);
_scopes_pcs_offset = _scopes_data_offset;
_dependencies_offset = _scopes_pcs_offset;
_handler_table_offset = _dependencies_offset;
_nul_chk_table_offset = _handler_table_offset;
@ -681,6 +1256,7 @@ nmethod::nmethod(
_nmethod_end_offset = _nul_chk_table_offset;
#endif
_compile_id = compile_id;
_compiler_type = type;
_entry_point = code_begin() + offsets->value(CodeOffsets::Entry);
_verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry);
_osr_entry_point = nullptr;
@ -689,10 +1265,6 @@ nmethod::nmethod(
_exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions);
_scopes_data_begin = (address) this + scopes_data_offset;
_deopt_handler_begin = (address) this + deoptimize_offset;
_deopt_mh_handler_begin = (address) this + deoptimize_mh_offset;
code_buffer->copy_code_and_locs_to(this);
code_buffer->copy_values_to(this);
@ -784,51 +1356,54 @@ nmethod::nmethod(
JVMCINMethodData* jvmci_data
#endif
)
: CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true),
: CodeBlob("nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod),
offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
_deoptimization_generation(0),
_method(method),
_gc_data(nullptr),
_compiled_ic_data(nullptr),
_is_unlinked(false),
_native_receiver_sp_offset(in_ByteSize(-1)),
_native_basic_lock_sp_offset(in_ByteSize(-1)),
_is_unloading_state(0)
_is_unloading_state(0),
_deoptimization_status(not_marked)
{
assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
{
debug_only(NoSafepointVerifier nsv;)
assert_locked_or_safepoint(CodeCache_lock);
_deopt_handler_begin = (address) this;
_deopt_mh_handler_begin = (address) this;
init_defaults();
_entry_bci = entry_bci;
_compile_id = compile_id;
_comp_level = comp_level;
_orig_pc_offset = orig_pc_offset;
_gc_epoch = CodeCache::gc_epoch();
_entry_bci = entry_bci;
_compile_id = compile_id;
_compiler_type = type;
_comp_level = comp_level;
_orig_pc_offset = orig_pc_offset;
_gc_epoch = CodeCache::gc_epoch();
// Section offsets
_consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts());
_stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs());
set_ctable_begin(header_begin() + _consts_offset);
_skipped_instructions_size = code_buffer->total_skipped_instructions_size();
#if INCLUDE_JVMCI
if (compiler->is_jvmci()) {
// JVMCI might not produce any stub sections
if (offsets->value(CodeOffsets::Exceptions) != -1) {
_exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions);
} else {
_exception_offset = -1;
}
if (offsets->value(CodeOffsets::Deopt) != -1) {
_deopt_handler_begin = (address) this + code_offset() + offsets->value(CodeOffsets::Deopt);
_deopt_handler_offset = code_offset() + offsets->value(CodeOffsets::Deopt);
} else {
_deopt_handler_begin = nullptr;
_deopt_handler_offset = -1;
}
if (offsets->value(CodeOffsets::DeoptMH) != -1) {
_deopt_mh_handler_begin = (address) this + code_offset() + offsets->value(CodeOffsets::DeoptMH);
_deopt_mh_handler_offset = code_offset() + offsets->value(CodeOffsets::DeoptMH);
} else {
_deopt_mh_handler_begin = nullptr;
_deopt_mh_handler_offset = -1;
}
} else
#endif
@ -837,25 +1412,25 @@ nmethod::nmethod(
assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
assert(offsets->value(CodeOffsets::Deopt ) != -1, "must be set");
_exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions);
_deopt_handler_begin = (address) this + _stub_offset + offsets->value(CodeOffsets::Deopt);
_exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions);
_deopt_handler_offset = _stub_offset + offsets->value(CodeOffsets::Deopt);
if (offsets->value(CodeOffsets::DeoptMH) != -1) {
_deopt_mh_handler_begin = (address) this + _stub_offset + offsets->value(CodeOffsets::DeoptMH);
_deopt_mh_handler_offset = _stub_offset + offsets->value(CodeOffsets::DeoptMH);
} else {
_deopt_mh_handler_begin = nullptr;
_deopt_mh_handler_offset = -1;
}
}
if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
_unwind_handler_offset = code_offset() + offsets->value(CodeOffsets::UnwindHandler);
} else {
_unwind_handler_offset = -1;
}
_oops_offset = data_offset();
_metadata_offset = _oops_offset + align_up(code_buffer->total_oop_size(), oopSize);
int scopes_data_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize);
_scopes_data_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize);
_scopes_pcs_offset = scopes_data_offset + align_up(debug_info->data_size (), oopSize);
_scopes_pcs_offset = _scopes_data_offset + align_up(debug_info->data_size (), oopSize);
_dependencies_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size());
_handler_table_offset = _dependencies_offset + align_up((int)dependencies->size_in_bytes(), oopSize);
_nul_chk_table_offset = _handler_table_offset + align_up(handler_table->size_in_bytes(), oopSize);
@ -871,7 +1446,6 @@ nmethod::nmethod(
_verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry);
_osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry);
_exception_cache = nullptr;
_scopes_data_begin = (address) this + scopes_data_offset;
_pc_desc_container.reset_to(scopes_pcs_begin());
@ -1509,7 +2083,7 @@ oop nmethod::oop_at_phantom(int index) const {
void nmethod::flush_dependencies() {
if (!has_flushed_dependencies()) {
set_has_flushed_dependencies();
set_has_flushed_dependencies(true);
for (Dependencies::DepStream deps(this); deps.next(); ) {
if (deps.type() == Dependencies::call_site_target_value) {
// CallSite dependencies are managed on per-CallSite instance basis.
@ -2026,7 +2600,7 @@ void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
break;
}
}
assert(has_method_handle_invokes() == (_deopt_mh_handler_begin != nullptr), "must have deopt mh handler");
assert(has_method_handle_invokes() == (_deopt_mh_handler_offset != -1), "must have deopt mh handler");
int size = count * sizeof(PcDesc);
assert(scopes_pcs_size() >= size, "oob");
@ -2989,8 +3563,8 @@ const char* nmethod::nmethod_section_label(address pos) const {
if (pos == consts_begin() && pos != insts_begin()) label = "[Constants]";
// Check stub_code before checking exception_handler or deopt_handler.
if (pos == this->stub_begin()) label = "[Stub Code]";
if (JVMCI_ONLY(_exception_offset >= 0 &&) pos == exception_begin()) label = "[Exception Handler]";
if (JVMCI_ONLY(_deopt_handler_begin != nullptr &&) pos == deopt_handler_begin()) label = "[Deopt Handler Code]";
if (JVMCI_ONLY(_exception_offset >= 0 &&) pos == exception_begin()) label = "[Exception Handler]";
if (JVMCI_ONLY(_deopt_handler_offset != -1 &&) pos == deopt_handler_begin()) label = "[Deopt Handler Code]";
return label;
}

View File

@ -25,15 +25,122 @@
#ifndef SHARE_CODE_NMETHOD_HPP
#define SHARE_CODE_NMETHOD_HPP
#include "code/compiledMethod.hpp"
#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"
#include "oops/method.hpp"
class AbstractCompiler;
class CompiledDirectCall;
class CompiledIC;
class CompiledICData;
class CompileTask;
class DepChange;
class Dependencies;
class DirectiveSet;
class DebugInformationRecorder;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class JvmtiThreadState;
class MetadataClosure;
class NativeCallWrapper;
class OopIterateClosure;
class ScopeDesc;
class xmlStream;
// This class is used internally by nmethods, to cache
// exception/pc/handler information.
class ExceptionCache : public CHeapObj<mtCode> {
friend class VMStructs;
private:
enum { cache_size = 16 };
Klass* _exception_type;
address _pc[cache_size];
address _handler[cache_size];
volatile int _count;
ExceptionCache* volatile _next;
ExceptionCache* _purge_list_next;
inline address pc_at(int index);
void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
inline address handler_at(int index);
void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
inline int count();
// increment_count is only called under lock, but there may be concurrent readers.
void increment_count();
public:
ExceptionCache(Handle exception, address pc, address handler);
Klass* exception_type() { return _exception_type; }
ExceptionCache* next();
void set_next(ExceptionCache *ec);
ExceptionCache* purge_list_next() { return _purge_list_next; }
void set_purge_list_next(ExceptionCache *ec) { _purge_list_next = ec; }
address match(Handle exception, address pc);
bool match_exception_with_space(Handle exception) ;
address test_address(address addr);
bool add_address_and_handler(address addr, address handler) ;
};
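// Design note: each ExceptionCache node caches up to cache_size (pc, handler) pairs for
// a single exception Klass. An nmethod chains nodes through _next (a new node is
// prepended when the existing one for that Klass is full), while _purge_list_next links
// nodes that have been unlinked concurrently and are awaiting deletion on the CodeCache
// purge list (see CodeCache::release_exception_cache).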
// cache pc descs found in earlier inquiries
class PcDescCache {
friend class VMStructs;
private:
enum { cache_size = 4 };
// The array elements MUST be volatile! Several threads may modify
// and read from the cache concurrently; without volatile elements,
// find_pc_desc_internal has returned wrong results, because the C++
// compiler (namely xlC12) may duplicate field accesses.
typedef PcDesc* PcDescPtr;
volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
public:
PcDescCache() { debug_only(_pc_descs[0] = nullptr); }
void reset_to(PcDesc* initial_pc_desc);
PcDesc* find_pc_desc(int pc_offset, bool approximate);
void add_pc_desc(PcDesc* pc_desc);
PcDesc* last_pc_desc() { return _pc_descs[0]; }
};
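// Note: _pc_descs[0] holds the most recently found PcDesc, which last_pc_desc() exposes
// so PcDescContainer::find_pc_desc() below can answer repeated queries for the same pc
// without searching.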
class PcDescSearch {
private:
address _code_begin;
PcDesc* _lower;
PcDesc* _upper;
public:
PcDescSearch(address code, PcDesc* lower, PcDesc* upper) :
_code_begin(code), _lower(lower), _upper(upper)
{
}
address code_begin() const { return _code_begin; }
PcDesc* scopes_pcs_begin() const { return _lower; }
PcDesc* scopes_pcs_end() const { return _upper; }
};
class PcDescContainer {
private:
PcDescCache _pc_desc_cache;
public:
PcDescContainer() {}
PcDesc* find_pc_desc_internal(address pc, bool approximate, const PcDescSearch& search);
void reset_to(PcDesc* initial_pc_desc) { _pc_desc_cache.reset_to(initial_pc_desc); }
PcDesc* find_pc_desc(address pc, bool approximate, const PcDescSearch& search) {
address base_address = search.code_begin();
PcDesc* desc = _pc_desc_cache.last_pc_desc();
if (desc != nullptr && desc->pc_offset() == pc - base_address) {
return desc;
}
return find_pc_desc_internal(pc, approximate, search);
}
};
// nmethods (native methods) are the compiled code versions of Java methods.
//
@ -65,19 +172,192 @@ class FailedSpeculation;
class JVMCINMethodData;
#endif
class nmethod : public CompiledMethod {
class nmethod : public CodeBlob {
friend class VMStructs;
friend class JVMCIVMStructs;
friend class CodeCache; // scavengable oops
friend class JVMCINMethodData;
friend class DeoptimizationScope;
private:
// Used to track in which deoptimize handshake this method will be deoptimized.
uint64_t _deoptimization_generation;
uint64_t _gc_epoch;
Method* _method;
// To support simple linked-list chaining of nmethods:
nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
PcDescContainer _pc_desc_container;
ExceptionCache* volatile _exception_cache;
void* _gc_data;
struct oops_do_mark_link; // Opaque data type.
static nmethod* volatile _oops_do_mark_nmethods;
oops_do_mark_link* volatile _oops_do_mark_link;
// offsets for entry points
address _entry_point; // entry point with class check
address _verified_entry_point; // entry point without class check
address _osr_entry_point; // entry point for on stack replacement
CompiledICData* _compiled_ic_data;
// Shared fields for all nmethod's
int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
// Offsets for different nmethod parts
int _exception_offset;
// All deoptees will resume execution at the location described by
// this offset.
int _deopt_handler_offset;
// All deoptees at a MethodHandle call site will resume execution
// at the location described by this offset.
int _deopt_mh_handler_offset;
// Offset of the unwind handler if it exists
int _unwind_handler_offset;
int _consts_offset;
int _stub_offset;
int _oops_offset; // offset to where embedded oop table begins (inside data)
int _metadata_offset; // embedded meta data table
int _scopes_data_offset;
int _scopes_pcs_offset;
int _dependencies_offset;
int _handler_table_offset;
int _nul_chk_table_offset;
#if INCLUDE_JVMCI
int _speculations_offset;
int _jvmci_data_offset;
#endif
int _nmethod_end_offset;
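// Rough layout implied by the offsets above, in increasing address order from
// header_begin() (a sketch; the authoritative boundaries are the accessors below):
//   header | consts | insts | stubs | oops | metadata | scopes data | scopes pcs
//   | dependencies | handler table | nul-chk table [ | speculations | JVMCI data ] | end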
int _skipped_instructions_size;
// location in frame (offset for sp) where deopt can store the original
// pc during a deopt.
int _orig_pc_offset;
int _compile_id; // which compilation made this nmethod
CompilerType _compiler_type; // which compiler made this nmethod (u1)
bool _is_unlinked;
#if INCLUDE_RTM_OPT
// RTM state at compile time. Used during deoptimization to decide
// whether to restart collecting RTM locking abort statistic again.
RTMState _rtm_state;
#endif
// These are used for compiled synchronized native methods to
// locate the owner and stack slot for the BasicLock. They are
// needed because there is no debug information for compiled native
// wrappers and the oop maps are insufficient to allow
// frame::retrieve_receiver() to work. Currently they are expected
// to be byte offsets from the Java stack pointer for maximum code
// sharing between platforms. JVMTI's GetLocalInstance() uses these
// offsets to find the receiver for non-static native wrapper frames.
ByteSize _native_receiver_sp_offset;
ByteSize _native_basic_lock_sp_offset;
CompLevel _comp_level; // compilation level (s1)
// Local state used to keep track of whether unloading is happening or not
volatile uint8_t _is_unloading_state;
// used by jvmti to track if an event has been posted for this nmethod.
bool _load_reported;
// Protected by CompiledMethod_lock
volatile signed char _state; // {not_installed, in_use, not_entrant}
// set during construction
uint8_t _has_unsafe_access:1, // May fault due to unsafe access.
_has_method_handle_invokes:1,// Has this method MethodHandle invokes?
_has_wide_vectors:1, // Preserve wide vectors at safepoints
_has_monitors:1, // Fastpath monitor detection for continuations
_has_flushed_dependencies:1; // Used for maintenance of dependencies (under CodeCache_lock)
enum DeoptimizationStatus : u1 {
not_marked,
deoptimize,
deoptimize_noupdate,
deoptimize_done
};
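// Rough life cycle (a summary based on the accessors below; see DeoptimizationScope for
// the exact protocol): not_marked -> deoptimize or deoptimize_noupdate when the nmethod
// is marked for deoptimization, and -> deoptimize_done via set_deoptimized_done() once
// the deoptimization has been performed; update_recompile_counts() keys off these values.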
volatile DeoptimizationStatus _deoptimization_status; // Used for stack deoptimization
DeoptimizationStatus deoptimization_status() const {
return Atomic::load(&_deoptimization_status);
}
// For native wrappers
nmethod(Method* method,
CompilerType type,
int nmethod_size,
int compile_id,
CodeOffsets* offsets,
CodeBuffer *code_buffer,
int frame_size,
ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
ByteSize basic_lock_sp_offset, /* synchronized natives only */
OopMapSet* oop_maps);
// Creation support
nmethod(Method* method,
CompilerType type,
int nmethod_size,
int compile_id,
int entry_bci,
CodeOffsets* offsets,
int orig_pc_offset,
DebugInformationRecorder *recorder,
Dependencies* dependencies,
CodeBuffer *code_buffer,
int frame_size,
OopMapSet* oop_maps,
ExceptionHandlerTable* handler_table,
ImplicitExceptionTable* nul_chk_table,
AbstractCompiler* compiler,
CompLevel comp_level
#if INCLUDE_JVMCI
, char* speculations = nullptr,
int speculations_len = 0,
JVMCINMethodData* jvmci_data = nullptr
#endif
);
// helper methods
void* operator new(size_t size, int nmethod_size, int comp_level) throw();
// For method handle intrinsics: Try MethodNonProfiled, MethodProfiled and NonNMethod.
// Attention: Only allow NonNMethod space for special nmethods which don't need to be
// findable by nmethod iterators! In particular, they must not contain oops!
void* operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw();
const char* reloc_string_for(u_char* begin, u_char* end);
bool try_transition(signed char new_state);
// Returns true if this thread changed the state of the nmethod or
// false if another thread performed the transition.
bool make_entrant() { Unimplemented(); return false; }
void inc_decompile_count();
// Inform external interfaces that a compiled method has been unloaded
void post_compiled_method_unload();
// Initialize fields to their default values
void init_defaults();
PcDesc* find_pc_desc(address pc, bool approximate) {
return _pc_desc_container.find_pc_desc(pc, approximate, PcDescSearch(code_begin(), scopes_pcs_begin(), scopes_pcs_end()));
}
// STW two-phase nmethod root processing helpers.
//
// When determining liveness of a given nmethod to do code cache unloading,
@ -137,7 +417,6 @@ class nmethod : public CompiledMethod {
// the next state by marking the _transition_ with (C) and (O), which mean "current"
// and "other" thread respectively.
//
struct oops_do_mark_link; // Opaque data type.
// States used for claiming nmethods during root processing.
static const uint claim_weak_request_tag = 0;
@ -189,146 +468,7 @@ class nmethod : public CompiledMethod {
// transitions).
void oops_do_set_strong_done(nmethod* old_head);
static nmethod* volatile _oops_do_mark_nmethods;
oops_do_mark_link* volatile _oops_do_mark_link;
// offsets for entry points
address _entry_point; // entry point with class check
address _verified_entry_point; // entry point without class check
address _osr_entry_point; // entry point for on stack replacement
CompiledICData* _compiled_ic_data;
bool _is_unlinked;
// Shared fields for all nmethod's
int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
// Offsets for different nmethod parts
int _exception_offset;
// Offset of the unwind handler if it exists
int _unwind_handler_offset;
int _consts_offset;
int _stub_offset;
int _oops_offset; // offset to where embedded oop table begins (inside data)
int _metadata_offset; // embedded meta data table
int _scopes_data_offset;
int _scopes_pcs_offset;
int _dependencies_offset;
int _handler_table_offset;
int _nul_chk_table_offset;
#if INCLUDE_JVMCI
int _speculations_offset;
int _jvmci_data_offset;
#endif
int _nmethod_end_offset;
int code_offset() const { return int(code_begin() - header_begin()); }
// location in frame (offset for sp) that deopt can store the original
// pc during a deopt.
int _orig_pc_offset;
int _compile_id; // which compilation made this nmethod
#if INCLUDE_RTM_OPT
// RTM state at compile time. Used during deoptimization to decide
// whether to restart collecting RTM locking abort statistic again.
RTMState _rtm_state;
#endif
// These are used for compiled synchronized native methods to
// locate the owner and stack slot for the BasicLock. They are
// needed because there is no debug information for compiled native
// wrappers and the oop maps are insufficient to allow
// frame::retrieve_receiver() to work. Currently they are expected
// to be byte offsets from the Java stack pointer for maximum code
// sharing between platforms. JVMTI's GetLocalInstance() uses these
// offsets to find the receiver for non-static native wrapper frames.
ByteSize _native_receiver_sp_offset;
ByteSize _native_basic_lock_sp_offset;
CompLevel _comp_level; // compilation level
// Local state used to keep track of whether unloading is happening or not
volatile uint8_t _is_unloading_state;
// protected by CodeCache_lock
bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock)
// used by jvmti to track if an event has been posted for this nmethod.
bool _load_reported;
// Protected by CompiledMethod_lock
volatile signed char _state; // {not_installed, in_use, not_used, not_entrant}
int _skipped_instructions_size;
// For native wrappers
nmethod(Method* method,
CompilerType type,
int nmethod_size,
int compile_id,
CodeOffsets* offsets,
CodeBuffer *code_buffer,
int frame_size,
ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
ByteSize basic_lock_sp_offset, /* synchronized natives only */
OopMapSet* oop_maps);
// Creation support
nmethod(Method* method,
CompilerType type,
int nmethod_size,
int compile_id,
int entry_bci,
CodeOffsets* offsets,
int orig_pc_offset,
DebugInformationRecorder *recorder,
Dependencies* dependencies,
CodeBuffer *code_buffer,
int frame_size,
OopMapSet* oop_maps,
ExceptionHandlerTable* handler_table,
ImplicitExceptionTable* nul_chk_table,
AbstractCompiler* compiler,
CompLevel comp_level
#if INCLUDE_JVMCI
, char* speculations = nullptr,
int speculations_len = 0,
JVMCINMethodData* jvmci_data = nullptr
#endif
);
// helper methods
void* operator new(size_t size, int nmethod_size, int comp_level) throw();
// For method handle intrinsics: Try MethodNonProfiled, MethodProfiled and NonNMethod.
// Attention: Only allow NonNMethod space for special nmethods which don't need to be
// findable by nmethod iterators! In particular, they must not contain oops!
void* operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw();
const char* reloc_string_for(u_char* begin, u_char* end);
bool try_transition(signed char new_state);
// Returns true if this thread changed the state of the nmethod or
// false if another thread performed the transition.
bool make_entrant() { Unimplemented(); return false; }
void inc_decompile_count();
// Inform external interfaces that a compiled method has been unloaded
void post_compiled_method_unload();
// Initialize fields to their default values
void init_defaults();
// Offsets
int content_offset() const { return int(content_begin() - header_begin()); }
int data_offset() const { return _data_offset; }
address header_end() const { return (address) header_begin() + header_size(); }
public:
// create nmethod with entry_bci
static nmethod* new_nmethod(const methodHandle& method,
int compile_id,
@ -351,14 +491,6 @@ class nmethod : public CompiledMethod {
#endif
);
// Only used for unit tests.
nmethod()
: CompiledMethod(),
_native_receiver_sp_offset(in_ByteSize(-1)),
_native_basic_lock_sp_offset(in_ByteSize(-1)),
_is_unloading_state(0) {}
static nmethod* new_native_nmethod(const methodHandle& method,
int compile_id,
CodeBuffer *code_buffer,
@ -370,86 +502,126 @@ class nmethod : public CompiledMethod {
OopMapSet* oop_maps,
int exception_handler = -1);
// type info
bool is_nmethod() const { return true; }
bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }
Method* method () const { return _method; }
bool is_native_method() const { return _method != nullptr && _method->is_native(); }
bool is_java_method () const { return _method != nullptr && !_method->is_native(); }
bool is_osr_method () const { return _entry_bci != InvocationEntryBci; }
// Compiler task identification. Note that all OSR methods
// are numbered in an independent sequence if CICountOSR is true,
// and native method wrappers are also numbered independently if
// CICountNative is true.
int compile_id() const { return _compile_id; }
const char* compile_kind() const;
inline bool is_compiled_by_c1 () const { return _compiler_type == compiler_c1; }
inline bool is_compiled_by_c2 () const { return _compiler_type == compiler_c2; }
inline bool is_compiled_by_jvmci() const { return _compiler_type == compiler_jvmci; }
CompilerType compiler_type () const { return _compiler_type; }
const char* compiler_name () const;
// boundaries for different parts
address consts_begin () const { return header_begin() + _consts_offset ; }
address consts_end () const { return code_begin() ; }
address stub_begin () const { return header_begin() + _stub_offset ; }
address stub_end () const { return header_begin() + _oops_offset ; }
address exception_begin () const { return header_begin() + _exception_offset ; }
address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : nullptr; }
oop* oops_begin () const { return (oop*) (header_begin() + _oops_offset) ; }
oop* oops_end () const { return (oop*) (header_begin() + _metadata_offset) ; }
address consts_begin () const { return header_begin() + _consts_offset ; }
address consts_end () const { return header_begin() + code_offset() ; }
address insts_begin () const { return header_begin() + code_offset() ; }
address insts_end () const { return header_begin() + _stub_offset ; }
address stub_begin () const { return header_begin() + _stub_offset ; }
address stub_end () const { return header_begin() + _oops_offset ; }
address exception_begin () const { return header_begin() + _exception_offset ; }
address deopt_handler_begin () const { return header_begin() + _deopt_handler_offset ; }
address deopt_mh_handler_begin() const { return header_begin() + _deopt_mh_handler_offset ; }
address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : nullptr; }
oop* oops_begin () const { return (oop*) (header_begin() + _oops_offset) ; }
oop* oops_end () const { return (oop*) (header_begin() + _metadata_offset) ; }
Metadata** metadata_begin () const { return (Metadata**) (header_begin() + _metadata_offset) ; }
Metadata** metadata_end () const { return (Metadata**) _scopes_data_begin; }
Metadata** metadata_begin () const { return (Metadata**) (header_begin() + _metadata_offset) ; }
Metadata** metadata_end () const { return (Metadata**) (header_begin() + _scopes_data_offset) ; }
address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; }
PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset ); }
PcDesc* scopes_pcs_end () const { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
address dependencies_begin () const { return header_begin() + _dependencies_offset ; }
address dependencies_end () const { return header_begin() + _handler_table_offset ; }
address handler_table_begin () const { return header_begin() + _handler_table_offset ; }
address handler_table_end () const { return header_begin() + _nul_chk_table_offset ; }
address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset ; }
int skipped_instructions_size () const { return _skipped_instructions_size ; }
address scopes_data_begin () const { return header_begin() + _scopes_data_offset ; }
address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; }
PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset) ; }
PcDesc* scopes_pcs_end () const { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
address dependencies_begin () const { return header_begin() + _dependencies_offset ; }
address dependencies_end () const { return header_begin() + _handler_table_offset ; }
address handler_table_begin () const { return header_begin() + _handler_table_offset ; }
address handler_table_end () const { return header_begin() + _nul_chk_table_offset ; }
address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset ; }
#if INCLUDE_JVMCI
address nul_chk_table_end () const { return header_begin() + _speculations_offset ; }
address speculations_begin () const { return header_begin() + _speculations_offset ; }
address speculations_end () const { return header_begin() + _jvmci_data_offset ; }
address jvmci_data_begin () const { return header_begin() + _jvmci_data_offset ; }
address jvmci_data_end () const { return header_begin() + _nmethod_end_offset ; }
#else
address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; }
#endif
// Sizes
int oops_size () const { return int((address) oops_end () - (address) oops_begin ()); }
int metadata_size () const { return int((address) metadata_end () - (address) metadata_begin ()); }
int dependencies_size () const { return int( dependencies_end () - dependencies_begin ()); }
int consts_size () const { return int( consts_end () - consts_begin ()); }
int insts_size () const { return int( insts_end () - insts_begin ()); }
int stub_size () const { return int( stub_end () - stub_begin ()); }
int oops_size () const { return int((address) oops_end () - (address) oops_begin ()); }
int metadata_size () const { return int((address) metadata_end () - (address) metadata_begin ()); }
int scopes_data_size () const { return int( scopes_data_end () - scopes_data_begin ()); }
int scopes_pcs_size () const { return int((intptr_t)scopes_pcs_end () - (intptr_t)scopes_pcs_begin ()); }
int dependencies_size () const { return int( dependencies_end () - dependencies_begin ()); }
int handler_table_size() const { return int( handler_table_end() - handler_table_begin()); }
int nul_chk_table_size() const { return int( nul_chk_table_end() - nul_chk_table_begin()); }
#if INCLUDE_JVMCI
int speculations_size () const { return int( speculations_end () - speculations_begin ()); }
int jvmci_data_size () const { return int( jvmci_data_end () - jvmci_data_begin ()); }
#endif
int oops_count() const { assert(oops_size() % oopSize == 0, ""); return (oops_size() / oopSize) + 1; }
int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; }
int total_size () const;
int skipped_instructions_size () const { return _skipped_instructions_size; }
int total_size() const;
// Containment
bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); }
bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
// Returns true if a given address is in the 'insts' section. The method
// insts_contains_inclusive() is end-inclusive.
bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); }
bool insts_contains_inclusive(address addr) const { return insts_begin () <= addr && addr <= insts_end (); }
bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); }
bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
bool handler_table_contains (address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
bool nul_chk_table_contains (address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
// entry points
address entry_point() const { return _entry_point; } // normal entry point
address verified_entry_point() const { return _verified_entry_point; } // if klass is correct
enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
// allowed to advance state
in_use = 0, // executable nmethod
not_entrant = 1 // marked for deoptimization but activations may still exist
};
// flag accessing and manipulation
bool is_not_installed() const { return _state == not_installed; }
bool is_in_use() const { return _state <= in_use; }
bool is_not_entrant() const { return _state == not_entrant; }
int get_state() const { return _state; }
void clear_unloading_state();
// Heuristically deduce an nmethod isn't worth keeping around
bool is_cold();
virtual bool is_unloading();
virtual void do_unloading(bool unloading_occurred);
bool is_unloading();
void do_unloading(bool unloading_occurred);
bool is_unlinked() const { return _is_unlinked; }
void set_is_unlinked() { assert(!_is_unlinked, "already unlinked"); _is_unlinked = true; }
#if INCLUDE_RTM_OPT
// rtm state accessing and manipulating
RTMState rtm_state() const { return _rtm_state; }
void set_rtm_state(RTMState state) { _rtm_state = state; }
#endif
bool make_in_use() {
@ -462,23 +634,51 @@ class nmethod : public CompiledMethod {
bool make_not_entrant();
bool make_not_used() { return make_not_entrant(); }
int get_state() const {
return _state;
bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
void set_deoptimized_done();
bool update_recompile_counts() const {
// Update recompile counts when either the update is explicitly requested (deoptimize)
// or the nmethod is not marked for deoptimization at all (not_marked).
// The latter happens during uncommon traps when deoptimized nmethod is made not entrant.
DeoptimizationStatus status = deoptimization_status();
return status != deoptimize_noupdate && status != deoptimize_done;
}
// tells whether frames described by this nmethod can be deoptimized
// note: native wrappers cannot be deoptimized.
bool can_be_deoptimized() const { return is_java_method(); }
bool has_dependencies() { return dependencies_size() != 0; }
void print_dependencies_on(outputStream* out) PRODUCT_RETURN;
void flush_dependencies();
bool has_flushed_dependencies() { return _has_flushed_dependencies; }
void set_has_flushed_dependencies() {
template<typename T>
T* gc_data() const { return reinterpret_cast<T*>(_gc_data); }
template<typename T>
void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); }
bool has_unsafe_access() const { return _has_unsafe_access; }
void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
bool has_monitors() const { return _has_monitors; }
void set_has_monitors(bool z) { _has_monitors = z; }
bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
bool has_wide_vectors() const { return _has_wide_vectors; }
void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
bool has_flushed_dependencies() const { return _has_flushed_dependencies; }
void set_has_flushed_dependencies(bool z) {
assert(!has_flushed_dependencies(), "should only happen once");
_has_flushed_dependencies = 1;
_has_flushed_dependencies = z;
}
int comp_level() const { return _comp_level; }
void unlink_from_method();
// Support for oops in scopes and relocs:
// Note: index 0 is reserved for null.
oop oop_at(int index) const;
@ -491,7 +691,7 @@ class nmethod : public CompiledMethod {
// Support for meta data in scopes and relocs:
// Note: index 0 is reserved for null.
Metadata* metadata_at(int index) const { return index == 0 ? nullptr: *metadata_addr_at(index); }
Metadata** metadata_addr_at(int index) const { // for GC
// relocation indexes are biased by 1 (because 0 is reserved)
assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
@ -506,10 +706,87 @@ private:
void fix_oop_relocations(address begin, address end, bool initialize_immediates);
inline void initialize_immediate_oop(oop* dest, jobject handle);
protected:
address oops_reloc_begin() const;
public:
void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
void fix_oop_relocations() { fix_oop_relocations(nullptr, nullptr, false); }
bool is_at_poll_return(address pc);
bool is_at_poll_or_poll_return(address pc);
protected:
// Exception cache support
// Note: _exception_cache may be read and cleaned concurrently.
ExceptionCache* exception_cache() const { return _exception_cache; }
ExceptionCache* exception_cache_acquire() const;
void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; }
public:
address handler_for_exception_and_pc(Handle exception, address pc);
void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
void clean_exception_cache();
void add_exception_cache_entry(ExceptionCache* new_entry);
ExceptionCache* exception_cache_entry_for_exception(Handle exception);
// MethodHandle
bool is_method_handle_return(address return_pc);
// Deopt
// Returns true if the PC is one we would expect if the frame is being deopted.
inline bool is_deopt_pc(address pc);
inline bool is_deopt_mh_entry(address pc);
inline bool is_deopt_entry(address pc);
// Accessor/mutator for the original pc of a frame before a frame was deopted.
address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
const char* state() const;
bool inlinecache_check_contains(address addr) const {
return (addr >= code_begin() && addr < verified_entry_point());
}
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) override;
// implicit exceptions support
address continuation_for_implicit_div0_exception(address pc) { return continuation_for_implicit_exception(pc, true); }
address continuation_for_implicit_null_exception(address pc) { return continuation_for_implicit_exception(pc, false); }
static address get_deopt_original_pc(const frame* fr);
// Inline cache support for class unloading and nmethod unloading
private:
void cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all);
address continuation_for_implicit_exception(address pc, bool for_div0_check);
public:
// Serial version used by whitebox test
void cleanup_inline_caches_whitebox();
void clear_inline_caches();
// Execute nmethod barrier code, as if entering through nmethod call.
void run_nmethod_entry_barrier();
void verify_oop_relocations();
bool has_evol_metadata();
Method* attached_method(address call_pc);
Method* attached_method_before_pc(address pc);
// GC unloading support
// Cleans unloaded klasses and unloaded nmethods in inline caches
void unload_nmethod_caches(bool class_unloading_occurred);
void unlink_from_method();
// On-stack replacement support
int osr_entry_bci() const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
address osr_entry() const { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
@ -524,7 +801,7 @@ public:
void unlink();
// Deallocate this nmethod - called by the GC
void purge(bool free_code_cache_data, bool unregister_nmethod);
void purge(bool free_code_cache_data, bool unregister_nmethod) override;
// See comment at definition of _last_seen_on_stack
void mark_as_maybe_on_stack();
@ -549,7 +826,6 @@ public:
}
#endif
public:
void oops_do(OopClosure* f) { oops_do(f, false); }
void oops_do(OopClosure* f, bool allow_dead);
@ -591,6 +867,15 @@ public:
void set_load_reported() { _load_reported = true; }
public:
// ScopeDesc retrieval operation
PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); }
// pc_desc_near returns the first PcDesc at or after the given pc.
PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }
// ScopeDesc for an instruction
ScopeDesc* scope_desc_at(address pc);
ScopeDesc* scope_desc_near(address pc);
// copying of debugging information
void copy_scopes_pcs(PcDesc* pcs, int count);
void copy_scopes_data(address buffer, int size);
@ -604,7 +889,7 @@ public:
void post_compiled_method_load_event(JvmtiThreadState* state = nullptr);
// verify operations
void verify();
void verify() override;
void verify_scopes();
void verify_interrupt_point(address interrupt_point, bool is_inline_cache);
@ -616,8 +901,8 @@ public:
void decode(outputStream* st) const { decode2(st); } // just delegate here.
// printing support
void print() const;
void print(outputStream* st) const;
void print() const override;
void print(outputStream* st) const;
void print_code();
#if defined(SUPPORT_DATA_STRUCTS)
@ -626,7 +911,7 @@ public:
void print_pcs_on(outputStream* st);
void print_scopes() { print_scopes_on(tty); }
void print_scopes_on(outputStream* st) PRODUCT_RETURN;
void print_value_on(outputStream* st) const;
void print_value_on(outputStream* st) const override;
void print_handler_table();
void print_nul_chk_table();
void print_recorded_oop(int log_n, int index);
@ -646,7 +931,7 @@ public:
void print_nmethod(bool print_code);
// need to re-define this from CodeBlob else the overload hides it
virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
void print_on(outputStream* st) const override { CodeBlob::print_on(st); }
void print_on(outputStream* st, const char* msg) const;
// Logging
@ -655,7 +940,7 @@ public:
void log_state_change() const;
// Prints block-level comments, including nmethod specific block labels:
virtual void print_block_comment(outputStream* stream, address block_begin) const {
void print_block_comment(outputStream* stream, address block_begin) const override {
#if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
print_nmethod_labels(stream, block_begin);
CodeBlob::print_block_comment(stream, block_begin);
@ -670,13 +955,6 @@ public:
// Prints a comment for one native instruction (reloc info, pc desc)
void print_code_comment_on(outputStream* st, int column, address begin, address end);
// Compiler task identification. Note that all OSR methods
// are numbered in an independent sequence if CICountOSR is true,
// and native method wrappers are also numbered independently if
// CICountNative is true.
virtual int compile_id() const { return _compile_id; }
const char* compile_kind() const;
// tells if this compiled method is dependent on the given changes,
// and the changes have invalidated it
bool check_dependency_on(DepChange& changes);
@ -684,7 +962,7 @@ public:
// Fast breakpoint support. Tells if this compiled method is
// dependent on the given method. Returns true if this nmethod
// corresponds to the given method as well.
virtual bool is_dependent_on_method(Method* dependee);
bool is_dependent_on_method(Method* dependee);
// JVMTI's GetLocalInstance() support
ByteSize native_receiver_sp_offset() {
@ -699,11 +977,11 @@ public:
static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
static ByteSize state_offset() { return byte_offset_of(nmethod, _state); }
virtual void metadata_do(MetadataClosure* f);
void metadata_do(MetadataClosure* f);
address call_instruction_address(address pc) const;
virtual void make_deoptimized();
void make_deoptimized();
void finalize_relocations();
};
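// --- Illustrative sketch (editor's note, not part of this patch) ---
// With CompiledMethod folded into nmethod, callers that used to narrow a
// CodeBlob via as_compiled_method_or_null() now ask for the nmethod directly
// and use the entry points declared above (is_deopt_pc, pc_desc_at, ...).
// 'pc' is assumed to be an arbitrary code address; everything else is the
// API visible in this header and in codeCache.hpp.
#include "code/codeCache.hpp"
#include "code/nmethod.inline.hpp"

static bool pc_is_deopt_pc_in_some_nmethod(address pc) {
  CodeBlob* cb = CodeCache::find_blob(pc);
  nmethod* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr;
  return nm != nullptr && nm->is_deopt_pc(pc);
}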


@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,19 +22,19 @@
*
*/
#ifndef SHARE_CODE_COMPILEDMETHOD_INLINE_HPP
#define SHARE_CODE_COMPILEDMETHOD_INLINE_HPP
#ifndef SHARE_CODE_NMETHOD_INLINE_HPP
#define SHARE_CODE_NMETHOD_INLINE_HPP
#include "code/compiledMethod.hpp"
#include "code/nmethod.hpp"
#include "code/nativeInst.hpp"
#include "runtime/atomic.hpp"
#include "runtime/frame.hpp"
inline bool CompiledMethod::is_deopt_pc(address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
inline bool nmethod::is_deopt_pc(address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
// When using JVMCI the address might be off by the size of a call instruction.
inline bool CompiledMethod::is_deopt_entry(address pc) {
inline bool nmethod::is_deopt_entry(address pc) {
return pc == deopt_handler_begin()
#if INCLUDE_JVMCI
|| (is_compiled_by_jvmci() && pc == (deopt_handler_begin() + NativeCall::instruction_size))
@ -42,7 +42,7 @@ inline bool CompiledMethod::is_deopt_entry(address pc) {
;
}
inline bool CompiledMethod::is_deopt_mh_entry(address pc) {
inline bool nmethod::is_deopt_mh_entry(address pc) {
return pc == deopt_mh_handler_begin()
#if INCLUDE_JVMCI
|| (is_compiled_by_jvmci() && pc == (deopt_mh_handler_begin() + NativeCall::instruction_size))
@ -51,19 +51,19 @@ inline bool CompiledMethod::is_deopt_mh_entry(address pc) {
}
// -----------------------------------------------------------------------------
// CompiledMethod::get_deopt_original_pc
// nmethod::get_deopt_original_pc
//
// Return the original PC for the given PC if:
// (a) the given PC belongs to a nmethod and
// (a) the given PC belongs to an nmethod and
// (b) it is a deopt PC
inline address CompiledMethod::get_deopt_original_pc(const frame* fr) {
inline address nmethod::get_deopt_original_pc(const frame* fr) {
if (fr->cb() == nullptr) return nullptr;
CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
if (cm != nullptr && cm->is_deopt_pc(fr->pc()))
return cm->get_original_pc(fr);
nmethod* nm = fr->cb()->as_nmethod_or_null();
if (nm != nullptr && nm->is_deopt_pc(fr->pc())) {
return nm->get_original_pc(fr);
}
return nullptr;
}
@ -86,4 +86,4 @@ address ExceptionCache::handler_at(int index) {
inline void ExceptionCache::increment_count() { Atomic::release_store(&_count, _count + 1); }
#endif // SHARE_CODE_COMPILEDMETHOD_INLINE_HPP
#endif // SHARE_CODE_NMETHOD_INLINE_HPP
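// --- Illustrative sketch (editor's note, not part of this patch) ---
// A minimal caller of nmethod::get_deopt_original_pc() as defined above: it
// recovers the pre-deoptimization pc of a frame, falling back to the frame's
// current pc. 'fr' is assumed to be a fully constructed frame.
#include "code/nmethod.inline.hpp"
#include "runtime/frame.hpp"

static address original_or_current_pc(const frame* fr) {
  address original = nmethod::get_deopt_original_pc(fr);   // nullptr if not a deopt pc
  return (original != nullptr) ? original : fr->pc();
}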


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,11 +36,11 @@ PcDesc::PcDesc(int pc_offset, int scope_decode_offset, int obj_decode_offset) {
_flags = 0;
}
address PcDesc::real_pc(const CompiledMethod* code) const {
address PcDesc::real_pc(const nmethod* code) const {
return code->code_begin() + pc_offset();
}
void PcDesc::print_on(outputStream* st, CompiledMethod* code) {
void PcDesc::print_on(outputStream* st, nmethod* code) {
#ifndef PRODUCT
ResourceMark rm;
st->print_cr("PcDesc(pc=" PTR_FORMAT " offset=%x bits=%x):", p2i(real_pc(code)), pc_offset(), _flags);
@ -57,7 +57,7 @@ void PcDesc::print_on(outputStream* st, CompiledMethod* code) {
#endif
}
bool PcDesc::verify(CompiledMethod* code) {
bool PcDesc::verify(nmethod* code) {
//Unimplemented();
return true;
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,7 +29,7 @@
// PcDescs map a physical PC (given as offset from start of nmethod) to
// the corresponding source scope and byte code index.
class CompiledMethod;
class nmethod;
class PcDesc {
friend class VMStructs;
@ -102,11 +102,11 @@ class PcDesc {
void set_arg_escape(bool z) { set_flag(PCDESC_arg_escape, z); }
// Returns the real pc
address real_pc(const CompiledMethod* code) const;
address real_pc(const nmethod* code) const;
void print(CompiledMethod* code) { print_on(tty, code); }
void print_on(outputStream* st, CompiledMethod* code);
bool verify(CompiledMethod* code);
void print(nmethod* code) { print_on(tty, code); }
void print_on(outputStream* st, nmethod* code);
bool verify(nmethod* code);
};
#endif // SHARE_CODE_PCDESC_HPP
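// --- Illustrative sketch (editor's note, not part of this patch) ---
// PcDesc stores pcs as offsets from the owning nmethod's code_begin(), so
// real_pc(code) is code->code_begin() + pc_offset(). The hypothetical helper
// below is the inverse mapping a lookup would use; 'nm' and 'pc' are assumed
// to belong together.
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"

static int pc_offset_in(const nmethod* nm, address pc) {
  return (int)(pc - nm->code_begin());   // inverse of PcDesc::real_pc(nm)
}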


@ -117,13 +117,13 @@ void relocInfo::change_reloc_info_for_address(RelocIterator *itr, address pc, re
// ----------------------------------------------------------------------------------------------------
// Implementation of RelocIterator
void RelocIterator::initialize(CompiledMethod* nm, address begin, address limit) {
void RelocIterator::initialize(nmethod* nm, address begin, address limit) {
initialize_misc();
if (nm == nullptr && begin != nullptr) {
// allow nmethod to be deduced from beginning address
CodeBlob* cb = CodeCache::find_blob(begin);
nm = (cb != nullptr) ? cb->as_compiled_method_or_null() : nullptr;
nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr;
}
guarantee(nm != nullptr, "must be able to deduce nmethod from other arguments");
@ -633,9 +633,9 @@ address virtual_call_Relocation::cached_value() {
}
Method* virtual_call_Relocation::method_value() {
CompiledMethod* cm = code();
if (cm == nullptr) return (Method*)nullptr;
Metadata* m = cm->metadata_at(_method_index);
nmethod* nm = code();
if (nm == nullptr) return (Method*)nullptr;
Metadata* m = nm->metadata_at(_method_index);
assert(m != nullptr || _method_index == 0, "should be non-null for non-zero index");
assert(m == nullptr || m->is_method(), "not a method");
return (Method*)m;
@ -659,9 +659,9 @@ void opt_virtual_call_Relocation::unpack_data() {
}
Method* opt_virtual_call_Relocation::method_value() {
CompiledMethod* cm = code();
if (cm == nullptr) return (Method*)nullptr;
Metadata* m = cm->metadata_at(_method_index);
nmethod* nm = code();
if (nm == nullptr) return (Method*)nullptr;
Metadata* m = nm->metadata_at(_method_index);
assert(m != nullptr || _method_index == 0, "should be non-null for non-zero index");
assert(m == nullptr || m->is_method(), "not a method");
return (Method*)m;
@ -689,9 +689,9 @@ address opt_virtual_call_Relocation::static_stub() {
}
Method* static_call_Relocation::method_value() {
CompiledMethod* cm = code();
if (cm == nullptr) return (Method*)nullptr;
Metadata* m = cm->metadata_at(_method_index);
nmethod* nm = code();
if (nm == nullptr) return (Method*)nullptr;
Metadata* m = nm->metadata_at(_method_index);
assert(m != nullptr || _method_index == 0, "should be non-null for non-zero index");
assert(m == nullptr || m->is_method(), "not a method");
return (Method*)m;


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,11 +34,10 @@
#include <new>
class nmethod;
class CodeBlob;
class CompiledMethod;
class Metadata;
class NativeMovConstReg;
class nmethod;
// Types in this file:
// relocInfo
@ -571,7 +570,7 @@ class RelocIterator : public StackObj {
address _limit; // stop producing relocations after this _addr
relocInfo* _current; // the current relocation information
relocInfo* _end; // end marker; we're done iterating when _current == _end
CompiledMethod* _code; // compiled method containing _addr
nmethod* _code; // compiled method containing _addr
address _addr; // instruction to which the relocation applies
short _databuf; // spare buffer for compressed data
short* _data; // pointer to the relocation's data
@ -601,13 +600,13 @@ class RelocIterator : public StackObj {
void initialize_misc();
void initialize(CompiledMethod* nm, address begin, address limit);
void initialize(nmethod* nm, address begin, address limit);
RelocIterator() { initialize_misc(); }
public:
// constructor
RelocIterator(CompiledMethod* nm, address begin = nullptr, address limit = nullptr);
RelocIterator(nmethod* nm, address begin = nullptr, address limit = nullptr);
RelocIterator(CodeSection* cb, address begin = nullptr, address limit = nullptr);
// get next reloc info, return !eos
@ -640,7 +639,7 @@ class RelocIterator : public StackObj {
relocType type() const { return current()->type(); }
int format() const { return (relocInfo::have_format) ? current()->format() : 0; }
address addr() const { return _addr; }
CompiledMethod* code() const { return _code; }
nmethod* code() const { return _code; }
short* data() const { return _data; }
int datalen() const { return _datalen; }
bool has_current() const { return _datalen >= 0; }
@ -827,7 +826,7 @@ class Relocation {
public:
// accessors which only make sense for a bound Relocation
address addr() const { return binding()->addr(); }
CompiledMethod* code() const { return binding()->code(); }
nmethod* code() const { return binding()->code(); }
bool addr_in_const() const { return binding()->addr_in_const(); }
protected:
short* data() const { return binding()->data(); }
@ -1463,7 +1462,7 @@ APPLY_TO_RELOCATIONS(EACH_CASE);
#undef EACH_CASE_AUX
#undef EACH_CASE
inline RelocIterator::RelocIterator(CompiledMethod* nm, address begin, address limit) {
inline RelocIterator::RelocIterator(nmethod* nm, address begin, address limit) {
initialize(nm, begin, limit);
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,7 +32,7 @@
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
ScopeDesc::ScopeDesc(const CompiledMethod* code, PcDesc* pd, bool ignore_objects) {
ScopeDesc::ScopeDesc(const nmethod* code, PcDesc* pd, bool ignore_objects) {
int obj_decode_offset = ignore_objects ? DebugInformationRecorder::serialized_null : pd->obj_decode_offset();
_code = code;
_decode_offset = pd->scope_decode_offset();


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,7 +41,7 @@ class SimpleScopeDesc : public StackObj {
int _bci;
public:
SimpleScopeDesc(CompiledMethod* code, address pc) {
SimpleScopeDesc(nmethod* code, address pc) {
PcDesc* pc_desc = code->pc_desc_at(pc);
assert(pc_desc != nullptr, "Must be able to find matching PcDesc");
// save this here so we only have to look up the PcDesc once
@ -61,7 +61,7 @@ class SimpleScopeDesc : public StackObj {
class ScopeDesc : public ResourceObj {
public:
// Constructor
ScopeDesc(const CompiledMethod* code, PcDesc* pd, bool ignore_objects = false);
ScopeDesc(const nmethod* code, PcDesc* pd, bool ignore_objects = false);
// Direct access to scope
ScopeDesc* at_offset(int decode_offset) { return new ScopeDesc(this, decode_offset); }
@ -120,7 +120,7 @@ class ScopeDesc : public ResourceObj {
GrowableArray<ScopeValue*>* _objects;
// Nmethod information
const CompiledMethod* _code;
const nmethod* _code;
// Decoding operations
void decode_body();
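// --- Illustrative sketch (editor's note, not part of this patch) ---
// Debug-info lookup for a compiled pc now takes an nmethod* end to end.
// 'nm' and 'pc' are assumed to identify a recorded safepoint, and the bci()
// accessor on SimpleScopeDesc is assumed to exist as in the current sources.
#include "code/nmethod.hpp"
#include "code/scopeDesc.hpp"

static int bci_at_compiled_pc(nmethod* nm, address pc) {
  SimpleScopeDesc ssd(nm, pc);   // asserts that a matching PcDesc exists
  return ssd.bci();
}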


@ -1,5 +1,5 @@
/*
* Copyright (c) 2010, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2010, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -212,7 +212,7 @@ bool CompilationPolicy::force_comp_at_level_simple(const methodHandle& method) {
}
CompLevel CompilationPolicy::comp_level(Method* method) {
CompiledMethod *nm = method->code();
nmethod *nm = method->code();
if (nm != nullptr && nm->is_in_use()) {
return (CompLevel)nm->comp_level();
}
@ -708,7 +708,7 @@ void CompilationPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
}
nmethod* CompilationPolicy::event(const methodHandle& method, const methodHandle& inlinee,
int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, TRAPS) {
int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) {
if (PrintTieredEvents) {
print_event(bci == InvocationEntryBci ? CALL : LOOP, method(), inlinee(), bci, comp_level);
}
@ -1137,7 +1137,7 @@ CompLevel CompilationPolicy::loop_event(const methodHandle& method, CompLevel cu
// Handle the invocation event.
void CompilationPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
CompLevel level, CompiledMethod* nm, TRAPS) {
CompLevel level, nmethod* nm, TRAPS) {
if (should_create_mdo(mh, level)) {
create_mdo(mh, THREAD);
}
@ -1152,7 +1152,7 @@ void CompilationPolicy::method_invocation_event(const methodHandle& mh, const me
// Handle the back branch event. Notice that we can compile the method
// with a regular entry from here.
void CompilationPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh,
int bci, CompLevel level, CompiledMethod* nm, TRAPS) {
int bci, CompLevel level, nmethod* nm, TRAPS) {
if (should_create_mdo(mh, level)) {
create_mdo(mh, THREAD);
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2010, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2010, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -235,9 +235,9 @@ class CompilationPolicy : AllStatic {
// Get a compilation level for a given method.
static CompLevel comp_level(Method* method);
static void method_invocation_event(const methodHandle& method, const methodHandle& inlinee,
CompLevel level, CompiledMethod* nm, TRAPS);
CompLevel level, nmethod* nm, TRAPS);
static void method_back_branch_event(const methodHandle& method, const methodHandle& inlinee,
int bci, CompLevel level, CompiledMethod* nm, TRAPS);
int bci, CompLevel level, nmethod* nm, TRAPS);
static void set_increase_threshold_at_ratio() { _increase_threshold_at_ratio = 100 / (100 - (double)IncreaseFirstTierCompileThresholdAt); }
static void set_start_time(jlong t) { _start_time = t; }
@ -265,7 +265,7 @@ public:
// Return initial compile level to use with Xcomp (depends on compilation mode).
static void reprofile(ScopeDesc* trap_scope, bool is_osr);
static nmethod* event(const methodHandle& method, const methodHandle& inlinee,
int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, TRAPS);
int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS);
// Select task is called by CompileBroker. We should return a task or nullptr.
static CompileTask* select_task(CompileQueue* compile_queue);
// Tell the runtime if we think a given method is adequately profiled.


@ -1376,9 +1376,8 @@ nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
if (osr_bci == InvocationEntryBci) {
// standard compilation
CompiledMethod* method_code = method->code();
if (method_code != nullptr && method_code->is_nmethod()
&& (compile_reason != CompileTask::Reason_DirectivesChanged)) {
nmethod* method_code = method->code();
if (method_code != nullptr && (compile_reason != CompileTask::Reason_DirectivesChanged)) {
if (compilation_is_complete(method, osr_bci, comp_level)) {
return (nmethod*) method_code;
}
@ -1481,12 +1480,7 @@ nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
// return requested nmethod
// We accept a higher level osr method
if (osr_bci == InvocationEntryBci) {
CompiledMethod* code = method->code();
if (code == nullptr) {
return (nmethod*) code;
} else {
return code->as_nmethod_or_null();
}
return method->code();
}
return method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
}
@ -1511,7 +1505,7 @@ bool CompileBroker::compilation_is_complete(const methodHandle& method,
if (method->is_not_compilable(comp_level)) {
return true;
} else {
CompiledMethod* result = method->code();
nmethod* result = method->code();
if (result == nullptr) return false;
return comp_level == result->comp_level();
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -498,7 +498,6 @@ static void update_register_map1(const ImmutableOopMap* oopmap, const frame* fr,
VMReg reg = omv.content_reg();
address loc = fr->oopmapreg_to_location(omv.reg(), reg_map);
reg_map->set_location(reg, loc);
//DEBUG_ONLY(nof_callee++;)
}
}
}
@ -520,15 +519,7 @@ void ImmutableOopMap::update_register_map(const frame *fr, RegisterMap *reg_map)
// Scan through oopmap and find location of all callee-saved registers
// (we do not do update in place, since info could be overwritten)
DEBUG_ONLY(int nof_callee = 0;)
update_register_map1(this, fr, reg_map);
// Check that runtime stubs save all callee-saved registers
#ifdef COMPILER2
assert(cb == nullptr || cb->is_compiled_by_c1() || cb->is_compiled_by_jvmci() || !cb->is_runtime_stub() ||
(nof_callee >= SAVED_ON_ENTRY_REG_COUNT || nof_callee >= C_SAVED_ON_ENTRY_REG_COUNT),
"must save all");
#endif // COMPILER2
}
const ImmutableOopMap* OopMapSet::find_map(const frame *fr) {


@ -336,7 +336,7 @@ public:
_hr(hr), _failures(false) {}
void do_code_blob(CodeBlob* cb) {
nmethod* nm = (cb == nullptr) ? nullptr : cb->as_compiled_method()->as_nmethod_or_null();
nmethod* nm = (cb == nullptr) ? nullptr : cb->as_nmethod_or_null();
if (nm != nullptr) {
// Verify that the nmethod is live
VerifyCodeRootOopClosure oop_cl(_hr);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,19 +23,18 @@
*/
#include "precompiled.hpp"
#include "code/compiledMethod.hpp"
#include "code/nmethod.hpp"
#include "gc/shared/gcBehaviours.hpp"
IsUnloadingBehaviour* IsUnloadingBehaviour::_current = nullptr;
bool IsUnloadingBehaviour::is_unloading(CompiledMethod* cm) {
if (cm->method()->can_be_allocated_in_NonNMethod_space()) {
bool IsUnloadingBehaviour::is_unloading(nmethod* nm) {
if (nm->method()->can_be_allocated_in_NonNMethod_space()) {
// When the nmethod is in NonNMethod space, we may reach here without IsUnloadingBehaviour.
// However, we only allow this for special methods which never get unloaded.
return false;
}
return _current->has_dead_oop(cm) || cm->as_nmethod()->is_cold();
return _current->has_dead_oop(nm) || nm->is_cold();
}
class IsCompiledMethodUnloadingOopClosure: public OopClosure {
@ -70,12 +69,8 @@ public:
}
};
bool ClosureIsUnloadingBehaviour::has_dead_oop(CompiledMethod* cm) const {
if (cm->is_nmethod()) {
IsCompiledMethodUnloadingOopClosure cl(_cl);
static_cast<nmethod*>(cm)->oops_do(&cl, true /* allow_dead */);
return cl.is_unloading();
} else {
return false;
}
bool ClosureIsUnloadingBehaviour::has_dead_oop(nmethod* nm) const {
IsCompiledMethodUnloadingOopClosure cl(_cl);
nm->oops_do(&cl, true /* allow_dead */);
return cl.is_unloading();
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,14 +28,14 @@
#include "memory/iterator.hpp"
#include "oops/oopsHierarchy.hpp"
// This is the behaviour for checking if a CompiledMethod is unloading
// This is the behaviour for checking if an nmethod is unloading
// or has unloaded due to having phantomly dead oops in it after a GC.
class IsUnloadingBehaviour {
static IsUnloadingBehaviour* _current;
public:
static bool is_unloading(CompiledMethod* cm);
virtual bool has_dead_oop(CompiledMethod* cm) const = 0;
static bool is_unloading(nmethod* nm);
virtual bool has_dead_oop(nmethod* nm) const = 0;
static IsUnloadingBehaviour* current() { return _current; }
static void set_current(IsUnloadingBehaviour* current) { _current = current; }
};
@ -48,7 +48,7 @@ public:
: _cl(is_alive)
{ }
virtual bool has_dead_oop(CompiledMethod* cm) const;
virtual bool has_dead_oop(nmethod* nm) const;
};
#endif // SHARE_GC_SHARED_GCBEHAVIOURS_HPP
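// --- Illustrative sketch (editor's note, not part of this patch) ---
// A GC now overrides has_dead_oop(nmethod*) directly instead of unpacking a
// CompiledMethod first. The trivial subclass below only shows the shape of
// the interface declared above; it treats every nmethod as having no dead oops.
class AlwaysAliveIsUnloadingBehaviour : public IsUnloadingBehaviour {
public:
  virtual bool has_dead_oop(nmethod* nm) const { return false; }   // nothing is ever unloading
};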


@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,7 +38,7 @@ CodeCacheUnloadingTask::CodeCacheUnloadingTask(uint num_workers, bool unloading_
_first_nmethod(nullptr),
_claimed_nmethod(nullptr) {
// Get first alive nmethod
CompiledMethodIterator iter(CompiledMethodIterator::all_blobs);
NMethodIterator iter(NMethodIterator::all_blobs);
if (iter.next()) {
_first_nmethod = iter.method();
}
@ -49,15 +49,15 @@ CodeCacheUnloadingTask::~CodeCacheUnloadingTask() {
CodeCache::verify_clean_inline_caches();
}
void CodeCacheUnloadingTask::claim_nmethods(CompiledMethod** claimed_nmethods, int *num_claimed_nmethods) {
CompiledMethod* first;
CompiledMethodIterator last(CompiledMethodIterator::all_blobs);
void CodeCacheUnloadingTask::claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) {
nmethod* first;
NMethodIterator last(NMethodIterator::all_blobs);
do {
*num_claimed_nmethods = 0;
first = _claimed_nmethod;
last = CompiledMethodIterator(CompiledMethodIterator::all_blobs, first);
last = NMethodIterator(NMethodIterator::all_blobs, first);
if (first != nullptr) {
@ -81,7 +81,7 @@ void CodeCacheUnloadingTask::work(uint worker_id) {
}
int num_claimed_nmethods;
CompiledMethod* claimed_nmethods[MaxClaimNmethods];
nmethod* claimed_nmethods[MaxClaimNmethods];
while (true) {
claim_nmethods(claimed_nmethods, &num_claimed_nmethods);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,8 +36,8 @@ class CodeCacheUnloadingTask {
const uint _num_workers;
// Variables used to claim nmethods.
CompiledMethod* _first_nmethod;
CompiledMethod* volatile _claimed_nmethod;
nmethod* _first_nmethod;
nmethod* volatile _claimed_nmethod;
public:
CodeCacheUnloadingTask(uint num_workers, bool unloading_occurred);
@ -45,7 +45,7 @@ public:
private:
static const int MaxClaimNmethods = 16;
void claim_nmethods(CompiledMethod** claimed_nmethods, int *num_claimed_nmethods);
void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods);
public:
// Cleaning and unloading of nmethods.
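// --- Illustrative sketch (editor's note, not part of this patch) ---
// With CompiledMethodIterator removed, code-cache walks such as the claiming
// loop above use NMethodIterator directly. A minimal traversal; locking and
// liveness filtering are the caller's responsibility and are omitted here.
#include "code/codeCache.hpp"

static int count_nmethods_in_code_cache() {
  int count = 0;
  NMethodIterator iter(NMethodIterator::all_blobs);
  while (iter.next()) {
    nmethod* nm = iter.method();   // the currently claimed nmethod
    if (nm != nullptr) {
      count++;
    }
  }
  return count;
}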


@ -1,4 +1,5 @@
/*
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -77,8 +78,7 @@ public:
class ShenandoahIsUnloadingBehaviour : public IsUnloadingBehaviour {
public:
virtual bool has_dead_oop(CompiledMethod* method) const {
nmethod* const nm = method->as_nmethod();
virtual bool has_dead_oop(nmethod* nm) const {
assert(ShenandoahHeap::heap()->is_concurrent_weak_root_in_progress(), "Only for this phase");
ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);
ShenandoahReentrantLocker locker(data->lock());
@ -90,27 +90,24 @@ public:
class ShenandoahCompiledICProtectionBehaviour : public CompiledICProtectionBehaviour {
public:
virtual bool lock(CompiledMethod* method) {
nmethod* const nm = method->as_nmethod();
virtual bool lock(nmethod* nm) {
ShenandoahReentrantLock* const lock = ShenandoahNMethod::lock_for_nmethod(nm);
assert(lock != nullptr, "Not yet registered?");
lock->lock();
return true;
}
virtual void unlock(CompiledMethod* method) {
nmethod* const nm = method->as_nmethod();
virtual void unlock(nmethod* nm) {
ShenandoahReentrantLock* const lock = ShenandoahNMethod::lock_for_nmethod(nm);
assert(lock != nullptr, "Not yet registered?");
lock->unlock();
}
virtual bool is_safe(CompiledMethod* method) {
if (SafepointSynchronize::is_at_safepoint() || method->is_unloading()) {
virtual bool is_safe(nmethod* nm) {
if (SafepointSynchronize::is_at_safepoint() || nm->is_unloading()) {
return true;
}
nmethod* const nm = method->as_nmethod();
ShenandoahReentrantLock* const lock = ShenandoahNMethod::lock_for_nmethod(nm);
assert(lock != nullptr, "Not yet registered?");
return lock->owned_by_self();


@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -75,8 +75,7 @@ public:
class XIsUnloadingBehaviour : public IsUnloadingBehaviour {
public:
virtual bool has_dead_oop(CompiledMethod* method) const {
nmethod* const nm = method->as_nmethod();
virtual bool has_dead_oop(nmethod* nm) const {
XReentrantLock* const lock = XNMethod::lock_for_nmethod(nm);
XLocker<XReentrantLock> locker(lock);
XIsUnloadingOopClosure cl;
@ -87,25 +86,22 @@ public:
class XCompiledICProtectionBehaviour : public CompiledICProtectionBehaviour {
public:
virtual bool lock(CompiledMethod* method) {
nmethod* const nm = method->as_nmethod();
virtual bool lock(nmethod* nm) {
XReentrantLock* const lock = XNMethod::lock_for_nmethod(nm);
lock->lock();
return true;
}
virtual void unlock(CompiledMethod* method) {
nmethod* const nm = method->as_nmethod();
virtual void unlock(nmethod* nm) {
XReentrantLock* const lock = XNMethod::lock_for_nmethod(nm);
lock->unlock();
}
virtual bool is_safe(CompiledMethod* method) {
if (SafepointSynchronize::is_at_safepoint() || method->is_unloading()) {
virtual bool is_safe(nmethod* nm) {
if (SafepointSynchronize::is_at_safepoint() || nm->is_unloading()) {
return true;
}
nmethod* const nm = method->as_nmethod();
XReentrantLock* const lock = XNMethod::lock_for_nmethod(nm);
return lock->is_owned();
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -74,8 +74,7 @@ public:
class ZIsUnloadingBehaviour : public IsUnloadingBehaviour {
public:
virtual bool has_dead_oop(CompiledMethod* method) const {
nmethod* const nm = method->as_nmethod();
virtual bool has_dead_oop(nmethod* nm) const {
ZReentrantLock* const lock = ZNMethod::lock_for_nmethod(nm);
ZLocker<ZReentrantLock> locker(lock);
if (!ZNMethod::is_armed(nm)) {
@ -90,25 +89,22 @@ public:
class ZCompiledICProtectionBehaviour : public CompiledICProtectionBehaviour {
public:
virtual bool lock(CompiledMethod* method) {
nmethod* const nm = method->as_nmethod();
virtual bool lock(nmethod* nm) {
ZReentrantLock* const lock = ZNMethod::lock_for_nmethod(nm);
lock->lock();
return true;
}
virtual void unlock(CompiledMethod* method) {
nmethod* const nm = method->as_nmethod();
virtual void unlock(nmethod* nm) {
ZReentrantLock* const lock = ZNMethod::lock_for_nmethod(nm);
lock->unlock();
}
virtual bool is_safe(CompiledMethod* method) {
if (SafepointSynchronize::is_at_safepoint() || method->is_unloading()) {
virtual bool is_safe(nmethod* nm) {
if (SafepointSynchronize::is_at_safepoint() || nm->is_unloading()) {
return true;
}
nmethod* const nm = method->as_nmethod();
ZReentrantLock* const lock = ZNMethod::lock_for_nmethod(nm);
return lock->is_owned();
}


@ -1305,7 +1305,7 @@ C2V_VMENTRY(void, reprofile, (JNIEnv* env, jobject, ARGUMENT_PAIR(method)))
}
NOT_PRODUCT(method->set_compiled_invocation_count(0));
CompiledMethod* code = method->code();
nmethod* code = method->code();
if (code != nullptr) {
code->make_not_entrant();
}


@ -256,7 +256,7 @@ extern void vm_exit(int code);
// been deoptimized. If that is the case we return the deopt blob
// unpack_with_exception entry instead. This makes life for the exception blob easier
// because making that same check and diverting is painful from assembly language.
JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* current, oopDesc* ex, address pc, CompiledMethod*& cm))
JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* current, oopDesc* ex, address pc, nmethod*& nm))
// Reset method handle flag.
current->set_is_method_handle_return(false);
@ -267,10 +267,9 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* c
// has updated oops.
StackWatermarkSet::after_unwind(current);
cm = CodeCache::find_compiled(pc);
assert(cm != nullptr, "this is not a compiled method");
nm = CodeCache::find_nmethod(pc);
// Adjust the pc as needed.
if (cm->is_deopt_pc(pc)) {
if (nm->is_deopt_pc(pc)) {
RegisterMap map(current,
RegisterMap::UpdateMap::skip,
RegisterMap::ProcessFrames::include,
@ -291,10 +290,10 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* c
if (log_is_enabled(Info, exceptions)) {
ResourceMark rm;
stringStream tempst;
assert(cm->method() != nullptr, "Unexpected null method()");
assert(nm->method() != nullptr, "Unexpected null method()");
tempst.print("JVMCI compiled method <%s>\n"
" at PC" INTPTR_FORMAT " for thread " INTPTR_FORMAT,
cm->method()->print_value_string(), p2i(pc), p2i(current));
nm->method()->print_value_string(), p2i(pc), p2i(current));
Exceptions::log_exception(exception, tempst.as_string());
}
// for AbortVMOnException flag
@ -332,10 +331,10 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* c
// ExceptionCache is used only for exceptions at call sites and not for implicit exceptions
if (guard_pages_enabled) {
address fast_continuation = cm->handler_for_exception_and_pc(exception, pc);
address fast_continuation = nm->handler_for_exception_and_pc(exception, pc);
if (fast_continuation != nullptr) {
// Set flag if return address is a method handle call site.
current->set_is_method_handle_return(cm->is_method_handle_return(pc));
current->set_is_method_handle_return(nm->is_method_handle_return(pc));
return fast_continuation;
}
}
@ -356,7 +355,7 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* c
current->clear_exception_oop_and_pc();
bool recursive_exception = false;
continuation = SharedRuntime::compute_compiled_exc_handler(cm, pc, exception, false, false, recursive_exception);
continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false, recursive_exception);
// If an exception was thrown during exception dispatch, the exception oop may have changed
current->set_exception_oop(exception());
current->set_exception_pc(pc);
@ -368,12 +367,12 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* c
// Checking for exception oop equality is not
// sufficient because some exceptions are pre-allocated and reused.
if (continuation != nullptr && !recursive_exception && !SharedRuntime::deopt_blob()->contains(continuation)) {
cm->add_handler_for_exception_and_pc(exception, pc, continuation);
nm->add_handler_for_exception_and_pc(exception, pc, continuation);
}
}
// Set flag if return address is a method handle call site.
current->set_is_method_handle_return(cm->is_method_handle_return(pc));
current->set_is_method_handle_return(nm->is_method_handle_return(pc));
if (log_is_enabled(Info, exceptions)) {
ResourceMark rm;
@ -395,18 +394,18 @@ address JVMCIRuntime::exception_handler_for_pc(JavaThread* current) {
address pc = current->exception_pc();
// Still in Java mode
DEBUG_ONLY(NoHandleMark nhm);
CompiledMethod* cm = nullptr;
nmethod* nm = nullptr;
address continuation = nullptr;
{
// Enter VM mode by calling the helper
ResetNoHandleMark rnhm;
continuation = exception_handler_for_pc_helper(current, exception, pc, cm);
continuation = exception_handler_for_pc_helper(current, exception, pc, nm);
}
// Back in JAVA, use no oops DON'T safepoint
// Now check to see if the compiled method we were called from is now deoptimized.
// If so we must return to the deopt blob and deoptimize the nmethod
if (cm != nullptr && caller_is_deopted()) {
if (nm != nullptr && caller_is_deopted()) {
continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
}
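// --- Illustrative sketch (editor's note, not part of this patch) ---
// The per-nmethod ExceptionCache fast path used above, reduced to its
// essentials: look up a cached handler for (exception, pc), otherwise compute
// one and memoize it. 'compute_handler' stands in for the call to
// SharedRuntime::compute_compiled_exc_handler() and is an assumption of this
// sketch, as is the surrounding locking/safepoint discipline.
#include "code/nmethod.hpp"
#include "runtime/handles.hpp"

static address cached_or_computed_handler(nmethod* nm, Handle exception, address pc,
                                          address (*compute_handler)(nmethod*, Handle, address)) {
  if (address cached = nm->handler_for_exception_and_pc(exception, pc)) {
    return cached;                                                  // fast path: cache hit
  }
  address handler = compute_handler(nm, exception, pc);
  if (handler != nullptr) {
    nm->add_handler_for_exception_and_pc(exception, pc, handler);   // memoize for next time
  }
  return handler;
}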
@ -675,7 +674,7 @@ static void decipher(jlong v, bool ignoreZero) {
if (cb) {
if (cb->is_nmethod()) {
char buf[O_BUFLEN];
tty->print("%s [" INTPTR_FORMAT "+" JLONG_FORMAT "]", cb->as_nmethod_or_null()->method()->name_and_sig_as_C_string(buf, O_BUFLEN), p2i(cb->code_begin()), (jlong)((address)v - cb->code_begin()));
tty->print("%s [" INTPTR_FORMAT "+" JLONG_FORMAT "]", cb->as_nmethod()->method()->name_and_sig_as_C_string(buf, O_BUFLEN), p2i(cb->code_begin()), (jlong)((address)v - cb->code_begin()));
return;
}
cb->print_value_on(tty);
@ -2208,7 +2207,7 @@ JVMCI::CodeInstallResult JVMCIRuntime::register_method(JVMCIEnv* JVMCIENV,
assert(!nmethod_mirror.is_hotspot() || data->get_nmethod_mirror(nm, /* phantom_ref */ false) == nullptr, "must be");
if (entry_bci == InvocationEntryBci) {
// If there is an old version we're done with it
CompiledMethod* old = method->code();
nmethod* old = method->code();
if (TraceMethodReplacement && old != nullptr) {
ResourceMark rm;
char *method_name = method->name_and_sig_as_C_string();


@ -266,7 +266,7 @@
nonstatic_field(Method, _vtable_index, int) \
nonstatic_field(Method, _intrinsic_id, u2) \
nonstatic_field(Method, _flags._status, u4) \
volatile_nonstatic_field(Method, _code, CompiledMethod*) \
volatile_nonstatic_field(Method, _code, nmethod*) \
volatile_nonstatic_field(Method, _from_compiled_entry, address) \
\
nonstatic_field(MethodCounters, _invoke_mask, int) \


@ -1011,7 +1011,7 @@ void Method::set_native_function(address function, bool post_event_flag) {
// This function can be called more than once. We must make sure that we always
// use the latest registered method -> check if a stub already has been generated.
// If so, we have to make it not_entrant.
CompiledMethod* nm = code(); // Put it into local variable to guard against concurrent updates
nmethod* nm = code(); // Put it into local variable to guard against concurrent updates
if (nm != nullptr) {
nm->make_not_entrant();
}
@ -1159,7 +1159,7 @@ void Method::clear_code() {
_code = nullptr;
}
void Method::unlink_code(CompiledMethod *compare) {
void Method::unlink_code(nmethod *compare) {
ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
// We need to check if either the _code or _from_compiled_code_entry_point
// refers to this nmethod because there is a race in setting these two fields
@ -1303,12 +1303,12 @@ address Method::verified_code_entry() {
// Not inline to avoid circular ref.
bool Method::check_code() const {
// cached in a register or local. There's a race on the value of the field.
CompiledMethod *code = Atomic::load_acquire(&_code);
nmethod *code = Atomic::load_acquire(&_code);
return code == nullptr || (code->method() == nullptr) || (code->method() == (Method*)this && !code->is_osr_method());
}
// Install compiled code. Instantly it can execute.
void Method::set_code(const methodHandle& mh, CompiledMethod *code) {
void Method::set_code(const methodHandle& mh, nmethod *code) {
assert_lock_strong(CompiledMethod_lock);
assert( code, "use clear_code to remove code" );
assert( mh->check_code(), "" );


@ -62,7 +62,7 @@ class MethodData;
class MethodCounters;
class ConstMethod;
class InlineTableSizes;
class CompiledMethod;
class nmethod;
class InterpreterOopMap;
class Method : public Metadata {
@ -93,14 +93,14 @@ class Method : public Metadata {
address _i2i_entry; // All-args-on-stack calling convention
// Entry point for calling from compiled code, to compiled code if it exists
// or else the interpreter.
volatile address _from_compiled_entry; // Cache of: _code ? _code->entry_point() : _adapter->c2i_entry()
volatile address _from_compiled_entry; // Cache of: _code ? _code->entry_point() : _adapter->c2i_entry()
// The entry point for calling both from and to compiled code is
// "_code->entry_point()". Because of tiered compilation and de-opt, this
// field can come and go. It can transition from null to not-null at any
// time (whenever a compile completes). It can transition from not-null to
// null only at safepoints (because of a de-opt).
CompiledMethod* volatile _code; // Points to the corresponding piece of native code
volatile address _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry
nmethod* volatile _code; // Points to the corresponding piece of native code
volatile address _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry
// Constructor
Method(ConstMethod* xconst, AccessFlags access_flags, Symbol* name);
@ -357,10 +357,10 @@ class Method : public Metadata {
// nmethod/verified compiler entry
address verified_code_entry();
bool check_code() const; // Not inline to avoid circular ref
CompiledMethod* code() const;
nmethod* code() const;
// Locks CompiledMethod_lock if not held.
void unlink_code(CompiledMethod *compare);
void unlink_code(nmethod *compare);
// Locks CompiledMethod_lock if not held.
void unlink_code();
@ -373,7 +373,7 @@ private:
}
public:
static void set_code(const methodHandle& mh, CompiledMethod* code);
static void set_code(const methodHandle& mh, nmethod* code);
void set_adapter_entry(AdapterHandlerEntry* adapter) {
_adapter = adapter;
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,7 +39,7 @@ inline address Method::from_interpreted_entry() const {
return Atomic::load_acquire(&_from_interpreted_entry);
}
inline CompiledMethod* Method::code() const {
inline nmethod* Method::code() const {
assert( check_code(), "" );
return Atomic::load_acquire(&_code);
}
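// --- Illustrative sketch (editor's note, not part of this patch) ---
// Method::code() now hands back an nmethod* directly, so callers no longer
// need a CompiledMethod -> nmethod narrowing step. 'm' is assumed to be a
// valid Method*; the snapshot is racy, as Method::check_code() notes.
#include "code/nmethod.hpp"
#include "oops/method.inline.hpp"

static bool method_has_live_code(Method* m) {
  nmethod* nm = m->code();          // may be replaced or cleared concurrently
  return nm != nullptr && nm->is_in_use();
}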


@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "code/compiledMethod.hpp"
#include "code/nmethod.hpp"
#include "code/scopeDesc.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetStackChunk.hpp"
@ -108,9 +108,9 @@ frame stackChunkOopDesc::sender(const frame& f, RegisterMap* map) {
return Continuation::continuation_parent_frame(map);
}
static int num_java_frames(CompiledMethod* cm, address pc) {
static int num_java_frames(nmethod* nm, address pc) {
int count = 0;
for (ScopeDesc* scope = cm->scope_desc_at(pc); scope != nullptr; scope = scope->sender()) {
for (ScopeDesc* scope = nm->scope_desc_at(pc); scope != nullptr; scope = scope->sender()) {
count++;
}
return count;
@ -118,8 +118,8 @@ static int num_java_frames(CompiledMethod* cm, address pc) {
static int num_java_frames(const StackChunkFrameStream<ChunkFrames::Mixed>& f) {
assert(f.is_interpreted()
|| (f.cb() != nullptr && f.cb()->is_compiled() && f.cb()->as_compiled_method()->is_java_method()), "");
return f.is_interpreted() ? 1 : num_java_frames(f.cb()->as_compiled_method(), f.orig_pc());
|| (f.cb() != nullptr && f.cb()->is_nmethod() && f.cb()->as_nmethod()->is_java_method()), "");
return f.is_interpreted() ? 1 : num_java_frames(f.cb()->as_nmethod(), f.orig_pc());
}
int stackChunkOopDesc::num_java_frames() const {
@ -560,11 +560,11 @@ bool stackChunkOopDesc::verify(size_t* out_size, int* out_oops, int* out_frames,
iterate_stack(&closure);
assert(!is_empty() || closure._cb == nullptr, "");
if (closure._cb != nullptr && closure._cb->is_compiled()) {
if (closure._cb != nullptr && closure._cb->is_nmethod()) {
assert(argsize() ==
(closure._cb->as_compiled_method()->method()->num_stack_arg_slots()*VMRegImpl::stack_slot_size) >>LogBytesPerWord,
(closure._cb->as_nmethod()->method()->num_stack_arg_slots()*VMRegImpl::stack_slot_size) >>LogBytesPerWord,
"chunk argsize: %d bottom frame argsize: %d", argsize(),
(closure._cb->as_compiled_method()->method()->num_stack_arg_slots()*VMRegImpl::stack_slot_size) >>LogBytesPerWord);
(closure._cb->as_nmethod()->method()->num_stack_arg_slots()*VMRegImpl::stack_slot_size) >>LogBytesPerWord);
}
assert(closure._num_interpreted_frames == 0 || has_mixed_frames(), "");


@ -26,7 +26,6 @@
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
@ -1851,9 +1850,8 @@ static void trace_exception(outputStream* st, oop exception_oop, address excepti
exception_oop->print_value_on(&tempst);
tempst.print(" in ");
CodeBlob* blob = CodeCache::find_blob(exception_pc);
if (blob->is_compiled()) {
CompiledMethod* cm = blob->as_compiled_method_or_null();
cm->method()->print_value_on(&tempst);
if (blob->is_nmethod()) {
blob->as_nmethod()->method()->print_value_on(&tempst);
} else if (blob->is_runtime_stub()) {
tempst.print("<runtime-stub>");
} else {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -80,7 +80,7 @@ class vframeStreamForte : public vframeStreamCommon {
};
static bool is_decipherable_compiled_frame(JavaThread* thread, frame* fr, CompiledMethod* nm);
static bool is_decipherable_compiled_frame(JavaThread* thread, frame* fr, nmethod* nm);
static bool is_decipherable_interpreted_frame(JavaThread* thread,
frame* fr,
Method** method_p,
@ -150,7 +150,7 @@ void vframeStreamForte::forte_next() {
// Determine if 'fr' is a decipherable compiled frame. We are already
// assured that fr is for a java compiled method.
static bool is_decipherable_compiled_frame(JavaThread* thread, frame* fr, CompiledMethod* nm) {
static bool is_decipherable_compiled_frame(JavaThread* thread, frame* fr, nmethod* nm) {
assert(nm->is_java_method(), "invariant");
if (thread->has_last_Java_frame() && thread->last_Java_pc() == fr->pc()) {
@ -413,9 +413,9 @@ static bool find_initial_Java_frame(JavaThread* thread,
return false;
}
if (candidate.cb()->is_compiled()) {
if (candidate.cb()->is_nmethod()) {
CompiledMethod* nm = candidate.cb()->as_compiled_method();
nmethod* nm = candidate.cb()->as_nmethod();
*method_p = nm->method();
// If the frame is not decipherable, then the value of -1


@ -768,9 +768,8 @@ class VM_WhiteBoxDeoptimizeFrames : public VM_WhiteBoxOperation {
if (f->can_be_deoptimized() && !f->is_deoptimized_frame()) {
Deoptimization::deoptimize(t, *f);
if (_make_not_entrant) {
CompiledMethod* cm = CodeCache::find_compiled(f->pc());
assert(cm != nullptr, "sanity check");
cm->make_not_entrant();
nmethod* nm = CodeCache::find_nmethod(f->pc());
nm->make_not_entrant();
}
++_result;
}
@ -839,7 +838,7 @@ WB_ENTRY(jboolean, WB_IsMethodCompiled(JNIEnv* env, jobject o, jobject method, j
CHECK_JNI_EXCEPTION_(env, JNI_FALSE);
MutexLocker mu(Compile_lock);
methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
CompiledMethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code();
nmethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code();
if (code == nullptr) {
return JNI_FALSE;
}
@ -938,7 +937,7 @@ WB_ENTRY(jint, WB_GetMethodCompilationLevel(JNIEnv* env, jobject o, jobject meth
jmethodID jmid = reflected_method_to_jmid(thread, env, method);
CHECK_JNI_EXCEPTION_(env, CompLevel_none);
methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
CompiledMethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code();
nmethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code();
return (code != nullptr ? code->comp_level() : CompLevel_none);
WB_END
@ -1023,7 +1022,7 @@ WB_ENTRY(jint, WB_GetMethodEntryBci(JNIEnv* env, jobject o, jobject method))
jmethodID jmid = reflected_method_to_jmid(thread, env, method);
CHECK_JNI_EXCEPTION_(env, InvocationEntryBci);
methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
CompiledMethod* code = mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false);
nmethod* code = mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false);
return (code != nullptr && code->is_osr_method() ? code->osr_entry_bci() : InvocationEntryBci);
WB_END
@ -1097,8 +1096,8 @@ bool WhiteBox::compile_method(Method* method, int comp_level, int bci, JavaThrea
}
// Check code again because compilation may be finished before Compile_lock is acquired.
if (bci == InvocationEntryBci) {
CompiledMethod* code = mh->code();
if (code != nullptr && code->as_nmethod_or_null() != nullptr) {
nmethod* code = mh->code();
if (code != nullptr) {
return true;
}
} else if (mh->lookup_osr_nmethod_for(bci, comp_level, false) != nullptr) {
@ -1556,7 +1555,7 @@ WB_ENTRY(jobjectArray, WB_GetNMethod(JNIEnv* env, jobject o, jobject method, jbo
jmethodID jmid = reflected_method_to_jmid(thread, env, method);
CHECK_JNI_EXCEPTION_(env, nullptr);
methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
CompiledMethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code();
nmethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code();
jobjectArray result = nullptr;
if (code == nullptr) {
return result;
@ -1608,7 +1607,7 @@ CodeBlob* WhiteBox::allocate_code_blob(int size, CodeBlobType blob_type) {
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
blob = (BufferBlob*) CodeCache::allocate(full_size, blob_type);
if (blob != nullptr) {
::new (blob) BufferBlob("WB::DummyBlob", full_size);
::new (blob) BufferBlob("WB::DummyBlob", CodeBlobKind::Buffer, full_size);
}
}
// Track memory usage statistic after releasing CodeCache_lock


@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -105,10 +105,10 @@ bool Continuation::is_return_barrier_entry(const address pc) {
}
bool Continuation::is_continuation_enterSpecial(const frame& f) {
if (f.cb() == nullptr || !f.cb()->is_compiled()) {
if (f.cb() == nullptr || !f.cb()->is_nmethod()) {
return false;
}
Method* m = f.cb()->as_compiled_method()->method();
Method* m = f.cb()->as_nmethod()->method();
return (m != nullptr && m->is_continuation_enter_intrinsic());
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,14 +37,14 @@
int ContinuationEntry::_return_pc_offset = 0;
address ContinuationEntry::_return_pc = nullptr;
CompiledMethod* ContinuationEntry::_enter_special = nullptr;
nmethod* ContinuationEntry::_enter_special = nullptr;
int ContinuationEntry::_interpreted_entry_offset = 0;
void ContinuationEntry::set_enter_code(CompiledMethod* cm, int interpreted_entry_offset) {
void ContinuationEntry::set_enter_code(nmethod* nm, int interpreted_entry_offset) {
assert(_return_pc_offset != 0, "");
_return_pc = cm->code_begin() + _return_pc_offset;
_return_pc = nm->code_begin() + _return_pc_offset;
_enter_special = cm;
_enter_special = nm;
_interpreted_entry_offset = interpreted_entry_offset;
assert(_enter_special->code_contains(compiled_entry()), "entry not in enterSpecial");
assert(_enter_special->code_contains(interpreted_entry()), "entry not in enterSpecial");
@ -141,7 +141,7 @@ bool ContinuationEntry::assert_entry_frame_laid_out(JavaThread* thread) {
if (pc != StubRoutines::cont_returnBarrier()) {
CodeBlob* cb = pc != nullptr ? CodeCache::find_blob(pc) : nullptr;
assert(cb != nullptr, "sp: " INTPTR_FORMAT " pc: " INTPTR_FORMAT, p2i(sp), p2i(pc));
assert(cb->as_compiled_method()->method()->is_continuation_enter_intrinsic(), "");
assert(cb->as_nmethod()->method()->is_continuation_enter_intrinsic(), "");
}
return true;


@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,8 +32,8 @@
#include CPU_HEADER(continuationEntry)
class CompiledMethod;
class JavaThread;
class nmethod;
class OopMap;
class RegisterMap;
@ -56,12 +56,12 @@ public:
public:
static int _return_pc_offset; // friend gen_continuation_enter
static void set_enter_code(CompiledMethod* cm, int interpreted_entry_offset);
static void set_enter_code(nmethod* nm, int interpreted_entry_offset);
static bool is_interpreted_call(address call_address);
private:
static address _return_pc;
static CompiledMethod* _enter_special;
static nmethod* _enter_special;
static int _interpreted_entry_offset;
private:


@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,7 @@
#include "classfile/javaClasses.inline.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.inline.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/nmethod.inline.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.inline.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
@ -1070,8 +1070,8 @@ void FreezeBase::patch(const frame& f, frame& hf, const frame& caller, bool is_b
if (hf.is_compiled_frame()) {
if (f.is_deoptimized_frame()) { // TODO DEOPT: long term solution: unroll on freeze and patch pc
log_develop_trace(continuations)("Freezing deoptimized frame");
assert(f.cb()->as_compiled_method()->is_deopt_pc(f.raw_pc()), "");
assert(f.cb()->as_compiled_method()->is_deopt_pc(ContinuationHelper::Frame::real_pc(f)), "");
assert(f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
assert(f.cb()->as_nmethod()->is_deopt_pc(ContinuationHelper::Frame::real_pc(f)), "");
}
}
#endif
@ -1470,7 +1470,7 @@ void FreezeBase::throw_stack_overflow_on_humongous_chunk() {
#if INCLUDE_JVMTI
static int num_java_frames(ContinuationWrapper& cont) {
ResourceMark rm; // used for scope traversal in num_java_frames(CompiledMethod*, address)
ResourceMark rm; // used for scope traversal in num_java_frames(nmethod*, address)
int count = 0;
for (stackChunkOop chunk = cont.tail(); chunk != nullptr; chunk = chunk->parent()) {
count += chunk->num_java_frames();
@ -2290,7 +2290,7 @@ void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int n
if (hf.is_deoptimized_frame()) {
maybe_set_fastpath(f.sp());
} else if (_thread->is_interp_only_mode()
|| (_cont.is_preempted() && f.cb()->as_compiled_method()->is_marked_for_deoptimization())) {
|| (_cont.is_preempted() && f.cb()->as_nmethod()->is_marked_for_deoptimization())) {
// The caller of the safepoint stub when the continuation is preempted is not at a call instruction, and so
// cannot rely on nmethod patching for deopt.
assert(_thread->is_interp_only_mode() || stub_caller, "expected a stub-caller");
@ -2309,7 +2309,7 @@ void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int n
_cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance);
} else if (_cont.tail()->has_bitmap() && added_argsize > 0) {
address start = (address)(heap_frame_top + ContinuationHelper::CompiledFrame::size(hf) + frame::metadata_words_at_top);
int stack_args_slots = f.cb()->as_compiled_method()->method()->num_stack_arg_slots(false /* rounded */);
int stack_args_slots = f.cb()->as_nmethod()->method()->num_stack_arg_slots(false /* rounded */);
int argsize_in_bytes = stack_args_slots * VMRegImpl::stack_slot_size;
clear_bitmap_bits(start, start + argsize_in_bytes);
}
@ -2404,7 +2404,7 @@ void ThawBase::finish_thaw(frame& f) {
}
void ThawBase::push_return_frame(frame& f) { // see generate_cont_thaw
assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == f.cb()->as_compiled_method()->is_deopt_pc(f.raw_pc()), "");
assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == (f.pc() != f.raw_pc()), "");
LogTarget(Trace, continuations) lt;
@ -2491,10 +2491,10 @@ static void do_deopt_after_thaw(JavaThread* thread) {
fst.register_map()->set_include_argument_oops(false);
ContinuationHelper::update_register_map_with_callee(*fst.current(), fst.register_map());
for (; !fst.is_done(); fst.next()) {
if (fst.current()->cb()->is_compiled()) {
CompiledMethod* cm = fst.current()->cb()->as_compiled_method();
if (!cm->method()->is_continuation_native_intrinsic()) {
cm->make_deoptimized();
if (fst.current()->cb()->is_nmethod()) {
nmethod* nm = fst.current()->cb()->as_nmethod();
if (!nm->method()->is_continuation_native_intrinsic()) {
nm->make_deoptimized();
}
}
}
@ -2540,7 +2540,7 @@ static bool do_verify_after_thaw(JavaThread* thread, stackChunkOop chunk, output
fst.register_map()->set_include_argument_oops(false);
ContinuationHelper::update_register_map_with_callee(*fst.current(), fst.register_map());
for (; !fst.is_done() && !Continuation::is_continuation_enterSpecial(*fst.current()); fst.next()) {
if (fst.current()->cb()->is_compiled() && fst.current()->cb()->as_compiled_method()->is_marked_for_deoptimization()) {
if (fst.current()->cb()->is_nmethod() && fst.current()->cb()->as_nmethod()->is_marked_for_deoptimization()) {
st->print_cr(">>> do_verify_after_thaw deopt");
fst.current()->deoptimize(nullptr);
fst.current()->print_on(st);

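The call sites above all follow the same conversion: where CompiledMethod used to sit between CodeBlob and nmethod, the blob is now queried directly. A minimal sketch of the two query flavors this patch standardizes on, illustrative only and not part of the commit (it assumes the CodeBlob/nmethod accessors visible elsewhere in this diff):

    // Asserting flavor: the caller has already established the blob kind.
    static bool marked_checked(CodeBlob* cb) {
      return cb->is_nmethod() && cb->as_nmethod()->is_marked_for_deoptimization();
    }

    // Tolerant flavor: as_nmethod_or_null() hands back nullptr for other blobs.
    static bool marked_tolerant(CodeBlob* cb) {
      nmethod* nm = cb->as_nmethod_or_null();
      return nm != nullptr && nm->is_marked_for_deoptimization();
    }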

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -57,7 +57,7 @@ inline bool ContinuationHelper::Frame::is_stub(CodeBlob* cb) {
}
inline Method* ContinuationHelper::Frame::frame_method(const frame& f) {
return f.is_interpreted_frame() ? f.interpreter_frame_method() : f.cb()->as_compiled_method()->method();
return f.is_interpreted_frame() ? f.interpreter_frame_method() : f.cb()->as_nmethod()->method();
}
inline address ContinuationHelper::Frame::return_pc(const frame& f) {
@ -79,8 +79,8 @@ inline intptr_t* ContinuationHelper::Frame::frame_top(const frame &f) {
inline bool ContinuationHelper::Frame::is_deopt_return(address pc, const frame& sender) {
if (sender.is_interpreted_frame()) return false;
CompiledMethod* cm = sender.cb()->as_compiled_method();
return cm->is_deopt_pc(pc);
nmethod* nm = sender.cb()->as_nmethod();
return nm->is_deopt_pc(pc);
}
#endif
@ -162,16 +162,16 @@ bool ContinuationHelper::CompiledFrame::is_owning_locks(JavaThread* thread, Regi
assert(!f.is_interpreted_frame(), "");
assert(CompiledFrame::is_instance(f), "");
CompiledMethod* cm = f.cb()->as_compiled_method();
assert(!cm->is_compiled() || !cm->as_compiled_method()->is_native_method(), ""); // See compiledVFrame::compiledVFrame(...) in vframe_hp.cpp
nmethod* nm = f.cb()->as_nmethod();
assert(!nm->is_native_method(), ""); // See compiledVFrame::compiledVFrame(...) in vframe_hp.cpp
if (!cm->has_monitors()) {
if (!nm->has_monitors()) {
return false;
}
frame::update_map_with_saved_link(map, Frame::callee_link_address(f)); // the monitor object could be stored in the link register
ResourceMark rm;
for (ScopeDesc* scope = cm->scope_desc_at(f.pc()); scope != nullptr; scope = scope->sender()) {
for (ScopeDesc* scope = nm->scope_desc_at(f.pc()); scope != nullptr; scope = scope->sender()) {
GrowableArray<MonitorValue*>* mons = scope->monitors();
if (mons == nullptr || mons->is_empty()) {
continue;
@ -186,7 +186,7 @@ bool ContinuationHelper::CompiledFrame::is_owning_locks(JavaThread* thread, Regi
StackValue* owner_sv = StackValue::create_stack_value(&f, map, ov); // it is an oop
oop owner = owner_sv->get_obj()();
if (owner != nullptr) {
//assert(cm->has_monitors(), "");
//assert(nm->has_monitors(), "");
return true;
}
}

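is_owning_locks() above decides whether a compiled frame holds a monitor by walking the scope descriptors at the frame's pc. A condensed sketch of that traversal, illustrative only (it reuses nm and f exactly as in the hunk above and omits the StackValue owner resolution):

    ResourceMark rm;
    for (ScopeDesc* scope = nm->scope_desc_at(f.pc()); scope != nullptr; scope = scope->sender()) {
      GrowableArray<MonitorValue*>* mons = scope->monitors();
      if (mons == nullptr || mons->is_empty()) {
        continue;   // no monitors described at this inlining level
      }
      // A non-null owner resolved from any MonitorValue means the frame owns a lock.
    }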

@ -117,34 +117,34 @@ DeoptimizationScope::~DeoptimizationScope() {
assert(_deopted, "Deopt not executed");
}
void DeoptimizationScope::mark(CompiledMethod* cm, bool inc_recompile_counts) {
void DeoptimizationScope::mark(nmethod* nm, bool inc_recompile_counts) {
ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
// If it's already marked but we still need it to be deopted.
if (cm->is_marked_for_deoptimization()) {
dependent(cm);
if (nm->is_marked_for_deoptimization()) {
dependent(nm);
return;
}
CompiledMethod::DeoptimizationStatus status =
inc_recompile_counts ? CompiledMethod::deoptimize : CompiledMethod::deoptimize_noupdate;
Atomic::store(&cm->_deoptimization_status, status);
nmethod::DeoptimizationStatus status =
inc_recompile_counts ? nmethod::deoptimize : nmethod::deoptimize_noupdate;
Atomic::store(&nm->_deoptimization_status, status);
// Make sure active is not committed
assert(DeoptimizationScope::_committed_deopt_gen < DeoptimizationScope::_active_deopt_gen, "Must be");
assert(cm->_deoptimization_generation == 0, "Is already marked");
assert(nm->_deoptimization_generation == 0, "Is already marked");
cm->_deoptimization_generation = DeoptimizationScope::_active_deopt_gen;
nm->_deoptimization_generation = DeoptimizationScope::_active_deopt_gen;
_required_gen = DeoptimizationScope::_active_deopt_gen;
}
void DeoptimizationScope::dependent(CompiledMethod* cm) {
void DeoptimizationScope::dependent(nmethod* nm) {
ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
// A method marked by someone else may have a _required_gen lower than what we marked with.
// Therefore only store it if it's higher than _required_gen.
if (_required_gen < cm->_deoptimization_generation) {
_required_gen = cm->_deoptimization_generation;
if (_required_gen < nm->_deoptimization_generation) {
_required_gen = nm->_deoptimization_generation;
}
}
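Taken together: mark() stamps the nmethod with the scope's active deoptimization generation and raises the scope's required generation, while dependent() only raises the required generation when the nmethod already carries a higher stamp. A hypothetical usage sketch, given two nmethod* values nm and other_nm (the name of the call that finally executes the scope is assumed here; it is not shown in this hunk):

    DeoptimizationScope deopt_scope;
    deopt_scope.mark(nm);               // stamp nm with the active generation
    deopt_scope.dependent(other_nm);    // other_nm was marked elsewhere; only track its generation
    deopt_scope.deoptimize_marked();    // assumed entry point that performs the deopt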
@ -321,7 +321,7 @@ static void print_objects(JavaThread* deoptee_thread,
tty->print_raw(st.freeze());
}
static bool rematerialize_objects(JavaThread* thread, int exec_mode, CompiledMethod* compiled_method,
static bool rematerialize_objects(JavaThread* thread, int exec_mode, nmethod* compiled_method,
frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
bool& deoptimized_objects) {
bool realloc_failures = false;
@ -439,7 +439,7 @@ bool Deoptimization::deoptimize_objects_internal(JavaThread* thread, GrowableArr
bool& realloc_failures) {
frame deoptee = chunk->at(0)->fr();
JavaThread* deoptee_thread = chunk->at(0)->thread();
CompiledMethod* cm = deoptee.cb()->as_compiled_method_or_null();
nmethod* nm = deoptee.cb()->as_nmethod_or_null();
RegisterMap map(chunk->at(0)->register_map());
bool deoptimized_objects = false;
@ -448,7 +448,7 @@ bool Deoptimization::deoptimize_objects_internal(JavaThread* thread, GrowableArr
// Reallocate the non-escaping objects and restore their fields.
if (jvmci_enabled COMPILER2_PRESENT(|| (DoEscapeAnalysis && EliminateAllocations)
|| EliminateAutoBox || EnableVectorAggressiveReboxing)) {
realloc_failures = rematerialize_objects(thread, Unpack_none, cm, deoptee, map, chunk, deoptimized_objects);
realloc_failures = rematerialize_objects(thread, Unpack_none, nm, deoptee, map, chunk, deoptimized_objects);
}
// MonitorInfo structures used in eliminate_locks are not GC safe.
@ -492,8 +492,8 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
frame deoptee = stub_frame.sender(&map);
// Set the deoptee nmethod
assert(current->deopt_compiled_method() == nullptr, "Pending deopt!");
CompiledMethod* cm = deoptee.cb()->as_compiled_method_or_null();
current->set_deopt_compiled_method(cm);
nmethod* nm = deoptee.cb()->as_nmethod_or_null();
current->set_deopt_compiled_method(nm);
if (VerifyStack) {
current->validate_frame_layout();
@ -522,7 +522,7 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
if (jvmci_enabled COMPILER2_PRESENT( || (DoEscapeAnalysis && EliminateAllocations)
|| EliminateAutoBox || EnableVectorAggressiveReboxing )) {
bool unused;
realloc_failures = rematerialize_objects(current, exec_mode, cm, deoptee, map, chunk, unused);
realloc_failures = rematerialize_objects(current, exec_mode, nm, deoptee, map, chunk, unused);
}
#endif // COMPILER2_OR_JVMCI
@ -1220,8 +1220,8 @@ bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap*
bool cache_init_error = false;
if (k->is_instance_klass()) {
#if INCLUDE_JVMCI
CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
if (cm->is_compiled_by_jvmci() && sv->is_auto_box()) {
nmethod* nm = fr->cb()->as_nmethod_or_null();
if (nm->is_compiled_by_jvmci() && sv->is_auto_box()) {
AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
if (obj != nullptr) {
@ -1747,14 +1747,14 @@ void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr, Deopt
gather_statistics(reason, Action_none, Bytecodes::_illegal);
if (LogCompilation && xtty != nullptr) {
CompiledMethod* cm = fr.cb()->as_compiled_method_or_null();
assert(cm != nullptr, "only compiled methods can deopt");
nmethod* nm = fr.cb()->as_nmethod_or_null();
assert(nm != nullptr, "only compiled methods can deopt");
ttyLocker ttyl;
xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
cm->log_identity(xtty);
nm->log_identity(xtty);
xtty->end_head();
for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
xtty->begin_elem("jvms bci='%d'", sd->bci());
xtty->method(sd->method());
xtty->end_elem();
@ -1782,9 +1782,9 @@ void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason
}
#if INCLUDE_JVMCI
address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm) {
// there is no exception handler for this pc => deoptimize
cm->make_not_entrant();
nm->make_not_entrant();
// Use Deoptimization::deoptimize for all of its side-effects:
// gathering traps statistics, logging...
@ -1797,7 +1797,7 @@ address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod*
RegisterMap::WalkContinuation::skip);
frame runtime_frame = thread->last_frame();
frame caller_frame = runtime_frame.sender(&reg_map);
assert(caller_frame.cb()->as_compiled_method_or_null() == cm, "expect top frame compiled method");
assert(caller_frame.cb()->as_nmethod_or_null() == nm, "expect top frame compiled method");
vframe* vf = vframe::new_vframe(&caller_frame, &reg_map, thread);
compiledVFrame* cvf = compiledVFrame::cast(vf);
ScopeDesc* imm_scope = cvf->scope();
@ -1815,7 +1815,7 @@ address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod*
Deoptimization::deoptimize(thread, caller_frame, Deoptimization::Reason_not_compiled_exception_handler);
MethodData* trap_mdo = get_method_data(thread, methodHandle(thread, cm->method()), true);
MethodData* trap_mdo = get_method_data(thread, methodHandle(thread, nm->method()), true);
if (trap_mdo != nullptr) {
trap_mdo->inc_trap_count(Deoptimization::Reason_not_compiled_exception_handler);
}
@ -1950,7 +1950,7 @@ static void register_serializers() {
JfrSerializer::register_serializer(TYPE_DEOPTIMIZATIONACTION, true, new DeoptActionSerializer());
}
static void post_deoptimization_event(CompiledMethod* nm,
static void post_deoptimization_event(nmethod* nm,
const Method* method,
int trap_bci,
int instruction,
@ -1979,7 +1979,7 @@ static void post_deoptimization_event(CompiledMethod* nm,
#endif // INCLUDE_JFR
static void log_deopt(CompiledMethod* nm, Method* tm, intptr_t pc, frame& fr, int trap_bci,
static void log_deopt(nmethod* nm, Method* tm, intptr_t pc, frame& fr, int trap_bci,
const char* reason_name, const char* reason_action) {
LogTarget(Debug, deoptimization) lt;
if (lt.is_enabled()) {
@ -2041,7 +2041,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr
vframe* vf = vframe::new_vframe(&fr, &reg_map, current);
compiledVFrame* cvf = compiledVFrame::cast(vf);
CompiledMethod* nm = cvf->code();
nmethod* nm = cvf->code();
ScopeDesc* trap_scope = cvf->scope();
@ -2058,7 +2058,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr
#if INCLUDE_JVMCI
jlong speculation = current->pending_failed_speculation();
if (nm->is_compiled_by_jvmci()) {
nm->as_nmethod()->update_speculation(current);
nm->update_speculation(current);
} else {
assert(speculation == 0, "There should not be a speculation for methods compiled by non-JVMCI compilers");
}
@ -2178,8 +2178,8 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr
trap_scope->bci(), p2i(fr.pc()), fr.pc() - nm->code_begin() JVMCI_ONLY(COMMA debug_id));
st.print(" compiler=%s compile_id=%d", nm->compiler_name(), nm->compile_id());
#if INCLUDE_JVMCI
if (nm->is_nmethod()) {
const char* installed_code_name = nm->as_nmethod()->jvmci_name();
if (nm->is_compiled_by_jvmci()) {
const char* installed_code_name = nm->jvmci_name();
if (installed_code_name != nullptr) {
st.print(" (JVMCI: installed code name=%s) ", installed_code_name);
}
@ -2433,7 +2433,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr
// Assume that in new recompiled code the statistic could be different,
// for example, due to different inlining.
if ((reason != Reason_rtm_state_change) && (trap_mdo != nullptr) &&
UseRTMDeopt && (nm->as_nmethod()->rtm_state() != ProfileRTM)) {
UseRTMDeopt && (nm->rtm_state() != ProfileRTM)) {
trap_mdo->atomic_set_rtm_state(ProfileRTM);
}
#endif


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -57,9 +57,9 @@ class DeoptimizationScope {
DeoptimizationScope();
~DeoptimizationScope();
// Mark a method for deoptimization; if it is already marked, just record it as dependent.
void mark(CompiledMethod* cm, bool inc_recompile_counts = true);
void mark(nmethod* nm, bool inc_recompile_counts = true);
// Record this as a dependent method.
void dependent(CompiledMethod* cm);
void dependent(nmethod* nm);
// Execute the deoptimization.
// Make the marked nmethods not entrant, walk the stacks and patch return pcs, and set post-call nops.
@ -184,7 +184,7 @@ class Deoptimization : AllStatic {
static void deoptimize(JavaThread* thread, frame fr, DeoptReason reason = Reason_constraint);
#if INCLUDE_JVMCI
static address deoptimize_for_missing_exception_handler(CompiledMethod* cm);
static address deoptimize_for_missing_exception_handler(nmethod* nm);
static oop get_cached_box(AutoBoxObjectValue* bv, frame* fr, RegisterMap* reg_map, bool& cache_init_error, TRAPS);
#endif


@ -205,11 +205,12 @@ void RegisterMap::print() const {
address frame::raw_pc() const {
if (is_deoptimized_frame()) {
CompiledMethod* cm = cb()->as_compiled_method_or_null();
if (cm->is_method_handle_return(pc()))
return cm->deopt_mh_handler_begin() - pc_return_offset;
nmethod* nm = cb()->as_nmethod_or_null();
assert(nm != nullptr, "only nmethod is expected here");
if (nm->is_method_handle_return(pc()))
return nm->deopt_mh_handler_begin() - pc_return_offset;
else
return cm->deopt_handler_begin() - pc_return_offset;
return nm->deopt_handler_begin() - pc_return_offset;
} else {
return (pc() - pc_return_offset);
}
@ -313,8 +314,8 @@ Method* frame::safe_interpreter_frame_method() const {
bool frame::should_be_deoptimized() const {
if (_deopt_state == is_deoptimized ||
!is_compiled_frame() ) return false;
assert(_cb != nullptr && _cb->is_compiled(), "must be an nmethod");
CompiledMethod* nm = (CompiledMethod *)_cb;
assert(_cb != nullptr && _cb->is_nmethod(), "must be an nmethod");
nmethod* nm = _cb->as_nmethod();
LogTarget(Debug, dependencies) lt;
if (lt.is_enabled()) {
LogStream ls(&lt);
@ -333,7 +334,7 @@ bool frame::should_be_deoptimized() const {
bool frame::can_be_deoptimized() const {
if (!is_compiled_frame()) return false;
CompiledMethod* nm = (CompiledMethod*)_cb;
nmethod* nm = _cb->as_nmethod();
if(!nm->can_be_deoptimized())
return false;
@ -346,18 +347,18 @@ void frame::deoptimize(JavaThread* thread) {
|| (thread->frame_anchor()->has_last_Java_frame() &&
thread->frame_anchor()->walkable()), "must be");
// Schedule deoptimization of an nmethod activation with this frame.
assert(_cb != nullptr && _cb->is_compiled(), "must be");
assert(_cb != nullptr && _cb->is_nmethod(), "must be");
// If the call site is a MethodHandle call site use the MH deopt handler.
CompiledMethod* cm = (CompiledMethod*) _cb;
address deopt = cm->is_method_handle_return(pc()) ?
cm->deopt_mh_handler_begin() :
cm->deopt_handler_begin();
nmethod* nm = _cb->as_nmethod();
address deopt = nm->is_method_handle_return(pc()) ?
nm->deopt_mh_handler_begin() :
nm->deopt_handler_begin();
NativePostCallNop* inst = nativePostCallNop_at(pc());
// Save the original pc before we patch in the new one
cm->set_original_pc(this, pc());
nm->set_original_pc(this, pc());
patch_pc(thread, deopt);
assert(is_deoptimized_frame(), "must be");
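frame::raw_pc() and frame::deoptimize() above share one convention: a call site that is a method-handle return is routed to the MH deopt handler, every other compiled call site to the regular handler, and raw_pc() additionally subtracts pc_return_offset so the result still reads as a return address. A condensed sketch of the handler selection, illustrative only and mirroring the two functions above:

    static address deopt_target(const frame& fr) {
      nmethod* nm = fr.cb()->as_nmethod();   // caller guarantees a compiled frame
      return nm->is_method_handle_return(fr.pc())
           ? nm->deopt_mh_handler_begin()
           : nm->deopt_handler_begin();
    }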
@ -674,15 +675,12 @@ void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose
}
} else if (_cb->is_buffer_blob()) {
st->print("v ~BufferBlob::%s " PTR_FORMAT, ((BufferBlob *)_cb)->name(), p2i(pc()));
} else if (_cb->is_compiled()) {
CompiledMethod* cm = (CompiledMethod*)_cb;
Method* m = cm->method();
} else if (_cb->is_nmethod()) {
nmethod* nm = _cb->as_nmethod();
Method* m = nm->method();
if (m != nullptr) {
if (cm->is_nmethod()) {
nmethod* nm = cm->as_nmethod();
st->print("J %d%s", nm->compile_id(), (nm->is_osr_method() ? "%" : ""));
st->print(" %s", nm->compiler_name());
}
st->print("J %d%s", nm->compile_id(), (nm->is_osr_method() ? "%" : ""));
st->print(" %s", nm->compiler_name());
m->name_and_sig_as_C_string(buf, buflen);
st->print(" %s", buf);
ModuleEntry* module = m->method_holder()->module();
@ -697,12 +695,9 @@ void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose
st->print(" (%d bytes) @ " PTR_FORMAT " [" PTR_FORMAT "+" INTPTR_FORMAT "]",
m->code_size(), p2i(_pc), p2i(_cb->code_begin()), _pc - _cb->code_begin());
#if INCLUDE_JVMCI
if (cm->is_nmethod()) {
nmethod* nm = cm->as_nmethod();
const char* jvmciName = nm->jvmci_name();
if (jvmciName != nullptr) {
st->print(" (%s)", jvmciName);
}
const char* jvmciName = nm->jvmci_name();
if (jvmciName != nullptr) {
st->print(" (%s)", jvmciName);
}
#endif
} else {
@ -1403,22 +1398,22 @@ void frame::describe(FrameValues& values, int frame_no, const RegisterMap* reg_m
} else if (is_entry_frame()) {
// For now just label the frame
values.describe(-1, info_address, err_msg("#%d entry frame", frame_no), 2);
} else if (cb()->is_compiled()) {
} else if (cb()->is_nmethod()) {
// For now just label the frame
CompiledMethod* cm = cb()->as_compiled_method();
nmethod* nm = cb()->as_nmethod();
values.describe(-1, info_address,
FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method J %s%s", frame_no,
p2i(cm),
cm->method()->name_and_sig_as_C_string(),
p2i(nm),
nm->method()->name_and_sig_as_C_string(),
(_deopt_state == is_deoptimized) ?
" (deoptimized)" :
((_deopt_state == unknown) ? " (state unknown)" : "")),
3);
{ // mark arguments (see nmethod::print_nmethod_labels)
Method* m = cm->method();
Method* m = nm->method();
int stack_slot_offset = cm->frame_size() * wordSize; // offset, in bytes, to caller sp
int stack_slot_offset = nm->frame_size() * wordSize; // offset, in bytes, to caller sp
int sizeargs = m->size_of_parameters();
BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
@ -1469,7 +1464,7 @@ void frame::describe(FrameValues& values, int frame_no, const RegisterMap* reg_m
if (reg_map != nullptr && is_java_frame()) {
int scope_no = 0;
for (ScopeDesc* scope = cm->scope_desc_at(pc()); scope != nullptr; scope = scope->sender(), scope_no++) {
for (ScopeDesc* scope = nm->scope_desc_at(pc()); scope != nullptr; scope = scope->sender(), scope_no++) {
Method* m = scope->method();
int bci = scope->bci();
values.describe(-1, info_address, err_msg("- #%d scope %s @ %d", scope_no, m->name_and_sig_as_C_string(), bci), 2);
@ -1507,7 +1502,7 @@ void frame::describe(FrameValues& values, int frame_no, const RegisterMap* reg_m
}
}
if (cm->method()->is_continuation_enter_intrinsic()) {
if (nm->method()->is_continuation_enter_intrinsic()) {
ContinuationEntry* ce = Continuation::get_continuation_entry_for_entry_frame(reg_map->thread(), *this); // (ContinuationEntry*)unextended_sp();
ce->describe(values, frame_no);
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,7 +40,6 @@
typedef class BytecodeInterpreter* interpreterState;
class CodeBlob;
class CompiledMethod;
class FrameValues;
class InterpreterOopMap;
class JavaCallWrapper;
