diff --git a/src/hotspot/cpu/aarch64/continuationEntry_aarch64.inline.hpp b/src/hotspot/cpu/aarch64/continuationEntry_aarch64.inline.hpp index c70b4fdcfcc..df4d3957239 100644 --- a/src/hotspot/cpu/aarch64/continuationEntry_aarch64.inline.hpp +++ b/src/hotspot/cpu/aarch64/continuationEntry_aarch64.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,7 @@ inline frame ContinuationEntry::to_frame() const { static CodeBlob* cb = CodeCache::find_blob_fast(entry_pc()); assert(cb != nullptr, ""); - assert(cb->as_compiled_method()->method()->is_continuation_enter_intrinsic(), ""); + assert(cb->as_nmethod()->method()->is_continuation_enter_intrinsic(), ""); return frame(entry_sp(), entry_sp(), entry_fp(), entry_pc(), cb); } diff --git a/src/hotspot/cpu/aarch64/frame_aarch64.cpp b/src/hotspot/cpu/aarch64/frame_aarch64.cpp index 8d0fa8895d1..7acfa79d8b7 100644 --- a/src/hotspot/cpu/aarch64/frame_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/frame_aarch64.cpp @@ -222,7 +222,7 @@ bool frame::safe_for_sender(JavaThread *thread) { return false; } - CompiledMethod* nm = sender_blob->as_compiled_method_or_null(); + nmethod* nm = sender_blob->as_nmethod_or_null(); if (nm != nullptr) { if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) || nm->method()->is_method_handle_intrinsic()) { @@ -234,7 +234,7 @@ bool frame::safe_for_sender(JavaThread *thread) { // because the return address counts against the callee's frame. if (sender_blob->frame_size() <= 0) { - assert(!sender_blob->is_compiled(), "should count return address at least"); + assert(!sender_blob->is_nmethod(), "should count return address at least"); return false; } @@ -243,7 +243,7 @@ bool frame::safe_for_sender(JavaThread *thread) { // should not be anything but the call stub (already covered), the interpreter (already covered) // or an nmethod. - if (!sender_blob->is_compiled()) { + if (!sender_blob->is_nmethod()) { return false; } @@ -297,7 +297,7 @@ void frame::patch_pc(Thread* thread, address pc) { DEBUG_ONLY(address old_pc = _pc;) *pc_addr = signed_pc; _pc = pc; // must be set before call to get_deopt_original_pc - address original_pc = CompiledMethod::get_deopt_original_pc(this); + address original_pc = nmethod::get_deopt_original_pc(this); if (original_pc != nullptr) { assert(original_pc == old_pc, "expected original PC to be stored before patching"); _deopt_state = is_deoptimized; @@ -426,7 +426,7 @@ frame frame::sender_for_upcall_stub_frame(RegisterMap* map) const { // Verifies the calculated original PC of a deoptimization PC for the // given unextended SP. #ifdef ASSERT -void frame::verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp) { +void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp) { frame fr; // This is ugly but it's better than to change {get,set}_original_pc @@ -449,12 +449,12 @@ void frame::adjust_unextended_sp() { // returning to any of these call sites. if (_cb != nullptr) { - CompiledMethod* sender_cm = _cb->as_compiled_method_or_null(); - if (sender_cm != nullptr) { + nmethod* sender_nm = _cb->as_nmethod_or_null(); + if (sender_nm != nullptr) { // If the sender PC is a deoptimization point, get the original PC. 
- if (sender_cm->is_deopt_entry(_pc) || - sender_cm->is_deopt_mh_entry(_pc)) { - verify_deopt_original_pc(sender_cm, _unextended_sp); + if (sender_nm->is_deopt_entry(_pc) || + sender_nm->is_deopt_mh_entry(_pc)) { + verify_deopt_original_pc(sender_nm, _unextended_sp); } } } diff --git a/src/hotspot/cpu/aarch64/frame_aarch64.hpp b/src/hotspot/cpu/aarch64/frame_aarch64.hpp index 099dcdb4f2b..401e2c6ae97 100644 --- a/src/hotspot/cpu/aarch64/frame_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/frame_aarch64.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -153,7 +153,7 @@ #ifdef ASSERT // Used in frame::sender_for_{interpreter,compiled}_frame - static void verify_deopt_original_pc( CompiledMethod* nm, intptr_t* unextended_sp); + static void verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp); #endif public: diff --git a/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp b/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp index d3ae7871f61..1dc3208948b 100644 --- a/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp +++ b/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp @@ -71,11 +71,11 @@ inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) { inline void frame::setup(address pc) { adjust_unextended_sp(); - address original_pc = CompiledMethod::get_deopt_original_pc(this); + address original_pc = nmethod::get_deopt_original_pc(this); if (original_pc != nullptr) { _pc = original_pc; _deopt_state = is_deoptimized; - assert(_cb == nullptr || _cb->as_compiled_method()->insts_contains_inclusive(_pc), + assert(_cb == nullptr || _cb->as_nmethod()->insts_contains_inclusive(_pc), "original PC must be in the main code section of the compiled method (or must be immediately following it)"); } else { if (_cb == SharedRuntime::deopt_blob()) { @@ -178,7 +178,7 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) { _cb = CodeCache::find_blob(_pc); adjust_unextended_sp(); - address original_pc = CompiledMethod::get_deopt_original_pc(this); + address original_pc = nmethod::get_deopt_original_pc(this); if (original_pc != nullptr) { _pc = original_pc; _deopt_state = is_deoptimized; @@ -240,8 +240,8 @@ inline int frame::frame_size() const { } inline int frame::compiled_frame_stack_argsize() const { - assert(cb()->is_compiled(), ""); - return (cb()->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord; + assert(cb()->is_nmethod(), ""); + return (cb()->as_nmethod()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord; } inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const { @@ -417,7 +417,7 @@ inline frame frame::sender_for_compiled_frame(RegisterMap* map) const { // Tell GC to use argument oopmaps for some runtime stubs that need it. // For C1, the runtime stub might not have oop maps, so set this flag // outside of update_register_map. 
- if (!_cb->is_compiled()) { // compiled frames do not use callee-saved registers + if (!_cb->is_nmethod()) { // compiled frames do not use callee-saved registers map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread())); if (oop_map() != nullptr) { _oop_map->update_register_map(this, map); diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp index 5dd6b534f14..ba708a848df 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp @@ -867,7 +867,7 @@ static bool is_always_within_branch_range(Address entry) { // Non-compiled methods stay forever in CodeCache. // We check whether the longest possible branch is within the branch range. assert(CodeCache::find_blob(target) != nullptr && - !CodeCache::find_blob(target)->is_compiled(), + !CodeCache::find_blob(target)->is_nmethod(), "runtime call of compiled method"); const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size; const address left_longest_branch_start = CodeCache::low_bound(); diff --git a/src/hotspot/cpu/aarch64/relocInfo_aarch64.cpp b/src/hotspot/cpu/aarch64/relocInfo_aarch64.cpp index 4b7930c94a8..5424f0d9c75 100644 --- a/src/hotspot/cpu/aarch64/relocInfo_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/relocInfo_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -25,7 +25,7 @@ #include "precompiled.hpp" #include "asm/macroAssembler.hpp" -#include "code/compiledMethod.hpp" +#include "code/nmethod.hpp" #include "code/relocInfo.hpp" #include "nativeInst_aarch64.hpp" #include "oops/oop.inline.hpp" diff --git a/src/hotspot/cpu/aarch64/stackChunkFrameStream_aarch64.inline.hpp b/src/hotspot/cpu/aarch64/stackChunkFrameStream_aarch64.inline.hpp index 0305d434052..aa4eff19766 100644 --- a/src/hotspot/cpu/aarch64/stackChunkFrameStream_aarch64.inline.hpp +++ b/src/hotspot/cpu/aarch64/stackChunkFrameStream_aarch64.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,7 @@ template inline bool StackChunkFrameStream::is_in_frame(void* p0) const { assert(!is_done(), ""); intptr_t* p = (intptr_t*)p0; - int argsize = is_compiled() ? (_cb->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord : 0; + int argsize = is_compiled() ? (_cb->as_nmethod()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord : 0; int frame_size = _cb->frame_size() + argsize; return p == sp() - frame::sender_sp_offset || ((p - unextended_sp()) >= 0 && (p - unextended_sp()) < frame_size); } diff --git a/src/hotspot/cpu/arm/frame_arm.cpp b/src/hotspot/cpu/arm/frame_arm.cpp index d923e1f43ad..b22cc5c605f 100644 --- a/src/hotspot/cpu/arm/frame_arm.cpp +++ b/src/hotspot/cpu/arm/frame_arm.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -80,7 +80,7 @@ bool frame::safe_for_sender(JavaThread *thread) { // ok. adapter blobs never have a frame complete and are never ok. if (!_cb->is_frame_complete_at(_pc)) { - if (_cb->is_compiled() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) { + if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) { return false; } } @@ -179,7 +179,7 @@ bool frame::safe_for_sender(JavaThread *thread) { // because the return address counts against the callee's frame. if (sender_blob->frame_size() <= 0) { - assert(!sender_blob->is_compiled(), "should count return address at least"); + assert(!sender_blob->is_nmethod(), "should count return address at least"); return false; } @@ -188,7 +188,7 @@ bool frame::safe_for_sender(JavaThread *thread) { // should not be anything but the call stub (already covered), the interpreter (already covered) // or an nmethod. - if (!sender_blob->is_compiled()) { + if (!sender_blob->is_nmethod()) { return false; } @@ -229,7 +229,7 @@ void frame::patch_pc(Thread* thread, address pc) { DEBUG_ONLY(address old_pc = _pc;) *pc_addr = pc; _pc = pc; // must be set before call to get_deopt_original_pc - address original_pc = CompiledMethod::get_deopt_original_pc(this); + address original_pc = nmethod::get_deopt_original_pc(this); if (original_pc != nullptr) { assert(original_pc == old_pc, "expected original PC to be stored before patching"); _deopt_state = is_deoptimized; @@ -332,7 +332,7 @@ bool frame::upcall_stub_frame_is_first() const { // given unextended SP. The unextended SP might also be the saved SP // for MethodHandle call sites. #ifdef ASSERT -void frame::verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp, bool is_method_handle_return) { +void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return) { frame fr; // This is ugly but it's better than to change {get,set}_original_pc @@ -357,19 +357,19 @@ void frame::adjust_unextended_sp() { // simplest way to tell whether we are returning to such a call site // is as follows: - CompiledMethod* sender_cm = (_cb == nullptr) ? nullptr : _cb->as_compiled_method_or_null(); - if (sender_cm != nullptr) { + nmethod* sender_nm = (_cb == nullptr) ? nullptr : _cb->as_nmethod_or_null(); + if (sender_nm != nullptr) { // If the sender PC is a deoptimization point, get the original // PC. For MethodHandle call site the unextended_sp is stored in // saved_fp. - if (sender_cm->is_deopt_mh_entry(_pc)) { - DEBUG_ONLY(verify_deopt_mh_original_pc(sender_cm, _fp)); + if (sender_nm->is_deopt_mh_entry(_pc)) { + DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, _fp)); _unextended_sp = _fp; } - else if (sender_cm->is_deopt_entry(_pc)) { - DEBUG_ONLY(verify_deopt_original_pc(sender_cm, _unextended_sp)); + else if (sender_nm->is_deopt_entry(_pc)) { + DEBUG_ONLY(verify_deopt_original_pc(sender_nm, _unextended_sp)); } - else if (sender_cm->is_method_handle_return(_pc)) { + else if (sender_nm->is_method_handle_return(_pc)) { _unextended_sp = _fp; } } diff --git a/src/hotspot/cpu/arm/frame_arm.hpp b/src/hotspot/cpu/arm/frame_arm.hpp index 56f8fc9932e..dee005b8d75 100644 --- a/src/hotspot/cpu/arm/frame_arm.hpp +++ b/src/hotspot/cpu/arm/frame_arm.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -93,8 +93,8 @@ #ifdef ASSERT // Used in frame::sender_for_{interpreter,compiled}_frame - static void verify_deopt_original_pc( CompiledMethod* nm, intptr_t* unextended_sp, bool is_method_handle_return = false); - static void verify_deopt_mh_original_pc(CompiledMethod* nm, intptr_t* unextended_sp) { + static void verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return = false); + static void verify_deopt_mh_original_pc(nmethod* nm, intptr_t* unextended_sp) { verify_deopt_original_pc(nm, unextended_sp, true); } #endif diff --git a/src/hotspot/cpu/arm/frame_arm.inline.hpp b/src/hotspot/cpu/arm/frame_arm.inline.hpp index 8a08c0d0e9c..0ffd8829c0f 100644 --- a/src/hotspot/cpu/arm/frame_arm.inline.hpp +++ b/src/hotspot/cpu/arm/frame_arm.inline.hpp @@ -58,10 +58,10 @@ inline void frame::init(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, add adjust_unextended_sp(); DEBUG_ONLY(_frame_index = -1;) - address original_pc = CompiledMethod::get_deopt_original_pc(this); + address original_pc = nmethod::get_deopt_original_pc(this); if (original_pc != nullptr) { _pc = original_pc; - assert(_cb->as_compiled_method()->insts_contains_inclusive(_pc), + assert(_cb->as_nmethod()->insts_contains_inclusive(_pc), "original PC must be in the main code section of the the compiled method (or must be immediately following it)"); _deopt_state = is_deoptimized; } else { diff --git a/src/hotspot/cpu/ppc/continuationEntry_ppc.inline.hpp b/src/hotspot/cpu/ppc/continuationEntry_ppc.inline.hpp index 1d2749724c5..4af637b2988 100644 --- a/src/hotspot/cpu/ppc/continuationEntry_ppc.inline.hpp +++ b/src/hotspot/cpu/ppc/continuationEntry_ppc.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,7 @@ inline frame ContinuationEntry::to_frame() const { static CodeBlob* cb = CodeCache::find_blob_fast(entry_pc()); assert(cb != nullptr, ""); - assert(cb->as_compiled_method()->method()->is_continuation_enter_intrinsic(), ""); + assert(cb->as_nmethod()->method()->is_continuation_enter_intrinsic(), ""); return frame(entry_sp(), entry_pc(), entry_sp(), entry_fp(), cb); } diff --git a/src/hotspot/cpu/ppc/frame_ppc.cpp b/src/hotspot/cpu/ppc/frame_ppc.cpp index b63789f320d..9841e658a17 100644 --- a/src/hotspot/cpu/ppc/frame_ppc.cpp +++ b/src/hotspot/cpu/ppc/frame_ppc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -90,7 +90,7 @@ bool frame::safe_for_sender(JavaThread *thread) { // so we just assume they are OK. 
// Adapter blobs never have a complete frame and are never OK if (!_cb->is_frame_complete_at(_pc)) { - if (_cb->is_compiled() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) { + if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) { return false; } } @@ -280,7 +280,7 @@ void frame::patch_pc(Thread* thread, address pc) { DEBUG_ONLY(address old_pc = _pc;) own_abi()->lr = (uint64_t)pc; _pc = pc; // must be set before call to get_deopt_original_pc - address original_pc = CompiledMethod::get_deopt_original_pc(this); + address original_pc = nmethod::get_deopt_original_pc(this); if (original_pc != nullptr) { assert(original_pc == old_pc, "expected original PC to be stored before patching"); _deopt_state = is_deoptimized; @@ -288,7 +288,7 @@ void frame::patch_pc(Thread* thread, address pc) { } else { _deopt_state = not_deoptimized; } - assert(!is_compiled_frame() || !_cb->as_compiled_method()->is_deopt_entry(_pc), "must be"); + assert(!is_compiled_frame() || !_cb->as_nmethod()->is_deopt_entry(_pc), "must be"); #ifdef ASSERT { diff --git a/src/hotspot/cpu/ppc/frame_ppc.inline.hpp b/src/hotspot/cpu/ppc/frame_ppc.inline.hpp index 7b1f37a342f..a14051bff63 100644 --- a/src/hotspot/cpu/ppc/frame_ppc.inline.hpp +++ b/src/hotspot/cpu/ppc/frame_ppc.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -61,11 +61,11 @@ inline void frame::setup(kind knd) { } } - address original_pc = CompiledMethod::get_deopt_original_pc(this); + address original_pc = nmethod::get_deopt_original_pc(this); if (original_pc != nullptr) { _pc = original_pc; _deopt_state = is_deoptimized; - assert(_cb == nullptr || _cb->as_compiled_method()->insts_contains_inclusive(_pc), + assert(_cb == nullptr || _cb->as_nmethod()->insts_contains_inclusive(_pc), "original PC must be in the main code section of the compiled method (or must be immediately following it)"); } else { if (_cb == SharedRuntime::deopt_blob()) { @@ -329,7 +329,7 @@ inline frame frame::sender_for_compiled_frame(RegisterMap *map) const { // Tell GC to use argument oopmaps for some runtime stubs that need it. // For C1, the runtime stub might not have oop maps, so set this flag // outside of update_register_map. 
- if (!_cb->is_compiled()) { // compiled frames do not use callee-saved registers + if (!_cb->is_nmethod()) { // compiled frames do not use callee-saved registers map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread())); if (oop_map() != nullptr) { _oop_map->update_register_map(this, map); @@ -368,8 +368,8 @@ inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) { } inline int frame::compiled_frame_stack_argsize() const { - assert(cb()->is_compiled(), ""); - return (cb()->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord; + assert(cb()->is_nmethod(), ""); + return (cb()->as_nmethod()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord; } inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const { diff --git a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp index e1a41a17d93..9520f6a94f2 100644 --- a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp +++ b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2020 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -460,7 +460,7 @@ void NativeDeoptInstruction::verify() { bool NativeDeoptInstruction::is_deopt_at(address code_pos) { if (!Assembler::is_illtrap(code_pos)) return false; CodeBlob* cb = CodeCache::find_blob(code_pos); - if (cb == nullptr || !cb->is_compiled()) return false; + if (cb == nullptr || !cb->is_nmethod()) return false; nmethod *nm = (nmethod *)cb; // see NativeInstruction::is_sigill_not_entrant_at() return nm->verified_entry_point() != code_pos; diff --git a/src/hotspot/cpu/ppc/stackChunkFrameStream_ppc.inline.hpp b/src/hotspot/cpu/ppc/stackChunkFrameStream_ppc.inline.hpp index f8d60ed9f93..2fb15d60c8f 100644 --- a/src/hotspot/cpu/ppc/stackChunkFrameStream_ppc.inline.hpp +++ b/src/hotspot/cpu/ppc/stackChunkFrameStream_ppc.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,7 @@ inline bool StackChunkFrameStream::is_in_frame(void* p0) const { assert(!is_done(), ""); assert(is_compiled(), ""); intptr_t* p = (intptr_t*)p0; - int argsize = (_cb->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord; + int argsize = (_cb->as_nmethod()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord; int frame_size = _cb->frame_size() + (argsize > 0 ? argsize + frame::metadata_words_at_top : 0); return (p - unextended_sp()) >= 0 && (p - unextended_sp()) < frame_size; } diff --git a/src/hotspot/cpu/riscv/continuationEntry_riscv.inline.hpp b/src/hotspot/cpu/riscv/continuationEntry_riscv.inline.hpp index e53f3681144..5173316c704 100644 --- a/src/hotspot/cpu/riscv/continuationEntry_riscv.inline.hpp +++ b/src/hotspot/cpu/riscv/continuationEntry_riscv.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,7 @@ inline frame ContinuationEntry::to_frame() const { static CodeBlob* cb = CodeCache::find_blob_fast(entry_pc()); assert(cb != nullptr, ""); - assert(cb->as_compiled_method()->method()->is_continuation_enter_intrinsic(), ""); + assert(cb->as_nmethod()->method()->is_continuation_enter_intrinsic(), ""); return frame(entry_sp(), entry_sp(), entry_fp(), entry_pc(), cb); } diff --git a/src/hotspot/cpu/riscv/frame_riscv.cpp b/src/hotspot/cpu/riscv/frame_riscv.cpp index 194342b6d7d..6d8eba2cb29 100644 --- a/src/hotspot/cpu/riscv/frame_riscv.cpp +++ b/src/hotspot/cpu/riscv/frame_riscv.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -211,7 +211,7 @@ bool frame::safe_for_sender(JavaThread *thread) { return thread->is_in_stack_range_excl(jcw, (address)sender.fp()); } - CompiledMethod* nm = sender_blob->as_compiled_method_or_null(); + nmethod* nm = sender_blob->as_nmethod_or_null(); if (nm != nullptr) { if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) || nm->method()->is_method_handle_intrinsic()) { @@ -222,7 +222,7 @@ bool frame::safe_for_sender(JavaThread *thread) { // If the frame size is 0 something (or less) is bad because every nmethod has a non-zero frame size // because the return address counts against the callee's frame. if (sender_blob->frame_size() <= 0) { - assert(!sender_blob->is_compiled(), "should count return address at least"); + assert(!sender_blob->is_nmethod(), "should count return address at least"); return false; } @@ -230,7 +230,7 @@ bool frame::safe_for_sender(JavaThread *thread) { // code cache (current frame) is called by an entity within the code cache that entity // should not be anything but the call stub (already covered), the interpreter (already covered) // or an nmethod. - if (!sender_blob->is_compiled()) { + if (!sender_blob->is_nmethod()) { return false; } @@ -273,7 +273,7 @@ void frame::patch_pc(Thread* thread, address pc) { DEBUG_ONLY(address old_pc = _pc;) *pc_addr = pc; _pc = pc; // must be set before call to get_deopt_original_pc - address original_pc = CompiledMethod::get_deopt_original_pc(this); + address original_pc = nmethod::get_deopt_original_pc(this); if (original_pc != nullptr) { assert(original_pc == old_pc, "expected original PC to be stored before patching"); _deopt_state = is_deoptimized; @@ -399,7 +399,7 @@ frame frame::sender_for_upcall_stub_frame(RegisterMap* map) const { // Verifies the calculated original PC of a deoptimization PC for the // given unextended SP. #ifdef ASSERT -void frame::verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp) { +void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp) { frame fr; // This is ugly but it's better than to change {get,set}_original_pc @@ -423,12 +423,12 @@ void frame::adjust_unextended_sp() { // returning to any of these call sites. if (_cb != nullptr) { - CompiledMethod* sender_cm = _cb->as_compiled_method_or_null(); - if (sender_cm != nullptr) { + nmethod* sender_nm = _cb->as_nmethod_or_null(); + if (sender_nm != nullptr) { // If the sender PC is a deoptimization point, get the original PC. 
- if (sender_cm->is_deopt_entry(_pc) || - sender_cm->is_deopt_mh_entry(_pc)) { - verify_deopt_original_pc(sender_cm, _unextended_sp); + if (sender_nm->is_deopt_entry(_pc) || + sender_nm->is_deopt_mh_entry(_pc)) { + verify_deopt_original_pc(sender_nm, _unextended_sp); } } } diff --git a/src/hotspot/cpu/riscv/frame_riscv.hpp b/src/hotspot/cpu/riscv/frame_riscv.hpp index 0c0659d1eee..15fe0e8f1f8 100644 --- a/src/hotspot/cpu/riscv/frame_riscv.hpp +++ b/src/hotspot/cpu/riscv/frame_riscv.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -186,7 +186,7 @@ #ifdef ASSERT // Used in frame::sender_for_{interpreter,compiled}_frame - static void verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp); + static void verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp); #endif public: diff --git a/src/hotspot/cpu/riscv/frame_riscv.inline.hpp b/src/hotspot/cpu/riscv/frame_riscv.inline.hpp index 33727a8c6d0..a4e75e09cfe 100644 --- a/src/hotspot/cpu/riscv/frame_riscv.inline.hpp +++ b/src/hotspot/cpu/riscv/frame_riscv.inline.hpp @@ -69,11 +69,11 @@ inline void frame::init(intptr_t* ptr_sp, intptr_t* ptr_fp, address pc) { inline void frame::setup(address pc) { adjust_unextended_sp(); - address original_pc = CompiledMethod::get_deopt_original_pc(this); + address original_pc = nmethod::get_deopt_original_pc(this); if (original_pc != nullptr) { _pc = original_pc; _deopt_state = is_deoptimized; - assert(_cb == nullptr || _cb->as_compiled_method()->insts_contains_inclusive(_pc), + assert(_cb == nullptr || _cb->as_nmethod()->insts_contains_inclusive(_pc), "original PC must be in the main code section of the compiled method (or must be immediately following it)"); } else { if (_cb == SharedRuntime::deopt_blob()) { @@ -170,7 +170,7 @@ inline frame::frame(intptr_t* ptr_sp, intptr_t* ptr_fp) { _cb = CodeCache::find_blob(_pc); adjust_unextended_sp(); - address original_pc = CompiledMethod::get_deopt_original_pc(this); + address original_pc = nmethod::get_deopt_original_pc(this); if (original_pc != nullptr) { _pc = original_pc; _deopt_state = is_deoptimized; @@ -231,8 +231,8 @@ inline int frame::frame_size() const { } inline int frame::compiled_frame_stack_argsize() const { - assert(cb()->is_compiled(), ""); - return (cb()->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord; + assert(cb()->is_nmethod(), ""); + return (cb()->as_nmethod()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord; } inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const { @@ -413,7 +413,7 @@ frame frame::sender_for_compiled_frame(RegisterMap* map) const { // Tell GC to use argument oopmaps for some runtime stubs that need it. // For C1, the runtime stub might not have oop maps, so set this flag // outside of update_register_map. 
- if (!_cb->is_compiled()) { // compiled frames do not use callee-saved registers + if (!_cb->is_nmethod()) { // compiled frames do not use callee-saved registers map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread())); if (oop_map() != nullptr) { _oop_map->update_register_map(this, map); diff --git a/src/hotspot/cpu/riscv/stackChunkFrameStream_riscv.inline.hpp b/src/hotspot/cpu/riscv/stackChunkFrameStream_riscv.inline.hpp index d43b556dad1..7a7ee6d0040 100644 --- a/src/hotspot/cpu/riscv/stackChunkFrameStream_riscv.inline.hpp +++ b/src/hotspot/cpu/riscv/stackChunkFrameStream_riscv.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,7 +34,7 @@ template inline bool StackChunkFrameStream::is_in_frame(void* p0) const { assert(!is_done(), ""); intptr_t* p = (intptr_t*)p0; - int argsize = is_compiled() ? (_cb->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord : 0; + int argsize = is_compiled() ? (_cb->as_nmethod()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord : 0; int frame_size = _cb->frame_size() + argsize; return p == sp() - 2 || ((p - unextended_sp()) >= 0 && (p - unextended_sp()) < frame_size); } diff --git a/src/hotspot/cpu/s390/frame_s390.cpp b/src/hotspot/cpu/s390/frame_s390.cpp index dbaa243eb1c..62619aa1617 100644 --- a/src/hotspot/cpu/s390/frame_s390.cpp +++ b/src/hotspot/cpu/s390/frame_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -267,7 +267,7 @@ void frame::patch_pc(Thread* thread, address pc) { DEBUG_ONLY(address old_pc = _pc;) own_abi()->return_pc = (uint64_t)pc; _pc = pc; // must be set before call to get_deopt_original_pc - address original_pc = CompiledMethod::get_deopt_original_pc(this); + address original_pc = nmethod::get_deopt_original_pc(this); if (original_pc != nullptr) { // assert(original_pc == _pc, "expected original to be stored before patching"); _deopt_state = is_deoptimized; @@ -275,7 +275,7 @@ void frame::patch_pc(Thread* thread, address pc) { } else { _deopt_state = not_deoptimized; } - assert(!is_compiled_frame() || !_cb->as_compiled_method()->is_deopt_entry(_pc), "must be"); + assert(!is_compiled_frame() || !_cb->as_nmethod()->is_deopt_entry(_pc), "must be"); #ifdef ASSERT { diff --git a/src/hotspot/cpu/s390/frame_s390.inline.hpp b/src/hotspot/cpu/s390/frame_s390.inline.hpp index 178f7f90849..2609da19df3 100644 --- a/src/hotspot/cpu/s390/frame_s390.inline.hpp +++ b/src/hotspot/cpu/s390/frame_s390.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -56,11 +56,11 @@ inline void frame::setup() { assert(_on_heap || (is_aligned(_sp, alignment_in_bytes) && is_aligned(_fp, alignment_in_bytes)), "invalid alignment sp:" PTR_FORMAT " unextended_sp:" PTR_FORMAT " fp:" PTR_FORMAT, p2i(_sp), p2i(_unextended_sp), p2i(_fp)); - address original_pc = CompiledMethod::get_deopt_original_pc(this); + address original_pc = nmethod::get_deopt_original_pc(this); if (original_pc != nullptr) { _pc = original_pc; _deopt_state = is_deoptimized; - assert(_cb == nullptr || _cb->as_compiled_method()->insts_contains_inclusive(_pc), + assert(_cb == nullptr || _cb->as_nmethod()->insts_contains_inclusive(_pc), "original PC must be in the main code section of the compiled method (or must be immediately following it)"); } else { if (_cb == SharedRuntime::deopt_blob()) { diff --git a/src/hotspot/cpu/s390/nativeInst_s390.cpp b/src/hotspot/cpu/s390/nativeInst_s390.cpp index 95178e9ae74..9f083fa8904 100644 --- a/src/hotspot/cpu/s390/nativeInst_s390.cpp +++ b/src/hotspot/cpu/s390/nativeInst_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -540,7 +540,7 @@ void NativeMovConstReg::set_narrow_klass(intptr_t data) { ICache::invalidate_range(start, range); } -void NativeMovConstReg::set_pcrel_addr(intptr_t newTarget, CompiledMethod *passed_nm /* = nullptr */) { +void NativeMovConstReg::set_pcrel_addr(intptr_t newTarget, nmethod *passed_nm /* = nullptr */) { address next_address; address loc = addr_at(0); @@ -565,7 +565,7 @@ void NativeMovConstReg::set_pcrel_addr(intptr_t newTarget, CompiledMethod *passe } } -void NativeMovConstReg::set_pcrel_data(intptr_t newData, CompiledMethod *passed_nm /* = nullptr */) { +void NativeMovConstReg::set_pcrel_data(intptr_t newData, nmethod *passed_nm /* = nullptr */) { address next_address; address loc = addr_at(0); diff --git a/src/hotspot/cpu/s390/nativeInst_s390.hpp b/src/hotspot/cpu/s390/nativeInst_s390.hpp index abad50da8b4..13f15224f8b 100644 --- a/src/hotspot/cpu/s390/nativeInst_s390.hpp +++ b/src/hotspot/cpu/s390/nativeInst_s390.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -486,8 +486,8 @@ class NativeMovConstReg: public NativeInstruction { // Patch narrow oop constant in code stream. void set_narrow_oop(intptr_t data); void set_narrow_klass(intptr_t data); - void set_pcrel_addr(intptr_t addr, CompiledMethod *nm = nullptr); - void set_pcrel_data(intptr_t data, CompiledMethod *nm = nullptr); + void set_pcrel_addr(intptr_t addr, nmethod *nm = nullptr); + void set_pcrel_data(intptr_t data, nmethod *nm = nullptr); void verify(); diff --git a/src/hotspot/cpu/x86/continuationEntry_x86.inline.hpp b/src/hotspot/cpu/x86/continuationEntry_x86.inline.hpp index cd173ce95f4..7d13a5200ea 100644 --- a/src/hotspot/cpu/x86/continuationEntry_x86.inline.hpp +++ b/src/hotspot/cpu/x86/continuationEntry_x86.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,7 @@ inline frame ContinuationEntry::to_frame() const { static CodeBlob* cb = CodeCache::find_blob_fast(entry_pc()); assert(cb != nullptr, ""); - assert(cb->as_compiled_method()->method()->is_continuation_enter_intrinsic(), ""); + assert(cb->as_nmethod()->method()->is_continuation_enter_intrinsic(), ""); return frame(entry_sp(), entry_sp(), entry_fp(), entry_pc(), cb); } diff --git a/src/hotspot/cpu/x86/frame_x86.cpp b/src/hotspot/cpu/x86/frame_x86.cpp index bef2443a60a..bff81df1df1 100644 --- a/src/hotspot/cpu/x86/frame_x86.cpp +++ b/src/hotspot/cpu/x86/frame_x86.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -95,7 +95,7 @@ bool frame::safe_for_sender(JavaThread *thread) { // ok. adapter blobs never have a frame complete and are never ok. if (!_cb->is_frame_complete_at(_pc)) { - if (_cb->is_compiled() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) { + if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) { return false; } } @@ -213,7 +213,7 @@ bool frame::safe_for_sender(JavaThread *thread) { return false; } - CompiledMethod* nm = sender_blob->as_compiled_method_or_null(); + nmethod* nm = sender_blob->as_nmethod_or_null(); if (nm != nullptr) { if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) || nm->method()->is_method_handle_intrinsic()) { @@ -225,7 +225,7 @@ bool frame::safe_for_sender(JavaThread *thread) { // because the return address counts against the callee's frame. if (sender_blob->frame_size() <= 0) { - assert(!sender_blob->is_compiled(), "should count return address at least"); + assert(!sender_blob->is_nmethod(), "should count return address at least"); return false; } @@ -234,7 +234,7 @@ bool frame::safe_for_sender(JavaThread *thread) { // should not be anything but the call stub (already covered), the interpreter (already covered) // or an nmethod. - if (!sender_blob->is_compiled()) { + if (!sender_blob->is_nmethod()) { return false; } @@ -283,7 +283,7 @@ void frame::patch_pc(Thread* thread, address pc) { DEBUG_ONLY(address old_pc = _pc;) *pc_addr = pc; _pc = pc; // must be set before call to get_deopt_original_pc - address original_pc = CompiledMethod::get_deopt_original_pc(this); + address original_pc = nmethod::get_deopt_original_pc(this); if (original_pc != nullptr) { assert(original_pc == old_pc, "expected original PC to be stored before patching"); _deopt_state = is_deoptimized; @@ -291,7 +291,7 @@ void frame::patch_pc(Thread* thread, address pc) { } else { _deopt_state = not_deoptimized; } - assert(!is_compiled_frame() || !_cb->as_compiled_method()->is_deopt_entry(_pc), "must be"); + assert(!is_compiled_frame() || !_cb->as_nmethod()->is_deopt_entry(_pc), "must be"); #ifdef ASSERT { @@ -415,7 +415,7 @@ frame frame::sender_for_upcall_stub_frame(RegisterMap* map) const { // Verifies the calculated original PC of a deoptimization PC for the // given unextended SP. 
#ifdef ASSERT -void frame::verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp) { +void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp) { frame fr; // This is ugly but it's better than to change {get,set}_original_pc @@ -438,12 +438,12 @@ void frame::adjust_unextended_sp() { // returning to any of these call sites. if (_cb != nullptr) { - CompiledMethod* sender_cm = _cb->as_compiled_method_or_null(); - if (sender_cm != nullptr) { + nmethod* sender_nm = _cb->as_nmethod_or_null(); + if (sender_nm != nullptr) { // If the sender PC is a deoptimization point, get the original PC. - if (sender_cm->is_deopt_entry(_pc) || - sender_cm->is_deopt_mh_entry(_pc)) { - verify_deopt_original_pc(sender_cm, _unextended_sp); + if (sender_nm->is_deopt_entry(_pc) || + sender_nm->is_deopt_mh_entry(_pc)) { + verify_deopt_original_pc(sender_nm, _unextended_sp); } } } diff --git a/src/hotspot/cpu/x86/frame_x86.hpp b/src/hotspot/cpu/x86/frame_x86.hpp index 44c8574c540..f3034ee9263 100644 --- a/src/hotspot/cpu/x86/frame_x86.hpp +++ b/src/hotspot/cpu/x86/frame_x86.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -146,7 +146,7 @@ #ifdef ASSERT // Used in frame::sender_for_{interpreter,compiled}_frame - static void verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp); + static void verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp); #endif public: diff --git a/src/hotspot/cpu/x86/frame_x86.inline.hpp b/src/hotspot/cpu/x86/frame_x86.inline.hpp index f69803d579d..55e263d4a0b 100644 --- a/src/hotspot/cpu/x86/frame_x86.inline.hpp +++ b/src/hotspot/cpu/x86/frame_x86.inline.hpp @@ -66,11 +66,11 @@ inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) { inline void frame::setup(address pc) { adjust_unextended_sp(); - address original_pc = CompiledMethod::get_deopt_original_pc(this); + address original_pc = nmethod::get_deopt_original_pc(this); if (original_pc != nullptr) { _pc = original_pc; _deopt_state = is_deoptimized; - assert(_cb == nullptr || _cb->as_compiled_method()->insts_contains_inclusive(_pc), + assert(_cb == nullptr || _cb->as_nmethod()->insts_contains_inclusive(_pc), "original PC must be in the main code section of the compiled method (or must be immediately following it)"); } else { if (_cb == SharedRuntime::deopt_blob()) { @@ -164,7 +164,7 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) { _cb = CodeCache::find_blob(_pc); adjust_unextended_sp(); - address original_pc = CompiledMethod::get_deopt_original_pc(this); + address original_pc = nmethod::get_deopt_original_pc(this); if (original_pc != nullptr) { _pc = original_pc; _deopt_state = is_deoptimized; @@ -226,8 +226,8 @@ inline int frame::frame_size() const { } inline int frame::compiled_frame_stack_argsize() const { - assert(cb()->is_compiled(), ""); - return (cb()->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord; + assert(cb()->is_nmethod(), ""); + return (cb()->as_nmethod()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord; } inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const { @@ -397,7 +397,7 @@ inline frame frame::sender_for_compiled_frame(RegisterMap* map) const { // Tell GC to use argument 
oopmaps for some runtime stubs that need it. // For C1, the runtime stub might not have oop maps, so set this flag // outside of update_register_map. - if (!_cb->is_compiled()) { // compiled frames do not use callee-saved registers + if (!_cb->is_nmethod()) { // compiled frames do not use callee-saved registers map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread())); if (oop_map() != nullptr) { _oop_map->update_register_map(this, map); diff --git a/src/hotspot/cpu/x86/stackChunkFrameStream_x86.inline.hpp b/src/hotspot/cpu/x86/stackChunkFrameStream_x86.inline.hpp index 3b3e2dee10c..71b938248ec 100644 --- a/src/hotspot/cpu/x86/stackChunkFrameStream_x86.inline.hpp +++ b/src/hotspot/cpu/x86/stackChunkFrameStream_x86.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,7 +34,7 @@ template inline bool StackChunkFrameStream::is_in_frame(void* p0) const { assert(!is_done(), ""); intptr_t* p = (intptr_t*)p0; - int argsize = is_compiled() ? (_cb->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord : 0; + int argsize = is_compiled() ? (_cb->as_nmethod()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord : 0; int frame_size = _cb->frame_size() + argsize; return p == sp() - frame::sender_sp_offset || ((p - unextended_sp()) >= 0 && (p - unextended_sp()) < frame_size); } diff --git a/src/hotspot/os/posix/signals_posix.cpp b/src/hotspot/os/posix/signals_posix.cpp index a62f93ab930..5172853ecd5 100644 --- a/src/hotspot/os/posix/signals_posix.cpp +++ b/src/hotspot/os/posix/signals_posix.cpp @@ -24,8 +24,8 @@ #include "precompiled.hpp" #include "code/codeCache.hpp" -#include "code/compiledMethod.hpp" #include "code/nativeInst.hpp" +#include "code/nmethod.hpp" #include "jvm.h" #include "logging/log.hpp" #include "os_posix.hpp" @@ -613,17 +613,17 @@ int JVM_HANDLE_XXX_SIGNAL(int sig, siginfo_t* info, if (!signal_was_handled && pc != nullptr && os::is_readable_pointer(pc)) { if (NativeDeoptInstruction::is_deopt_at(pc)) { CodeBlob* cb = CodeCache::find_blob(pc); - if (cb != nullptr && cb->is_compiled()) { + if (cb != nullptr && cb->is_nmethod()) { MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, t);) // can call PcDescCache::add_pc_desc - CompiledMethod* cm = cb->as_compiled_method(); - assert(cm->insts_contains_inclusive(pc), ""); - address deopt = cm->is_method_handle_return(pc) ? - cm->deopt_mh_handler_begin() : - cm->deopt_handler_begin(); + nmethod* nm = cb->as_nmethod(); + assert(nm->insts_contains_inclusive(pc), ""); + address deopt = nm->is_method_handle_return(pc) ? 
+ nm->deopt_mh_handler_begin() : + nm->deopt_handler_begin(); assert(deopt != nullptr, ""); frame fr = os::fetch_frame_from_context(uc); - cm->set_original_pc(&fr, pc); + nm->set_original_pc(&fr, pc); os::Posix::ucontext_set_pc(uc, deopt); signal_was_handled = true; diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp index e2d0b653d15..23024aa60cc 100644 --- a/src/hotspot/os/windows/os_windows.cpp +++ b/src/hotspot/os/windows/os_windows.cpp @@ -2781,10 +2781,10 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if (exception_code == EXCEPTION_IN_PAGE_ERROR) { - CompiledMethod* nm = nullptr; + nmethod* nm = nullptr; if (in_java) { CodeBlob* cb = CodeCache::find_blob(pc); - nm = (cb != nullptr) ? cb->as_compiled_method_or_null() : nullptr; + nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr; } bool is_unsafe_arraycopy = (in_native || in_java) && UnsafeCopyMemory::contains_pc(pc); @@ -2833,14 +2833,14 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { // If it is, patch return address to be deopt handler. if (NativeDeoptInstruction::is_deopt_at(pc)) { CodeBlob* cb = CodeCache::find_blob(pc); - if (cb != nullptr && cb->is_compiled()) { - CompiledMethod* cm = cb->as_compiled_method(); + if (cb != nullptr && cb->is_nmethod()) { + nmethod* nm = cb->as_nmethod(); frame fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord); - address deopt = cm->is_method_handle_return(pc) ? - cm->deopt_mh_handler_begin() : - cm->deopt_handler_begin(); - assert(cm->insts_contains_inclusive(pc), ""); - cm->set_original_pc(&fr, pc); + address deopt = nm->is_method_handle_return(pc) ? + nm->deopt_mh_handler_begin() : + nm->deopt_handler_begin(); + assert(nm->insts_contains_inclusive(pc), ""); + nm->set_original_pc(&fr, pc); // Set pc to handler exceptionInfo->ContextRecord->PC_NAME = (DWORD64)deopt; return EXCEPTION_CONTINUE_EXECUTION; diff --git a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp index 242042d4247..e1e81d673a7 100644 --- a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp +++ b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -236,7 +236,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, ((NativeInstruction*)pc)->is_safepoint_poll() && CodeCache::contains((void*) pc) && ((cb = CodeCache::find_blob(pc)) != nullptr) && - cb->is_compiled()) { + cb->is_nmethod()) { if (TraceTraps) { tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (%s)", p2i(pc), USE_POLL_BIT_ONLY ? 
"SIGTRAP" : "SIGSEGV"); @@ -249,7 +249,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, ((NativeInstruction*)pc)->is_safepoint_poll_return() && CodeCache::contains((void*) pc) && ((cb = CodeCache::find_blob(pc)) != nullptr) && - cb->is_compiled()) { + cb->is_nmethod()) { if (TraceTraps) { tty->print_cr("trap: safepoint_poll at return at " INTPTR_FORMAT " (nmethod)", p2i(pc)); } @@ -339,7 +339,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, // BugId 4454115: A read from a MappedByteBuffer can fault here if the // underlying file has been truncated. Do not crash the VM in such a case. CodeBlob* cb = CodeCache::find_blob(pc); - CompiledMethod* nm = cb ? cb->as_compiled_method_or_null() : nullptr; + nmethod* nm = cb ? cb->as_nmethod_or_null() : nullptr; bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc)); if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_arraycopy) { address next_pc = pc + 4; diff --git a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp index 4750ed88056..3dfe9e30f79 100644 --- a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp +++ b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -256,7 +256,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, // here if the underlying file has been truncated. // Do not crash the VM in such a case. CodeBlob* cb = CodeCache::find_blob(pc); - CompiledMethod* nm = (cb != nullptr) ? cb->as_compiled_method_or_null() : nullptr; + nmethod* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr; bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc)); if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_arraycopy) { address next_pc = pc + NativeCall::instruction_size; diff --git a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp index c73e83996ff..593f6494540 100644 --- a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp +++ b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp @@ -440,7 +440,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, // here if the underlying file has been truncated. // Do not crash the VM in such a case. CodeBlob* cb = CodeCache::find_blob(pc); - CompiledMethod* nm = (cb != nullptr) ? cb->as_compiled_method_or_null() : nullptr; + nmethod* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr; bool is_unsafe_arraycopy = thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc); if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_arraycopy) { address next_pc = Assembler::locate_next_instruction(pc); diff --git a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp index 3698896abb7..e1c9dc8a13a 100644 --- a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp +++ b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved. 
* Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -239,7 +239,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, // here if the underlying file has been truncated. // Do not crash the VM in such a case. CodeBlob* cb = CodeCache::find_blob(pc); - CompiledMethod* nm = (cb != nullptr) ? cb->as_compiled_method_or_null() : nullptr; + nmethod* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr; bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc)); if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_arraycopy) { address next_pc = pc + NativeCall::instruction_size; diff --git a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp index 55127058843..6f9ac548ce1 100644 --- a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp +++ b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -323,7 +323,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, // here if the underlying file has been truncated. // Do not crash the VM in such a case. CodeBlob* cb = CodeCache::find_blob(pc); - CompiledMethod* nm = (cb != nullptr) ? cb->as_compiled_method_or_null() : nullptr; + nmethod* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr; if ((nm != nullptr && nm->has_unsafe_access()) || (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc))) { unsafe_access = true; } diff --git a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp index 0b666f29c31..55963b06806 100644 --- a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp +++ b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2021 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -263,7 +263,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, ((NativeInstruction*)pc)->is_safepoint_poll() && CodeCache::contains((void*) pc) && ((cb = CodeCache::find_blob(pc)) != nullptr) && - cb->is_compiled()) { + cb->is_nmethod()) { if (TraceTraps) { tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (%s)", p2i(pc), USE_POLL_BIT_ONLY ? "SIGTRAP" : "SIGSEGV"); @@ -275,7 +275,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, ((NativeInstruction*)pc)->is_safepoint_poll_return() && CodeCache::contains((void*) pc) && ((cb = CodeCache::find_blob(pc)) != nullptr) && - cb->is_compiled()) { + cb->is_nmethod()) { if (TraceTraps) { tty->print_cr("trap: safepoint_poll at return at " INTPTR_FORMAT " (nmethod)", p2i(pc)); } @@ -354,7 +354,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, // BugId 4454115: A read from a MappedByteBuffer can fault here if the // underlying file has been truncated. Do not crash the VM in such a case. CodeBlob* cb = CodeCache::find_blob(pc); - CompiledMethod* nm = (cb != nullptr) ? cb->as_compiled_method_or_null() : nullptr; + nmethod* nm = (cb != nullptr) ? 
cb->as_nmethod_or_null() : nullptr; bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc)); if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_arraycopy) { address next_pc = pc + 4; diff --git a/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp b/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp index 9f13e2bdd2c..079c3b42a9c 100644 --- a/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp +++ b/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -229,7 +229,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, // here if the underlying file has been truncated. // Do not crash the VM in such a case. CodeBlob* cb = CodeCache::find_blob(pc); - CompiledMethod* nm = (cb != nullptr) ? cb->as_compiled_method_or_null() : nullptr; + nmethod* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr; bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc)); if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_arraycopy) { address next_pc = Assembler::locate_next_instruction(pc); diff --git a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp index 5aa65e705d9..9ac1152a013 100644 --- a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp +++ b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -309,7 +309,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, // BugId 4454115: A read from a MappedByteBuffer can fault here if the // underlying file has been truncated. Do not crash the VM in such a case. CodeBlob* cb = CodeCache::find_blob(pc); - CompiledMethod* nm = (cb != nullptr) ? cb->as_compiled_method_or_null() : nullptr; + nmethod* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr; if (nm != nullptr && nm->has_unsafe_access()) { // We don't really need a stub here! Just set the pending exception and // continue at the next instruction after the faulting read. Returning diff --git a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp index 4dcaedf71da..b37a8d1f3a6 100644 --- a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp +++ b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp @@ -259,7 +259,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, // here if the underlying file has been truncated. // Do not crash the VM in such a case. CodeBlob* cb = CodeCache::find_blob(pc); - CompiledMethod* nm = (cb != nullptr) ? cb->as_compiled_method_or_null() : nullptr; + nmethod* nm = (cb != nullptr) ? 
cb->as_nmethod_or_null() : nullptr; bool is_unsafe_arraycopy = thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc); if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_arraycopy) { address next_pc = Assembler::locate_next_instruction(pc); diff --git a/src/hotspot/share/ci/ciEnv.cpp b/src/hotspot/share/ci/ciEnv.cpp index caf92cd0bfc..81db61ad993 100644 --- a/src/hotspot/share/ci/ciEnv.cpp +++ b/src/hotspot/share/ci/ciEnv.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1142,7 +1142,7 @@ void ciEnv::register_method(ciMethod* target, if (entry_bci == InvocationEntryBci) { // If there is an old version we're done with it - CompiledMethod* old = method->code(); + nmethod* old = method->code(); if (TraceMethodReplacement && old != nullptr) { ResourceMark rm; char *method_name = method->name_and_sig_as_C_string(); diff --git a/src/hotspot/share/ci/ciMethod.cpp b/src/hotspot/share/ci/ciMethod.cpp index d54e8a4d697..0b41a257a4b 100644 --- a/src/hotspot/share/ci/ciMethod.cpp +++ b/src/hotspot/share/ci/ciMethod.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1129,7 +1129,7 @@ int ciMethod::code_size_for_inlining() { int ciMethod::inline_instructions_size() { if (_inline_instructions_size == -1) { GUARDED_VM_ENTRY( - CompiledMethod* code = get_Method()->code(); + nmethod* code = get_Method()->code(); if (code != nullptr && (code->comp_level() == CompLevel_full_optimization)) { int isize = code->insts_end() - code->verified_entry_point() - code->skipped_instructions_size(); _inline_instructions_size = isize > 0 ? isize : 0; @@ -1145,7 +1145,7 @@ int ciMethod::inline_instructions_size() { // ciMethod::log_nmethod_identity void ciMethod::log_nmethod_identity(xmlStream* log) { GUARDED_VM_ENTRY( - CompiledMethod* code = get_Method()->code(); + nmethod* code = get_Method()->code(); if (code != nullptr) { code->log_identity(log); } diff --git a/src/hotspot/share/ci/ciReplay.cpp b/src/hotspot/share/ci/ciReplay.cpp index 449d49a2368..6edbadcec4f 100644 --- a/src/hotspot/share/ci/ciReplay.cpp +++ b/src/hotspot/share/ci/ciReplay.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -804,7 +804,7 @@ class CompileReplay : public StackObj { } } // Make sure the existence of a prior compile doesn't stop this one - CompiledMethod* nm = (entry_bci != InvocationEntryBci) ? method->lookup_osr_nmethod_for(entry_bci, comp_level, true) : method->code(); + nmethod* nm = (entry_bci != InvocationEntryBci) ? 
method->lookup_osr_nmethod_for(entry_bci, comp_level, true) : method->code(); if (nm != nullptr) { nm->make_not_entrant(); } diff --git a/src/hotspot/share/classfile/javaClasses.cpp b/src/hotspot/share/classfile/javaClasses.cpp index 39f0a9e4ba4..08ed98c24fb 100644 --- a/src/hotspot/share/classfile/javaClasses.cpp +++ b/src/hotspot/share/classfile/javaClasses.cpp @@ -2417,7 +2417,7 @@ static void print_stack_element_to_stream(outputStream* st, Handle mirror, int m // Neither sourcename nor linenumber buf_off += os::snprintf_checked(buf + buf_off, buf_size - buf_off, "Unknown Source)"); } - CompiledMethod* nm = method->code(); + nmethod* nm = method->code(); if (WizardMode && nm != nullptr) { os::snprintf_checked(buf + buf_off, buf_size - buf_off, "(nmethod " INTPTR_FORMAT ")", (intptr_t)nm); } @@ -2543,7 +2543,7 @@ void java_lang_Throwable::fill_in_stack_trace(Handle throwable, const methodHand RegisterMap::ProcessFrames::skip, RegisterMap::WalkContinuation::include); int decode_offset = 0; - CompiledMethod* nm = nullptr; + nmethod* nm = nullptr; bool skip_fillInStackTrace_check = false; bool skip_throwableInit_check = false; bool skip_hidden = !ShowHiddenFrames; @@ -2587,10 +2587,10 @@ void java_lang_Throwable::fill_in_stack_trace(Handle throwable, const methodHand // HMMM QQQ might be nice to have frame return nm as null if cb is non-null // but non nmethod fr = fr.sender(&map); - if (cb == nullptr || !cb->is_compiled()) { + if (cb == nullptr || !cb->is_nmethod()) { continue; } - nm = cb->as_compiled_method(); + nm = cb->as_nmethod(); assert(nm->method() != nullptr, "must be"); if (nm->method()->is_native()) { method = nm->method(); diff --git a/src/hotspot/share/code/codeBehaviours.cpp b/src/hotspot/share/code/codeBehaviours.cpp index 279eca4738e..34fda41b0eb 100644 --- a/src/hotspot/share/code/codeBehaviours.cpp +++ b/src/hotspot/share/code/codeBehaviours.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,7 +29,7 @@ CompiledICProtectionBehaviour* CompiledICProtectionBehaviour::_current = nullptr; -bool DefaultICProtectionBehaviour::lock(CompiledMethod* method) { +bool DefaultICProtectionBehaviour::lock(nmethod* method) { if (is_safe(method)) { return false; } @@ -37,10 +37,10 @@ bool DefaultICProtectionBehaviour::lock(CompiledMethod* method) { return true; } -void DefaultICProtectionBehaviour::unlock(CompiledMethod* method) { +void DefaultICProtectionBehaviour::unlock(nmethod* method) { CompiledIC_lock->unlock(); } -bool DefaultICProtectionBehaviour::is_safe(CompiledMethod* method) { +bool DefaultICProtectionBehaviour::is_safe(nmethod* method) { return SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->owned_by_self(); } diff --git a/src/hotspot/share/code/codeBehaviours.hpp b/src/hotspot/share/code/codeBehaviours.hpp index f026957aa97..0350f5752f6 100644 --- a/src/hotspot/share/code/codeBehaviours.hpp +++ b/src/hotspot/share/code/codeBehaviours.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,24 +27,24 @@ #include "memory/allocation.hpp" -class CompiledMethod; +class nmethod; class CompiledICProtectionBehaviour { static CompiledICProtectionBehaviour* _current; public: - virtual bool lock(CompiledMethod* method) = 0; - virtual void unlock(CompiledMethod* method) = 0; - virtual bool is_safe(CompiledMethod* method) = 0; + virtual bool lock(nmethod* method) = 0; + virtual void unlock(nmethod* method) = 0; + virtual bool is_safe(nmethod* method) = 0; static CompiledICProtectionBehaviour* current() { return _current; } static void set_current(CompiledICProtectionBehaviour* current) { _current = current; } }; class DefaultICProtectionBehaviour: public CompiledICProtectionBehaviour, public CHeapObj { - virtual bool lock(CompiledMethod* method); - virtual void unlock(CompiledMethod* method); - virtual bool is_safe(CompiledMethod* method); + virtual bool lock(nmethod* method); + virtual void unlock(nmethod* method); + virtual bool is_safe(nmethod* method); }; #endif // SHARE_CODE_CODEBEHAVIOURS_HPP diff --git a/src/hotspot/share/code/codeBlob.cpp b/src/hotspot/share/code/codeBlob.cpp index d24e29c288d..83418742988 100644 --- a/src/hotspot/share/code/codeBlob.cpp +++ b/src/hotspot/share/code/codeBlob.cpp @@ -54,9 +54,6 @@ #include "c1/c1_Runtime1.hpp" #endif -const char* CodeBlob::compiler_name() const { - return compilertype2name(_type); -} unsigned int CodeBlob::align_code_offset(int offset) { // align the size to CodeEntryAlignment @@ -64,7 +61,6 @@ unsigned int CodeBlob::align_code_offset(int offset) { return align_up(offset + header_size, CodeEntryAlignment) - header_size; } - // This must be consistent with the CodeBlob constructor's layout actions. unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) { unsigned int size = header_size; @@ -77,99 +73,79 @@ unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) { return size; } -CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled) : - _code_begin(layout.code_begin()), - _code_end(layout.code_end()), - _content_begin(layout.content_begin()), - _data_end(layout.data_end()), - _relocation_begin(layout.relocation_begin()), - _relocation_end(layout.relocation_end()), +#ifdef ASSERT +void CodeBlob::verify_parameters() { + assert(is_aligned(_size, oopSize), "unaligned size"); + assert(is_aligned(_header_size, oopSize), "unaligned size"); + assert(is_aligned(_relocation_size, oopSize), "unaligned size"); + assert(_data_offset <= size(), "codeBlob is too small"); + assert(code_end() == content_end(), "must be the same - see code_end()"); +#ifdef COMPILER1 + // probably wrong for tiered + assert(frame_size() >= -1, "must use frame size or -1 for runtime stubs"); +#endif // COMPILER1 +} +#endif + +CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, int header_size, int relocation_size, + int content_offset, int code_offset, int frame_complete_offset, int data_offset, + int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments) : _oop_maps(oop_maps), _name(name), - _size(layout.size()), - _header_size(layout.header_size()), + _size(size), + _header_size(header_size), + _relocation_size(relocation_size), + _content_offset(content_offset), + _code_offset(code_offset), _frame_complete_offset(frame_complete_offset), - _data_offset(layout.data_offset()), + 
_data_offset(data_offset), _frame_size(frame_size), - _caller_must_gc_arguments(caller_must_gc_arguments), - _is_compiled(compiled), - _type(type) + S390_ONLY(_ctable_offset(0) COMMA) + _kind(kind), + _caller_must_gc_arguments(caller_must_gc_arguments) { - assert(is_aligned(layout.size(), oopSize), "unaligned size"); - assert(is_aligned(layout.header_size(), oopSize), "unaligned size"); - assert(is_aligned(layout.relocation_size(), oopSize), "unaligned size"); - assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()"); -#ifdef COMPILER1 - // probably wrong for tiered - assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs"); -#endif // COMPILER1 - S390_ONLY(_ctable_offset = 0;) // avoid uninitialized fields + DEBUG_ONLY( verify_parameters(); ) } -CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, CodeBuffer* cb /*UNUSED*/, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled) : - _code_begin(layout.code_begin()), - _code_end(layout.code_end()), - _content_begin(layout.content_begin()), - _data_end(layout.data_end()), - _relocation_begin(layout.relocation_begin()), - _relocation_end(layout.relocation_end()), +CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, int header_size, + int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) : + _oop_maps(nullptr), // will be set by set_oop_maps() call _name(name), - _size(layout.size()), - _header_size(layout.header_size()), + _size(size), + _header_size(header_size), + _relocation_size(align_up(cb->total_relocation_size(), oopSize)), + _content_offset(CodeBlob::align_code_offset(_header_size + _relocation_size)), + _code_offset(_content_offset + cb->total_offset_of(cb->insts())), _frame_complete_offset(frame_complete_offset), - _data_offset(layout.data_offset()), + _data_offset(_content_offset + align_up(cb->total_content_size(), oopSize)), _frame_size(frame_size), - _caller_must_gc_arguments(caller_must_gc_arguments), - _is_compiled(compiled), - _type(type) + S390_ONLY(_ctable_offset(0) COMMA) + _kind(kind), + _caller_must_gc_arguments(caller_must_gc_arguments) { - assert(is_aligned(_size, oopSize), "unaligned size"); - assert(is_aligned(_header_size, oopSize), "unaligned size"); - assert(_data_offset <= _size, "codeBlob is too small"); - assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()"); + DEBUG_ONLY( verify_parameters(); ) set_oop_maps(oop_maps); -#ifdef COMPILER1 - // probably wrong for tiered - assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs"); -#endif // COMPILER1 - S390_ONLY(_ctable_offset = 0;) // avoid uninitialized fields } - -// Creates a simple CodeBlob. Sets up the size of the different regions. -RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size) - : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, locs_size, size), frame_complete, 0, nullptr, false /* caller_must_gc_arguments */) +// Simple CodeBlob used for simple BufferBlob. 
+CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, int header_size) : + _oop_maps(nullptr), + _name(name), + _size(size), + _header_size(header_size), + _relocation_size(0), + _content_offset(CodeBlob::align_code_offset(header_size)), + _code_offset(_content_offset), + _frame_complete_offset(CodeOffsets::frame_never_safe), + _data_offset(size), + _frame_size(0), + S390_ONLY(_ctable_offset(0) COMMA) + _kind(kind), + _caller_must_gc_arguments(false) { - assert(is_aligned(locs_size, oopSize), "unaligned size"); -} - - -// Creates a RuntimeBlob from a CodeBuffer -// and copy code and relocation info. -RuntimeBlob::RuntimeBlob( - const char* name, - CodeBuffer* cb, - int header_size, - int size, - int frame_complete, - int frame_size, - OopMapSet* oop_maps, - bool caller_must_gc_arguments -) : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) { - cb->copy_code_and_locs_to(this); -} - -void RuntimeBlob::free(RuntimeBlob* blob) { - assert(blob != nullptr, "caller must check for nullptr"); - ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock - blob->purge(true /* free_code_cache_data */, true /* unregister_nmethod */); - { - MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - CodeCache::free(blob); - } - // Track memory usage statistic after releasing CodeCache_lock - MemoryService::track_code_cache_memory_usage(); + assert(is_aligned(size, oopSize), "unaligned size"); + assert(is_aligned(header_size, oopSize), "unaligned size"); } void CodeBlob::purge(bool free_code_cache_data, bool unregister_nmethod) { @@ -191,6 +167,46 @@ void CodeBlob::set_oop_maps(OopMapSet* p) { } } +const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) const { + assert(_oop_maps != nullptr, "nope"); + return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin()); +} + +void CodeBlob::print_code_on(outputStream* st) { + ResourceMark m; + Disassembler::decode(this, st); +} + +//----------------------------------------------------------------------------------------- +// Creates a RuntimeBlob from a CodeBuffer and copy code and relocation info. + +RuntimeBlob::RuntimeBlob( + const char* name, + CodeBlobKind kind, + CodeBuffer* cb, + int size, + int header_size, + int frame_complete, + int frame_size, + OopMapSet* oop_maps, + bool caller_must_gc_arguments) + : CodeBlob(name, kind, cb, size, header_size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) +{ + cb->copy_code_and_locs_to(this); +} + +void RuntimeBlob::free(RuntimeBlob* blob) { + assert(blob != nullptr, "caller must check for nullptr"); + ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock + blob->purge(true /* free_code_cache_data */, true /* unregister_nmethod */); + { + MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + CodeCache::free(blob); + } + // Track memory usage statistic after releasing CodeCache_lock + MemoryService::track_code_cache_memory_usage(); +} + void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) { // Do not hold the CodeCache lock during name formatting. 
assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub"); @@ -230,22 +246,11 @@ void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const cha MemoryService::track_code_cache_memory_usage(); } -const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) const { - assert(_oop_maps != nullptr, "nope"); - return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin()); -} - -void CodeBlob::print_code_on(outputStream* st) { - ResourceMark m; - Disassembler::decode(this, st); -} - //---------------------------------------------------------------------------------------------------- // Implementation of BufferBlob - -BufferBlob::BufferBlob(const char* name, int size) -: RuntimeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0) +BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, int size) +: RuntimeBlob(name, kind, size, sizeof(BufferBlob)) {} BufferBlob* BufferBlob::create(const char* name, uint buffer_size) { @@ -259,7 +264,7 @@ BufferBlob* BufferBlob::create(const char* name, uint buffer_size) { assert(name != nullptr, "must provide a name"); { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - blob = new (size) BufferBlob(name, size); + blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, size); } // Track memory usage statistic after releasing CodeCache_lock MemoryService::track_code_cache_memory_usage(); @@ -268,10 +273,11 @@ BufferBlob* BufferBlob::create(const char* name, uint buffer_size) { } -BufferBlob::BufferBlob(const char* name, int size, CodeBuffer* cb) - : RuntimeBlob(name, cb, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, 0, nullptr) +BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size) + : RuntimeBlob(name, kind, cb, size, sizeof(BufferBlob), CodeOffsets::frame_never_safe, 0, nullptr) {} +// Used by gtest BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) { ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock @@ -280,7 +286,7 @@ BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) { assert(name != nullptr, "must provide a name"); { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - blob = new (size) BufferBlob(name, size, cb); + blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, cb, size); } // Track memory usage statistic after releasing CodeCache_lock MemoryService::track_code_cache_memory_usage(); @@ -301,7 +307,7 @@ void BufferBlob::free(BufferBlob *blob) { // Implementation of AdapterBlob AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) : - BufferBlob("I2C/C2I adapters", size, cb) { + BufferBlob("I2C/C2I adapters", CodeBlobKind::Adapter, cb, size) { CodeCache::commit(this); } @@ -322,6 +328,9 @@ AdapterBlob* AdapterBlob::create(CodeBuffer* cb) { return blob; } +//---------------------------------------------------------------------------------------------------- +// Implementation of VtableBlob + void* VtableBlob::operator new(size_t s, unsigned size) throw() { // Handling of allocation failure stops compilation and prints a bunch of // stuff, which requires unlocking the CodeCache_lock, so that the Compile_lock @@ -333,7 +342,7 @@ void* VtableBlob::operator new(size_t s, unsigned size) throw() { } VtableBlob::VtableBlob(const char* name, int size) : - BufferBlob(name, size) { + BufferBlob(name, CodeBlobKind::Vtable, size) { } VtableBlob* VtableBlob::create(const char* name, int 
buffer_size) { @@ -404,7 +413,8 @@ RuntimeStub::RuntimeStub( OopMapSet* oop_maps, bool caller_must_gc_arguments ) -: RuntimeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) +: RuntimeBlob(name, CodeBlobKind::Runtime_Stub, cb, size, sizeof(RuntimeStub), + frame_complete, frame_size, oop_maps, caller_must_gc_arguments) { } @@ -460,7 +470,8 @@ DeoptimizationBlob::DeoptimizationBlob( int unpack_with_reexecution_offset, int frame_size ) -: SingletonBlob("DeoptimizationBlob", cb, sizeof(DeoptimizationBlob), size, frame_size, oop_maps) +: SingletonBlob("DeoptimizationBlob", CodeBlobKind::Deoptimization, cb, + size, sizeof(DeoptimizationBlob), frame_size, oop_maps) { _unpack_offset = unpack_offset; _unpack_with_exception = unpack_with_exception_offset; @@ -509,7 +520,8 @@ UncommonTrapBlob::UncommonTrapBlob( OopMapSet* oop_maps, int frame_size ) -: SingletonBlob("UncommonTrapBlob", cb, sizeof(UncommonTrapBlob), size, frame_size, oop_maps) +: SingletonBlob("UncommonTrapBlob", CodeBlobKind::Uncommon_Trap, cb, + size, sizeof(UncommonTrapBlob), frame_size, oop_maps) {} @@ -545,7 +557,8 @@ ExceptionBlob::ExceptionBlob( OopMapSet* oop_maps, int frame_size ) -: SingletonBlob("ExceptionBlob", cb, sizeof(ExceptionBlob), size, frame_size, oop_maps) +: SingletonBlob("ExceptionBlob", CodeBlobKind::Exception, cb, + size, sizeof(ExceptionBlob), frame_size, oop_maps) {} @@ -580,7 +593,8 @@ SafepointBlob::SafepointBlob( OopMapSet* oop_maps, int frame_size ) -: SingletonBlob("SafepointBlob", cb, sizeof(SafepointBlob), size, frame_size, oop_maps) +: SingletonBlob("SafepointBlob", CodeBlobKind::Safepoint, cb, + size, sizeof(SafepointBlob), frame_size, oop_maps) {} @@ -602,6 +616,61 @@ SafepointBlob* SafepointBlob::create( return blob; } +//---------------------------------------------------------------------------------------------------- +// Implementation of UpcallStub + +UpcallStub::UpcallStub(const char* name, CodeBuffer* cb, int size, jobject receiver, ByteSize frame_data_offset) : + RuntimeBlob(name, CodeBlobKind::Upcall, cb, size, sizeof(UpcallStub), + CodeOffsets::frame_never_safe, 0 /* no frame size */, + /* oop maps = */ nullptr, /* caller must gc arguments = */ false), + _receiver(receiver), + _frame_data_offset(frame_data_offset) +{ + CodeCache::commit(this); +} + +void* UpcallStub::operator new(size_t s, unsigned size) throw() { + return CodeCache::allocate(size, CodeBlobType::NonNMethod); +} + +UpcallStub* UpcallStub::create(const char* name, CodeBuffer* cb, jobject receiver, ByteSize frame_data_offset) { + ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock + + UpcallStub* blob = nullptr; + unsigned int size = CodeBlob::allocation_size(cb, sizeof(UpcallStub)); + { + MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + blob = new (size) UpcallStub(name, cb, size, receiver, frame_data_offset); + } + if (blob == nullptr) { + return nullptr; // caller must handle this + } + + // Track memory usage statistic after releasing CodeCache_lock + MemoryService::track_code_cache_memory_usage(); + + trace_new_stub(blob, "UpcallStub"); + + return blob; +} + +void UpcallStub::oops_do(OopClosure* f, const frame& frame) { + frame_data_for_frame(frame)->old_handles->oops_do(f); +} + +JavaFrameAnchor* UpcallStub::jfa_for_frame(const frame& frame) const { + return &frame_data_for_frame(frame)->jfa; +} + +void UpcallStub::free(UpcallStub* blob) { + assert(blob != nullptr, "caller must check for nullptr"); + 
JNIHandles::destroy_global(blob->receiver()); + RuntimeBlob::free(blob); +} + +void UpcallStub::preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { + ShouldNotReachHere(); // caller should never have to gc arguments +} //---------------------------------------------------------------------------------------------------- // Verification and printing @@ -678,10 +747,6 @@ void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const print_on(st); } -void RuntimeBlob::verify() { - ShouldNotReachHere(); -} - void BufferBlob::verify() { // unimplemented } @@ -730,60 +795,6 @@ void DeoptimizationBlob::print_value_on(outputStream* st) const { st->print_cr("Deoptimization (frame not available)"); } -// Implementation of UpcallStub - -UpcallStub::UpcallStub(const char* name, CodeBuffer* cb, int size, jobject receiver, ByteSize frame_data_offset) : - RuntimeBlob(name, cb, sizeof(UpcallStub), size, CodeOffsets::frame_never_safe, 0 /* no frame size */, - /* oop maps = */ nullptr, /* caller must gc arguments = */ false), - _receiver(receiver), - _frame_data_offset(frame_data_offset) { - CodeCache::commit(this); -} - -void* UpcallStub::operator new(size_t s, unsigned size) throw() { - return CodeCache::allocate(size, CodeBlobType::NonNMethod); -} - -UpcallStub* UpcallStub::create(const char* name, CodeBuffer* cb, jobject receiver, ByteSize frame_data_offset) { - ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock - - UpcallStub* blob = nullptr; - unsigned int size = CodeBlob::allocation_size(cb, sizeof(UpcallStub)); - { - MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - blob = new (size) UpcallStub(name, cb, size, receiver, frame_data_offset); - } - if (blob == nullptr) { - return nullptr; // caller must handle this - } - - // Track memory usage statistic after releasing CodeCache_lock - MemoryService::track_code_cache_memory_usage(); - - trace_new_stub(blob, "UpcallStub"); - - return blob; -} - -void UpcallStub::oops_do(OopClosure* f, const frame& frame) { - frame_data_for_frame(frame)->old_handles->oops_do(f); -} - -JavaFrameAnchor* UpcallStub::jfa_for_frame(const frame& frame) const { - return &frame_data_for_frame(frame)->jfa; -} - -void UpcallStub::free(UpcallStub* blob) { - assert(blob != nullptr, "caller must check for nullptr"); - JNIHandles::destroy_global(blob->receiver()); - RuntimeBlob::free(blob); -} - -void UpcallStub::preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { - ShouldNotReachHere(); // caller should never have to gc arguments -} - -// Misc. void UpcallStub::verify() { // unimplemented } diff --git a/src/hotspot/share/code/codeBlob.hpp b/src/hotspot/share/code/codeBlob.hpp index 56caa906ecb..134d60e5cb5 100644 --- a/src/hotspot/share/code/codeBlob.hpp +++ b/src/hotspot/share/code/codeBlob.hpp @@ -52,8 +52,7 @@ enum class CodeBlobType { // CodeBlob - superclass for all entries in the CodeCache. // // Subtypes are: -// CompiledMethod : Compiled Java methods (include method that calls to native code) -// nmethod : JIT Compiled Java methods +// nmethod : JIT Compiled Java methods // RuntimeBlob : Non-compiled method code; generated glue code // BufferBlob : Used for non-relocatable code such as interpreter, stubroutines, etc. 
 //  AdapterBlob : Used to hold C2I/I2C adapters
@@ -75,8 +74,22 @@ enum class CodeBlobType {
 //    - instruction space
 //    - data space

+enum class CodeBlobKind : u1 {
+  None,
+  Nmethod,
+  Buffer,
+  Adapter,
+  Vtable,
+  MH_Adapter,
+  Runtime_Stub,
+  Deoptimization,
+  Exception,
+  Safepoint,
+  Uncommon_Trap,
+  Upcall,
+  Number_Of_Kinds
+};
-class CodeBlobLayout;

 class UpcallStub;   // for as_upcall_stub()
 class RuntimeStub;  // for as_runtime_stub()
 class JavaFrameAnchor; // for UpcallStub::jfa_for_frame
@@ -87,23 +100,15 @@ class CodeBlob {
   friend class CodeCacheDumper;

 protected:
-  // order fields from large to small to minimize padding between fields
-  address _code_begin;
-  address _code_end;
-  address _content_begin;   // address to where content region begins (this includes consts, insts, stubs)
-  // address _content_end - not required, for all CodeBlobs _code_end == _content_end for now
-  address _data_end;
-  address _relocation_begin;
-  address _relocation_end;
-  ImmutableOopMapSet* _oop_maps;   // OopMap for this CodeBlob
-  const char* _name;
-  S390_ONLY(int _ctable_offset;)
   int _size;                       // total size of CodeBlob in bytes
   int _header_size;                // size of header (depends on subclass)
+  int _relocation_size;            // size of relocation
+  int _content_offset;             // offset to where content region begins (this includes consts, insts, stubs)
+  int _code_offset;                // offset to where instructions region begins (this includes insts, stubs)
   int _frame_complete_offset;      // instruction offsets in [0.._frame_complete_offset) have
                                    // not finished setting up their frame. Beware of pc's in
                                    // that range. There is a similar range(s) on returns
@@ -111,28 +116,32 @@ protected:
   int _data_offset;                // offset to where data region begins
   int _frame_size;                 // size of stack frame in words (NOT slots. On x64 these are 64bit words)

-  bool _caller_must_gc_arguments;
+  S390_ONLY(int _ctable_offset;)

-  bool _is_compiled;
-  const CompilerType _type;        // CompilerType
+  CodeBlobKind _kind;              // Kind of this code blob
+
+  bool _caller_must_gc_arguments;

 #ifndef PRODUCT
   AsmRemarks _asm_remarks;
   DbgStrings _dbg_strings;
 #endif // not PRODUCT

-  CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset,
-           int frame_size, ImmutableOopMapSet* oop_maps,
-           bool caller_must_gc_arguments, bool compiled = false);
-  CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset,
-           int frame_size, OopMapSet* oop_maps,
-           bool caller_must_gc_arguments, bool compiled = false);
+  DEBUG_ONLY( void verify_parameters() );
+
+  CodeBlob(const char* name, CodeBlobKind kind, int size, int header_size, int relocation_size,
+           int content_offset, int code_offset, int data_offset, int frame_complete_offset,
+           int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
+
+  CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, int header_size,
+           int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);
+
+  // Simple CodeBlob used for simple BufferBlob.
+  CodeBlob(const char* name, CodeBlobKind kind, int size, int header_size);

   void operator delete(void* p) { }

 public:
-  // Only used by unit test.
-  CodeBlob() : _type(compiler_none) {}

   virtual ~CodeBlob() {
     assert(_oop_maps == nullptr, "Not flushed");
@@ -146,44 +155,42 @@ public:
   virtual void purge(bool free_code_cache_data, bool unregister_nmethod);

   // Typing
-  virtual bool is_buffer_blob() const { return false; }
-  virtual bool is_nmethod() const { return false; }
-  virtual bool is_runtime_stub() const { return false; }
-  virtual bool is_deoptimization_stub() const { return false; }
-  virtual bool is_uncommon_trap_stub() const { return false; }
-  virtual bool is_exception_stub() const { return false; }
-  virtual bool is_safepoint_stub() const { return false; }
-  virtual bool is_adapter_blob() const { return false; }
-  virtual bool is_vtable_blob() const { return false; }
-  virtual bool is_method_handles_adapter_blob() const { return false; }
-  virtual bool is_upcall_stub() const { return false; }
-  bool is_compiled() const { return _is_compiled; }
-  const bool* is_compiled_addr() const { return &_is_compiled; }
-
-  inline bool is_compiled_by_c1() const { return _type == compiler_c1; };
-  inline bool is_compiled_by_c2() const { return _type == compiler_c2; };
-  inline bool is_compiled_by_jvmci() const { return _type == compiler_jvmci; };
-  const char* compiler_name() const;
-  CompilerType compiler_type() const { return _type; }
+  bool is_nmethod() const { return _kind == CodeBlobKind::Nmethod; }
+  bool is_buffer_blob() const { return _kind == CodeBlobKind::Buffer; }
+  bool is_runtime_stub() const { return _kind == CodeBlobKind::Runtime_Stub; }
+  bool is_deoptimization_stub() const { return _kind == CodeBlobKind::Deoptimization; }
+  bool is_uncommon_trap_stub() const { return _kind == CodeBlobKind::Uncommon_Trap; }
+  bool is_exception_stub() const { return _kind == CodeBlobKind::Exception; }
+  bool is_safepoint_stub() const { return _kind == CodeBlobKind::Safepoint; }
+  bool is_adapter_blob() const { return _kind == CodeBlobKind::Adapter; }
+  bool is_vtable_blob() const { return _kind == CodeBlobKind::Vtable; }
+  bool is_method_handles_adapter_blob() const { return _kind == CodeBlobKind::MH_Adapter; }
+  bool is_upcall_stub() const { return _kind == CodeBlobKind::Upcall; }

   // Casting
-  nmethod* as_nmethod_or_null() { return is_nmethod() ? (nmethod*) this : nullptr; }
-  nmethod* as_nmethod() { assert(is_nmethod(), "must be nmethod"); return (nmethod*) this; }
-  CompiledMethod* as_compiled_method_or_null() { return is_compiled() ? (CompiledMethod*) this : nullptr; }
-  CompiledMethod* as_compiled_method() { assert(is_compiled(), "must be compiled"); return (CompiledMethod*) this; }
-  CodeBlob* as_codeblob_or_null() const { return (CodeBlob*) this; }
-  UpcallStub* as_upcall_stub() const { assert(is_upcall_stub(), "must be upcall stub"); return (UpcallStub*) this; }
-  RuntimeStub* as_runtime_stub() const { assert(is_runtime_stub(), "must be runtime blob"); return (RuntimeStub*) this; }
+  nmethod* as_nmethod_or_null() { return is_nmethod() ? (nmethod*) this : nullptr; }
+  nmethod* as_nmethod() { assert(is_nmethod(), "must be nmethod"); return (nmethod*) this; }
+  CodeBlob* as_codeblob_or_null() const { return (CodeBlob*) this; }
+  UpcallStub* as_upcall_stub() const { assert(is_upcall_stub(), "must be upcall stub"); return (UpcallStub*) this; }
+  RuntimeStub* as_runtime_stub() const { assert(is_runtime_stub(), "must be runtime blob"); return (RuntimeStub*) this; }

   // Boundaries
-  address header_begin() const { return (address) this; }
-  relocInfo* relocation_begin() const { return (relocInfo*) _relocation_begin; };
-  relocInfo* relocation_end() const { return (relocInfo*) _relocation_end; }
-  address content_begin() const { return _content_begin; }
-  address content_end() const { return _code_end; } // _code_end == _content_end is true for all types of blobs for now, it is also checked in the constructor
-  address code_begin() const { return _code_begin; }
-  address code_end() const { return _code_end; }
-  address data_end() const { return _data_end; }
+  address header_begin() const { return (address) this; }
+  address header_end() const { return ((address) this) + _header_size; }
+  relocInfo* relocation_begin() const { return (relocInfo*) header_end(); }
+  relocInfo* relocation_end() const { return (relocInfo*)(header_end() + _relocation_size); }
+  address content_begin() const { return (address) header_begin() + _content_offset; }
+  address content_end() const { return (address) header_begin() + _data_offset; }
+  address code_begin() const { return (address) header_begin() + _code_offset; }
+  // code_end == content_end is true for all types of blobs for now, it is also checked in the constructor
+  address code_end() const { return (address) header_begin() + _data_offset; }
+  address data_begin() const { return (address) header_begin() + _data_offset; }
+  address data_end() const { return (address) header_begin() + _size; }
+
+  // Offsets
+  int content_offset() const { return _content_offset; }
+  int code_offset() const { return _code_offset; }
+  int data_offset() const { return _data_offset; }

   // This field holds the beginning of the const section in the old code buffer.
// It is needed to fix relocations of pc-relative loads when resizing the @@ -192,17 +199,16 @@ public: void set_ctable_begin(address ctable) { S390_ONLY(_ctable_offset = ctable - header_begin();) } // Sizes - int size() const { return _size; } - int header_size() const { return _header_size; } - int relocation_size() const { return pointer_delta_as_int((address) relocation_end(), (address) relocation_begin()); } - int content_size() const { return pointer_delta_as_int(content_end(), content_begin()); } - int code_size() const { return pointer_delta_as_int(code_end(), code_begin()); } + int size() const { return _size; } + int header_size() const { return _header_size; } + int relocation_size() const { return pointer_delta_as_int((address) relocation_end(), (address) relocation_begin()); } + int content_size() const { return pointer_delta_as_int(content_end(), content_begin()); } + int code_size() const { return pointer_delta_as_int(code_end(), code_begin()); } + // Only used from CodeCache::free_unused_tail() after the Interpreter blob was trimmed void adjust_size(size_t used) { _size = (int)used; _data_offset = (int)used; - _code_end = (address)this + used; - _data_end = (address)this + used; } // Containment @@ -213,8 +219,6 @@ public: code_contains(addr) && addr >= code_begin() + _frame_complete_offset; } int frame_complete_offset() const { return _frame_complete_offset; } - virtual bool is_not_entrant() const { return false; } - // OopMap for frame ImmutableOopMapSet* oop_maps() const { return _oop_maps; } void set_oop_maps(OopMapSet* p); @@ -260,97 +264,8 @@ public: #endif }; -class CodeBlobLayout : public StackObj { -private: - int _size; - int _header_size; - int _relocation_size; - int _content_offset; - int _code_offset; - int _data_offset; - address _code_begin; - address _code_end; - address _content_begin; - address _content_end; - address _data_end; - address _relocation_begin; - address _relocation_end; - -public: - CodeBlobLayout(address code_begin, address code_end, address content_begin, address content_end, address data_end, address relocation_begin, address relocation_end) : - _size(0), - _header_size(0), - _relocation_size(0), - _content_offset(0), - _code_offset(0), - _data_offset(0), - _code_begin(code_begin), - _code_end(code_end), - _content_begin(content_begin), - _content_end(content_end), - _data_end(data_end), - _relocation_begin(relocation_begin), - _relocation_end(relocation_end) - { - } - - CodeBlobLayout(const address start, int size, int header_size, int relocation_size, int data_offset) : - _size(size), - _header_size(header_size), - _relocation_size(relocation_size), - _content_offset(CodeBlob::align_code_offset(_header_size + _relocation_size)), - _code_offset(_content_offset), - _data_offset(data_offset) - { - assert(is_aligned(_relocation_size, oopSize), "unaligned size"); - - _code_begin = (address) start + _code_offset; - _code_end = (address) start + _data_offset; - - _content_begin = (address) start + _content_offset; - _content_end = (address) start + _data_offset; - - _data_end = (address) start + _size; - _relocation_begin = (address) start + _header_size; - _relocation_end = _relocation_begin + _relocation_size; - } - - CodeBlobLayout(const address start, int size, int header_size, const CodeBuffer* cb) : - _size(size), - _header_size(header_size), - _relocation_size(align_up(cb->total_relocation_size(), oopSize)), - _content_offset(CodeBlob::align_code_offset(_header_size + _relocation_size)), - _code_offset(_content_offset + 
cb->total_offset_of(cb->insts())), - _data_offset(_content_offset + align_up(cb->total_content_size(), oopSize)) - { - assert(is_aligned(_relocation_size, oopSize), "unaligned size"); - - _code_begin = (address) start + _code_offset; - _code_end = (address) start + _data_offset; - - _content_begin = (address) start + _content_offset; - _content_end = (address) start + _data_offset; - - _data_end = (address) start + _size; - _relocation_begin = (address) start + _header_size; - _relocation_end = _relocation_begin + _relocation_size; - } - - int size() const { return _size; } - int header_size() const { return _header_size; } - int relocation_size() const { return _relocation_size; } - int content_offset() const { return _content_offset; } - int code_offset() const { return _code_offset; } - int data_offset() const { return _data_offset; } - address code_begin() const { return _code_begin; } - address code_end() const { return _code_end; } - address data_end() const { return _data_end; } - address relocation_begin() const { return _relocation_begin; } - address relocation_end() const { return _relocation_end; } - address content_begin() const { return _content_begin; } - address content_end() const { return _content_end; } -}; - +//---------------------------------------------------------------------------------------------------- +// RuntimeBlob: used for non-compiled method code (adapters, stubs, blobs) class RuntimeBlob : public CodeBlob { friend class VMStructs; @@ -358,16 +273,19 @@ class RuntimeBlob : public CodeBlob { // Creation // a) simple CodeBlob - // frame_complete is the offset from the beginning of the instructions - // to where the frame setup (from stackwalk viewpoint) is complete. - RuntimeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size); + RuntimeBlob(const char* name, CodeBlobKind kind, int size, int header_size) + : CodeBlob(name, kind, size, header_size) + {} // b) full CodeBlob + // frame_complete is the offset from the beginning of the instructions + // to where the frame setup (from stackwalk viewpoint) is complete. RuntimeBlob( const char* name, + CodeBlobKind kind, CodeBuffer* cb, - int header_size, int size, + int header_size, int frame_complete, int frame_size, OopMapSet* oop_maps, @@ -376,15 +294,6 @@ class RuntimeBlob : public CodeBlob { static void free(RuntimeBlob* blob); - void verify(); - - // OopMap for frame - virtual void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { ShouldNotReachHere(); } - - // Debugging - virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); } - virtual void print_value_on(outputStream* st) const { CodeBlob::print_value_on(st); } - // Deal with Disassembler, VTune, Forte, JvmtiExport, MemoryService. 
static void trace_new_stub(RuntimeBlob* blob, const char* name1, const char* name2 = ""); }; @@ -403,8 +312,8 @@ class BufferBlob: public RuntimeBlob { private: // Creation support - BufferBlob(const char* name, int size); - BufferBlob(const char* name, int size, CodeBuffer* cb); + BufferBlob(const char* name, CodeBlobKind kind, int size); + BufferBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size); void* operator new(size_t s, unsigned size) throw(); @@ -415,15 +324,12 @@ class BufferBlob: public RuntimeBlob { static void free(BufferBlob* buf); - // Typing - virtual bool is_buffer_blob() const { return true; } - // GC/Verification support - void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { /* nothing to do */ } + void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) override { /* nothing to do */ } - void verify(); - void print_on(outputStream* st) const; - void print_value_on(outputStream* st) const; + void verify() override; + void print_on(outputStream* st) const override; + void print_value_on(outputStream* st) const override; }; @@ -437,9 +343,6 @@ private: public: // Creation static AdapterBlob* create(CodeBuffer* cb); - - // Typing - virtual bool is_adapter_blob() const { return true; } }; //--------------------------------------------------------------------------------------------------- @@ -452,9 +355,6 @@ private: public: // Creation static VtableBlob* create(const char* name, int buffer_size); - - // Typing - virtual bool is_vtable_blob() const { return true; } }; //---------------------------------------------------------------------------------------------------- @@ -462,14 +362,11 @@ public: class MethodHandlesAdapterBlob: public BufferBlob { private: - MethodHandlesAdapterBlob(int size): BufferBlob("MethodHandles adapters", size) {} + MethodHandlesAdapterBlob(int size): BufferBlob("MethodHandles adapters", CodeBlobKind::MH_Adapter, size) {} public: // Creation static MethodHandlesAdapterBlob* create(int buffer_size); - - // Typing - virtual bool is_method_handles_adapter_blob() const { return true; } }; @@ -506,17 +403,14 @@ class RuntimeStub: public RuntimeBlob { static void free(RuntimeStub* stub) { RuntimeBlob::free(stub); } - // Typing - bool is_runtime_stub() const { return true; } - address entry_point() const { return code_begin(); } // GC/Verification support - void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* nothing to do */ } + void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) override { /* nothing to do */ } - void verify(); - void print_on(outputStream* st) const; - void print_value_on(outputStream* st) const; + void verify() override; + void print_on(outputStream* st) const override; + void print_value_on(outputStream* st) const override; }; @@ -531,23 +425,24 @@ class SingletonBlob: public RuntimeBlob { public: SingletonBlob( - const char* name, - CodeBuffer* cb, - int header_size, - int size, - int frame_size, - OopMapSet* oop_maps + const char* name, + CodeBlobKind kind, + CodeBuffer* cb, + int size, + int header_size, + int frame_size, + OopMapSet* oop_maps ) - : RuntimeBlob(name, cb, header_size, size, CodeOffsets::frame_never_safe, frame_size, oop_maps) + : RuntimeBlob(name, kind, cb, size, header_size, CodeOffsets::frame_never_safe, frame_size, oop_maps) {}; address entry_point() { return code_begin(); } // GC/Verification support - void preserve_callee_argument_oops(frame fr, const 
RegisterMap *reg_map, OopClosure* f) { /* nothing to do */ } - void verify(); // does nothing - void print_on(outputStream* st) const; - void print_value_on(outputStream* st) const; + void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) override { /* nothing to do */ } + void verify() override; // does nothing + void print_on(outputStream* st) const override; + void print_value_on(outputStream* st) const override; }; @@ -592,14 +487,8 @@ class DeoptimizationBlob: public SingletonBlob { int frame_size ); - // Typing - bool is_deoptimization_stub() const { return true; } - - // GC for args - void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* Nothing to do */ } - // Printing - void print_value_on(outputStream* st) const; + void print_value_on(outputStream* st) const override; address unpack() const { return code_begin() + _unpack_offset; } address unpack_with_exception() const { return code_begin() + _unpack_with_exception; } @@ -656,12 +545,6 @@ class UncommonTrapBlob: public SingletonBlob { OopMapSet* oop_maps, int frame_size ); - - // GC for args - void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* nothing to do */ } - - // Typing - bool is_uncommon_trap_stub() const { return true; } }; @@ -686,12 +569,6 @@ class ExceptionBlob: public SingletonBlob { OopMapSet* oop_maps, int frame_size ); - - // GC for args - void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { /* nothing to do */ } - - // Typing - bool is_exception_stub() const { return true; } }; #endif // COMPILER2 @@ -717,12 +594,6 @@ class SafepointBlob: public SingletonBlob { OopMapSet* oop_maps, int frame_size ); - - // GC for args - void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { /* nothing to do */ } - - // Typing - bool is_safepoint_stub() const { return true; } }; //---------------------------------------------------------------------------------------------------- @@ -759,17 +630,14 @@ class UpcallStub: public RuntimeBlob { JavaFrameAnchor* jfa_for_frame(const frame& frame) const; - // Typing - virtual bool is_upcall_stub() const override { return true; } - // GC/Verification support void oops_do(OopClosure* f, const frame& frame); - virtual void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) override; - virtual void verify() override; + void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) override; + void verify() override; // Misc. - virtual void print_on(outputStream* st) const override; - virtual void print_value_on(outputStream* st) const override; + void print_on(outputStream* st) const override; + void print_value_on(outputStream* st) const override; }; #endif // SHARE_CODE_CODEBLOB_HPP diff --git a/src/hotspot/share/code/codeCache.cpp b/src/hotspot/share/code/codeCache.cpp index 9c0fa80cab5..4327f5556b7 100644 --- a/src/hotspot/share/code/codeCache.cpp +++ b/src/hotspot/share/code/codeCache.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -161,7 +161,6 @@ class CodeBlob_sizes { // Iterate over all CodeHeaps #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator heap = _heaps->begin(); heap != _heaps->end(); ++heap) -#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap) #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap) // Iterate over all CodeBlobs (cb) on the given CodeHeap @@ -174,7 +173,6 @@ ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr; // Initialize arrays of CodeHeap subsets GrowableArray* CodeCache::_heaps = new(mtCode) GrowableArray (static_cast(CodeBlobType::All), mtCode); -GrowableArray* CodeCache::_compiled_heaps = new(mtCode) GrowableArray (static_cast(CodeBlobType::All), mtCode); GrowableArray* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray (static_cast(CodeBlobType::All), mtCode); GrowableArray* CodeCache::_allocable_heaps = new(mtCode) GrowableArray (static_cast(CodeBlobType::All), mtCode); @@ -424,9 +422,6 @@ void CodeCache::add_heap(CodeHeap* heap) { _heaps->insert_sorted(heap); CodeBlobType type = heap->code_blob_type(); - if (code_blob_type_accepts_compiled(type)) { - _compiled_heaps->insert_sorted(heap); - } if (code_blob_type_accepts_nmethod(type)) { _nmethod_heaps->insert_sorted(heap); } @@ -669,8 +664,8 @@ CodeBlob* CodeCache::find_blob(void* start) { nmethod* CodeCache::find_nmethod(void* start) { CodeBlob* cb = find_blob(start); - assert(cb->is_nmethod(), "did not find an nmethod"); - return (nmethod*)cb; + assert(cb != nullptr, "did not find an nmethod"); + return cb->as_nmethod(); } void CodeCache::blobs_do(void f(CodeBlob* nm)) { @@ -882,7 +877,7 @@ void CodeCache::arm_all_nmethods() { // Mark nmethods for unloading if they contain otherwise unreachable oops. void CodeCache::do_unloading(bool unloading_occurred) { assert_locked_or_safepoint(CodeCache_lock); - CompiledMethodIterator iter(CompiledMethodIterator::all_blobs); + NMethodIterator iter(NMethodIterator::all_blobs); while(iter.next()) { iter.method()->do_unloading(unloading_occurred); } @@ -1011,7 +1006,7 @@ int CodeCache::nmethod_count(CodeBlobType code_blob_type) { int CodeCache::nmethod_count() { int count = 0; - FOR_ALL_NMETHOD_HEAPS(heap) { + for (GrowableArrayIterator heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap) { count += (*heap)->nmethod_count(); } return count; @@ -1178,7 +1173,7 @@ bool CodeCache::has_nmethods_with_dependencies() { void CodeCache::clear_inline_caches() { assert_locked_or_safepoint(CodeCache_lock); - CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading); + NMethodIterator iter(NMethodIterator::only_not_unloading); while(iter.next()) { iter.method()->clear_inline_caches(); } @@ -1271,38 +1266,32 @@ void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassD #endif } -CompiledMethod* CodeCache::find_compiled(void* start) { - CodeBlob *cb = find_blob(start); - assert(cb == nullptr || cb->is_compiled(), "did not find an compiled_method"); - return (CompiledMethod*)cb; -} - #if INCLUDE_JVMTI // RedefineClasses support for saving nmethods that are dependent on "old" methods. // We don't really expect this table to grow very large. If it does, it can become a hashtable. 
-static GrowableArray* old_compiled_method_table = nullptr; +static GrowableArray* old_nmethod_table = nullptr; -static void add_to_old_table(CompiledMethod* c) { - if (old_compiled_method_table == nullptr) { - old_compiled_method_table = new (mtCode) GrowableArray(100, mtCode); +static void add_to_old_table(nmethod* c) { + if (old_nmethod_table == nullptr) { + old_nmethod_table = new (mtCode) GrowableArray(100, mtCode); } - old_compiled_method_table->push(c); + old_nmethod_table->push(c); } static void reset_old_method_table() { - if (old_compiled_method_table != nullptr) { - delete old_compiled_method_table; - old_compiled_method_table = nullptr; + if (old_nmethod_table != nullptr) { + delete old_nmethod_table; + old_nmethod_table = nullptr; } } // Remove this method when flushed. -void CodeCache::unregister_old_nmethod(CompiledMethod* c) { +void CodeCache::unregister_old_nmethod(nmethod* c) { assert_lock_strong(CodeCache_lock); - if (old_compiled_method_table != nullptr) { - int index = old_compiled_method_table->find(c); + if (old_nmethod_table != nullptr) { + int index = old_nmethod_table->find(c); if (index != -1) { - old_compiled_method_table->delete_at(index); + old_nmethod_table->delete_at(index); } } } @@ -1310,13 +1299,13 @@ void CodeCache::unregister_old_nmethod(CompiledMethod* c) { void CodeCache::old_nmethods_do(MetadataClosure* f) { // Walk old method table and mark those on stack. int length = 0; - if (old_compiled_method_table != nullptr) { - length = old_compiled_method_table->length(); + if (old_nmethod_table != nullptr) { + length = old_nmethod_table->length(); for (int i = 0; i < length; i++) { // Walk all methods saved on the last pass. Concurrent class unloading may // also be looking at this method's metadata, so don't delete it yet if // it is marked as unloaded. - old_compiled_method_table->at(i)->metadata_do(f); + old_nmethod_table->at(i)->metadata_do(f); } } log_debug(redefine, class, nmethod)("Walked %d nmethods for mark_on_stack", length); @@ -1329,9 +1318,9 @@ void CodeCache::mark_dependents_for_evol_deoptimization(DeoptimizationScope* deo // So delete old method table and create a new one. reset_old_method_table(); - CompiledMethodIterator iter(CompiledMethodIterator::all_blobs); + NMethodIterator iter(NMethodIterator::all_blobs); while(iter.next()) { - CompiledMethod* nm = iter.method(); + nmethod* nm = iter.method(); // Walk all alive nmethods to check for old Methods. // This includes methods whose inline caches point to old methods, so // inline cache clearing is unnecessary. 
@@ -1344,9 +1333,9 @@ void CodeCache::mark_dependents_for_evol_deoptimization(DeoptimizationScope* deo void CodeCache::mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope) { assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!"); - CompiledMethodIterator iter(CompiledMethodIterator::all_blobs); + NMethodIterator iter(NMethodIterator::all_blobs); while(iter.next()) { - CompiledMethod* nm = iter.method(); + nmethod* nm = iter.method(); if (!nm->method()->is_method_handle_intrinsic()) { if (nm->can_be_deoptimized()) { deopt_scope->mark(nm); @@ -1365,9 +1354,9 @@ void CodeCache::mark_directives_matches(bool top_only) { Thread *thread = Thread::current(); HandleMark hm(thread); - CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading); + NMethodIterator iter(NMethodIterator::only_not_unloading); while(iter.next()) { - CompiledMethod* nm = iter.method(); + nmethod* nm = iter.method(); methodHandle mh(thread, nm->method()); if (DirectivesStack::hasMatchingDirectives(mh, top_only)) { ResourceMark rm; @@ -1383,9 +1372,9 @@ void CodeCache::recompile_marked_directives_matches() { // Try the max level and let the directives be applied during the compilation. int comp_level = CompilationPolicy::highest_compile_level(); - RelaxedCompiledMethodIterator iter(RelaxedCompiledMethodIterator::only_not_unloading); + RelaxedNMethodIterator iter(RelaxedNMethodIterator::only_not_unloading); while(iter.next()) { - CompiledMethod* nm = iter.method(); + nmethod* nm = iter.method(); methodHandle mh(thread, nm->method()); if (mh->has_matching_directives()) { ResourceMark rm; @@ -1424,9 +1413,9 @@ void CodeCache::recompile_marked_directives_matches() { // Mark methods for deopt (if safe or possible). void CodeCache::mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope) { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading); + NMethodIterator iter(NMethodIterator::only_not_unloading); while(iter.next()) { - CompiledMethod* nm = iter.method(); + nmethod* nm = iter.method(); if (!nm->is_native_method()) { deopt_scope->mark(nm); } @@ -1436,9 +1425,9 @@ void CodeCache::mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_ void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee) { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading); + NMethodIterator iter(NMethodIterator::only_not_unloading); while(iter.next()) { - CompiledMethod* nm = iter.method(); + nmethod* nm = iter.method(); if (nm->is_dependent_on_method(dependee)) { deopt_scope->mark(nm); } @@ -1446,9 +1435,9 @@ void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method } void CodeCache::make_marked_nmethods_deoptimized() { - RelaxedCompiledMethodIterator iter(RelaxedCompiledMethodIterator::only_not_unloading); + RelaxedNMethodIterator iter(RelaxedNMethodIterator::only_not_unloading); while(iter.next()) { - CompiledMethod* nm = iter.method(); + nmethod* nm = iter.method(); if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) { nm->make_not_entrant(); nm->make_deoptimized(); @@ -1849,15 +1838,15 @@ void CodeCache::print_summary(outputStream* st, bool detailed) { void CodeCache::print_codelist(outputStream* st) { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - CompiledMethodIterator 
iter(CompiledMethodIterator::only_not_unloading); + NMethodIterator iter(NMethodIterator::only_not_unloading); while (iter.next()) { - CompiledMethod* cm = iter.method(); + nmethod* nm = iter.method(); ResourceMark rm; - char* method_name = cm->method()->name_and_sig_as_C_string(); + char* method_name = nm->method()->name_and_sig_as_C_string(); st->print_cr("%d %d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]", - cm->compile_id(), cm->comp_level(), cm->get_state(), + nm->compile_id(), nm->comp_level(), nm->get_state(), method_name, - (intptr_t)cm->header_begin(), (intptr_t)cm->code_begin(), (intptr_t)cm->code_end()); + (intptr_t)nm->header_begin(), (intptr_t)nm->code_begin(), (intptr_t)nm->code_end()); } } @@ -1897,8 +1886,8 @@ void CodeCache::write_perf_map(const char* filename) { CodeBlob *cb = iter.method(); ResourceMark rm; const char* method_name = - cb->is_compiled() ? cb->as_compiled_method()->method()->external_name() - : cb->name(); + cb->is_nmethod() ? cb->as_nmethod()->method()->external_name() + : cb->name(); fs.print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %s", (intptr_t)cb->code_begin(), (intptr_t)cb->code_size(), method_name); diff --git a/src/hotspot/share/code/codeCache.hpp b/src/hotspot/share/code/codeCache.hpp index eb4759d7ea4..584af0b761a 100644 --- a/src/hotspot/share/code/codeCache.hpp +++ b/src/hotspot/share/code/codeCache.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -83,14 +83,13 @@ class DeoptimizationScope; class CodeCache : AllStatic { friend class VMStructs; friend class JVMCIVMStructs; - template friend class CodeBlobIterator; + template friend class CodeBlobIterator; friend class WhiteBox; friend class CodeCacheLoader; friend class ShenandoahParallelCodeHeapIterator; private: // CodeHeaps of the cache static GrowableArray* _heaps; - static GrowableArray* _compiled_heaps; static GrowableArray* _nmethod_heaps; static GrowableArray* _allocable_heaps; @@ -144,7 +143,6 @@ class CodeCache : AllStatic { static void add_heap(CodeHeap* heap); static const GrowableArray* heaps() { return _heaps; } - static const GrowableArray* compiled_heaps() { return _compiled_heaps; } static const GrowableArray* nmethod_heaps() { return _nmethod_heaps; } // Allocation/administration @@ -165,7 +163,6 @@ class CodeCache : AllStatic { static CodeBlob* find_blob_and_oopmap(void* start, int& slot); // Returns the CodeBlob containing the given address static int find_oopmap_slot_fast(void* start); // Returns a fast oopmap slot if there is any; -1 otherwise static nmethod* find_nmethod(void* start); // Returns the nmethod containing the given address - static CompiledMethod* find_compiled(void* start); static int blob_count(); // Returns the total number of CodeBlobs in the cache static int blob_count(CodeBlobType code_blob_type); @@ -258,14 +255,9 @@ class CodeCache : AllStatic { // Returns true if an own CodeHeap for the given CodeBlobType is available static bool heap_available(CodeBlobType code_blob_type); - // Returns the CodeBlobType for the given CompiledMethod - static CodeBlobType get_code_blob_type(CompiledMethod* cm) { - return get_code_heap(cm)->code_blob_type(); - } - - static bool code_blob_type_accepts_compiled(CodeBlobType code_blob_type) { - bool result = code_blob_type == 
CodeBlobType::All || code_blob_type <= CodeBlobType::MethodProfiled; - return result; + // Returns the CodeBlobType for the given nmethod + static CodeBlobType get_code_blob_type(nmethod* nm) { + return get_code_heap(nm)->code_blob_type(); } static bool code_blob_type_accepts_nmethod(CodeBlobType type) { @@ -315,7 +307,7 @@ class CodeCache : AllStatic { static void mark_dependents_for_evol_deoptimization(DeoptimizationScope* deopt_scope); static void mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope); static void old_nmethods_do(MetadataClosure* f) NOT_JVMTI_RETURN; - static void unregister_old_nmethod(CompiledMethod* c) NOT_JVMTI_RETURN; + static void unregister_old_nmethod(nmethod* c) NOT_JVMTI_RETURN; // Support for fullspeed debugging static void mark_dependents_on_method_for_breakpoint(const methodHandle& dependee); @@ -369,8 +361,8 @@ template class CodeBlobIterator : publi // Filter is_unloading as required if (_only_not_unloading) { - CompiledMethod* cm = _code_blob->as_compiled_method_or_null(); - if (cm != nullptr && cm->is_unloading()) { + nmethod* nm = _code_blob->as_nmethod_or_null(); + if (nm != nullptr && nm->is_unloading()) { continue; } } @@ -442,12 +434,6 @@ private: } }; -struct CompiledMethodFilter { - static bool apply(CodeBlob* cb) { return cb->is_compiled(); } - static const GrowableArray* heaps() { return CodeCache::compiled_heaps(); } -}; - - struct NMethodFilter { static bool apply(CodeBlob* cb) { return cb->is_nmethod(); } static const GrowableArray* heaps() { return CodeCache::nmethod_heaps(); } @@ -458,9 +444,8 @@ struct AllCodeBlobsFilter { static const GrowableArray* heaps() { return CodeCache::heaps(); } }; -typedef CodeBlobIterator CompiledMethodIterator; -typedef CodeBlobIterator RelaxedCompiledMethodIterator; typedef CodeBlobIterator NMethodIterator; +typedef CodeBlobIterator RelaxedNMethodIterator; typedef CodeBlobIterator AllCodeBlobsIterator; #endif // SHARE_CODE_CODECACHE_HPP diff --git a/src/hotspot/share/code/codeHeapState.hpp b/src/hotspot/share/code/codeHeapState.hpp index ad3b03d1303..f30e492d7a5 100644 --- a/src/hotspot/share/code/codeHeapState.hpp +++ b/src/hotspot/share/code/codeHeapState.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2018, 2019 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -44,8 +44,8 @@ class CodeHeapState : public CHeapObj { enum blobType { noType = 0, // must be! due to initialization by memset to zero - // The nMethod_* values correspond to the CompiledMethod enum values. - // We can't use the CompiledMethod values 1:1 because we depend on noType == 0. + // The nMethod_* values correspond to the nmethod enum values. + // We can't use the nmethod values 1:1 because we depend on noType == 0. nMethod_inconstruction, // under construction. Very soon, the type will transition to "in_use". // can't be observed while holding Compile_lock and CodeCache_lock simultaneously. // left in here for completeness (and to document we spent a thought). diff --git a/src/hotspot/share/code/compiledIC.cpp b/src/hotspot/share/code/compiledIC.cpp index 250ef063a2a..9c71bc4c7c0 100644 --- a/src/hotspot/share/code/compiledIC.cpp +++ b/src/hotspot/share/code/compiledIC.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. 
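The iterator changes above keep the policy-struct pattern: a filter type with a static apply(CodeBlob*) predicate is plugged into CodeBlobIterator, and the public iterator names are just typedefs over specific filters. A compressed standalone sketch of that pattern, assuming nothing from HotSpot (Blob, NmethodOnly, AllBlobs and BlobIterator are invented names, and the heaps() hook, the relaxed mode and the is_unloading filtering are left out):

#include <cstdio>
#include <vector>

struct Blob { bool is_nmethod; const char* name; };

// Filter policies: a static apply() decides which blobs the iterator yields,
// mirroring the NMethodFilter / AllCodeBlobsFilter structs above.
struct NmethodOnly { static bool apply(const Blob& b) { return b.is_nmethod; } };
struct AllBlobs    { static bool apply(const Blob&)   { return true; } };

template <typename Filter>
class BlobIterator {
  const std::vector<Blob>& _blobs;
  size_t _pos = 0;
  const Blob* _current = nullptr;
public:
  explicit BlobIterator(const std::vector<Blob>& blobs) : _blobs(blobs) {}
  // Advance to the next blob accepted by Filter; false once exhausted.
  bool next() {
    while (_pos < _blobs.size()) {
      const Blob& b = _blobs[_pos++];
      if (Filter::apply(b)) { _current = &b; return true; }
    }
    return false;
  }
  const Blob& method() const { return *_current; }
};

// Analogous to the NMethodIterator / AllCodeBlobsIterator typedefs above.
using NmethodIterator  = BlobIterator<NmethodOnly>;
using AllBlobsIterator = BlobIterator<AllBlobs>;

int main() {
  std::vector<Blob> heap = { {true, "foo"}, {false, "adapter"}, {true, "bar"} };
  NmethodIterator it(heap);
  while (it.next()) std::printf("%s\n", it.method().name);
  return 0;
}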
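Further up, write_perf_map emits one "start size name" line per code blob, which matches the plain-text map format Linux perf reads from /tmp/perf-<pid>.map to symbolize JIT code. A small self-contained sketch of producing that format; CodeRegion, this write_perf_map signature and the demo entries are made up for illustration:

#include <cinttypes>
#include <cstdint>
#include <cstdio>

struct CodeRegion {
  uintptr_t   start;
  size_t      size;
  const char* name;
};

// Write one "START SIZE name" line per region, addresses in hex, the layout
// tools like perf expect in a JIT map file.
bool write_perf_map(const char* filename, const CodeRegion* regions, size_t count) {
  FILE* f = fopen(filename, "w");
  if (f == nullptr) return false;
  for (size_t i = 0; i < count; i++) {
    fprintf(f, "%" PRIxPTR " %zx %s\n", regions[i].start, regions[i].size, regions[i].name);
  }
  fclose(f);
  return true;
}

int main() {
  const CodeRegion demo[] = {
    { 0x401000, 0x200, "java.lang.String::hashCode (example)" },
    { 0x402000, 0x480, "StubRoutines (example)" },
  };
  return write_perf_map("perf-demo.map", demo, 2) ? 0 : 1;
}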
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,7 +44,7 @@ // Every time a compiled IC is changed or its type is being accessed, // either the CompiledIC_lock must be set or we must be at a safe point. -CompiledICLocker::CompiledICLocker(CompiledMethod* method) +CompiledICLocker::CompiledICLocker(nmethod* method) : _method(method), _behaviour(CompiledICProtectionBehaviour::current()), _locked(_behaviour->lock(_method)) { @@ -56,15 +56,15 @@ CompiledICLocker::~CompiledICLocker() { } } -bool CompiledICLocker::is_safe(CompiledMethod* method) { +bool CompiledICLocker::is_safe(nmethod* method) { return CompiledICProtectionBehaviour::current()->is_safe(method); } bool CompiledICLocker::is_safe(address code) { CodeBlob* cb = CodeCache::find_blob(code); - assert(cb != nullptr && cb->is_compiled(), "must be compiled"); - CompiledMethod* cm = cb->as_compiled_method(); - return CompiledICProtectionBehaviour::current()->is_safe(cm); + assert(cb != nullptr && cb->is_nmethod(), "must be compiled"); + nmethod* nm = cb->as_nmethod(); + return CompiledICProtectionBehaviour::current()->is_safe(nm); } CompiledICData::CompiledICData() @@ -167,12 +167,12 @@ CompiledIC::CompiledIC(RelocIterator* iter) assert(CompiledICLocker::is_safe(_method), "mt unsafe call"); } -CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr) { +CompiledIC* CompiledIC_before(nmethod* nm, address return_addr) { address call_site = nativeCall_before(return_addr)->instruction_address(); return CompiledIC_at(nm, call_site); } -CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site) { +CompiledIC* CompiledIC_at(nmethod* nm, address call_site) { RelocIterator iter(nm, call_site, call_site + 1); iter.next(); return CompiledIC_at(&iter); @@ -180,8 +180,8 @@ CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site) { CompiledIC* CompiledIC_at(Relocation* call_reloc) { address call_site = call_reloc->addr(); - CompiledMethod* cm = CodeCache::find_blob(call_reloc->addr())->as_compiled_method(); - return CompiledIC_at(cm, call_site); + nmethod* nm = CodeCache::find_blob(call_reloc->addr())->as_nmethod(); + return CompiledIC_at(nm, call_site); } CompiledIC* CompiledIC_at(RelocIterator* reloc_iter) { @@ -204,7 +204,7 @@ void CompiledIC::set_to_clean() { void CompiledIC::set_to_monomorphic() { assert(data()->is_initialized(), "must be initialized"); Method* method = data()->speculated_method(); - CompiledMethod* code = method->code(); + nmethod* code = method->code(); address entry; bool to_compiled = code != nullptr && code->is_in_use() && !code->is_unloading(); @@ -321,7 +321,7 @@ void CompiledIC::verify() { // ---------------------------------------------------------------------------- void CompiledDirectCall::set_to_clean() { - // in_use is unused but needed to match template function in CompiledMethod + // in_use is unused but needed to match template function in nmethod assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call"); // Reset call site RelocIterator iter((nmethod*)nullptr, instruction_address(), instruction_address() + 1); @@ -343,8 +343,8 @@ void CompiledDirectCall::set_to_clean() { } void CompiledDirectCall::set(const methodHandle& callee_method) { - CompiledMethod* code = callee_method->code(); - CompiledMethod* caller = CodeCache::find_compiled(instruction_address()); + nmethod* code = 
callee_method->code(); + nmethod* caller = CodeCache::find_nmethod(instruction_address()); bool to_interp_cont_enter = caller->method()->is_continuation_enter_intrinsic() && ContinuationEntry::is_interpreted_call(instruction_address()); @@ -377,14 +377,14 @@ bool CompiledDirectCall::is_clean() const { bool CompiledDirectCall::is_call_to_interpreted() const { // It is a call to interpreted, if it calls to a stub. Hence, the destination // must be in the stub part of the nmethod that contains the call - CompiledMethod* cm = CodeCache::find_compiled(instruction_address()); - return cm->stub_contains(destination()); + nmethod* nm = CodeCache::find_nmethod(instruction_address()); + return nm->stub_contains(destination()); } bool CompiledDirectCall::is_call_to_compiled() const { - CompiledMethod* caller = CodeCache::find_compiled(instruction_address()); + nmethod* caller = CodeCache::find_nmethod(instruction_address()); CodeBlob* dest_cb = CodeCache::find_blob(destination()); - return !caller->stub_contains(destination()) && dest_cb->is_compiled(); + return !caller->stub_contains(destination()) && dest_cb->is_nmethod(); } address CompiledDirectCall::find_stub_for(address instruction) { diff --git a/src/hotspot/share/code/compiledIC.hpp b/src/hotspot/share/code/compiledIC.hpp index 321bf280ed4..4439ff958f7 100644 --- a/src/hotspot/share/code/compiledIC.hpp +++ b/src/hotspot/share/code/compiledIC.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,18 +39,18 @@ // class CompiledIC; class CompiledICProtectionBehaviour; -class CompiledMethod; +class nmethod; class CompiledICLocker: public StackObj { - CompiledMethod* _method; + nmethod* _method; CompiledICProtectionBehaviour* _behaviour; bool _locked; NoSafepointVerifier _nsv; public: - CompiledICLocker(CompiledMethod* method); + CompiledICLocker(nmethod* method); ~CompiledICLocker(); - static bool is_safe(CompiledMethod* method); + static bool is_safe(nmethod* method); static bool is_safe(address code); }; @@ -98,7 +98,7 @@ class CompiledICData : public CHeapObj { class CompiledIC: public ResourceObj { private: - CompiledMethod* _method; + nmethod* _method; CompiledICData* _data; NativeCall* _call; @@ -114,8 +114,8 @@ private: public: // conversion (machine PC to CompiledIC*) - friend CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr); - friend CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site); + friend CompiledIC* CompiledIC_before(nmethod* nm, address return_addr); + friend CompiledIC* CompiledIC_at(nmethod* nm, address call_site); friend CompiledIC* CompiledIC_at(Relocation* call_site); friend CompiledIC* CompiledIC_at(RelocIterator* reloc_iter); @@ -146,8 +146,8 @@ public: void verify() PRODUCT_RETURN; }; -CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr); -CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site); +CompiledIC* CompiledIC_before(nmethod* nm, address return_addr); +CompiledIC* CompiledIC_at(nmethod* nm, address call_site); CompiledIC* CompiledIC_at(Relocation* call_site); CompiledIC* CompiledIC_at(RelocIterator* reloc_iter); diff --git a/src/hotspot/share/code/compiledMethod.cpp b/src/hotspot/share/code/compiledMethod.cpp deleted file mode 100644 index 6553d6f7934..00000000000 --- 
a/src/hotspot/share/code/compiledMethod.cpp +++ /dev/null @@ -1,647 +0,0 @@ -/* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "precompiled.hpp" -#include "code/compiledIC.hpp" -#include "code/compiledMethod.inline.hpp" -#include "code/exceptionHandlerTable.hpp" -#include "code/scopeDesc.hpp" -#include "code/codeCache.hpp" -#include "gc/shared/barrierSet.hpp" -#include "gc/shared/barrierSetNMethod.hpp" -#include "gc/shared/gcBehaviours.hpp" -#include "interpreter/bytecode.inline.hpp" -#include "logging/log.hpp" -#include "logging/logTag.hpp" -#include "memory/resourceArea.hpp" -#include "oops/klass.inline.hpp" -#include "oops/methodData.hpp" -#include "oops/method.inline.hpp" -#include "oops/weakHandle.inline.hpp" -#include "prims/methodHandles.hpp" -#include "runtime/atomic.hpp" -#include "runtime/deoptimization.hpp" -#include "runtime/frame.inline.hpp" -#include "runtime/jniHandles.inline.hpp" -#include "runtime/handles.inline.hpp" -#include "runtime/mutexLocker.hpp" -#include "runtime/sharedRuntime.hpp" - -CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, - int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, - bool caller_must_gc_arguments, bool compiled) - : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments, compiled), - _deoptimization_status(not_marked), - _deoptimization_generation(0), - _method(method), - _gc_data(nullptr) -{ - init_defaults(); -} - -CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size, - int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, - OopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled) - : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb, - frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments, compiled), - _deoptimization_status(not_marked), - _deoptimization_generation(0), - _method(method), - _gc_data(nullptr) -{ - init_defaults(); -} - -void CompiledMethod::init_defaults() { - { // avoid uninitialized fields, even for short time periods - _scopes_data_begin = nullptr; - _deopt_handler_begin = nullptr; - _deopt_mh_handler_begin = nullptr; - _exception_cache = nullptr; - } - _has_unsafe_access = 0; - _has_method_handle_invokes = 0; - _has_wide_vectors = 0; - _has_monitors = 0; -} - -bool CompiledMethod::is_method_handle_return(address return_pc) { - if 
(!has_method_handle_invokes()) return false; - PcDesc* pd = pc_desc_at(return_pc); - if (pd == nullptr) - return false; - return pd->is_method_handle_invoke(); -} - -// Returns a string version of the method state. -const char* CompiledMethod::state() const { - int state = get_state(); - switch (state) { - case not_installed: - return "not installed"; - case in_use: - return "in use"; - case not_used: - return "not_used"; - case not_entrant: - return "not_entrant"; - default: - fatal("unexpected method state: %d", state); - return nullptr; - } -} - -//----------------------------------------------------------------------------- -void CompiledMethod::set_deoptimized_done() { - ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag); - if (_deoptimization_status != deoptimize_done) { // can't go backwards - Atomic::store(&_deoptimization_status, deoptimize_done); - } -} - -//----------------------------------------------------------------------------- - -ExceptionCache* CompiledMethod::exception_cache_acquire() const { - return Atomic::load_acquire(&_exception_cache); -} - -void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) { - assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock"); - assert(new_entry != nullptr,"Must be non null"); - assert(new_entry->next() == nullptr, "Must be null"); - - for (;;) { - ExceptionCache *ec = exception_cache(); - if (ec != nullptr) { - Klass* ex_klass = ec->exception_type(); - if (!ex_klass->is_loader_alive()) { - // We must guarantee that entries are not inserted with new next pointer - // edges to ExceptionCache entries with dead klasses, due to bad interactions - // with concurrent ExceptionCache cleanup. Therefore, the inserts roll - // the head pointer forward to the first live ExceptionCache, so that the new - // next pointers always point at live ExceptionCaches, that are not removed due - // to concurrent ExceptionCache cleanup. - ExceptionCache* next = ec->next(); - if (Atomic::cmpxchg(&_exception_cache, ec, next) == ec) { - CodeCache::release_exception_cache(ec); - } - continue; - } - ec = exception_cache(); - if (ec != nullptr) { - new_entry->set_next(ec); - } - } - if (Atomic::cmpxchg(&_exception_cache, ec, new_entry) == ec) { - return; - } - } -} - -void CompiledMethod::clean_exception_cache() { - // For each nmethod, only a single thread may call this cleanup function - // at the same time, whether called in STW cleanup or concurrent cleanup. - // Note that if the GC is processing exception cache cleaning in a concurrent phase, - // then a single writer may contend with cleaning up the head pointer to the - // first ExceptionCache node that has a Klass* that is alive. That is fine, - // as long as there is no concurrent cleanup of next pointers from concurrent writers. - // And the concurrent writers do not clean up next pointers, only the head. - // Also note that concurrent readers will walk through Klass* pointers that are not - // alive. That does not cause ABA problems, because Klass* is deleted after - // a handshake with all threads, after all stale ExceptionCaches have been - // unlinked. That is also when the CodeCache::exception_cache_purge_list() - // is deleted, with all ExceptionCache entries that were cleaned concurrently. - // That similarly implies that CAS operations on ExceptionCache entries do not - // suffer from ABA problems as unlinking and deletion is separated by a global - // handshake operation. 
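add_exception_cache_entry above publishes new entries at the head of a singly linked list that readers walk without taking a lock, so the head pointer is swung with a compare-and-exchange retry loop. A stripped-down standalone sketch of just that publish-and-lookup part, using std::atomic; Entry, push_entry and find_entry are invented, and the dead-Klass pruning and purge list of the real code are omitted:

#include <atomic>
#include <cstdio>

struct Entry {
  int value;
  Entry* next = nullptr;
};

std::atomic<Entry*> g_head{nullptr};

// Publish a new entry at the head. Readers may be walking the list
// concurrently, so the node is fully linked before the CAS makes it visible.
void push_entry(Entry* e) {
  Entry* old_head = g_head.load(std::memory_order_acquire);
  do {
    e->next = old_head;   // link to the current head before publishing
  } while (!g_head.compare_exchange_weak(old_head, e,
                                         std::memory_order_release,
                                         std::memory_order_acquire));
}

// Lock-free read: walk the list looking for a match; a miss is always
// acceptable (the real cache tolerates false negatives the same way).
Entry* find_entry(int value) {
  for (Entry* e = g_head.load(std::memory_order_acquire); e != nullptr; e = e->next) {
    if (e->value == value) return e;
  }
  return nullptr;
}

int main() {
  static Entry a{1}, b{2};
  push_entry(&a);
  push_entry(&b);
  std::printf("found: %d\n", find_entry(1) != nullptr);
  return 0;
}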
- ExceptionCache* prev = nullptr; - ExceptionCache* curr = exception_cache_acquire(); - - while (curr != nullptr) { - ExceptionCache* next = curr->next(); - - if (!curr->exception_type()->is_loader_alive()) { - if (prev == nullptr) { - // Try to clean head; this is contended by concurrent inserts, that - // both lazily clean the head, and insert entries at the head. If - // the CAS fails, the operation is restarted. - if (Atomic::cmpxchg(&_exception_cache, curr, next) != curr) { - prev = nullptr; - curr = exception_cache_acquire(); - continue; - } - } else { - // It is impossible to during cleanup connect the next pointer to - // an ExceptionCache that has not been published before a safepoint - // prior to the cleanup. Therefore, release is not required. - prev->set_next(next); - } - // prev stays the same. - - CodeCache::release_exception_cache(curr); - } else { - prev = curr; - } - - curr = next; - } -} - -// public method for accessing the exception cache -// These are the public access methods. -address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) { - // We never grab a lock to read the exception cache, so we may - // have false negatives. This is okay, as it can only happen during - // the first few exception lookups for a given nmethod. - ExceptionCache* ec = exception_cache_acquire(); - while (ec != nullptr) { - address ret_val; - if ((ret_val = ec->match(exception,pc)) != nullptr) { - return ret_val; - } - ec = ec->next(); - } - return nullptr; -} - -void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) { - // There are potential race conditions during exception cache updates, so we - // must own the ExceptionCache_lock before doing ANY modifications. Because - // we don't lock during reads, it is possible to have several threads attempt - // to update the cache with the same data. We need to check for already inserted - // copies of the current data before adding it. - - MutexLocker ml(ExceptionCache_lock); - ExceptionCache* target_entry = exception_cache_entry_for_exception(exception); - - if (target_entry == nullptr || !target_entry->add_address_and_handler(pc,handler)) { - target_entry = new ExceptionCache(exception,pc,handler); - add_exception_cache_entry(target_entry); - } -} - -// private method for handling exception cache -// These methods are private, and used to manipulate the exception cache -// directly. 
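add_handler_for_exception_and_pc above takes ExceptionCache_lock and re-checks for an entry another thread may already have inserted before allocating a new one, because unlocked readers mean several threads can race to add the same data. A minimal sketch of that check-then-insert-under-the-lock shape (the lock-free readers and fixed-capacity entries of the real cache are not modeled; HandlerCache and its members are invented):

#include <mutex>
#include <unordered_map>

class HandlerCache {
  std::mutex _lock;
  std::unordered_map<int, const void*> _handler_by_type;   // placeholder key/value types
public:
  void add_handler(int exception_type, const void* handler) {
    std::lock_guard<std::mutex> ml(_lock);                 // writers are serialized
    auto it = _handler_by_type.find(exception_type);       // another thread may have won the race
    if (it == _handler_by_type.end()) {
      _handler_by_type.emplace(exception_type, handler);   // insert only if still absent
    }
  }
  const void* lookup(int exception_type) {
    std::lock_guard<std::mutex> ml(_lock);                 // unlike the real code, reads lock too
    auto it = _handler_by_type.find(exception_type);
    return it == _handler_by_type.end() ? nullptr : it->second;
  }
};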
-ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) { - ExceptionCache* ec = exception_cache_acquire(); - while (ec != nullptr) { - if (ec->match_exception_with_space(exception)) { - return ec; - } - ec = ec->next(); - } - return nullptr; -} - -//-------------end of code for ExceptionCache-------------- - -bool CompiledMethod::is_at_poll_return(address pc) { - RelocIterator iter(this, pc, pc+1); - while (iter.next()) { - if (iter.type() == relocInfo::poll_return_type) - return true; - } - return false; -} - - -bool CompiledMethod::is_at_poll_or_poll_return(address pc) { - RelocIterator iter(this, pc, pc+1); - while (iter.next()) { - relocInfo::relocType t = iter.type(); - if (t == relocInfo::poll_return_type || t == relocInfo::poll_type) - return true; - } - return false; -} - -void CompiledMethod::verify_oop_relocations() { - // Ensure sure that the code matches the current oop values - RelocIterator iter(this, nullptr, nullptr); - while (iter.next()) { - if (iter.type() == relocInfo::oop_type) { - oop_Relocation* reloc = iter.oop_reloc(); - if (!reloc->oop_is_immediate()) { - reloc->verify_oop_relocation(); - } - } - } -} - - -ScopeDesc* CompiledMethod::scope_desc_at(address pc) { - PcDesc* pd = pc_desc_at(pc); - guarantee(pd != nullptr, "scope must be present"); - return new ScopeDesc(this, pd); -} - -ScopeDesc* CompiledMethod::scope_desc_near(address pc) { - PcDesc* pd = pc_desc_near(pc); - guarantee(pd != nullptr, "scope must be present"); - return new ScopeDesc(this, pd); -} - -address CompiledMethod::oops_reloc_begin() const { - // If the method is not entrant then a JMP is plastered over the - // first few bytes. If an oop in the old code was there, that oop - // should not get GC'd. Skip the first few bytes of oops on - // not-entrant methods. - if (frame_complete_offset() != CodeOffsets::frame_never_safe && - code_begin() + frame_complete_offset() > - verified_entry_point() + NativeJump::instruction_size) - { - // If we have a frame_complete_offset after the native jump, then there - // is no point trying to look for oops before that. This is a requirement - // for being allowed to scan oops concurrently. - return code_begin() + frame_complete_offset(); - } - - // It is not safe to read oops concurrently using entry barriers, if their - // location depend on whether the nmethod is entrant or not. - // assert(BarrierSet::barrier_set()->barrier_set_nmethod() == nullptr, "Not safe oop scan"); - - address low_boundary = verified_entry_point(); - if (!is_in_use() && is_nmethod()) { - low_boundary += NativeJump::instruction_size; - // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump. - // This means that the low_boundary is going to be a little too high. - // This shouldn't matter, since oops of non-entrant methods are never used. - // In fact, why are we bothering to look at oops in a non-entrant method?? - } - return low_boundary; -} - -// Method that knows how to preserve outgoing arguments at call. 
This method must be -// called with a frame corresponding to a Java invoke -void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { - if (method() == nullptr) { - return; - } - - // handle the case of an anchor explicitly set in continuation code that doesn't have a callee - JavaThread* thread = reg_map->thread(); - if (thread->has_last_Java_frame() && fr.sp() == thread->last_Java_sp()) { - return; - } - - if (!method()->is_native()) { - address pc = fr.pc(); - bool has_receiver, has_appendix; - Symbol* signature; - - // The method attached by JIT-compilers should be used, if present. - // Bytecode can be inaccurate in such case. - Method* callee = attached_method_before_pc(pc); - if (callee != nullptr) { - has_receiver = !(callee->access_flags().is_static()); - has_appendix = false; - signature = callee->signature(); - } else { - SimpleScopeDesc ssd(this, pc); - - Bytecode_invoke call(methodHandle(Thread::current(), ssd.method()), ssd.bci()); - has_receiver = call.has_receiver(); - has_appendix = call.has_appendix(); - signature = call.signature(); - } - - fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f); - } else if (method()->is_continuation_enter_intrinsic()) { - // This method only calls Continuation.enter() - Symbol* signature = vmSymbols::continuationEnter_signature(); - fr.oops_compiled_arguments_do(signature, false, false, reg_map, f); - } -} - -Method* CompiledMethod::attached_method(address call_instr) { - assert(code_contains(call_instr), "not part of the nmethod"); - RelocIterator iter(this, call_instr, call_instr + 1); - while (iter.next()) { - if (iter.addr() == call_instr) { - switch(iter.type()) { - case relocInfo::static_call_type: return iter.static_call_reloc()->method_value(); - case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value(); - case relocInfo::virtual_call_type: return iter.virtual_call_reloc()->method_value(); - default: break; - } - } - } - return nullptr; // not found -} - -Method* CompiledMethod::attached_method_before_pc(address pc) { - if (NativeCall::is_call_before(pc)) { - NativeCall* ncall = nativeCall_before(pc); - return attached_method(ncall->instruction_address()); - } - return nullptr; // not a call -} - -void CompiledMethod::clear_inline_caches() { - assert(SafepointSynchronize::is_at_safepoint(), "clearing of IC's only allowed at safepoint"); - RelocIterator iter(this); - while (iter.next()) { - iter.reloc()->clear_inline_cache(); - } -} - -#ifdef ASSERT -// Check class_loader is alive for this bit of metadata. -class CheckClass : public MetadataClosure { - void do_metadata(Metadata* md) { - Klass* klass = nullptr; - if (md->is_klass()) { - klass = ((Klass*)md); - } else if (md->is_method()) { - klass = ((Method*)md)->method_holder(); - } else if (md->is_methodData()) { - klass = ((MethodData*)md)->method()->method_holder(); - } else { - md->print(); - ShouldNotReachHere(); - } - assert(klass->is_loader_alive(), "must be alive"); - } -}; -#endif // ASSERT - - -static void clean_ic_if_metadata_is_dead(CompiledIC *ic) { - ic->clean_metadata(); -} - -// Clean references to unloaded nmethods at addr from this one, which is not unloaded. 
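The template function in the next hunk resets a call site to the clean state when asked to clean everything or when the compiled code it points at is no longer usable. Roughly, the decision looks like the following standalone sketch; TargetCode and its fields are invented stand-ins for the real checks (is_in_use, is_unloading, and whether the code is still the Method's current code):

// Decide whether a call site should be reset to the "clean" (unresolved) state.
struct TargetCode {
  bool in_use;       // still an entry callers should reach
  bool unloading;    // being unloaded by the GC
  bool is_current;   // still the Method's current compiled code
};

bool should_clean_callsite(bool clean_all, const TargetCode* target) {
  if (target == nullptr) {
    return false;    // destination is not compiled code at all; nothing to clean
  }
  return clean_all || !target->in_use || target->unloading || !target->is_current;
}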
-template -static void clean_if_nmethod_is_unloaded(CallsiteT* callsite, CompiledMethod* from, - bool clean_all) { - CodeBlob* cb = CodeCache::find_blob(callsite->destination()); - if (!cb->is_compiled()) { - return; - } - CompiledMethod* cm = cb->as_compiled_method(); - if (clean_all || !cm->is_in_use() || cm->is_unloading() || cm->method()->code() != cm) { - callsite->set_to_clean(); - } -} - -// Cleans caches in nmethods that point to either classes that are unloaded -// or nmethods that are unloaded. -// -// Can be called either in parallel by G1 currently or after all -// nmethods are unloaded. Return postponed=true in the parallel case for -// inline caches found that point to nmethods that are not yet visited during -// the do_unloading walk. -void CompiledMethod::unload_nmethod_caches(bool unloading_occurred) { - ResourceMark rm; - - // Exception cache only needs to be called if unloading occurred - if (unloading_occurred) { - clean_exception_cache(); - } - - cleanup_inline_caches_impl(unloading_occurred, false); - -#ifdef ASSERT - // Check that the metadata embedded in the nmethod is alive - CheckClass check_class; - metadata_do(&check_class); -#endif -} - -void CompiledMethod::run_nmethod_entry_barrier() { - BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); - if (bs_nm != nullptr) { - // We want to keep an invariant that nmethods found through iterations of a Thread's - // nmethods found in safepoints have gone through an entry barrier and are not armed. - // By calling this nmethod entry barrier, it plays along and acts - // like any other nmethod found on the stack of a thread (fewer surprises). - nmethod* nm = as_nmethod_or_null(); - if (nm != nullptr && bs_nm->is_armed(nm)) { - bool alive = bs_nm->nmethod_entry_barrier(nm); - assert(alive, "should be alive"); - } - } -} - -// Only called by whitebox test -void CompiledMethod::cleanup_inline_caches_whitebox() { - assert_locked_or_safepoint(CodeCache_lock); - CompiledICLocker ic_locker(this); - cleanup_inline_caches_impl(false /* unloading_occurred */, true /* clean_all */); -} - -address* CompiledMethod::orig_pc_addr(const frame* fr) { - return (address*) ((address)fr->unextended_sp() + orig_pc_offset()); -} - -// Called to clean up after class unloading for live nmethods -void CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) { - assert(CompiledICLocker::is_safe(this), "mt unsafe call"); - ResourceMark rm; - - // Find all calls in an nmethod and clear the ones that point to bad nmethods. - RelocIterator iter(this, oops_reloc_begin()); - bool is_in_static_stub = false; - while(iter.next()) { - - switch (iter.type()) { - - case relocInfo::virtual_call_type: - if (unloading_occurred) { - // If class unloading occurred we first clear ICs where the cached metadata - // is referring to an unloaded klass or method. - clean_ic_if_metadata_is_dead(CompiledIC_at(&iter)); - } - - clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all); - break; - - case relocInfo::opt_virtual_call_type: - case relocInfo::static_call_type: - clean_if_nmethod_is_unloaded(CompiledDirectCall::at(iter.reloc()), this, clean_all); - break; - - case relocInfo::static_stub_type: { - is_in_static_stub = true; - break; - } - - case relocInfo::metadata_type: { - // Only the metadata relocations contained in static/opt virtual call stubs - // contains the Method* passed to c2i adapters. 
It is the only metadata - // relocation that needs to be walked, as it is the one metadata relocation - // that violates the invariant that all metadata relocations have an oop - // in the compiled method (due to deferred resolution and code patching). - - // This causes dead metadata to remain in compiled methods that are not - // unloading. Unless these slippery metadata relocations of the static - // stubs are at least cleared, subsequent class redefinition operations - // will access potentially free memory, and JavaThread execution - // concurrent to class unloading may call c2i adapters with dead methods. - if (!is_in_static_stub) { - // The first metadata relocation after a static stub relocation is the - // metadata relocation of the static stub used to pass the Method* to - // c2i adapters. - continue; - } - is_in_static_stub = false; - if (is_unloading()) { - // If the nmethod itself is dying, then it may point at dead metadata. - // Nobody should follow that metadata; it is strictly unsafe. - continue; - } - metadata_Relocation* r = iter.metadata_reloc(); - Metadata* md = r->metadata_value(); - if (md != nullptr && md->is_method()) { - Method* method = static_cast(md); - if (!method->method_holder()->is_loader_alive()) { - Atomic::store(r->metadata_addr(), (Method*)nullptr); - - if (!r->metadata_is_immediate()) { - r->fix_metadata_relocation(); - } - } - } - break; - } - - default: - break; - } - } -} - -address CompiledMethod::continuation_for_implicit_exception(address pc, bool for_div0_check) { - // Exception happened outside inline-cache check code => we are inside - // an active nmethod => use cpc to determine a return address - int exception_offset = int(pc - code_begin()); - int cont_offset = ImplicitExceptionTable(this).continuation_offset( exception_offset ); -#ifdef ASSERT - if (cont_offset == 0) { - Thread* thread = Thread::current(); - ResourceMark rm(thread); - CodeBlob* cb = CodeCache::find_blob(pc); - assert(cb != nullptr && cb == this, ""); - - // Keep tty output consistent. To avoid ttyLocker, we buffer in stream, and print all at once. - stringStream ss; - ss.print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc)); - print_on(&ss); - method()->print_codes_on(&ss); - print_code_on(&ss); - print_pcs_on(&ss); - tty->print("%s", ss.as_string()); // print all at once - } -#endif - if (cont_offset == 0) { - // Let the normal error handling report the exception - return nullptr; - } - if (cont_offset == exception_offset) { -#if INCLUDE_JVMCI - Deoptimization::DeoptReason deopt_reason = for_div0_check ? 
Deoptimization::Reason_div0_check : Deoptimization::Reason_null_check; - JavaThread *thread = JavaThread::current(); - thread->set_jvmci_implicit_exception_pc(pc); - thread->set_pending_deoptimization(Deoptimization::make_trap_request(deopt_reason, - Deoptimization::Action_reinterpret)); - return (SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap()); -#else - ShouldNotReachHere(); -#endif - } - return code_begin() + cont_offset; -} - -class HasEvolDependency : public MetadataClosure { - bool _has_evol_dependency; - public: - HasEvolDependency() : _has_evol_dependency(false) {} - void do_metadata(Metadata* md) { - if (md->is_method()) { - Method* method = (Method*)md; - if (method->is_old()) { - _has_evol_dependency = true; - } - } - } - bool has_evol_dependency() const { return _has_evol_dependency; } -}; - -bool CompiledMethod::has_evol_metadata() { - // Check the metadata in relocIter and CompiledIC and also deoptimize - // any nmethod that has reference to old methods. - HasEvolDependency check_evol; - metadata_do(&check_evol); - if (check_evol.has_evol_dependency() && log_is_enabled(Debug, redefine, class, nmethod)) { - ResourceMark rm; - log_debug(redefine, class, nmethod) - ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on in nmethod metadata", - _method->method_holder()->external_name(), - _method->name()->as_C_string(), - _method->signature()->as_C_string(), - compile_id()); - } - return check_evol.has_evol_dependency(); -} diff --git a/src/hotspot/share/code/compiledMethod.hpp b/src/hotspot/share/code/compiledMethod.hpp deleted file mode 100644 index 42d68bda554..00000000000 --- a/src/hotspot/share/code/compiledMethod.hpp +++ /dev/null @@ -1,415 +0,0 @@ -/* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_CODE_COMPILEDMETHOD_HPP -#define SHARE_CODE_COMPILEDMETHOD_HPP - -#include "code/codeBlob.hpp" -#include "code/pcDesc.hpp" -#include "oops/metadata.hpp" -#include "oops/method.hpp" - -class Dependencies; -class ExceptionHandlerTable; -class ImplicitExceptionTable; -class AbstractCompiler; -class xmlStream; -class CompiledDirectCall; -class NativeCallWrapper; -class ScopeDesc; -class CompiledIC; -class MetadataClosure; - -// This class is used internally by nmethods, to cache -// exception/pc/handler information. 
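has_evol_metadata above answers a yes/no question by running a small closure over every piece of metadata the compiled method references; the closure simply remembers whether it ever saw an old (redefined) Method. A standalone sketch of that closure-as-predicate shape; Metadata, MetadataClosure and metadata_do below are simplified stand-ins, not the HotSpot types:

#include <cstdio>
#include <vector>

struct Metadata {
  bool is_method = false;
  bool is_old    = false;    // "old" == superseded by class redefinition
};

// Visitor interface in the spirit of MetadataClosure.
struct MetadataClosure {
  virtual void do_metadata(Metadata* md) = 0;
  virtual ~MetadataClosure() = default;
};

// Closure that remembers whether any visited metadata was an old Method.
class HasEvolDependency : public MetadataClosure {
  bool _found = false;
public:
  void do_metadata(Metadata* md) override {
    if (md->is_method && md->is_old) _found = true;
  }
  bool has_evol_dependency() const { return _found; }
};

// Stand-in for nmethod::metadata_do: apply the closure to every element.
void metadata_do(const std::vector<Metadata*>& all, MetadataClosure* f) {
  for (Metadata* md : all) f->do_metadata(md);
}

int main() {
  Metadata fresh{true, false}, stale{true, true};
  std::vector<Metadata*> refs{&fresh, &stale};
  HasEvolDependency check;
  metadata_do(refs, &check);
  std::printf("has evol dependency: %s\n", check.has_evol_dependency() ? "yes" : "no");
  return 0;
}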
- -class ExceptionCache : public CHeapObj { - friend class VMStructs; - private: - enum { cache_size = 16 }; - Klass* _exception_type; - address _pc[cache_size]; - address _handler[cache_size]; - volatile int _count; - ExceptionCache* volatile _next; - ExceptionCache* _purge_list_next; - - inline address pc_at(int index); - void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; } - - inline address handler_at(int index); - void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; } - - inline int count(); - // increment_count is only called under lock, but there may be concurrent readers. - void increment_count(); - - public: - - ExceptionCache(Handle exception, address pc, address handler); - - Klass* exception_type() { return _exception_type; } - ExceptionCache* next(); - void set_next(ExceptionCache *ec); - ExceptionCache* purge_list_next() { return _purge_list_next; } - void set_purge_list_next(ExceptionCache *ec) { _purge_list_next = ec; } - - address match(Handle exception, address pc); - bool match_exception_with_space(Handle exception) ; - address test_address(address addr); - bool add_address_and_handler(address addr, address handler) ; -}; - -class nmethod; - -// cache pc descs found in earlier inquiries -class PcDescCache { - friend class VMStructs; - private: - enum { cache_size = 4 }; - // The array elements MUST be volatile! Several threads may modify - // and read from the cache concurrently. find_pc_desc_internal has - // returned wrong results. C++ compiler (namely xlC12) may duplicate - // C++ field accesses if the elements are not volatile. - typedef PcDesc* PcDescPtr; - volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found - public: - PcDescCache() { debug_only(_pc_descs[0] = nullptr); } - void reset_to(PcDesc* initial_pc_desc); - PcDesc* find_pc_desc(int pc_offset, bool approximate); - void add_pc_desc(PcDesc* pc_desc); - PcDesc* last_pc_desc() { return _pc_descs[0]; } -}; - -class PcDescSearch { -private: - address _code_begin; - PcDesc* _lower; - PcDesc* _upper; -public: - PcDescSearch(address code, PcDesc* lower, PcDesc* upper) : - _code_begin(code), _lower(lower), _upper(upper) - { - } - - address code_begin() const { return _code_begin; } - PcDesc* scopes_pcs_begin() const { return _lower; } - PcDesc* scopes_pcs_end() const { return _upper; } -}; - -class PcDescContainer { -private: - PcDescCache _pc_desc_cache; -public: - PcDescContainer() {} - - PcDesc* find_pc_desc_internal(address pc, bool approximate, const PcDescSearch& search); - void reset_to(PcDesc* initial_pc_desc) { _pc_desc_cache.reset_to(initial_pc_desc); } - - PcDesc* find_pc_desc(address pc, bool approximate, const PcDescSearch& search) { - address base_address = search.code_begin(); - PcDesc* desc = _pc_desc_cache.last_pc_desc(); - if (desc != nullptr && desc->pc_offset() == pc - base_address) { - return desc; - } - return find_pc_desc_internal(pc, approximate, search); - } -}; - - -class CompiledMethod : public CodeBlob { - friend class VMStructs; - friend class DeoptimizationScope; - void init_defaults(); -protected: - enum DeoptimizationStatus : u1 { - not_marked, - deoptimize, - deoptimize_noupdate, - deoptimize_done - }; - - volatile DeoptimizationStatus _deoptimization_status; // Used for stack deoptimization - // Used to track in which deoptimize handshake this method will be deoptimized. 
- uint64_t _deoptimization_generation; - - // set during construction - unsigned int _has_unsafe_access:1; // May fault due to unsafe access. - unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes? - unsigned int _has_wide_vectors:1; // Preserve wide vectors at safepoints - unsigned int _has_monitors:1; // Fastpath monitor detection for continuations - - Method* _method; - address _scopes_data_begin; - // All deoptee's will resume execution at this location described by - // this address. - address _deopt_handler_begin; - // All deoptee's at a MethodHandle call site will resume execution - // at this location described by this offset. - address _deopt_mh_handler_begin; - - PcDescContainer _pc_desc_container; - ExceptionCache * volatile _exception_cache; - - void* _gc_data; - - virtual void purge(bool free_code_cache_data, bool unregister_nmethod) = 0; - -private: - DeoptimizationStatus deoptimization_status() const { - return Atomic::load(&_deoptimization_status); - } - -protected: - CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled); - CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled); - -public: - // Only used by unit test. - CompiledMethod() {} - - template - T* gc_data() const { return reinterpret_cast(_gc_data); } - template - void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast(gc_data); } - - bool has_unsafe_access() const { return _has_unsafe_access; } - void set_has_unsafe_access(bool z) { _has_unsafe_access = z; } - - bool has_monitors() const { return _has_monitors; } - void set_has_monitors(bool z) { _has_monitors = z; } - - bool has_method_handle_invokes() const { return _has_method_handle_invokes; } - void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; } - - bool has_wide_vectors() const { return _has_wide_vectors; } - void set_has_wide_vectors(bool z) { _has_wide_vectors = z; } - - enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is - // allowed to advance state - in_use = 0, // executable nmethod - not_used = 1, // not entrant, but revivable - not_entrant = 2, // marked for deoptimization but activations may still exist - }; - - virtual bool is_in_use() const = 0; - virtual int comp_level() const = 0; - virtual int compile_id() const = 0; - - virtual address verified_entry_point() const = 0; - virtual void log_identity(xmlStream* log) const = 0; - virtual void log_state_change() const = 0; - virtual bool make_not_used() = 0; - virtual bool make_not_entrant() = 0; - virtual bool make_entrant() = 0; - virtual address entry_point() const = 0; - virtual bool is_osr_method() const = 0; - virtual int osr_entry_bci() const = 0; - Method* method() const { return _method; } - virtual void print_pcs_on(outputStream* st) = 0; - bool is_native_method() const { return _method != nullptr && _method->is_native(); } - bool is_java_method() const { return _method != nullptr && !_method->is_native(); } - - // ScopeDesc retrieval operation - PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); } - // pc_desc_near returns the first PcDesc at or after the given pc. 
- PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); } - - // ScopeDesc for an instruction - ScopeDesc* scope_desc_at(address pc); - ScopeDesc* scope_desc_near(address pc); - - bool is_at_poll_return(address pc); - bool is_at_poll_or_poll_return(address pc); - - bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; } - bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; } - void set_deoptimized_done(); - - virtual void make_deoptimized() { assert(false, "not supported"); }; - - bool update_recompile_counts() const { - // Update recompile counts when either the update is explicitly requested (deoptimize) - // or the nmethod is not marked for deoptimization at all (not_marked). - // The latter happens during uncommon traps when deoptimized nmethod is made not entrant. - DeoptimizationStatus status = deoptimization_status(); - return status != deoptimize_noupdate && status != deoptimize_done; - } - - // tells whether frames described by this nmethod can be deoptimized - // note: native wrappers cannot be deoptimized. - bool can_be_deoptimized() const { return is_java_method(); } - - virtual oop oop_at(int index) const = 0; - virtual Metadata* metadata_at(int index) const = 0; - - address scopes_data_begin() const { return _scopes_data_begin; } - virtual address scopes_data_end() const = 0; - int scopes_data_size() const { return int(scopes_data_end() - scopes_data_begin()); } - - virtual PcDesc* scopes_pcs_begin() const = 0; - virtual PcDesc* scopes_pcs_end() const = 0; - int scopes_pcs_size() const { return int((intptr_t) scopes_pcs_end() - (intptr_t) scopes_pcs_begin()); } - - address insts_begin() const { return code_begin(); } - address insts_end() const { return stub_begin(); } - // Returns true if a given address is in the 'insts' section. The method - // insts_contains_inclusive() is end-inclusive. 
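The comment above distinguishes the half-open insts_contains() from the end-inclusive insts_contains_inclusive() defined in the next chunk; the inclusive form also accepts the address one past the section, which callers need for a PC that sits immediately after the last instruction. A tiny sketch of the two checks, with address modeled as a plain byte pointer:

#include <cstdint>

using address = const uint8_t*;

// Half-open check: addr is inside [begin, end). The usual "is this PC inside
// the instructions section" query.
inline bool range_contains(address begin, address end, address addr) {
  return begin <= addr && addr < end;
}

// End-inclusive variant: additionally accepts the first byte past the section,
// e.g. a PC recorded just after the last instruction.
inline bool range_contains_inclusive(address begin, address end, address addr) {
  return begin <= addr && addr <= end;
}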
- bool insts_contains(address addr) const { return insts_begin() <= addr && addr < insts_end(); } - bool insts_contains_inclusive(address addr) const { return insts_begin() <= addr && addr <= insts_end(); } - - int insts_size() const { return int(insts_end() - insts_begin()); } - - virtual address consts_begin() const = 0; - virtual address consts_end() const = 0; - bool consts_contains(address addr) const { return consts_begin() <= addr && addr < consts_end(); } - int consts_size() const { return int(consts_end() - consts_begin()); } - - virtual int skipped_instructions_size() const = 0; - - virtual address stub_begin() const = 0; - virtual address stub_end() const = 0; - bool stub_contains(address addr) const { return stub_begin() <= addr && addr < stub_end(); } - int stub_size() const { return int(stub_end() - stub_begin()); } - - virtual address handler_table_begin() const = 0; - virtual address handler_table_end() const = 0; - bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); } - int handler_table_size() const { return int(handler_table_end() - handler_table_begin()); } - - virtual address exception_begin() const = 0; - - virtual address nul_chk_table_begin() const = 0; - virtual address nul_chk_table_end() const = 0; - bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); } - int nul_chk_table_size() const { return int(nul_chk_table_end() - nul_chk_table_begin()); } - - virtual oop* oop_addr_at(int index) const = 0; - virtual Metadata** metadata_addr_at(int index) const = 0; - -protected: - // Exception cache support - // Note: _exception_cache may be read and cleaned concurrently. - ExceptionCache* exception_cache() const { return _exception_cache; } - ExceptionCache* exception_cache_acquire() const; - void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; } - -public: - address handler_for_exception_and_pc(Handle exception, address pc); - void add_handler_for_exception_and_pc(Handle exception, address pc, address handler); - void clean_exception_cache(); - - void add_exception_cache_entry(ExceptionCache* new_entry); - ExceptionCache* exception_cache_entry_for_exception(Handle exception); - - // MethodHandle - bool is_method_handle_return(address return_pc); - address deopt_mh_handler_begin() const { return _deopt_mh_handler_begin; } - - address deopt_handler_begin() const { return _deopt_handler_begin; } - address* deopt_handler_begin_addr() { return &_deopt_handler_begin; } - // Deopt - // Return true is the PC is one would expect if the frame is being deopted. - inline bool is_deopt_pc(address pc); - inline bool is_deopt_mh_entry(address pc); - inline bool is_deopt_entry(address pc); - - // Accessor/mutator for the original pc of a frame before a frame was deopted. 
- address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); } - void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; } - - virtual int orig_pc_offset() = 0; - -private: - address* orig_pc_addr(const frame* fr); - -public: - virtual const char* compile_kind() const = 0; - virtual int get_state() const = 0; - - const char* state() const; - - bool inlinecache_check_contains(address addr) const { - return (addr >= code_begin() && addr < verified_entry_point()); - } - - void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f); - - // implicit exceptions support - address continuation_for_implicit_div0_exception(address pc) { return continuation_for_implicit_exception(pc, true); } - address continuation_for_implicit_null_exception(address pc) { return continuation_for_implicit_exception(pc, false); } - - static address get_deopt_original_pc(const frame* fr); - - // Inline cache support for class unloading and nmethod unloading - private: - void cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all); - - address continuation_for_implicit_exception(address pc, bool for_div0_check); - - public: - // Serial version used by whitebox test - void cleanup_inline_caches_whitebox(); - - virtual void clear_inline_caches(); - - // Execute nmethod barrier code, as if entering through nmethod call. - void run_nmethod_entry_barrier(); - - void verify_oop_relocations(); - - bool has_evol_metadata(); - - // Fast breakpoint support. Tells if this compiled method is - // dependent on the given method. Returns true if this nmethod - // corresponds to the given method as well. - virtual bool is_dependent_on_method(Method* dependee) = 0; - - virtual address call_instruction_address(address pc) const = 0; - - Method* attached_method(address call_pc); - Method* attached_method_before_pc(address pc); - - virtual void metadata_do(MetadataClosure* f) = 0; - - // GC support - protected: - address oops_reloc_begin() const; - - public: - // GC unloading support - // Cleans unloaded klasses and unloaded nmethods in inline caches - - virtual bool is_unloading() = 0; - - void unload_nmethod_caches(bool class_unloading_occurred); - virtual void do_unloading(bool unloading_occurred) = 0; - -private: - PcDesc* find_pc_desc(address pc, bool approximate) { - return _pc_desc_container.find_pc_desc(pc, approximate, PcDescSearch(code_begin(), scopes_pcs_begin(), scopes_pcs_end())); - } -}; - -#endif // SHARE_CODE_COMPILEDMETHOD_HPP diff --git a/src/hotspot/share/code/debugInfo.cpp b/src/hotspot/share/code/debugInfo.cpp index 4cc94dd13e7..2ef9db5bf83 100644 --- a/src/hotspot/share/code/debugInfo.cpp +++ b/src/hotspot/share/code/debugInfo.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -53,15 +53,9 @@ void DebugInfoWriteStream::write_metadata(Metadata* h) { } oop DebugInfoReadStream::read_oop() { - nmethod* nm = const_cast(code())->as_nmethod_or_null(); - oop o; - if (nm != nullptr) { - // Despite these oops being found inside nmethods that are on-stack, - // they are not kept alive by all GCs (e.g. G1 and Shenandoah). 
- o = nm->oop_at_phantom(read_int()); - } else { - o = code()->oop_at(read_int()); - } + // Despite these oops being found inside nmethods that are on-stack, + // they are not kept alive by all GCs (e.g. G1 and Shenandoah). + oop o = code()->oop_at_phantom(read_int()); assert(oopDesc::is_oop_or_null(o), "oop only"); return o; } diff --git a/src/hotspot/share/code/debugInfo.hpp b/src/hotspot/share/code/debugInfo.hpp index 1214059294d..dfe32a66fcd 100644 --- a/src/hotspot/share/code/debugInfo.hpp +++ b/src/hotspot/share/code/debugInfo.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -372,11 +372,11 @@ class MonitorValue: public ResourceObj { class DebugInfoReadStream : public CompressedReadStream { private: - const CompiledMethod* _code; - const CompiledMethod* code() const { return _code; } + const nmethod* _code; + const nmethod* code() const { return _code; } GrowableArray* _obj_pool; public: - DebugInfoReadStream(const CompiledMethod* code, int offset, GrowableArray* obj_pool = nullptr) : + DebugInfoReadStream(const nmethod* code, int offset, GrowableArray* obj_pool = nullptr) : CompressedReadStream(code->scopes_data_begin(), offset) { _code = code; _obj_pool = obj_pool; diff --git a/src/hotspot/share/code/debugInfoRec.cpp b/src/hotspot/share/code/debugInfoRec.cpp index 15353bf2872..85be80dbf0b 100644 --- a/src/hotspot/share/code/debugInfoRec.cpp +++ b/src/hotspot/share/code/debugInfoRec.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -175,7 +175,7 @@ void DebugInformationRecorder::add_non_safepoint(int pc_offset) { void DebugInformationRecorder::add_new_pc_offset(int pc_offset) { assert(_pcs_length == 0 || last_pc()->pc_offset() < pc_offset, - "must specify a new, larger pc offset"); + "must specify a new, larger pc offset: %d >= %d", last_pc()->pc_offset(), pc_offset); // add the pcdesc if (_pcs_length == _pcs_size) { diff --git a/src/hotspot/share/code/exceptionHandlerTable.cpp b/src/hotspot/share/code/exceptionHandlerTable.cpp index 8bcf5a43953..aedeb0e9e04 100644 --- a/src/hotspot/share/code/exceptionHandlerTable.cpp +++ b/src/hotspot/share/code/exceptionHandlerTable.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -65,9 +65,9 @@ ExceptionHandlerTable::ExceptionHandlerTable(int initial_size) { } -ExceptionHandlerTable::ExceptionHandlerTable(const CompiledMethod* cm) { - _table = (HandlerTableEntry*)cm->handler_table_begin(); - _length = cm->handler_table_size() / sizeof(HandlerTableEntry); +ExceptionHandlerTable::ExceptionHandlerTable(const nmethod* nm) { + _table = (HandlerTableEntry*)nm->handler_table_begin(); + _length = nm->handler_table_size() / sizeof(HandlerTableEntry); _size = 0; // no space allocated by ExceptionHandlerTable! 
} @@ -98,9 +98,9 @@ void ExceptionHandlerTable::add_subtable( } -void ExceptionHandlerTable::copy_to(CompiledMethod* cm) { - assert(size_in_bytes() == cm->handler_table_size(), "size of space allocated in compiled method incorrect"); - copy_bytes_to(cm->handler_table_begin()); +void ExceptionHandlerTable::copy_to(nmethod* nm) { + assert(size_in_bytes() == nm->handler_table_size(), "size of space allocated in compiled method incorrect"); + copy_bytes_to(nm->handler_table_begin()); } void ExceptionHandlerTable::copy_bytes_to(address addr) { @@ -215,7 +215,7 @@ void ImplicitExceptionTable::print(address base) const { } } -ImplicitExceptionTable::ImplicitExceptionTable(const CompiledMethod* nm) { +ImplicitExceptionTable::ImplicitExceptionTable(const nmethod* nm) { if (nm->nul_chk_table_size() == 0) { _len = 0; _data = nullptr; diff --git a/src/hotspot/share/code/exceptionHandlerTable.hpp b/src/hotspot/share/code/exceptionHandlerTable.hpp index f1dcab657ff..083dc430111 100644 --- a/src/hotspot/share/code/exceptionHandlerTable.hpp +++ b/src/hotspot/share/code/exceptionHandlerTable.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -99,7 +99,7 @@ class ExceptionHandlerTable { ExceptionHandlerTable(int initial_size = 8); // (run-time) construction from nmethod - ExceptionHandlerTable(const CompiledMethod* nm); + ExceptionHandlerTable(const nmethod* nm); // (compile-time) add entries void add_subtable( @@ -116,7 +116,7 @@ class ExceptionHandlerTable { // nmethod support int size_in_bytes() const { return align_up(_length * (int)sizeof(HandlerTableEntry), oopSize); } - void copy_to(CompiledMethod* nm); + void copy_to(nmethod* nm); void copy_bytes_to(address addr); // lookup @@ -150,7 +150,7 @@ class ImplicitExceptionTable { public: ImplicitExceptionTable( ) : _size(0), _len(0), _data(0) { } // (run-time) construction from nmethod - ImplicitExceptionTable( const CompiledMethod *nm ); + ImplicitExceptionTable(const nmethod *nm); void set_size( uint size ); void append( uint exec_off, uint cont_off ); diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp index 2755df32513..c2970e23239 100644 --- a/src/hotspot/share/code/nmethod.cpp +++ b/src/hotspot/share/code/nmethod.cpp @@ -26,10 +26,9 @@ #include "asm/assembler.inline.hpp" #include "code/codeCache.hpp" #include "code/compiledIC.hpp" -#include "code/compiledMethod.inline.hpp" #include "code/dependencies.hpp" #include "code/nativeInst.hpp" -#include "code/nmethod.hpp" +#include "code/nmethod.inline.hpp" #include "code/scopeDesc.hpp" #include "compiler/abstractCompiler.hpp" #include "compiler/compilationLog.hpp" @@ -44,7 +43,7 @@ #include "gc/shared/barrierSetNMethod.hpp" #include "gc/shared/classUnloadingContext.hpp" #include "gc/shared/collectedHeap.hpp" -#include "interpreter/bytecode.hpp" +#include "interpreter/bytecode.inline.hpp" #include "jvm.h" #include "logging/log.hpp" #include "logging/logStream.hpp" @@ -98,9 +97,9 @@ Symbol* name = m->name(); \ Symbol* signature = m->signature(); \ HOTSPOT_COMPILED_METHOD_UNLOAD( \ - (char *) klass_name->bytes(), klass_name->utf8_length(), \ - (char *) name->bytes(), name->utf8_length(), \ - (char *) signature->bytes(), signature->utf8_length()); \ + (char *) klass_name->bytes(), klass_name->utf8_length(), \ 
+ (char *) name->bytes(), name->utf8_length(), \ + (char *) signature->bytes(), signature->utf8_length()); \ } \ } @@ -138,6 +137,9 @@ struct java_nmethod_stats_struct { uint oops_size; uint metadata_size; + uint size_gt_32k; + int size_max; + void note_nmethod(nmethod* nm) { nmethod_count += 1; total_size += nm->size(); @@ -156,27 +158,33 @@ struct java_nmethod_stats_struct { speculations_size += nm->speculations_size(); jvmci_data_size += nm->jvmci_data_size(); #endif + int short_pos_max = ((1<<15) - 1); + if (nm->size() > short_pos_max) size_gt_32k++; + if (nm->size() > size_max) size_max = nm->size(); } void print_nmethod_stats(const char* name) { if (nmethod_count == 0) return; tty->print_cr("Statistics for %u bytecoded nmethods for %s:", nmethod_count, name); - if (total_size != 0) tty->print_cr(" total in heap = %u", total_size); - if (nmethod_count != 0) tty->print_cr(" header = " SIZE_FORMAT, nmethod_count * sizeof(nmethod)); - if (relocation_size != 0) tty->print_cr(" relocation = %u", relocation_size); - if (consts_size != 0) tty->print_cr(" constants = %u", consts_size); - if (insts_size != 0) tty->print_cr(" main code = %u", insts_size); - if (stub_size != 0) tty->print_cr(" stub code = %u", stub_size); - if (oops_size != 0) tty->print_cr(" oops = %u", oops_size); - if (metadata_size != 0) tty->print_cr(" metadata = %u", metadata_size); - if (scopes_data_size != 0) tty->print_cr(" scopes data = %u", scopes_data_size); - if (scopes_pcs_size != 0) tty->print_cr(" scopes pcs = %u", scopes_pcs_size); - if (dependencies_size != 0) tty->print_cr(" dependencies = %u", dependencies_size); - if (handler_table_size != 0) tty->print_cr(" handler table = %u", handler_table_size); - if (nul_chk_table_size != 0) tty->print_cr(" nul chk table = %u", nul_chk_table_size); + if (total_size != 0) tty->print_cr(" total in heap = %u (100%%)", total_size); + uint header_size = (uint)(nmethod_count * sizeof(nmethod)); + if (nmethod_count != 0) tty->print_cr(" header = %u (%f%%)", header_size, (header_size * 100.0f)/total_size); + if (relocation_size != 0) tty->print_cr(" relocation = %u (%f%%)", relocation_size, (relocation_size * 100.0f)/total_size); + if (consts_size != 0) tty->print_cr(" constants = %u (%f%%)", consts_size, (consts_size * 100.0f)/total_size); + if (insts_size != 0) tty->print_cr(" main code = %u (%f%%)", insts_size, (insts_size * 100.0f)/total_size); + if (stub_size != 0) tty->print_cr(" stub code = %u (%f%%)", stub_size, (stub_size * 100.0f)/total_size); + if (oops_size != 0) tty->print_cr(" oops = %u (%f%%)", oops_size, (oops_size * 100.0f)/total_size); + if (metadata_size != 0) tty->print_cr(" metadata = %u (%f%%)", metadata_size, (metadata_size * 100.0f)/total_size); + if (scopes_data_size != 0) tty->print_cr(" scopes data = %u (%f%%)", scopes_data_size, (scopes_data_size * 100.0f)/total_size); + if (scopes_pcs_size != 0) tty->print_cr(" scopes pcs = %u (%f%%)", scopes_pcs_size, (scopes_pcs_size * 100.0f)/total_size); + if (dependencies_size != 0) tty->print_cr(" dependencies = %u (%f%%)", dependencies_size, (dependencies_size * 100.0f)/total_size); + if (handler_table_size != 0) tty->print_cr(" handler table = %u (%f%%)", handler_table_size, (handler_table_size * 100.0f)/total_size); + if (nul_chk_table_size != 0) tty->print_cr(" nul chk table = %u (%f%%)", nul_chk_table_size, (nul_chk_table_size * 100.0f)/total_size); #if INCLUDE_JVMCI - if (speculations_size != 0) tty->print_cr(" speculations = %u", speculations_size); - if (jvmci_data_size != 0) tty->print_cr(" JVMCI data 
= %u", jvmci_data_size); + if (speculations_size != 0) tty->print_cr(" speculations = %u (%f%%)", speculations_size, (speculations_size * 100.0f)/total_size); + if (jvmci_data_size != 0) tty->print_cr(" JVMCI data = %u (%f%%)", jvmci_data_size, (jvmci_data_size * 100.0f)/total_size); #endif + if (size_gt_32k != 0) tty->print_cr(" size > 32k = %u", size_gt_32k); + if (size_max != 0) tty->print_cr(" max size = %d", size_max); } }; @@ -417,6 +425,558 @@ static int adjust_pcs_size(int pcs_size) { return nsize; } +bool nmethod::is_method_handle_return(address return_pc) { + if (!has_method_handle_invokes()) return false; + PcDesc* pd = pc_desc_at(return_pc); + if (pd == nullptr) + return false; + return pd->is_method_handle_invoke(); +} + +// Returns a string version of the method state. +const char* nmethod::state() const { + int state = get_state(); + switch (state) { + case not_installed: + return "not installed"; + case in_use: + return "in use"; + case not_entrant: + return "not_entrant"; + default: + fatal("unexpected method state: %d", state); + return nullptr; + } +} + +void nmethod::set_deoptimized_done() { + ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag); + if (_deoptimization_status != deoptimize_done) { // can't go backwards + Atomic::store(&_deoptimization_status, deoptimize_done); + } +} + +ExceptionCache* nmethod::exception_cache_acquire() const { + return Atomic::load_acquire(&_exception_cache); +} + +void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) { + assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock"); + assert(new_entry != nullptr,"Must be non null"); + assert(new_entry->next() == nullptr, "Must be null"); + + for (;;) { + ExceptionCache *ec = exception_cache(); + if (ec != nullptr) { + Klass* ex_klass = ec->exception_type(); + if (!ex_klass->is_loader_alive()) { + // We must guarantee that entries are not inserted with new next pointer + // edges to ExceptionCache entries with dead klasses, due to bad interactions + // with concurrent ExceptionCache cleanup. Therefore, the inserts roll + // the head pointer forward to the first live ExceptionCache, so that the new + // next pointers always point at live ExceptionCaches, that are not removed due + // to concurrent ExceptionCache cleanup. + ExceptionCache* next = ec->next(); + if (Atomic::cmpxchg(&_exception_cache, ec, next) == ec) { + CodeCache::release_exception_cache(ec); + } + continue; + } + ec = exception_cache(); + if (ec != nullptr) { + new_entry->set_next(ec); + } + } + if (Atomic::cmpxchg(&_exception_cache, ec, new_entry) == ec) { + return; + } + } +} + +void nmethod::clean_exception_cache() { + // For each nmethod, only a single thread may call this cleanup function + // at the same time, whether called in STW cleanup or concurrent cleanup. + // Note that if the GC is processing exception cache cleaning in a concurrent phase, + // then a single writer may contend with cleaning up the head pointer to the + // first ExceptionCache node that has a Klass* that is alive. That is fine, + // as long as there is no concurrent cleanup of next pointers from concurrent writers. + // And the concurrent writers do not clean up next pointers, only the head. + // Also note that concurrent readers will walk through Klass* pointers that are not + // alive. That does not cause ABA problems, because Klass* is deleted after + // a handshake with all threads, after all stale ExceptionCaches have been + // unlinked. 
That is also when the CodeCache::exception_cache_purge_list() + // is deleted, with all ExceptionCache entries that were cleaned concurrently. + // That similarly implies that CAS operations on ExceptionCache entries do not + // suffer from ABA problems as unlinking and deletion is separated by a global + // handshake operation. + ExceptionCache* prev = nullptr; + ExceptionCache* curr = exception_cache_acquire(); + + while (curr != nullptr) { + ExceptionCache* next = curr->next(); + + if (!curr->exception_type()->is_loader_alive()) { + if (prev == nullptr) { + // Try to clean head; this is contended by concurrent inserts, that + // both lazily clean the head, and insert entries at the head. If + // the CAS fails, the operation is restarted. + if (Atomic::cmpxchg(&_exception_cache, curr, next) != curr) { + prev = nullptr; + curr = exception_cache_acquire(); + continue; + } + } else { + // It is impossible to during cleanup connect the next pointer to + // an ExceptionCache that has not been published before a safepoint + // prior to the cleanup. Therefore, release is not required. + prev->set_next(next); + } + // prev stays the same. + + CodeCache::release_exception_cache(curr); + } else { + prev = curr; + } + + curr = next; + } +} + +// public method for accessing the exception cache +// These are the public access methods. +address nmethod::handler_for_exception_and_pc(Handle exception, address pc) { + // We never grab a lock to read the exception cache, so we may + // have false negatives. This is okay, as it can only happen during + // the first few exception lookups for a given nmethod. + ExceptionCache* ec = exception_cache_acquire(); + while (ec != nullptr) { + address ret_val; + if ((ret_val = ec->match(exception,pc)) != nullptr) { + return ret_val; + } + ec = ec->next(); + } + return nullptr; +} + +void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) { + // There are potential race conditions during exception cache updates, so we + // must own the ExceptionCache_lock before doing ANY modifications. Because + // we don't lock during reads, it is possible to have several threads attempt + // to update the cache with the same data. We need to check for already inserted + // copies of the current data before adding it. + + MutexLocker ml(ExceptionCache_lock); + ExceptionCache* target_entry = exception_cache_entry_for_exception(exception); + + if (target_entry == nullptr || !target_entry->add_address_and_handler(pc,handler)) { + target_entry = new ExceptionCache(exception,pc,handler); + add_exception_cache_entry(target_entry); + } +} + +// private method for handling exception cache +// These methods are private, and used to manipulate the exception cache +// directly. 
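// ---------------------------------------------------------------------------
// Illustrative sketch, not HotSpot code: the exception cache above is read
// lock-free and only mutated under ExceptionCache_lock, so a lookup may miss
// an entry that is being inserted concurrently (a false negative) but never
// observes a partially linked node. The standalone C++ sketch below uses
// hypothetical names (Cache, Node, find, insert) purely to show the same
// "lock-free read, locked publish with re-check" shape and memory ordering.
#include <atomic>
#include <mutex>

struct Node {
  int   key;
  void* value;
  Node* next;   // immutable after publication in this simplified sketch
};

class Cache {
  std::atomic<Node*> _head{nullptr};
  std::mutex         _lock;
public:
  // Lock-free lookup: may race with insert() and miss the newest node.
  void* find(int key) const {
    for (Node* n = _head.load(std::memory_order_acquire); n != nullptr; n = n->next) {
      if (n->key == key) return n->value;
    }
    return nullptr;
  }
  // Locked insert: re-check under the lock so concurrent inserters do not add
  // duplicate entries, then publish the new head with release ordering.
  void insert(int key, void* value) {
    std::lock_guard<std::mutex> guard(_lock);
    if (find(key) != nullptr) return;           // someone else already inserted it
    Node* n = new Node{key, value, _head.load(std::memory_order_relaxed)};
    _head.store(n, std::memory_order_release);  // readers now see the new node
  }
};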
+ExceptionCache* nmethod::exception_cache_entry_for_exception(Handle exception) { + ExceptionCache* ec = exception_cache_acquire(); + while (ec != nullptr) { + if (ec->match_exception_with_space(exception)) { + return ec; + } + ec = ec->next(); + } + return nullptr; +} + +bool nmethod::is_at_poll_return(address pc) { + RelocIterator iter(this, pc, pc+1); + while (iter.next()) { + if (iter.type() == relocInfo::poll_return_type) + return true; + } + return false; +} + + +bool nmethod::is_at_poll_or_poll_return(address pc) { + RelocIterator iter(this, pc, pc+1); + while (iter.next()) { + relocInfo::relocType t = iter.type(); + if (t == relocInfo::poll_return_type || t == relocInfo::poll_type) + return true; + } + return false; +} + +void nmethod::verify_oop_relocations() { + // Ensure sure that the code matches the current oop values + RelocIterator iter(this, nullptr, nullptr); + while (iter.next()) { + if (iter.type() == relocInfo::oop_type) { + oop_Relocation* reloc = iter.oop_reloc(); + if (!reloc->oop_is_immediate()) { + reloc->verify_oop_relocation(); + } + } + } +} + + +ScopeDesc* nmethod::scope_desc_at(address pc) { + PcDesc* pd = pc_desc_at(pc); + guarantee(pd != nullptr, "scope must be present"); + return new ScopeDesc(this, pd); +} + +ScopeDesc* nmethod::scope_desc_near(address pc) { + PcDesc* pd = pc_desc_near(pc); + guarantee(pd != nullptr, "scope must be present"); + return new ScopeDesc(this, pd); +} + +address nmethod::oops_reloc_begin() const { + // If the method is not entrant then a JMP is plastered over the + // first few bytes. If an oop in the old code was there, that oop + // should not get GC'd. Skip the first few bytes of oops on + // not-entrant methods. + if (frame_complete_offset() != CodeOffsets::frame_never_safe && + code_begin() + frame_complete_offset() > + verified_entry_point() + NativeJump::instruction_size) + { + // If we have a frame_complete_offset after the native jump, then there + // is no point trying to look for oops before that. This is a requirement + // for being allowed to scan oops concurrently. + return code_begin() + frame_complete_offset(); + } + + // It is not safe to read oops concurrently using entry barriers, if their + // location depend on whether the nmethod is entrant or not. + // assert(BarrierSet::barrier_set()->barrier_set_nmethod() == nullptr, "Not safe oop scan"); + + address low_boundary = verified_entry_point(); + if (!is_in_use()) { + low_boundary += NativeJump::instruction_size; + // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump. + // This means that the low_boundary is going to be a little too high. + // This shouldn't matter, since oops of non-entrant methods are never used. + // In fact, why are we bothering to look at oops in a non-entrant method?? + } + return low_boundary; +} + +// Method that knows how to preserve outgoing arguments at call. This method must be +// called with a frame corresponding to a Java invoke +void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { + if (method() == nullptr) { + return; + } + + // handle the case of an anchor explicitly set in continuation code that doesn't have a callee + JavaThread* thread = reg_map->thread(); + if (thread->has_last_Java_frame() && fr.sp() == thread->last_Java_sp()) { + return; + } + + if (!method()->is_native()) { + address pc = fr.pc(); + bool has_receiver, has_appendix; + Symbol* signature; + + // The method attached by JIT-compilers should be used, if present. 
+ // Bytecode can be inaccurate in such case.
+ Method* callee = attached_method_before_pc(pc);
+ if (callee != nullptr) {
+ has_receiver = !(callee->access_flags().is_static());
+ has_appendix = false;
+ signature = callee->signature();
+ } else {
+ SimpleScopeDesc ssd(this, pc);
+
+ Bytecode_invoke call(methodHandle(Thread::current(), ssd.method()), ssd.bci());
+ has_receiver = call.has_receiver();
+ has_appendix = call.has_appendix();
+ signature = call.signature();
+ }
+
+ fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
+ } else if (method()->is_continuation_enter_intrinsic()) {
+ // This method only calls Continuation.enter()
+ Symbol* signature = vmSymbols::continuationEnter_signature();
+ fr.oops_compiled_arguments_do(signature, false, false, reg_map, f);
+ }
+}
+
+Method* nmethod::attached_method(address call_instr) {
+ assert(code_contains(call_instr), "not part of the nmethod");
+ RelocIterator iter(this, call_instr, call_instr + 1);
+ while (iter.next()) {
+ if (iter.addr() == call_instr) {
+ switch(iter.type()) {
+ case relocInfo::static_call_type: return iter.static_call_reloc()->method_value();
+ case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
+ case relocInfo::virtual_call_type: return iter.virtual_call_reloc()->method_value();
+ default: break;
+ }
+ }
+ }
+ return nullptr; // not found
+}
+
+Method* nmethod::attached_method_before_pc(address pc) {
+ if (NativeCall::is_call_before(pc)) {
+ NativeCall* ncall = nativeCall_before(pc);
+ return attached_method(ncall->instruction_address());
+ }
+ return nullptr; // not a call
+}
+
+void nmethod::clear_inline_caches() {
+ assert(SafepointSynchronize::is_at_safepoint(), "clearing of IC's only allowed at safepoint");
+ RelocIterator iter(this);
+ while (iter.next()) {
+ iter.reloc()->clear_inline_cache();
+ }
+}
+
+#ifdef ASSERT
+// Check class_loader is alive for this bit of metadata.
+class CheckClass : public MetadataClosure {
+ void do_metadata(Metadata* md) {
+ Klass* klass = nullptr;
+ if (md->is_klass()) {
+ klass = ((Klass*)md);
+ } else if (md->is_method()) {
+ klass = ((Method*)md)->method_holder();
+ } else if (md->is_methodData()) {
+ klass = ((MethodData*)md)->method()->method_holder();
+ } else {
+ md->print();
+ ShouldNotReachHere();
+ }
+ assert(klass->is_loader_alive(), "must be alive");
+ }
+};
+#endif // ASSERT
+
+
+static void clean_ic_if_metadata_is_dead(CompiledIC *ic) {
+ ic->clean_metadata();
+}
+
+// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
+template <typename CallsiteT>
+static void clean_if_nmethod_is_unloaded(CallsiteT* callsite, nmethod* from,
+ bool clean_all) {
+ CodeBlob* cb = CodeCache::find_blob(callsite->destination());
+ if (!cb->is_nmethod()) {
+ return;
+ }
+ nmethod* nm = cb->as_nmethod();
+ if (clean_all || !nm->is_in_use() || nm->is_unloading() || nm->method()->code() != nm) {
+ callsite->set_to_clean();
+ }
+}
+
+// Cleans caches in nmethods that point to either classes that are unloaded
+// or nmethods that are unloaded.
+//
+// Can be called either in parallel by G1 currently or after all
+// nmethods are unloaded. Return postponed=true in the parallel case for
+// inline caches found that point to nmethods that are not yet visited during
+// the do_unloading walk.
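// ---------------------------------------------------------------------------
// Illustrative sketch, not HotSpot code: clean_if_nmethod_is_unloaded() above
// resets a call site whenever its target compiled code has become stale
// (no longer in use, currently unloading, or no longer the method's current
// code). The hypothetical, simplified types below (CompiledCode, CallSite,
// clean_stale_call_sites) only show the shape of that staleness test and of
// the cleaning walk over call sites.
#include <vector>

struct CompiledCode {
  bool in_use;                                  // still a valid entry point
  bool unloading;                               // being unloaded by the GC
  const CompiledCode* current_code_for_method;  // what the method points at now
  bool is_stale() const {
    return !in_use || unloading || current_code_for_method != this;
  }
};

struct CallSite {
  const CompiledCode* target;
  void set_to_clean() { target = nullptr; }     // fall back to the resolution stub
};

// Walk all call sites of one compiled method and clean the stale ones,
// mirroring the clean_all / staleness test sketched above.
inline void clean_stale_call_sites(std::vector<CallSite>& sites, bool clean_all) {
  for (CallSite& cs : sites) {
    if (cs.target != nullptr && (clean_all || cs.target->is_stale())) {
      cs.set_to_clean();
    }
  }
}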
+void nmethod::unload_nmethod_caches(bool unloading_occurred) { + ResourceMark rm; + + // Exception cache only needs to be called if unloading occurred + if (unloading_occurred) { + clean_exception_cache(); + } + + cleanup_inline_caches_impl(unloading_occurred, false); + +#ifdef ASSERT + // Check that the metadata embedded in the nmethod is alive + CheckClass check_class; + metadata_do(&check_class); +#endif +} + +void nmethod::run_nmethod_entry_barrier() { + BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); + if (bs_nm != nullptr) { + // We want to keep an invariant that nmethods found through iterations of a Thread's + // nmethods found in safepoints have gone through an entry barrier and are not armed. + // By calling this nmethod entry barrier, it plays along and acts + // like any other nmethod found on the stack of a thread (fewer surprises). + nmethod* nm = this; + if (bs_nm->is_armed(nm)) { + bool alive = bs_nm->nmethod_entry_barrier(nm); + assert(alive, "should be alive"); + } + } +} + +// Only called by whitebox test +void nmethod::cleanup_inline_caches_whitebox() { + assert_locked_or_safepoint(CodeCache_lock); + CompiledICLocker ic_locker(this); + cleanup_inline_caches_impl(false /* unloading_occurred */, true /* clean_all */); +} + +address* nmethod::orig_pc_addr(const frame* fr) { + return (address*) ((address)fr->unextended_sp() + orig_pc_offset()); +} + +// Called to clean up after class unloading for live nmethods +void nmethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) { + assert(CompiledICLocker::is_safe(this), "mt unsafe call"); + ResourceMark rm; + + // Find all calls in an nmethod and clear the ones that point to bad nmethods. + RelocIterator iter(this, oops_reloc_begin()); + bool is_in_static_stub = false; + while(iter.next()) { + + switch (iter.type()) { + + case relocInfo::virtual_call_type: + if (unloading_occurred) { + // If class unloading occurred we first clear ICs where the cached metadata + // is referring to an unloaded klass or method. + clean_ic_if_metadata_is_dead(CompiledIC_at(&iter)); + } + + clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all); + break; + + case relocInfo::opt_virtual_call_type: + case relocInfo::static_call_type: + clean_if_nmethod_is_unloaded(CompiledDirectCall::at(iter.reloc()), this, clean_all); + break; + + case relocInfo::static_stub_type: { + is_in_static_stub = true; + break; + } + + case relocInfo::metadata_type: { + // Only the metadata relocations contained in static/opt virtual call stubs + // contains the Method* passed to c2i adapters. It is the only metadata + // relocation that needs to be walked, as it is the one metadata relocation + // that violates the invariant that all metadata relocations have an oop + // in the compiled method (due to deferred resolution and code patching). + + // This causes dead metadata to remain in compiled methods that are not + // unloading. Unless these slippery metadata relocations of the static + // stubs are at least cleared, subsequent class redefinition operations + // will access potentially free memory, and JavaThread execution + // concurrent to class unloading may call c2i adapters with dead methods. + if (!is_in_static_stub) { + // The first metadata relocation after a static stub relocation is the + // metadata relocation of the static stub used to pass the Method* to + // c2i adapters. 
+ continue;
+ }
+ is_in_static_stub = false;
+ if (is_unloading()) {
+ // If the nmethod itself is dying, then it may point at dead metadata.
+ // Nobody should follow that metadata; it is strictly unsafe.
+ continue;
+ }
+ metadata_Relocation* r = iter.metadata_reloc();
+ Metadata* md = r->metadata_value();
+ if (md != nullptr && md->is_method()) {
+ Method* method = static_cast<Method*>(md);
+ if (!method->method_holder()->is_loader_alive()) {
+ Atomic::store(r->metadata_addr(), (Method*)nullptr);
+
+ if (!r->metadata_is_immediate()) {
+ r->fix_metadata_relocation();
+ }
+ }
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+}
+
+address nmethod::continuation_for_implicit_exception(address pc, bool for_div0_check) {
+ // Exception happened outside inline-cache check code => we are inside
+ // an active nmethod => use cpc to determine a return address
+ int exception_offset = int(pc - code_begin());
+ int cont_offset = ImplicitExceptionTable(this).continuation_offset( exception_offset );
+#ifdef ASSERT
+ if (cont_offset == 0) {
+ Thread* thread = Thread::current();
+ ResourceMark rm(thread);
+ CodeBlob* cb = CodeCache::find_blob(pc);
+ assert(cb != nullptr && cb == this, "");
+
+ // Keep tty output consistent. To avoid ttyLocker, we buffer in stream, and print all at once.
+ stringStream ss;
+ ss.print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
+ print_on(&ss);
+ method()->print_codes_on(&ss);
+ print_code_on(&ss);
+ print_pcs_on(&ss);
+ tty->print("%s", ss.as_string()); // print all at once
+ }
+#endif
+ if (cont_offset == 0) {
+ // Let the normal error handling report the exception
+ return nullptr;
+ }
+ if (cont_offset == exception_offset) {
+#if INCLUDE_JVMCI
+ Deoptimization::DeoptReason deopt_reason = for_div0_check ? Deoptimization::Reason_div0_check : Deoptimization::Reason_null_check;
+ JavaThread *thread = JavaThread::current();
+ thread->set_jvmci_implicit_exception_pc(pc);
+ thread->set_pending_deoptimization(Deoptimization::make_trap_request(deopt_reason,
+ Deoptimization::Action_reinterpret));
+ return (SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
+#else
+ ShouldNotReachHere();
+#endif
+ }
+ return code_begin() + cont_offset;
+}
+
+class HasEvolDependency : public MetadataClosure {
+ bool _has_evol_dependency;
+ public:
+ HasEvolDependency() : _has_evol_dependency(false) {}
+ void do_metadata(Metadata* md) {
+ if (md->is_method()) {
+ Method* method = (Method*)md;
+ if (method->is_old()) {
+ _has_evol_dependency = true;
+ }
+ }
+ }
+ bool has_evol_dependency() const { return _has_evol_dependency; }
+};
+
+bool nmethod::has_evol_metadata() {
+ // Check the metadata in relocIter and CompiledIC and also deoptimize
+ // any nmethod that has reference to old methods.
+ HasEvolDependency check_evol; + metadata_do(&check_evol); + if (check_evol.has_evol_dependency() && log_is_enabled(Debug, redefine, class, nmethod)) { + ResourceMark rm; + log_debug(redefine, class, nmethod) + ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on in nmethod metadata", + _method->method_holder()->external_name(), + _method->name()->as_C_string(), + _method->signature()->as_C_string(), + compile_id()); + } + return check_evol.has_evol_dependency(); +} int nmethod::total_size() const { return @@ -440,16 +1000,28 @@ const char* nmethod::compile_kind() const { return nullptr; } +const char* nmethod::compiler_name() const { + return compilertype2name(_compiler_type); +} + // Fill in default values for various flag fields void nmethod::init_defaults() { + // avoid uninitialized fields, even for short time periods + _exception_cache = nullptr; + + _has_unsafe_access = 0; + _has_method_handle_invokes = 0; + _has_wide_vectors = 0; + _has_monitors = 0; + _state = not_installed; _has_flushed_dependencies = 0; _load_reported = false; // jvmti state - _oops_do_mark_link = nullptr; - _osr_link = nullptr; + _oops_do_mark_link = nullptr; + _osr_link = nullptr; #if INCLUDE_RTM_OPT - _rtm_state = NoRTM; + _rtm_state = NoRTM; #endif } @@ -639,18 +1211,19 @@ nmethod::nmethod( ByteSize basic_lock_owner_sp_offset, ByteSize basic_lock_sp_offset, OopMapSet* oop_maps ) - : CompiledMethod(method, "native nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true), + : CodeBlob("native nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod), + offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false), + _deoptimization_generation(0), + _method(method), + _gc_data(nullptr), _compiled_ic_data(nullptr), _is_unlinked(false), _native_receiver_sp_offset(basic_lock_owner_sp_offset), _native_basic_lock_sp_offset(basic_lock_sp_offset), - _is_unloading_state(0) + _is_unloading_state(0), + _deoptimization_status(not_marked) { { - int scopes_data_offset = 0; - int deoptimize_offset = 0; - int deoptimize_mh_offset = 0; - debug_only(NoSafepointVerifier nsv;) assert_locked_or_safepoint(CodeCache_lock); @@ -661,14 +1234,16 @@ nmethod::nmethod( // values something that will never match a pc like the nmethod vtable entry _exception_offset = 0; _orig_pc_offset = 0; + _deopt_handler_offset = 0; + _deopt_mh_handler_offset = 0; _gc_epoch = CodeCache::gc_epoch(); _consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts()); _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs()); _oops_offset = data_offset(); _metadata_offset = _oops_offset + align_up(code_buffer->total_oop_size(), oopSize); - scopes_data_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize); - _scopes_pcs_offset = scopes_data_offset; + _scopes_data_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize); + _scopes_pcs_offset = _scopes_data_offset; _dependencies_offset = _scopes_pcs_offset; _handler_table_offset = _dependencies_offset; _nul_chk_table_offset = _handler_table_offset; @@ -681,6 +1256,7 @@ nmethod::nmethod( _nmethod_end_offset = _nul_chk_table_offset; #endif _compile_id = compile_id; + _compiler_type = type; _entry_point = code_begin() + offsets->value(CodeOffsets::Entry); _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry); _osr_entry_point = nullptr; @@ -689,10 +1265,6 @@ 
nmethod::nmethod( _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions); - _scopes_data_begin = (address) this + scopes_data_offset; - _deopt_handler_begin = (address) this + deoptimize_offset; - _deopt_mh_handler_begin = (address) this + deoptimize_mh_offset; - code_buffer->copy_code_and_locs_to(this); code_buffer->copy_values_to(this); @@ -784,51 +1356,54 @@ nmethod::nmethod( JVMCINMethodData* jvmci_data #endif ) - : CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true), + : CodeBlob("nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod), + offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false), + _deoptimization_generation(0), + _method(method), + _gc_data(nullptr), _compiled_ic_data(nullptr), _is_unlinked(false), _native_receiver_sp_offset(in_ByteSize(-1)), _native_basic_lock_sp_offset(in_ByteSize(-1)), - _is_unloading_state(0) + _is_unloading_state(0), + _deoptimization_status(not_marked) { assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR"); { debug_only(NoSafepointVerifier nsv;) assert_locked_or_safepoint(CodeCache_lock); - _deopt_handler_begin = (address) this; - _deopt_mh_handler_begin = (address) this; - init_defaults(); - _entry_bci = entry_bci; - _compile_id = compile_id; - _comp_level = comp_level; - _orig_pc_offset = orig_pc_offset; - _gc_epoch = CodeCache::gc_epoch(); + _entry_bci = entry_bci; + _compile_id = compile_id; + _compiler_type = type; + _comp_level = comp_level; + _orig_pc_offset = orig_pc_offset; + _gc_epoch = CodeCache::gc_epoch(); // Section offsets - _consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts()); - _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs()); + _consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts()); + _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs()); set_ctable_begin(header_begin() + _consts_offset); - _skipped_instructions_size = code_buffer->total_skipped_instructions_size(); + _skipped_instructions_size = code_buffer->total_skipped_instructions_size(); #if INCLUDE_JVMCI if (compiler->is_jvmci()) { // JVMCI might not produce any stub sections if (offsets->value(CodeOffsets::Exceptions) != -1) { - _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions); + _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions); } else { - _exception_offset = -1; + _exception_offset = -1; } if (offsets->value(CodeOffsets::Deopt) != -1) { - _deopt_handler_begin = (address) this + code_offset() + offsets->value(CodeOffsets::Deopt); + _deopt_handler_offset = code_offset() + offsets->value(CodeOffsets::Deopt); } else { - _deopt_handler_begin = nullptr; + _deopt_handler_offset = -1; } if (offsets->value(CodeOffsets::DeoptMH) != -1) { - _deopt_mh_handler_begin = (address) this + code_offset() + offsets->value(CodeOffsets::DeoptMH); + _deopt_mh_handler_offset = code_offset() + offsets->value(CodeOffsets::DeoptMH); } else { - _deopt_mh_handler_begin = nullptr; + _deopt_mh_handler_offset = -1; } } else #endif @@ -837,25 +1412,25 @@ nmethod::nmethod( assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set"); assert(offsets->value(CodeOffsets::Deopt ) != -1, "must be set"); - _exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions); - _deopt_handler_begin = (address) this + 
_stub_offset + offsets->value(CodeOffsets::Deopt); + _exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions); + _deopt_handler_offset = _stub_offset + offsets->value(CodeOffsets::Deopt); if (offsets->value(CodeOffsets::DeoptMH) != -1) { - _deopt_mh_handler_begin = (address) this + _stub_offset + offsets->value(CodeOffsets::DeoptMH); + _deopt_mh_handler_offset = _stub_offset + offsets->value(CodeOffsets::DeoptMH); } else { - _deopt_mh_handler_begin = nullptr; + _deopt_mh_handler_offset = -1; } } if (offsets->value(CodeOffsets::UnwindHandler) != -1) { - _unwind_handler_offset = code_offset() + offsets->value(CodeOffsets::UnwindHandler); + _unwind_handler_offset = code_offset() + offsets->value(CodeOffsets::UnwindHandler); } else { _unwind_handler_offset = -1; } _oops_offset = data_offset(); _metadata_offset = _oops_offset + align_up(code_buffer->total_oop_size(), oopSize); - int scopes_data_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize); + _scopes_data_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize); - _scopes_pcs_offset = scopes_data_offset + align_up(debug_info->data_size (), oopSize); + _scopes_pcs_offset = _scopes_data_offset + align_up(debug_info->data_size (), oopSize); _dependencies_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size()); _handler_table_offset = _dependencies_offset + align_up((int)dependencies->size_in_bytes(), oopSize); _nul_chk_table_offset = _handler_table_offset + align_up(handler_table->size_in_bytes(), oopSize); @@ -871,7 +1446,6 @@ nmethod::nmethod( _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry); _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry); _exception_cache = nullptr; - _scopes_data_begin = (address) this + scopes_data_offset; _pc_desc_container.reset_to(scopes_pcs_begin()); @@ -1509,7 +2083,7 @@ oop nmethod::oop_at_phantom(int index) const { void nmethod::flush_dependencies() { if (!has_flushed_dependencies()) { - set_has_flushed_dependencies(); + set_has_flushed_dependencies(true); for (Dependencies::DepStream deps(this); deps.next(); ) { if (deps.type() == Dependencies::call_site_target_value) { // CallSite dependencies are managed on per-CallSite instance basis. @@ -2026,7 +2600,7 @@ void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) { break; } } - assert(has_method_handle_invokes() == (_deopt_mh_handler_begin != nullptr), "must have deopt mh handler"); + assert(has_method_handle_invokes() == (_deopt_mh_handler_offset != -1), "must have deopt mh handler"); int size = count * sizeof(PcDesc); assert(scopes_pcs_size() >= size, "oob"); @@ -2989,8 +3563,8 @@ const char* nmethod::nmethod_section_label(address pos) const { if (pos == consts_begin() && pos != insts_begin()) label = "[Constants]"; // Check stub_code before checking exception_handler or deopt_handler. 
if (pos == this->stub_begin()) label = "[Stub Code]";
- if (JVMCI_ONLY(_exception_offset >= 0 &&) pos == exception_begin()) label = "[Exception Handler]";
- if (JVMCI_ONLY(_deopt_handler_begin != nullptr &&) pos == deopt_handler_begin()) label = "[Deopt Handler Code]";
+ if (JVMCI_ONLY(_exception_offset >= 0 &&) pos == exception_begin()) label = "[Exception Handler]";
+ if (JVMCI_ONLY(_deopt_handler_offset != -1 &&) pos == deopt_handler_begin()) label = "[Deopt Handler Code]";
 return label;
 }
diff --git a/src/hotspot/share/code/nmethod.hpp b/src/hotspot/share/code/nmethod.hpp
index 2993db21305..cca63a702b4 100644
--- a/src/hotspot/share/code/nmethod.hpp
+++ b/src/hotspot/share/code/nmethod.hpp
@@ -25,15 +25,122 @@
 #ifndef SHARE_CODE_NMETHOD_HPP
 #define SHARE_CODE_NMETHOD_HPP
-#include "code/compiledMethod.hpp"
+#include "code/codeBlob.hpp"
+#include "code/pcDesc.hpp"
+#include "oops/metadata.hpp"
+#include "oops/method.hpp"
+class AbstractCompiler;
+class CompiledDirectCall;
+class CompiledIC;
 class CompiledICData;
 class CompileTask;
 class DepChange;
+class Dependencies;
 class DirectiveSet;
 class DebugInformationRecorder;
+class ExceptionHandlerTable;
+class ImplicitExceptionTable;
 class JvmtiThreadState;
+class MetadataClosure;
+class NativeCallWrapper;
 class OopIterateClosure;
+class ScopeDesc;
+class xmlStream;
+
+// This class is used internally by nmethods, to cache
+// exception/pc/handler information.
+
+class ExceptionCache : public CHeapObj<mtCode> {
+ friend class VMStructs;
+ private:
+ enum { cache_size = 16 };
+ Klass* _exception_type;
+ address _pc[cache_size];
+ address _handler[cache_size];
+ volatile int _count;
+ ExceptionCache* volatile _next;
+ ExceptionCache* _purge_list_next;
+
+ inline address pc_at(int index);
+ void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
+
+ inline address handler_at(int index);
+ void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
+
+ inline int count();
+ // increment_count is only called under lock, but there may be concurrent readers.
+ void increment_count();
+
+ public:
+
+ ExceptionCache(Handle exception, address pc, address handler);
+
+ Klass* exception_type() { return _exception_type; }
+ ExceptionCache* next();
+ void set_next(ExceptionCache *ec);
+ ExceptionCache* purge_list_next() { return _purge_list_next; }
+ void set_purge_list_next(ExceptionCache *ec) { _purge_list_next = ec; }
+
+ address match(Handle exception, address pc);
+ bool match_exception_with_space(Handle exception) ;
+ address test_address(address addr);
+ bool add_address_and_handler(address addr, address handler) ;
+};
+
+// cache pc descs found in earlier inquiries
+class PcDescCache {
+ friend class VMStructs;
+ private:
+ enum { cache_size = 4 };
+ // The array elements MUST be volatile! Several threads may modify
+ // and read from the cache concurrently. find_pc_desc_internal has
+ // returned wrong results. C++ compiler (namely xlC12) may duplicate
+ // C++ field accesses if the elements are not volatile.
+ typedef PcDesc* PcDescPtr; + volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found + public: + PcDescCache() { debug_only(_pc_descs[0] = nullptr); } + void reset_to(PcDesc* initial_pc_desc); + PcDesc* find_pc_desc(int pc_offset, bool approximate); + void add_pc_desc(PcDesc* pc_desc); + PcDesc* last_pc_desc() { return _pc_descs[0]; } +}; + +class PcDescSearch { +private: + address _code_begin; + PcDesc* _lower; + PcDesc* _upper; +public: + PcDescSearch(address code, PcDesc* lower, PcDesc* upper) : + _code_begin(code), _lower(lower), _upper(upper) + { + } + + address code_begin() const { return _code_begin; } + PcDesc* scopes_pcs_begin() const { return _lower; } + PcDesc* scopes_pcs_end() const { return _upper; } +}; + +class PcDescContainer { +private: + PcDescCache _pc_desc_cache; +public: + PcDescContainer() {} + + PcDesc* find_pc_desc_internal(address pc, bool approximate, const PcDescSearch& search); + void reset_to(PcDesc* initial_pc_desc) { _pc_desc_cache.reset_to(initial_pc_desc); } + + PcDesc* find_pc_desc(address pc, bool approximate, const PcDescSearch& search) { + address base_address = search.code_begin(); + PcDesc* desc = _pc_desc_cache.last_pc_desc(); + if (desc != nullptr && desc->pc_offset() == pc - base_address) { + return desc; + } + return find_pc_desc_internal(pc, approximate, search); + } +}; // nmethods (native methods) are the compiled code versions of Java methods. // @@ -65,19 +172,192 @@ class FailedSpeculation; class JVMCINMethodData; #endif -class nmethod : public CompiledMethod { +class nmethod : public CodeBlob { friend class VMStructs; friend class JVMCIVMStructs; friend class CodeCache; // scavengable oops friend class JVMCINMethodData; + friend class DeoptimizationScope; private: + // Used to track in which deoptimize handshake this method will be deoptimized. + uint64_t _deoptimization_generation; + uint64_t _gc_epoch; + Method* _method; + // To support simple linked-list chaining of nmethods: nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head + PcDescContainer _pc_desc_container; + ExceptionCache* volatile _exception_cache; + + void* _gc_data; + + struct oops_do_mark_link; // Opaque data type. + static nmethod* volatile _oops_do_mark_nmethods; + oops_do_mark_link* volatile _oops_do_mark_link; + + // offsets for entry points + address _entry_point; // entry point with class check + address _verified_entry_point; // entry point without class check + address _osr_entry_point; // entry point for on stack replacement + + CompiledICData* _compiled_ic_data; + + // Shared fields for all nmethod's + int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method + + // Offsets for different nmethod parts + int _exception_offset; + // All deoptee's will resume execution at this location described by + // this offset. + int _deopt_handler_offset; + // All deoptee's at a MethodHandle call site will resume execution + // at this location described by this offset. 
+ int _deopt_mh_handler_offset; + // Offset of the unwind handler if it exists + int _unwind_handler_offset; + + int _consts_offset; + int _stub_offset; + int _oops_offset; // offset to where embedded oop table begins (inside data) + int _metadata_offset; // embedded meta data table + int _scopes_data_offset; + int _scopes_pcs_offset; + int _dependencies_offset; + int _handler_table_offset; + int _nul_chk_table_offset; +#if INCLUDE_JVMCI + int _speculations_offset; + int _jvmci_data_offset; +#endif + int _nmethod_end_offset; + int _skipped_instructions_size; + + // location in frame (offset for sp) that deopt can store the original + // pc during a deopt. + int _orig_pc_offset; + + int _compile_id; // which compilation made this nmethod + + CompilerType _compiler_type; // which compiler made this nmethod (u1) + + bool _is_unlinked; + +#if INCLUDE_RTM_OPT + // RTM state at compile time. Used during deoptimization to decide + // whether to restart collecting RTM locking abort statistic again. + RTMState _rtm_state; +#endif + + // These are used for compiled synchronized native methods to + // locate the owner and stack slot for the BasicLock. They are + // needed because there is no debug information for compiled native + // wrappers and the oop maps are insufficient to allow + // frame::retrieve_receiver() to work. Currently they are expected + // to be byte offsets from the Java stack pointer for maximum code + // sharing between platforms. JVMTI's GetLocalInstance() uses these + // offsets to find the receiver for non-static native wrapper frames. + ByteSize _native_receiver_sp_offset; + ByteSize _native_basic_lock_sp_offset; + + CompLevel _comp_level; // compilation level (s1) + + // Local state used to keep track of whether unloading is happening or not + volatile uint8_t _is_unloading_state; + + // used by jvmti to track if an event has been posted for this nmethod. + bool _load_reported; + + // Protected by CompiledMethod_lock + volatile signed char _state; // {not_installed, in_use, not_entrant} + + // set during construction + uint8_t _has_unsafe_access:1, // May fault due to unsafe access. + _has_method_handle_invokes:1,// Has this method MethodHandle invokes? 
+ _has_wide_vectors:1, // Preserve wide vectors at safepoints + _has_monitors:1, // Fastpath monitor detection for continuations + _has_flushed_dependencies:1; // Used for maintenance of dependencies (under CodeCache_lock) + + enum DeoptimizationStatus : u1 { + not_marked, + deoptimize, + deoptimize_noupdate, + deoptimize_done + }; + + volatile DeoptimizationStatus _deoptimization_status; // Used for stack deoptimization + + DeoptimizationStatus deoptimization_status() const { + return Atomic::load(&_deoptimization_status); + } + + // For native wrappers + nmethod(Method* method, + CompilerType type, + int nmethod_size, + int compile_id, + CodeOffsets* offsets, + CodeBuffer *code_buffer, + int frame_size, + ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */ + ByteSize basic_lock_sp_offset, /* synchronized natives only */ + OopMapSet* oop_maps); + + // Creation support + nmethod(Method* method, + CompilerType type, + int nmethod_size, + int compile_id, + int entry_bci, + CodeOffsets* offsets, + int orig_pc_offset, + DebugInformationRecorder *recorder, + Dependencies* dependencies, + CodeBuffer *code_buffer, + int frame_size, + OopMapSet* oop_maps, + ExceptionHandlerTable* handler_table, + ImplicitExceptionTable* nul_chk_table, + AbstractCompiler* compiler, + CompLevel comp_level +#if INCLUDE_JVMCI + , char* speculations = nullptr, + int speculations_len = 0, + JVMCINMethodData* jvmci_data = nullptr +#endif + ); + + // helper methods + void* operator new(size_t size, int nmethod_size, int comp_level) throw(); + + // For method handle intrinsics: Try MethodNonProfiled, MethodProfiled and NonNMethod. + // Attention: Only allow NonNMethod space for special nmethods which don't need to be + // findable by nmethod iterators! In particular, they must not contain oops! + void* operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw(); + + const char* reloc_string_for(u_char* begin, u_char* end); + + bool try_transition(signed char new_state); + + // Returns true if this thread changed the state of the nmethod or + // false if another thread performed the transition. + bool make_entrant() { Unimplemented(); return false; } + void inc_decompile_count(); + + // Inform external interfaces that a compiled method has been unloaded + void post_compiled_method_unload(); + + // Initialize fields to their default values + void init_defaults(); + + PcDesc* find_pc_desc(address pc, bool approximate) { + return _pc_desc_container.find_pc_desc(pc, approximate, PcDescSearch(code_begin(), scopes_pcs_begin(), scopes_pcs_end())); + } + // STW two-phase nmethod root processing helpers. // // When determining liveness of a given nmethod to do code cache unloading, @@ -137,7 +417,6 @@ class nmethod : public CompiledMethod { // the next state by marking the _transition_ with (C) and (O), which mean "current" // and "other" thread respectively. // - struct oops_do_mark_link; // Opaque data type. // States used for claiming nmethods during root processing. static const uint claim_weak_request_tag = 0; @@ -189,146 +468,7 @@ class nmethod : public CompiledMethod { // transitions). 
void oops_do_set_strong_done(nmethod* old_head); - static nmethod* volatile _oops_do_mark_nmethods; - oops_do_mark_link* volatile _oops_do_mark_link; - - // offsets for entry points - address _entry_point; // entry point with class check - address _verified_entry_point; // entry point without class check - address _osr_entry_point; // entry point for on stack replacement - - CompiledICData* _compiled_ic_data; - bool _is_unlinked; - - // Shared fields for all nmethod's - int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method - - // Offsets for different nmethod parts - int _exception_offset; - // Offset of the unwind handler if it exists - int _unwind_handler_offset; - - int _consts_offset; - int _stub_offset; - int _oops_offset; // offset to where embedded oop table begins (inside data) - int _metadata_offset; // embedded meta data table - int _scopes_data_offset; - int _scopes_pcs_offset; - int _dependencies_offset; - int _handler_table_offset; - int _nul_chk_table_offset; -#if INCLUDE_JVMCI - int _speculations_offset; - int _jvmci_data_offset; -#endif - int _nmethod_end_offset; - - int code_offset() const { return int(code_begin() - header_begin()); } - - // location in frame (offset for sp) that deopt can store the original - // pc during a deopt. - int _orig_pc_offset; - - int _compile_id; // which compilation made this nmethod - -#if INCLUDE_RTM_OPT - // RTM state at compile time. Used during deoptimization to decide - // whether to restart collecting RTM locking abort statistic again. - RTMState _rtm_state; -#endif - - // These are used for compiled synchronized native methods to - // locate the owner and stack slot for the BasicLock. They are - // needed because there is no debug information for compiled native - // wrappers and the oop maps are insufficient to allow - // frame::retrieve_receiver() to work. Currently they are expected - // to be byte offsets from the Java stack pointer for maximum code - // sharing between platforms. JVMTI's GetLocalInstance() uses these - // offsets to find the receiver for non-static native wrapper frames. - ByteSize _native_receiver_sp_offset; - ByteSize _native_basic_lock_sp_offset; - - CompLevel _comp_level; // compilation level - - // Local state used to keep track of whether unloading is happening or not - volatile uint8_t _is_unloading_state; - - // protected by CodeCache_lock - bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock) - - // used by jvmti to track if an event has been posted for this nmethod. 
- bool _load_reported; - - // Protected by CompiledMethod_lock - volatile signed char _state; // {not_installed, in_use, not_used, not_entrant} - - int _skipped_instructions_size; - - // For native wrappers - nmethod(Method* method, - CompilerType type, - int nmethod_size, - int compile_id, - CodeOffsets* offsets, - CodeBuffer *code_buffer, - int frame_size, - ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */ - ByteSize basic_lock_sp_offset, /* synchronized natives only */ - OopMapSet* oop_maps); - - // Creation support - nmethod(Method* method, - CompilerType type, - int nmethod_size, - int compile_id, - int entry_bci, - CodeOffsets* offsets, - int orig_pc_offset, - DebugInformationRecorder *recorder, - Dependencies* dependencies, - CodeBuffer *code_buffer, - int frame_size, - OopMapSet* oop_maps, - ExceptionHandlerTable* handler_table, - ImplicitExceptionTable* nul_chk_table, - AbstractCompiler* compiler, - CompLevel comp_level -#if INCLUDE_JVMCI - , char* speculations = nullptr, - int speculations_len = 0, - JVMCINMethodData* jvmci_data = nullptr -#endif - ); - - // helper methods - void* operator new(size_t size, int nmethod_size, int comp_level) throw(); - // For method handle intrinsics: Try MethodNonProfiled, MethodProfiled and NonNMethod. - // Attention: Only allow NonNMethod space for special nmethods which don't need to be - // findable by nmethod iterators! In particular, they must not contain oops! - void* operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw(); - - const char* reloc_string_for(u_char* begin, u_char* end); - - bool try_transition(signed char new_state); - - // Returns true if this thread changed the state of the nmethod or - // false if another thread performed the transition. - bool make_entrant() { Unimplemented(); return false; } - void inc_decompile_count(); - - // Inform external interfaces that a compiled method has been unloaded - void post_compiled_method_unload(); - - // Initialize fields to their default values - void init_defaults(); - - // Offsets - int content_offset() const { return int(content_begin() - header_begin()); } - int data_offset() const { return _data_offset; } - - address header_end() const { return (address) header_begin() + header_size(); } - - public: +public: // create nmethod with entry_bci static nmethod* new_nmethod(const methodHandle& method, int compile_id, @@ -351,14 +491,6 @@ class nmethod : public CompiledMethod { #endif ); - // Only used for unit tests. - nmethod() - : CompiledMethod(), - _native_receiver_sp_offset(in_ByteSize(-1)), - _native_basic_lock_sp_offset(in_ByteSize(-1)), - _is_unloading_state(0) {} - - static nmethod* new_native_nmethod(const methodHandle& method, int compile_id, CodeBuffer *code_buffer, @@ -370,86 +502,126 @@ class nmethod : public CompiledMethod { OopMapSet* oop_maps, int exception_handler = -1); - // type info - bool is_nmethod() const { return true; } - bool is_osr_method() const { return _entry_bci != InvocationEntryBci; } + Method* method () const { return _method; } + bool is_native_method() const { return _method != nullptr && _method->is_native(); } + bool is_java_method () const { return _method != nullptr && !_method->is_native(); } + bool is_osr_method () const { return _entry_bci != InvocationEntryBci; } + + // Compiler task identification. Note that all OSR methods + // are numbered in an independent sequence if CICountOSR is true, + // and native method wrappers are also numbered independently if + // CICountNative is true. 
+ int compile_id() const { return _compile_id; } + const char* compile_kind() const; + + inline bool is_compiled_by_c1 () const { return _compiler_type == compiler_c1; } + inline bool is_compiled_by_c2 () const { return _compiler_type == compiler_c2; } + inline bool is_compiled_by_jvmci() const { return _compiler_type == compiler_jvmci; } + CompilerType compiler_type () const { return _compiler_type; } + const char* compiler_name () const; // boundaries for different parts - address consts_begin () const { return header_begin() + _consts_offset ; } - address consts_end () const { return code_begin() ; } - address stub_begin () const { return header_begin() + _stub_offset ; } - address stub_end () const { return header_begin() + _oops_offset ; } - address exception_begin () const { return header_begin() + _exception_offset ; } - address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : nullptr; } - oop* oops_begin () const { return (oop*) (header_begin() + _oops_offset) ; } - oop* oops_end () const { return (oop*) (header_begin() + _metadata_offset) ; } + address consts_begin () const { return header_begin() + _consts_offset ; } + address consts_end () const { return header_begin() + code_offset() ; } + address insts_begin () const { return header_begin() + code_offset() ; } + address insts_end () const { return header_begin() + _stub_offset ; } + address stub_begin () const { return header_begin() + _stub_offset ; } + address stub_end () const { return header_begin() + _oops_offset ; } + address exception_begin () const { return header_begin() + _exception_offset ; } + address deopt_handler_begin () const { return header_begin() + _deopt_handler_offset ; } + address deopt_mh_handler_begin() const { return header_begin() + _deopt_mh_handler_offset ; } + address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? 
(header_begin() + _unwind_handler_offset) : nullptr; } + oop* oops_begin () const { return (oop*) (header_begin() + _oops_offset) ; } + oop* oops_end () const { return (oop*) (header_begin() + _metadata_offset) ; } - Metadata** metadata_begin () const { return (Metadata**) (header_begin() + _metadata_offset) ; } - Metadata** metadata_end () const { return (Metadata**) _scopes_data_begin; } + Metadata** metadata_begin () const { return (Metadata**) (header_begin() + _metadata_offset) ; } + Metadata** metadata_end () const { return (Metadata**) (header_begin() + _scopes_data_offset) ; } - address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; } - PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset ); } - PcDesc* scopes_pcs_end () const { return (PcDesc*)(header_begin() + _dependencies_offset) ; } - address dependencies_begin () const { return header_begin() + _dependencies_offset ; } - address dependencies_end () const { return header_begin() + _handler_table_offset ; } - address handler_table_begin () const { return header_begin() + _handler_table_offset ; } - address handler_table_end () const { return header_begin() + _nul_chk_table_offset ; } - address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset ; } - - int skipped_instructions_size () const { return _skipped_instructions_size ; } + address scopes_data_begin () const { return header_begin() + _scopes_data_offset ; } + address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; } + PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset) ; } + PcDesc* scopes_pcs_end () const { return (PcDesc*)(header_begin() + _dependencies_offset) ; } + address dependencies_begin () const { return header_begin() + _dependencies_offset ; } + address dependencies_end () const { return header_begin() + _handler_table_offset ; } + address handler_table_begin () const { return header_begin() + _handler_table_offset ; } + address handler_table_end () const { return header_begin() + _nul_chk_table_offset ; } + address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset ; } #if INCLUDE_JVMCI - address nul_chk_table_end () const { return header_begin() + _speculations_offset ; } - address speculations_begin () const { return header_begin() + _speculations_offset ; } - address speculations_end () const { return header_begin() + _jvmci_data_offset ; } - address jvmci_data_begin () const { return header_begin() + _jvmci_data_offset ; } - address jvmci_data_end () const { return header_begin() + _nmethod_end_offset ; } + address nul_chk_table_end () const { return header_begin() + _speculations_offset ; } + address speculations_begin () const { return header_begin() + _speculations_offset ; } + address speculations_end () const { return header_begin() + _jvmci_data_offset ; } + address jvmci_data_begin () const { return header_begin() + _jvmci_data_offset ; } + address jvmci_data_end () const { return header_begin() + _nmethod_end_offset ; } #else - address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; } + address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; } #endif // Sizes - int oops_size () const { return int((address) oops_end () - (address) oops_begin ()); } - int metadata_size () const { return int((address) metadata_end () - (address) metadata_begin ()); } - int dependencies_size () const { return int( dependencies_end () - 
dependencies_begin ()); } + int consts_size () const { return int( consts_end () - consts_begin ()); } + int insts_size () const { return int( insts_end () - insts_begin ()); } + int stub_size () const { return int( stub_end () - stub_begin ()); } + int oops_size () const { return int((address) oops_end () - (address) oops_begin ()); } + int metadata_size () const { return int((address) metadata_end () - (address) metadata_begin ()); } + int scopes_data_size () const { return int( scopes_data_end () - scopes_data_begin ()); } + int scopes_pcs_size () const { return int((intptr_t)scopes_pcs_end () - (intptr_t)scopes_pcs_begin ()); } + int dependencies_size () const { return int( dependencies_end () - dependencies_begin ()); } + int handler_table_size() const { return int( handler_table_end() - handler_table_begin()); } + int nul_chk_table_size() const { return int( nul_chk_table_end() - nul_chk_table_begin()); } #if INCLUDE_JVMCI - int speculations_size () const { return int( speculations_end () - speculations_begin ()); } - int jvmci_data_size () const { return int( jvmci_data_end () - jvmci_data_begin ()); } + int speculations_size () const { return int( speculations_end () - speculations_begin ()); } + int jvmci_data_size () const { return int( jvmci_data_end () - jvmci_data_begin ()); } #endif int oops_count() const { assert(oops_size() % oopSize == 0, ""); return (oops_size() / oopSize) + 1; } int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; } - int total_size () const; + int skipped_instructions_size () const { return _skipped_instructions_size; } + int total_size() const; // Containment - bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); } - bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); } - bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); } - bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); } + bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); } + // Returns true if a given address is in the 'insts' section. The method + // insts_contains_inclusive() is end-inclusive. 
+ bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); } + bool insts_contains_inclusive(address addr) const { return insts_begin () <= addr && addr <= insts_end (); } + bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); } + bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); } + bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); } + bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); } + bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); } + bool handler_table_contains (address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); } + bool nul_chk_table_contains (address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); } // entry points - address entry_point() const { return _entry_point; } // normal entry point - address verified_entry_point() const { return _verified_entry_point; } // if klass is correct + address entry_point() const { return _entry_point; } // normal entry point + address verified_entry_point() const { return _verified_entry_point; } // if klass is correct + + enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is + // allowed to advance state + in_use = 0, // executable nmethod + not_entrant = 1 // marked for deoptimization but activations may still exist + }; // flag accessing and manipulation - bool is_not_installed() const { return _state == not_installed; } - bool is_in_use() const { return _state <= in_use; } - bool is_not_entrant() const { return _state == not_entrant; } + bool is_not_installed() const { return _state == not_installed; } + bool is_in_use() const { return _state <= in_use; } + bool is_not_entrant() const { return _state == not_entrant; } + int get_state() const { return _state; } void clear_unloading_state(); // Heuristically deduce an nmethod isn't worth keeping around bool is_cold(); - virtual bool is_unloading(); - virtual void do_unloading(bool unloading_occurred); + bool is_unloading(); + void do_unloading(bool unloading_occurred); - bool is_unlinked() const { return _is_unlinked; } - void set_is_unlinked() { assert(!_is_unlinked, "already unlinked"); _is_unlinked = true; } + bool is_unlinked() const { return _is_unlinked; } + void set_is_unlinked() { assert(!_is_unlinked, "already unlinked"); _is_unlinked = true; } #if INCLUDE_RTM_OPT // rtm state accessing and manipulating - RTMState rtm_state() const { return _rtm_state; } - void set_rtm_state(RTMState state) { _rtm_state = state; } + RTMState rtm_state() const { return _rtm_state; } + void set_rtm_state(RTMState state) { _rtm_state = state; } #endif bool make_in_use() { @@ -462,23 +634,51 @@ class nmethod : public CompiledMethod { bool make_not_entrant(); bool make_not_used() { return make_not_entrant(); } - int get_state() const { - return _state; + bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; } + bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; } + void set_deoptimized_done(); + + bool update_recompile_counts() const { + // Update recompile counts when either the update is explicitly requested (deoptimize) + // or the nmethod is not marked for deoptimization at all (not_marked). 
+ // The latter happens during uncommon traps when deoptimized nmethod is made not entrant. + DeoptimizationStatus status = deoptimization_status(); + return status != deoptimize_noupdate && status != deoptimize_done; } + // tells whether frames described by this nmethod can be deoptimized + // note: native wrappers cannot be deoptimized. + bool can_be_deoptimized() const { return is_java_method(); } + bool has_dependencies() { return dependencies_size() != 0; } void print_dependencies_on(outputStream* out) PRODUCT_RETURN; void flush_dependencies(); - bool has_flushed_dependencies() { return _has_flushed_dependencies; } - void set_has_flushed_dependencies() { + + template<typename T> + T* gc_data() const { return reinterpret_cast<T*>(_gc_data); } + template<typename T> + void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); } + + bool has_unsafe_access() const { return _has_unsafe_access; } + void set_has_unsafe_access(bool z) { _has_unsafe_access = z; } + + bool has_monitors() const { return _has_monitors; } + void set_has_monitors(bool z) { _has_monitors = z; } + + bool has_method_handle_invokes() const { return _has_method_handle_invokes; } + void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; } + + bool has_wide_vectors() const { return _has_wide_vectors; } + void set_has_wide_vectors(bool z) { _has_wide_vectors = z; } + + bool has_flushed_dependencies() const { return _has_flushed_dependencies; } + void set_has_flushed_dependencies(bool z) { assert(!has_flushed_dependencies(), "should only happen once"); - _has_flushed_dependencies = 1; + _has_flushed_dependencies = z; } int comp_level() const { return _comp_level; } - void unlink_from_method(); - // Support for oops in scopes and relocs: // Note: index 0 is reserved for null. oop oop_at(int index) const; @@ -491,7 +691,7 @@ class nmethod : public CompiledMethod { // Support for meta data in scopes and relocs: // Note: index 0 is reserved for null. - Metadata* metadata_at(int index) const { return index == 0 ? nullptr: *metadata_addr_at(index); } + Metadata* metadata_at(int index) const { return index == 0 ? nullptr: *metadata_addr_at(index); } Metadata** metadata_addr_at(int index) const { // for GC // relocation indexes are biased by 1 (because 0 is reserved) assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index"); @@ -506,10 +706,87 @@ private: void fix_oop_relocations(address begin, address end, bool initialize_immediates); inline void initialize_immediate_oop(oop* dest, jobject handle); +protected: + address oops_reloc_begin() const; + public: void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); } void fix_oop_relocations() { fix_oop_relocations(nullptr, nullptr, false); } + bool is_at_poll_return(address pc); + bool is_at_poll_or_poll_return(address pc); + +protected: + // Exception cache support + // Note: _exception_cache may be read and cleaned concurrently.
+ ExceptionCache* exception_cache() const { return _exception_cache; } + ExceptionCache* exception_cache_acquire() const; + void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; } + +public: + address handler_for_exception_and_pc(Handle exception, address pc); + void add_handler_for_exception_and_pc(Handle exception, address pc, address handler); + void clean_exception_cache(); + + void add_exception_cache_entry(ExceptionCache* new_entry); + ExceptionCache* exception_cache_entry_for_exception(Handle exception); + + + // MethodHandle + bool is_method_handle_return(address return_pc); + // Deopt + // Return true is the PC is one would expect if the frame is being deopted. + inline bool is_deopt_pc(address pc); + inline bool is_deopt_mh_entry(address pc); + inline bool is_deopt_entry(address pc); + + // Accessor/mutator for the original pc of a frame before a frame was deopted. + address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); } + void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; } + + const char* state() const; + + bool inlinecache_check_contains(address addr) const { + return (addr >= code_begin() && addr < verified_entry_point()); + } + + void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) override; + + // implicit exceptions support + address continuation_for_implicit_div0_exception(address pc) { return continuation_for_implicit_exception(pc, true); } + address continuation_for_implicit_null_exception(address pc) { return continuation_for_implicit_exception(pc, false); } + + static address get_deopt_original_pc(const frame* fr); + + // Inline cache support for class unloading and nmethod unloading + private: + void cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all); + + address continuation_for_implicit_exception(address pc, bool for_div0_check); + + public: + // Serial version used by whitebox test + void cleanup_inline_caches_whitebox(); + + void clear_inline_caches(); + + // Execute nmethod barrier code, as if entering through nmethod call. + void run_nmethod_entry_barrier(); + + void verify_oop_relocations(); + + bool has_evol_metadata(); + + Method* attached_method(address call_pc); + Method* attached_method_before_pc(address pc); + + // GC unloading support + // Cleans unloaded klasses and unloaded nmethods in inline caches + + void unload_nmethod_caches(bool class_unloading_occurred); + + void unlink_from_method(); + // On-stack replacement support int osr_entry_bci() const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; } address osr_entry() const { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; } @@ -524,7 +801,7 @@ public: void unlink(); // Deallocate this nmethod - called by the GC - void purge(bool free_code_cache_data, bool unregister_nmethod); + void purge(bool free_code_cache_data, bool unregister_nmethod) override; // See comment at definition of _last_seen_on_stack void mark_as_maybe_on_stack(); @@ -549,7 +826,6 @@ public: } #endif - public: void oops_do(OopClosure* f) { oops_do(f, false); } void oops_do(OopClosure* f, bool allow_dead); @@ -591,6 +867,15 @@ public: void set_load_reported() { _load_reported = true; } public: + // ScopeDesc retrieval operation + PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); } + // pc_desc_near returns the first PcDesc at or after the given pc. 
+ PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); } + + // ScopeDesc for an instruction + ScopeDesc* scope_desc_at(address pc); + ScopeDesc* scope_desc_near(address pc); + // copying of debugging information void copy_scopes_pcs(PcDesc* pcs, int count); void copy_scopes_data(address buffer, int size); @@ -604,7 +889,7 @@ public: void post_compiled_method_load_event(JvmtiThreadState* state = nullptr); // verify operations - void verify(); + void verify() override; void verify_scopes(); void verify_interrupt_point(address interrupt_point, bool is_inline_cache); @@ -616,8 +901,8 @@ public: void decode(outputStream* st) const { decode2(st); } // just delegate here. // printing support - void print() const; - void print(outputStream* st) const; + void print() const override; + void print(outputStream* st) const; void print_code(); #if defined(SUPPORT_DATA_STRUCTS) @@ -626,7 +911,7 @@ public: void print_pcs_on(outputStream* st); void print_scopes() { print_scopes_on(tty); } void print_scopes_on(outputStream* st) PRODUCT_RETURN; - void print_value_on(outputStream* st) const; + void print_value_on(outputStream* st) const override; void print_handler_table(); void print_nul_chk_table(); void print_recorded_oop(int log_n, int index); @@ -646,7 +931,7 @@ public: void print_nmethod(bool print_code); // need to re-define this from CodeBlob else the overload hides it - virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); } + void print_on(outputStream* st) const override { CodeBlob::print_on(st); } void print_on(outputStream* st, const char* msg) const; // Logging @@ -655,7 +940,7 @@ public: void log_state_change() const; // Prints block-level comments, including nmethod specific block labels: - virtual void print_block_comment(outputStream* stream, address block_begin) const { + void print_block_comment(outputStream* stream, address block_begin) const override { #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY) print_nmethod_labels(stream, block_begin); CodeBlob::print_block_comment(stream, block_begin); @@ -670,13 +955,6 @@ public: // Prints a comment for one native instruction (reloc info, pc desc) void print_code_comment_on(outputStream* st, int column, address begin, address end); - // Compiler task identification. Note that all OSR methods - // are numbered in an independent sequence if CICountOSR is true, - // and native method wrappers are also numbered independently if - // CICountNative is true. - virtual int compile_id() const { return _compile_id; } - const char* compile_kind() const; - // tells if this compiled method is dependent on the given changes, // and the changes have invalidated it bool check_dependency_on(DepChange& changes); @@ -684,7 +962,7 @@ public: // Fast breakpoint support. Tells if this compiled method is // dependent on the given method. Returns true if this nmethod // corresponds to the given method as well. 
- virtual bool is_dependent_on_method(Method* dependee); + bool is_dependent_on_method(Method* dependee); // JVMTI's GetLocalInstance() support ByteSize native_receiver_sp_offset() { @@ -699,11 +977,11 @@ public: static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); } static ByteSize state_offset() { return byte_offset_of(nmethod, _state); } - virtual void metadata_do(MetadataClosure* f); + void metadata_do(MetadataClosure* f); address call_instruction_address(address pc) const; - virtual void make_deoptimized(); + void make_deoptimized(); void finalize_relocations(); }; diff --git a/src/hotspot/share/code/compiledMethod.inline.hpp b/src/hotspot/share/code/nmethod.inline.hpp similarity index 74% rename from src/hotspot/share/code/compiledMethod.inline.hpp rename to src/hotspot/share/code/nmethod.inline.hpp index a5f768fc973..1b6ac5614b4 100644 --- a/src/hotspot/share/code/compiledMethod.inline.hpp +++ b/src/hotspot/share/code/nmethod.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,19 +22,19 @@ * */ -#ifndef SHARE_CODE_COMPILEDMETHOD_INLINE_HPP -#define SHARE_CODE_COMPILEDMETHOD_INLINE_HPP +#ifndef SHARE_CODE_NMETHOD_INLINE_HPP +#define SHARE_CODE_NMETHOD_INLINE_HPP -#include "code/compiledMethod.hpp" +#include "code/nmethod.hpp" #include "code/nativeInst.hpp" #include "runtime/atomic.hpp" #include "runtime/frame.hpp" -inline bool CompiledMethod::is_deopt_pc(address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); } +inline bool nmethod::is_deopt_pc(address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); } // When using JVMCI the address might be off by the size of a call instruction. 
-inline bool CompiledMethod::is_deopt_entry(address pc) { +inline bool nmethod::is_deopt_entry(address pc) { return pc == deopt_handler_begin() #if INCLUDE_JVMCI || (is_compiled_by_jvmci() && pc == (deopt_handler_begin() + NativeCall::instruction_size)) @@ -42,7 +42,7 @@ inline bool CompiledMethod::is_deopt_entry(address pc) { ; } -inline bool CompiledMethod::is_deopt_mh_entry(address pc) { +inline bool nmethod::is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin() #if INCLUDE_JVMCI || (is_compiled_by_jvmci() && pc == (deopt_mh_handler_begin() + NativeCall::instruction_size)) @@ -51,19 +51,19 @@ inline bool CompiledMethod::is_deopt_mh_entry(address pc) { } // ----------------------------------------------------------------------------- -// CompiledMethod::get_deopt_original_pc +// nmethod::get_deopt_original_pc // // Return the original PC for the given PC if: -// (a) the given PC belongs to a nmethod and +// (a) the given PC belongs to an nmethod and // (b) it is a deopt PC -inline address CompiledMethod::get_deopt_original_pc(const frame* fr) { +inline address nmethod::get_deopt_original_pc(const frame* fr) { if (fr->cb() == nullptr) return nullptr; - CompiledMethod* cm = fr->cb()->as_compiled_method_or_null(); - if (cm != nullptr && cm->is_deopt_pc(fr->pc())) - return cm->get_original_pc(fr); - + nmethod* nm = fr->cb()->as_nmethod_or_null(); + if (nm != nullptr && nm->is_deopt_pc(fr->pc())) { + return nm->get_original_pc(fr); + } return nullptr; } @@ -86,4 +86,4 @@ address ExceptionCache::handler_at(int index) { inline void ExceptionCache::increment_count() { Atomic::release_store(&_count, _count + 1); } -#endif // SHARE_CODE_COMPILEDMETHOD_INLINE_HPP +#endif // SHARE_CODE_NMETHOD_INLINE_HPP diff --git a/src/hotspot/share/code/pcDesc.cpp b/src/hotspot/share/code/pcDesc.cpp index c6ee8c69521..241eee03a9c 100644 --- a/src/hotspot/share/code/pcDesc.cpp +++ b/src/hotspot/share/code/pcDesc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,11 +36,11 @@ PcDesc::PcDesc(int pc_offset, int scope_decode_offset, int obj_decode_offset) { _flags = 0; } -address PcDesc::real_pc(const CompiledMethod* code) const { +address PcDesc::real_pc(const nmethod* code) const { return code->code_begin() + pc_offset(); } -void PcDesc::print_on(outputStream* st, CompiledMethod* code) { +void PcDesc::print_on(outputStream* st, nmethod* code) { #ifndef PRODUCT ResourceMark rm; st->print_cr("PcDesc(pc=" PTR_FORMAT " offset=%x bits=%x):", p2i(real_pc(code)), pc_offset(), _flags); @@ -57,7 +57,7 @@ void PcDesc::print_on(outputStream* st, CompiledMethod* code) { #endif } -bool PcDesc::verify(CompiledMethod* code) { +bool PcDesc::verify(nmethod* code) { //Unimplemented(); return true; } diff --git a/src/hotspot/share/code/pcDesc.hpp b/src/hotspot/share/code/pcDesc.hpp index 4ec1db4ff06..8048c3909c7 100644 --- a/src/hotspot/share/code/pcDesc.hpp +++ b/src/hotspot/share/code/pcDesc.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -29,7 +29,7 @@ // PcDescs map a physical PC (given as offset from start of nmethod) to // the corresponding source scope and byte code index. -class CompiledMethod; +class nmethod; class PcDesc { friend class VMStructs; @@ -102,11 +102,11 @@ class PcDesc { void set_arg_escape(bool z) { set_flag(PCDESC_arg_escape, z); } // Returns the real pc - address real_pc(const CompiledMethod* code) const; + address real_pc(const nmethod* code) const; - void print(CompiledMethod* code) { print_on(tty, code); } - void print_on(outputStream* st, CompiledMethod* code); - bool verify(CompiledMethod* code); + void print(nmethod* code) { print_on(tty, code); } + void print_on(outputStream* st, nmethod* code); + bool verify(nmethod* code); }; #endif // SHARE_CODE_PCDESC_HPP diff --git a/src/hotspot/share/code/relocInfo.cpp b/src/hotspot/share/code/relocInfo.cpp index ef908757675..69ff4bc78d6 100644 --- a/src/hotspot/share/code/relocInfo.cpp +++ b/src/hotspot/share/code/relocInfo.cpp @@ -117,13 +117,13 @@ void relocInfo::change_reloc_info_for_address(RelocIterator *itr, address pc, re // ---------------------------------------------------------------------------------------------------- // Implementation of RelocIterator -void RelocIterator::initialize(CompiledMethod* nm, address begin, address limit) { +void RelocIterator::initialize(nmethod* nm, address begin, address limit) { initialize_misc(); if (nm == nullptr && begin != nullptr) { // allow nmethod to be deduced from beginning address CodeBlob* cb = CodeCache::find_blob(begin); - nm = (cb != nullptr) ? cb->as_compiled_method_or_null() : nullptr; + nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr; } guarantee(nm != nullptr, "must be able to deduce nmethod from other arguments"); @@ -633,9 +633,9 @@ address virtual_call_Relocation::cached_value() { } Method* virtual_call_Relocation::method_value() { - CompiledMethod* cm = code(); - if (cm == nullptr) return (Method*)nullptr; - Metadata* m = cm->metadata_at(_method_index); + nmethod* nm = code(); + if (nm == nullptr) return (Method*)nullptr; + Metadata* m = nm->metadata_at(_method_index); assert(m != nullptr || _method_index == 0, "should be non-null for non-zero index"); assert(m == nullptr || m->is_method(), "not a method"); return (Method*)m; @@ -659,9 +659,9 @@ void opt_virtual_call_Relocation::unpack_data() { } Method* opt_virtual_call_Relocation::method_value() { - CompiledMethod* cm = code(); - if (cm == nullptr) return (Method*)nullptr; - Metadata* m = cm->metadata_at(_method_index); + nmethod* nm = code(); + if (nm == nullptr) return (Method*)nullptr; + Metadata* m = nm->metadata_at(_method_index); assert(m != nullptr || _method_index == 0, "should be non-null for non-zero index"); assert(m == nullptr || m->is_method(), "not a method"); return (Method*)m; @@ -689,9 +689,9 @@ address opt_virtual_call_Relocation::static_stub() { } Method* static_call_Relocation::method_value() { - CompiledMethod* cm = code(); - if (cm == nullptr) return (Method*)nullptr; - Metadata* m = cm->metadata_at(_method_index); + nmethod* nm = code(); + if (nm == nullptr) return (Method*)nullptr; + Metadata* m = nm->metadata_at(_method_index); assert(m != nullptr || _method_index == 0, "should be non-null for non-zero index"); assert(m == nullptr || m->is_method(), "not a method"); return (Method*)m; diff --git a/src/hotspot/share/code/relocInfo.hpp b/src/hotspot/share/code/relocInfo.hpp index 5f67f94bdad..9f1db9f4684 100644 --- 
a/src/hotspot/share/code/relocInfo.hpp +++ b/src/hotspot/share/code/relocInfo.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,11 +34,10 @@ #include -class nmethod; class CodeBlob; -class CompiledMethod; class Metadata; class NativeMovConstReg; +class nmethod; // Types in this file: // relocInfo @@ -571,7 +570,7 @@ class RelocIterator : public StackObj { address _limit; // stop producing relocations after this _addr relocInfo* _current; // the current relocation information relocInfo* _end; // end marker; we're done iterating when _current == _end - CompiledMethod* _code; // compiled method containing _addr + nmethod* _code; // compiled method containing _addr address _addr; // instruction to which the relocation applies short _databuf; // spare buffer for compressed data short* _data; // pointer to the relocation's data @@ -601,13 +600,13 @@ class RelocIterator : public StackObj { void initialize_misc(); - void initialize(CompiledMethod* nm, address begin, address limit); + void initialize(nmethod* nm, address begin, address limit); RelocIterator() { initialize_misc(); } public: // constructor - RelocIterator(CompiledMethod* nm, address begin = nullptr, address limit = nullptr); + RelocIterator(nmethod* nm, address begin = nullptr, address limit = nullptr); RelocIterator(CodeSection* cb, address begin = nullptr, address limit = nullptr); // get next reloc info, return !eos @@ -640,7 +639,7 @@ class RelocIterator : public StackObj { relocType type() const { return current()->type(); } int format() const { return (relocInfo::have_format) ? current()->format() : 0; } address addr() const { return _addr; } - CompiledMethod* code() const { return _code; } + nmethod* code() const { return _code; } short* data() const { return _data; } int datalen() const { return _datalen; } bool has_current() const { return _datalen >= 0; } @@ -827,7 +826,7 @@ class Relocation { public: // accessors which only make sense for a bound Relocation address addr() const { return binding()->addr(); } - CompiledMethod* code() const { return binding()->code(); } + nmethod* code() const { return binding()->code(); } bool addr_in_const() const { return binding()->addr_in_const(); } protected: short* data() const { return binding()->data(); } @@ -1463,7 +1462,7 @@ APPLY_TO_RELOCATIONS(EACH_CASE); #undef EACH_CASE_AUX #undef EACH_CASE -inline RelocIterator::RelocIterator(CompiledMethod* nm, address begin, address limit) { +inline RelocIterator::RelocIterator(nmethod* nm, address begin, address limit) { initialize(nm, begin, limit); } diff --git a/src/hotspot/share/code/scopeDesc.cpp b/src/hotspot/share/code/scopeDesc.cpp index 1bcb762152a..52a095ac840 100644 --- a/src/hotspot/share/code/scopeDesc.cpp +++ b/src/hotspot/share/code/scopeDesc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -32,7 +32,7 @@ #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" -ScopeDesc::ScopeDesc(const CompiledMethod* code, PcDesc* pd, bool ignore_objects) { +ScopeDesc::ScopeDesc(const nmethod* code, PcDesc* pd, bool ignore_objects) { int obj_decode_offset = ignore_objects ? DebugInformationRecorder::serialized_null : pd->obj_decode_offset(); _code = code; _decode_offset = pd->scope_decode_offset(); diff --git a/src/hotspot/share/code/scopeDesc.hpp b/src/hotspot/share/code/scopeDesc.hpp index be3ba352070..8e8a876095e 100644 --- a/src/hotspot/share/code/scopeDesc.hpp +++ b/src/hotspot/share/code/scopeDesc.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,7 +41,7 @@ class SimpleScopeDesc : public StackObj { int _bci; public: - SimpleScopeDesc(CompiledMethod* code, address pc) { + SimpleScopeDesc(nmethod* code, address pc) { PcDesc* pc_desc = code->pc_desc_at(pc); assert(pc_desc != nullptr, "Must be able to find matching PcDesc"); // save this here so we only have to look up the PcDesc once @@ -61,7 +61,7 @@ class SimpleScopeDesc : public StackObj { class ScopeDesc : public ResourceObj { public: // Constructor - ScopeDesc(const CompiledMethod* code, PcDesc* pd, bool ignore_objects = false); + ScopeDesc(const nmethod* code, PcDesc* pd, bool ignore_objects = false); // Direct access to scope ScopeDesc* at_offset(int decode_offset) { return new ScopeDesc(this, decode_offset); } @@ -120,7 +120,7 @@ class ScopeDesc : public ResourceObj { GrowableArray* _objects; // Nmethod information - const CompiledMethod* _code; + const nmethod* _code; // Decoding operations void decode_body(); diff --git a/src/hotspot/share/compiler/compilationPolicy.cpp b/src/hotspot/share/compiler/compilationPolicy.cpp index 57173ed621c..4fcd9b5bde4 100644 --- a/src/hotspot/share/compiler/compilationPolicy.cpp +++ b/src/hotspot/share/compiler/compilationPolicy.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -212,7 +212,7 @@ bool CompilationPolicy::force_comp_at_level_simple(const methodHandle& method) { } CompLevel CompilationPolicy::comp_level(Method* method) { - CompiledMethod *nm = method->code(); + nmethod *nm = method->code(); if (nm != nullptr && nm->is_in_use()) { return (CompLevel)nm->comp_level(); } @@ -708,7 +708,7 @@ void CompilationPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) { } nmethod* CompilationPolicy::event(const methodHandle& method, const methodHandle& inlinee, - int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, TRAPS) { + int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) { if (PrintTieredEvents) { print_event(bci == InvocationEntryBci ? CALL : LOOP, method(), inlinee(), bci, comp_level); } @@ -1137,7 +1137,7 @@ CompLevel CompilationPolicy::loop_event(const methodHandle& method, CompLevel cu // Handle the invocation event. 
void CompilationPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh, - CompLevel level, CompiledMethod* nm, TRAPS) { + CompLevel level, nmethod* nm, TRAPS) { if (should_create_mdo(mh, level)) { create_mdo(mh, THREAD); } @@ -1152,7 +1152,7 @@ void CompilationPolicy::method_invocation_event(const methodHandle& mh, const me // Handle the back branch event. Notice that we can compile the method // with a regular entry from here. void CompilationPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh, - int bci, CompLevel level, CompiledMethod* nm, TRAPS) { + int bci, CompLevel level, nmethod* nm, TRAPS) { if (should_create_mdo(mh, level)) { create_mdo(mh, THREAD); } diff --git a/src/hotspot/share/compiler/compilationPolicy.hpp b/src/hotspot/share/compiler/compilationPolicy.hpp index f7f7f593c26..3ec60cd89c7 100644 --- a/src/hotspot/share/compiler/compilationPolicy.hpp +++ b/src/hotspot/share/compiler/compilationPolicy.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -235,9 +235,9 @@ class CompilationPolicy : AllStatic { // Get a compilation level for a given method. static CompLevel comp_level(Method* method); static void method_invocation_event(const methodHandle& method, const methodHandle& inlinee, - CompLevel level, CompiledMethod* nm, TRAPS); + CompLevel level, nmethod* nm, TRAPS); static void method_back_branch_event(const methodHandle& method, const methodHandle& inlinee, - int bci, CompLevel level, CompiledMethod* nm, TRAPS); + int bci, CompLevel level, nmethod* nm, TRAPS); static void set_increase_threshold_at_ratio() { _increase_threshold_at_ratio = 100 / (100 - (double)IncreaseFirstTierCompileThresholdAt); } static void set_start_time(jlong t) { _start_time = t; } @@ -265,7 +265,7 @@ public: // Return initial compile level to use with Xcomp (depends on compilation mode). static void reprofile(ScopeDesc* trap_scope, bool is_osr); static nmethod* event(const methodHandle& method, const methodHandle& inlinee, - int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, TRAPS); + int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS); // Select task is called by CompileBroker. We should return a task or nullptr. static CompileTask* select_task(CompileQueue* compile_queue); // Tell the runtime if we think a given method is adequately profiled. 
diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp index 7adb4dfc587..d569767c63d 100644 --- a/src/hotspot/share/compiler/compileBroker.cpp +++ b/src/hotspot/share/compiler/compileBroker.cpp @@ -1376,9 +1376,8 @@ nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci, if (osr_bci == InvocationEntryBci) { // standard compilation - CompiledMethod* method_code = method->code(); - if (method_code != nullptr && method_code->is_nmethod() - && (compile_reason != CompileTask::Reason_DirectivesChanged)) { + nmethod* method_code = method->code(); + if (method_code != nullptr && (compile_reason != CompileTask::Reason_DirectivesChanged)) { if (compilation_is_complete(method, osr_bci, comp_level)) { return (nmethod*) method_code; } @@ -1481,12 +1480,7 @@ nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci, // return requested nmethod // We accept a higher level osr method if (osr_bci == InvocationEntryBci) { - CompiledMethod* code = method->code(); - if (code == nullptr) { - return (nmethod*) code; - } else { - return code->as_nmethod_or_null(); - } + return method->code(); } return method->lookup_osr_nmethod_for(osr_bci, comp_level, false); } @@ -1511,7 +1505,7 @@ bool CompileBroker::compilation_is_complete(const methodHandle& method, if (method->is_not_compilable(comp_level)) { return true; } else { - CompiledMethod* result = method->code(); + nmethod* result = method->code(); if (result == nullptr) return false; return comp_level == result->comp_level(); } diff --git a/src/hotspot/share/compiler/oopMap.cpp b/src/hotspot/share/compiler/oopMap.cpp index 6ab8970a795..09b9feee3db 100644 --- a/src/hotspot/share/compiler/oopMap.cpp +++ b/src/hotspot/share/compiler/oopMap.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -498,7 +498,6 @@ static void update_register_map1(const ImmutableOopMap* oopmap, const frame* fr, VMReg reg = omv.content_reg(); address loc = fr->oopmapreg_to_location(omv.reg(), reg_map); reg_map->set_location(reg, loc); - //DEBUG_ONLY(nof_callee++;) } } } @@ -520,15 +519,7 @@ void ImmutableOopMap::update_register_map(const frame *fr, RegisterMap *reg_map) // Scan through oopmap and find location of all callee-saved registers // (we do not do update in place, since info could be overwritten) - DEBUG_ONLY(int nof_callee = 0;) update_register_map1(this, fr, reg_map); - - // Check that runtime stubs save all callee-saved registers -#ifdef COMPILER2 - assert(cb == nullptr || cb->is_compiled_by_c1() || cb->is_compiled_by_jvmci() || !cb->is_runtime_stub() || - (nof_callee >= SAVED_ON_ENTRY_REG_COUNT || nof_callee >= C_SAVED_ON_ENTRY_REG_COUNT), - "must save all"); -#endif // COMPILER2 } const ImmutableOopMap* OopMapSet::find_map(const frame *fr) { diff --git a/src/hotspot/share/gc/g1/g1HeapRegion.cpp b/src/hotspot/share/gc/g1/g1HeapRegion.cpp index be012bbc5a6..b4c1f93ffd8 100644 --- a/src/hotspot/share/gc/g1/g1HeapRegion.cpp +++ b/src/hotspot/share/gc/g1/g1HeapRegion.cpp @@ -336,7 +336,7 @@ public: _hr(hr), _failures(false) {} void do_code_blob(CodeBlob* cb) { - nmethod* nm = (cb == nullptr) ? nullptr : cb->as_compiled_method()->as_nmethod_or_null(); + nmethod* nm = (cb == nullptr) ? 
nullptr : cb->as_nmethod_or_null(); if (nm != nullptr) { // Verify that the nemthod is live VerifyCodeRootOopClosure oop_cl(_hr); diff --git a/src/hotspot/share/gc/shared/gcBehaviours.cpp b/src/hotspot/share/gc/shared/gcBehaviours.cpp index 76d28d1ab75..b52ef9e7d3d 100644 --- a/src/hotspot/share/gc/shared/gcBehaviours.cpp +++ b/src/hotspot/share/gc/shared/gcBehaviours.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,19 +23,18 @@ */ #include "precompiled.hpp" -#include "code/compiledMethod.hpp" #include "code/nmethod.hpp" #include "gc/shared/gcBehaviours.hpp" IsUnloadingBehaviour* IsUnloadingBehaviour::_current = nullptr; -bool IsUnloadingBehaviour::is_unloading(CompiledMethod* cm) { - if (cm->method()->can_be_allocated_in_NonNMethod_space()) { +bool IsUnloadingBehaviour::is_unloading(nmethod* nm) { + if (nm->method()->can_be_allocated_in_NonNMethod_space()) { // When the nmethod is in NonNMethod space, we may reach here without IsUnloadingBehaviour. // However, we only allow this for special methods which never get unloaded. return false; } - return _current->has_dead_oop(cm) || cm->as_nmethod()->is_cold(); + return _current->has_dead_oop(nm) || nm->is_cold(); } class IsCompiledMethodUnloadingOopClosure: public OopClosure { @@ -70,12 +69,8 @@ public: } }; -bool ClosureIsUnloadingBehaviour::has_dead_oop(CompiledMethod* cm) const { - if (cm->is_nmethod()) { - IsCompiledMethodUnloadingOopClosure cl(_cl); - static_cast<nmethod*>(cm)->oops_do(&cl, true /* allow_dead */); - return cl.is_unloading(); - } else { - return false; - } +bool ClosureIsUnloadingBehaviour::has_dead_oop(nmethod* nm) const { + IsCompiledMethodUnloadingOopClosure cl(_cl); + nm->oops_do(&cl, true /* allow_dead */); + return cl.is_unloading(); } diff --git a/src/hotspot/share/gc/shared/gcBehaviours.hpp b/src/hotspot/share/gc/shared/gcBehaviours.hpp index 6265123f0f6..59f2a3e7460 100644 --- a/src/hotspot/share/gc/shared/gcBehaviours.hpp +++ b/src/hotspot/share/gc/shared/gcBehaviours.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,14 +28,14 @@ #include "memory/iterator.hpp" #include "oops/oopsHierarchy.hpp" -// This is the behaviour for checking if a CompiledMethod is unloading +// This is the behaviour for checking if an nmethod is unloading // or has unloaded due to having phantomly dead oops in it after a GC.
class IsUnloadingBehaviour { static IsUnloadingBehaviour* _current; public: - static bool is_unloading(CompiledMethod* cm); - virtual bool has_dead_oop(CompiledMethod* cm) const = 0; + static bool is_unloading(nmethod* nm); + virtual bool has_dead_oop(nmethod* nm) const = 0; static IsUnloadingBehaviour* current() { return _current; } static void set_current(IsUnloadingBehaviour* current) { _current = current; } }; @@ -48,7 +48,7 @@ public: : _cl(is_alive) { } - virtual bool has_dead_oop(CompiledMethod* cm) const; + virtual bool has_dead_oop(nmethod* nm) const; }; #endif // SHARE_GC_SHARED_GCBEHAVIOURS_HPP diff --git a/src/hotspot/share/gc/shared/parallelCleaning.cpp b/src/hotspot/share/gc/shared/parallelCleaning.cpp index ef82c8b676e..1150657f265 100644 --- a/src/hotspot/share/gc/shared/parallelCleaning.cpp +++ b/src/hotspot/share/gc/shared/parallelCleaning.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,7 +38,7 @@ CodeCacheUnloadingTask::CodeCacheUnloadingTask(uint num_workers, bool unloading_ _first_nmethod(nullptr), _claimed_nmethod(nullptr) { // Get first alive nmethod - CompiledMethodIterator iter(CompiledMethodIterator::all_blobs); + NMethodIterator iter(NMethodIterator::all_blobs); if(iter.next()) { _first_nmethod = iter.method(); } @@ -49,15 +49,15 @@ CodeCacheUnloadingTask::~CodeCacheUnloadingTask() { CodeCache::verify_clean_inline_caches(); } -void CodeCacheUnloadingTask::claim_nmethods(CompiledMethod** claimed_nmethods, int *num_claimed_nmethods) { - CompiledMethod* first; - CompiledMethodIterator last(CompiledMethodIterator::all_blobs); +void CodeCacheUnloadingTask::claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) { + nmethod* first; + NMethodIterator last(NMethodIterator::all_blobs); do { *num_claimed_nmethods = 0; first = _claimed_nmethod; - last = CompiledMethodIterator(CompiledMethodIterator::all_blobs, first); + last = NMethodIterator(NMethodIterator::all_blobs, first); if (first != nullptr) { @@ -81,7 +81,7 @@ void CodeCacheUnloadingTask::work(uint worker_id) { } int num_claimed_nmethods; - CompiledMethod* claimed_nmethods[MaxClaimNmethods]; + nmethod* claimed_nmethods[MaxClaimNmethods]; while (true) { claim_nmethods(claimed_nmethods, &num_claimed_nmethods); diff --git a/src/hotspot/share/gc/shared/parallelCleaning.hpp b/src/hotspot/share/gc/shared/parallelCleaning.hpp index f734a819259..a15326f1b85 100644 --- a/src/hotspot/share/gc/shared/parallelCleaning.hpp +++ b/src/hotspot/share/gc/shared/parallelCleaning.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,8 +36,8 @@ class CodeCacheUnloadingTask { const uint _num_workers; // Variables used to claim nmethods. 
- CompiledMethod* _first_nmethod; - CompiledMethod* volatile _claimed_nmethod; + nmethod* _first_nmethod; + nmethod* volatile _claimed_nmethod; public: CodeCacheUnloadingTask(uint num_workers, bool unloading_occurred); @@ -45,7 +45,7 @@ public: private: static const int MaxClaimNmethods = 16; - void claim_nmethods(CompiledMethod** claimed_nmethods, int *num_claimed_nmethods); + void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods); public: // Cleaning and unloading of nmethods. diff --git a/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp b/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp index 1017210e23e..696c87a7065 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp @@ -1,4 +1,5 @@ /* + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2019, 2021, Red Hat, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -77,8 +78,7 @@ public: class ShenandoahIsUnloadingBehaviour : public IsUnloadingBehaviour { public: - virtual bool has_dead_oop(CompiledMethod* method) const { - nmethod* const nm = method->as_nmethod(); + virtual bool has_dead_oop(nmethod* nm) const { assert(ShenandoahHeap::heap()->is_concurrent_weak_root_in_progress(), "Only for this phase"); ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm); ShenandoahReentrantLocker locker(data->lock()); @@ -90,27 +90,24 @@ public: class ShenandoahCompiledICProtectionBehaviour : public CompiledICProtectionBehaviour { public: - virtual bool lock(CompiledMethod* method) { - nmethod* const nm = method->as_nmethod(); + virtual bool lock(nmethod* nm) { ShenandoahReentrantLock* const lock = ShenandoahNMethod::lock_for_nmethod(nm); assert(lock != nullptr, "Not yet registered?"); lock->lock(); return true; } - virtual void unlock(CompiledMethod* method) { - nmethod* const nm = method->as_nmethod(); + virtual void unlock(nmethod* nm) { ShenandoahReentrantLock* const lock = ShenandoahNMethod::lock_for_nmethod(nm); assert(lock != nullptr, "Not yet registered?"); lock->unlock(); } - virtual bool is_safe(CompiledMethod* method) { - if (SafepointSynchronize::is_at_safepoint() || method->is_unloading()) { + virtual bool is_safe(nmethod* nm) { + if (SafepointSynchronize::is_at_safepoint() || nm->is_unloading()) { return true; } - nmethod* const nm = method->as_nmethod(); ShenandoahReentrantLock* const lock = ShenandoahNMethod::lock_for_nmethod(nm); assert(lock != nullptr, "Not yet registered?"); return lock->owned_by_self(); diff --git a/src/hotspot/share/gc/x/xUnload.cpp b/src/hotspot/share/gc/x/xUnload.cpp index 230dbf613a4..b62521fbf2c 100644 --- a/src/hotspot/share/gc/x/xUnload.cpp +++ b/src/hotspot/share/gc/x/xUnload.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -75,8 +75,7 @@ public: class XIsUnloadingBehaviour : public IsUnloadingBehaviour { public: - virtual bool has_dead_oop(CompiledMethod* method) const { - nmethod* const nm = method->as_nmethod(); + virtual bool has_dead_oop(nmethod* nm) const { XReentrantLock* const lock = XNMethod::lock_for_nmethod(nm); XLocker<XReentrantLock> locker(lock); XIsUnloadingOopClosure cl; @@ -87,25 +86,22 @@ public: class XCompiledICProtectionBehaviour : public CompiledICProtectionBehaviour { public: - virtual bool lock(CompiledMethod* method) { - nmethod* const nm = method->as_nmethod(); + virtual bool lock(nmethod* nm) { XReentrantLock* const lock = XNMethod::lock_for_nmethod(nm); lock->lock(); return true; } - virtual void unlock(CompiledMethod* method) { - nmethod* const nm = method->as_nmethod(); + virtual void unlock(nmethod* nm) { XReentrantLock* const lock = XNMethod::lock_for_nmethod(nm); lock->unlock(); } - virtual bool is_safe(CompiledMethod* method) { - if (SafepointSynchronize::is_at_safepoint() || method->is_unloading()) { + virtual bool is_safe(nmethod* nm) { + if (SafepointSynchronize::is_at_safepoint() || nm->is_unloading()) { return true; } - nmethod* const nm = method->as_nmethod(); XReentrantLock* const lock = XNMethod::lock_for_nmethod(nm); return lock->is_owned(); } diff --git a/src/hotspot/share/gc/z/zUnload.cpp b/src/hotspot/share/gc/z/zUnload.cpp index 3ab4cd5b19f..1bbfa040ba8 100644 --- a/src/hotspot/share/gc/z/zUnload.cpp +++ b/src/hotspot/share/gc/z/zUnload.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -74,8 +74,7 @@ public: class ZIsUnloadingBehaviour : public IsUnloadingBehaviour { public: - virtual bool has_dead_oop(CompiledMethod* method) const { - nmethod* const nm = method->as_nmethod(); + virtual bool has_dead_oop(nmethod* nm) const { ZReentrantLock* const lock = ZNMethod::lock_for_nmethod(nm); ZLocker locker(lock); if (!ZNMethod::is_armed(nm)) { @@ -90,25 +89,22 @@ public: class ZCompiledICProtectionBehaviour : public CompiledICProtectionBehaviour { public: - virtual bool lock(CompiledMethod* method) { - nmethod* const nm = method->as_nmethod(); + virtual bool lock(nmethod* nm) { ZReentrantLock* const lock = ZNMethod::lock_for_nmethod(nm); lock->lock(); return true; } - virtual void unlock(CompiledMethod* method) { - nmethod* const nm = method->as_nmethod(); + virtual void unlock(nmethod* nm) { ZReentrantLock* const lock = ZNMethod::lock_for_nmethod(nm); lock->unlock(); } - virtual bool is_safe(CompiledMethod* method) { - if (SafepointSynchronize::is_at_safepoint() || method->is_unloading()) { + virtual bool is_safe(nmethod* nm) { + if (SafepointSynchronize::is_at_safepoint() || nm->is_unloading()) { return true; } - nmethod* const nm = method->as_nmethod(); ZReentrantLock* const lock = ZNMethod::lock_for_nmethod(nm); return lock->is_owned(); } diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp index 23e726fb180..fc0270daff6 100644 --- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp +++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp @@ -1305,7 +1305,7 @@ C2V_VMENTRY(void, reprofile, (JNIEnv* env, jobject, ARGUMENT_PAIR(method))) } NOT_PRODUCT(method->set_compiled_invocation_count(0)); - CompiledMethod* code = method->code(); + nmethod* code = method->code(); if (code != nullptr) { code->make_not_entrant(); } diff --git a/src/hotspot/share/jvmci/jvmciRuntime.cpp b/src/hotspot/share/jvmci/jvmciRuntime.cpp index b095008fe0b..3d1351f91bb 100644 --- a/src/hotspot/share/jvmci/jvmciRuntime.cpp +++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp @@ -256,7 +256,7 @@ extern void vm_exit(int code); // been deoptimized. If that is the case we return the deopt blob // unpack_with_exception entry instead. This makes life for the exception blob easier // because making that same check and diverting is painful from assembly language. -JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* current, oopDesc* ex, address pc, CompiledMethod*& cm)) +JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* current, oopDesc* ex, address pc, nmethod*& nm)) // Reset method handle flag. current->set_is_method_handle_return(false); @@ -267,10 +267,9 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* c // has updated oops. 
StackWatermarkSet::after_unwind(current); - cm = CodeCache::find_compiled(pc); - assert(cm != nullptr, "this is not a compiled method"); + nm = CodeCache::find_nmethod(pc); // Adjust the pc as needed/ - if (cm->is_deopt_pc(pc)) { + if (nm->is_deopt_pc(pc)) { RegisterMap map(current, RegisterMap::UpdateMap::skip, RegisterMap::ProcessFrames::include, @@ -291,10 +290,10 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* c if (log_is_enabled(Info, exceptions)) { ResourceMark rm; stringStream tempst; - assert(cm->method() != nullptr, "Unexpected null method()"); + assert(nm->method() != nullptr, "Unexpected null method()"); tempst.print("JVMCI compiled method <%s>\n" " at PC" INTPTR_FORMAT " for thread " INTPTR_FORMAT, - cm->method()->print_value_string(), p2i(pc), p2i(current)); + nm->method()->print_value_string(), p2i(pc), p2i(current)); Exceptions::log_exception(exception, tempst.as_string()); } // for AbortVMOnException flag @@ -332,10 +331,10 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* c // ExceptionCache is used only for exceptions at call sites and not for implicit exceptions if (guard_pages_enabled) { - address fast_continuation = cm->handler_for_exception_and_pc(exception, pc); + address fast_continuation = nm->handler_for_exception_and_pc(exception, pc); if (fast_continuation != nullptr) { // Set flag if return address is a method handle call site. - current->set_is_method_handle_return(cm->is_method_handle_return(pc)); + current->set_is_method_handle_return(nm->is_method_handle_return(pc)); return fast_continuation; } } @@ -356,7 +355,7 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* c current->clear_exception_oop_and_pc(); bool recursive_exception = false; - continuation = SharedRuntime::compute_compiled_exc_handler(cm, pc, exception, false, false, recursive_exception); + continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false, recursive_exception); // If an exception was thrown during exception dispatch, the exception oop may have changed current->set_exception_oop(exception()); current->set_exception_pc(pc); @@ -368,12 +367,12 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* c // Checking for exception oop equality is not // sufficient because some exceptions are pre-allocated and reused. if (continuation != nullptr && !recursive_exception && !SharedRuntime::deopt_blob()->contains(continuation)) { - cm->add_handler_for_exception_and_pc(exception, pc, continuation); + nm->add_handler_for_exception_and_pc(exception, pc, continuation); } } // Set flag if return address is a method handle call site. - current->set_is_method_handle_return(cm->is_method_handle_return(pc)); + current->set_is_method_handle_return(nm->is_method_handle_return(pc)); if (log_is_enabled(Info, exceptions)) { ResourceMark rm; @@ -395,18 +394,18 @@ address JVMCIRuntime::exception_handler_for_pc(JavaThread* current) { address pc = current->exception_pc(); // Still in Java mode DEBUG_ONLY(NoHandleMark nhm); - CompiledMethod* cm = nullptr; + nmethod* nm = nullptr; address continuation = nullptr; { // Enter VM mode by calling the helper ResetNoHandleMark rnhm; - continuation = exception_handler_for_pc_helper(current, exception, pc, cm); + continuation = exception_handler_for_pc_helper(current, exception, pc, nm); } // Back in JAVA, use no oops DON'T safepoint // Now check to see if the compiled method we were called from is now deoptimized. 
// If so we must return to the deopt blob and deoptimize the nmethod - if (cm != nullptr && caller_is_deopted()) { + if (nm != nullptr && caller_is_deopted()) { continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls(); } @@ -675,7 +674,7 @@ static void decipher(jlong v, bool ignoreZero) { if (cb) { if (cb->is_nmethod()) { char buf[O_BUFLEN]; - tty->print("%s [" INTPTR_FORMAT "+" JLONG_FORMAT "]", cb->as_nmethod_or_null()->method()->name_and_sig_as_C_string(buf, O_BUFLEN), p2i(cb->code_begin()), (jlong)((address)v - cb->code_begin())); + tty->print("%s [" INTPTR_FORMAT "+" JLONG_FORMAT "]", cb->as_nmethod()->method()->name_and_sig_as_C_string(buf, O_BUFLEN), p2i(cb->code_begin()), (jlong)((address)v - cb->code_begin())); return; } cb->print_value_on(tty); @@ -2208,7 +2207,7 @@ JVMCI::CodeInstallResult JVMCIRuntime::register_method(JVMCIEnv* JVMCIENV, assert(!nmethod_mirror.is_hotspot() || data->get_nmethod_mirror(nm, /* phantom_ref */ false) == nullptr, "must be"); if (entry_bci == InvocationEntryBci) { // If there is an old version we're done with it - CompiledMethod* old = method->code(); + nmethod* old = method->code(); if (TraceMethodReplacement && old != nullptr) { ResourceMark rm; char *method_name = method->name_and_sig_as_C_string(); diff --git a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp index 242933c18af..2a67a6791b4 100644 --- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp +++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp @@ -266,7 +266,7 @@ nonstatic_field(Method, _vtable_index, int) \ nonstatic_field(Method, _intrinsic_id, u2) \ nonstatic_field(Method, _flags._status, u4) \ - volatile_nonstatic_field(Method, _code, CompiledMethod*) \ + volatile_nonstatic_field(Method, _code, nmethod*) \ volatile_nonstatic_field(Method, _from_compiled_entry, address) \ \ nonstatic_field(MethodCounters, _invoke_mask, int) \ diff --git a/src/hotspot/share/oops/method.cpp b/src/hotspot/share/oops/method.cpp index 8395ac21bc6..794c134205e 100644 --- a/src/hotspot/share/oops/method.cpp +++ b/src/hotspot/share/oops/method.cpp @@ -1011,7 +1011,7 @@ void Method::set_native_function(address function, bool post_event_flag) { // This function can be called more than once. We must make sure that we always // use the latest registered method -> check if a stub already has been generated. // If so, we have to make it not_entrant. - CompiledMethod* nm = code(); // Put it into local variable to guard against concurrent updates + nmethod* nm = code(); // Put it into local variable to guard against concurrent updates if (nm != nullptr) { nm->make_not_entrant(); } @@ -1159,7 +1159,7 @@ void Method::clear_code() { _code = nullptr; } -void Method::unlink_code(CompiledMethod *compare) { +void Method::unlink_code(nmethod *compare) { ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag); // We need to check if either the _code or _from_compiled_code_entry_point // refer to this nmethod because there is a race in setting these two fields @@ -1303,12 +1303,12 @@ address Method::verified_code_entry() { // Not inline to avoid circular ref. bool Method::check_code() const { // cached in a register or local. There's a race on the value of the field. - CompiledMethod *code = Atomic::load_acquire(&_code); + nmethod *code = Atomic::load_acquire(&_code); return code == nullptr || (code->method() == nullptr) || (code->method() == (Method*)this && !code->is_osr_method()); } // Install compiled code. 
Instantly it can execute. -void Method::set_code(const methodHandle& mh, CompiledMethod *code) { +void Method::set_code(const methodHandle& mh, nmethod *code) { assert_lock_strong(CompiledMethod_lock); assert( code, "use clear_code to remove code" ); assert( mh->check_code(), "" ); diff --git a/src/hotspot/share/oops/method.hpp b/src/hotspot/share/oops/method.hpp index a554dc5fb7b..295d868d094 100644 --- a/src/hotspot/share/oops/method.hpp +++ b/src/hotspot/share/oops/method.hpp @@ -62,7 +62,7 @@ class MethodData; class MethodCounters; class ConstMethod; class InlineTableSizes; -class CompiledMethod; +class nmethod; class InterpreterOopMap; class Method : public Metadata { @@ -93,14 +93,14 @@ class Method : public Metadata { address _i2i_entry; // All-args-on-stack calling convention // Entry point for calling from compiled code, to compiled code if it exists // or else the interpreter. - volatile address _from_compiled_entry; // Cache of: _code ? _code->entry_point() : _adapter->c2i_entry() + volatile address _from_compiled_entry; // Cache of: _code ? _code->entry_point() : _adapter->c2i_entry() // The entry point for calling both from and to compiled code is // "_code->entry_point()". Because of tiered compilation and de-opt, this // field can come and go. It can transition from null to not-null at any // time (whenever a compile completes). It can transition from not-null to // null only at safepoints (because of a de-opt). - CompiledMethod* volatile _code; // Points to the corresponding piece of native code - volatile address _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry + nmethod* volatile _code; // Points to the corresponding piece of native code + volatile address _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry // Constructor Method(ConstMethod* xconst, AccessFlags access_flags, Symbol* name); @@ -357,10 +357,10 @@ class Method : public Metadata { // nmethod/verified compiler entry address verified_code_entry(); bool check_code() const; // Not inline to avoid circular ref - CompiledMethod* code() const; + nmethod* code() const; // Locks CompiledMethod_lock if not held. - void unlink_code(CompiledMethod *compare); + void unlink_code(nmethod *compare); // Locks CompiledMethod_lock if not held. void unlink_code(); @@ -373,7 +373,7 @@ private: } public: - static void set_code(const methodHandle& mh, CompiledMethod* code); + static void set_code(const methodHandle& mh, nmethod* code); void set_adapter_entry(AdapterHandlerEntry* adapter) { _adapter = adapter; } diff --git a/src/hotspot/share/oops/method.inline.hpp b/src/hotspot/share/oops/method.inline.hpp index bb83615d39e..f27834de30b 100644 --- a/src/hotspot/share/oops/method.inline.hpp +++ b/src/hotspot/share/oops/method.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -39,7 +39,7 @@ inline address Method::from_interpreted_entry() const { return Atomic::load_acquire(&_from_interpreted_entry); } -inline CompiledMethod* Method::code() const { +inline nmethod* Method::code() const { assert( check_code(), "" ); return Atomic::load_acquire(&_code); } diff --git a/src/hotspot/share/oops/stackChunkOop.cpp b/src/hotspot/share/oops/stackChunkOop.cpp index e114161625b..a8c6cbc2bd2 100644 --- a/src/hotspot/share/oops/stackChunkOop.cpp +++ b/src/hotspot/share/oops/stackChunkOop.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,7 +23,7 @@ */ #include "precompiled.hpp" -#include "code/compiledMethod.hpp" +#include "code/nmethod.hpp" #include "code/scopeDesc.hpp" #include "gc/shared/barrierSet.hpp" #include "gc/shared/barrierSetStackChunk.hpp" @@ -108,9 +108,9 @@ frame stackChunkOopDesc::sender(const frame& f, RegisterMap* map) { return Continuation::continuation_parent_frame(map); } -static int num_java_frames(CompiledMethod* cm, address pc) { +static int num_java_frames(nmethod* nm, address pc) { int count = 0; - for (ScopeDesc* scope = cm->scope_desc_at(pc); scope != nullptr; scope = scope->sender()) { + for (ScopeDesc* scope = nm->scope_desc_at(pc); scope != nullptr; scope = scope->sender()) { count++; } return count; @@ -118,8 +118,8 @@ static int num_java_frames(CompiledMethod* cm, address pc) { static int num_java_frames(const StackChunkFrameStream& f) { assert(f.is_interpreted() - || (f.cb() != nullptr && f.cb()->is_compiled() && f.cb()->as_compiled_method()->is_java_method()), ""); - return f.is_interpreted() ? 1 : num_java_frames(f.cb()->as_compiled_method(), f.orig_pc()); + || (f.cb() != nullptr && f.cb()->is_nmethod() && f.cb()->as_nmethod()->is_java_method()), ""); + return f.is_interpreted() ? 
1 : num_java_frames(f.cb()->as_nmethod(), f.orig_pc()); } int stackChunkOopDesc::num_java_frames() const { @@ -560,11 +560,11 @@ bool stackChunkOopDesc::verify(size_t* out_size, int* out_oops, int* out_frames, iterate_stack(&closure); assert(!is_empty() || closure._cb == nullptr, ""); - if (closure._cb != nullptr && closure._cb->is_compiled()) { + if (closure._cb != nullptr && closure._cb->is_nmethod()) { assert(argsize() == - (closure._cb->as_compiled_method()->method()->num_stack_arg_slots()*VMRegImpl::stack_slot_size) >>LogBytesPerWord, + (closure._cb->as_nmethod()->method()->num_stack_arg_slots()*VMRegImpl::stack_slot_size) >>LogBytesPerWord, "chunk argsize: %d bottom frame argsize: %d", argsize(), - (closure._cb->as_compiled_method()->method()->num_stack_arg_slots()*VMRegImpl::stack_slot_size) >>LogBytesPerWord); + (closure._cb->as_nmethod()->method()->num_stack_arg_slots()*VMRegImpl::stack_slot_size) >>LogBytesPerWord); } assert(closure._num_interpreted_frames == 0 || has_mixed_frames(), ""); diff --git a/src/hotspot/share/opto/runtime.cpp b/src/hotspot/share/opto/runtime.cpp index 7caaca8846c..b34d6266989 100644 --- a/src/hotspot/share/opto/runtime.cpp +++ b/src/hotspot/share/opto/runtime.cpp @@ -26,7 +26,6 @@ #include "classfile/vmClasses.hpp" #include "classfile/vmSymbols.hpp" #include "code/codeCache.hpp" -#include "code/compiledMethod.inline.hpp" #include "code/compiledIC.hpp" #include "code/nmethod.hpp" #include "code/pcDesc.hpp" @@ -1851,9 +1850,8 @@ static void trace_exception(outputStream* st, oop exception_oop, address excepti exception_oop->print_value_on(&tempst); tempst.print(" in "); CodeBlob* blob = CodeCache::find_blob(exception_pc); - if (blob->is_compiled()) { - CompiledMethod* cm = blob->as_compiled_method_or_null(); - cm->method()->print_value_on(&tempst); + if (blob->is_nmethod()) { + blob->as_nmethod()->method()->print_value_on(&tempst); } else if (blob->is_runtime_stub()) { tempst.print(""); } else { diff --git a/src/hotspot/share/prims/forte.cpp b/src/hotspot/share/prims/forte.cpp index 53dbc3caee0..b5973d4ad02 100644 --- a/src/hotspot/share/prims/forte.cpp +++ b/src/hotspot/share/prims/forte.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -80,7 +80,7 @@ class vframeStreamForte : public vframeStreamCommon { }; -static bool is_decipherable_compiled_frame(JavaThread* thread, frame* fr, CompiledMethod* nm); +static bool is_decipherable_compiled_frame(JavaThread* thread, frame* fr, nmethod* nm); static bool is_decipherable_interpreted_frame(JavaThread* thread, frame* fr, Method** method_p, @@ -150,7 +150,7 @@ void vframeStreamForte::forte_next() { // Determine if 'fr' is a decipherable compiled frame. We are already // assured that fr is for a java compiled method. 
-static bool is_decipherable_compiled_frame(JavaThread* thread, frame* fr, CompiledMethod* nm) { +static bool is_decipherable_compiled_frame(JavaThread* thread, frame* fr, nmethod* nm) { assert(nm->is_java_method(), "invariant"); if (thread->has_last_Java_frame() && thread->last_Java_pc() == fr->pc()) { @@ -413,9 +413,9 @@ static bool find_initial_Java_frame(JavaThread* thread, return false; } - if (candidate.cb()->is_compiled()) { + if (candidate.cb()->is_nmethod()) { - CompiledMethod* nm = candidate.cb()->as_compiled_method(); + nmethod* nm = candidate.cb()->as_nmethod(); *method_p = nm->method(); // If the frame is not decipherable, then the value of -1 diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp index d7de84ba20b..1ee0ba17158 100644 --- a/src/hotspot/share/prims/whitebox.cpp +++ b/src/hotspot/share/prims/whitebox.cpp @@ -768,9 +768,8 @@ class VM_WhiteBoxDeoptimizeFrames : public VM_WhiteBoxOperation { if (f->can_be_deoptimized() && !f->is_deoptimized_frame()) { Deoptimization::deoptimize(t, *f); if (_make_not_entrant) { - CompiledMethod* cm = CodeCache::find_compiled(f->pc()); - assert(cm != nullptr, "sanity check"); - cm->make_not_entrant(); + nmethod* nm = CodeCache::find_nmethod(f->pc()); + nm->make_not_entrant(); } ++_result; } @@ -839,7 +838,7 @@ WB_ENTRY(jboolean, WB_IsMethodCompiled(JNIEnv* env, jobject o, jobject method, j CHECK_JNI_EXCEPTION_(env, JNI_FALSE); MutexLocker mu(Compile_lock); methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid)); - CompiledMethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code(); + nmethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code(); if (code == nullptr) { return JNI_FALSE; } @@ -938,7 +937,7 @@ WB_ENTRY(jint, WB_GetMethodCompilationLevel(JNIEnv* env, jobject o, jobject meth jmethodID jmid = reflected_method_to_jmid(thread, env, method); CHECK_JNI_EXCEPTION_(env, CompLevel_none); methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid)); - CompiledMethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code(); + nmethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code(); return (code != nullptr ? code->comp_level() : CompLevel_none); WB_END @@ -1023,7 +1022,7 @@ WB_ENTRY(jint, WB_GetMethodEntryBci(JNIEnv* env, jobject o, jobject method)) jmethodID jmid = reflected_method_to_jmid(thread, env, method); CHECK_JNI_EXCEPTION_(env, InvocationEntryBci); methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid)); - CompiledMethod* code = mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false); + nmethod* code = mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false); return (code != nullptr && code->is_osr_method() ? code->osr_entry_bci() : InvocationEntryBci); WB_END @@ -1097,8 +1096,8 @@ bool WhiteBox::compile_method(Method* method, int comp_level, int bci, JavaThrea } // Check code again because compilation may be finished before Compile_lock is acquired. 
if (bci == InvocationEntryBci) { - CompiledMethod* code = mh->code(); - if (code != nullptr && code->as_nmethod_or_null() != nullptr) { + nmethod* code = mh->code(); + if (code != nullptr) { return true; } } else if (mh->lookup_osr_nmethod_for(bci, comp_level, false) != nullptr) { @@ -1556,7 +1555,7 @@ WB_ENTRY(jobjectArray, WB_GetNMethod(JNIEnv* env, jobject o, jobject method, jbo jmethodID jmid = reflected_method_to_jmid(thread, env, method); CHECK_JNI_EXCEPTION_(env, nullptr); methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid)); - CompiledMethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code(); + nmethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code(); jobjectArray result = nullptr; if (code == nullptr) { return result; @@ -1608,7 +1607,7 @@ CodeBlob* WhiteBox::allocate_code_blob(int size, CodeBlobType blob_type) { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); blob = (BufferBlob*) CodeCache::allocate(full_size, blob_type); if (blob != nullptr) { - ::new (blob) BufferBlob("WB::DummyBlob", full_size); + ::new (blob) BufferBlob("WB::DummyBlob", CodeBlobKind::Buffer, full_size); } } // Track memory usage statistic after releasing CodeCache_lock diff --git a/src/hotspot/share/runtime/continuation.cpp b/src/hotspot/share/runtime/continuation.cpp index 03c0af1a572..cd55b4a9cff 100644 --- a/src/hotspot/share/runtime/continuation.cpp +++ b/src/hotspot/share/runtime/continuation.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -105,10 +105,10 @@ bool Continuation::is_return_barrier_entry(const address pc) { } bool Continuation::is_continuation_enterSpecial(const frame& f) { - if (f.cb() == nullptr || !f.cb()->is_compiled()) { + if (f.cb() == nullptr || !f.cb()->is_nmethod()) { return false; } - Method* m = f.cb()->as_compiled_method()->method(); + Method* m = f.cb()->as_nmethod()->method(); return (m != nullptr && m->is_continuation_enter_intrinsic()); } diff --git a/src/hotspot/share/runtime/continuationEntry.cpp b/src/hotspot/share/runtime/continuationEntry.cpp index 42d4cb12486..31b062292f0 100644 --- a/src/hotspot/share/runtime/continuationEntry.cpp +++ b/src/hotspot/share/runtime/continuationEntry.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -37,14 +37,14 @@ int ContinuationEntry::_return_pc_offset = 0; address ContinuationEntry::_return_pc = nullptr; -CompiledMethod* ContinuationEntry::_enter_special = nullptr; +nmethod* ContinuationEntry::_enter_special = nullptr; int ContinuationEntry::_interpreted_entry_offset = 0; -void ContinuationEntry::set_enter_code(CompiledMethod* cm, int interpreted_entry_offset) { +void ContinuationEntry::set_enter_code(nmethod* nm, int interpreted_entry_offset) { assert(_return_pc_offset != 0, ""); - _return_pc = cm->code_begin() + _return_pc_offset; + _return_pc = nm->code_begin() + _return_pc_offset; - _enter_special = cm; + _enter_special = nm; _interpreted_entry_offset = interpreted_entry_offset; assert(_enter_special->code_contains(compiled_entry()), "entry not in enterSpecial"); assert(_enter_special->code_contains(interpreted_entry()), "entry not in enterSpecial"); @@ -141,7 +141,7 @@ bool ContinuationEntry::assert_entry_frame_laid_out(JavaThread* thread) { if (pc != StubRoutines::cont_returnBarrier()) { CodeBlob* cb = pc != nullptr ? CodeCache::find_blob(pc) : nullptr; assert(cb != nullptr, "sp: " INTPTR_FORMAT " pc: " INTPTR_FORMAT, p2i(sp), p2i(pc)); - assert(cb->as_compiled_method()->method()->is_continuation_enter_intrinsic(), ""); + assert(cb->as_nmethod()->method()->is_continuation_enter_intrinsic(), ""); } return true; diff --git a/src/hotspot/share/runtime/continuationEntry.hpp b/src/hotspot/share/runtime/continuationEntry.hpp index d2d5a5ab366..5930b008d27 100644 --- a/src/hotspot/share/runtime/continuationEntry.hpp +++ b/src/hotspot/share/runtime/continuationEntry.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,8 +32,8 @@ #include CPU_HEADER(continuationEntry) -class CompiledMethod; class JavaThread; +class nmethod; class OopMap; class RegisterMap; @@ -56,12 +56,12 @@ public: public: static int _return_pc_offset; // friend gen_continuation_enter - static void set_enter_code(CompiledMethod* cm, int interpreted_entry_offset); + static void set_enter_code(nmethod* nm, int interpreted_entry_offset); static bool is_interpreted_call(address call_address); private: static address _return_pc; - static CompiledMethod* _enter_special; + static nmethod* _enter_special; static int _interpreted_entry_offset; private: diff --git a/src/hotspot/share/runtime/continuationFreezeThaw.cpp b/src/hotspot/share/runtime/continuationFreezeThaw.cpp index ec6adf5cb67..2b8b69e1c7a 100644 --- a/src/hotspot/share/runtime/continuationFreezeThaw.cpp +++ b/src/hotspot/share/runtime/continuationFreezeThaw.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ #include "classfile/javaClasses.inline.hpp" #include "classfile/vmSymbols.hpp" #include "code/codeCache.inline.hpp" -#include "code/compiledMethod.inline.hpp" +#include "code/nmethod.inline.hpp" #include "code/vmreg.inline.hpp" #include "compiler/oopMap.inline.hpp" #include "gc/shared/continuationGCSupport.inline.hpp" @@ -1070,8 +1070,8 @@ void FreezeBase::patch(const frame& f, frame& hf, const frame& caller, bool is_b if (hf.is_compiled_frame()) { if (f.is_deoptimized_frame()) { // TODO DEOPT: long term solution: unroll on freeze and patch pc log_develop_trace(continuations)("Freezing deoptimized frame"); - assert(f.cb()->as_compiled_method()->is_deopt_pc(f.raw_pc()), ""); - assert(f.cb()->as_compiled_method()->is_deopt_pc(ContinuationHelper::Frame::real_pc(f)), ""); + assert(f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), ""); + assert(f.cb()->as_nmethod()->is_deopt_pc(ContinuationHelper::Frame::real_pc(f)), ""); } } #endif @@ -1470,7 +1470,7 @@ void FreezeBase::throw_stack_overflow_on_humongous_chunk() { #if INCLUDE_JVMTI static int num_java_frames(ContinuationWrapper& cont) { - ResourceMark rm; // used for scope traversal in num_java_frames(CompiledMethod*, address) + ResourceMark rm; // used for scope traversal in num_java_frames(nmethod*, address) int count = 0; for (stackChunkOop chunk = cont.tail(); chunk != nullptr; chunk = chunk->parent()) { count += chunk->num_java_frames(); @@ -2290,7 +2290,7 @@ void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int n if (hf.is_deoptimized_frame()) { maybe_set_fastpath(f.sp()); } else if (_thread->is_interp_only_mode() - || (_cont.is_preempted() && f.cb()->as_compiled_method()->is_marked_for_deoptimization())) { + || (_cont.is_preempted() && f.cb()->as_nmethod()->is_marked_for_deoptimization())) { // The caller of the safepoint stub when the continuation is preempted is not at a call instruction, and so // cannot rely on nmethod patching for deopt. 
assert(_thread->is_interp_only_mode() || stub_caller, "expected a stub-caller"); @@ -2309,7 +2309,7 @@ void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int n _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance); } else if (_cont.tail()->has_bitmap() && added_argsize > 0) { address start = (address)(heap_frame_top + ContinuationHelper::CompiledFrame::size(hf) + frame::metadata_words_at_top); - int stack_args_slots = f.cb()->as_compiled_method()->method()->num_stack_arg_slots(false /* rounded */); + int stack_args_slots = f.cb()->as_nmethod()->method()->num_stack_arg_slots(false /* rounded */); int argsize_in_bytes = stack_args_slots * VMRegImpl::stack_slot_size; clear_bitmap_bits(start, start + argsize_in_bytes); } @@ -2404,7 +2404,7 @@ void ThawBase::finish_thaw(frame& f) { } void ThawBase::push_return_frame(frame& f) { // see generate_cont_thaw - assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == f.cb()->as_compiled_method()->is_deopt_pc(f.raw_pc()), ""); + assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), ""); assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == (f.pc() != f.raw_pc()), ""); LogTarget(Trace, continuations) lt; @@ -2491,10 +2491,10 @@ static void do_deopt_after_thaw(JavaThread* thread) { fst.register_map()->set_include_argument_oops(false); ContinuationHelper::update_register_map_with_callee(*fst.current(), fst.register_map()); for (; !fst.is_done(); fst.next()) { - if (fst.current()->cb()->is_compiled()) { - CompiledMethod* cm = fst.current()->cb()->as_compiled_method(); - if (!cm->method()->is_continuation_native_intrinsic()) { - cm->make_deoptimized(); + if (fst.current()->cb()->is_nmethod()) { + nmethod* nm = fst.current()->cb()->as_nmethod(); + if (!nm->method()->is_continuation_native_intrinsic()) { + nm->make_deoptimized(); } } } @@ -2540,7 +2540,7 @@ static bool do_verify_after_thaw(JavaThread* thread, stackChunkOop chunk, output fst.register_map()->set_include_argument_oops(false); ContinuationHelper::update_register_map_with_callee(*fst.current(), fst.register_map()); for (; !fst.is_done() && !Continuation::is_continuation_enterSpecial(*fst.current()); fst.next()) { - if (fst.current()->cb()->is_compiled() && fst.current()->cb()->as_compiled_method()->is_marked_for_deoptimization()) { + if (fst.current()->cb()->is_nmethod() && fst.current()->cb()->as_nmethod()->is_marked_for_deoptimization()) { st->print_cr(">>> do_verify_after_thaw deopt"); fst.current()->deoptimize(nullptr); fst.current()->print_on(st); diff --git a/src/hotspot/share/runtime/continuationHelper.inline.hpp b/src/hotspot/share/runtime/continuationHelper.inline.hpp index 402703f1b19..d6b18a75815 100644 --- a/src/hotspot/share/runtime/continuationHelper.inline.hpp +++ b/src/hotspot/share/runtime/continuationHelper.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -57,7 +57,7 @@ inline bool ContinuationHelper::Frame::is_stub(CodeBlob* cb) { } inline Method* ContinuationHelper::Frame::frame_method(const frame& f) { - return f.is_interpreted_frame() ? f.interpreter_frame_method() : f.cb()->as_compiled_method()->method(); + return f.is_interpreted_frame() ? 
f.interpreter_frame_method() : f.cb()->as_nmethod()->method(); } inline address ContinuationHelper::Frame::return_pc(const frame& f) { @@ -79,8 +79,8 @@ inline intptr_t* ContinuationHelper::Frame::frame_top(const frame &f) { inline bool ContinuationHelper::Frame::is_deopt_return(address pc, const frame& sender) { if (sender.is_interpreted_frame()) return false; - CompiledMethod* cm = sender.cb()->as_compiled_method(); - return cm->is_deopt_pc(pc); + nmethod* nm = sender.cb()->as_nmethod(); + return nm->is_deopt_pc(pc); } #endif @@ -162,16 +162,16 @@ bool ContinuationHelper::CompiledFrame::is_owning_locks(JavaThread* thread, Regi assert(!f.is_interpreted_frame(), ""); assert(CompiledFrame::is_instance(f), ""); - CompiledMethod* cm = f.cb()->as_compiled_method(); - assert(!cm->is_compiled() || !cm->as_compiled_method()->is_native_method(), ""); // See compiledVFrame::compiledVFrame(...) in vframe_hp.cpp + nmethod* nm = f.cb()->as_nmethod(); + assert(!nm->is_native_method(), ""); // See compiledVFrame::compiledVFrame(...) in vframe_hp.cpp - if (!cm->has_monitors()) { + if (!nm->has_monitors()) { return false; } frame::update_map_with_saved_link(map, Frame::callee_link_address(f)); // the monitor object could be stored in the link register ResourceMark rm; - for (ScopeDesc* scope = cm->scope_desc_at(f.pc()); scope != nullptr; scope = scope->sender()) { + for (ScopeDesc* scope = nm->scope_desc_at(f.pc()); scope != nullptr; scope = scope->sender()) { GrowableArray* mons = scope->monitors(); if (mons == nullptr || mons->is_empty()) { continue; @@ -186,7 +186,7 @@ bool ContinuationHelper::CompiledFrame::is_owning_locks(JavaThread* thread, Regi StackValue* owner_sv = StackValue::create_stack_value(&f, map, ov); // it is an oop oop owner = owner_sv->get_obj()(); if (owner != nullptr) { - //assert(cm->has_monitors(), ""); + //assert(nm->has_monitors(), ""); return true; } } diff --git a/src/hotspot/share/runtime/deoptimization.cpp b/src/hotspot/share/runtime/deoptimization.cpp index e8f443ed1e0..d019decd6d8 100644 --- a/src/hotspot/share/runtime/deoptimization.cpp +++ b/src/hotspot/share/runtime/deoptimization.cpp @@ -117,34 +117,34 @@ DeoptimizationScope::~DeoptimizationScope() { assert(_deopted, "Deopt not executed"); } -void DeoptimizationScope::mark(CompiledMethod* cm, bool inc_recompile_counts) { +void DeoptimizationScope::mark(nmethod* nm, bool inc_recompile_counts) { ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag); // If it's already marked but we still need it to be deopted. - if (cm->is_marked_for_deoptimization()) { - dependent(cm); + if (nm->is_marked_for_deoptimization()) { + dependent(nm); return; } - CompiledMethod::DeoptimizationStatus status = - inc_recompile_counts ? CompiledMethod::deoptimize : CompiledMethod::deoptimize_noupdate; - Atomic::store(&cm->_deoptimization_status, status); + nmethod::DeoptimizationStatus status = + inc_recompile_counts ? 
nmethod::deoptimize : nmethod::deoptimize_noupdate; + Atomic::store(&nm->_deoptimization_status, status); // Make sure active is not committed assert(DeoptimizationScope::_committed_deopt_gen < DeoptimizationScope::_active_deopt_gen, "Must be"); - assert(cm->_deoptimization_generation == 0, "Is already marked"); + assert(nm->_deoptimization_generation == 0, "Is already marked"); - cm->_deoptimization_generation = DeoptimizationScope::_active_deopt_gen; + nm->_deoptimization_generation = DeoptimizationScope::_active_deopt_gen; _required_gen = DeoptimizationScope::_active_deopt_gen; } -void DeoptimizationScope::dependent(CompiledMethod* cm) { +void DeoptimizationScope::dependent(nmethod* nm) { ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag); // A method marked by someone else may have a _required_gen lower than what we marked with. // Therefore only store it if it's higher than _required_gen. - if (_required_gen < cm->_deoptimization_generation) { - _required_gen = cm->_deoptimization_generation; + if (_required_gen < nm->_deoptimization_generation) { + _required_gen = nm->_deoptimization_generation; } } @@ -321,7 +321,7 @@ static void print_objects(JavaThread* deoptee_thread, tty->print_raw(st.freeze()); } -static bool rematerialize_objects(JavaThread* thread, int exec_mode, CompiledMethod* compiled_method, +static bool rematerialize_objects(JavaThread* thread, int exec_mode, nmethod* compiled_method, frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk, bool& deoptimized_objects) { bool realloc_failures = false; @@ -439,7 +439,7 @@ bool Deoptimization::deoptimize_objects_internal(JavaThread* thread, GrowableArr bool& realloc_failures) { frame deoptee = chunk->at(0)->fr(); JavaThread* deoptee_thread = chunk->at(0)->thread(); - CompiledMethod* cm = deoptee.cb()->as_compiled_method_or_null(); + nmethod* nm = deoptee.cb()->as_nmethod_or_null(); RegisterMap map(chunk->at(0)->register_map()); bool deoptimized_objects = false; @@ -448,7 +448,7 @@ bool Deoptimization::deoptimize_objects_internal(JavaThread* thread, GrowableArr // Reallocate the non-escaping objects and restore their fields. if (jvmci_enabled COMPILER2_PRESENT(|| (DoEscapeAnalysis && EliminateAllocations) || EliminateAutoBox || EnableVectorAggressiveReboxing)) { - realloc_failures = rematerialize_objects(thread, Unpack_none, cm, deoptee, map, chunk, deoptimized_objects); + realloc_failures = rematerialize_objects(thread, Unpack_none, nm, deoptee, map, chunk, deoptimized_objects); } // MonitorInfo structures used in eliminate_locks are not GC safe.
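(Illustrative sketch, not part of the patch: how a caller is expected to drive DeoptimizationScope now that mark() and dependent() take nmethod*. The helper name invalidate_nmethod and the deoptimize_marked() call are my reading of the existing scope API; treat them as assumptions.)

  // Sketch only: mark one nmethod under the active deopt generation, then
  // execute the deoptimization for everything marked through this scope.
  static void invalidate_nmethod(nmethod* nm) {
    DeoptimizationScope deopt_scope;
    deopt_scope.mark(nm);             // records nm->_deoptimization_generation
    deopt_scope.deoptimize_marked();  // make marked nmethods not entrant, patch return pcs
  }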
@@ -492,8 +492,8 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread frame deoptee = stub_frame.sender(&map); // Set the deoptee nmethod assert(current->deopt_compiled_method() == nullptr, "Pending deopt!"); - CompiledMethod* cm = deoptee.cb()->as_compiled_method_or_null(); - current->set_deopt_compiled_method(cm); + nmethod* nm = deoptee.cb()->as_nmethod_or_null(); + current->set_deopt_compiled_method(nm); if (VerifyStack) { current->validate_frame_layout(); @@ -522,7 +522,7 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread if (jvmci_enabled COMPILER2_PRESENT( || (DoEscapeAnalysis && EliminateAllocations) || EliminateAutoBox || EnableVectorAggressiveReboxing )) { bool unused; - realloc_failures = rematerialize_objects(current, exec_mode, cm, deoptee, map, chunk, unused); + realloc_failures = rematerialize_objects(current, exec_mode, nm, deoptee, map, chunk, unused); } #endif // COMPILER2_OR_JVMCI @@ -1220,8 +1220,8 @@ bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* bool cache_init_error = false; if (k->is_instance_klass()) { #if INCLUDE_JVMCI - CompiledMethod* cm = fr->cb()->as_compiled_method_or_null(); - if (cm->is_compiled_by_jvmci() && sv->is_auto_box()) { + nmethod* nm = fr->cb()->as_nmethod_or_null(); + if (nm->is_compiled_by_jvmci() && sv->is_auto_box()) { AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv; obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD); if (obj != nullptr) { @@ -1747,14 +1747,14 @@ void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr, Deopt gather_statistics(reason, Action_none, Bytecodes::_illegal); if (LogCompilation && xtty != nullptr) { - CompiledMethod* cm = fr.cb()->as_compiled_method_or_null(); - assert(cm != nullptr, "only compiled methods can deopt"); + nmethod* nm = fr.cb()->as_nmethod_or_null(); + assert(nm != nullptr, "only compiled methods can deopt"); ttyLocker ttyl; xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc())); - cm->log_identity(xtty); + nm->log_identity(xtty); xtty->end_head(); - for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) { + for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) { xtty->begin_elem("jvms bci='%d'", sd->bci()); xtty->method(sd->method()); xtty->end_elem(); @@ -1782,9 +1782,9 @@ void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason } #if INCLUDE_JVMCI -address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) { +address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm) { // there is no exception handler for this pc => deoptimize - cm->make_not_entrant(); + nm->make_not_entrant(); // Use Deoptimization::deoptimize for all of its side-effects: // gathering traps statistics, logging... 
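(Illustrative sketch, not part of the patch: the scope-walking loop that several of these hunks now spell as nm->scope_desc_at(pc); it mirrors num_java_frames() in the stackChunkOop.cpp hunk earlier in this diff and counts the inlined Java frames behind one compiled pc.)

  static int count_java_frames(nmethod* nm, address pc) {
    int count = 0;
    // Innermost scope first; sender() walks outward through the inlining chain.
    for (ScopeDesc* scope = nm->scope_desc_at(pc); scope != nullptr; scope = scope->sender()) {
      count++;
    }
    return count;
  }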
@@ -1797,7 +1797,7 @@ address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* RegisterMap::WalkContinuation::skip); frame runtime_frame = thread->last_frame(); frame caller_frame = runtime_frame.sender(&reg_map); - assert(caller_frame.cb()->as_compiled_method_or_null() == cm, "expect top frame compiled method"); + assert(caller_frame.cb()->as_nmethod_or_null() == nm, "expect top frame compiled method"); vframe* vf = vframe::new_vframe(&caller_frame, &reg_map, thread); compiledVFrame* cvf = compiledVFrame::cast(vf); ScopeDesc* imm_scope = cvf->scope(); @@ -1815,7 +1815,7 @@ address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* Deoptimization::deoptimize(thread, caller_frame, Deoptimization::Reason_not_compiled_exception_handler); - MethodData* trap_mdo = get_method_data(thread, methodHandle(thread, cm->method()), true); + MethodData* trap_mdo = get_method_data(thread, methodHandle(thread, nm->method()), true); if (trap_mdo != nullptr) { trap_mdo->inc_trap_count(Deoptimization::Reason_not_compiled_exception_handler); } @@ -1950,7 +1950,7 @@ static void register_serializers() { JfrSerializer::register_serializer(TYPE_DEOPTIMIZATIONACTION, true, new DeoptActionSerializer()); } -static void post_deoptimization_event(CompiledMethod* nm, +static void post_deoptimization_event(nmethod* nm, const Method* method, int trap_bci, int instruction, @@ -1979,7 +1979,7 @@ static void post_deoptimization_event(CompiledMethod* nm, #endif // INCLUDE_JFR -static void log_deopt(CompiledMethod* nm, Method* tm, intptr_t pc, frame& fr, int trap_bci, +static void log_deopt(nmethod* nm, Method* tm, intptr_t pc, frame& fr, int trap_bci, const char* reason_name, const char* reason_action) { LogTarget(Debug, deoptimization) lt; if (lt.is_enabled()) { @@ -2041,7 +2041,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr vframe* vf = vframe::new_vframe(&fr, &reg_map, current); compiledVFrame* cvf = compiledVFrame::cast(vf); - CompiledMethod* nm = cvf->code(); + nmethod* nm = cvf->code(); ScopeDesc* trap_scope = cvf->scope(); @@ -2058,7 +2058,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr #if INCLUDE_JVMCI jlong speculation = current->pending_failed_speculation(); if (nm->is_compiled_by_jvmci()) { - nm->as_nmethod()->update_speculation(current); + nm->update_speculation(current); } else { assert(speculation == 0, "There should not be a speculation for methods compiled by non-JVMCI compilers"); } @@ -2178,8 +2178,8 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr trap_scope->bci(), p2i(fr.pc()), fr.pc() - nm->code_begin() JVMCI_ONLY(COMMA debug_id)); st.print(" compiler=%s compile_id=%d", nm->compiler_name(), nm->compile_id()); #if INCLUDE_JVMCI - if (nm->is_nmethod()) { - const char* installed_code_name = nm->as_nmethod()->jvmci_name(); + if (nm->is_compiled_by_jvmci()) { + const char* installed_code_name = nm->jvmci_name(); if (installed_code_name != nullptr) { st.print(" (JVMCI: installed code name=%s) ", installed_code_name); } @@ -2433,7 +2433,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr // Assume that in new recompiled code the statistic could be different, // for example, due to different inlining.
if ((reason != Reason_rtm_state_change) && (trap_mdo != nullptr) && - UseRTMDeopt && (nm->as_nmethod()->rtm_state() != ProfileRTM)) { + UseRTMDeopt && (nm->rtm_state() != ProfileRTM)) { trap_mdo->atomic_set_rtm_state(ProfileRTM); } #endif diff --git a/src/hotspot/share/runtime/deoptimization.hpp b/src/hotspot/share/runtime/deoptimization.hpp index e74047811c2..61e85d19fd7 100644 --- a/src/hotspot/share/runtime/deoptimization.hpp +++ b/src/hotspot/share/runtime/deoptimization.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -57,9 +57,9 @@ class DeoptimizationScope { DeoptimizationScope(); ~DeoptimizationScope(); // Mark a method, if already marked as dependent. - void mark(CompiledMethod* cm, bool inc_recompile_counts = true); + void mark(nmethod* nm, bool inc_recompile_counts = true); // Record this as a dependent method. - void dependent(CompiledMethod* cm); + void dependent(nmethod* nm); // Execute the deoptimization. // Make the nmethods not entrant, stackwalks and patch return pcs and sets post call nops. @@ -184,7 +184,7 @@ class Deoptimization : AllStatic { static void deoptimize(JavaThread* thread, frame fr, DeoptReason reason = Reason_constraint); #if INCLUDE_JVMCI - static address deoptimize_for_missing_exception_handler(CompiledMethod* cm); + static address deoptimize_for_missing_exception_handler(nmethod* nm); static oop get_cached_box(AutoBoxObjectValue* bv, frame* fr, RegisterMap* reg_map, bool& cache_init_error, TRAPS); #endif diff --git a/src/hotspot/share/runtime/frame.cpp b/src/hotspot/share/runtime/frame.cpp index 33abbd61694..daff7355742 100644 --- a/src/hotspot/share/runtime/frame.cpp +++ b/src/hotspot/share/runtime/frame.cpp @@ -205,11 +205,12 @@ void RegisterMap::print() const { address frame::raw_pc() const { if (is_deoptimized_frame()) { - CompiledMethod* cm = cb()->as_compiled_method_or_null(); - if (cm->is_method_handle_return(pc())) - return cm->deopt_mh_handler_begin() - pc_return_offset; + nmethod* nm = cb()->as_nmethod_or_null(); + assert(nm != nullptr, "only nmethod is expected here"); + if (nm->is_method_handle_return(pc())) + return nm->deopt_mh_handler_begin() - pc_return_offset; else - return cm->deopt_handler_begin() - pc_return_offset; + return nm->deopt_handler_begin() - pc_return_offset; } else { return (pc() - pc_return_offset); } @@ -313,8 +314,8 @@ Method* frame::safe_interpreter_frame_method() const { bool frame::should_be_deoptimized() const { if (_deopt_state == is_deoptimized || !is_compiled_frame() ) return false; - assert(_cb != nullptr && _cb->is_compiled(), "must be an nmethod"); - CompiledMethod* nm = (CompiledMethod *)_cb; + assert(_cb != nullptr && _cb->is_nmethod(), "must be an nmethod"); + nmethod* nm = _cb->as_nmethod(); LogTarget(Debug, dependencies) lt; if (lt.is_enabled()) { LogStream ls(&lt); @@ -333,7 +334,7 @@ bool frame::should_be_deoptimized() const { bool frame::can_be_deoptimized() const { if (!is_compiled_frame()) return false; - CompiledMethod* nm = (CompiledMethod*)_cb; + nmethod* nm = _cb->as_nmethod(); if(!nm->can_be_deoptimized()) return false; @@ -346,18 +347,18 @@ void frame::deoptimize(JavaThread* thread) { || (thread->frame_anchor()->has_last_Java_frame() && thread->frame_anchor()->walkable()), "must be"); // Schedule deoptimization of an nmethod
activation with this frame. - assert(_cb != nullptr && _cb->is_compiled(), "must be"); + assert(_cb != nullptr && _cb->is_nmethod(), "must be"); // If the call site is a MethodHandle call site use the MH deopt handler. - CompiledMethod* cm = (CompiledMethod*) _cb; - address deopt = cm->is_method_handle_return(pc()) ? - cm->deopt_mh_handler_begin() : - cm->deopt_handler_begin(); + nmethod* nm = _cb->as_nmethod(); + address deopt = nm->is_method_handle_return(pc()) ? + nm->deopt_mh_handler_begin() : + nm->deopt_handler_begin(); NativePostCallNop* inst = nativePostCallNop_at(pc()); // Save the original pc before we patch in the new one - cm->set_original_pc(this, pc()); + nm->set_original_pc(this, pc()); patch_pc(thread, deopt); assert(is_deoptimized_frame(), "must be"); @@ -674,15 +675,12 @@ void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose } } else if (_cb->is_buffer_blob()) { st->print("v ~BufferBlob::%s " PTR_FORMAT, ((BufferBlob *)_cb)->name(), p2i(pc())); - } else if (_cb->is_compiled()) { - CompiledMethod* cm = (CompiledMethod*)_cb; - Method* m = cm->method(); + } else if (_cb->is_nmethod()) { + nmethod* nm = _cb->as_nmethod(); + Method* m = nm->method(); if (m != nullptr) { - if (cm->is_nmethod()) { - nmethod* nm = cm->as_nmethod(); - st->print("J %d%s", nm->compile_id(), (nm->is_osr_method() ? "%" : "")); - st->print(" %s", nm->compiler_name()); - } + st->print("J %d%s", nm->compile_id(), (nm->is_osr_method() ? "%" : "")); + st->print(" %s", nm->compiler_name()); m->name_and_sig_as_C_string(buf, buflen); st->print(" %s", buf); ModuleEntry* module = m->method_holder()->module(); @@ -697,12 +695,9 @@ void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose st->print(" (%d bytes) @ " PTR_FORMAT " [" PTR_FORMAT "+" INTPTR_FORMAT "]", m->code_size(), p2i(_pc), p2i(_cb->code_begin()), _pc - _cb->code_begin()); #if INCLUDE_JVMCI - if (cm->is_nmethod()) { - nmethod* nm = cm->as_nmethod(); - const char* jvmciName = nm->jvmci_name(); - if (jvmciName != nullptr) { - st->print(" (%s)", jvmciName); - } + const char* jvmciName = nm->jvmci_name(); + if (jvmciName != nullptr) { + st->print(" (%s)", jvmciName); } #endif } else { @@ -1403,22 +1398,22 @@ void frame::describe(FrameValues& values, int frame_no, const RegisterMap* reg_m } else if (is_entry_frame()) { // For now just label the frame values.describe(-1, info_address, err_msg("#%d entry frame", frame_no), 2); - } else if (cb()->is_compiled()) { + } else if (cb()->is_nmethod()) { // For now just label the frame - CompiledMethod* cm = cb()->as_compiled_method(); + nmethod* nm = cb()->as_nmethod(); values.describe(-1, info_address, FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method J %s%s", frame_no, - p2i(cm), - cm->method()->name_and_sig_as_C_string(), + p2i(nm), + nm->method()->name_and_sig_as_C_string(), (_deopt_state == is_deoptimized) ? " (deoptimized)" : ((_deopt_state == unknown) ? 
" (state unknown)" : "")), 3); { // mark arguments (see nmethod::print_nmethod_labels) - Method* m = cm->method(); + Method* m = nm->method(); - int stack_slot_offset = cm->frame_size() * wordSize; // offset, in bytes, to caller sp + int stack_slot_offset = nm->frame_size() * wordSize; // offset, in bytes, to caller sp int sizeargs = m->size_of_parameters(); BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs); @@ -1469,7 +1464,7 @@ void frame::describe(FrameValues& values, int frame_no, const RegisterMap* reg_m if (reg_map != nullptr && is_java_frame()) { int scope_no = 0; - for (ScopeDesc* scope = cm->scope_desc_at(pc()); scope != nullptr; scope = scope->sender(), scope_no++) { + for (ScopeDesc* scope = nm->scope_desc_at(pc()); scope != nullptr; scope = scope->sender(), scope_no++) { Method* m = scope->method(); int bci = scope->bci(); values.describe(-1, info_address, err_msg("- #%d scope %s @ %d", scope_no, m->name_and_sig_as_C_string(), bci), 2); @@ -1507,7 +1502,7 @@ void frame::describe(FrameValues& values, int frame_no, const RegisterMap* reg_m } } - if (cm->method()->is_continuation_enter_intrinsic()) { + if (nm->method()->is_continuation_enter_intrinsic()) { ContinuationEntry* ce = Continuation::get_continuation_entry_for_entry_frame(reg_map->thread(), *this); // (ContinuationEntry*)unextended_sp(); ce->describe(values, frame_no); } diff --git a/src/hotspot/share/runtime/frame.hpp b/src/hotspot/share/runtime/frame.hpp index 4d5777059e0..6bf24aec5a2 100644 --- a/src/hotspot/share/runtime/frame.hpp +++ b/src/hotspot/share/runtime/frame.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,7 +40,6 @@ typedef class BytecodeInterpreter* interpreterState; class CodeBlob; -class CompiledMethod; class FrameValues; class InterpreterOopMap; class JavaCallWrapper; diff --git a/src/hotspot/share/runtime/frame.inline.hpp b/src/hotspot/share/runtime/frame.inline.hpp index da774fe1620..4624c114ec4 100644 --- a/src/hotspot/share/runtime/frame.inline.hpp +++ b/src/hotspot/share/runtime/frame.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,7 +28,7 @@ #include "runtime/frame.hpp" #include "code/codeBlob.inline.hpp" -#include "code/compiledMethod.inline.hpp" +#include "code/nmethod.inline.hpp" #include "interpreter/interpreter.hpp" #include "oops/stackChunkOop.inline.hpp" #include "oops/method.hpp" @@ -64,8 +64,8 @@ inline bool frame::is_upcall_stub_frame() const { inline bool frame::is_compiled_frame() const { if (_cb != nullptr && - _cb->is_compiled() && - ((CompiledMethod*)_cb)->is_java_method()) { + _cb->is_nmethod() && + _cb->as_nmethod()->is_java_method()) { return true; } return false; diff --git a/src/hotspot/share/runtime/javaThread.hpp b/src/hotspot/share/runtime/javaThread.hpp index b16a879189e..bb5def1c171 100644 --- a/src/hotspot/share/runtime/javaThread.hpp +++ b/src/hotspot/share/runtime/javaThread.hpp @@ -120,7 +120,7 @@ class JavaThread: public Thread { // Deopt support DeoptResourceMark* _deopt_mark; // Holds special ResourceMark for deoptimization - CompiledMethod* _deopt_nmethod; // CompiledMethod that is currently being deoptimized + nmethod* _deopt_nmethod; // nmethod that is currently being deoptimized vframeArray* _vframe_array_head; // Holds the heap of the active vframeArrays vframeArray* _vframe_array_last; // Holds last vFrameArray we popped // Holds updates by JVMTI agents for compiled frames that cannot be performed immediately. They @@ -686,8 +686,8 @@ private: void set_deopt_mark(DeoptResourceMark* value) { _deopt_mark = value; } DeoptResourceMark* deopt_mark(void) { return _deopt_mark; } - void set_deopt_compiled_method(CompiledMethod* nm) { _deopt_nmethod = nm; } - CompiledMethod* deopt_compiled_method() { return _deopt_nmethod; } + void set_deopt_compiled_method(nmethod* nm) { _deopt_nmethod = nm; } + nmethod* deopt_compiled_method() { return _deopt_nmethod; } Method* callee_target() const { return _callee_target; } void set_callee_target (Method* x) { _callee_target = x; } diff --git a/src/hotspot/share/runtime/safepoint.cpp b/src/hotspot/share/runtime/safepoint.cpp index 74972b655a5..935c1beee40 100644 --- a/src/hotspot/share/runtime/safepoint.cpp +++ b/src/hotspot/share/runtime/safepoint.cpp @@ -844,8 +844,8 @@ void ThreadSafepointState::handle_polling_page_exception() { address real_return_addr = self->saved_exception_pc(); CodeBlob *cb = CodeCache::find_blob(real_return_addr); - assert(cb != nullptr && cb->is_compiled(), "return address should be in nmethod"); - CompiledMethod* nm = (CompiledMethod*)cb; + assert(cb != nullptr && cb->is_nmethod(), "return address should be in nmethod"); + nmethod* nm = cb->as_nmethod(); // Find frame of caller frame stub_fr = self->last_frame(); diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp index ebd30f05451..248d840f01f 100644 --- a/src/hotspot/share/runtime/sharedRuntime.cpp +++ b/src/hotspot/share/runtime/sharedRuntime.cpp @@ -29,7 +29,7 @@ #include "classfile/vmSymbols.hpp" #include "code/codeCache.hpp" #include "code/compiledIC.hpp" -#include "code/compiledMethod.inline.hpp" +#include "code/nmethod.inline.hpp" #include "code/scopeDesc.hpp" #include "code/vtableStubs.hpp" #include "compiler/abstractCompiler.hpp" @@ -485,7 +485,7 @@ address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* curr // The fastest case first CodeBlob* blob = CodeCache::find_blob(return_address); - CompiledMethod* nm = (blob != nullptr) ? blob->as_compiled_method_or_null() : nullptr; + nmethod* nm = (blob != nullptr) ? 
blob->as_nmethod_or_null() : nullptr; if (nm != nullptr) { // Set flag if return address is a method handle call site. current->set_is_method_handle_return(nm->is_method_handle_return(return_address)); @@ -558,10 +558,10 @@ address SharedRuntime::get_poll_stub(address pc) { CodeBlob *cb = CodeCache::find_blob(pc); // Should be an nmethod - guarantee(cb != nullptr && cb->is_compiled(), "safepoint polling: pc must refer to an nmethod"); + guarantee(cb != nullptr && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod"); // Look up the relocation information - assert(((CompiledMethod*)cb)->is_at_poll_or_poll_return(pc), + assert(cb->as_nmethod()->is_at_poll_or_poll_return(pc), "safepoint polling: type must be poll at pc " INTPTR_FORMAT, p2i(pc)); #ifdef ASSERT @@ -572,8 +572,8 @@ address SharedRuntime::get_poll_stub(address pc) { } #endif - bool at_poll_return = ((CompiledMethod*)cb)->is_at_poll_return(pc); - bool has_wide_vectors = ((CompiledMethod*)cb)->has_wide_vectors(); + bool at_poll_return = cb->as_nmethod()->is_at_poll_return(pc); + bool has_wide_vectors = cb->as_nmethod()->has_wide_vectors(); if (at_poll_return) { assert(SharedRuntime::polling_page_return_handler_blob() != nullptr, "polling page return stub not created yet"); @@ -683,26 +683,25 @@ JRT_END // ret_pc points into caller; we are returning caller's exception handler // for given exception // Note that the implementation of this method assumes it's only called when an exception has actually occured -address SharedRuntime::compute_compiled_exc_handler(CompiledMethod* cm, address ret_pc, Handle& exception, +address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception, bool force_unwind, bool top_frame_only, bool& recursive_exception_occurred) { - assert(cm != nullptr, "must exist"); + assert(nm != nullptr, "must exist"); ResourceMark rm; #if INCLUDE_JVMCI - if (cm->is_compiled_by_jvmci()) { + if (nm->is_compiled_by_jvmci()) { // lookup exception handler for this pc - int catch_pco = pointer_delta_as_int(ret_pc, cm->code_begin()); - ExceptionHandlerTable table(cm); + int catch_pco = pointer_delta_as_int(ret_pc, nm->code_begin()); + ExceptionHandlerTable table(nm); HandlerTableEntry *t = table.entry_for(catch_pco, -1, 0); if (t != nullptr) { - return cm->code_begin() + t->pco(); + return nm->code_begin() + t->pco(); } else { - return Deoptimization::deoptimize_for_missing_exception_handler(cm); + return Deoptimization::deoptimize_for_missing_exception_handler(nm); } } #endif // INCLUDE_JVMCI - nmethod* nm = cm->as_nmethod(); ScopeDesc* sd = nm->scope_desc_at(ret_pc); // determine handler bci, if any EXCEPTION_MARK; @@ -913,7 +912,7 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current, // 2. Inline-cache check in nmethod, or // 3. Implicit null exception in nmethod - if (!cb->is_compiled()) { + if (!cb->is_nmethod()) { bool is_in_blob = cb->is_adapter_blob() || cb->is_method_handles_adapter_blob(); if (!is_in_blob) { // Allow normal crash reporting to handle this @@ -925,8 +924,8 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current, } // Otherwise, it's a compiled method. Consult its exception handlers. 
- CompiledMethod* cm = (CompiledMethod*)cb; - if (cm->inlinecache_check_contains(pc)) { + nmethod* nm = cb->as_nmethod(); + if (nm->inlinecache_check_contains(pc)) { // exception happened inside inline-cache check code // => the nmethod is not yet active (i.e., the frame // is not set up yet) => use return address pushed by @@ -935,7 +934,7 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current, return StubRoutines::throw_NullPointerException_at_call_entry(); } - if (cm->method()->is_method_handle_intrinsic()) { + if (nm->method()->is_method_handle_intrinsic()) { // exception happened inside MH dispatch code, similar to a vtable stub Events::log_exception(current, "NullPointerException in MH adapter " INTPTR_FORMAT, p2i(pc)); return StubRoutines::throw_NullPointerException_at_call_entry(); @@ -944,7 +943,7 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current, #ifndef PRODUCT _implicit_null_throws++; #endif - target_pc = cm->continuation_for_implicit_null_exception(pc); + target_pc = nm->continuation_for_implicit_null_exception(pc); // If there's an unexpected fault, target_pc might be null, // in which case we want to fall through into the normal // error handling code. @@ -955,12 +954,12 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current, case IMPLICIT_DIVIDE_BY_ZERO: { - CompiledMethod* cm = CodeCache::find_compiled(pc); - guarantee(cm != nullptr, "must have containing compiled method for implicit division-by-zero exceptions"); + nmethod* nm = CodeCache::find_nmethod(pc); + guarantee(nm != nullptr, "must have containing compiled method for implicit division-by-zero exceptions"); #ifndef PRODUCT _implicit_div0_throws++; #endif - target_pc = cm->continuation_for_implicit_div0_exception(pc); + target_pc = nm->continuation_for_implicit_div0_exception(pc); // If there's an unexpected fault, target_pc might be null, // in which case we want to fall through into the normal // error handling code. @@ -1109,7 +1108,7 @@ Handle SharedRuntime::find_callee_info(Bytecodes::Code& bc, CallInfo& callinfo, } Method* SharedRuntime::extract_attached_method(vframeStream& vfst) { - CompiledMethod* caller = vfst.nm(); + nmethod* caller = vfst.nm(); address pc = vfst.frame_pc(); { // Get call instruction under lock because another thread may be busy patching it. 
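(Illustrative sketch, not part of the patch: the two CodeBlob-to-nmethod idioms the surrounding sharedRuntime hunks settle on once as_compiled_method*() is gone; lookup_nmethod is a made-up helper name used only for this example.)

  // Checked form: only call as_nmethod() after is_nmethod() (or use find_nmethod()).
  static nmethod* lookup_nmethod(address pc) {
    CodeBlob* blob = CodeCache::find_blob(pc);
    if (blob == nullptr || !blob->is_nmethod()) {
      return nullptr;   // adapters, stubs and buffer blobs are not nmethods
    }
    return blob->as_nmethod();
  }
  // Tolerant form, when a non-nmethod blob is an expected case:
  //   nmethod* nm = blob->as_nmethod_or_null();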
@@ -1295,8 +1294,8 @@ methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, T frame caller_frame = current->last_frame().sender(&cbl_map); CodeBlob* caller_cb = caller_frame.cb(); - guarantee(caller_cb != nullptr && caller_cb->is_compiled(), "must be called from compiled method"); - CompiledMethod* caller_nm = caller_cb->as_compiled_method(); + guarantee(caller_cb != nullptr && caller_cb->is_nmethod(), "must be called from compiled method"); + nmethod* caller_nm = caller_cb->as_nmethod(); // determine call info & receiver // note: a) receiver is null for static calls @@ -1506,8 +1505,8 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* curren frame stub_frame = current->last_frame(); assert(stub_frame.is_runtime_frame(), "must be a runtimeStub"); frame caller = stub_frame.sender(&reg_map); - enter_special = caller.cb() != nullptr && caller.cb()->is_compiled() - && caller.cb()->as_compiled_method()->method()->is_continuation_enter_intrinsic(); + enter_special = caller.cb() != nullptr && caller.cb()->is_nmethod() + && caller.cb()->as_nmethod()->method()->is_continuation_enter_intrinsic(); } JRT_BLOCK_END @@ -1603,7 +1602,7 @@ methodHandle SharedRuntime::handle_ic_miss_helper(TRAPS) { RegisterMap::WalkContinuation::skip); frame caller_frame = current->last_frame().sender(&reg_map); CodeBlob* cb = caller_frame.cb(); - CompiledMethod* caller_nm = cb->as_compiled_method(); + nmethod* caller_nm = cb->as_nmethod(); CompiledICLocker ml(caller_nm); CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc()); @@ -1634,11 +1633,11 @@ methodHandle SharedRuntime::reresolve_call_site(TRAPS) { // so no update to the caller is needed. if ((caller.is_compiled_frame() && !caller.is_deoptimized_frame()) || - (caller.is_native_frame() && ((CompiledMethod*)caller.cb())->method()->is_continuation_enter_intrinsic())) { + (caller.is_native_frame() && caller.cb()->as_nmethod()->method()->is_continuation_enter_intrinsic())) { address pc = caller.pc(); - CompiledMethod* caller_nm = CodeCache::find_compiled(pc); + nmethod* caller_nm = CodeCache::find_nmethod(pc); // Default call_addr is the location of the "basic" call. // Determine the address of the call we a reresolving. With @@ -1766,7 +1765,7 @@ JRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address cal // Result from nmethod::is_unloading is not stable across safepoints. NoSafepointVerifier nsv; - CompiledMethod* callee = method->code(); + nmethod* callee = method->code(); if (callee == nullptr) { return; } @@ -1775,12 +1774,12 @@ JRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address cal MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, JavaThread::current())); CodeBlob* cb = CodeCache::find_blob(caller_pc); - if (cb == nullptr || !cb->is_compiled() || !callee->is_in_use() || callee->is_unloading()) { + if (cb == nullptr || !cb->is_nmethod() || !callee->is_in_use() || callee->is_unloading()) { return; } - // The check above makes sure this is a nmethod. - CompiledMethod* caller = cb->as_compiled_method(); + // The check above makes sure this is an nmethod. + nmethod* caller = cb->as_nmethod(); // Get the return PC for the passed caller PC.
address return_pc = caller_pc + frame::pc_return_offset; @@ -3043,7 +3042,7 @@ JRT_END frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* current, frame fr) { ResourceMark rm(current); frame activation; - CompiledMethod* nm = nullptr; + nmethod* nm = nullptr; int count = 1; assert(fr.is_java_frame(), "Must start on Java frame"); @@ -3066,8 +3065,8 @@ frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* curren } } else { CodeBlob* cb = fr.cb(); - if (cb != nullptr && cb->is_compiled()) { - nm = cb->as_compiled_method(); + if (cb != nullptr && cb->is_nmethod()) { + nm = cb->as_nmethod(); method = nm->method(); // scope_desc_near() must be used, instead of scope_desc_at() because on // SPARC, the pcDesc can be on the delay slot after the call instruction. diff --git a/src/hotspot/share/runtime/sharedRuntime.hpp b/src/hotspot/share/runtime/sharedRuntime.hpp index 9d259616bd9..c8a10006764 100644 --- a/src/hotspot/share/runtime/sharedRuntime.hpp +++ b/src/hotspot/share/runtime/sharedRuntime.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -181,7 +181,7 @@ class SharedRuntime: AllStatic { static address exception_handler_for_return_address(JavaThread* current, address return_address); // exception handling and implicit exceptions - static address compute_compiled_exc_handler(CompiledMethod* nm, address ret_pc, Handle& exception, + static address compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception, bool force_unwind, bool top_frame_only, bool& recursive_exception_occurred); enum ImplicitExceptionKind { IMPLICIT_NULL, @@ -328,7 +328,7 @@ class SharedRuntime: AllStatic { // deopt blob static void generate_deopt_blob(void); - static bool handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm, const frame& caller_frame, + static bool handle_ic_miss_helper_internal(Handle receiver, nmethod* caller_nm, const frame& caller_frame, methodHandle callee_method, Bytecodes::Code bc, CallInfo& call_info, bool& needs_ic_stub_refill, TRAPS); diff --git a/src/hotspot/share/runtime/stackChunkFrameStream.inline.hpp b/src/hotspot/share/runtime/stackChunkFrameStream.inline.hpp index 0a279c57385..8ead76211b7 100644 --- a/src/hotspot/share/runtime/stackChunkFrameStream.inline.hpp +++ b/src/hotspot/share/runtime/stackChunkFrameStream.inline.hpp @@ -1,4 +1,5 @@ -/* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. +/* + * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -110,7 +111,7 @@ inline bool StackChunkFrameStream<frame_kind>::is_stub() const {

 template <ChunkFrames frame_kind>
 inline bool StackChunkFrameStream<frame_kind>::is_compiled() const {
-  return cb() != nullptr && _cb->is_compiled();
+  return cb() != nullptr && _cb->is_nmethod();
 }

 template <>
@@ -188,9 +189,9 @@ inline int StackChunkFrameStream<frame_kind>::stack_argsize() const {
     return 0;
   }
   assert(cb() != nullptr, "");
-  assert(cb()->is_compiled(), "");
-  assert(cb()->as_compiled_method()->method() != nullptr, "");
-  return (cb()->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
+  assert(cb()->is_nmethod(), "");
+  assert(cb()->as_nmethod()->method() != nullptr, "");
+  return (cb()->as_nmethod()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
 }

 template <ChunkFrames frame_kind>
@@ -265,7 +266,7 @@ inline void StackChunkFrameStream<frame_kind>::get_oopmap() const {
 template <ChunkFrames frame_kind>
 inline void StackChunkFrameStream<frame_kind>::get_oopmap(address pc, int oopmap_slot) const {
   assert(cb() != nullptr, "");
-  assert(!is_compiled() || !cb()->as_compiled_method()->is_deopt_pc(pc), "");
+  assert(!is_compiled() || !cb()->as_nmethod()->is_deopt_pc(pc), "");
   if (oopmap_slot >= 0) {
     assert(oopmap_slot >= 0, "");
     assert(cb()->oop_map_for_slot(oopmap_slot, pc) != nullptr, "");
@@ -317,13 +318,13 @@ inline address StackChunkFrameStream<frame_kind>::orig_pc() const {
   if (is_interpreted() || is_stub()) {
     return pc1;
   }
-  CompiledMethod* cm = cb()->as_compiled_method();
-  if (cm->is_deopt_pc(pc1)) {
-    pc1 = *(address*)((address)unextended_sp() + cm->orig_pc_offset());
+  nmethod* nm = cb()->as_nmethod();
+  if (nm->is_deopt_pc(pc1)) {
+    pc1 = *(address*)((address)unextended_sp() + nm->orig_pc_offset());
   }

   assert(pc1 != nullptr, "");
-  assert(!cm->is_deopt_pc(pc1), "");
+  assert(!nm->is_deopt_pc(pc1), "");
   assert(_cb == CodeCache::find_blob_fast(pc1), "");

   return pc1;
@@ -344,7 +345,7 @@ void StackChunkFrameStream<frame_kind>::handle_deopted() const {
   address pc1 = pc();
   int oopmap_slot = CodeCache::find_oopmap_slot_fast(pc1);
   if (oopmap_slot < 0) { // UNLIKELY; we could have marked frames for deoptimization in thaw_chunk
-    if (cb()->as_compiled_method()->is_deopt_pc(pc1)) {
+    if (cb()->as_nmethod()->is_deopt_pc(pc1)) {
       pc1 = orig_pc();
       oopmap_slot = CodeCache::find_oopmap_slot_fast(pc1);
     }
diff --git a/src/hotspot/share/runtime/vframe.cpp b/src/hotspot/share/runtime/vframe.cpp
index 9644962ade2..ccb90c427b0 100644
--- a/src/hotspot/share/runtime/vframe.cpp
+++ b/src/hotspot/share/runtime/vframe.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -71,8 +71,8 @@ vframe* vframe::new_vframe(const frame* f, const RegisterMap* reg_map, JavaThrea
   // Compiled frame
   CodeBlob* cb = f->cb();
   if (cb != nullptr) {
-    if (cb->is_compiled()) {
-      CompiledMethod* nm = (CompiledMethod*)cb;
+    if (cb->is_nmethod()) {
+      nmethod* nm = cb->as_nmethod();
       return new compiledVFrame(f, reg_map, thread, nm);
     }

diff --git a/src/hotspot/share/runtime/vframe.hpp b/src/hotspot/share/runtime/vframe.hpp
index 2f2ab48ac43..427fea2f78e 100644
--- a/src/hotspot/share/runtime/vframe.hpp
+++ b/src/hotspot/share/runtime/vframe.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -292,10 +292,10 @@ class vframeStreamCommon : StackObj {
   inline int decode_offset() const;
   inline oop continuation() const;

-  CodeBlob* cb() const { return _frame.cb(); }
-  CompiledMethod* nm() const {
-    assert( cb() != nullptr && cb()->is_compiled(), "usage");
-    return (CompiledMethod*) cb();
+  CodeBlob* cb() const { return _frame.cb(); }
+  nmethod* nm() const {
+    assert(cb() != nullptr, "usage");
+    return cb()->as_nmethod();
   }

   const RegisterMap* reg_map() { return &_reg_map; }
diff --git a/src/hotspot/share/runtime/vframe.inline.hpp b/src/hotspot/share/runtime/vframe.inline.hpp
index 97ab8e4e2db..4630e695ce9 100644
--- a/src/hotspot/share/runtime/vframe.inline.hpp
+++ b/src/hotspot/share/runtime/vframe.inline.hpp
@@ -206,7 +206,7 @@ inline bool vframeStreamCommon::fill_from_frame() {

   // Compiled frame

-  if (cb() != nullptr && cb()->is_compiled()) {
+  if (cb() != nullptr && cb()->is_nmethod()) {
     assert(nm()->method() != nullptr, "must be");
     if (nm()->is_native_method()) {
       // Do not rely on scopeDesc since the pc might be imprecise due to the _last_native_pc trick.
diff --git a/src/hotspot/share/runtime/vframe_hp.cpp b/src/hotspot/share/runtime/vframe_hp.cpp
index b508b091047..28b62c60fd9 100644
--- a/src/hotspot/share/runtime/vframe_hp.cpp
+++ b/src/hotspot/share/runtime/vframe_hp.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -242,7 +242,7 @@ BasicLock* compiledVFrame::resolve_monitor_lock(Location location) const {
 GrowableArray<MonitorInfo*>* compiledVFrame::monitors() const {
   // Natives has no scope
   if (scope() == nullptr) {
-    CompiledMethod* nm = code();
+    nmethod* nm = code();
     Method* method = nm->method();
     assert(method->is_native(), "Expect a native method");
     if (!method->is_synchronized()) {
@@ -299,13 +299,13 @@ GrowableArray<MonitorInfo*>* compiledVFrame::monitors() const {
 }


-compiledVFrame::compiledVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread, CompiledMethod* nm)
+compiledVFrame::compiledVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread, nmethod* nm)
 : javaVFrame(fr, reg_map, thread) {
   _scope = nullptr;
   _vframe_id = 0;
   // Compiled method (native stub or Java code)
   // native wrappers have no scope data, it is implied
-  if (!nm->is_compiled() || !nm->as_compiled_method()->is_native_method()) {
+  if (!nm->is_native_method()) {
     _scope = nm->scope_desc_at(_fr.pc());
   }
 }
@@ -333,15 +333,15 @@ bool compiledVFrame::is_top() const {
 }


-CompiledMethod* compiledVFrame::code() const {
-  return CodeCache::find_compiled(_fr.pc());
+nmethod* compiledVFrame::code() const {
+  return CodeCache::find_nmethod(_fr.pc());
 }


 Method* compiledVFrame::method() const {
   if (scope() == nullptr) {
     // native nmethods have no scope the method is implied
-    nmethod* nm = code()->as_nmethod();
+    nmethod* nm = code();
     assert(nm->is_native_method(), "must be native");
     return nm->method();
   }
@@ -358,7 +358,7 @@ int compiledVFrame::bci() const {
 int compiledVFrame::raw_bci() const {
   if (scope() == nullptr) {
     // native nmethods have no scope the method/bci is implied
-    nmethod* nm = code()->as_nmethod();
+    nmethod* nm = code();
     assert(nm->is_native_method(), "must be native");
     return 0;
   }
@@ -368,7 +368,7 @@ int compiledVFrame::raw_bci() const {
 bool compiledVFrame::should_reexecute() const {
   if (scope() == nullptr) {
     // native nmethods have no scope the method/bci is implied
-    nmethod* nm = code()->as_nmethod();
+    nmethod* nm = code();
     assert(nm->is_native_method(), "must be native");
     return false;
   }
@@ -378,7 +378,7 @@ bool compiledVFrame::should_reexecute() const {
 bool compiledVFrame::has_ea_local_in_scope() const {
   if (scope() == nullptr) {
     // native nmethod, all objs escape
-    assert(code()->as_nmethod()->is_native_method(), "must be native");
+    assert(code()->is_native_method(), "must be native");
     return false;
   }
   return (scope()->objects() != nullptr) || scope()->has_ea_local_in_scope();
@@ -387,7 +387,7 @@ bool compiledVFrame::has_ea_local_in_scope() const {
 bool compiledVFrame::arg_escape() const {
   if (scope() == nullptr) {
     // native nmethod, all objs escape
-    assert(code()->as_nmethod()->is_native_method(), "must be native");
+    assert(code()->is_native_method(), "must be native");
     return false;
   }
   return scope()->arg_escape();
@@ -397,7 +397,7 @@ vframe* compiledVFrame::sender() const {
   const frame f = fr();
   if (scope() == nullptr) {
     // native nmethods have no scope the method/bci is implied
-    nmethod* nm = code()->as_nmethod();
+    nmethod* nm = code();
     assert(nm->is_native_method(), "must be native");
     return vframe::sender();
   } else {
diff --git a/src/hotspot/share/runtime/vframe_hp.hpp b/src/hotspot/share/runtime/vframe_hp.hpp
index 23818e41544..f5bfdbe6dab 100644
--- a/src/hotspot/share/runtime/vframe_hp.hpp
+++ b/src/hotspot/share/runtime/vframe_hp.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -65,7 +65,7 @@ class compiledVFrame: public javaVFrame {

  public:
   // Constructors
-  compiledVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread, CompiledMethod* nm);
+  compiledVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread, nmethod* nm);

   // Update a local in a compiled frame. Update happens when deopt occurs
   void update_local(BasicType type, int index, jvalue value);
@@ -77,7 +77,7 @@ class compiledVFrame: public javaVFrame {
   void update_monitor(int index, MonitorInfo* value);

   // Returns the active nmethod
-  CompiledMethod* code() const;
+  nmethod* code() const;

   // Returns the scopeDesc
   ScopeDesc* scope() const { return _scope; }
diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp
index 2639707a57d..3935a9dcca8 100644
--- a/src/hotspot/share/runtime/vmStructs.cpp
+++ b/src/hotspot/share/runtime/vmStructs.cpp
@@ -306,7 +306,7 @@
   nonstatic_field(Method, _access_flags, AccessFlags) \
   nonstatic_field(Method, _vtable_index, int) \
   nonstatic_field(Method, _intrinsic_id, u2) \
-  volatile_nonstatic_field(Method, _code, CompiledMethod*) \
+  volatile_nonstatic_field(Method, _code, nmethod*) \
   nonstatic_field(Method, _i2i_entry, address) \
   volatile_nonstatic_field(Method, _from_compiled_entry, address) \
   volatile_nonstatic_field(Method, _from_interpreted_entry, address) \
@@ -548,45 +548,37 @@
   /* CodeBlobs (NOTE: incomplete, but only a little) */ \
   /***************************************************/ \
   \
-  nonstatic_field(CodeBlob, _name, const char*) \
-  nonstatic_field(CodeBlob, _size, int) \
-  nonstatic_field(CodeBlob, _header_size, int) \
-  nonstatic_field(CodeBlob, _frame_complete_offset, int) \
-  nonstatic_field(CodeBlob, _data_offset, int) \
-  nonstatic_field(CodeBlob, _frame_size, int) \
-  nonstatic_field(CodeBlob, _oop_maps, ImmutableOopMapSet*) \
-  nonstatic_field(CodeBlob, _code_begin, address) \
-  nonstatic_field(CodeBlob, _code_end, address) \
-  nonstatic_field(CodeBlob, _content_begin, address) \
-  nonstatic_field(CodeBlob, _data_end, address) \
+  nonstatic_field(CodeBlob, _name, const char*) \
+  nonstatic_field(CodeBlob, _size, int) \
+  nonstatic_field(CodeBlob, _header_size, int) \
+  nonstatic_field(CodeBlob, _relocation_size, int) \
+  nonstatic_field(CodeBlob, _content_offset, int) \
+  nonstatic_field(CodeBlob, _code_offset, int) \
+  nonstatic_field(CodeBlob, _frame_complete_offset, int) \
+  nonstatic_field(CodeBlob, _data_offset, int) \
+  nonstatic_field(CodeBlob, _frame_size, int) \
+  nonstatic_field(CodeBlob, _oop_maps, ImmutableOopMapSet*) \
+  nonstatic_field(CodeBlob, _caller_must_gc_arguments, bool) \
   \
   nonstatic_field(DeoptimizationBlob, _unpack_offset, int) \
   \
-  nonstatic_field(RuntimeStub, _caller_must_gc_arguments, bool) \
-  \
-  /********************************************************/ \
-  /* CompiledMethod (NOTE: incomplete, but only a little) */ \
-  /********************************************************/ \
-  \
-  nonstatic_field(CompiledMethod, _method, Method*) \
-  volatile_nonstatic_field(CompiledMethod, _exception_cache, ExceptionCache*) \
-  nonstatic_field(CompiledMethod, _scopes_data_begin, address) \
-  nonstatic_field(CompiledMethod, _deopt_handler_begin, address) \
-  nonstatic_field(CompiledMethod, _deopt_mh_handler_begin, address) \
-  \
   /**************************************************/ \
   /* NMethods (NOTE: incomplete, but only a little) */ \
   /**************************************************/ \
   \
+  nonstatic_field(nmethod, _method, Method*) \
   nonstatic_field(nmethod, _entry_bci, int) \
   nonstatic_field(nmethod, _osr_link, nmethod*) \
   nonstatic_field(nmethod, _state, volatile signed char) \
   nonstatic_field(nmethod, _exception_offset, int) \
+  nonstatic_field(nmethod, _deopt_handler_offset, int) \
+  nonstatic_field(nmethod, _deopt_mh_handler_offset, int) \
   nonstatic_field(nmethod, _orig_pc_offset, int) \
   nonstatic_field(nmethod, _stub_offset, int) \
   nonstatic_field(nmethod, _consts_offset, int) \
   nonstatic_field(nmethod, _oops_offset, int) \
   nonstatic_field(nmethod, _metadata_offset, int) \
+  nonstatic_field(nmethod, _scopes_data_offset, int) \
   nonstatic_field(nmethod, _scopes_pcs_offset, int) \
   nonstatic_field(nmethod, _dependencies_offset, int) \
   nonstatic_field(nmethod, _handler_table_offset, int) \
@@ -597,6 +589,7 @@
   nonstatic_field(nmethod, _osr_entry_point, address) \
   nonstatic_field(nmethod, _compile_id, int) \
   nonstatic_field(nmethod, _comp_level, CompLevel) \
+  volatile_nonstatic_field(nmethod, _exception_cache, ExceptionCache*) \
   \
   unchecked_c2_static_field(Deoptimization, _trap_reason_name, void*) \
   \
@@ -1310,8 +1303,7 @@
   declare_type(AdapterBlob, BufferBlob) \
   declare_type(MethodHandlesAdapterBlob, BufferBlob) \
   declare_type(VtableBlob, BufferBlob) \
-  declare_type(CompiledMethod, CodeBlob) \
-  declare_type(nmethod, CompiledMethod) \
+  declare_type(nmethod, CodeBlob) \
   declare_type(RuntimeStub, RuntimeBlob) \
   declare_type(SingletonBlob, RuntimeBlob) \
   declare_type(SafepointBlob, SingletonBlob) \
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/CodeBlob.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/CodeBlob.java
index 976af5bac20..f546a8cea53 100644
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/CodeBlob.java
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/CodeBlob.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -42,10 +42,9 @@ public class CodeBlob extends VMObject {
   private static AddressField nameField;
   private static CIntegerField sizeField;
   private static CIntegerField headerSizeField;
-  private static AddressField contentBeginField;
-  private static AddressField codeBeginField;
-  private static AddressField codeEndField;
-  private static AddressField dataEndField;
+  private static CIntegerField relocationSizeField;
+  private static CIntegerField contentOffsetField;
+  private static CIntegerField codeOffsetField;
   private static CIntegerField frameCompleteOffsetField;
   private static CIntegerField dataOffsetField;
   private static CIntegerField frameSizeField;
@@ -63,11 +62,10 @@ public class CodeBlob extends VMObject {
     nameField = type.getAddressField("_name");
     sizeField = type.getCIntegerField("_size");
     headerSizeField = type.getCIntegerField("_header_size");
+    relocationSizeField = type.getCIntegerField("_relocation_size");
+    contentOffsetField = type.getCIntegerField("_content_offset");
+    codeOffsetField = type.getCIntegerField("_code_offset");
     frameCompleteOffsetField = type.getCIntegerField("_frame_complete_offset");
-    contentBeginField = type.getAddressField("_content_begin");
-    codeBeginField = type.getAddressField("_code_begin");
-    codeEndField = type.getAddressField("_code_end");
-    dataEndField = type.getAddressField("_data_end");
     dataOffsetField = type.getCIntegerField("_data_offset");
     frameSizeField = type.getCIntegerField("_frame_size");
     oopMapsField = type.getAddressField("_oop_maps");
@@ -86,30 +84,35 @@ public class CodeBlob extends VMObject {
     });
   }

-  public Address headerBegin() { return getAddress(); }
+  public Address headerBegin() { return getAddress(); }

-  public Address headerEnd() { return getAddress().addOffsetTo(getHeaderSize()); }
+  public Address headerEnd() { return getAddress().addOffsetTo(getHeaderSize()); }

-  public Address contentBegin() { return contentBeginField.getValue(addr); }
+  public Address contentBegin() { return headerBegin().addOffsetTo(getContentOffset()); }

-  public Address contentEnd() { return headerBegin().addOffsetTo(getDataOffset()); }
+  public Address contentEnd() { return headerBegin().addOffsetTo(getDataOffset()); }

-  public Address codeBegin() { return codeBeginField.getValue(addr); }
+  public Address codeBegin() { return headerBegin().addOffsetTo(getCodeOffset()); }

-  public Address codeEnd() { return codeEndField.getValue(addr); }
+  public Address codeEnd() { return headerBegin().addOffsetTo(getDataOffset()); }

-  public Address dataBegin() { return headerBegin().addOffsetTo(getDataOffset()); }
+  public Address dataBegin() { return headerBegin().addOffsetTo(getDataOffset()); }

-  public Address dataEnd() { return dataEndField.getValue(addr); }
+  public Address dataEnd() { return headerBegin().addOffsetTo(getSize()); }
+
+  // Offsets
+  public int getContentOffset() { return (int) contentOffsetField.getValue(addr); }
+
+  public int getCodeOffset() { return (int) codeOffsetField .getValue(addr); }

   public long getFrameCompleteOffset() { return frameCompleteOffsetField.getValue(addr); }

-  public int getDataOffset() { return (int) dataOffsetField.getValue(addr); }
+  public int getDataOffset() { return (int) dataOffsetField.getValue(addr); }

   // Sizes
-  public int getSize() { return (int) sizeField.getValue(addr); }
+  public int getSize() { return (int) sizeField.getValue(addr); }

-  public int getHeaderSize() { return (int) headerSizeField.getValue(addr); }
+  public int getHeaderSize() { return (int) headerSizeField.getValue(addr); }

   public long getFrameSizeWords() {
     return (int) frameSizeField.getValue(addr);
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/CompiledMethod.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/CompiledMethod.java
deleted file mode 100644
index cae1c5389d9..00000000000
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/CompiledMethod.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package sun.jvm.hotspot.code;
-
-import java.util.*;
-
-import sun.jvm.hotspot.debugger.Address;
-import sun.jvm.hotspot.oops.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.utilities.*;
-import sun.jvm.hotspot.utilities.Observable;
-import sun.jvm.hotspot.utilities.Observer;
-
-public abstract class CompiledMethod extends CodeBlob {
-  private static AddressField methodField;
-  private static AddressField deoptHandlerBeginField;
-  private static AddressField deoptMhHandlerBeginField;
-  private static AddressField scopesDataBeginField;
-
-  static {
-    VM.registerVMInitializedObserver(new Observer() {
-      public void update(Observable o, Object data) {
-        initialize(VM.getVM().getTypeDataBase());
-      }
-    });
-  }
-
-  private static void initialize(TypeDataBase db) {
-    Type type = db.lookupType("CompiledMethod");
-
-    methodField = type.getAddressField("_method");
-    deoptHandlerBeginField = type.getAddressField("_deopt_handler_begin");
-    deoptMhHandlerBeginField = type.getAddressField("_deopt_mh_handler_begin");
-    scopesDataBeginField = type.getAddressField("_scopes_data_begin");
-  }
-
-  public CompiledMethod(Address addr) {
-    super(addr);
-  }
-
-  public Method getMethod() {
-    return (Method)Metadata.instantiateWrapperFor(methodField.getValue(addr));
-  }
-
-  public Address deoptHandlerBegin() { return deoptHandlerBeginField.getValue(addr); }
-  public Address deoptMhHandlerBegin() { return deoptMhHandlerBeginField.getValue(addr); }
-  public Address scopesDataBegin() { return scopesDataBeginField.getValue(addr); }
-
-  public static int getMethodOffset() { return (int) methodField.getOffset(); }
-
-  @Override
-  public boolean isCompiled() {
-    return true;
-  }
-}
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/NMethod.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/NMethod.java
index 6263f8aa22c..c1f16c7957e 100644
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/NMethod.java
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/NMethod.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@ package sun.jvm.hotspot.code;

 import java.io.*;
 import java.util.*;
 import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.memory.*;
 import sun.jvm.hotspot.oops.*;
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.types.*;
@@ -34,8 +35,9 @@ import sun.jvm.hotspot.utilities.*;
 import sun.jvm.hotspot.utilities.Observable;
 import sun.jvm.hotspot.utilities.Observer;

-public class NMethod extends CompiledMethod {
+public class NMethod extends CodeBlob {
   private static long pcDescSize;
+  private static AddressField methodField;
   /** != InvocationEntryBci if this nmethod is an on-stack replacement method */
   private static CIntegerField entryBCIField;
   /** To support simple linked-list chaining of nmethods */
@@ -43,10 +45,13 @@ public class NMethod extends CompiledMethod {

   /** Offsets for different nmethod parts */
   private static CIntegerField exceptionOffsetField;
+  private static CIntegerField deoptHandlerOffsetField;
+  private static CIntegerField deoptMhHandlerOffsetField;
   private static CIntegerField origPCOffsetField;
   private static CIntegerField stubOffsetField;
   private static CIntegerField oopsOffsetField;
   private static CIntegerField metadataOffsetField;
+  private static CIntegerField scopesDataOffsetField;
   private static CIntegerField scopesPCsOffsetField;
   private static CIntegerField dependenciesOffsetField;
   private static CIntegerField handlerTableOffsetField;
@@ -76,14 +81,18 @@ public class NMethod extends CompiledMethod {
   private static void initialize(TypeDataBase db) {
     Type type = db.lookupType("nmethod");

+    methodField = type.getAddressField("_method");
     entryBCIField = type.getCIntegerField("_entry_bci");
     osrLinkField = type.getAddressField("_osr_link");

     exceptionOffsetField = type.getCIntegerField("_exception_offset");
+    deoptHandlerOffsetField = type.getCIntegerField("_deopt_handler_offset");
+    deoptMhHandlerOffsetField = type.getCIntegerField("_deopt_mh_handler_offset");
     origPCOffsetField = type.getCIntegerField("_orig_pc_offset");
     stubOffsetField = type.getCIntegerField("_stub_offset");
     oopsOffsetField = type.getCIntegerField("_oops_offset");
     metadataOffsetField = type.getCIntegerField("_metadata_offset");
+    scopesDataOffsetField = type.getCIntegerField("_scopes_data_offset");
     scopesPCsOffsetField = type.getCIntegerField("_scopes_pcs_offset");
     dependenciesOffsetField = type.getCIntegerField("_dependencies_offset");
     handlerTableOffsetField = type.getCIntegerField("_handler_table_offset");
@@ -105,6 +114,10 @@ public class NMethod extends CompiledMethod {
     return addr;
   }

+  public Method getMethod() {
+    return (Method)Metadata.instantiateWrapperFor(methodField.getValue(addr));
+  }
+
   // Type info
   public boolean isNMethod() { return true; }
   public boolean isJavaMethod() { return !getMethod().isNative(); }
@@ -117,12 +130,15 @@ public class NMethod extends CompiledMethod {
   public Address instsBegin() { return codeBegin(); }
   public Address instsEnd() { return headerBegin().addOffsetTo(getStubOffset()); }
   public Address exceptionBegin() { return headerBegin().addOffsetTo(getExceptionOffset()); }
+  public Address deoptHandlerBegin() { return headerBegin().addOffsetTo(getDeoptHandlerOffset()); }
+  public Address deoptMhHandlerBegin() { return headerBegin().addOffsetTo(getDeoptMhHandlerOffset()); }
   public Address stubBegin() { return headerBegin().addOffsetTo(getStubOffset()); }
   public Address stubEnd() { return headerBegin().addOffsetTo(getOopsOffset()); }
   public Address oopsBegin() { return headerBegin().addOffsetTo(getOopsOffset()); }
   public Address oopsEnd() { return headerBegin().addOffsetTo(getMetadataOffset()); }
   public Address metadataBegin() { return headerBegin().addOffsetTo(getMetadataOffset()); }
-  public Address metadataEnd() { return scopesDataBegin(); }
+  public Address metadataEnd() { return headerBegin().addOffsetTo(getScopesDataOffset()); }
+  public Address scopesDataBegin() { return headerBegin().addOffsetTo(getScopesDataOffset()); }
   public Address scopesDataEnd() { return headerBegin().addOffsetTo(getScopesPCsOffset()); }
   public Address scopesPCsBegin() { return headerBegin().addOffsetTo(getScopesPCsOffset()); }
   public Address scopesPCsEnd() { return headerBegin().addOffsetTo(getDependenciesOffset()); }
@@ -420,6 +436,7 @@ public class NMethod extends CompiledMethod {
   public static int getVerifiedEntryPointOffset() { return (int) verifiedEntryPointField.getOffset(); }
   public static int getOSREntryPointOffset() { return (int) osrEntryPointField.getOffset(); }
   public static int getEntryBCIOffset() { return (int) entryBCIField.getOffset(); }
+  public static int getMethodOffset() { return (int) methodField.getOffset(); }

   public void print() {
     printOn(System.out);
@@ -497,9 +514,12 @@ public class NMethod extends CompiledMethod {

   private int getEntryBCI() { return (int) entryBCIField .getValue(addr); }
   private int getExceptionOffset() { return (int) exceptionOffsetField .getValue(addr); }
+  private int getDeoptHandlerOffset() { return (int) deoptHandlerOffsetField .getValue(addr); }
+  private int getDeoptMhHandlerOffset() { return (int) deoptMhHandlerOffsetField.getValue(addr); }
   private int getStubOffset() { return (int) stubOffsetField .getValue(addr); }
   private int getOopsOffset() { return (int) oopsOffsetField .getValue(addr); }
   private int getMetadataOffset() { return (int) metadataOffsetField .getValue(addr); }
+  private int getScopesDataOffset() { return (int) scopesDataOffsetField .getValue(addr); }
   private int getScopesPCsOffset() { return (int) scopesPCsOffsetField .getValue(addr); }
   private int getDependenciesOffset() { return (int) dependenciesOffsetField.getValue(addr); }
   private int getHandlerTableOffset() { return (int) handlerTableOffsetField.getValue(addr); }
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/PStack.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/PStack.java
index 038c871a9f9..8f09dcc48ef 100644
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/PStack.java
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/PStack.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -140,7 +140,7 @@ public class PStack extends Tool {
                CodeBlob cb = c.findBlobUnsafe(pc);
                if (cb.isNMethod()) {
                   if (cb.isNativeMethod()) {
-                     out.print(((CompiledMethod)cb).getMethod().externalNameAndSignature());
+                     out.print(((NMethod)cb).getMethod().externalNameAndSignature());
                      long diff = pc.minus(cb.codeBegin());
                      if (diff != 0L) {
                         out.print(" + 0x" + Long.toHexString(diff));
diff --git a/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/HotSpotVMConfig.java b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/HotSpotVMConfig.java
index 7259e0fcab0..c49f24efed5 100644
--- a/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/HotSpotVMConfig.java
+++ b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/HotSpotVMConfig.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -149,7 +149,7 @@ class HotSpotVMConfig extends HotSpotVMConfigAccess {
     final int methodVtableIndexOffset = getFieldOffset("Method::_vtable_index", Integer.class, "int");

     final int methodDataOffset = getFieldOffset("Method::_method_data", Integer.class, "MethodData*");
-    final int methodCodeOffset = getFieldOffset("Method::_code", Integer.class, "CompiledMethod*");
+    final int methodCodeOffset = getFieldOffset("Method::_code", Integer.class, "nmethod*");

     final int methodFlagsForceInline = getConstant("MethodFlags::_misc_force_inline", Integer.class);
     final int methodFlagsDontInline = getConstant("MethodFlags::_misc_dont_inline", Integer.class);
diff --git a/test/jdk/com/sun/jdi/EATests.java b/test/jdk/com/sun/jdi/EATests.java
index b38879e22f4..cd80d01a07f 100644
--- a/test/jdk/com/sun/jdi/EATests.java
+++ b/test/jdk/com/sun/jdi/EATests.java
@@ -1285,7 +1285,7 @@ class EAMaterializeLocalAtObjectReturnTarget extends EATestCaseBaseTarget {

 /////////////////////////////////////////////////////////////////////////////
 // Test if an eliminated object can be reallocated *just* before a call returns an object.
-// (See CompiledMethod::is_at_poll_return())
+// (See nmethod::is_at_poll_return())
 // Details: the callee method has just one safepoint poll at the return. The other safepoint
 // is at the end of an iteration of the endless loop. We can detect if we suspended the target
 // there because the local xy is out of scope there.