8290025: Remove the Sweeper
Reviewed-by: stefank, kvn, iveresov, coleenp, vlivanov, mdoerr
Commit: 054c23f484
Parent: dc7e2562f4
@@ -188,16 +188,11 @@ bool frame::safe_for_sender(JavaThread *thread) {
   }
 
   // We must always be able to find a recognizable pc
-  CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
+  CodeBlob* sender_blob = CodeCache::find_blob(sender_pc);
   if (sender_pc == NULL || sender_blob == NULL) {
     return false;
   }
 
-  // Could be a zombie method
-  if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
-    return false;
-  }
-
   // Could just be some random pointer within the codeBlob
   if (!sender_blob->code_contains(sender_pc)) {
     return false;

@@ -165,10 +165,8 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {
   DEBUG_ONLY(_frame_index = -1;)
 
   // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
-  // when last_Java_sp is non-null but the pc fetched is junk. If we are truly
-  // unlucky the junk value could be to a zombied method and we'll die on the
-  // find_blob call. This is also why we can have no asserts on the validity
-  // of the pc we find here. AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
+  // when last_Java_sp is non-null but the pc fetched is junk.
+  // AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
   // -> pd_last_frame should use a specialized version of pd_last_frame which could
   // call a specilaized frame constructor instead of this one.
   // Then we could use the assert below. However this assert is of somewhat dubious

@@ -160,7 +160,7 @@ address NativeCall::destination() const {
   address destination = instruction_address() + displacement();
 
   // Do we use a trampoline stub for this call?
-  CodeBlob* cb = CodeCache::find_blob_unsafe(addr);  // Else we get assertion if nmethod is zombie.
+  CodeBlob* cb = CodeCache::find_blob(addr);
   assert(cb && cb->is_nmethod(), "sanity");
   nmethod *nm = (nmethod *)cb;
   if (nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {

@@ -456,7 +456,7 @@ bool NativeInstruction::is_movk() {
   return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b11100101;
 }
 
-bool NativeInstruction::is_sigill_zombie_not_entrant() {
+bool NativeInstruction::is_sigill_not_entrant() {
   return uint_at(0) == 0xd4bbd5a1; // dcps1 #0xdead
 }
 
@@ -471,13 +471,13 @@ bool NativeInstruction::is_stop() {
 //-------------------------------------------------------------------
 
 // MT-safe inserting of a jump over a jump or a nop (used by
-// nmethod::make_not_entrant_or_zombie)
+// nmethod::make_not_entrant)
 
 void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
 
   assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");
   assert(nativeInstruction_at(verified_entry)->is_jump_or_nop()
-         || nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(),
+         || nativeInstruction_at(verified_entry)->is_sigill_not_entrant(),
          "Aarch64 cannot replace non-jump with jump");
 
   // Patch this nmethod atomically.

@@ -488,8 +488,7 @@ void NativeJump::patch_verified_entry(address entry, address verified_entry, add
     unsigned int insn = (0b000101 << 26) | ((disp >> 2) & 0x3ffffff);
     *(unsigned int*)verified_entry = insn;
   } else {
-    // We use an illegal instruction for marking a method as
-    // not_entrant or zombie.
+    // We use an illegal instruction for marking a method as not_entrant.
     NativeIllegalInstruction::insert(verified_entry);
   }
 

@@ -79,7 +79,7 @@ public:
   bool is_safepoint_poll();
   bool is_movz();
   bool is_movk();
-  bool is_sigill_zombie_not_entrant();
+  bool is_sigill_not_entrant();
   bool is_stop();
 
  protected:
@@ -123,7 +123,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
   }
 
   // We must always be able to find a recognizable pc
-  CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
+  CodeBlob* sender_blob = CodeCache::find_blob(sender_pc);
   if (sender_pc == NULL || sender_blob == NULL) {
     return false;
   }

@@ -148,10 +148,6 @@ bool frame::safe_for_sender(JavaThread *thread) {
     return sender.is_interpreted_frame_valid(thread);
   }
 
-  if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
-    return false;
-  }
-
   // Could just be some random pointer within the codeBlob
   if (!sender_blob->code_contains(sender_pc)) {
     return false;

@@ -290,7 +290,7 @@ void RawNativeJump::check_verified_entry_alignment(address entry, address verifi
 void RawNativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
   assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "should be");
   int *a = (int *)verified_entry;
-  a[0] = zombie_illegal_instruction; // always illegal
+  a[0] = not_entrant_illegal_instruction; // always illegal
   ICache::invalidate_range((address)&a[0], sizeof a[0]);
 }
 

@@ -63,7 +63,7 @@ class RawNativeInstruction {
 
   // illegal instruction used by NativeJump::patch_verified_entry
   // permanently undefined (UDF): 0xe << 28 | 0b1111111 << 20 | 0b1111 << 4
-  static const int zombie_illegal_instruction = 0xe7f000f0;
+  static const int not_entrant_illegal_instruction = 0xe7f000f0;
 
   static int decode_rotated_imm12(int encoding) {
     int base = encoding & 0xff;
@@ -119,16 +119,11 @@ bool frame::safe_for_sender(JavaThread *thread) {
   address sender_pc = (address) sender_abi->lr;;
 
   // We must always be able to find a recognizable pc.
-  CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
+  CodeBlob* sender_blob = CodeCache::find_blob(sender_pc);
   if (sender_blob == NULL) {
     return false;
   }
 
-  // Could be a zombie method
-  if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
-    return false;
-  }
-
   // It should be safe to construct the sender though it might not be valid.
 
   frame sender(sender_sp, sender_pc);

@@ -122,7 +122,12 @@ void BarrierSetNMethod::disarm(nmethod* nm) {
 }
 
 void BarrierSetNMethod::arm(nmethod* nm, int arm_value) {
-  Unimplemented();
+  if (!supports_entry_barrier(nm)) {
+    return;
+  }
+
+  NativeNMethodBarrier* barrier = get_nmethod_barrier(nm);
+  barrier->release_set_guard_value(arm_value);
 }
 
 bool BarrierSetNMethod::is_armed(nmethod* nm) {
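The hunk above replaces the `Unimplemented()` body of `BarrierSetNMethod::arm` on PPC with a real implementation that writes `arm_value` through the nmethod's entry barrier. A minimal sketch of the guard-value pattern this relies on is shown below; it is not code from this commit, and `get_guard_value()` / `disarmed_guard_value()` are hypothetical helpers standing in for the platform-specific accessors.

```cpp
// Sketch only: an nmethod entry barrier keeps a guard value inside the nmethod.
// Arming stores a value that differs from the current "disarmed" value, which
// forces the next caller of the nmethod through the barrier's slow path.
bool BarrierSetNMethod::is_armed(nmethod* nm) {
  if (!supports_entry_barrier(nm)) {
    return false;                                    // no barrier, nothing to check
  }
  NativeNMethodBarrier* barrier = get_nmethod_barrier(nm);
  return barrier->get_guard_value() != disarmed_guard_value();  // hypothetical accessors
}
```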
@@ -40,14 +40,14 @@
 #include "c1/c1_Runtime1.hpp"
 #endif
 
-// We use an illtrap for marking a method as not_entrant or zombie
+// We use an illtrap for marking a method as not_entrant
 // Work around a C++ compiler bug which changes 'this'
-bool NativeInstruction::is_sigill_zombie_not_entrant_at(address addr) {
+bool NativeInstruction::is_sigill_not_entrant_at(address addr) {
   if (*(int*)addr != 0 /*illtrap*/) return false;
-  CodeBlob* cb = CodeCache::find_blob_unsafe(addr);
+  CodeBlob* cb = CodeCache::find_blob(addr);
   if (cb == NULL || !cb->is_nmethod()) return false;
   nmethod *nm = (nmethod *)cb;
-  // This method is not_entrant or zombie iff the illtrap instruction is
+  // This method is not_entrant iff the illtrap instruction is
   // located at the verified entry point.
   return nm->verified_entry_point() == addr;
 }

@@ -71,7 +71,7 @@ address NativeCall::destination() const {
   // Trampoline stubs are located behind the main code.
   if (destination > addr) {
     // Filter out recursive method invocation (call to verified/unverified entry point).
-    CodeBlob* cb = CodeCache::find_blob_unsafe(addr);   // Else we get assertion if nmethod is zombie.
+    CodeBlob* cb = CodeCache::find_blob(addr);
     assert(cb && cb->is_nmethod(), "sanity");
     nmethod *nm = (nmethod *)cb;
     if (nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {

@@ -196,7 +196,7 @@ intptr_t NativeMovConstReg::data() const {
     return MacroAssembler::get_const(addr);
   }
 
-  CodeBlob* cb = CodeCache::find_blob_unsafe(addr);
+  CodeBlob* cb = CodeCache::find_blob(addr);
   assert(cb != NULL, "Could not find code blob");
   if (MacroAssembler::is_set_narrow_oop(addr, cb->content_begin())) {
     narrowOop no = MacroAssembler::get_narrow_oop(addr, cb->content_begin());

@@ -318,7 +318,7 @@ void NativeMovConstReg::verify() {
   address addr = addr_at(0);
   if (! MacroAssembler::is_load_const_at(addr) &&
       ! MacroAssembler::is_load_const_from_method_toc_at(addr)) {
-    CodeBlob* cb = CodeCache::find_blob_unsafe(addr);   // find_nmethod() asserts if nmethod is zombie.
+    CodeBlob* cb = CodeCache::find_blob(addr);
    if (! (cb != NULL && MacroAssembler::is_calculate_address_from_global_toc_at(addr, cb->content_begin())) &&
        ! (cb != NULL && MacroAssembler::is_set_narrow_oop(addr, cb->content_begin())) &&
        ! MacroAssembler::is_bl(*((int*) addr))) {

@@ -343,7 +343,7 @@ void NativeJump::patch_verified_entry(address entry, address verified_entry, add
     a->b(dest);
   } else {
     // The signal handler will continue at dest=OptoRuntime::handle_wrong_method_stub().
-    // We use an illtrap for marking a method as not_entrant or zombie.
+    // We use an illtrap for marking a method as not_entrant.
     a->illtrap();
   }
   ICache::ppc64_flush_icache_bytes(verified_entry, code_size);

@@ -406,7 +406,7 @@ address NativeCallTrampolineStub::encoded_destination_addr() const {
 }
 
 address NativeCallTrampolineStub::destination(nmethod *nm) const {
-  CodeBlob* cb = nm ? nm : CodeCache::find_blob_unsafe(addr_at(0));
+  CodeBlob* cb = nm ? nm : CodeCache::find_blob(addr_at(0));
   assert(cb != NULL, "Could not find code blob");
   address ctable = cb->content_begin();
 

@@ -67,12 +67,12 @@ class NativeInstruction {
     return MacroAssembler::tdi_get_si16(long_at(0), Assembler::traptoUnconditional, 0);
   }
 
-  // We use an illtrap for marking a method as not_entrant or zombie.
-  bool is_sigill_zombie_not_entrant() {
+  // We use an illtrap for marking a method as not_entrant.
+  bool is_sigill_not_entrant() {
     // Work around a C++ compiler bug which changes 'this'.
-    return NativeInstruction::is_sigill_zombie_not_entrant_at(addr_at(0));
+    return NativeInstruction::is_sigill_not_entrant_at(addr_at(0));
   }
-  static bool is_sigill_zombie_not_entrant_at(address addr);
+  static bool is_sigill_not_entrant_at(address addr);
 
 #ifdef COMPILER2
   // SIGTRAP-based implicit range checks
@@ -175,16 +175,11 @@ bool frame::safe_for_sender(JavaThread *thread) {
   }
 
   // We must always be able to find a recognizable pc
-  CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
+  CodeBlob* sender_blob = CodeCache::find_blob(sender_pc);
   if (sender_pc == NULL || sender_blob == NULL) {
     return false;
   }
 
-  // Could be a zombie method
-  if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
-    return false;
-  }
-
   // Could just be some random pointer within the codeBlob
   if (!sender_blob->code_contains(sender_pc)) {
     return false;

@@ -115,10 +115,8 @@ inline frame::frame(intptr_t* ptr_sp, intptr_t* ptr_fp) {
   _pc = (address)(ptr_sp[-1]);
 
   // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
-  // when last_Java_sp is non-null but the pc fetched is junk. If we are truly
-  // unlucky the junk value could be to a zombied method and we'll die on the
-  // find_blob call. This is also why we can have no asserts on the validity
-  // of the pc we find here. AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
+  // when last_Java_sp is non-null but the pc fetched is junk.
+  // AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
   // -> pd_last_frame should use a specialized version of pd_last_frame which could
   // call a specilaized frame constructor instead of this one.
   // Then we could use the assert below. However this assert is of somewhat dubious

@@ -124,7 +124,7 @@ address NativeCall::destination() const {
   address destination = MacroAssembler::target_addr_for_insn(instruction_address());
 
   // Do we use a trampoline stub for this call?
-  CodeBlob* cb = CodeCache::find_blob_unsafe(addr);   // Else we get assertion if nmethod is zombie.
+  CodeBlob* cb = CodeCache::find_blob(addr);
   assert(cb && cb->is_nmethod(), "sanity");
   nmethod *nm = (nmethod *)cb;
   if (nm != NULL && nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {

@@ -328,7 +328,7 @@ bool NativeInstruction::is_lwu_to_zr(address instr) {
 }
 
 // A 16-bit instruction with all bits ones is permanently reserved as an illegal instruction.
-bool NativeInstruction::is_sigill_zombie_not_entrant() {
+bool NativeInstruction::is_sigill_not_entrant() {
   // jvmci
   return uint_at(0) == 0xffffffff;
 }

@@ -345,14 +345,14 @@ bool NativeInstruction::is_stop() {
 //-------------------------------------------------------------------
 
 // MT-safe inserting of a jump over a jump or a nop (used by
-// nmethod::make_not_entrant_or_zombie)
+// nmethod::make_not_entrant)
 
 void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
 
   assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");
 
   assert(nativeInstruction_at(verified_entry)->is_jump_or_nop() ||
-         nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(),
+         nativeInstruction_at(verified_entry)->is_sigill_not_entrant(),
          "riscv cannot replace non-jump with jump");
 
   // Patch this nmethod atomically.

@@ -371,7 +371,7 @@ void NativeJump::patch_verified_entry(address entry, address verified_entry, add
     *(unsigned int*)verified_entry = insn;
   } else {
     // We use an illegal instruction for marking a method as
-    // not_entrant or zombie.
+    // not_entrant.
     NativeIllegalInstruction::insert(verified_entry);
   }
 

@@ -198,7 +198,7 @@ class NativeInstruction {
   inline bool is_nop() const;
   inline bool is_jump_or_nop();
   bool is_safepoint_poll();
-  bool is_sigill_zombie_not_entrant();
+  bool is_sigill_not_entrant();
   bool is_stop();
 
  protected:
@@ -122,16 +122,11 @@ bool frame::safe_for_sender(JavaThread *thread) {
   address sender_pc = (address) sender_abi->return_pc;
 
   // We must always be able to find a recognizable pc.
-  CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
+  CodeBlob* sender_blob = CodeCache::find_blob(sender_pc);
   if (sender_blob == NULL) {
     return false;
   }
 
-  // Could be a zombie method
-  if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
-    return false;
-  }
-
   // It should be safe to construct the sender though it might not be valid.
 
   frame sender(sender_sp, sender_pc);

@@ -424,7 +419,7 @@ void frame::back_trace(outputStream* st, intptr_t* start_sp, intptr_t* top_pc, u
       }
     }
   } else if (CodeCache::contains(current_pc)) {
-    blob = CodeCache::find_blob_unsafe(current_pc);
+    blob = CodeCache::find_blob(current_pc);
     if (blob) {
       if (blob->is_nmethod()) {
         frame_type = 3;

@@ -4484,7 +4484,7 @@ intptr_t MacroAssembler::get_const_from_toc(address pc) {
   if (is_load_const_from_toc_pcrelative(pc)) {
     dataLoc = pc + offset;
   } else {
-    CodeBlob* cb = CodeCache::find_blob_unsafe(pc);   // Else we get assertion if nmethod is zombie.
+    CodeBlob* cb = CodeCache::find_blob(pc);
     assert(cb && cb->is_nmethod(), "sanity");
     nmethod* nm = (nmethod*)cb;
     dataLoc = nm->ctable_begin() + offset;

@@ -168,20 +168,20 @@ bool NativeInstruction::is_illegal() {
   return halfword_at(-2) == illegal_instruction();
 }
 
-// We use an illtrap for marking a method as not_entrant or zombie.
-bool NativeInstruction::is_sigill_zombie_not_entrant() {
+// We use an illtrap for marking a method as not_entrant.
+bool NativeInstruction::is_sigill_not_entrant() {
   if (!is_illegal()) return false; // Just a quick path.
 
   // One-sided error of is_illegal tolerable here
   // (see implementation of is_illegal() for details).
 
-  CodeBlob* cb = CodeCache::find_blob_unsafe(addr_at(0));
+  CodeBlob* cb = CodeCache::find_blob(addr_at(0));
   if (cb == NULL || !cb->is_nmethod()) {
     return false;
   }
 
   nmethod *nm = (nmethod *)cb;
-  // This method is not_entrant or zombie if the illtrap instruction
+  // This method is not_entrant if the illtrap instruction
   // is located at the verified entry point.
   // BE AWARE: the current pc (this) points to the instruction after the
   // "illtrap" location.

@@ -85,8 +85,8 @@ class NativeInstruction {
   // Bcrl is currently the only accepted instruction here.
   bool is_jump();
 
-  // We use an illtrap for marking a method as not_entrant or zombie.
-  bool is_sigill_zombie_not_entrant();
+  // We use an illtrap for marking a method as not_entrant.
+  bool is_sigill_not_entrant();
 
   bool is_safepoint_poll() {
     // Is the current instruction a POTENTIAL read access to the polling page?
@@ -127,7 +127,7 @@ void CompiledDirectStaticCall::verify() {
   _call->verify_alignment();
 
 #ifdef ASSERT
-  CodeBlob *cb = CodeCache::find_blob_unsafe((address) _call);
+  CodeBlob *cb = CodeCache::find_blob((address) _call);
   assert(cb != NULL, "sanity");
 #endif
 

@@ -177,16 +177,11 @@ bool frame::safe_for_sender(JavaThread *thread) {
   }
 
   // We must always be able to find a recognizable pc
-  CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
+  CodeBlob* sender_blob = CodeCache::find_blob(sender_pc);
   if (sender_pc == NULL || sender_blob == NULL) {
     return false;
   }
 
-  // Could be a zombie method
-  if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
-    return false;
-  }
-
   // Could just be some random pointer within the codeBlob
   if (!sender_blob->code_contains(sender_pc)) {
     return false;

@@ -153,10 +153,8 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {
   DEBUG_ONLY(_frame_index = -1;)
 
   // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
-  // when last_Java_sp is non-null but the pc fetched is junk. If we are truly
-  // unlucky the junk value could be to a zombied method and we'll die on the
-  // find_blob call. This is also why we can have no asserts on the validity
-  // of the pc we find here. AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
+  // when last_Java_sp is non-null but the pc fetched is junk.
+  // AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
   // -> pd_last_frame should use a specialized version of pd_last_frame which could
   // call a specialized frame constructor instead of this one.
   // Then we could use the assert below. However this assert is of somewhat dubious

@@ -495,7 +495,7 @@ void NativeJump::check_verified_entry_alignment(address entry, address verified_
 }
 
 
-// MT safe inserting of a jump over an unknown instruction sequence (used by nmethod::makeZombie)
+// MT safe inserting of a jump over an unknown instruction sequence (used by nmethod::make_not_entrant)
 // The problem: jmp <dest> is a 5-byte instruction. Atomic write can be only with 4 bytes.
 // First patches the first word atomically to be a jump to itself.
 // Then patches the last byte and then atomically patches the first word (4-bytes),

@@ -30,7 +30,7 @@
 #include "nativeInst_zero.hpp"
 #include "runtime/sharedRuntime.hpp"
 
-// This method is called by nmethod::make_not_entrant_or_zombie to
+// This method is called by nmethod::make_not_entrant to
 // insert a jump to SharedRuntime::get_handle_wrong_method_stub()
 // (dest) at the start of a compiled method (verified_entry) to avoid
 // a race where a method is invoked while being made non-entrant.
@@ -635,7 +635,7 @@ int JVM_HANDLE_XXX_SIGNAL(int sig, siginfo_t* info,
   address pc = os::Posix::ucontext_get_pc(uc);
   assert(pc != NULL, "");
   if (NativeDeoptInstruction::is_deopt_at(pc)) {
-    CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
+    CodeBlob* cb = CodeCache::find_blob(pc);
     if (cb != NULL && cb->is_compiled()) {
       MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, t);) // can call PcDescCache::add_pc_desc
       CompiledMethod* cm = cb->as_compiled_method();

@@ -2679,7 +2679,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
     if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
       CompiledMethod* nm = NULL;
       if (in_java) {
-        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
+        CodeBlob* cb = CodeCache::find_blob(pc);
         nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
       }
 

@@ -2698,9 +2698,9 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
     if (in_java &&
         (exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
          exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
-      if (nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) {
+      if (nativeInstruction_at(pc)->is_sigill_not_entrant()) {
         if (TraceTraps) {
-          tty->print_cr("trap: zombie_not_entrant");
+          tty->print_cr("trap: not_entrant");
         }
         return Handle_Exception(exceptionInfo, SharedRuntime::get_handle_wrong_method_stub());
       }

@@ -2729,7 +2729,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
     // Check for UD trap caused by NOP patching.
     // If it is, patch return address to be deopt handler.
     if (NativeDeoptInstruction::is_deopt_at(pc)) {
-      CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
+      CodeBlob* cb = CodeCache::find_blob(pc);
       if (cb != NULL && cb->is_compiled()) {
         CompiledMethod* cm = cb->as_compiled_method();
         frame fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);

@@ -193,7 +193,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
   //
   // SIGILL: the compiler generates illegal opcodes
   //   at places where it wishes to interrupt the VM:
-  //   Safepoints, Unreachable Code, Entry points of Zombie methods,
+  //   Safepoints, Unreachable Code, Entry points of not entrant nmethods,
   //    This results in a SIGILL with (*pc) == inserted illegal instruction.
   //
   //   (so, SIGILLs with a pc inside the zero page are real errors)

@@ -202,7 +202,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
   //   The ppc trap instruction raises a SIGTRAP and is very efficient if it
   //   does not trap. It is used for conditional branches that are expected
   //   to be never taken. These are:
-  //     - zombie methods
+  //     - not entrant nmethods
   //     - IC (inline cache) misses.
   //     - null checks leading to UncommonTraps.
   //     - range checks leading to Uncommon Traps.

@@ -225,9 +225,9 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
     CodeBlob *cb = NULL;
     int stop_type = -1;
     // Handle signal from NativeJump::patch_verified_entry().
-    if (sig == SIGILL && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) {
+    if (sig == SIGILL && nativeInstruction_at(pc)->is_sigill_not_entrant()) {
      if (TraceTraps) {
-        tty->print_cr("trap: zombie_not_entrant");
+        tty->print_cr("trap: not_entrant");
       }
       stub = SharedRuntime::get_handle_wrong_method_stub();
       goto run_stub;
@@ -341,7 +341,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
     else if (sig == SIGBUS) {
       // BugId 4454115: A read from a MappedByteBuffer can fault here if the
       // underlying file has been truncated. Do not crash the VM in such a case.
-      CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
+      CodeBlob* cb = CodeCache::find_blob(pc);
       CompiledMethod* nm = cb ? cb->as_compiled_method_or_null() : NULL;
       bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
       if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) {

@@ -246,9 +246,9 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
 
     // Handle signal from NativeJump::patch_verified_entry().
     if ((sig == SIGILL)
-        && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) {
+        && nativeInstruction_at(pc)->is_sigill_not_entrant()) {
       if (TraceTraps) {
-        tty->print_cr("trap: zombie_not_entrant");
+        tty->print_cr("trap: not_entrant");
       }
       stub = SharedRuntime::get_handle_wrong_method_stub();
     } else if ((sig == SIGSEGV || sig == SIGBUS) && SafepointMechanism::is_poll_address((address)info->si_addr)) {

@@ -265,7 +265,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
       // BugId 4454115: A read from a MappedByteBuffer can fault
       // here if the underlying file has been truncated.
       // Do not crash the VM in such a case.
-      CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
+      CodeBlob* cb = CodeCache::find_blob(pc);
       CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
       bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
       if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) {

@@ -440,7 +440,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
       // BugId 4454115: A read from a MappedByteBuffer can fault
       // here if the underlying file has been truncated.
       // Do not crash the VM in such a case.
-      CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
+      CodeBlob* cb = CodeCache::find_blob(pc);
       CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
       bool is_unsafe_arraycopy = thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc);
       if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) {

@@ -207,9 +207,9 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
 
     // Handle signal from NativeJump::patch_verified_entry().
     if ((sig == SIGILL || sig == SIGTRAP)
-        && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) {
+        && nativeInstruction_at(pc)->is_sigill_not_entrant()) {
       if (TraceTraps) {
-        tty->print_cr("trap: zombie_not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL");
+        tty->print_cr("trap: not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL");
       }
       stub = SharedRuntime::get_handle_wrong_method_stub();
     } else if (sig == SIGSEGV && SafepointMechanism::is_poll_address((address)info->si_addr)) {

@@ -218,7 +218,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
       // BugId 4454115: A read from a MappedByteBuffer can fault
       // here if the underlying file has been truncated.
       // Do not crash the VM in such a case.
-      CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
+      CodeBlob* cb = CodeCache::find_blob(pc);
       CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
       bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
       if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) {

@@ -323,7 +323,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
       // BugId 4454115: A read from a MappedByteBuffer can fault
       // here if the underlying file has been truncated.
       // Do not crash the VM in such a case.
-      CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
+      CodeBlob* cb = CodeCache::find_blob(pc);
       CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
       if ((nm != NULL && nm->has_unsafe_access()) || (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc))) {
         unsafe_access = true;
@@ -331,12 +331,12 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
     } else if (sig == SIGSEGV &&
                MacroAssembler::uses_implicit_null_check(info->si_addr)) {
       // Determination of interpreter/vtable stub/compiled code null exception
-      CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
+      CodeBlob* cb = CodeCache::find_blob(pc);
       if (cb != NULL) {
         stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
       }
-    } else if (sig == SIGILL && *(int *)pc == NativeInstruction::zombie_illegal_instruction) {
-      // Zombie
+    } else if (sig == SIGILL && *(int *)pc == NativeInstruction::not_entrant_illegal_instruction) {
+      // Not entrant
       stub = SharedRuntime::get_handle_wrong_method_stub();
     }
   } else if ((thread->thread_state() == _thread_in_vm ||

@@ -248,9 +248,9 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
     CodeBlob *cb = NULL;
     int stop_type = -1;
     // Handle signal from NativeJump::patch_verified_entry().
-    if (sig == SIGILL && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) {
+    if (sig == SIGILL && nativeInstruction_at(pc)->is_sigill_not_entrant()) {
       if (TraceTraps) {
-        tty->print_cr("trap: zombie_not_entrant");
+        tty->print_cr("trap: not_entrant");
       }
       stub = SharedRuntime::get_handle_wrong_method_stub();
     }

@@ -356,7 +356,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
     else if (sig == SIGBUS) {
       // BugId 4454115: A read from a MappedByteBuffer can fault here if the
       // underlying file has been truncated. Do not crash the VM in such a case.
-      CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
+      CodeBlob* cb = CodeCache::find_blob(pc);
       CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
       bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
       if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) {

@@ -208,9 +208,9 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
 
     // Handle signal from NativeJump::patch_verified_entry().
     if ((sig == SIGILL || sig == SIGTRAP)
-        && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) {
+        && nativeInstruction_at(pc)->is_sigill_not_entrant()) {
       if (TraceTraps) {
-        tty->print_cr("trap: zombie_not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL");
+        tty->print_cr("trap: not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL");
       }
       stub = SharedRuntime::get_handle_wrong_method_stub();
     } else if (sig == SIGSEGV && SafepointMechanism::is_poll_address((address)info->si_addr)) {

@@ -219,7 +219,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
       // BugId 4454115: A read from a MappedByteBuffer can fault
       // here if the underlying file has been truncated.
       // Do not crash the VM in such a case.
-      CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
+      CodeBlob* cb = CodeCache::find_blob(pc);
       CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
       bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
       if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) {

@@ -242,9 +242,9 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
     // a fault inside compiled code, the interpreter, or a stub
 
     // Handle signal from NativeJump::patch_verified_entry().
-    if (sig == SIGILL && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) {
+    if (sig == SIGILL && nativeInstruction_at(pc)->is_sigill_not_entrant()) {
       if (TraceTraps) {
-        tty->print_cr("trap: zombie_not_entrant (SIGILL)");
+        tty->print_cr("trap: not_entrant (SIGILL)");
       }
       stub = SharedRuntime::get_handle_wrong_method_stub();
     }

@@ -302,7 +302,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
     else if (sig == SIGBUS) {
       // BugId 4454115: A read from a MappedByteBuffer can fault here if the
       // underlying file has been truncated. Do not crash the VM in such a case.
-      CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
+      CodeBlob* cb = CodeCache::find_blob(pc);
       CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
       if (nm != NULL && nm->has_unsafe_access()) {
         // We don't really need a stub here! Just set the pending exception and

@@ -257,7 +257,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
       // BugId 4454115: A read from a MappedByteBuffer can fault
       // here if the underlying file has been truncated.
       // Do not crash the VM in such a case.
-      CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
+      CodeBlob* cb = CodeCache::find_blob(pc);
       CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
       bool is_unsafe_arraycopy = thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc);
       if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
@@ -263,9 +263,6 @@ class Compilation: public StackObj {
     return env()->comp_level() == CompLevel_full_profile &&
       C1UpdateMethodData && MethodData::profile_return();
   }
-  bool age_code() const {
-    return _method->profile_aging();
-  }
 
   // will compilation make optimistic assumptions that might lead to
   // deoptimization and that the runtime will account for?

@@ -2683,10 +2683,6 @@ void LIRGenerator::do_Base(Base* x) {
       __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
     }
   }
-  if (compilation()->age_code()) {
-    CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), NULL, false);
-    decrement_age(info);
-  }
   // increment invocation counters if needed
   if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
     profile_parameters(x);

@@ -3253,27 +3249,6 @@ void LIRGenerator::increment_event_counter(CodeEmitInfo* info, LIR_Opr step, int
   increment_event_counter_impl(info, info->scope()->method(), step, right_n_bits(freq_log), bci, backedge, true);
 }
 
-void LIRGenerator::decrement_age(CodeEmitInfo* info) {
-  ciMethod* method = info->scope()->method();
-  MethodCounters* mc_adr = method->ensure_method_counters();
-  if (mc_adr != NULL) {
-    LIR_Opr mc = new_pointer_register();
-    __ move(LIR_OprFact::intptrConst(mc_adr), mc);
-    int offset = in_bytes(MethodCounters::nmethod_age_offset());
-    LIR_Address* counter = new LIR_Address(mc, offset, T_INT);
-    LIR_Opr result = new_register(T_INT);
-    __ load(counter, result);
-    __ sub(result, LIR_OprFact::intConst(1), result);
-    __ store(result, counter);
-    // DeoptimizeStub will reexecute from the current state in code info.
-    CodeStub* deopt = new DeoptimizeStub(info, Deoptimization::Reason_tenured,
-                                         Deoptimization::Action_make_not_entrant);
-    __ cmp(lir_cond_lessEqual, result, LIR_OprFact::intConst(0));
-    __ branch(lir_cond_lessEqual, deopt);
-  }
-}
-
-
 void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
                                                 ciMethod *method, LIR_Opr step, int frequency,
                                                 int bci, bool backedge, bool notify) {

@@ -418,7 +418,6 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
       increment_event_counter(info, step, bci, true);
     }
   }
-  void decrement_age(CodeEmitInfo* info);
   CodeEmitInfo* state_for(Instruction* x, ValueStack* state, bool ignore_xhandler = false);
   CodeEmitInfo* state_for(Instruction* x);
 
@@ -42,6 +42,7 @@
 #include "classfile/vmSymbols.hpp"
 #include "code/codeCache.hpp"
 #include "code/scopeDesc.hpp"
+#include "compiler/compilationLog.hpp"
 #include "compiler/compilationPolicy.hpp"
 #include "compiler/compileBroker.hpp"
 #include "compiler/compilerEvent.hpp"

@@ -1067,6 +1068,9 @@ void ciEnv::register_method(ciMethod* target,
     return;
   }
 
+  // Check if memory should be freed before allocation
+  CodeCache::gc_on_allocation();
+
   // To prevent compile queue updates.
   MutexLocker locker(THREAD, MethodCompileQueue_lock);
 

@@ -1158,12 +1162,6 @@ void ciEnv::register_method(ciMethod* target,
       nm->set_rtm_state(rtm_state);
 #endif
 
-      // Record successful registration.
-      // (Put nm into the task handle *before* publishing to the Java heap.)
-      if (task() != NULL) {
-        task()->set_code(nm);
-      }
-
       if (entry_bci == InvocationEntryBci) {
         if (TieredCompilation) {
           // If there is an old version we're done with it

@@ -1204,15 +1202,19 @@ void ciEnv::register_method(ciMethod* target,
         }
       }
     }
-  } // safepoints are allowed again
+  }
 
+  NoSafepointVerifier nsv;
   if (nm != NULL) {
-    // JVMTI -- compiled method notification (must be done outside lock)
-    nm->post_compiled_method_load_event();
+    // Compilation succeeded, post what we know about it
+    nm->post_compiled_method(task());
+    task()->set_num_inlined_bytecodes(num_inlined_bytecodes());
   } else {
     // The CodeCache is full.
     record_failure("code cache is full");
   }
+
+  // safepoints are allowed again
 }
 
 // ------------------------------------------------------------------
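Both allocation paths touched by this commit (`ciEnv::register_method` above and `AdapterBlob::create` further down) now call `CodeCache::gc_on_allocation()` before reserving code cache space, letting a heuristically triggered GC unload cold code instead of a sweeper thread. A hedged sketch of that caller-side ordering is shown below; `do_allocate()` is a hypothetical placeholder for the real locked allocation path, not an API from this commit.

```cpp
// Sketch only: the ordering used by the callers in this diff.
CodeBlob* do_allocate(int size, CodeBlobType type);  // hypothetical locked allocation helper

CodeBlob* allocate_with_gc_heuristic(int size, CodeBlobType type) {
  // May request GCCause::_codecache_GC_threshold or _codecache_GC_aggressive.
  // Done before any CodeCache/compile-queue lock is taken, so a collection
  // requested here is free to run.
  CodeCache::gc_on_allocation();

  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  return do_allocate(size, type);  // placeholder for CodeCache::allocate(...) under the lock
}
```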
@@ -387,7 +387,6 @@ public:
                        int immediate_oops_patched,
                        RTMState rtm_state = NoRTM);
 
-
   // Access to certain well known ciObjects.
 #define VM_CLASS_FUNC(name, ignore_s) \
   ciInstanceKlass* name() { \

@@ -142,7 +142,6 @@ ciMethod::ciMethod(const methodHandle& h_m, ciInstanceKlass* holder) :
   constantPoolHandle cpool(Thread::current(), h_m->constants());
   _signature = new (env->arena()) ciSignature(_holder, cpool, sig_symbol);
   _method_data = NULL;
-  _nmethod_age = h_m->nmethod_age();
   // Take a snapshot of these values, so they will be commensurate with the MDO.
   if (ProfileInterpreter || CompilerConfig::is_c1_profiling()) {
     int invcnt = h_m->interpreter_invocation_count();

@@ -1208,15 +1207,6 @@ bool ciMethod::check_call(int refinfo_index, bool is_static) const {
   }
   return false;
 }
-
-// ------------------------------------------------------------------
-// ciMethod::profile_aging
-//
-// Should the method be compiled with an age counter?
-bool ciMethod::profile_aging() const {
-  return UseCodeAging && (!MethodCounters::is_nmethod_hot(nmethod_age()) &&
-                          !MethodCounters::is_nmethod_age_unset(nmethod_age()));
-}
 // ------------------------------------------------------------------
 // ciMethod::print_codes
 //

@@ -80,7 +80,6 @@ class ciMethod : public ciMetadata {
   int _max_locals;
   vmIntrinsicID _intrinsic_id;
   int _handler_count;
-  int _nmethod_age;
   int _interpreter_invocation_count;
   int _interpreter_throwout_count;
   int _instructions_size;

@@ -191,10 +190,6 @@ class ciMethod : public ciMetadata {
   int interpreter_invocation_count() const { check_is_loaded(); return _interpreter_invocation_count; }
   int interpreter_throwout_count() const   { check_is_loaded(); return _interpreter_throwout_count; }
   int size_of_parameters() const           { check_is_loaded(); return _size_of_parameters; }
-  int nmethod_age() const                  { check_is_loaded(); return _nmethod_age; }
-
-  // Should the method be compiled with an age counter?
-  bool profile_aging() const;
 
   // Code size for inlining decisions.
   int code_size_for_inlining();
@@ -307,6 +307,8 @@ AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) :
 AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
   ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
 
+  CodeCache::gc_on_allocation();
+
   AdapterBlob* blob = NULL;
   unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob));
   {

@@ -211,18 +211,8 @@ public:
                                                  code_contains(addr) && addr >= code_begin() + _frame_complete_offset; }
   int frame_complete_offset() const              { return _frame_complete_offset; }
 
-  // CodeCache support: really only used by the nmethods, but in order to get
-  // asserts and certain bookkeeping to work in the CodeCache they are defined
-  // virtual here.
-  virtual bool is_zombie() const                 { return false; }
-  virtual bool is_locked_by_vm() const           { return false; }
-
-  virtual bool is_unloaded() const               { return false; }
   virtual bool is_not_entrant() const            { return false; }
 
-  // GC support
-  virtual bool is_alive() const                  = 0;
-
   // OopMap for frame
   ImmutableOopMapSet* oop_maps() const           { return _oop_maps; }
   void set_oop_maps(OopMapSet* p);

@@ -384,9 +374,6 @@ class RuntimeBlob : public CodeBlob {
 
   static void free(RuntimeBlob* blob);
 
-  // GC support
-  virtual bool is_alive() const                  = 0;
-
   void verify();
 
   // OopMap for frame

@@ -435,7 +422,6 @@ class BufferBlob: public RuntimeBlob {
 
   // GC/Verification support
   void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f)  { /* nothing to do */ }
-  bool is_alive() const                          { return true; }
 
   void verify();
   void print_on(outputStream* st) const;

@@ -532,7 +518,6 @@ class RuntimeStub: public RuntimeBlob {
 
   // GC/Verification support
   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f)  { /* nothing to do */ }
-  bool is_alive() const                          { return true; }
 
   void verify();
   void print_on(outputStream* st) const;

@@ -567,8 +552,6 @@ class SingletonBlob: public RuntimeBlob {
 
   address entry_point()                          { return code_begin(); }
 
-  bool is_alive() const                          { return true; }
-
   // GC/Verification support
   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f)  { /* nothing to do */ }
   void verify(); // does nothing

@@ -801,7 +784,6 @@ class UpcallStub: public RuntimeBlob {
   // GC/Verification support
   void oops_do(OopClosure* f, const frame& frame);
   virtual void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) override;
-  virtual bool is_alive() const override { return true; }
   virtual void verify() override;
 
   // Misc.
@@ -56,11 +56,11 @@
 #include "runtime/globals_extension.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/icache.hpp"
+#include "runtime/init.hpp"
 #include "runtime/java.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/os.inline.hpp"
 #include "runtime/safepointVerifiers.hpp"
-#include "runtime/sweeper.hpp"
 #include "runtime/vmThread.hpp"
 #include "services/memoryService.hpp"
 #include "utilities/align.hpp"

@@ -170,9 +170,6 @@ address CodeCache::_high_bound = 0;
 int CodeCache::_number_of_nmethods_with_dependencies = 0;
 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = NULL;
 
-int CodeCache::Sweep::_compiled_method_iterators = 0;
-bool CodeCache::Sweep::_pending_sweep = false;
-
 // Initialize arrays of CodeHeap subsets
 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);

@@ -481,40 +478,6 @@ CodeHeap* CodeCache::get_code_heap(CodeBlobType code_blob_type) {
   return NULL;
 }
 
-void CodeCache::Sweep::begin_compiled_method_iteration() {
-  MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-  // Reach a state without concurrent sweeping
-  while (_compiled_method_iterators < 0) {
-    CodeCache_lock->wait_without_safepoint_check();
-  }
-  _compiled_method_iterators++;
-}
-
-void CodeCache::Sweep::end_compiled_method_iteration() {
-  MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-  // Let the sweeper run again, if we stalled it
-  _compiled_method_iterators--;
-  if (_pending_sweep) {
-    CodeCache_lock->notify_all();
-  }
-}
-
-void CodeCache::Sweep::begin() {
-  assert_locked_or_safepoint(CodeCache_lock);
-  _pending_sweep = true;
-  while (_compiled_method_iterators > 0) {
-    CodeCache_lock->wait_without_safepoint_check();
-  }
-  _pending_sweep = false;
-  _compiled_method_iterators = -1;
-}
-
-void CodeCache::Sweep::end() {
-  assert_locked_or_safepoint(CodeCache_lock);
-  _compiled_method_iterators = 0;
-  CodeCache_lock->notify_all();
-}
-
 CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
   assert_locked_or_safepoint(CodeCache_lock);
   assert(heap != NULL, "heap is null");

@@ -543,8 +506,6 @@ CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  * instantiating.
  */
 CodeBlob* CodeCache::allocate(int size, CodeBlobType code_blob_type, bool handle_alloc_failure, CodeBlobType orig_code_blob_type) {
-  // Possibly wakes up the sweeper thread.
-  NMethodSweeper::report_allocation();
   assert_locked_or_safepoint(CodeCache_lock);
   assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
   if (size <= 0) {

@@ -568,8 +529,6 @@ CodeBlob* CodeCache::allocate(int size, CodeBlobType code_blob_type, bool handle
     if (SegmentedCodeCache) {
       // Fallback solution: Try to store code in another code heap.
       // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
-      // Note that in the sweeper, we check the reverse_free_ratio of the code heap
-      // and force stack scanning if less than 10% of the entire code cache are free.
       CodeBlobType type = code_blob_type;
       switch (type) {
       case CodeBlobType::NonNMethod:
@@ -687,33 +646,14 @@ bool CodeCache::contains(nmethod *nm) {
   return contains((void *)nm);
 }
 
-static bool is_in_asgct() {
-  Thread* current_thread = Thread::current_or_null_safe();
-  return current_thread != NULL && current_thread->is_Java_thread() && JavaThread::cast(current_thread)->in_asgct();
-}
-
-// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
-// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
+// This method is safe to call without holding the CodeCache_lock. It only depends on the _segmap to contain
 // valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
 CodeBlob* CodeCache::find_blob(void* start) {
-  CodeBlob* result = find_blob_unsafe(start);
-  // We could potentially look up non_entrant methods
-  bool is_zombie = result != NULL && result->is_zombie();
-  bool is_result_safe = !is_zombie || result->is_locked_by_vm() || VMError::is_error_reported();
-  guarantee(is_result_safe || is_in_asgct(), "unsafe access to zombie method");
-  // When in ASGCT the previous gurantee will pass for a zombie method but we still don't want that code blob returned in order
-  // to minimize the chance of accessing dead memory
-  return is_result_safe ? result : NULL;
-}
-
-// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
-// what you are doing)
-CodeBlob* CodeCache::find_blob_unsafe(void* start) {
   // NMT can walk the stack before code cache is created
   if (_heaps != NULL) {
     CodeHeap* heap = get_code_heap_containing(start);
     if (heap != NULL) {
-      return heap->find_blob_unsafe(start);
+      return heap->find_blob(start);
     }
   }
   return NULL;
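With the zombie state gone, `find_blob()` above is simply the old `find_blob_unsafe()` body, and the callers converted throughout this diff all follow the same NULL-tolerant shape: look up the pc, then check the result rather than relying on a zombie guarantee. A sketch of that caller shape, where `compiled_method_at` is a hypothetical helper used only for illustration:

```cpp
// Sketch of the typical caller pattern after this change (not a specific call site).
// find_blob() may return NULL for a pc outside the code cache, so callers check
// the result; as_compiled_method_or_null() filters out non-compiled blobs.
CompiledMethod* compiled_method_at(address pc) {
  CodeBlob* cb = CodeCache::find_blob(pc);
  return (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
}
```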
@@ -744,7 +684,7 @@ void CodeCache::nmethods_do(void f(nmethod* nm)) {
 
 void CodeCache::metadata_do(MetadataClosure* f) {
   assert_locked_or_safepoint(CodeCache_lock);
-  NMethodIterator iter(NMethodIterator::only_alive);
+  NMethodIterator iter(NMethodIterator::all_blobs);
   while(iter.next()) {
     iter.method()->metadata_do(f);
   }
@ -758,10 +698,188 @@ int CodeCache::alignment_offset() {
|
||||
return (int)_heaps->first()->alignment_offset();
|
||||
}
|
||||
|
||||
// Calculate the number of GCs after which an nmethod is expected to have been
|
||||
// used in order to not be classed as cold.
|
||||
void CodeCache::update_cold_gc_count() {
|
||||
if (!MethodFlushing || !UseCodeCacheFlushing || NmethodSweepActivity == 0) {
|
||||
// No aging
|
||||
return;
|
||||
}
|
||||
|
||||
size_t last_used = _last_unloading_used;
|
||||
double last_time = _last_unloading_time;
|
||||
|
||||
double time = os::elapsedTime();
|
||||
|
||||
size_t free = unallocated_capacity();
|
||||
size_t max = max_capacity();
|
||||
size_t used = max - free;
|
||||
double gc_interval = time - last_time;
|
||||
|
||||
_unloading_threshold_gc_requested = false;
|
||||
_last_unloading_time = time;
|
||||
_last_unloading_used = used;
|
||||
|
||||
if (last_time == 0.0) {
|
||||
// The first GC doesn't have enough information to make good
|
||||
// decisions, so just keep everything afloat
|
||||
log_info(codecache)("Unknown code cache pressure; don't age code");
|
||||
return;
|
||||
}
|
||||
|
||||
if (gc_interval <= 0.0 || last_used >= used) {
|
||||
// Dodge corner cases where there is no pressure or negative pressure
|
||||
// on the code cache. Just don't unload when this happens.
|
||||
_cold_gc_count = INT_MAX;
|
||||
log_info(codecache)("No code cache pressure; don't age code");
|
||||
return;
|
||||
}
|
||||
|
||||
double allocation_rate = (used - last_used) / gc_interval;
|
||||
|
||||
_unloading_allocation_rates.add(allocation_rate);
|
||||
_unloading_gc_intervals.add(gc_interval);
|
||||
|
||||
size_t aggressive_sweeping_free_threshold = StartAggressiveSweepingAt / 100.0 * max;
|
||||
if (free < aggressive_sweeping_free_threshold) {
|
||||
// We are already in the red zone; be very aggressive to avoid disaster
|
||||
// But not more aggressive than 2. This ensures that an nmethod must
|
||||
// have been unused at least between two GCs to be considered cold still.
|
||||
_cold_gc_count = 2;
|
||||
log_info(codecache)("Code cache critically low; use aggressive aging");
|
||||
return;
|
||||
}
|
||||
|
||||
// The code cache has an expected time for cold nmethods to "time out"
|
||||
// when they have not been used. The time for nmethods to time out
|
||||
// depends on how long we expect we can keep allocating code until
|
||||
// aggressive sweeping starts, based on sampled allocation rates.
|
||||
double average_gc_interval = _unloading_gc_intervals.avg();
|
||||
double average_allocation_rate = _unloading_allocation_rates.avg();
|
||||
double time_to_aggressive = ((double)(free - aggressive_sweeping_free_threshold)) / average_allocation_rate;
|
||||
double cold_timeout = time_to_aggressive / NmethodSweepActivity;
|
||||
|
||||
// Convert time to GC cycles, and crop at INT_MAX. The reason for
|
||||
// that is that the _cold_gc_count will be added to an epoch number
|
||||
// and that addition must not overflow, or we can crash the VM.
|
||||
// But not more aggressive than 2. This ensures that an nmethod must
|
||||
// have been unused at least between two GCs to be considered cold still.
|
||||
_cold_gc_count = MAX2(MIN2((uint64_t)(cold_timeout / average_gc_interval), (uint64_t)INT_MAX), (uint64_t)2);
|
||||
|
||||
double used_ratio = double(used) / double(max);
|
||||
double last_used_ratio = double(last_used) / double(max);
|
||||
log_info(codecache)("Allocation rate: %.3f KB/s, time to aggressive unloading: %.3f s, cold timeout: %.3f s, cold gc count: " UINT64_FORMAT
|
||||
", used: %.3f MB (%.3f%%), last used: %.3f MB (%.3f%%), gc interval: %.3f s",
|
||||
average_allocation_rate / K, time_to_aggressive, cold_timeout, _cold_gc_count,
|
||||
double(used) / M, used_ratio * 100.0, double(last_used) / M, last_used_ratio * 100.0, average_gc_interval);
|
||||
|
||||
}

uint64_t CodeCache::cold_gc_count() {
return _cold_gc_count;
}

void CodeCache::gc_on_allocation() {
if (!is_init_completed()) {
// Let's not heuristically trigger GCs before the JVM is ready for GCs, no matter what
return;
}

size_t free = unallocated_capacity();
size_t max = max_capacity();
size_t used = max - free;
double free_ratio = double(free) / double(max);
if (free_ratio <= StartAggressiveSweepingAt / 100.0) {
// In case the GC is concurrent, we make sure only one thread requests the GC.
if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
log_info(codecache)("Triggering aggressive GC due to having only %.3f%% free memory", free_ratio * 100.0);
Universe::heap()->collect(GCCause::_codecache_GC_aggressive);
}
return;
}

size_t last_used = _last_unloading_used;
if (last_used >= used) {
// No increase since last GC; no need to sweep yet
return;
}
size_t allocated_since_last = used - last_used;
double allocated_since_last_ratio = double(allocated_since_last) / double(max);
double threshold = SweeperThreshold / 100.0;
double used_ratio = double(used) / double(max);
double last_used_ratio = double(last_used) / double(max);
if (used_ratio > threshold) {
// After threshold is reached, scale it by free_ratio so that more aggressive
// GC is triggered as we approach code cache exhaustion
threshold *= free_ratio;
}
// If code cache has been allocated without any GC at all, let's make sure
// it is eventually invoked to avoid trouble.
if (allocated_since_last_ratio > threshold) {
// In case the GC is concurrent, we make sure only one thread requests the GC.
if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
log_info(codecache)("Triggering threshold (%.3f%%) GC due to allocating %.3f%% since last unloading (%.3f%% used -> %.3f%% used)",
threshold * 100.0, allocated_since_last_ratio * 100.0, last_used_ratio * 100.0, used_ratio * 100.0);
Universe::heap()->collect(GCCause::_codecache_GC_threshold);
}
}
}
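The scaling step above is easy to miss: once usage passes SweeperThreshold, the allowed allocation burst shrinks in proportion to the remaining free space. A small illustrative sketch, assuming SweeperThreshold == 15; the function name is made up.

// Returns the fraction of the code cache that may be allocated since the last
// unloading GC before a threshold GC is requested.
double effective_threshold(double used_ratio) {
  const double base = 15.0 / 100.0;        // stand-in for SweeperThreshold
  const double free_ratio = 1.0 - used_ratio;
  return (used_ratio > base) ? base * free_ratio : base;
}
// effective_threshold(0.10) == 0.15, effective_threshold(0.80) == 0.03,
// effective_threshold(0.95) == 0.0075 -- GCs fire after ever smaller bursts.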

// We initialize the _gc_epoch to 2, because previous_completed_gc_marking_cycle
// subtracts the value by 2, and the type is unsigned. We don't want underflow.
//
// Odd values mean that marking is in progress, and even values mean that no
// marking is currently active.
uint64_t CodeCache::_gc_epoch = 2;

// How many GCs after an nmethod has not been used, do we consider it cold?
uint64_t CodeCache::_cold_gc_count = INT_MAX;

double CodeCache::_last_unloading_time = 0.0;
size_t CodeCache::_last_unloading_used = 0;
volatile bool CodeCache::_unloading_threshold_gc_requested = false;
TruncatedSeq CodeCache::_unloading_gc_intervals(10 /* samples */);
TruncatedSeq CodeCache::_unloading_allocation_rates(10 /* samples */);

uint64_t CodeCache::gc_epoch() {
return _gc_epoch;
}

bool CodeCache::is_gc_marking_cycle_active() {
// Odd means that marking is active
return (_gc_epoch % 2) == 1;
}

uint64_t CodeCache::previous_completed_gc_marking_cycle() {
if (is_gc_marking_cycle_active()) {
return _gc_epoch - 2;
} else {
return _gc_epoch - 1;
}
}

void CodeCache::on_gc_marking_cycle_start() {
assert(!is_gc_marking_cycle_active(), "Previous marking cycle never ended");
++_gc_epoch;
}

void CodeCache::on_gc_marking_cycle_finish() {
assert(is_gc_marking_cycle_active(), "Marking cycle started before last one finished");
++_gc_epoch;
update_cold_gc_count();
}
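The parity convention can be illustrated with a tiny sketch (not part of the patch; names are invented):

#include <cstdint>

// epoch: 2 (idle) -> 3 (marking) -> 4 (idle) -> 5 (marking) -> 6 (idle) -> ...
bool sketch_marking_active(uint64_t epoch) { return (epoch % 2) == 1; }

uint64_t sketch_previous_completed(uint64_t epoch) {
  // While marking at epoch 5, the last finished cycle ran at epoch 3 (5 - 2);
  // while idle at epoch 6, it ran at epoch 5 (6 - 1). Starting the counter at
  // 2 keeps both subtractions from wrapping the unsigned type.
  return sketch_marking_active(epoch) ? epoch - 2 : epoch - 1;
}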

void CodeCache::arm_all_nmethods() {
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm != NULL) {
bs_nm->arm_all_nmethods();
}
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(bool unloading_occurred) {
assert_locked_or_safepoint(CodeCache_lock);
CompiledMethodIterator iter(CompiledMethodIterator::only_alive);
CompiledMethodIterator iter(CompiledMethodIterator::all_blobs);
while(iter.next()) {
iter.method()->do_unloading(unloading_occurred);
}
@ -771,24 +889,21 @@ void CodeCache::blobs_do(CodeBlobClosure* f) {
assert_locked_or_safepoint(CodeCache_lock);
FOR_ALL_ALLOCABLE_HEAPS(heap) {
FOR_ALL_BLOBS(cb, *heap) {
if (cb->is_alive()) {
f->do_code_blob(cb);
f->do_code_blob(cb);
#ifdef ASSERT
if (cb->is_nmethod()) {
Universe::heap()->verify_nmethod((nmethod*)cb);
}
#endif //ASSERT
if (cb->is_nmethod()) {
Universe::heap()->verify_nmethod((nmethod*)cb);
}
#endif //ASSERT
}
}
}

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
NMethodIterator iter(NMethodIterator::only_not_unloading);
while(iter.next()) {
nmethod* nm = iter.method();
assert(!nm->is_unloaded(), "Tautology");
nm->verify_clean_inline_caches();
nm->verify();
}
@ -840,7 +955,50 @@ void CodeCache::purge_exception_caches() {
_exception_cache_purge_list = NULL;
}

// Register an is_unloading nmethod to be flushed after unlinking
void CodeCache::register_unlinked(nmethod* nm) {
assert(nm->unlinked_next() == NULL, "Only register for unloading once");
for (;;) {
// Only need acquire when reading the head, when the next
// pointer is walked, which it is not here.
nmethod* head = Atomic::load(&_unlinked_head);
nmethod* next = head != NULL ? head : nm; // Self looped means end of list
nm->set_unlinked_next(next);
if (Atomic::cmpxchg(&_unlinked_head, head, nm) == head) {
break;
}
}
}
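The self-looped next pointer is what lets NULL keep meaning "not registered" while still marking the tail of the list. A generic sketch of the same lock-free push, written against std::atomic rather than HotSpot's Atomic wrappers (type and function names are illustrative):

#include <atomic>

struct Node {
  std::atomic<Node*> next{nullptr};
};

std::atomic<Node*> list_head{nullptr};

void push(Node* n) {
  for (;;) {
    // Relaxed load is enough here: the next pointers are not walked on this path.
    Node* head = list_head.load(std::memory_order_relaxed);
    n->next.store(head != nullptr ? head : n, std::memory_order_relaxed);  // self loop marks the tail
    if (list_head.compare_exchange_weak(head, n, std::memory_order_release)) {
      break;
    }
  }
}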

// Flush all the nmethods the GC unlinked
void CodeCache::flush_unlinked_nmethods() {
nmethod* nm = _unlinked_head;
_unlinked_head = NULL;
size_t freed_memory = 0;
while (nm != NULL) {
nmethod* next = nm->unlinked_next();
freed_memory += nm->total_size();
nm->flush();
if (next == nm) {
// Self looped means end of list
break;
}
nm = next;
}

// Try to start the compiler again if we freed any memory
if (!CompileBroker::should_compile_new_jobs() && freed_memory != 0) {
CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
log_info(codecache)("Restarting compiler");
EventJitRestart event;
event.set_freedMemory(freed_memory);
event.set_codeCacheMaxCapacity(CodeCache::max_capacity());
event.commit();
}
}

uint8_t CodeCache::_unloading_cycle = 1;
nmethod* volatile CodeCache::_unlinked_head = NULL;

void CodeCache::increment_unloading_cycle() {
// 2-bit value (see IsUnloadingState in nmethod.cpp for details)
@ -863,12 +1021,13 @@ CodeCache::UnloadingScope::UnloadingScope(BoolObjectClosure* is_alive)
CodeCache::UnloadingScope::~UnloadingScope() {
IsUnloadingBehaviour::set_current(_saved_behaviour);
DependencyContext::cleaning_end();
CodeCache::flush_unlinked_nmethods();
}

void CodeCache::verify_oops() {
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
VerifyOopClosure voc;
NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
NMethodIterator iter(NMethodIterator::only_not_unloading);
while(iter.next()) {
nmethod* nm = iter.method();
nm->oops_do(&voc);
@ -1057,17 +1216,18 @@ int CodeCache::number_of_nmethods_with_dependencies() {

void CodeCache::clear_inline_caches() {
assert_locked_or_safepoint(CodeCache_lock);
CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading);
while(iter.next()) {
iter.method()->clear_inline_caches();
}
}

void CodeCache::cleanup_inline_caches() {
// Only used by whitebox API
void CodeCache::cleanup_inline_caches_whitebox() {
assert_locked_or_safepoint(CodeCache_lock);
NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
NMethodIterator iter(NMethodIterator::only_not_unloading);
while(iter.next()) {
iter.method()->cleanup_inline_caches(/*clean_all=*/true);
iter.method()->cleanup_inline_caches_whitebox();
}
}
|
||||
|
||||
@ -1129,7 +1289,7 @@ static void reset_old_method_table() {
|
||||
}
|
||||
}
|
||||
|
||||
// Remove this method when zombied or unloaded.
|
||||
// Remove this method when flushed.
|
||||
void CodeCache::unregister_old_nmethod(CompiledMethod* c) {
|
||||
assert_lock_strong(CodeCache_lock);
|
||||
if (old_compiled_method_table != NULL) {
|
||||
@ -1147,8 +1307,8 @@ void CodeCache::old_nmethods_do(MetadataClosure* f) {
|
||||
length = old_compiled_method_table->length();
|
||||
for (int i = 0; i < length; i++) {
|
||||
CompiledMethod* cm = old_compiled_method_table->at(i);
|
||||
// Only walk alive nmethods, the dead ones will get removed by the sweeper or GC.
|
||||
if (cm->is_alive() && !cm->is_unloading()) {
|
||||
// Only walk !is_unloading nmethods, the other ones will get removed by the GC.
|
||||
if (!cm->is_unloading()) {
|
||||
old_compiled_method_table->at(i)->metadata_do(f);
|
||||
}
|
||||
}
|
||||
@ -1164,7 +1324,7 @@ int CodeCache::mark_dependents_for_evol_deoptimization() {
|
||||
reset_old_method_table();
|
||||
|
||||
int number_of_marked_CodeBlobs = 0;
|
||||
CompiledMethodIterator iter(CompiledMethodIterator::only_alive);
|
||||
CompiledMethodIterator iter(CompiledMethodIterator::all_blobs);
|
||||
while(iter.next()) {
|
||||
CompiledMethod* nm = iter.method();
|
||||
// Walk all alive nmethods to check for old Methods.
|
||||
@ -1184,7 +1344,7 @@ int CodeCache::mark_dependents_for_evol_deoptimization() {
|
||||
|
||||
void CodeCache::mark_all_nmethods_for_evol_deoptimization() {
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
|
||||
CompiledMethodIterator iter(CompiledMethodIterator::only_alive);
|
||||
CompiledMethodIterator iter(CompiledMethodIterator::all_blobs);
|
||||
while(iter.next()) {
|
||||
CompiledMethod* nm = iter.method();
|
||||
if (!nm->method()->is_method_handle_intrinsic()) {
|
||||
@ -1216,7 +1376,7 @@ void CodeCache::flush_evol_dependents() {
|
||||
// Mark methods for deopt (if safe or possible).
|
||||
void CodeCache::mark_all_nmethods_for_deoptimization() {
|
||||
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
|
||||
CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
|
||||
CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading);
|
||||
while(iter.next()) {
|
||||
CompiledMethod* nm = iter.method();
|
||||
if (!nm->is_native_method()) {
|
||||
@ -1229,7 +1389,7 @@ int CodeCache::mark_for_deoptimization(Method* dependee) {
|
||||
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
|
||||
int number_of_marked_CodeBlobs = 0;
|
||||
|
||||
CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
|
||||
CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading);
|
||||
while(iter.next()) {
|
||||
CompiledMethod* nm = iter.method();
|
||||
if (nm->is_dependent_on_method(dependee)) {
|
||||
@ -1243,7 +1403,7 @@ int CodeCache::mark_for_deoptimization(Method* dependee) {
|
||||
}
|
||||
|
||||
void CodeCache::make_marked_nmethods_deoptimized() {
|
||||
SweeperBlockingCompiledMethodIterator iter(SweeperBlockingCompiledMethodIterator::only_alive_and_not_unloading);
|
||||
RelaxedCompiledMethodIterator iter(RelaxedCompiledMethodIterator::only_not_unloading);
|
||||
while(iter.next()) {
|
||||
CompiledMethod* nm = iter.method();
|
||||
if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) {
|
||||
@ -1298,9 +1458,7 @@ void CodeCache::verify() {
|
||||
FOR_ALL_HEAPS(heap) {
|
||||
(*heap)->verify();
|
||||
FOR_ALL_BLOBS(cb, *heap) {
|
||||
if (cb->is_alive()) {
|
||||
cb->verify();
|
||||
}
|
||||
cb->verify();
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1414,10 +1572,7 @@ void CodeCache::print_internals() {
|
||||
int uncommonTrapStubCount = 0;
|
||||
int bufferBlobCount = 0;
|
||||
int total = 0;
|
||||
int nmethodAlive = 0;
|
||||
int nmethodNotEntrant = 0;
|
||||
int nmethodZombie = 0;
|
||||
int nmethodUnloaded = 0;
|
||||
int nmethodJava = 0;
|
||||
int nmethodNative = 0;
|
||||
int max_nm_size = 0;
|
||||
@ -1437,17 +1592,12 @@ void CodeCache::print_internals() {
|
||||
ResourceMark rm;
|
||||
char *method_name = nm->method()->name_and_sig_as_C_string();
|
||||
tty->print("%s", method_name);
|
||||
if(nm->is_alive()) { tty->print_cr(" alive"); }
|
||||
if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
|
||||
if(nm->is_zombie()) { tty->print_cr(" zombie"); }
|
||||
}
|
||||
|
||||
nmethodCount++;
|
||||
|
||||
if(nm->is_alive()) { nmethodAlive++; }
|
||||
if(nm->is_not_entrant()) { nmethodNotEntrant++; }
|
||||
if(nm->is_zombie()) { nmethodZombie++; }
|
||||
if(nm->is_unloaded()) { nmethodUnloaded++; }
|
||||
if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }
|
||||
|
||||
if(nm->method() != NULL && nm->is_java_method()) {
|
||||
@ -1484,10 +1634,7 @@ void CodeCache::print_internals() {
|
||||
tty->print_cr("Code Cache Entries (total of %d)",total);
|
||||
tty->print_cr("-------------------------------------------------");
|
||||
tty->print_cr("nmethods: %d",nmethodCount);
|
||||
tty->print_cr("\talive: %d",nmethodAlive);
|
||||
tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
|
||||
tty->print_cr("\tzombie: %d",nmethodZombie);
|
||||
tty->print_cr("\tunloaded: %d",nmethodUnloaded);
|
||||
tty->print_cr("\tjava: %d",nmethodJava);
|
||||
tty->print_cr("\tnative: %d",nmethodNative);
|
||||
tty->print_cr("runtime_stubs: %d",runtimeStubCount);
|
||||
@ -1495,7 +1642,7 @@ void CodeCache::print_internals() {
|
||||
tty->print_cr("buffer blobs: %d",bufferBlobCount);
|
||||
tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
|
||||
tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
|
||||
tty->print_cr("\nnmethod size distribution (non-zombie java)");
|
||||
tty->print_cr("\nnmethod size distribution");
|
||||
tty->print_cr("-------------------------------------------------");
|
||||
|
||||
for(int i=0; i<bucketLimit; i++) {
|
||||
@ -1519,7 +1666,6 @@ void CodeCache::print() {
|
||||
if (!Verbose) return;
|
||||
|
||||
CodeBlob_sizes live[CompLevel_full_optimization + 1];
|
||||
CodeBlob_sizes dead[CompLevel_full_optimization + 1];
|
||||
CodeBlob_sizes runtimeStub;
|
||||
CodeBlob_sizes uncommonTrapStub;
|
||||
CodeBlob_sizes deoptimizationStub;
|
||||
@ -1532,11 +1678,7 @@ void CodeCache::print() {
|
||||
if (cb->is_nmethod()) {
|
||||
const int level = cb->as_nmethod()->comp_level();
|
||||
assert(0 <= level && level <= CompLevel_full_optimization, "Invalid compilation level");
|
||||
if (!cb->is_alive()) {
|
||||
dead[level].add(cb);
|
||||
} else {
|
||||
live[level].add(cb);
|
||||
}
|
||||
live[level].add(cb);
|
||||
} else if (cb->is_runtime_stub()) {
|
||||
runtimeStub.add(cb);
|
||||
} else if (cb->is_deoptimization_stub()) {
|
||||
@ -1568,7 +1710,6 @@ void CodeCache::print() {
|
||||
}
|
||||
tty->print_cr("%s:", level_name);
|
||||
live[i].print("live");
|
||||
dead[i].print("dead");
|
||||
}
|
||||
|
||||
struct {
|
||||
@ -1595,14 +1736,12 @@ void CodeCache::print() {
|
||||
int map_size = 0;
|
||||
FOR_ALL_ALLOCABLE_HEAPS(heap) {
|
||||
FOR_ALL_BLOBS(cb, *heap) {
|
||||
if (cb->is_alive()) {
|
||||
number_of_blobs++;
|
||||
code_size += cb->code_size();
|
||||
ImmutableOopMapSet* set = cb->oop_maps();
|
||||
if (set != NULL) {
|
||||
number_of_oop_maps += set->count();
|
||||
map_size += set->nr_of_bytes();
|
||||
}
|
||||
number_of_blobs++;
|
||||
code_size += cb->code_size();
|
||||
ImmutableOopMapSet* set = cb->oop_maps();
|
||||
if (set != NULL) {
|
||||
number_of_oop_maps += set->count();
|
||||
map_size += set->nr_of_bytes();
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1659,7 +1798,7 @@ void CodeCache::print_summary(outputStream* st, bool detailed) {
|
||||
void CodeCache::print_codelist(outputStream* st) {
|
||||
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
|
||||
|
||||
CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
|
||||
CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading);
|
||||
while (iter.next()) {
|
||||
CompiledMethod* cm = iter.method();
|
||||
ResourceMark rm;
|
||||
@ -1698,7 +1837,7 @@ void CodeCache::write_perf_map() {
|
||||
return;
|
||||
}
|
||||
|
||||
AllCodeBlobsIterator iter(AllCodeBlobsIterator::only_alive_and_not_unloading);
|
||||
AllCodeBlobsIterator iter(AllCodeBlobsIterator::only_not_unloading);
|
||||
while (iter.next()) {
|
||||
CodeBlob *cb = iter.method();
|
||||
ResourceMark rm;
|
||||
|
@ -33,6 +33,7 @@
|
||||
#include "oops/instanceKlass.hpp"
|
||||
#include "oops/oopsHierarchy.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "utilities/numberSeq.hpp"
|
||||
|
||||
// The CodeCache implements the code cache for various pieces of generated
|
||||
// code, e.g., compiled java methods, runtime stubs, transition frames, etc.
|
||||
@ -95,7 +96,16 @@ class CodeCache : AllStatic {
|
||||
static address _low_bound; // Lower bound of CodeHeap addresses
|
||||
static address _high_bound; // Upper bound of CodeHeap addresses
|
||||
static int _number_of_nmethods_with_dependencies; // Total number of nmethods with dependencies
|
||||
static uint8_t _unloading_cycle; // Global state for recognizing old nmethods that need to be unloaded
|
||||
|
||||
static uint8_t _unloading_cycle; // Global state for recognizing old nmethods that need to be unloaded
|
||||
static uint64_t _gc_epoch; // Global state for tracking when nmethods were found to be on-stack
|
||||
static uint64_t _cold_gc_count; // Global state for determining how many GCs are needed before an nmethod is cold
|
||||
static size_t _last_unloading_used;
|
||||
static double _last_unloading_time;
|
||||
static TruncatedSeq _unloading_gc_intervals;
|
||||
static TruncatedSeq _unloading_allocation_rates;
|
||||
static volatile bool _unloading_threshold_gc_requested;
|
||||
static nmethod* volatile _unlinked_head;
|
||||
|
||||
static ExceptionCache* volatile _exception_cache_purge_list;
|
||||
|
||||
@ -116,21 +126,6 @@ class CodeCache : AllStatic {
|
||||
static CodeBlob* first_blob(CodeHeap* heap); // Returns the first CodeBlob on the given CodeHeap
|
||||
static CodeBlob* first_blob(CodeBlobType code_blob_type); // Returns the first CodeBlob of the given type
|
||||
static CodeBlob* next_blob(CodeHeap* heap, CodeBlob* cb); // Returns the next CodeBlob on the given CodeHeap
|
||||
public:
|
||||
|
||||
class Sweep {
|
||||
friend class CodeCache;
|
||||
template <class T, class Filter, bool is_compiled_method> friend class CodeBlobIterator;
|
||||
private:
|
||||
static int _compiled_method_iterators;
|
||||
static bool _pending_sweep;
|
||||
public:
|
||||
static void begin();
|
||||
static void end();
|
||||
private:
|
||||
static void begin_compiled_method_iteration();
|
||||
static void end_compiled_method_iteration();
|
||||
};
|
||||
|
||||
private:
|
||||
static size_t bytes_allocated_in_freelists();
|
||||
@ -168,7 +163,6 @@ class CodeCache : AllStatic {
|
||||
|
||||
// Lookup
|
||||
static CodeBlob* find_blob(void* start); // Returns the CodeBlob containing the given address
|
||||
static CodeBlob* find_blob_unsafe(void* start); // Same as find_blob but does not fail if looking up a zombie method
|
||||
static CodeBlob* find_blob_fast(void* start); // Returns the CodeBlob containing the given address
|
||||
static CodeBlob* find_blob_and_oopmap(void* start, int& slot); // Returns the CodeBlob containing the given address
|
||||
static int find_oopmap_slot_fast(void* start); // Returns a fast oopmap slot if there is any; -1 otherwise
|
||||
@ -197,6 +191,22 @@ class CodeCache : AllStatic {
|
||||
~UnloadingScope();
|
||||
};
|
||||
|
||||
// Code cache unloading heuristics
|
||||
static uint64_t cold_gc_count();
|
||||
static void update_cold_gc_count();
|
||||
static void gc_on_allocation();
|
||||
|
||||
// The GC epoch and marking_cycle code below is there to support sweeping
|
||||
// nmethods in loom stack chunks.
|
||||
static uint64_t gc_epoch();
|
||||
static bool is_gc_marking_cycle_active();
|
||||
static uint64_t previous_completed_gc_marking_cycle();
|
||||
static void on_gc_marking_cycle_start();
|
||||
static void on_gc_marking_cycle_finish();
|
||||
static void arm_all_nmethods();
|
||||
|
||||
static void flush_unlinked_nmethods();
|
||||
static void register_unlinked(nmethod* nm);
|
||||
static void do_unloading(bool unloading_occurred);
|
||||
static uint8_t unloading_cycle() { return _unloading_cycle; }
|
||||
|
||||
@ -239,7 +249,7 @@ class CodeCache : AllStatic {
|
||||
static bool is_non_nmethod(address addr);
|
||||
|
||||
static void clear_inline_caches(); // clear all inline caches
|
||||
static void cleanup_inline_caches(); // clean unloaded/zombie nmethods from inline caches
|
||||
static void cleanup_inline_caches_whitebox(); // clean bad nmethods from inline caches
|
||||
|
||||
// Returns true if an own CodeHeap for the given CodeBlobType is available
|
||||
static bool heap_available(CodeBlobType code_blob_type);
|
||||
@ -328,31 +338,18 @@ class CodeCache : AllStatic {
|
||||
|
||||
|
||||
// Iterator to iterate over code blobs in the CodeCache.
|
||||
template <class T, class Filter, bool is_compiled_method> class CodeBlobIterator : public StackObj {
|
||||
// The relaxed iterators only hold the CodeCache_lock across next calls
|
||||
template <class T, class Filter, bool is_relaxed> class CodeBlobIterator : public StackObj {
|
||||
public:
|
||||
enum LivenessFilter { all_blobs, only_alive, only_alive_and_not_unloading };
|
||||
enum LivenessFilter { all_blobs, only_not_unloading };
|
||||
|
||||
private:
|
||||
CodeBlob* _code_blob; // Current CodeBlob
|
||||
GrowableArrayIterator<CodeHeap*> _heap;
|
||||
GrowableArrayIterator<CodeHeap*> _end;
|
||||
bool _only_alive;
|
||||
bool _only_not_unloading;
|
||||
|
||||
void initialize_iteration(T* nm) {
|
||||
if (Filter::heaps() == NULL) {
|
||||
return;
|
||||
}
|
||||
_heap = Filter::heaps()->begin();
|
||||
_end = Filter::heaps()->end();
|
||||
// If set to NULL, initialized by first call to next()
|
||||
_code_blob = (CodeBlob*)nm;
|
||||
if (nm != NULL) {
|
||||
while(!(*_heap)->contains_blob(_code_blob)) {
|
||||
++_heap;
|
||||
}
|
||||
assert((*_heap)->contains_blob(_code_blob), "match not found");
|
||||
}
|
||||
}
|
||||
|
||||
bool next_impl() {
|
||||
@ -366,11 +363,6 @@ template <class T, class Filter, bool is_compiled_method> class CodeBlobIterator
|
||||
continue;
|
||||
}
|
||||
|
||||
// Filter is_alive as required
|
||||
if (_only_alive && !_code_blob->is_alive()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Filter is_unloading as required
|
||||
if (_only_not_unloading) {
|
||||
CompiledMethod* cm = _code_blob->as_compiled_method_or_null();
|
||||
@ -385,26 +377,26 @@ template <class T, class Filter, bool is_compiled_method> class CodeBlobIterator
|
||||
|
||||
public:
|
||||
CodeBlobIterator(LivenessFilter filter, T* nm = NULL)
|
||||
: _only_alive(filter == only_alive || filter == only_alive_and_not_unloading),
|
||||
_only_not_unloading(filter == only_alive_and_not_unloading)
|
||||
: _only_not_unloading(filter == only_not_unloading)
|
||||
{
|
||||
if (is_compiled_method) {
|
||||
CodeCache::Sweep::begin_compiled_method_iteration();
|
||||
initialize_iteration(nm);
|
||||
} else {
|
||||
initialize_iteration(nm);
|
||||
if (Filter::heaps() == NULL) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
~CodeBlobIterator() {
|
||||
if (is_compiled_method) {
|
||||
CodeCache::Sweep::end_compiled_method_iteration();
|
||||
_heap = Filter::heaps()->begin();
|
||||
_end = Filter::heaps()->end();
|
||||
// If set to NULL, initialized by first call to next()
|
||||
_code_blob = nm;
|
||||
if (nm != NULL) {
|
||||
while(!(*_heap)->contains_blob(_code_blob)) {
|
||||
++_heap;
|
||||
}
|
||||
assert((*_heap)->contains_blob(_code_blob), "match not found");
|
||||
}
|
||||
}
|
||||
|
||||
// Advance iterator to next blob
|
||||
bool next() {
|
||||
if (is_compiled_method) {
|
||||
if (is_relaxed) {
|
||||
MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
|
||||
return next_impl();
|
||||
} else {
|
||||
@ -458,10 +450,9 @@ struct AllCodeBlobsFilter {
static const GrowableArray<CodeHeap*>* heaps() { return CodeCache::heaps(); }
};

typedef CodeBlobIterator<CompiledMethod, CompiledMethodFilter, false /* is_compiled_method */> CompiledMethodIterator;
typedef CodeBlobIterator<nmethod, NMethodFilter, false /* is_compiled_method */> NMethodIterator;
typedef CodeBlobIterator<CodeBlob, AllCodeBlobsFilter, false /* is_compiled_method */> AllCodeBlobsIterator;

typedef CodeBlobIterator<CompiledMethod, CompiledMethodFilter, true /* is_compiled_method */> SweeperBlockingCompiledMethodIterator;
typedef CodeBlobIterator<CompiledMethod, CompiledMethodFilter, false /* is_relaxed */> CompiledMethodIterator;
typedef CodeBlobIterator<CompiledMethod, CompiledMethodFilter, true /* is_relaxed */> RelaxedCompiledMethodIterator;
typedef CodeBlobIterator<nmethod, NMethodFilter, false /* is_relaxed */> NMethodIterator;
typedef CodeBlobIterator<CodeBlob, AllCodeBlobsFilter, false /* is_relaxed */> AllCodeBlobsIterator;

#endif // SHARE_CODE_CODECACHE_HPP
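As a usage note, here is an illustrative walk with the relaxed flavour, mirroring make_marked_nmethods_deoptimized in codeCache.cpp above: the relaxed iterator takes the CodeCache_lock only around each next() call, so long walks do not hold the lock continuously.

RelaxedCompiledMethodIterator iter(RelaxedCompiledMethodIterator::only_not_unloading);
while (iter.next()) {
  CompiledMethod* cm = iter.method();
  // ... inspect or mark cm; the lock is not held between iterations ...
}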
|
||||
|
@ -27,8 +27,8 @@
|
||||
#include "code/codeHeapState.hpp"
|
||||
#include "compiler/compileBroker.hpp"
|
||||
#include "oops/klass.inline.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "runtime/safepoint.hpp"
|
||||
#include "runtime/sweeper.hpp"
|
||||
#include "utilities/powerOfTwo.hpp"
|
||||
|
||||
// -------------------------
|
||||
@ -216,18 +216,16 @@ const char* blobTypeName[] = {"noType"
|
||||
, "nMethod (active)"
|
||||
, "nMethod (inactive)"
|
||||
, "nMethod (deopt)"
|
||||
, "nMethod (zombie)"
|
||||
, "nMethod (unloaded)"
|
||||
, "runtime stub"
|
||||
, "ricochet stub"
|
||||
, "deopt stub"
|
||||
, "uncommon trap stub"
|
||||
, "exception stub"
|
||||
, "safepoint stub"
|
||||
, "adapter blob"
|
||||
, "MH adapter blob"
|
||||
, "buffer blob"
|
||||
, "lastType"
|
||||
, "runtime stub"
|
||||
, "ricochet stub"
|
||||
, "deopt stub"
|
||||
, "uncommon trap stub"
|
||||
, "exception stub"
|
||||
, "safepoint stub"
|
||||
, "adapter blob"
|
||||
, "MH adapter blob"
|
||||
, "buffer blob"
|
||||
, "lastType"
|
||||
};
|
||||
const char* compTypeName[] = { "none", "c1", "c2", "jvmci" };
|
||||
|
||||
@ -249,8 +247,6 @@ static bool segment_granules = false;
|
||||
static unsigned int nBlocks_t1 = 0; // counting "in_use" nmethods only.
|
||||
static unsigned int nBlocks_t2 = 0; // counting "in_use" nmethods only.
|
||||
static unsigned int nBlocks_alive = 0; // counting "not_used" and "not_entrant" nmethods only.
|
||||
static unsigned int nBlocks_dead = 0; // counting "zombie" and "unloaded" methods only.
|
||||
static unsigned int nBlocks_unloaded = 0; // counting "unloaded" nmethods only. This is a transient state.
|
||||
static unsigned int nBlocks_stub = 0;
|
||||
|
||||
static struct FreeBlk* FreeArray = NULL;
|
||||
@ -262,11 +258,6 @@ static unsigned int used_topSizeBlocks = 0;
|
||||
|
||||
static struct SizeDistributionElement* SizeDistributionArray = NULL;
|
||||
|
||||
// nMethod temperature (hotness) indicators.
|
||||
static int avgTemp = 0;
|
||||
static int maxTemp = 0;
|
||||
static int minTemp = 0;
|
||||
|
||||
static unsigned int latest_compilation_id = 0;
|
||||
static volatile bool initialization_complete = false;
|
||||
|
||||
@ -319,8 +310,6 @@ void CodeHeapState::get_HeapStatGlobals(outputStream* out, const char* heapName)
|
||||
nBlocks_t1 = CodeHeapStatArray[ix].nBlocks_t1;
|
||||
nBlocks_t2 = CodeHeapStatArray[ix].nBlocks_t2;
|
||||
nBlocks_alive = CodeHeapStatArray[ix].nBlocks_alive;
|
||||
nBlocks_dead = CodeHeapStatArray[ix].nBlocks_dead;
|
||||
nBlocks_unloaded = CodeHeapStatArray[ix].nBlocks_unloaded;
|
||||
nBlocks_stub = CodeHeapStatArray[ix].nBlocks_stub;
|
||||
FreeArray = CodeHeapStatArray[ix].FreeArray;
|
||||
alloc_freeBlocks = CodeHeapStatArray[ix].alloc_freeBlocks;
|
||||
@ -328,9 +317,6 @@ void CodeHeapState::get_HeapStatGlobals(outputStream* out, const char* heapName)
|
||||
alloc_topSizeBlocks = CodeHeapStatArray[ix].alloc_topSizeBlocks;
|
||||
used_topSizeBlocks = CodeHeapStatArray[ix].used_topSizeBlocks;
|
||||
SizeDistributionArray = CodeHeapStatArray[ix].SizeDistributionArray;
|
||||
avgTemp = CodeHeapStatArray[ix].avgTemp;
|
||||
maxTemp = CodeHeapStatArray[ix].maxTemp;
|
||||
minTemp = CodeHeapStatArray[ix].minTemp;
|
||||
} else {
|
||||
StatArray = NULL;
|
||||
seg_size = 0;
|
||||
@ -341,8 +327,6 @@ void CodeHeapState::get_HeapStatGlobals(outputStream* out, const char* heapName)
|
||||
nBlocks_t1 = 0;
|
||||
nBlocks_t2 = 0;
|
||||
nBlocks_alive = 0;
|
||||
nBlocks_dead = 0;
|
||||
nBlocks_unloaded = 0;
|
||||
nBlocks_stub = 0;
|
||||
FreeArray = NULL;
|
||||
alloc_freeBlocks = 0;
|
||||
@ -350,9 +334,6 @@ void CodeHeapState::get_HeapStatGlobals(outputStream* out, const char* heapName)
|
||||
alloc_topSizeBlocks = 0;
|
||||
used_topSizeBlocks = 0;
|
||||
SizeDistributionArray = NULL;
|
||||
avgTemp = 0;
|
||||
maxTemp = 0;
|
||||
minTemp = 0;
|
||||
}
|
||||
}
|
||||
|
||||
@ -367,8 +348,6 @@ void CodeHeapState::set_HeapStatGlobals(outputStream* out, const char* heapName)
|
||||
CodeHeapStatArray[ix].nBlocks_t1 = nBlocks_t1;
|
||||
CodeHeapStatArray[ix].nBlocks_t2 = nBlocks_t2;
|
||||
CodeHeapStatArray[ix].nBlocks_alive = nBlocks_alive;
|
||||
CodeHeapStatArray[ix].nBlocks_dead = nBlocks_dead;
|
||||
CodeHeapStatArray[ix].nBlocks_unloaded = nBlocks_unloaded;
|
||||
CodeHeapStatArray[ix].nBlocks_stub = nBlocks_stub;
|
||||
CodeHeapStatArray[ix].FreeArray = FreeArray;
|
||||
CodeHeapStatArray[ix].alloc_freeBlocks = alloc_freeBlocks;
|
||||
@ -376,9 +355,6 @@ void CodeHeapState::set_HeapStatGlobals(outputStream* out, const char* heapName)
|
||||
CodeHeapStatArray[ix].alloc_topSizeBlocks = alloc_topSizeBlocks;
|
||||
CodeHeapStatArray[ix].used_topSizeBlocks = used_topSizeBlocks;
|
||||
CodeHeapStatArray[ix].SizeDistributionArray = SizeDistributionArray;
|
||||
CodeHeapStatArray[ix].avgTemp = avgTemp;
|
||||
CodeHeapStatArray[ix].maxTemp = maxTemp;
|
||||
CodeHeapStatArray[ix].minTemp = minTemp;
|
||||
}
|
||||
}
|
||||
|
||||
@ -659,8 +635,6 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular
|
||||
nBlocks_t1 = 0;
|
||||
nBlocks_t2 = 0;
|
||||
nBlocks_alive = 0;
|
||||
nBlocks_dead = 0;
|
||||
nBlocks_unloaded = 0;
|
||||
nBlocks_stub = 0;
|
||||
|
||||
nBlocks_free = 0;
|
||||
@ -692,19 +666,13 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular
|
||||
size_t aliveSpace = 0;
|
||||
size_t disconnSpace = 0;
|
||||
size_t notentrSpace = 0;
|
||||
size_t deadSpace = 0;
|
||||
size_t unloadedSpace = 0;
|
||||
size_t stubSpace = 0;
|
||||
size_t freeSpace = 0;
|
||||
size_t maxFreeSize = 0;
|
||||
HeapBlock* maxFreeBlock = NULL;
|
||||
bool insane = false;
|
||||
|
||||
int64_t hotnessAccumulator = 0;
|
||||
unsigned int n_methods = 0;
|
||||
avgTemp = 0;
|
||||
minTemp = (int)(res_size > M ? (res_size/M)*2 : 1);
|
||||
maxTemp = -minTemp;
|
||||
|
||||
for (HeapBlock *h = heap->first_block(); h != NULL && !insane; h = heap->next_block(h)) {
|
||||
unsigned int hb_len = (unsigned int)h->length(); // despite being size_t, length can never overflow an unsigned int.
|
||||
@ -758,7 +726,6 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular
|
||||
if (cbType != noType) {
|
||||
const char* blob_name = nullptr;
|
||||
unsigned int nm_size = 0;
|
||||
int temperature = 0;
|
||||
nmethod* nm = cb->as_nmethod_or_null();
|
||||
if (nm != NULL) { // no is_readable check required, nm = (nmethod*)cb.
|
||||
ResourceMark rm;
|
||||
@ -784,11 +751,7 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular
|
||||
switch (cbType) {
|
||||
case nMethod_inuse: { // only for executable methods!!!
|
||||
// space for these cbs is accounted for later.
|
||||
temperature = nm->hotness_counter();
|
||||
hotnessAccumulator += temperature;
|
||||
n_methods++;
|
||||
maxTemp = (temperature > maxTemp) ? temperature : maxTemp;
|
||||
minTemp = (temperature < minTemp) ? temperature : minTemp;
|
||||
break;
|
||||
}
|
||||
case nMethod_notused:
|
||||
@ -803,14 +766,6 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular
|
||||
aliveSpace += hb_bytelen;
|
||||
notentrSpace += hb_bytelen;
|
||||
break;
|
||||
case nMethod_unloaded:
|
||||
nBlocks_unloaded++;
|
||||
unloadedSpace += hb_bytelen;
|
||||
break;
|
||||
case nMethod_dead:
|
||||
nBlocks_dead++;
|
||||
deadSpace += hb_bytelen;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@ -828,7 +783,6 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular
|
||||
TopSizeArray[0].len = hb_len;
|
||||
TopSizeArray[0].index = tsbStopper;
|
||||
TopSizeArray[0].nm_size = nm_size;
|
||||
TopSizeArray[0].temperature = temperature;
|
||||
TopSizeArray[0].compiler = cType;
|
||||
TopSizeArray[0].level = comp_lvl;
|
||||
TopSizeArray[0].type = cbType;
|
||||
@ -846,7 +800,6 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular
|
||||
TopSizeArray[used_topSizeBlocks].len = hb_len;
|
||||
TopSizeArray[used_topSizeBlocks].index = tsbStopper;
|
||||
TopSizeArray[used_topSizeBlocks].nm_size = nm_size;
|
||||
TopSizeArray[used_topSizeBlocks].temperature = temperature;
|
||||
TopSizeArray[used_topSizeBlocks].compiler = cType;
|
||||
TopSizeArray[used_topSizeBlocks].level = comp_lvl;
|
||||
TopSizeArray[used_topSizeBlocks].type = cbType;
|
||||
@ -889,7 +842,6 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular
|
||||
TopSizeArray[i].len = hb_len;
|
||||
TopSizeArray[i].index = used_topSizeBlocks;
|
||||
TopSizeArray[i].nm_size = nm_size;
|
||||
TopSizeArray[i].temperature = temperature;
|
||||
TopSizeArray[i].compiler = cType;
|
||||
TopSizeArray[i].level = comp_lvl;
|
||||
TopSizeArray[i].type = cbType;
|
||||
@ -931,7 +883,6 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular
|
||||
TopSizeArray[j].len = hb_len;
|
||||
TopSizeArray[j].index = tsbStopper; // already set!!
|
||||
TopSizeArray[i].nm_size = nm_size;
|
||||
TopSizeArray[i].temperature = temperature;
|
||||
TopSizeArray[j].compiler = cType;
|
||||
TopSizeArray[j].level = comp_lvl;
|
||||
TopSizeArray[j].type = cbType;
|
||||
@ -947,7 +898,6 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular
|
||||
TopSizeArray[i].len = hb_len;
|
||||
TopSizeArray[i].index = j;
|
||||
TopSizeArray[i].nm_size = nm_size;
|
||||
TopSizeArray[i].temperature = temperature;
|
||||
TopSizeArray[i].compiler = cType;
|
||||
TopSizeArray[i].level = comp_lvl;
|
||||
TopSizeArray[i].type = cbType;
|
||||
@ -999,20 +949,7 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular
|
||||
StatArray[ix_beg].level = comp_lvl;
|
||||
StatArray[ix_beg].compiler = cType;
|
||||
break;
|
||||
case nMethod_alive:
|
||||
StatArray[ix_beg].tx_count++;
|
||||
StatArray[ix_beg].tx_space += (unsigned short)hb_len;
|
||||
StatArray[ix_beg].tx_age = StatArray[ix_beg].tx_age < compile_id ? compile_id : StatArray[ix_beg].tx_age;
|
||||
StatArray[ix_beg].level = comp_lvl;
|
||||
StatArray[ix_beg].compiler = cType;
|
||||
break;
|
||||
case nMethod_dead:
|
||||
case nMethod_unloaded:
|
||||
StatArray[ix_beg].dead_count++;
|
||||
StatArray[ix_beg].dead_space += (unsigned short)hb_len;
|
||||
break;
|
||||
default:
|
||||
// must be a stub, if it's not a dead or alive nMethod
|
||||
nBlocks_stub++;
|
||||
stubSpace += hb_bytelen;
|
||||
StatArray[ix_beg].stub_count++;
|
||||
@ -1055,29 +992,7 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular
|
||||
StatArray[ix_end].level = comp_lvl;
|
||||
StatArray[ix_end].compiler = cType;
|
||||
break;
|
||||
case nMethod_alive:
|
||||
StatArray[ix_beg].tx_count++;
|
||||
StatArray[ix_beg].tx_space += (unsigned short)beg_space;
|
||||
StatArray[ix_beg].tx_age = StatArray[ix_beg].tx_age < compile_id ? compile_id : StatArray[ix_beg].tx_age;
|
||||
|
||||
StatArray[ix_end].tx_count++;
|
||||
StatArray[ix_end].tx_space += (unsigned short)end_space;
|
||||
StatArray[ix_end].tx_age = StatArray[ix_end].tx_age < compile_id ? compile_id : StatArray[ix_end].tx_age;
|
||||
|
||||
StatArray[ix_beg].level = comp_lvl;
|
||||
StatArray[ix_beg].compiler = cType;
|
||||
StatArray[ix_end].level = comp_lvl;
|
||||
StatArray[ix_end].compiler = cType;
|
||||
break;
|
||||
case nMethod_dead:
|
||||
case nMethod_unloaded:
|
||||
StatArray[ix_beg].dead_count++;
|
||||
StatArray[ix_beg].dead_space += (unsigned short)beg_space;
|
||||
StatArray[ix_end].dead_count++;
|
||||
StatArray[ix_end].dead_space += (unsigned short)end_space;
|
||||
break;
|
||||
default:
|
||||
// must be a stub, if it's not a dead or alive nMethod
|
||||
nBlocks_stub++;
|
||||
stubSpace += hb_bytelen;
|
||||
StatArray[ix_beg].stub_count++;
|
||||
@ -1102,20 +1017,7 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular
|
||||
StatArray[ix].level = comp_lvl;
|
||||
StatArray[ix].compiler = cType;
|
||||
break;
|
||||
case nMethod_alive:
|
||||
StatArray[ix].tx_count++;
|
||||
StatArray[ix].tx_space += (unsigned short)(granule_size>>log2_seg_size);
|
||||
StatArray[ix].tx_age = StatArray[ix].tx_age < compile_id ? compile_id : StatArray[ix].tx_age;
|
||||
StatArray[ix].level = comp_lvl;
|
||||
StatArray[ix].compiler = cType;
|
||||
break;
|
||||
case nMethod_dead:
|
||||
case nMethod_unloaded:
|
||||
StatArray[ix].dead_count++;
|
||||
StatArray[ix].dead_space += (unsigned short)(granule_size>>log2_seg_size);
|
||||
break;
|
||||
default:
|
||||
// must be a stub, if it's not a dead or alive nMethod
|
||||
StatArray[ix].stub_count++;
|
||||
StatArray[ix].stub_space += (unsigned short)(granule_size>>log2_seg_size);
|
||||
break;
|
||||
@ -1138,8 +1040,6 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular
|
||||
ast->print_cr(" Alive Space = " SIZE_FORMAT_W(8) "k, nBlocks_alive = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", aliveSpace/(size_t)K, nBlocks_alive, (100.0*aliveSpace)/size, (100.0*aliveSpace)/res_size);
|
||||
ast->print_cr(" disconnected = " SIZE_FORMAT_W(8) "k, nBlocks_disconn = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", disconnSpace/(size_t)K, nBlocks_disconn, (100.0*disconnSpace)/size, (100.0*disconnSpace)/res_size);
|
||||
ast->print_cr(" not entrant = " SIZE_FORMAT_W(8) "k, nBlocks_notentr = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", notentrSpace/(size_t)K, nBlocks_notentr, (100.0*notentrSpace)/size, (100.0*notentrSpace)/res_size);
|
||||
ast->print_cr(" unloadedSpace = " SIZE_FORMAT_W(8) "k, nBlocks_unloaded = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", unloadedSpace/(size_t)K, nBlocks_unloaded, (100.0*unloadedSpace)/size, (100.0*unloadedSpace)/res_size);
|
||||
ast->print_cr(" deadSpace = " SIZE_FORMAT_W(8) "k, nBlocks_dead = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", deadSpace/(size_t)K, nBlocks_dead, (100.0*deadSpace)/size, (100.0*deadSpace)/res_size);
|
||||
ast->print_cr(" stubSpace = " SIZE_FORMAT_W(8) "k, nBlocks_stub = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", stubSpace/(size_t)K, nBlocks_stub, (100.0*stubSpace)/size, (100.0*stubSpace)/res_size);
|
||||
ast->print_cr("ZombieBlocks = %8d. These are HeapBlocks which could not be identified as CodeBlobs.", nBlocks_zomb);
|
||||
ast->cr();
|
||||
@ -1150,22 +1050,6 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular
|
||||
ast->print_cr("latest allocated compilation id = %d", latest_compilation_id);
|
||||
ast->print_cr("highest observed compilation id = %d", highest_compilation_id);
|
||||
ast->print_cr("Building TopSizeList iterations = %ld", total_iterations);
|
||||
ast->cr();
|
||||
|
||||
int reset_val = NMethodSweeper::hotness_counter_reset_val();
|
||||
double reverse_free_ratio = (res_size > size) ? (double)res_size/(double)(res_size-size) : (double)res_size;
|
||||
printBox(ast, '-', "Method hotness information at time of this analysis", NULL);
|
||||
ast->print_cr("Highest possible method temperature: %12d", reset_val);
|
||||
ast->print_cr("Threshold for method to be considered 'cold': %12.3f", -reset_val + reverse_free_ratio * NmethodSweepActivity);
|
||||
if (n_methods > 0) {
|
||||
avgTemp = hotnessAccumulator/n_methods;
|
||||
ast->print_cr("min. hotness = %6d", minTemp);
|
||||
ast->print_cr("avg. hotness = %6d", avgTemp);
|
||||
ast->print_cr("max. hotness = %6d", maxTemp);
|
||||
} else {
|
||||
avgTemp = 0;
|
||||
ast->print_cr("No hotness data available");
|
||||
}
|
||||
BUFFEREDSTREAM_FLUSH("\n")
|
||||
|
||||
// This loop is intentionally printing directly to "out".
|
||||
@ -1185,9 +1069,6 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular
|
||||
if (StatArray[ix].stub_count > granule_segs) {
|
||||
out->print_cr("stub_count[%d] = %d", ix, StatArray[ix].stub_count);
|
||||
}
|
||||
if (StatArray[ix].dead_count > granule_segs) {
|
||||
out->print_cr("dead_count[%d] = %d", ix, StatArray[ix].dead_count);
|
||||
}
|
||||
if (StatArray[ix].t1_space > granule_segs) {
|
||||
out->print_cr("t1_space[%d] = %d", ix, StatArray[ix].t1_space);
|
||||
}
|
||||
@ -1200,14 +1081,11 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granular
|
||||
if (StatArray[ix].stub_space > granule_segs) {
|
||||
out->print_cr("stub_space[%d] = %d", ix, StatArray[ix].stub_space);
|
||||
}
|
||||
if (StatArray[ix].dead_space > granule_segs) {
|
||||
out->print_cr("dead_space[%d] = %d", ix, StatArray[ix].dead_space);
|
||||
}
|
||||
// this cast is awful! I need it because NT/Intel reports a signed/unsigned mismatch.
|
||||
if ((size_t)(StatArray[ix].t1_count+StatArray[ix].t2_count+StatArray[ix].tx_count+StatArray[ix].stub_count+StatArray[ix].dead_count) > granule_segs) {
|
||||
if ((size_t)(StatArray[ix].t1_count+StatArray[ix].t2_count+StatArray[ix].tx_count+StatArray[ix].stub_count) > granule_segs) {
|
||||
out->print_cr("t1_count[%d] = %d, t2_count[%d] = %d, tx_count[%d] = %d, stub_count[%d] = %d", ix, StatArray[ix].t1_count, ix, StatArray[ix].t2_count, ix, StatArray[ix].tx_count, ix, StatArray[ix].stub_count);
|
||||
}
|
||||
if ((size_t)(StatArray[ix].t1_space+StatArray[ix].t2_space+StatArray[ix].tx_space+StatArray[ix].stub_space+StatArray[ix].dead_space) > granule_segs) {
|
||||
if ((size_t)(StatArray[ix].t1_space+StatArray[ix].t2_space+StatArray[ix].tx_space+StatArray[ix].stub_space) > granule_segs) {
|
||||
out->print_cr("t1_space[%d] = %d, t2_space[%d] = %d, tx_space[%d] = %d, stub_space[%d] = %d", ix, StatArray[ix].t1_space, ix, StatArray[ix].t2_space, ix, StatArray[ix].tx_space, ix, StatArray[ix].stub_space);
|
||||
}
|
||||
}
|
||||
@ -1377,7 +1255,7 @@ void CodeHeapState::print_usedSpace(outputStream* out, CodeHeap* heap) {
|
||||
ast->print("%9s", "compiler");
|
||||
ast->fill_to(66);
|
||||
ast->print_cr("%6s", "method");
|
||||
ast->print_cr("%18s %13s %17s %4s %9s %5s %s", "Addr(module) ", "offset", "size", "type", " type lvl", " temp", "Name");
|
||||
ast->print_cr("%18s %13s %17s %9s %5s %s", "Addr(module) ", "offset", "size", "type", " type lvl", "Name");
|
||||
BUFFEREDSTREAM_FLUSH_LOCKED("")
|
||||
|
||||
//---< print Top Ten Used Blocks >---
|
||||
@ -1420,14 +1298,8 @@ void CodeHeapState::print_usedSpace(outputStream* out, CodeHeap* heap) {
|
||||
//---< compiler information >---
|
||||
ast->fill_to(56);
|
||||
ast->print("%5s %3d", compTypeName[TopSizeArray[i].compiler], TopSizeArray[i].level);
|
||||
//---< method temperature >---
|
||||
ast->fill_to(67);
|
||||
ast->print("%5d", TopSizeArray[i].temperature);
|
||||
//---< name and signature >---
|
||||
ast->fill_to(67+6);
|
||||
if (TopSizeArray[i].type == nMethod_dead) {
|
||||
ast->print(" zombie method ");
|
||||
}
|
||||
ast->print("%s", TopSizeArray[i].blob_name);
|
||||
} else {
|
||||
//---< block size in hex >---
|
||||
@ -1772,7 +1644,7 @@ void CodeHeapState::print_count(outputStream* out, CodeHeap* heap) {
|
||||
for (unsigned int ix = 0; ix < alloc_granules; ix++) {
|
||||
print_line_delim(out, ast, low_bound, ix, granules_per_line);
|
||||
unsigned int count = StatArray[ix].t1_count + StatArray[ix].t2_count + StatArray[ix].tx_count
|
||||
+ StatArray[ix].stub_count + StatArray[ix].dead_count;
|
||||
+ StatArray[ix].stub_count;
|
||||
print_count_single(ast, count);
|
||||
}
|
||||
}
|
||||
@ -1859,29 +1731,9 @@ void CodeHeapState::print_count(outputStream* out, CodeHeap* heap) {
|
||||
BUFFEREDSTREAM_FLUSH_LOCKED("\n\n\n")
|
||||
}
|
||||
|
||||
{
|
||||
if (nBlocks_dead > 0) {
|
||||
printBox(ast, '-', "Dead nMethod count only, 0x1..0xf. '*' indicates >= 16 blocks, ' ' indicates empty", NULL);
|
||||
|
||||
granules_per_line = 128;
|
||||
for (unsigned int ix = 0; ix < alloc_granules; ix++) {
|
||||
print_line_delim(out, ast, low_bound, ix, granules_per_line);
|
||||
if (segment_granules && StatArray[ix].dead_count > 0) {
|
||||
print_blobType_single(ast, StatArray[ix].type);
|
||||
} else {
|
||||
print_count_single(ast, StatArray[ix].dead_count);
|
||||
}
|
||||
}
|
||||
ast->print("|");
|
||||
} else {
|
||||
ast->print("No dead nMethods found in CodeHeap.");
|
||||
}
|
||||
BUFFEREDSTREAM_FLUSH_LOCKED("\n\n\n")
|
||||
}
|
||||
|
||||
{
|
||||
if (!segment_granules) { // Prevent totally redundant printouts
|
||||
printBox(ast, '-', "Count by tier (combined, no dead blocks): <#t1>:<#t2>:<#s>, 0x0..0xf. '*' indicates >= 16 blocks", NULL);
|
||||
printBox(ast, '-', "Count by tier (combined): <#t1>:<#t2>:<#s>, 0x0..0xf. '*' indicates >= 16 blocks", NULL);
|
||||
|
||||
granules_per_line = 24;
|
||||
for (unsigned int ix = 0; ix < alloc_granules; ix++) {
|
||||
@ -1953,7 +1805,7 @@ void CodeHeapState::print_space(outputStream* out, CodeHeap* heap) {
|
||||
for (unsigned int ix = 0; ix < alloc_granules; ix++) {
|
||||
print_line_delim(out, ast, low_bound, ix, granules_per_line);
|
||||
unsigned int space = StatArray[ix].t1_space + StatArray[ix].t2_space + StatArray[ix].tx_space
|
||||
+ StatArray[ix].stub_space + StatArray[ix].dead_space;
|
||||
+ StatArray[ix].stub_space;
|
||||
print_space_single(ast, space);
|
||||
}
|
||||
}
|
||||
@ -2040,22 +1892,6 @@ void CodeHeapState::print_space(outputStream* out, CodeHeap* heap) {
|
||||
BUFFEREDSTREAM_FLUSH_LOCKED("\n\n\n")
|
||||
}
|
||||
|
||||
{
|
||||
if (nBlocks_dead > 0) {
|
||||
printBox(ast, '-', "Dead space consumption. ' ' indicates empty, '*' indicates full", NULL);
|
||||
|
||||
granules_per_line = 128;
|
||||
for (unsigned int ix = 0; ix < alloc_granules; ix++) {
|
||||
print_line_delim(out, ast, low_bound, ix, granules_per_line);
|
||||
print_space_single(ast, StatArray[ix].dead_space);
|
||||
}
|
||||
ast->print("|");
|
||||
} else {
|
||||
ast->print("No dead nMethods found in CodeHeap.");
|
||||
}
|
||||
BUFFEREDSTREAM_FLUSH_LOCKED("\n\n\n")
|
||||
}
|
||||
|
||||
{
|
||||
if (!segment_granules) { // Prevent totally redundant printouts
|
||||
printBox(ast, '-', "Space consumption by tier (combined): <t1%>:<t2%>:<s%>. ' ' indicates empty, '*' indicates full", NULL);
|
||||
@ -2250,7 +2086,7 @@ void CodeHeapState::print_names(outputStream* out, CodeHeap* heap) {
|
||||
}
|
||||
// Only check granule if it contains at least one blob.
|
||||
unsigned int nBlobs = StatArray[ix].t1_count + StatArray[ix].t2_count + StatArray[ix].tx_count +
|
||||
StatArray[ix].stub_count + StatArray[ix].dead_count;
|
||||
StatArray[ix].stub_count;
|
||||
if (nBlobs > 0 ) {
|
||||
for (unsigned int is = 0; is < granule_size; is+=(unsigned int)seg_size) {
|
||||
// heap->find_start() is safe. Only works on _segmap.
|
||||
@ -2293,7 +2129,7 @@ void CodeHeapState::print_names(outputStream* out, CodeHeap* heap) {
|
||||
ast->print("%9s", "compiler");
|
||||
ast->fill_to(61);
|
||||
ast->print_cr("%6s", "method");
|
||||
ast->print_cr("%18s %13s %17s %9s %5s %18s %s", "Addr(module) ", "offset", "size", " type lvl", " temp", "blobType ", "Name");
|
||||
ast->print_cr("%18s %13s %17s %9s %18s %s", "Addr(module) ", "offset", "size", " type lvl", "blobType ", "Name");
|
||||
BUFFEREDSTREAM_FLUSH_AUTO("")
|
||||
}
|
||||
|
||||
@ -2310,7 +2146,6 @@ void CodeHeapState::print_names(outputStream* out, CodeHeap* heap) {
|
||||
ResourceMark rm;
|
||||
//---< collect all data to locals as quickly as possible >---
|
||||
unsigned int total_size = nm->total_size();
|
||||
int hotness = nm->hotness_counter();
|
||||
bool get_name = (cbType == nMethod_inuse) || (cbType == nMethod_notused);
|
||||
//---< nMethod size in hex >---
|
||||
ast->print(PTR32_FORMAT, total_size);
|
||||
@ -2318,16 +2153,10 @@ void CodeHeapState::print_names(outputStream* out, CodeHeap* heap) {
|
||||
//---< compiler information >---
|
||||
ast->fill_to(51);
|
||||
ast->print("%5s %3d", compTypeName[StatArray[ix].compiler], StatArray[ix].level);
|
||||
//---< method temperature >---
|
||||
ast->fill_to(62);
|
||||
ast->print("%5d", hotness);
|
||||
//---< name and signature >---
|
||||
ast->fill_to(62+6);
|
||||
ast->fill_to(62);
|
||||
ast->print("%s", blobTypeName[cbType]);
|
||||
ast->fill_to(82+6);
|
||||
if (cbType == nMethod_dead) {
|
||||
ast->print("%14s", " zombie method");
|
||||
}
|
||||
ast->fill_to(82);
|
||||
|
||||
if (get_name) {
|
||||
Symbol* methName = method->name();
|
||||
@ -2347,12 +2176,12 @@ void CodeHeapState::print_names(outputStream* out, CodeHeap* heap) {
|
||||
ast->print("%s", blob_name);
|
||||
}
|
||||
} else if (blob_is_safe) {
|
||||
ast->fill_to(62+6);
|
||||
ast->fill_to(62);
|
||||
ast->print("%s", blobTypeName[cbType]);
|
||||
ast->fill_to(82+6);
|
||||
ast->fill_to(82);
|
||||
ast->print("%s", blob_name);
|
||||
} else {
|
||||
ast->fill_to(62+6);
|
||||
ast->fill_to(62);
|
||||
ast->print("<stale blob>");
|
||||
}
|
||||
ast->cr();
|
||||
@ -2534,12 +2363,9 @@ CodeHeapState::blobType CodeHeapState::get_cbType(CodeBlob* cb) {
|
||||
if (holding_required_locks()) {
|
||||
nmethod* nm = cb->as_nmethod_or_null();
|
||||
if (nm != NULL) { // no is_readable check required, nm = (nmethod*)cb.
|
||||
if (nm->is_zombie()) return nMethod_dead;
|
||||
if (nm->is_unloaded()) return nMethod_unloaded;
|
||||
if (nm->is_in_use()) return nMethod_inuse;
|
||||
if (nm->is_alive() && !(nm->is_not_entrant())) return nMethod_notused;
|
||||
if (nm->is_alive()) return nMethod_alive;
|
||||
return nMethod_dead;
|
||||
if (!nm->is_not_entrant()) return nMethod_notused;
|
||||
return nMethod_notentrant;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2558,7 +2384,7 @@ bool CodeHeapState::blob_access_is_safe(CodeBlob* this_blob) {
|
||||
// make sure the nmethod at hand (and the linked method) is not garbage.
|
||||
bool CodeHeapState::nmethod_access_is_safe(nmethod* nm) {
|
||||
Method* method = (nm == NULL) ? NULL : nm->method(); // nm->method() was found to be uninitialized, i.e. != NULL, but invalid.
|
||||
return (nm != NULL) && (method != NULL) && nm->is_alive() && (method->signature() != NULL);
|
||||
return (nm != NULL) && (method != NULL) && (method->signature() != NULL);
|
||||
}
|
||||
|
||||
bool CodeHeapState::holding_required_locks() {
|
||||
|
@ -52,12 +52,7 @@ class CodeHeapState : public CHeapObj<mtCode> {
|
||||
nMethod_inuse, // executable. This is the "normal" state for a nmethod.
|
||||
nMethod_notused, // assumed inactive, marked not entrant. Could be revived if necessary.
|
||||
nMethod_notentrant, // no new activations allowed, marked for deoptimization. Old activations may still exist.
|
||||
// Will transition to "zombie" after all activations are gone.
|
||||
nMethod_zombie, // No more activations exist, ready for purge (remove from code cache).
|
||||
nMethod_unloaded, // No activations exist, should not be called. Transient state on the way to "zombie".
|
||||
nMethod_alive = nMethod_notentrant, // Combined state: nmethod may have activations, thus can't be purged.
|
||||
nMethod_dead = nMethod_zombie, // Combined state: nmethod does not have any activations.
|
||||
runtimeStub = nMethod_unloaded + 1,
|
||||
runtimeStub,
|
||||
ricochetStub,
|
||||
deoptimizationStub,
|
||||
uncommonTrapStub,
|
||||
|
@ -68,7 +68,7 @@ bool CompiledICLocker::is_safe(CompiledMethod* method) {
|
||||
}
|
||||
|
||||
bool CompiledICLocker::is_safe(address code) {
|
||||
CodeBlob* cb = CodeCache::find_blob_unsafe(code);
|
||||
CodeBlob* cb = CodeCache::find_blob(code);
|
||||
assert(cb != NULL && cb->is_compiled(), "must be compiled");
|
||||
CompiledMethod* cm = cb->as_compiled_method();
|
||||
return CompiledICProtectionBehaviour::current()->is_safe(cm);
|
||||
@ -128,7 +128,7 @@ void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub
|
||||
}
|
||||
|
||||
{
|
||||
CodeBlob* cb = CodeCache::find_blob_unsafe(_call->instruction_address());
|
||||
CodeBlob* cb = CodeCache::find_blob(_call->instruction_address());
|
||||
assert(cb != NULL && cb->is_compiled(), "must be compiled");
|
||||
_call->set_destination_mt_safe(entry_point);
|
||||
}
|
||||
@ -317,10 +317,7 @@ bool CompiledIC::is_megamorphic() const {
|
||||
bool CompiledIC::is_call_to_compiled() const {
|
||||
assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
|
||||
|
||||
// Use unsafe, since an inline cache might point to a zombie method. However, the zombie
|
||||
// method is guaranteed to still exist, since we only remove methods after all inline caches
|
||||
// has been cleaned up
|
||||
CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
|
||||
CodeBlob* cb = CodeCache::find_blob(ic_destination());
|
||||
bool is_monomorphic = (cb != NULL && cb->is_compiled());
|
||||
// Check that the cached_value is a klass for non-optimized monomorphic calls
|
||||
// This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
|
||||
@ -328,12 +325,11 @@ bool CompiledIC::is_call_to_compiled() const {
|
||||
// For JVMCI this occurs because CHA is only used to improve inlining so call sites which could be optimized
|
||||
// virtuals because there are no currently loaded subclasses of a type are left as virtual call sites.
|
||||
#ifdef ASSERT
|
||||
CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
|
||||
CodeBlob* caller = CodeCache::find_blob(instruction_address());
|
||||
bool is_c1_or_jvmci_method = caller->is_compiled_by_c1() || caller->is_compiled_by_jvmci();
|
||||
assert( is_c1_or_jvmci_method ||
|
||||
!is_monomorphic ||
|
||||
is_optimized() ||
|
||||
!caller->is_alive() ||
|
||||
(cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
|
||||
#endif // ASSERT
|
||||
return is_monomorphic;
|
||||
@ -346,10 +342,7 @@ bool CompiledIC::is_call_to_interpreted() const {
|
||||
// is optimized), or calling to an I2C blob
|
||||
bool is_call_to_interpreted = false;
|
||||
if (!is_optimized()) {
|
||||
// must use unsafe because the destination can be a zombie (and we're cleaning)
|
||||
// and the print_compiled_ic code wants to know if site (in the non-zombie)
|
||||
// is to the interpreter.
|
||||
CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
|
||||
CodeBlob* cb = CodeCache::find_blob(ic_destination());
|
||||
is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
|
||||
assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != NULL), "sanity check");
|
||||
} else {
|
||||
@ -374,8 +367,6 @@ bool CompiledIC::set_to_clean(bool in_use) {
|
||||
|
||||
address entry = _call->get_resolve_call_stub(is_optimized());
|
||||
|
||||
// A zombie transition will always be safe, since the metadata has already been set to NULL, so
|
||||
// we only need to patch the destination
|
||||
bool safe_transition = _call->is_safe_for_patching() || !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();
|
||||
|
||||
if (safe_transition) {
|
||||
@ -460,7 +451,7 @@ bool CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
|
||||
// Call to compiled code
|
||||
bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL);
|
||||
#ifdef ASSERT
|
||||
CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
|
||||
CodeBlob* cb = CodeCache::find_blob(info.entry());
|
||||
assert (cb != NULL && cb->is_compiled(), "must be compiled!");
|
||||
#endif /* ASSERT */
|
||||
|
||||
@ -560,7 +551,7 @@ void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
|
||||
|
||||
|
||||
bool CompiledIC::is_icholder_entry(address entry) {
|
||||
CodeBlob* cb = CodeCache::find_blob_unsafe(entry);
|
||||
CodeBlob* cb = CodeCache::find_blob(entry);
|
||||
if (cb != NULL && cb->is_adapter_blob()) {
|
||||
return true;
|
||||
}
|
||||
|
@ -106,10 +106,6 @@ const char* CompiledMethod::state() const {
return "not_used";
case not_entrant:
return "not_entrant";
case zombie:
return "zombie";
case unloaded:
return "unloaded";
default:
fatal("unexpected method state: %d", state);
return NULL;
@ -310,7 +306,7 @@ ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
}

address CompiledMethod::oops_reloc_begin() const {
// If the method is not entrant or zombie then a JMP is plastered over the
// If the method is not entrant then a JMP is plastered over the
// first few bytes. If an oop in the old code was there, that oop
// should not get GC'd. Skip the first few bytes of oops on
// not-entrant methods.
@ -428,11 +424,7 @@ Method* CompiledMethod::attached_method_before_pc(address pc) {
}

void CompiledMethod::clear_inline_caches() {
assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
if (is_zombie()) {
return;
}

assert(SafepointSynchronize::is_at_safepoint(), "clearing of IC's only allowed at safepoint");
RelocIterator iter(this);
while (iter.next()) {
iter.reloc()->clear_inline_cache();
@ -516,47 +508,11 @@ bool CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
bool clean_all) {
// Ok, to lookup references to zombies here
CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
CodeBlob *cb = CodeCache::find_blob(addr);
CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
if (nm != NULL) {
// Clean inline caches pointing to both zombie and not_entrant methods
// Clean inline caches pointing to bad nmethods
if (clean_all || !nm->is_in_use() || nm->is_unloading() || (nm->method()->code() != nm)) {
// Inline cache cleaning should only be initiated on CompiledMethods that have been
// observed to be is_alive(). However, with concurrent code cache unloading, it is
// possible that by now, the state has become !is_alive. This can happen in two ways:
// 1) It can be racingly flipped to unloaded if the nmethod being cleaned (from the
// sweeper) is_unloading(). This is fine, because if that happens, then the inline
// caches have already been cleaned under the same CompiledICLocker that we now hold during
// inline cache cleaning, and we will simply walk the inline caches again, and likely not
// find much of interest to clean. However, this race prevents us from asserting that the
// nmethod is_alive(). The is_unloading() function is completely monotonic; once set due
// to an oop dying, it remains set forever until freed. Because of that, all unloaded
// nmethods are is_unloading(), but notably, an unloaded nmethod may also subsequently
// become zombie (when the sweeper converts it to zombie).
// 2) It can be racingly flipped to zombie if the nmethod being cleaned (by the concurrent
// GC) cleans a zombie nmethod that is concurrently made zombie by the sweeper. In this
// scenario, the sweeper will first transition the nmethod to zombie, and then when
// unregistering from the GC, it will wait until the GC is done. The GC will then clean
// the inline caches *with IC stubs*, even though no IC stubs are needed. This is fine,
// as long as the IC stubs are guaranteed to be released until the next safepoint, where
// IC finalization requires live IC stubs to not be associated with zombie nmethods.
// This is guaranteed, because the sweeper does not have a single safepoint check until
// after it completes the whole transition function; it will wake up after the GC is
// done with concurrent code cache cleaning (which blocks out safepoints using the
// suspendible threads set), and then call clear_ic_callsites, which will release the
// associated IC stubs, before a subsequent safepoint poll can be reached. This
// guarantees that the spuriously created IC stubs are released appropriately before
// IC finalization in a safepoint gets to run. Therefore, this race is fine. This is also
// valid in a scenario where an inline cache of a zombie nmethod gets a spurious IC stub,
// and then when cleaning another inline cache, fails to request an IC stub because we
// exhausted the IC stub buffer. In this scenario, the GC will request a safepoint after
// yielding the suspendible thread set, effectively unblocking safepoints. Before such
// a safepoint can be reached, the sweeper similarly has to wake up, clear the IC stubs,
// and reach the next safepoint poll, after the whole transition function has completed.
// Due to the various races that can cause an nmethod to first be is_alive() and then
// racingly become !is_alive(), it is unfortunately not possible to assert the nmethod
// is_alive(), !is_unloaded() or !is_zombie() here.
if (!ic->set_to_clean(!from->is_unloading())) {
return false;
}
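Illustrative sketch (plain C++, not part of this patch; the struct and function names below are invented): the condition kept in the hunk above decides whether an inline cache needs cleaning, namely when everything is being cleaned, or when the target nmethod is no longer in use, is unloading, or is no longer its Method's current code.

#include <cassert>

// Hypothetical stand-in for the bits of the target CompiledMethod consulted above.
struct TargetInfo {
  bool in_use;
  bool unloading;
  bool is_current_code;   // models nm->method()->code() == nm
};

static bool should_clean_ic(bool clean_all, const TargetInfo& nm) {
  return clean_all || !nm.in_use || nm.unloading || !nm.is_current_code;
}

int main() {
  assert(should_clean_ic(false, {false, false, true}));   // target no longer in use
  assert(should_clean_ic(false, {true, true, true}));     // target is unloading
  assert(!should_clean_ic(false, {true, false, true}));   // healthy target is left alone
  return 0;
}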
@ -618,40 +574,24 @@ void CompiledMethod::run_nmethod_entry_barrier() {
}
}

void CompiledMethod::cleanup_inline_caches(bool clean_all) {
for (;;) {
ICRefillVerifier ic_refill_verifier;
{ CompiledICLocker ic_locker(this);
if (cleanup_inline_caches_impl(false, clean_all)) {
return;
}
}
// Call this nmethod entry barrier from the sweeper.
run_nmethod_entry_barrier();
if (!clean_all) {
MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CodeCache::Sweep::end();
}
InlineCacheBuffer::refill_ic_stubs();
if (!clean_all) {
MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CodeCache::Sweep::begin();
}
}
// Only called by whitebox test
void CompiledMethod::cleanup_inline_caches_whitebox() {
assert_locked_or_safepoint(CodeCache_lock);
CompiledICLocker ic_locker(this);
guarantee(cleanup_inline_caches_impl(false /* unloading_occurred */, true /* clean_all */),
"Inline cache cleaning in a safepoint can't fail");
}

address* CompiledMethod::orig_pc_addr(const frame* fr) {
return (address*) ((address)fr->unextended_sp() + orig_pc_offset());
}

// Called to clean up after class unloading for live nmethods and from the sweeper
// for all methods.
// Called to clean up after class unloading for live nmethods
bool CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
assert(CompiledICLocker::is_safe(this), "mt unsafe call");
ResourceMark rm;

// Find all calls in an nmethod and clear the ones that point to non-entrant,
// zombie and unloaded nmethods.
// Find all calls in an nmethod and clear the ones that point to bad nmethods.
RelocIterator iter(this, oops_reloc_begin());
bool is_in_static_stub = false;
while(iter.next()) {

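Illustrative sketch (plain C++, not HotSpot code; the buffer and function names are invented): the removed cleanup_inline_caches() above follows a retry-with-refill pattern, where cleaning can fail because a shared pool of transition stubs runs dry, and the caller refills the pool and tries again, while the retained whitebox path runs where such a failure cannot happen.

#include <cstdio>

// Model of the removed retry loop: cleaning fails if it would exhaust the stub pool.
static int g_free_stubs = 1;

static bool clean_all_caches(int caches_needing_stub) {
  if (caches_needing_stub > g_free_stubs) return false;  // would exhaust the pool
  g_free_stubs -= caches_needing_stub;
  return true;
}

int main() {
  int work = 3;
  while (!clean_all_caches(work)) {   // removed code: loop, refill, try again
    g_free_stubs = 16;                // stand-in for InlineCacheBuffer::refill_ic_stubs()
  }
  std::printf("cleaned with %d stubs left\n", g_free_stubs);
  return 0;
}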
@ -140,7 +140,6 @@ public:

class CompiledMethod : public CodeBlob {
friend class VMStructs;
friend class NMethodSweeper;

void init_defaults();
protected:
@ -204,11 +203,7 @@ public:
// allowed to advance state
in_use = 0, // executable nmethod
not_used = 1, // not entrant, but revivable
not_entrant = 2, // marked for deoptimization but activations may still exist,
// will be transformed to zombie when all activations are gone
unloaded = 3, // there should be no activations, should not be called, will be
// transformed to zombie by the sweeper, when not "locked in vm".
zombie = 4 // no activations exist, nmethod is ready for purge
not_entrant = 2, // marked for deoptimization but activations may still exist
};

virtual bool is_in_use() const = 0;
@ -222,7 +217,6 @@ public:
virtual bool make_not_entrant() = 0;
virtual bool make_entrant() = 0;
virtual address entry_point() const = 0;
virtual bool make_zombie() = 0;
virtual bool is_osr_method() const = 0;
virtual int osr_entry_bci() const = 0;
Method* method() const { return _method; }
@ -344,7 +338,6 @@ private:
address* orig_pc_addr(const frame* fr);

public:
virtual bool can_convert_to_zombie() = 0;
virtual const char* compile_kind() const = 0;
virtual int get_state() const = 0;

@ -369,8 +362,8 @@ public:
address continuation_for_implicit_exception(address pc, bool for_div0_check);

public:
// Serial version used by sweeper and whitebox test
void cleanup_inline_caches(bool clean_all);
// Serial version used by whitebox test
void cleanup_inline_caches_whitebox();

virtual void clear_inline_caches();
void clear_ic_callsites();

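Illustrative sketch (plain C++, not HotSpot code; values mirror the enum shown in the hunk above, everything else is invented): with zombie and unloaded removed, only in_use, not_used and not_entrant remain, and state changes stay monotonic, a method can only become "more dead".

#include <cassert>
#include <cstdio>

// Model of the reduced nmethod state set after this change.
enum State { in_use = 0, not_used = 1, not_entrant = 2 };

static const char* state_name(State s) {
  switch (s) {
    case in_use:      return "in_use";
    case not_used:    return "not_used";
    case not_entrant: return "not_entrant";
  }
  return "unknown";
}

// Transitions may only advance the state; going backwards is refused.
static bool try_transition(State& state, State new_state) {
  if (state >= new_state) return false;
  state = new_state;
  return true;
}

int main() {
  State s = in_use;
  assert(try_transition(s, not_entrant));   // in_use -> not_entrant is allowed
  assert(!try_transition(s, not_used));     // moving backwards is refused
  std::printf("final state: %s\n", state_name(s));
  return 0;
}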
@ -68,9 +68,7 @@ int DependencyContext::mark_dependent_nmethods(DepChange& changes) {
|
||||
int found = 0;
|
||||
for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
|
||||
nmethod* nm = b->get_nmethod();
|
||||
// since dependencies aren't removed until an nmethod becomes a zombie,
|
||||
// the dependency list may contain nmethods which aren't alive.
|
||||
if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
|
||||
if (b->count() > 0 && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
|
||||
if (TraceDependencies) {
|
||||
ResourceMark rm;
|
||||
tty->print_cr("Marked for deoptimization");
|
||||
@ -137,40 +135,6 @@ void DependencyContext::release(nmethodBucket* b) {
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Remove an nmethod dependency from the context.
|
||||
// Decrement count of the nmethod in the dependency list and, optionally, remove
|
||||
// the bucket completely when the count goes to 0. This method must find
|
||||
// a corresponding bucket otherwise there's a bug in the recording of dependencies.
|
||||
// Can be called concurrently by parallel GC threads.
|
||||
//
|
||||
void DependencyContext::remove_dependent_nmethod(nmethod* nm) {
|
||||
assert_locked_or_safepoint(CodeCache_lock);
|
||||
nmethodBucket* first = dependencies_not_unloading();
|
||||
nmethodBucket* last = NULL;
|
||||
for (nmethodBucket* b = first; b != NULL; b = b->next_not_unloading()) {
|
||||
if (nm == b->get_nmethod()) {
|
||||
int val = b->decrement();
|
||||
guarantee(val >= 0, "Underflow: %d", val);
|
||||
if (val == 0) {
|
||||
if (last == NULL) {
|
||||
// If there was not a head that was not unloading, we can set a new
|
||||
// head without a CAS, because we know there is no contending cleanup.
|
||||
set_dependencies(b->next_not_unloading());
|
||||
} else {
|
||||
// Only supports a single inserting thread (protected by CodeCache_lock)
|
||||
// for now. Therefore, the next pointer only competes with another cleanup
|
||||
// operation. That interaction does not need a CAS.
|
||||
last->set_next(b->next_not_unloading());
|
||||
}
|
||||
release(b);
|
||||
}
|
||||
return;
|
||||
}
|
||||
last = b;
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Reclaim all unused buckets.
|
||||
//
|
||||
@ -225,7 +189,7 @@ int DependencyContext::remove_and_mark_for_deoptimization_all_dependents() {
|
||||
int marked = 0;
|
||||
while (b != NULL) {
|
||||
nmethod* nm = b->get_nmethod();
|
||||
if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization()) {
|
||||
if (b->count() > 0 && !nm->is_marked_for_deoptimization()) {
|
||||
nm->mark_for_deoptimization();
|
||||
marked++;
|
||||
}
|
||||
|
@ -119,7 +119,6 @@ class DependencyContext : public StackObj {
|
||||
|
||||
int mark_dependent_nmethods(DepChange& changes);
|
||||
void add_dependent_nmethod(nmethod* nm);
|
||||
void remove_dependent_nmethod(nmethod* nm);
|
||||
void remove_all_dependents();
|
||||
int remove_and_mark_for_deoptimization_all_dependents();
|
||||
void clean_unloading_dependents();
|
||||
|
@ -33,8 +33,10 @@
|
||||
#include "code/nmethod.hpp"
|
||||
#include "code/scopeDesc.hpp"
|
||||
#include "compiler/abstractCompiler.hpp"
|
||||
#include "compiler/compilationLog.hpp"
|
||||
#include "compiler/compileBroker.hpp"
|
||||
#include "compiler/compileLog.hpp"
|
||||
#include "compiler/compileTask.hpp"
|
||||
#include "compiler/compilerDirectives.hpp"
|
||||
#include "compiler/directivesParser.hpp"
|
||||
#include "compiler/disassembler.hpp"
|
||||
@ -70,7 +72,6 @@
|
||||
#include "runtime/serviceThread.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "runtime/signature.hpp"
|
||||
#include "runtime/sweeper.hpp"
|
||||
#include "runtime/threadWXSetters.inline.hpp"
|
||||
#include "runtime/vmThread.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
@ -441,14 +442,7 @@ const char* nmethod::compile_kind() const {
|
||||
void nmethod::init_defaults() {
|
||||
_state = not_installed;
|
||||
_has_flushed_dependencies = 0;
|
||||
_lock_count = 0;
|
||||
_stack_traversal_mark = 0;
|
||||
_load_reported = false; // jvmti state
|
||||
_unload_reported = false;
|
||||
|
||||
#ifdef ASSERT
|
||||
_oops_are_stale = false;
|
||||
#endif
|
||||
|
||||
_oops_do_mark_link = NULL;
|
||||
_osr_link = NULL;
|
||||
@ -611,6 +605,7 @@ nmethod::nmethod(
|
||||
ByteSize basic_lock_sp_offset,
|
||||
OopMapSet* oop_maps )
|
||||
: CompiledMethod(method, "native nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true),
|
||||
_unlinked_next(NULL),
|
||||
_native_receiver_sp_offset(basic_lock_owner_sp_offset),
|
||||
_native_basic_lock_sp_offset(basic_lock_sp_offset),
|
||||
_is_unloading_state(0)
|
||||
@ -630,7 +625,7 @@ nmethod::nmethod(
|
||||
// values something that will never match a pc like the nmethod vtable entry
|
||||
_exception_offset = 0;
|
||||
_orig_pc_offset = 0;
|
||||
_gc_epoch = Continuations::gc_epoch();
|
||||
_gc_epoch = CodeCache::gc_epoch();
|
||||
|
||||
_consts_offset = data_offset();
|
||||
_stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs());
|
||||
@ -654,7 +649,6 @@ nmethod::nmethod(
|
||||
_osr_entry_point = NULL;
|
||||
_exception_cache = NULL;
|
||||
_pc_desc_container.reset_to(NULL);
|
||||
_hotness_counter = NMethodSweeper::hotness_counter_reset_val();
|
||||
|
||||
_exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions);
|
||||
|
||||
@ -746,6 +740,7 @@ nmethod::nmethod(
|
||||
#endif
|
||||
)
|
||||
: CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true),
|
||||
_unlinked_next(NULL),
|
||||
_native_receiver_sp_offset(in_ByteSize(-1)),
|
||||
_native_basic_lock_sp_offset(in_ByteSize(-1)),
|
||||
_is_unloading_state(0)
|
||||
@ -763,8 +758,7 @@ nmethod::nmethod(
|
||||
_compile_id = compile_id;
|
||||
_comp_level = comp_level;
|
||||
_orig_pc_offset = orig_pc_offset;
|
||||
_hotness_counter = NMethodSweeper::hotness_counter_reset_val();
|
||||
_gc_epoch = Continuations::gc_epoch();
|
||||
_gc_epoch = CodeCache::gc_epoch();
|
||||
|
||||
// Section offsets
|
||||
_consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts());
|
||||
@ -937,7 +931,7 @@ void nmethod::print_on(outputStream* st, const char* msg) const {
|
||||
}
|
||||
}
|
||||
|
||||
void nmethod::maybe_print_nmethod(DirectiveSet* directive) {
|
||||
void nmethod::maybe_print_nmethod(const DirectiveSet* directive) {
|
||||
bool printnmethods = directive->PrintAssemblyOption || directive->PrintNMethodsOption;
|
||||
if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
|
||||
print_nmethod(printnmethods);
|
||||
@ -945,8 +939,6 @@ void nmethod::maybe_print_nmethod(DirectiveSet* directive) {
|
||||
}
|
||||
|
||||
void nmethod::print_nmethod(bool printmethod) {
|
||||
run_nmethod_entry_barrier(); // ensure all embedded OOPs are valid before printing
|
||||
|
||||
ttyLocker ttyl; // keep the following output all in one block
|
||||
if (xtty != NULL) {
|
||||
xtty->begin_head("print_nmethod");
|
||||
@ -1120,7 +1112,6 @@ void nmethod::make_deoptimized() {
|
||||
}
|
||||
|
||||
assert(method() == NULL || can_be_deoptimized(), "");
|
||||
assert(!is_zombie(), "");
|
||||
|
||||
CompiledICLocker ml(this);
|
||||
assert(CompiledICLocker::is_safe(this), "mt unsafe call");
|
||||
@ -1172,12 +1163,11 @@ void nmethod::verify_clean_inline_caches() {
|
||||
case relocInfo::virtual_call_type:
|
||||
case relocInfo::opt_virtual_call_type: {
|
||||
CompiledIC *ic = CompiledIC_at(&iter);
|
||||
// Ok, to lookup references to zombies here
|
||||
CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
|
||||
CodeBlob *cb = CodeCache::find_blob(ic->ic_destination());
|
||||
assert(cb != NULL, "destination not in CodeBlob?");
|
||||
nmethod* nm = cb->as_nmethod_or_null();
|
||||
if( nm != NULL ) {
|
||||
// Verify that inline caches pointing to both zombie and not_entrant methods are clean
|
||||
// Verify that inline caches pointing to bad nmethods are clean
|
||||
if (!nm->is_in_use() || (nm->method()->code() != nm)) {
|
||||
assert(ic->is_clean(), "IC should be clean");
|
||||
}
|
||||
@ -1186,11 +1176,11 @@ void nmethod::verify_clean_inline_caches() {
|
||||
}
|
||||
case relocInfo::static_call_type: {
|
||||
CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
|
||||
CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
|
||||
CodeBlob *cb = CodeCache::find_blob(csc->destination());
|
||||
assert(cb != NULL, "destination not in CodeBlob?");
|
||||
nmethod* nm = cb->as_nmethod_or_null();
|
||||
if( nm != NULL ) {
|
||||
// Verify that inline caches pointing to both zombie and not_entrant methods are clean
|
||||
// Verify that inline caches pointing to bad nmethods are clean
|
||||
if (!nm->is_in_use() || (nm->method()->code() != nm)) {
|
||||
assert(csc->is_clean(), "IC should be clean");
|
||||
}
|
||||
@ -1203,49 +1193,14 @@ void nmethod::verify_clean_inline_caches() {
|
||||
}
|
||||
}
|
||||
|
||||
// This is a private interface with the sweeper.
|
||||
void nmethod::mark_as_seen_on_stack() {
|
||||
assert(is_alive(), "Must be an alive method");
|
||||
// Set the traversal mark to ensure that the sweeper does 2
|
||||
// cleaning passes before moving to zombie.
|
||||
set_stack_traversal_mark(NMethodSweeper::traversal_count());
|
||||
void nmethod::mark_as_maybe_on_stack() {
|
||||
Atomic::store(&_gc_epoch, CodeCache::gc_epoch());
|
||||
}
|
||||
|
||||
void nmethod::mark_as_maybe_on_continuation() {
|
||||
assert(is_alive(), "Must be an alive method");
|
||||
_gc_epoch = Continuations::gc_epoch();
|
||||
}
|
||||
|
||||
bool nmethod::is_maybe_on_continuation_stack() {
|
||||
if (!Continuations::enabled()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
bool nmethod::is_maybe_on_stack() {
|
||||
// If the condition below is true, it means that the nmethod was found to
|
||||
// be alive the previous completed marking cycle.
|
||||
return _gc_epoch >= Continuations::previous_completed_gc_marking_cycle();
|
||||
}
|
||||
|
||||
// Tell if a non-entrant method can be converted to a zombie (i.e.,
|
||||
// there are no activations on the stack, not in use by the VM,
|
||||
// and not in use by the ServiceThread)
|
||||
bool nmethod::can_convert_to_zombie() {
|
||||
// Note that this is called when the sweeper has observed the nmethod to be
|
||||
// not_entrant. However, with concurrent code cache unloading, the state
|
||||
// might have moved on to unloaded if it is_unloading(), due to racing
|
||||
// concurrent GC threads.
|
||||
assert(is_not_entrant() || is_unloading() ||
|
||||
!Thread::current()->is_Code_cache_sweeper_thread(),
|
||||
"must be a non-entrant method if called from sweeper");
|
||||
|
||||
// Since the nmethod sweeper only does partial sweep the sweeper's traversal
|
||||
// count can be greater than the stack traversal count before it hits the
|
||||
// nmethod for the second time.
|
||||
// If an is_unloading() nmethod is still not_entrant, then it is not safe to
|
||||
// convert it to zombie due to GC unloading interactions. However, if it
|
||||
// has become unloaded, then it is okay to convert such nmethods to zombie.
|
||||
return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() && !is_maybe_on_continuation_stack() &&
|
||||
!is_locked_by_vm() && (!is_unloading() || is_unloaded());
|
||||
return Atomic::load(&_gc_epoch) >= CodeCache::previous_completed_gc_marking_cycle();
|
||||
}
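Illustrative sketch (plain C++, not HotSpot code; the globals and the FakeNMethod type are invented): the epoch scheme shown above replaces the sweeper's stack-traversal mark. The GC bumps a global epoch, an nmethod observed on a stack records the current epoch, and "maybe on stack" simply means the recorded epoch is not older than the last completed marking cycle.

#include <atomic>
#include <cassert>
#include <cstdint>

static std::atomic<uint64_t> g_gc_epoch{2};               // bumped as GC cycles run
static std::atomic<uint64_t> g_prev_completed_cycle{2};   // last fully completed marking cycle

struct FakeNMethod {
  std::atomic<uint64_t> gc_epoch{2};

  void mark_as_maybe_on_stack() {          // called when a frame of this method is seen
    gc_epoch.store(g_gc_epoch.load());
  }
  bool is_maybe_on_stack() const {         // alive as of the previous completed cycle?
    return gc_epoch.load() >= g_prev_completed_cycle.load();
  }
};

int main() {
  FakeNMethod nm;
  nm.mark_as_maybe_on_stack();
  assert(nm.is_maybe_on_stack());

  // Several GC cycles complete without the method being seen on any stack.
  g_gc_epoch += 4;
  g_prev_completed_cycle += 4;
  assert(!nm.is_maybe_on_stack());         // now eligible for removal
  return 0;
}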
|
||||
|
||||
void nmethod::inc_decompile_count() {
|
||||
@ -1261,118 +1216,14 @@ void nmethod::inc_decompile_count() {
|
||||
|
||||
bool nmethod::try_transition(int new_state_int) {
signed char new_state = new_state_int;
#ifdef ASSERT
if (new_state != unloaded) {
assert_lock_strong(CompiledMethod_lock);
assert_lock_strong(CompiledMethod_lock);
signed char old_state = _state;
if (old_state >= new_state) {
// Ensure monotonicity of transitions.
return false;
}
#endif
for (;;) {
signed char old_state = Atomic::load(&_state);
if (old_state >= new_state) {
// Ensure monotonicity of transitions.
return false;
}
if (Atomic::cmpxchg(&_state, old_state, new_state) == old_state) {
return true;
}
}
}
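Illustrative sketch (plain C++, not HotSpot code; the globals stand in for _state and CompiledMethod_lock): the removed variant of try_transition is lock-free, so it retries a compare-and-swap until the monotonic transition succeeds or turns out to be stale, while the retained variant can simply check and store because every caller now holds the lock.

#include <atomic>
#include <cassert>
#include <mutex>

static std::atomic<signed char> g_state{0};
static std::mutex g_state_lock;            // stands in for CompiledMethod_lock

// Removed style: lock-free, so a CAS loop enforces monotonicity under races.
static bool try_transition_cas(signed char new_state) {
  for (;;) {
    signed char old_state = g_state.load();
    if (old_state >= new_state) return false;            // never move backwards
    if (g_state.compare_exchange_weak(old_state, new_state)) return true;
  }
}

// Retained style: callers hold the lock, so a plain check plus store suffices.
static bool try_transition_locked(signed char new_state) {
  // assert_lock_strong(CompiledMethod_lock) in HotSpot; modeled by owning g_state_lock.
  signed char old_state = g_state.load(std::memory_order_relaxed);
  if (old_state >= new_state) return false;
  g_state.store(new_state, std::memory_order_relaxed);
  return true;
}

int main() {
  assert(try_transition_cas(1));
  std::lock_guard<std::mutex> lg(g_state_lock);
  assert(try_transition_locked(2));
  assert(!try_transition_locked(1));       // transitions remain monotonic
  return 0;
}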
|
||||
|
||||
void nmethod::make_unloaded() {
|
||||
post_compiled_method_unload();
|
||||
|
||||
// This nmethod is being unloaded, make sure that dependencies
|
||||
// recorded in instanceKlasses get flushed.
|
||||
// Since this work is being done during a GC, defer deleting dependencies from the
|
||||
// InstanceKlass.
|
||||
assert(Universe::heap()->is_gc_active() ||
|
||||
Thread::current()->is_ConcurrentGC_thread() ||
|
||||
Thread::current()->is_Worker_thread(),
|
||||
"should only be called during gc");
|
||||
flush_dependencies(/*delete_immediately*/false);
|
||||
|
||||
// Break cycle between nmethod & method
|
||||
LogTarget(Trace, class, unload, nmethod) lt;
|
||||
if (lt.is_enabled()) {
|
||||
LogStream ls(lt);
|
||||
ls.print("making nmethod " INTPTR_FORMAT
|
||||
" unloadable, Method*(" INTPTR_FORMAT
|
||||
") ",
|
||||
p2i(this), p2i(_method));
|
||||
ls.cr();
|
||||
}
|
||||
// Unlink the osr method, so we do not look this up again
|
||||
if (is_osr_method()) {
|
||||
// Invalidate the osr nmethod only once. Note that with concurrent
|
||||
// code cache unloading, OSR nmethods are invalidated before they
|
||||
// are made unloaded. Therefore, this becomes a no-op then.
|
||||
if (is_in_use()) {
|
||||
invalidate_osr_method();
|
||||
}
|
||||
#ifdef ASSERT
|
||||
if (method() != NULL) {
|
||||
// Make sure osr nmethod is invalidated, i.e. not on the list
|
||||
bool found = method()->method_holder()->remove_osr_nmethod(this);
|
||||
assert(!found, "osr nmethod should have been invalidated");
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
// If _method is already NULL the Method* is about to be unloaded,
|
||||
// so we don't have to break the cycle. Note that it is possible to
|
||||
// have the Method* live here, in case we unload the nmethod because
|
||||
// it is pointing to some oop (other than the Method*) being unloaded.
|
||||
if (_method != NULL) {
|
||||
_method->unlink_code(this);
|
||||
}
|
||||
|
||||
// Make the class unloaded - i.e., change state and notify sweeper
|
||||
assert(SafepointSynchronize::is_at_safepoint() ||
|
||||
Thread::current()->is_ConcurrentGC_thread() ||
|
||||
Thread::current()->is_Worker_thread(),
|
||||
"must be at safepoint");
|
||||
|
||||
{
|
||||
// Clear ICStubs and release any CompiledICHolders.
|
||||
CompiledICLocker ml(this);
|
||||
clear_ic_callsites();
|
||||
}
|
||||
|
||||
// Unregister must be done before the state change
|
||||
{
|
||||
MutexLocker ml(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock,
|
||||
Mutex::_no_safepoint_check_flag);
|
||||
Universe::heap()->unregister_nmethod(this);
|
||||
}
|
||||
|
||||
// Clear the method of this dead nmethod
|
||||
set_method(NULL);
|
||||
|
||||
// Log the unloading.
|
||||
log_state_change();
|
||||
|
||||
// The Method* is gone at this point
|
||||
assert(_method == NULL, "Tautology");
|
||||
|
||||
set_osr_link(NULL);
|
||||
NMethodSweeper::report_state_change(this);
|
||||
|
||||
bool transition_success = try_transition(unloaded);
|
||||
|
||||
// It is an important invariant that there exists no race between
|
||||
// the sweeper and GC thread competing for making the same nmethod
|
||||
// zombie and unloaded respectively. This is ensured by
|
||||
// can_convert_to_zombie() returning false for any is_unloading()
|
||||
// nmethod, informing the sweeper not to step on any GC toes.
|
||||
assert(transition_success, "Invalid nmethod transition to unloaded");
|
||||
|
||||
#if INCLUDE_JVMCI
|
||||
// Clear the link between this nmethod and a HotSpotNmethod mirror
|
||||
JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
|
||||
if (nmethod_data != NULL) {
|
||||
nmethod_data->invalidate_nmethod_mirror(this);
|
||||
}
|
||||
#endif
|
||||
Atomic::store(&_state, new_state);
|
||||
return true;
|
||||
}
|
||||
|
||||
void nmethod::invalidate_osr_method() {
|
||||
@ -1387,24 +1238,17 @@ void nmethod::log_state_change() const {
|
||||
if (LogCompilation) {
|
||||
if (xtty != NULL) {
|
||||
ttyLocker ttyl; // keep the following output all in one block
|
||||
if (_state == unloaded) {
|
||||
xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
|
||||
os::current_thread_id());
|
||||
} else {
|
||||
xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
|
||||
os::current_thread_id(),
|
||||
(_state == zombie ? " zombie='1'" : ""));
|
||||
}
|
||||
xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'",
|
||||
os::current_thread_id());
|
||||
log_identity(xtty);
|
||||
xtty->stamp();
|
||||
xtty->end_elem();
|
||||
}
|
||||
}
|
||||
|
||||
const char *state_msg = _state == zombie ? "made zombie" : "made not entrant";
|
||||
CompileTask::print_ul(this, state_msg);
|
||||
if (PrintCompilation && _state != unloaded) {
|
||||
print_on(tty, state_msg);
|
||||
CompileTask::print_ul(this, "made not entrant");
|
||||
if (PrintCompilation) {
|
||||
print_on(tty, "made not entrant");
|
||||
}
|
||||
}
|
||||
|
||||
@ -1414,13 +1258,18 @@ void nmethod::unlink_from_method() {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Common functionality for both make_not_entrant and make_zombie
|
||||
*/
|
||||
bool nmethod::make_not_entrant_or_zombie(int state) {
|
||||
assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
|
||||
// Invalidate code
|
||||
bool nmethod::make_not_entrant() {
|
||||
// This can be called while the system is already at a safepoint which is ok
|
||||
NoSafepointVerifier nsv;
|
||||
|
||||
if (Atomic::load(&_state) >= state) {
|
||||
if (is_unloading()) {
|
||||
// If the nmethod is unloading, then it is already not entrant through
|
||||
// the nmethod entry barriers. No need to do anything; GC will unload it.
|
||||
return false;
|
||||
}
|
||||
|
||||
if (Atomic::load(&_state) == not_entrant) {
|
||||
// Avoid taking the lock if already in required state.
|
||||
// This is safe from races because the state is an end-state,
|
||||
// which the nmethod cannot back out of once entered.
|
||||
@ -1428,78 +1277,44 @@ bool nmethod::make_not_entrant_or_zombie(int state) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Make sure the nmethod is not flushed.
|
||||
nmethodLocker nml(this);
|
||||
// This can be called while the system is already at a safepoint which is ok
|
||||
NoSafepointVerifier nsv;
|
||||
|
||||
// during patching, depending on the nmethod state we must notify the GC that
|
||||
// code has been unloaded, unregistering it. We cannot do this right while
|
||||
// holding the CompiledMethod_lock because we need to use the CodeCache_lock. This
|
||||
// would be prone to deadlocks.
|
||||
// This flag is used to remember whether we need to later lock and unregister.
|
||||
bool nmethod_needs_unregister = false;
|
||||
|
||||
{
|
||||
// Enter critical section. Does not block for safepoint.
|
||||
MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
|
||||
|
||||
// This logic is equivalent to the logic below for patching the
|
||||
// verified entry point of regular methods. We check that the
|
||||
// nmethod is in use to ensure that it is invalidated only once.
|
||||
if (is_osr_method() && is_in_use()) {
|
||||
// this effectively makes the osr nmethod not entrant
|
||||
invalidate_osr_method();
|
||||
}
|
||||
|
||||
if (Atomic::load(&_state) >= state) {
|
||||
if (Atomic::load(&_state) == not_entrant) {
|
||||
// another thread already performed this transition so nothing
|
||||
// to do, but return false to indicate this.
|
||||
return false;
|
||||
}
|
||||
|
||||
// The caller can be calling the method statically or through an inline
|
||||
// cache call.
|
||||
if (!is_osr_method() && !is_not_entrant()) {
|
||||
if (is_osr_method()) {
|
||||
// This logic is equivalent to the logic below for patching the
|
||||
// verified entry point of regular methods.
|
||||
// this effectively makes the osr nmethod not entrant
|
||||
invalidate_osr_method();
|
||||
} else {
|
||||
// The caller can be calling the method statically or through an inline
|
||||
// cache call.
|
||||
NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
|
||||
SharedRuntime::get_handle_wrong_method_stub());
|
||||
SharedRuntime::get_handle_wrong_method_stub());
|
||||
}
|
||||
|
||||
if (is_in_use() && update_recompile_counts()) {
|
||||
// It's a true state change, so mark the method as decompiled.
|
||||
// Do it only for transition from alive.
|
||||
if (update_recompile_counts()) {
|
||||
// Mark the method as decompiled.
|
||||
inc_decompile_count();
|
||||
}
|
||||
|
||||
// If the state is becoming a zombie, signal to unregister the nmethod with
|
||||
// the heap.
|
||||
// This nmethod may have already been unloaded during a full GC.
|
||||
if ((state == zombie) && !is_unloaded()) {
|
||||
nmethod_needs_unregister = true;
|
||||
}
|
||||
|
||||
// Must happen before state change. Otherwise we have a race condition in
|
||||
// nmethod::can_convert_to_zombie(). I.e., a method can immediately
|
||||
// transition its state from 'not_entrant' to 'zombie' without having to wait
|
||||
// for stack scanning.
|
||||
if (state == not_entrant) {
|
||||
mark_as_seen_on_stack();
|
||||
OrderAccess::storestore(); // _stack_traversal_mark and _state
|
||||
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
|
||||
if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
|
||||
// If nmethod entry barriers are not supported, we won't mark
|
||||
// nmethods as on-stack when they become on-stack. So we
|
||||
// degrade to a less accurate flushing strategy, for now.
|
||||
mark_as_maybe_on_stack();
|
||||
}
|
||||
|
||||
// Change state
|
||||
if (!try_transition(state)) {
|
||||
// If the transition fails, it is due to another thread making the nmethod more
|
||||
// dead. In particular, one thread might be making the nmethod unloaded concurrently.
|
||||
// If so, having patched in the jump in the verified entry unnecessarily is fine.
|
||||
// The nmethod is no longer possible to call by Java threads.
|
||||
// Incrementing the decompile count is also fine as the caller of make_not_entrant()
|
||||
// had a valid reason to deoptimize the nmethod.
|
||||
// Marking the nmethod as seen on stack also has no effect, as the nmethod is now
|
||||
// !is_alive(), and the seen on stack value is only used to convert not_entrant
|
||||
// nmethods to zombie in can_convert_to_zombie().
|
||||
return false;
|
||||
}
|
||||
bool success = try_transition(not_entrant);
|
||||
assert(success, "Transition can't fail");
|
||||
|
||||
// Log the transition once
|
||||
log_state_change();
|
||||
@ -1525,96 +1340,69 @@ bool nmethod::make_not_entrant_or_zombie(int state) {
|
||||
}
|
||||
#endif
|
||||
|
||||
// When the nmethod becomes zombie it is no longer alive so the
|
||||
// dependencies must be flushed. nmethods in the not_entrant
|
||||
// state will be flushed later when the transition to zombie
|
||||
// happens or they get unloaded.
|
||||
if (state == zombie) {
|
||||
{
|
||||
// Flushing dependencies must be done before any possible
|
||||
// safepoint can sneak in, otherwise the oops used by the
|
||||
// dependency logic could have become stale.
|
||||
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
|
||||
if (nmethod_needs_unregister) {
|
||||
Universe::heap()->unregister_nmethod(this);
|
||||
}
|
||||
flush_dependencies(/*delete_immediately*/true);
|
||||
}
|
||||
|
||||
#if INCLUDE_JVMCI
|
||||
// Now that the nmethod has been unregistered, it's
|
||||
// safe to clear the HotSpotNmethod mirror oop.
|
||||
if (nmethod_data != NULL) {
|
||||
nmethod_data->clear_nmethod_mirror(this);
|
||||
}
|
||||
#endif
|
||||
|
||||
// Clear ICStubs to prevent back patching stubs of zombie or flushed
|
||||
// nmethods during the next safepoint (see ICStub::finalize), as well
|
||||
// as to free up CompiledICHolder resources.
|
||||
{
|
||||
CompiledICLocker ml(this);
|
||||
clear_ic_callsites();
|
||||
}
|
||||
|
||||
// zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
|
||||
// event and it hasn't already been reported for this nmethod then
|
||||
// report it now. The event may have been reported earlier if the GC
|
||||
// marked it for unloading). JvmtiDeferredEventQueue support means
|
||||
// we no longer go to a safepoint here.
|
||||
post_compiled_method_unload();
|
||||
|
||||
#ifdef ASSERT
|
||||
// It's no longer safe to access the oops section since zombie
|
||||
// nmethods aren't scanned for GC.
|
||||
_oops_are_stale = true;
|
||||
#endif
|
||||
// the Method may be reclaimed by class unloading now that the
|
||||
// nmethod is in zombie state
|
||||
set_method(NULL);
|
||||
} else {
|
||||
assert(state == not_entrant, "other cases may need to be handled differently");
|
||||
}
|
||||
|
||||
if (TraceCreateZombies && state == zombie) {
|
||||
ResourceMark m;
|
||||
tty->print_cr("nmethod <" INTPTR_FORMAT "> %s code made %s", p2i(this), this->method() ? this->method()->name_and_sig_as_C_string() : "null", (state == not_entrant) ? "not entrant" : "zombie");
|
||||
}
|
||||
|
||||
NMethodSweeper::report_state_change(this);
|
||||
return true;
|
||||
}
|
||||
|
||||
// For concurrent GCs, there must be a handshake between unlink and flush
|
||||
void nmethod::unlink() {
|
||||
if (_unlinked_next != NULL) {
|
||||
// Already unlinked. It can be invoked twice because concurrent code cache
|
||||
// unloading might need to restart when inline cache cleaning fails due to
|
||||
// running out of ICStubs, which can only be refilled at safepoints
|
||||
return;
|
||||
}
|
||||
|
||||
flush_dependencies();
|
||||
|
||||
// unlink_from_method will take the CompiledMethod_lock.
|
||||
// In this case we don't strictly need it when unlinking nmethods from
|
||||
// the Method, because it is only concurrently unlinked by
|
||||
// the entry barrier, which acquires the per nmethod lock.
|
||||
unlink_from_method();
|
||||
clear_ic_callsites();
|
||||
|
||||
if (is_osr_method()) {
|
||||
invalidate_osr_method();
|
||||
}
|
||||
|
||||
#if INCLUDE_JVMCI
|
||||
// Clear the link between this nmethod and a HotSpotNmethod mirror
|
||||
JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
|
||||
if (nmethod_data != NULL) {
|
||||
nmethod_data->invalidate_nmethod_mirror(this);
|
||||
}
|
||||
#endif
|
||||
|
||||
// Post before flushing as jmethodID is being used
|
||||
post_compiled_method_unload();
|
||||
|
||||
// Register for flushing when it is safe. For concurrent class unloading,
|
||||
// that would be after the unloading handshake, and for STW class unloading
|
||||
// that would be when getting back to the VM thread.
|
||||
CodeCache::register_unlinked(this);
|
||||
}
|
||||
|
||||
void nmethod::flush() {
|
||||
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
|
||||
// Note that there are no valid oops in the nmethod anymore.
|
||||
assert(!is_osr_method() || is_unloaded() || is_zombie(),
|
||||
"osr nmethod must be unloaded or zombie before flushing");
|
||||
assert(is_zombie() || is_osr_method(), "must be a zombie method");
|
||||
assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
|
||||
assert_locked_or_safepoint(CodeCache_lock);
|
||||
MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
|
||||
|
||||
// completely deallocate this method
|
||||
Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, p2i(this));
|
||||
if (PrintMethodFlushing) {
|
||||
tty->print_cr("*flushing %s nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT
|
||||
"/Free CodeCache:" SIZE_FORMAT "Kb",
|
||||
is_osr_method() ? "osr" : "",_compile_id, p2i(this), CodeCache::blob_count(),
|
||||
CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024);
|
||||
}
|
||||
Events::log(Thread::current(), "flushing nmethod " INTPTR_FORMAT, p2i(this));
|
||||
log_debug(codecache)("*flushing %s nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT
|
||||
"/Free CodeCache:" SIZE_FORMAT "Kb",
|
||||
is_osr_method() ? "osr" : "",_compile_id, p2i(this), CodeCache::blob_count(),
|
||||
CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024);
|
||||
|
||||
// We need to deallocate any ExceptionCache data.
|
||||
// Note that we do not need to grab the nmethod lock for this, it
|
||||
// better be thread safe if we're disposing of it!
|
||||
ExceptionCache* ec = exception_cache();
|
||||
set_exception_cache(NULL);
|
||||
while(ec != NULL) {
|
||||
ExceptionCache* next = ec->next();
|
||||
delete ec;
|
||||
ec = next;
|
||||
}
|
||||
|
||||
Universe::heap()->flush_nmethod(this);
|
||||
Universe::heap()->unregister_nmethod(this);
|
||||
CodeCache::unregister_old_nmethod(this);
|
||||
|
||||
CodeBlob::flush();
|
||||
@ -1637,79 +1425,51 @@ oop nmethod::oop_at_phantom(int index) const {
|
||||
|
||||
//
|
||||
// Notify all classes this nmethod is dependent on that it is no
|
||||
// longer dependent. This should only be called in two situations.
|
||||
// First, when a nmethod transitions to a zombie all dependents need
|
||||
// to be clear. Since zombification happens at a safepoint there's no
|
||||
// synchronization issues. The second place is a little more tricky.
|
||||
// During phase 1 of mark sweep class unloading may happen and as a
|
||||
// result some nmethods may get unloaded. In this case the flushing
|
||||
// of dependencies must happen during phase 1 since after GC any
|
||||
// dependencies in the unloaded nmethod won't be updated, so
|
||||
// traversing the dependency information in unsafe. In that case this
|
||||
// function is called with a boolean argument and this function only
|
||||
// notifies instanceKlasses that are reachable
|
||||
// longer dependent.
|
||||
|
||||
void nmethod::flush_dependencies(bool delete_immediately) {
|
||||
DEBUG_ONLY(bool called_by_gc = Universe::heap()->is_gc_active() ||
|
||||
Thread::current()->is_ConcurrentGC_thread() ||
|
||||
Thread::current()->is_Worker_thread();)
|
||||
assert(called_by_gc != delete_immediately,
|
||||
"delete_immediately is false if and only if we are called during GC");
|
||||
void nmethod::flush_dependencies() {
|
||||
if (!has_flushed_dependencies()) {
|
||||
set_has_flushed_dependencies();
|
||||
for (Dependencies::DepStream deps(this); deps.next(); ) {
|
||||
if (deps.type() == Dependencies::call_site_target_value) {
|
||||
// CallSite dependencies are managed on per-CallSite instance basis.
|
||||
oop call_site = deps.argument_oop(0);
|
||||
if (delete_immediately) {
|
||||
assert_locked_or_safepoint(CodeCache_lock);
|
||||
MethodHandles::remove_dependent_nmethod(call_site, this);
|
||||
} else {
|
||||
MethodHandles::clean_dependency_context(call_site);
|
||||
}
|
||||
MethodHandles::clean_dependency_context(call_site);
|
||||
} else {
|
||||
Klass* klass = deps.context_type();
|
||||
if (klass == NULL) {
|
||||
continue; // ignore things like evol_method
|
||||
}
|
||||
// During GC delete_immediately is false, and liveness
|
||||
// of dependee determines class that needs to be updated.
|
||||
if (delete_immediately) {
|
||||
assert_locked_or_safepoint(CodeCache_lock);
|
||||
InstanceKlass::cast(klass)->remove_dependent_nmethod(this);
|
||||
} else if (klass->is_loader_alive()) {
|
||||
// The GC may clean dependency contexts concurrently and in parallel.
|
||||
InstanceKlass::cast(klass)->clean_dependency_context();
|
||||
}
|
||||
// During GC liveness of dependee determines class that needs to be updated.
|
||||
// The GC may clean dependency contexts concurrently and in parallel.
|
||||
InstanceKlass::cast(klass)->clean_dependency_context();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void nmethod::post_compiled_method(CompileTask* task) {
|
||||
task->mark_success();
|
||||
task->set_nm_content_size(content_size());
|
||||
task->set_nm_insts_size(insts_size());
|
||||
task->set_nm_total_size(total_size());
|
||||
|
||||
// JVMTI -- compiled method notification (must be done outside lock)
|
||||
post_compiled_method_load_event();
|
||||
|
||||
if (CompilationLog::log() != NULL) {
|
||||
CompilationLog::log()->log_nmethod(JavaThread::current(), this);
|
||||
}
|
||||
|
||||
const DirectiveSet* directive = task->directive();
|
||||
maybe_print_nmethod(directive);
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// post_compiled_method_load_event
|
||||
// new method for install_code() path
|
||||
// Transfer information from compilation to jvmti
|
||||
void nmethod::post_compiled_method_load_event(JvmtiThreadState* state) {
|
||||
|
||||
// Don't post this nmethod load event if it is already dying
|
||||
// because the sweeper might already be deleting this nmethod.
|
||||
{
|
||||
MutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
|
||||
// When the nmethod is acquired from the CodeCache iterator, it can racingly become zombie
|
||||
// before this code is called. Filter them out here under the CompiledMethod_lock.
|
||||
if (!is_alive()) {
|
||||
return;
|
||||
}
|
||||
// As for is_alive() nmethods, we also don't want them to racingly become zombie once we
|
||||
// release this lock, so we check that this is not going to be the case.
|
||||
if (is_not_entrant() && can_convert_to_zombie()) {
|
||||
return;
|
||||
}
|
||||
// Ensure the sweeper can't collect this nmethod until it become "active" with JvmtiThreadState::nmethods_do.
|
||||
mark_as_seen_on_stack();
|
||||
}
|
||||
|
||||
// This is a bad time for a safepoint. We don't want
|
||||
// this nmethod to get unloaded while we're queueing the event.
|
||||
NoSafepointVerifier nsv;
|
||||
@ -1744,37 +1504,19 @@ void nmethod::post_compiled_method_load_event(JvmtiThreadState* state) {
|
||||
}
|
||||
|
||||
void nmethod::post_compiled_method_unload() {
|
||||
if (unload_reported()) {
|
||||
// During unloading we transition to unloaded and then to zombie
|
||||
// and the unloading is reported during the first transition.
|
||||
return;
|
||||
}
|
||||
|
||||
assert(_method != NULL && !is_unloaded(), "just checking");
|
||||
assert(_method != NULL, "just checking");
|
||||
DTRACE_METHOD_UNLOAD_PROBE(method());
|
||||
|
||||
// If a JVMTI agent has enabled the CompiledMethodUnload event then
|
||||
// post the event. Sometime later this nmethod will be made a zombie
|
||||
// by the sweeper but the Method* will not be valid at that point.
|
||||
// The jmethodID is a weak reference to the Method* so if
|
||||
// it's being unloaded there's no way to look it up since the weak
|
||||
// ref will have been cleared.
|
||||
// post the event. The Method* will not be valid when this is freed.
|
||||
|
||||
// Don't bother posting the unload if the load event wasn't posted.
|
||||
if (load_reported() && JvmtiExport::should_post_compiled_method_unload()) {
|
||||
assert(!unload_reported(), "already unloaded");
|
||||
JvmtiDeferredEvent event =
|
||||
JvmtiDeferredEvent::compiled_method_unload_event(
|
||||
method()->jmethod_id(), insts_begin());
|
||||
ServiceThread::enqueue_deferred_event(&event);
|
||||
}
|
||||
|
||||
// The JVMTI CompiledMethodUnload event can be enabled or disabled at
|
||||
// any time. As the nmethod is being unloaded now we mark it has
|
||||
// having the unload event reported - this will ensure that we don't
|
||||
// attempt to report the event in the unlikely scenario where the
|
||||
// event is enabled at the time the nmethod is made a zombie.
|
||||
set_unload_reported();
|
||||
}
|
||||
|
||||
// Iterate over metadata calling this function. Used by RedefineClasses
|
||||
@ -1824,8 +1566,40 @@ void nmethod::metadata_do(MetadataClosure* f) {
|
||||
if (_method != NULL) f->do_metadata(_method);
|
||||
}
|
||||
|
||||
// Heuristic for nuking nmethods even though their oops are live.
|
||||
// Main purpose is to reduce code cache pressure and get rid of
|
||||
// nmethods that don't seem to be all that relevant any longer.
|
||||
bool nmethod::is_cold() {
|
||||
if (!MethodFlushing || is_native_method() || is_not_installed()) {
|
||||
// No heuristic unloading at all
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!is_maybe_on_stack() && is_not_entrant()) {
|
||||
// Not entrant nmethods that are not on any stack can just
|
||||
// be removed
|
||||
return true;
|
||||
}
|
||||
|
||||
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
|
||||
if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
|
||||
// On platforms that don't support nmethod entry barriers, we can't
|
||||
// trust the temporal aspect of the gc epochs. So we can't detect
|
||||
// cold nmethods on such platforms.
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!UseCodeCacheFlushing) {
|
||||
// Bail out if we don't heuristically remove nmethods
|
||||
return false;
|
||||
}
|
||||
|
||||
// Other code can be phased out more gradually after N GCs
|
||||
return CodeCache::previous_completed_gc_marking_cycle() > _gc_epoch + 2 * CodeCache::cold_gc_count();
|
||||
}
|
||||
|
||||
// The _is_unloading_state encodes a tuple comprising the unloading cycle
|
||||
// and the result of IsUnloadingBehaviour::is_unloading() fpr that cycle.
|
||||
// and the result of IsUnloadingBehaviour::is_unloading() for that cycle.
|
||||
// This is the bit layout of the _is_unloading_state byte: 00000CCU
|
||||
// CC refers to the cycle, which has 2 bits, and U refers to the result of
|
||||
// IsUnloadingBehaviour::is_unloading() for that unloading cycle.
|
||||
@ -1876,40 +1650,11 @@ bool nmethod::is_unloading() {
|
||||
return false;
|
||||
}
|
||||
|
||||
// The IsUnloadingBehaviour is responsible for checking if there are any dead
|
||||
// oops in the CompiledMethod, by calling oops_do on it.
|
||||
// The IsUnloadingBehaviour is responsible for calculating if the nmethod
|
||||
// should be unloaded. This can be either because there is a dead oop,
|
||||
// or because is_cold() heuristically determines it is time to unload.
|
||||
state_unloading_cycle = current_cycle;
|
||||
|
||||
if (is_zombie()) {
|
||||
// Zombies without calculated unloading epoch are never unloading due to GC.
|
||||
|
||||
// There are no races where a previously observed is_unloading() nmethod
|
||||
// suddenly becomes not is_unloading() due to here being observed as zombie.
|
||||
|
||||
// With STW unloading, all is_alive() && is_unloading() nmethods are unlinked
|
||||
// and unloaded in the safepoint. That makes races where an nmethod is first
|
||||
// observed as is_alive() && is_unloading() and subsequently observed as
|
||||
// is_zombie() impossible.
|
||||
|
||||
// With concurrent unloading, all references to is_unloading() nmethods are
|
||||
// first unlinked (e.g. IC caches and dependency contexts). Then a global
|
||||
// handshake operation is performed with all JavaThreads before finally
|
||||
// unloading the nmethods. The sweeper never converts is_alive() && is_unloading()
|
||||
// nmethods to zombies; it waits for them to become is_unloaded(). So before
|
||||
// the global handshake, it is impossible for is_unloading() nmethods to
|
||||
// racingly become is_zombie(). And is_unloading() is calculated for all is_alive()
|
||||
// nmethods before taking that global handshake, meaning that it will never
|
||||
// be recalculated after the handshake.
|
||||
|
||||
// After that global handshake, is_unloading() nmethods are only observable
|
||||
// to the iterators, and they will never trigger recomputation of the cached
|
||||
// is_unloading_state, and hence may not suffer from such races.
|
||||
|
||||
state_is_unloading = false;
|
||||
} else {
|
||||
state_is_unloading = IsUnloadingBehaviour::current()->is_unloading(this);
|
||||
}
|
||||
|
||||
state_is_unloading = IsUnloadingBehaviour::is_unloading(this);
|
||||
state = IsUnloadingState::create(state_is_unloading, state_unloading_cycle);
|
||||
|
||||
RawAccess<MO_RELAXED>::store(&_is_unloading_state, state);
|
||||
@ -1925,15 +1670,11 @@ void nmethod::clear_unloading_state() {
|
||||
|
||||
// This is called at the end of the strong tracing/marking phase of a
|
||||
// GC to unload an nmethod if it contains otherwise unreachable
|
||||
// oops.
|
||||
|
||||
// oops or is heuristically found to be not important.
|
||||
void nmethod::do_unloading(bool unloading_occurred) {
|
||||
// Make sure the oop's ready to receive visitors
|
||||
assert(!is_zombie() && !is_unloaded(),
|
||||
"should not call follow on zombie or unloaded nmethod");
|
||||
|
||||
if (is_unloading()) {
|
||||
make_unloaded();
|
||||
unlink();
|
||||
} else {
|
||||
guarantee(unload_nmethod_caches(unloading_occurred),
|
||||
"Should not need transition stubs");
|
||||
@ -1945,9 +1686,6 @@ void nmethod::do_unloading(bool unloading_occurred) {
|
||||
}
|
||||
|
||||
void nmethod::oops_do(OopClosure* f, bool allow_dead) {
|
||||
// make sure the oops ready to receive visitors
|
||||
assert(allow_dead || is_alive(), "should not call follow on dead nmethod: %d", _state);
|
||||
|
||||
// Prevent extra code cache walk for platforms that don't have immediate oops.
|
||||
if (relocInfo::mustIterateImmediateOopsInCode()) {
|
||||
RelocIterator iter(this, oops_reloc_begin());
|
||||
@ -1979,8 +1717,8 @@ void nmethod::follow_nmethod(OopIterateClosure* cl) {
|
||||
// Process oops in the nmethod
|
||||
oops_do(cl);
|
||||
|
||||
// CodeCache sweeper support
|
||||
mark_as_maybe_on_continuation();
|
||||
// CodeCache unloading support
|
||||
mark_as_maybe_on_stack();
|
||||
|
||||
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
|
||||
bs_nm->disarm(this);
|
||||
@ -2352,7 +2090,7 @@ void nmethod::check_all_dependencies(DepChange& changes) {
|
||||
|
||||
// Iterate over live nmethods and check dependencies of all nmethods that are not
|
||||
// marked for deoptimization. A particular dependency is only checked once.
|
||||
NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
|
||||
NMethodIterator iter(NMethodIterator::only_not_unloading);
|
||||
while(iter.next()) {
|
||||
nmethod* nm = iter.method();
|
||||
// Only notify for live nmethods
|
||||
@ -2406,51 +2144,11 @@ bool nmethod::is_dependent_on_method(Method* dependee) {
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
bool nmethod::is_patchable_at(address instr_addr) {
|
||||
assert(insts_contains(instr_addr), "wrong nmethod used");
|
||||
if (is_zombie()) {
|
||||
// a zombie may never be patched
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
void nmethod_init() {
|
||||
// make sure you didn't forget to adjust the filler fields
|
||||
assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
|
||||
}
|
||||
|
||||
|
||||
//-------------------------------------------------------------------------------------------
|
||||
|
||||
|
||||
// QQQ might we make this work from a frame??
|
||||
nmethodLocker::nmethodLocker(address pc) {
|
||||
CodeBlob* cb = CodeCache::find_blob(pc);
|
||||
guarantee(cb != NULL && cb->is_compiled(), "bad pc for a nmethod found");
|
||||
_nm = cb->as_compiled_method();
|
||||
lock_nmethod(_nm);
|
||||
}
|
||||
|
||||
// Only JvmtiDeferredEvent::compiled_method_unload_event()
|
||||
// should pass zombie_ok == true.
|
||||
void nmethodLocker::lock_nmethod(CompiledMethod* cm, bool zombie_ok) {
|
||||
if (cm == NULL) return;
|
||||
nmethod* nm = cm->as_nmethod();
|
||||
Atomic::inc(&nm->_lock_count);
|
||||
assert(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method: %p", nm);
|
||||
}
|
||||
|
||||
void nmethodLocker::unlock_nmethod(CompiledMethod* cm) {
|
||||
if (cm == NULL) return;
|
||||
nmethod* nm = cm->as_nmethod();
|
||||
Atomic::dec(&nm->_lock_count);
|
||||
assert(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
|
||||
}
|
||||
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// Verification
|
||||
|
||||
@ -2486,11 +2184,7 @@ class VerifyMetadataClosure: public MetadataClosure {
|
||||
|
||||
|
||||
void nmethod::verify() {
|
||||
|
||||
// Hmm. OSR methods can be deopted but not marked as zombie or not_entrant
|
||||
// seems odd.
|
||||
|
||||
if (is_zombie() || is_not_entrant() || is_unloaded())
|
||||
if (is_not_entrant())
|
||||
return;
|
||||
|
||||
// Make sure all the entry points are correctly aligned for patching.
|
||||
@ -3551,7 +3245,7 @@ public:
|
||||
}
|
||||
|
||||
virtual void verify_resolve_call(address dest) const {
|
||||
CodeBlob* db = CodeCache::find_blob_unsafe(dest);
|
||||
CodeBlob* db = CodeCache::find_blob(dest);
|
||||
assert(db != NULL && !db->is_adapter_blob(), "must use stub!");
|
||||
}
@ -27,6 +27,7 @@

#include "code/compiledMethod.hpp"

class CompileTask;
class DepChange;
class DirectiveSet;
class DebugInformationRecorder;

@ -66,7 +67,6 @@ class JVMCINMethodData;

class nmethod : public CompiledMethod {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class NMethodSweeper;
  friend class CodeCache;  // scavengable oops
  friend class JVMCINMethodData;

@ -74,13 +74,6 @@ class nmethod : public CompiledMethod {

  uint64_t _gc_epoch;

  // not_entrant method removal. Each mark_sweep pass will update
  // this mark to current sweep invocation count if it is seen on the
  // stack. An not_entrant method can be removed when there are no
  // more activations, i.e., when the _stack_traversal_mark is less than
  // current sweep traversal index.
  volatile int64_t _stack_traversal_mark;

  // To support simple linked-list chaining of nmethods:
  nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head

@ -203,6 +196,8 @@ class nmethod : public CompiledMethod {
  address _verified_entry_point; // entry point without class check
  address _osr_entry_point;      // entry point for on stack replacement

  nmethod* _unlinked_next;

  // Shared fields for all nmethod's
  int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method

@ -240,19 +235,6 @@ class nmethod : public CompiledMethod {
  RTMState _rtm_state;
#endif

  // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
  // and is not made into a zombie. However, once the nmethod is made into
  // a zombie, it will be locked one final time if CompiledMethodUnload
  // event processing needs to be done.
  volatile jint _lock_count;

  // The _hotness_counter indicates the hotness of a method. The higher
  // the value the hotter the method. The hotness counter of a nmethod is
  // set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method
  // is active while stack scanning (do_stack_scanning()). The hotness
  // counter is decreased (by 1) while sweeping.
  int _hotness_counter;

  // These are used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock. They are
  // needed because there is no debug information for compiled native

@ -273,17 +255,10 @@ class nmethod : public CompiledMethod {
  bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock)

  // used by jvmti to track if an event has been posted for this nmethod.
  bool _unload_reported;
  bool _load_reported;

  // Protected by CompiledMethod_lock
  volatile signed char _state; // {not_installed, in_use, not_entrant, zombie, unloaded}

#ifdef ASSERT
  bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
#endif

  friend class nmethodLocker;
  volatile signed char _state; // {not_installed, in_use, not_used, not_entrant}

  // For native wrappers
  nmethod(Method* method,

@ -330,7 +305,6 @@ class nmethod : public CompiledMethod {

  // Returns true if this thread changed the state of the nmethod or
  // false if another thread performed the transition.
  bool make_not_entrant_or_zombie(int state);
  bool make_entrant() { Unimplemented(); return false; }
  void inc_decompile_count();

@ -439,10 +413,6 @@ class nmethod : public CompiledMethod {

  int total_size () const;

  void dec_hotness_counter() { _hotness_counter--; }
  void set_hotness_counter(int val) { _hotness_counter = val; }
  int hotness_counter() const { return _hotness_counter; }

  // Containment
  bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
  bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); }

@ -456,15 +426,17 @@ class nmethod : public CompiledMethod {
  // flag accessing and manipulation
  bool is_not_installed() const { return _state == not_installed; }
  bool is_in_use() const { return _state <= in_use; }
  bool is_alive() const { return _state < unloaded; }
  bool is_not_entrant() const { return _state == not_entrant; }
  bool is_zombie() const { return _state == zombie; }
  bool is_unloaded() const { return _state == unloaded; }

  void clear_unloading_state();
  // Heuristically deduce an nmethod isn't worth keeping around
  bool is_cold();
  virtual bool is_unloading();
  virtual void do_unloading(bool unloading_occurred);

  nmethod* unlinked_next() const { return _unlinked_next; }
  void set_unlinked_next(nmethod* next) { _unlinked_next = next; }

#if INCLUDE_RTM_OPT
  // rtm state accessing and manipulating
  RTMState rtm_state() const { return _rtm_state; }

@ -478,22 +450,16 @@ class nmethod : public CompiledMethod {
  // alive. It is used when an uncommon trap happens. Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool make_not_entrant() {
    assert(!method()->is_method_handle_intrinsic(), "Cannot make MH intrinsic not entrant");
    return make_not_entrant_or_zombie(not_entrant);
  }
  bool make_not_entrant();
  bool make_not_used() { return make_not_entrant(); }
  bool make_zombie() { return make_not_entrant_or_zombie(zombie); }

  int get_state() const {
    return _state;
  }

  void make_unloaded();

  bool has_dependencies() { return dependencies_size() != 0; }
  void print_dependencies() PRODUCT_RETURN;
  void flush_dependencies(bool delete_immediately);
  void flush_dependencies();
  bool has_flushed_dependencies() { return _has_flushed_dependencies; }
  void set_has_flushed_dependencies() {
    assert(!has_flushed_dependencies(), "should only happen once");

@ -511,7 +477,6 @@ class nmethod : public CompiledMethod {
  oop* oop_addr_at(int index) const { // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
    assert(!_oops_are_stale, "oops are stale");
    return &oops_begin()[index - 1];
  }

@ -536,10 +501,6 @@ public:
  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
  void fix_oop_relocations() { fix_oop_relocations(NULL, NULL, false); }

  // Sweeper support
  int64_t stack_traversal_mark() { return _stack_traversal_mark; }
  void set_stack_traversal_mark(int64_t l) { _stack_traversal_mark = l; }

  // On-stack replacement support
  int osr_entry_bci() const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
  address osr_entry() const { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }

@ -550,24 +511,15 @@ public:
  // Verify calls to dead methods have been cleaned.
  void verify_clean_inline_caches();

  // unlink and deallocate this nmethod
  // Only NMethodSweeper class is expected to use this. NMethodSweeper is not
  // expected to use any other private methods/data in this class.
  // Unlink this nmethod from the system
  void unlink();

 protected:
  // Deallocate this nmethod - called by the GC
  void flush();

 public:
  // When true is returned, it is unsafe to remove this nmethod even if
  // it is a zombie, since the VM or the ServiceThread might still be
  // using it.
  bool is_locked_by_vm() const { return _lock_count >0; }

  // See comment at definition of _last_seen_on_stack
  void mark_as_seen_on_stack();
  void mark_as_maybe_on_continuation();
  bool is_maybe_on_continuation_stack();
  bool can_convert_to_zombie();
  void mark_as_maybe_on_stack();
  bool is_maybe_on_stack();

  // Evolution support. We make old (discarded) compiled methods point to new Method*s.
  void set_method(Method* method) { _method = method; }

@ -625,9 +577,7 @@ public:

  address* orig_pc_addr(const frame* fr);

  // used by jvmti to track if the load and unload events has been reported
  bool unload_reported() const { return _unload_reported; }
  void set_unload_reported() { _unload_reported = true; }
  // used by jvmti to track if the load events has been reported
  bool load_reported() const { return _load_reported; }
  void set_load_reported() { _load_reported = true; }

@ -638,6 +588,9 @@ public:

  int orig_pc_offset() { return _orig_pc_offset; }

  // Post successful compilation
  void post_compiled_method(CompileTask* task);

  // jvmti support:
  void post_compiled_method_load_event(JvmtiThreadState* state = NULL);

@ -682,7 +635,7 @@ public:
  void print_calls(outputStream* st) PRODUCT_RETURN;
  static void print_statistics() PRODUCT_RETURN;

  void maybe_print_nmethod(DirectiveSet* directive);
  void maybe_print_nmethod(const DirectiveSet* directive);
  void print_nmethod(bool print_code);

  // need to re-define this from CodeBlob else the overload hides it

@ -730,9 +683,6 @@ public:
  // corresponds to the given method as well.
  virtual bool is_dependent_on_method(Method* dependee);

  // is it ok to patch at address?
  bool is_patchable_at(address instr_address);

  // JVMTI's GetLocalInstance() support
  ByteSize native_receiver_sp_offset() {
    return _native_receiver_sp_offset;

@ -760,50 +710,4 @@ public:
  void finalize_relocations();
};

// Locks an nmethod so its code will not get removed and it will not
// be made into a zombie, even if it is a not_entrant method. After the
// nmethod becomes a zombie, if CompiledMethodUnload event processing
// needs to be done, then lock_nmethod() is used directly to keep the
// generated code from being reused too early.
class nmethodLocker : public StackObj {
  CompiledMethod* _nm;

 public:

  // note: nm can be NULL
  // Only JvmtiDeferredEvent::compiled_method_unload_event()
  // should pass zombie_ok == true.
  static void lock_nmethod(CompiledMethod* nm, bool zombie_ok = false);
  static void unlock_nmethod(CompiledMethod* nm); // (ditto)

  nmethodLocker(address pc); // derive nm from pc
  nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
  nmethodLocker(CompiledMethod *nm) {
    _nm = nm;
    lock(_nm);
  }

  static void lock(CompiledMethod* method, bool zombie_ok = false) {
    if (method == NULL) return;
    lock_nmethod(method, zombie_ok);
  }

  static void unlock(CompiledMethod* method) {
    if (method == NULL) return;
    unlock_nmethod(method);
  }

  nmethodLocker() { _nm = NULL; }
  ~nmethodLocker() {
    unlock(_nm);
  }

  CompiledMethod* code() { return _nm; }
  void set_code(CompiledMethod* new_nm, bool zombie_ok = false) {
    unlock(_nm); // note: This works even if _nm==new_nm.
    _nm = new_nm;
    lock(_nm, zombie_ok);
  }
};

#endif // SHARE_CODE_NMETHOD_HPP
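Taken together, the header changes above replace the sweeper-driven life cycle (zombie state, hotness counter, stack traversal mark, nmethodLocker) with GC-driven unloading. The sketch below is illustrative only — the driver function and exact call ordering are assumptions, not code from this patch — and merely strings together members declared above (is_cold(), is_unloading(), unlink(), flush()) to show how a dead or cold nmethod is now expected to be reclaimed:

// Hypothetical driver, for illustration; the real logic lives in the GC and code cache.
void reclaim_if_dead_or_cold(nmethod* nm) {
  // is_unloading() is true when the nmethod references dead oops, or when is_cold()
  // decides it is no longer worth keeping (see the IsUnloadingBehaviour hunks below).
  if (nm->is_unloading()) {
    nm->unlink();   // "Unlink this nmethod from the system"
    nm->flush();    // "Deallocate this nmethod - called by the GC"
  }
}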
src/hotspot/share/compiler/compilationLog.cpp (new file, 75 lines)

@ -0,0 +1,75 @@
/*
 * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "compiler/compilationLog.hpp"
#include "compiler/compileTask.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/thread.hpp"
#include "utilities/ostream.hpp"

CompilationLog* CompilationLog::_log;

CompilationLog::CompilationLog() : StringEventLog("Compilation events", "jit") {
}

void CompilationLog::log_compile(JavaThread* thread, CompileTask* task) {
  StringLogMessage lm;
  stringStream sstr(lm.buffer(), lm.size());
  // msg.time_stamp().update_to(tty->time_stamp().ticks());
  task->print(&sstr, NULL, true, false);
  log(thread, "%s", (const char*)lm);
}

void CompilationLog::log_nmethod(JavaThread* thread, nmethod* nm) {
  log(thread, "nmethod %d%s " INTPTR_FORMAT " code [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",
      nm->compile_id(), nm->is_osr_method() ? "%" : "",
      p2i(nm), p2i(nm->code_begin()), p2i(nm->code_end()));
}

void CompilationLog::log_failure(JavaThread* thread, CompileTask* task, const char* reason, const char* retry_message) {
  StringLogMessage lm;
  lm.print("%4d COMPILE SKIPPED: %s", task->compile_id(), reason);
  if (retry_message != NULL) {
    lm.append(" (%s)", retry_message);
  }
  lm.print("\n");
  log(thread, "%s", (const char*)lm);
}

void CompilationLog::log_metaspace_failure(const char* reason) {
  // Note: This method can be called from non-Java/compiler threads to
  // log the global metaspace failure that might affect profiling.
  ResourceMark rm;
  StringLogMessage lm;
  lm.print("%4d COMPILE PROFILING SKIPPED: %s", -1, reason);
  lm.print("\n");
  log(Thread::current(), "%s", (const char*)lm);
}

void CompilationLog::init() {
  _log = new CompilationLog();
}
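CompilationLog is a lazily created singleton: compileBroker_init() only calls CompilationLog::init() when LogEvents is enabled, so every call site null-checks the accessor. A minimal usage sketch mirroring the call sites updated in the compileBroker.cpp hunks further below:

// Sketch of a call site (the real ones appear in compileBroker.cpp later in this diff).
void log_compile_event(JavaThread* thread, CompileTask* task) {
  if (CompilationLog::log() != NULL) {  // NULL unless LogEvents created the log
    CompilationLog::log()->log_compile(thread, task);
  }
}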
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -22,20 +22,31 @@
 *
 */

package sun.jvm.hotspot.runtime;
#ifndef SHARE_COMPILER_COMPILATIONLOG_HPP
#define SHARE_COMPILER_COMPILATIONLOG_HPP

import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.types.*;
#include "utilities/events.hpp"

public class CodeCacheSweeperThread extends JavaThread {
  public CodeCacheSweeperThread(Address addr) {
    super(addr);
  }
class CompileTask;
class JavaThread;
class nmethod;

  public boolean isJavaThread() { return false; }
  public boolean isHiddenFromExternalView() { return true; }
  public boolean isCodeCacheSweeperThread() { return true; }
class CompilationLog : public StringEventLog {
 private:
  static CompilationLog* _log;

}
  CompilationLog();

 public:

  void log_compile(JavaThread* thread, CompileTask* task);
  void log_nmethod(JavaThread* thread, nmethod* nm);
  void log_failure(JavaThread* thread, CompileTask* task, const char* reason, const char* retry_message);
  void log_metaspace_failure(const char* reason);

  static void init();
  static CompilationLog* log() { return _log; }
  using StringEventLog::log;
};

#endif // SHARE_COMPILER_COMPILATIONLOG_HPP
@ -31,6 +31,7 @@
#include "code/codeCache.hpp"
#include "code/codeHeapState.hpp"
#include "code/dependencyContext.hpp"
#include "compiler/compilationLog.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"

@ -63,7 +64,6 @@
#include "runtime/perfData.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/timerTrace.hpp"

@ -194,53 +194,9 @@ CompilerStatistics CompileBroker::_stats_per_level[CompLevel_full_optimization];
CompileQueue* CompileBroker::_c2_compile_queue = NULL;
CompileQueue* CompileBroker::_c1_compile_queue = NULL;

class CompilationLog : public StringEventLog {
 public:
  CompilationLog() : StringEventLog("Compilation events", "jit") {
  }

  void log_compile(JavaThread* thread, CompileTask* task) {
    StringLogMessage lm;
    stringStream sstr(lm.buffer(), lm.size());
    // msg.time_stamp().update_to(tty->time_stamp().ticks());
    task->print(&sstr, NULL, true, false);
    log(thread, "%s", (const char*)lm);
  }

  void log_nmethod(JavaThread* thread, nmethod* nm) {
    log(thread, "nmethod %d%s " INTPTR_FORMAT " code [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",
        nm->compile_id(), nm->is_osr_method() ? "%" : "",
        p2i(nm), p2i(nm->code_begin()), p2i(nm->code_end()));
  }

  void log_failure(JavaThread* thread, CompileTask* task, const char* reason, const char* retry_message) {
    StringLogMessage lm;
    lm.print("%4d COMPILE SKIPPED: %s", task->compile_id(), reason);
    if (retry_message != NULL) {
      lm.append(" (%s)", retry_message);
    }
    lm.print("\n");
    log(thread, "%s", (const char*)lm);
  }

  void log_metaspace_failure(const char* reason) {
    // Note: This method can be called from non-Java/compiler threads to
    // log the global metaspace failure that might affect profiling.
    ResourceMark rm;
    StringLogMessage lm;
    lm.print("%4d COMPILE PROFILING SKIPPED: %s", -1, reason);
    lm.print("\n");
    log(Thread::current(), "%s", (const char*)lm);
  }
};

static CompilationLog* _compilation_log = NULL;

bool compileBroker_init() {
  if (LogEvents) {
    _compilation_log = new CompilationLog();
    CompilationLog::init();
  }

  // init directives stack, adding default directive

@ -269,7 +225,6 @@ CompileTaskWrapper::~CompileTaskWrapper() {
  CompileLog* log = thread->log();
  if (log != NULL && !task->is_unloaded()) task->log_task_done(log);
  thread->set_task(NULL);
  task->set_code_handle(NULL);
  thread->set_env(NULL);
  if (task->is_blocking()) {
    bool free_task = false;

@ -452,10 +407,7 @@ CompileTask* CompileQueue::get(CompilerThread* thread) {

    // If there are no compilation tasks and we can compile new jobs
    // (i.e., there is enough free space in the code cache) there is
    // no need to invoke the sweeper. As a result, the hotness of methods
    // remains unchanged. This behavior is desired, since we want to keep
    // the stable state, i.e., we do not want to evict methods from the
    // code cache if it is unnecessary.
    // no need to invoke the GC.
    // We need a timed wait here, since compiler threads can exit if compilation
    // is disabled forever. We use 5 seconds wait time; the exiting of compiler threads
    // is not critical and we do not want idle compiler threads to wake up too often.

@ -699,8 +651,8 @@ void CompileBroker::compilation_init_phase1(JavaThread* THREAD) {
  }
#endif // INCLUDE_JVMCI

  // Start the compiler thread(s) and the sweeper thread
  init_compiler_sweeper_threads();
  // Start the compiler thread(s)
  init_compiler_threads();
  // totalTime performance counter is always created as it is required
  // by the implementation of java.lang.management.CompilationMXBean.
  {

@ -828,7 +780,7 @@ public:
};

// Entry for DeoptimizeObjectsALotThread. The threads are started in
// CompileBroker::init_compiler_sweeper_threads() iff DeoptimizeObjectsALot is enabled
// CompileBroker::init_compiler_threads() iff DeoptimizeObjectsALot is enabled
void DeoptimizeObjectsALotThread::deopt_objs_alot_thread_entry(JavaThread* thread, TRAPS) {
  DeoptimizeObjectsALotThread* dt = ((DeoptimizeObjectsALotThread*) thread);
  bool enter_single_loop;

@ -891,9 +843,6 @@ JavaThread* CompileBroker::make_thread(ThreadType type, jobject thread_handle, C
      new_thread = new CompilerThread(queue, counters);
    }
    break;
    case sweeper_t:
      new_thread = new CodeCacheSweeperThread();
      break;
#if defined(ASSERT) && COMPILER2_OR_JVMCI
    case deoptimizer_t:
      new_thread = new DeoptimizeObjectsALotThread();

@ -957,10 +906,7 @@ JavaThread* CompileBroker::make_thread(ThreadType type, jobject thread_handle, C
}

void CompileBroker::init_compiler_sweeper_threads() {
  NMethodSweeper::set_sweep_threshold_bytes(static_cast<size_t>(SweeperThreshold * ReservedCodeCacheSize / 100.0));
  log_info(codecache, sweep)("Sweeper threshold: " SIZE_FORMAT " bytes", NMethodSweeper::sweep_threshold_bytes());

void CompileBroker::init_compiler_threads() {
  // Ensure any exceptions lead to vm_exit_during_initialization.
  EXCEPTION_MARK;
#if !defined(ZERO)

@ -1032,13 +978,6 @@ void CompileBroker::init_compiler_sweeper_threads() {
    PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, _c1_count + _c2_count, CHECK);
  }

  if (MethodFlushing) {
    // Initialize the sweeper thread
    Handle thread_oop = create_thread_oop("Sweeper thread", CHECK);
    jobject thread_handle = JNIHandles::make_local(THREAD, thread_oop());
    make_thread(sweeper_t, thread_handle, NULL, NULL, THREAD);
  }

#if defined(ASSERT) && COMPILER2_OR_JVMCI
  if (DeoptimizeObjectsALot) {
    // Initialize and start the object deoptimizer threads

@ -1756,7 +1695,6 @@ void CompileBroker::wait_for_completion(CompileTask* task) {
    // It is harmless to check this status without the lock, because
    // completion is a stable property (until the task object is recycled).
    assert(task->is_complete(), "Compilation should have completed");
    assert(task->code_handle() == NULL, "must be reset");

    // By convention, the waiter is responsible for recycling a
    // blocking CompileTask. Since there is only one waiter ever

@ -1970,8 +1908,6 @@ void CompileBroker::compiler_thread_loop() {
    // CompileTaskWrapper also keeps the Method* from being deallocated if redefinition
    // occurs after fetching the compile task off the queue.
    CompileTaskWrapper ctw(task);
    nmethodLocker result_handle; // (handle for the nmethod produced by this task)
    task->set_code_handle(&result_handle);
    methodHandle method(thread, task->method());

    // Never compile a method if breakpoints are present in it

@ -2046,8 +1982,8 @@ void CompileBroker::init_compiler_thread_log() {
void CompileBroker::log_metaspace_failure() {
  const char* message = "some methods may not be compiled because metaspace "
                        "is out of memory";
  if (_compilation_log != NULL) {
    _compilation_log->log_metaspace_failure(message);
  if (CompilationLog::log() != NULL) {
    CompilationLog::log()->log_metaspace_failure(message);
  }
  if (PrintCompilation) {
    tty->print_cr("COMPILE PROFILING SKIPPED: %s", message);

@ -2123,26 +2059,16 @@ static void codecache_print(outputStream* out, bool detailed) {
  }
}

void CompileBroker::post_compile(CompilerThread* thread, CompileTask* task, bool success, ciEnv* ci_env,
                                 int compilable, const char* failure_reason) {
  if (success) {
    task->mark_success();
    if (ci_env != NULL) {
      task->set_num_inlined_bytecodes(ci_env->num_inlined_bytecodes());
    }
    if (_compilation_log != NULL) {
      nmethod* code = task->code();
      if (code != NULL) {
        _compilation_log->log_nmethod(thread, code);
      }
    }
  } else if (AbortVMOnCompilationFailure) {
    if (compilable == ciEnv::MethodCompilable_not_at_tier) {
      fatal("Not compilable at tier %d: %s", task->comp_level(), failure_reason);
    }
    if (compilable == ciEnv::MethodCompilable_never) {
      fatal("Never compilable: %s", failure_reason);
    }
void CompileBroker::handle_compile_error(CompilerThread* thread, CompileTask* task, ciEnv* ci_env,
                                         int compilable, const char* failure_reason) {
  if (!AbortVMOnCompilationFailure) {
    return;
  }
  if (compilable == ciEnv::MethodCompilable_not_at_tier) {
    fatal("Not compilable at tier %d: %s", task->comp_level(), failure_reason);
  }
  if (compilable == ciEnv::MethodCompilable_never) {
    fatal("Never compilable: %s", failure_reason);
  }
}
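post_compile() used to mix success bookkeeping with the AbortVMOnCompilationFailure handling; its replacement handle_compile_error() covers only the failure path. The call sites later in this file change accordingly; schematically:

// Before: one unconditional call after every compilation attempt:
//   post_compile(thread, task, task->code() != NULL, NULL, compilable, failure_reason);
// After: only failures are routed here; success bookkeeping happens when the nmethod
// is registered (nmethod::post_compiled_method) and via CompilationLog.
if (!task->is_success()) {
  handle_compile_error(thread, task, NULL, compilable, failure_reason);
}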
@ -2155,7 +2081,7 @@ static void post_compilation_event(EventCompilation& event, CompileTask* task) {
                         task->comp_level(),
                         task->is_success(),
                         task->osr_bci() != CompileBroker::standard_entry_bci,
                         (task->code() == NULL) ? 0 : task->code()->total_size(),
                         task->nm_total_size(),
                         task->num_inlined_bytecodes());
}

@ -2179,8 +2105,8 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
  CompilerThread* thread = CompilerThread::current();
  ResourceMark rm(thread);

  if (LogEvents) {
    _compilation_log->log_compile(thread, task);
  if (CompilationLog::log() != NULL) {
    CompilationLog::log()->log_compile(thread, task);
  }

  // Common flags.

@ -2203,6 +2129,7 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {

  // Look up matching directives
  directive = DirectivesStack::getMatchingDirective(method, comp);
  task->set_directive(directive);

  // Update compile information when using perfdata.
  if (UsePerfData) {

@ -2255,11 +2182,13 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
        retry_message = "not retryable";
        compilable = ciEnv::MethodCompilable_not_at_tier;
      }
      if (task->code() == NULL) {
      if (!task->is_success()) {
        assert(failure_reason != NULL, "must specify failure_reason");
      }
    }
    post_compile(thread, task, task->code() != NULL, NULL, compilable, failure_reason);
    if (!task->is_success()) {
      handle_compile_error(thread, task, NULL, compilable, failure_reason);
    }
    if (event.should_commit()) {
      post_compilation_event(event, task);
    }

@ -2320,7 +2249,9 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
      }
    }

    if (!ci_env.failing() && task->code() == NULL) {
    DirectivesStack::release(directive);

    if (!ci_env.failing() && !task->is_success()) {
      //assert(false, "compiler should always document failure");
      // The compiler elected, without comment, not to register a result.
      // Do not attempt further compilations of this method.

@ -2336,7 +2267,9 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
      ci_env.report_failure(failure_reason);
    }

    post_compile(thread, task, !ci_env.failing(), &ci_env, compilable, failure_reason);
    if (ci_env.failing()) {
      handle_compile_error(thread, task, &ci_env, compilable, failure_reason);
    }
    if (event.should_commit()) {
      post_compilation_event(event, task);
    }

@ -2344,8 +2277,8 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {

  if (failure_reason != NULL) {
    task->set_failure_reason(failure_reason, failure_reason_on_C_heap);
    if (_compilation_log != NULL) {
      _compilation_log->log_failure(thread, task, failure_reason, retry_message);
    if (CompilationLog::log() != NULL) {
      CompilationLog::log()->log_failure(thread, task, failure_reason, retry_message);
    }
    if (PrintCompilation) {
      FormatBufferResource msg = retry_message != NULL ?

@ -2361,18 +2294,12 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {

  collect_statistics(thread, time, task);

  nmethod* nm = task->code();
  if (nm != NULL) {
    nm->maybe_print_nmethod(directive);
  }
  DirectivesStack::release(directive);

  if (PrintCompilation && PrintCompilation2) {
    tty->print("%7d ", (int) tty->time_stamp().milliseconds()); // print timestamp
    tty->print("%4d ", compile_id); // print compilation number
    tty->print("%s ", (is_osr ? "%" : " "));
    if (task->code() != NULL) {
      tty->print("size: %d(%d) ", task->code()->total_size(), task->code()->insts_size());
    if (task->is_success()) {
      tty->print("size: %d(%d) ", task->nm_total_size(), task->nm_insts_size());
    }
    tty->print_cr("time: %d inlined: %d bytes", (int)time.milliseconds(), task->num_inlined_bytecodes());
  }

@ -2445,7 +2372,7 @@ void CompileBroker::handle_full_code_cache(CodeBlobType code_blob_type) {
  if (UseCodeCacheFlushing) {
    // Since code cache is full, immediately stop new compiles
    if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
      NMethodSweeper::log_sweep("disable_compiler");
      log_info(codecache)("Code cache is full - disabling compilation");
    }
  } else {
    disable_compilation_forever();

@ -2512,10 +2439,8 @@ void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time
  uint compile_id = task->compile_id();
  bool is_osr = (task->osr_bci() != standard_entry_bci);
  const int comp_level = task->comp_level();
  nmethod* code = task->code();
  CompilerCounters* counters = thread->counters();

  assert(code == NULL || code->is_locked_by_vm(), "will survive the MutexLocker");
  MutexLocker locker(CompileStatistics_lock);

  // _perf variables are production performance counters which are

@ -2534,7 +2459,7 @@ void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time
        _perf_total_bailout_count->inc();
      }
      _t_bailedout_compilation.add(time);
    } else if (code == NULL) {
    } else if (!task->is_success()) {
      if (UsePerfData) {
        _perf_last_invalidated_method->set_value(counters->current_method());
        _perf_last_invalidated_type->set_value(counters->compile_type());

@ -2568,8 +2493,8 @@ void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time
        } else {
          stats->_standard.update(time, bytes_compiled);
        }
        stats->_nmethods_size += code->total_size();
        stats->_nmethods_code_size += code->insts_size();
        stats->_nmethods_size += task->nm_total_size();
        stats->_nmethods_code_size += task->nm_insts_size();
      } else {
        assert(false, "CompilerStatistics object does not exist for compilation level %d", comp_level);
      }

@ -2583,8 +2508,8 @@ void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time
        } else {
          stats->_standard.update(time, bytes_compiled);
        }
        stats->_nmethods_size += code->total_size();
        stats->_nmethods_code_size += code->insts_size();
        stats->_nmethods_size += task->nm_total_size();
        stats->_nmethods_code_size += task->nm_insts_size();
      } else { // if (!comp)
        assert(false, "Compiler object must exist");
      }

@ -2613,13 +2538,13 @@ void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time
    }

    // Collect counts of successful compilations
    _sum_nmethod_size += code->total_size();
    _sum_nmethod_code_size += code->insts_size();
    _sum_nmethod_size += task->nm_total_size();
    _sum_nmethod_code_size += task->nm_insts_size();
    _total_compile_count++;

    if (UsePerfData) {
      _perf_sum_nmethod_size->inc( code->total_size());
      _perf_sum_nmethod_code_size->inc(code->insts_size());
      _perf_sum_nmethod_size->inc( task->nm_total_size());
      _perf_sum_nmethod_code_size->inc(task->nm_insts_size());
      _perf_total_compile_count->inc();
    }

@ -2777,14 +2702,6 @@ void CompileBroker::print_info(outputStream *out) {
  out->print_cr(" Committed size : " SIZE_FORMAT_W(7) " KB", CodeCache::capacity() / K);
  out->print_cr(" Unallocated capacity : " SIZE_FORMAT_W(7) " KB", CodeCache::unallocated_capacity() / K);
  out->cr();

  out->cr();
  out->print_cr("CodeCache cleaning overview");
  out->print_cr("--------------------------------------------------------");
  out->cr();
  NMethodSweeper::print(out);
  out->print_cr("--------------------------------------------------------");
  out->cr();
}

// Note: tty_lock must not be held upon entry to this function.
@ -38,7 +38,6 @@
#endif

class nmethod;
class nmethodLocker;

// CompilerCounters
//

@ -230,13 +229,12 @@ class CompileBroker: AllStatic {

  enum ThreadType {
    compiler_t,
    sweeper_t,
    deoptimizer_t
  };

  static Handle create_thread_oop(const char* name, TRAPS);
  static JavaThread* make_thread(ThreadType type, jobject thread_oop, CompileQueue* queue, AbstractCompiler* comp, JavaThread* THREAD);
  static void init_compiler_sweeper_threads();
  static void init_compiler_threads();
  static void possibly_add_compiler_threads(JavaThread* THREAD);
  static bool compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level, bool excluded);

@ -255,8 +253,8 @@ class CompileBroker: AllStatic {
#endif

  static void invoke_compiler_on_method(CompileTask* task);
  static void post_compile(CompilerThread* thread, CompileTask* task, bool success, ciEnv* ci_env,
                           int compilable, const char* failure_reason);
  static void handle_compile_error(CompilerThread* thread, CompileTask* task, ciEnv* ci_env,
                                   int compilable, const char* failure_reason);
  static void update_compile_perf_data(CompilerThread *thread, const methodHandle& method, bool is_osr);

  static void collect_statistics(CompilerThread* thread, elapsedTimer time, CompileTask* task);
@ -65,7 +65,6 @@ CompileTask* CompileTask::allocate() {
void CompileTask::free(CompileTask* task) {
  MutexLocker locker(CompileTaskAlloc_lock);
  if (!task->is_free()) {
    task->set_code(NULL);
    assert(!task->lock()->is_locked(), "Should not be locked when freed");
    if ((task->_method_holder != NULL && JNIHandles::is_weak_global_handle(task->_method_holder)) ||
        (task->_hot_method_holder != NULL && JNIHandles::is_weak_global_handle(task->_hot_method_holder))) {

@ -110,7 +109,6 @@ void CompileTask::initialize(int compile_id,

  _is_complete = false;
  _is_success = false;
  _code_handle = NULL;

  _hot_method = NULL;
  _hot_method_holder = NULL;

@ -118,6 +116,10 @@ void CompileTask::initialize(int compile_id,
  _time_queued = os::elapsed_counter();
  _time_started = 0;
  _compile_reason = compile_reason;
  _nm_content_size = 0;
  _directive = NULL;
  _nm_insts_size = 0;
  _nm_total_size = 0;
  _failure_reason = NULL;
  _failure_reason_on_C_heap = false;

@ -161,25 +163,6 @@ CompileTask* CompileTask::select_for_compilation() {
  return this;
}

// ------------------------------------------------------------------
// CompileTask::code/set_code
//
nmethod* CompileTask::code() const {
  if (_code_handle == NULL) return NULL;
  CodeBlob *blob = _code_handle->code();
  if (blob != NULL) {
    return blob->as_nmethod();
  }
  return NULL;
}

void CompileTask::set_code(nmethod* nm) {
  if (_code_handle == NULL && nm == NULL) return;
  guarantee(_code_handle != NULL, "");
  _code_handle->set_code(nm);
  if (nm == NULL) _code_handle = NULL; // drop the handle also
}

void CompileTask::mark_on_stack() {
  if (is_unloaded()) {
    return;

@ -257,9 +240,6 @@ void CompileTask::print_impl(outputStream* st, Method* method, int compile_id, i
  }
  st->print("%4d ", compile_id); // print compilation number

  // For unloaded methods the transition to zombie occurs after the
  // method is cleared so it's impossible to report accurate
  // information for that case.
  bool is_synchronized = false;
  bool has_exception_handler = false;
  bool is_native = false;

@ -399,9 +379,8 @@ void CompileTask::log_task_done(CompileLog* log) {
  }

  // <task_done ... stamp='1.234'> </task>
  nmethod* nm = code();
  log->begin_elem("task_done success='%d' nmsize='%d' count='%d'",
                  _is_success, nm == NULL ? 0 : nm->content_size(),
                  _is_success, _nm_content_size,
                  method->invocation_count());
  int bec = method->backedge_count();
  if (bec != 0) log->print(" backedge_count='%d'", bec);
@ -31,6 +31,8 @@
#include "memory/allocation.hpp"
#include "utilities/xmlstream.hpp"

class DirectiveSet;

JVMCI_ONLY(class JVMCICompileState;)

// CompileTask

@ -72,35 +74,38 @@ class CompileTask : public CHeapObj<mtCompiler> {
  }

 private:
  static CompileTask* _task_free_list;
  Monitor* _lock;
  uint _compile_id;
  Method* _method;
  jobject _method_holder;
  int _osr_bci;
  bool _is_complete;
  bool _is_success;
  bool _is_blocking;
  static CompileTask* _task_free_list;
  Monitor* _lock;
  uint _compile_id;
  Method* _method;
  jobject _method_holder;
  int _osr_bci;
  bool _is_complete;
  bool _is_success;
  bool _is_blocking;
  CodeSection::csize_t _nm_content_size;
  CodeSection::csize_t _nm_total_size;
  CodeSection::csize_t _nm_insts_size;
  const DirectiveSet* _directive;
#if INCLUDE_JVMCI
  bool _has_waiter;
  bool _has_waiter;
  // Compilation state for a blocking JVMCI compilation
  JVMCICompileState* _blocking_jvmci_compile_state;
  JVMCICompileState* _blocking_jvmci_compile_state;
#endif
  int _comp_level;
  int _num_inlined_bytecodes;
  nmethodLocker* _code_handle; // holder of eventual result
  CompileTask* _next, *_prev;
  bool _is_free;
  int _comp_level;
  int _num_inlined_bytecodes;
  CompileTask* _next, *_prev;
  bool _is_free;
  // Fields used for logging why the compilation was initiated:
  jlong _time_queued; // time when task was enqueued
  jlong _time_started; // time when compilation started
  Method* _hot_method; // which method actually triggered this task
  jobject _hot_method_holder;
  int _hot_count; // information about its invocation counter
  CompileReason _compile_reason; // more info about the task
  const char* _failure_reason;
  jlong _time_queued; // time when task was enqueued
  jlong _time_started; // time when compilation started
  Method* _hot_method; // which method actually triggered this task
  jobject _hot_method_holder;
  int _hot_count; // information about its invocation counter
  CompileReason _compile_reason; // more info about the task
  const char* _failure_reason;
  // Specifies if _failure_reason is on the C heap.
  bool _failure_reason_on_C_heap;
  bool _failure_reason_on_C_heap;

 public:
  CompileTask() : _failure_reason(NULL), _failure_reason_on_C_heap(false) {

@ -122,6 +127,14 @@ class CompileTask : public CHeapObj<mtCompiler> {
  bool is_complete() const { return _is_complete; }
  bool is_blocking() const { return _is_blocking; }
  bool is_success() const { return _is_success; }
  void set_directive(const DirectiveSet* directive) { _directive = directive; }
  const DirectiveSet* directive() const { return _directive; }
  CodeSection::csize_t nm_content_size() { return _nm_content_size; }
  void set_nm_content_size(CodeSection::csize_t size) { _nm_content_size = size; }
  CodeSection::csize_t nm_insts_size() { return _nm_insts_size; }
  void set_nm_insts_size(CodeSection::csize_t size) { _nm_insts_size = size; }
  CodeSection::csize_t nm_total_size() { return _nm_total_size; }
  void set_nm_total_size(CodeSection::csize_t size) { _nm_total_size = size; }
  bool can_become_stale() const {
    switch (_compile_reason) {
      case Reason_BackedgeCount:

@ -153,11 +166,6 @@ class CompileTask : public CHeapObj<mtCompiler> {
  }
#endif

  nmethodLocker* code_handle() const { return _code_handle; }
  void set_code_handle(nmethodLocker* l) { _code_handle = l; }
  nmethod* code() const; // _code_handle->code()
  void set_code(nmethod* nm); // _code_handle->set_code(nm)

  Monitor* lock() const { return _lock; }

  void mark_complete() { _is_complete = true; }
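With the nmethodLocker-based code handle removed from CompileTask, the task now caches the result sizes needed for logging, JFR events, and statistics. The sketch below is illustrative only: the recording helper is invented (in the real patch the sizes are recorded when the compiled nmethod is handed back to the task), while the reader side matches the collect_statistics() hunks earlier in this diff:

// Illustrative only: recording result sizes on the task instead of keeping the nmethod.
void record_result_sizes(CompileTask* task, nmethod* nm) {
  task->set_nm_content_size(nm->content_size());
  task->set_nm_insts_size(nm->insts_size());
  task->set_nm_total_size(nm->total_size());
}
// Readers then use the cached values, e.g. in CompileBroker::collect_statistics():
//   stats->_nmethods_size      += task->nm_total_size();
//   stats->_nmethods_code_size += task->nm_insts_size();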
@ -590,19 +590,6 @@ void CompilerConfig::ergo_initialize() {
  set_jvmci_specific_flags();
#endif

  if (FLAG_IS_DEFAULT(SweeperThreshold)) {
    if (Continuations::enabled()) {
      // When continuations are enabled, the sweeper needs to trigger GC to
      // be able to sweep nmethods. Therefore, it's in general a good idea
      // to be significantly less aggressive with sweeping, in order not to
      // trigger excessive GC work.
      FLAG_SET_ERGO(SweeperThreshold, SweeperThreshold * 10.0);
    } else if ((SweeperThreshold * ReservedCodeCacheSize / 100) > (1.2 * M)) {
      // Cap default SweeperThreshold value to an equivalent of 1.2 Mb
      FLAG_SET_ERGO(SweeperThreshold, (1.2 * M * 100) / ReservedCodeCacheSize);
    }
  }

  if (UseOnStackReplacement && !UseLoopCounter) {
    warning("On-stack-replacement requires loop counters; enabling loop counters");
    FLAG_SET_DEFAULT(UseLoopCounter, true);
@ -27,7 +27,6 @@
#include "compiler/compileTask.hpp"
#include "compiler/compilerThread.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/sweeper.hpp"

// Create a CompilerThread
CompilerThread::CompilerThread(CompileQueue* queue,

@ -62,34 +61,3 @@ void CompilerThread::thread_entry(JavaThread* thread, TRAPS) {
bool CompilerThread::can_call_java() const {
  return _compiler != NULL && _compiler->is_jvmci();
}

// Create sweeper thread
CodeCacheSweeperThread::CodeCacheSweeperThread()
  : JavaThread(&CodeCacheSweeperThread::thread_entry) {
  _scanned_compiled_method = NULL;
}

void CodeCacheSweeperThread::thread_entry(JavaThread* thread, TRAPS) {
  NMethodSweeper::sweeper_loop();
}

void CodeCacheSweeperThread::oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf) {
  JavaThread::oops_do_no_frames(f, cf);
  if (_scanned_compiled_method != NULL && cf != NULL) {
    // Safepoints can occur when the sweeper is scanning an nmethod so
    // process it here to make sure it isn't unloaded in the middle of
    // a scan.
    cf->do_code_blob(_scanned_compiled_method);
  }
}

void CodeCacheSweeperThread::nmethods_do(CodeBlobClosure* cf) {
  JavaThread::nmethods_do(cf);
  if (_scanned_compiled_method != NULL && cf != NULL) {
    // Safepoints can occur when the sweeper is scanning an nmethod so
    // process it here to make sure it isn't unloaded in the middle of
    // a scan.
    cf->do_code_blob(_scanned_compiled_method);
  }
}
@ -116,29 +116,4 @@ class CompilerThread : public JavaThread {
  static void thread_entry(JavaThread* thread, TRAPS);
};

// Dedicated thread to sweep the code cache
class CodeCacheSweeperThread : public JavaThread {
  CompiledMethod* _scanned_compiled_method; // nmethod being scanned by the sweeper

  static void thread_entry(JavaThread* thread, TRAPS);

 public:
  CodeCacheSweeperThread();
  // Track the nmethod currently being scanned by the sweeper
  void set_scanned_compiled_method(CompiledMethod* cm) {
    assert(_scanned_compiled_method == NULL || cm == NULL, "should reset to NULL before writing a new value");
    _scanned_compiled_method = cm;
  }

  // Hide sweeper thread from external view.
  bool is_hidden_from_external_view() const { return true; }

  bool is_Code_cache_sweeper_thread() const { return true; }

  // Prevent GC from unloading _scanned_compiled_method
  void oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf);
  void nmethods_do(CodeBlobClosure* cf);
};

#endif // SHARE_COMPILER_COMPILERTHREAD_HPP
@ -124,7 +124,6 @@ public:
  // No nmethod handling
  virtual void register_nmethod(nmethod* nm) {}
  virtual void unregister_nmethod(nmethod* nm) {}
  virtual void flush_nmethod(nmethod* nm) {}
  virtual void verify_nmethod(nmethod* nm) {}

  // No heap verification
@ -81,8 +81,8 @@ void G1CodeBlobClosure::do_evacuation_and_fixup(nmethod* nm) {
  nm->oops_do(&_oc);

  if (_strong) {
    // CodeCache sweeper support
    nm->mark_as_maybe_on_continuation();
    // CodeCache unloading support
    nm->mark_as_maybe_on_stack();

    BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
    if (bs_nm != NULL) {

@ -97,8 +97,8 @@ void G1CodeBlobClosure::do_marking(nmethod* nm) {
  // Mark through oops in the nmethod
  nm->oops_do(&_marking_oc);

  // CodeCache sweeper support
  nm->mark_as_maybe_on_continuation();
  // CodeCache unloading support
  nm->mark_as_maybe_on_stack();

  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs_nm != NULL) {
@ -1880,6 +1880,7 @@ bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
    case GCCause::_g1_humongous_allocation: return true;
    case GCCause::_g1_periodic_collection: return G1PeriodicGCInvokesConcurrent;
    case GCCause::_wb_breakpoint: return true;
    case GCCause::_codecache_GC_aggressive: return true;
    case GCCause::_codecache_GC_threshold: return true;
    default: return is_user_requested_concurrent_full_gc(cause);
  }

@ -3427,14 +3428,14 @@ void G1CollectedHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, boo
}

void G1CollectedHeap::start_codecache_marking_cycle_if_inactive() {
  if (!Continuations::is_gc_marking_cycle_active()) {
  if (!CodeCache::is_gc_marking_cycle_active()) {
    // This is the normal case when we do not call collect when a
    // concurrent mark is ongoing. We then start a new code marking
    // cycle. If, on the other hand, a concurrent mark is ongoing, we
    // will be conservative and use the last code marking cycle. Code
    // caches marked between the two concurrent marks will live a bit
    // longer than needed.
    Continuations::on_gc_marking_cycle_start();
    Continuations::arm_all_nmethods();
    CodeCache::on_gc_marking_cycle_start();
    CodeCache::arm_all_nmethods();
  }
}
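The G1 helper above now brackets a code cache marking cycle with CodeCache calls instead of Continuations ones. The same pattern recurs in every collector touched by this change (G1 remark and full GC, Parallel, Serial); schematically:

// The shape of a collection cycle after this change (illustrative framing only).
void gc_cycle_with_code_marking_sketch() {
  CodeCache::on_gc_marking_cycle_start();
  CodeCache::arm_all_nmethods();
  // ... collector-specific marking and compaction work ...
  CodeCache::on_gc_marking_cycle_finish();
  CodeCache::arm_all_nmethods();
}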
@ -1246,9 +1246,6 @@ public:
  // Unregister the given nmethod from the G1 heap.
  void unregister_nmethod(nmethod* nm) override;

  // No nmethod flushing needed.
  void flush_nmethod(nmethod* nm) override {}

  // No nmethod verification implemented.
  void verify_nmethod(nmethod* nm) override {}
@ -67,7 +67,6 @@
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/continuation.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"

@ -1319,8 +1318,8 @@ void G1ConcurrentMark::remark() {
    report_object_count(mark_finished);
  }

  Continuations::on_gc_marking_cycle_finish();
  Continuations::arm_all_nmethods();
  CodeCache::on_gc_marking_cycle_finish();
  CodeCache::arm_all_nmethods();

  // Statistics
  double now = os::elapsedTime();
@ -44,7 +44,6 @@
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "runtime/continuation.hpp"
#include "runtime/handles.inline.hpp"
#include "utilities/debug.hpp"

@ -210,8 +209,8 @@ void G1FullCollector::collect() {

  phase4_do_compaction();

  Continuations::on_gc_marking_cycle_finish();
  Continuations::arm_all_nmethods();
  CodeCache::on_gc_marking_cycle_finish();
  CodeCache::arm_all_nmethods();
}

void G1FullCollector::complete_collection() {
@ -1190,6 +1190,7 @@ void G1Policy::decide_on_concurrent_start_pause() {
    log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
  } else if (_g1h->is_user_requested_concurrent_full_gc(cause) ||
             (cause == GCCause::_codecache_GC_threshold) ||
             (cause == GCCause::_codecache_GC_aggressive) ||
             (cause == GCCause::_wb_breakpoint)) {
    // Initiate a concurrent start. A concurrent start must be a young only
    // GC, so the collector state must be updated to reflect this.
@ -363,22 +363,16 @@ public:
    nmethod* nm = (cb == NULL) ? NULL : cb->as_compiled_method()->as_nmethod_or_null();
    if (nm != NULL) {
      // Verify that the nemthod is live
      if (!nm->is_alive()) {
        log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has dead nmethod " PTR_FORMAT " in its code roots",
      VerifyCodeRootOopClosure oop_cl(_hr);
      nm->oops_do(&oop_cl);
      if (!oop_cl.has_oops_in_region()) {
        log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has nmethod " PTR_FORMAT " in its code roots with no pointers into region",
                              p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
        _failures = true;
      } else if (oop_cl.failures()) {
        log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has other failures for nmethod " PTR_FORMAT,
                              p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
        _failures = true;
      } else {
        VerifyCodeRootOopClosure oop_cl(_hr);
        nm->oops_do(&oop_cl);
        if (!oop_cl.has_oops_in_region()) {
          log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has nmethod " PTR_FORMAT " in its code roots with no pointers into region",
                                p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
          _failures = true;
        } else if (oop_cl.failures()) {
          log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has other failures for nmethod " PTR_FORMAT,
                                p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
          _failures = true;
        }
      }
    }
  }
@ -842,10 +842,6 @@ void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

void ParallelScavengeHeap::flush_nmethod(nmethod* nm) {
  // nothing particular
}

void ParallelScavengeHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods();
}
@ -173,7 +173,6 @@ class ParallelScavengeHeap : public CollectedHeap {
  virtual void register_nmethod(nmethod* nm);
  virtual void unregister_nmethod(nmethod* nm);
  virtual void verify_nmethod(nmethod* nm);
  virtual void flush_nmethod(nmethod* nm);

  void prune_scavengable_nmethods();
@ -75,7 +75,6 @@
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/continuation.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/safepoint.hpp"

@ -962,8 +961,8 @@ void PSParallelCompact::pre_compact()
  // Increment the invocation count
  heap->increment_total_collections(true);

  Continuations::on_gc_marking_cycle_start();
  Continuations::arm_all_nmethods();
  CodeCache::on_gc_marking_cycle_start();
  CodeCache::arm_all_nmethods();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

@ -995,8 +994,8 @@ void PSParallelCompact::post_compact()
  GCTraceTime(Info, gc, phases) tm("Post Compact", &_gc_timer);
  ParCompactionManager::remove_all_shadow_regions();

  Continuations::on_gc_marking_cycle_finish();
  Continuations::arm_all_nmethods();
  CodeCache::on_gc_marking_cycle_finish();
  CodeCache::arm_all_nmethods();

  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    // Clear the marking bitmap, summary data and split info.
@ -55,12 +55,10 @@ static BarrierSetNMethod* select_barrier_set_nmethod(BarrierSetNMethod* barrier_
  if (barrier_set_nmethod != NULL) {
    // The GC needs nmethod entry barriers to do concurrent GC
    return barrier_set_nmethod;
  } else if (Continuations::enabled()) {
    // The GC needs nmethod entry barriers to deal with continuations
    return new BarrierSetNMethod();
  } else {
    // The GC does not need nmethod entry barriers
    return NULL;
    // The GC needs nmethod entry barriers to deal with continuations
    // and code cache unloading
    return NOT_ARM32(new BarrierSetNMethod()) ARM32_ONLY(nullptr);
  }
}

@ -77,8 +75,8 @@ BarrierSet::BarrierSet(BarrierSetAssembler* barrier_set_assembler,
}

void BarrierSet::on_thread_attach(Thread* thread) {
  if (Continuations::enabled()) {
    BarrierSetNMethod* bs_nm = barrier_set_nmethod();
  BarrierSetNMethod* bs_nm = barrier_set_nmethod();
  if (bs_nm != nullptr) {
    thread->set_nmethod_disarm_value(bs_nm->disarmed_value());
  }
}
@ -85,8 +85,8 @@ bool BarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
  OopKeepAliveClosure cl;
  nm->oops_do(&cl);

  // CodeCache sweeper support
  nm->mark_as_maybe_on_continuation();
  // CodeCache unloading support
  nm->mark_as_maybe_on_stack();

  disarm(nm);

@ -290,9 +290,10 @@ void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_codecache_GC_threshold:
    case GCCause::_codecache_GC_aggressive:
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
    case GCCause::_metadata_GC_threshold: {
      HandleMark hm(thread);
      do_full_collection(false); // don't clear all soft refs
      break;
@ -479,8 +479,6 @@ class CollectedHeap : public CHeapObj<mtGC> {
  // Registering and unregistering an nmethod (compiled code) with the heap.
  virtual void register_nmethod(nmethod* nm) = 0;
  virtual void unregister_nmethod(nmethod* nm) = 0;
  // Callback for when nmethod is about to be deleted.
  virtual void flush_nmethod(nmethod* nm) = 0;
  virtual void verify_nmethod(nmethod* nm) = 0;

  void trace_heap_before_gc(const GCTracer* gc_tracer);
@ -29,6 +29,10 @@

IsUnloadingBehaviour* IsUnloadingBehaviour::_current = NULL;

bool IsUnloadingBehaviour::is_unloading(CompiledMethod* cm) {
  return _current->has_dead_oop(cm) || cm->as_nmethod()->is_cold();
}

class IsCompiledMethodUnloadingOopClosure: public OopClosure {
  BoolObjectClosure *_cl;
  bool _is_unloading;

@ -61,7 +65,7 @@ public:
  }
};

bool ClosureIsUnloadingBehaviour::is_unloading(CompiledMethod* cm) const {
bool ClosureIsUnloadingBehaviour::has_dead_oop(CompiledMethod* cm) const {
  if (cm->is_nmethod()) {
    IsCompiledMethodUnloadingOopClosure cl(_cl);
    static_cast<nmethod*>(cm)->oops_do(&cl, true /* allow_dead */);
@ -34,7 +34,8 @@ class IsUnloadingBehaviour {
  static IsUnloadingBehaviour* _current;

 public:
  virtual bool is_unloading(CompiledMethod* cm) const = 0;
  static bool is_unloading(CompiledMethod* cm);
  virtual bool has_dead_oop(CompiledMethod* cm) const = 0;
  static IsUnloadingBehaviour* current() { return _current; }
  static void set_current(IsUnloadingBehaviour* current) { _current = current; }
};

@ -47,7 +48,7 @@ public:
    : _cl(is_alive)
  { }

  virtual bool is_unloading(CompiledMethod* cm) const;
  virtual bool has_dead_oop(CompiledMethod* cm) const;
};

#endif // SHARE_GC_SHARED_GCBEHAVIOURS_HPP
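With this split, subclasses such as ClosureIsUnloadingBehaviour only answer the narrow question "does this compiled method reference a dead oop?", while the static IsUnloadingBehaviour::is_unloading() (see the gcBehaviours.cpp hunk above) layers the nmethod::is_cold() heuristic on top. A minimal caller-side sketch, with the helper name being illustrative:

// Illustrative helper: GC code goes through the static entry point, so the answer is
// "the installed behaviour found a dead oop, or the nmethod is heuristically cold".
static bool should_unload(CompiledMethod* cm) {
  return IsUnloadingBehaviour::is_unloading(cm);
}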
|
||||
|
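The gcBehaviours change above splits the old virtual is_unloading() into a GC-specific has_dead_oop() hook plus a shared static is_unloading() that also treats cold nmethods as unloadable. A reduced sketch of that shape, using placeholder types rather than the real CompiledMethod and nmethod classes:

// Placeholder compiled-method type, for illustration only.
struct CompiledMethod {
  bool has_broken_oop;   // stand-in for "references a dead oop"
  bool cold;             // stand-in for nmethod::is_cold()
};

class IsUnloadingBehaviour {
  static IsUnloadingBehaviour* _current;
public:
  virtual ~IsUnloadingBehaviour() = default;

  // GC-specific policy: does this method reference a dead oop?
  virtual bool has_dead_oop(CompiledMethod* cm) const = 0;

  // Shared policy: a dead oop or cold code makes the method unloadable.
  static bool is_unloading(CompiledMethod* cm) {
    return _current->has_dead_oop(cm) || cm->cold;
  }
  static void set_current(IsUnloadingBehaviour* behaviour) { _current = behaviour; }
};

IsUnloadingBehaviour* IsUnloadingBehaviour::_current = nullptr;

// Example GC-specific implementation.
class SimpleIsUnloadingBehaviour : public IsUnloadingBehaviour {
public:
  bool has_dead_oop(CompiledMethod* cm) const override { return cm->has_broken_oop; }
};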
@ -75,6 +75,9 @@ const char* GCCause::to_string(GCCause::Cause cause) {
    case _codecache_GC_threshold:
      return "CodeCache GC Threshold";

    case _codecache_GC_aggressive:
      return "CodeCache GC Aggressive";

    case _metadata_GC_threshold:
      return "Metadata GC Threshold";

@ -64,6 +64,7 @@ class GCCause : public AllStatic {
    /* implementation specific */

    _codecache_GC_threshold,
    _codecache_GC_aggressive,
    _metadata_GC_threshold,
    _metadata_GC_clear_soft_refs,

@ -64,7 +64,6 @@
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/continuation.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
@ -608,8 +607,8 @@ void GenCollectedHeap::do_collection(bool full,
    increment_total_full_collections();
  }

  Continuations::on_gc_marking_cycle_start();
  Continuations::arm_all_nmethods();
  CodeCache::on_gc_marking_cycle_start();
  CodeCache::arm_all_nmethods();

  collect_generation(_old_gen,
                     full,
@ -618,8 +617,8 @@ void GenCollectedHeap::do_collection(bool full,
                     run_verification && VerifyGCLevel <= 1,
                     do_clear_all_soft_refs);

  Continuations::on_gc_marking_cycle_finish();
  Continuations::arm_all_nmethods();
  CodeCache::on_gc_marking_cycle_finish();
  CodeCache::arm_all_nmethods();

  // Adjust generation sizes.
  _old_gen->compute_new_size();
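The do_collection hunks above switch the marking-cycle bookkeeping from Continuations:: to CodeCache:: and re-arm all nmethods around the collection. A schematic of that bracketing; the toy_code_cache functions below are hypothetical placeholders, not the HotSpot API:

#include <cstdio>

// Hypothetical placeholders standing in for the CodeCache hooks.
namespace toy_code_cache {
  inline void on_gc_marking_cycle_start()  { std::puts("marking cycle start"); }
  inline void on_gc_marking_cycle_finish() { std::puts("marking cycle finish"); }
  inline void arm_all_nmethods()           { std::puts("arm all nmethod entry barriers"); }
}

// Shape of a stop-the-world collection that brackets its marking work.
template <typename CollectFn>
void do_collection(CollectFn collect) {
  toy_code_cache::on_gc_marking_cycle_start();
  toy_code_cache::arm_all_nmethods();   // entry barriers will report nmethods seen on stack

  collect();                            // the GC's own marking and compaction work

  toy_code_cache::on_gc_marking_cycle_finish();
  toy_code_cache::arm_all_nmethods();   // re-arm so executions after the cycle are tracked
}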
@ -662,10 +661,6 @@ void GenCollectedHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

void GenCollectedHeap::flush_nmethod(nmethod* nm) {
  // Do nothing.
}

void GenCollectedHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods();
}

@ -211,7 +211,6 @@ public:
  virtual void register_nmethod(nmethod* nm);
  virtual void unregister_nmethod(nmethod* nm);
  virtual void verify_nmethod(nmethod* nm);
  virtual void flush_nmethod(nmethod* nm);

  void prune_scavengable_nmethods();

@ -38,7 +38,7 @@ CodeCacheUnloadingTask::CodeCacheUnloadingTask(uint num_workers, bool unloading_
    _first_nmethod(NULL),
    _claimed_nmethod(NULL) {
  // Get first alive nmethod
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive);
  CompiledMethodIterator iter(CompiledMethodIterator::all_blobs);
  if(iter.next()) {
    _first_nmethod = iter.method();
  }
@ -52,13 +52,13 @@ CodeCacheUnloadingTask::~CodeCacheUnloadingTask() {

void CodeCacheUnloadingTask::claim_nmethods(CompiledMethod** claimed_nmethods, int *num_claimed_nmethods) {
  CompiledMethod* first;
  CompiledMethodIterator last(CompiledMethodIterator::only_alive);
  CompiledMethodIterator last(CompiledMethodIterator::all_blobs);

  do {
    *num_claimed_nmethods = 0;

    first = _claimed_nmethod;
    last = CompiledMethodIterator(CompiledMethodIterator::only_alive, first);
    last = CompiledMethodIterator(CompiledMethodIterator::all_blobs, first);

    if (first != NULL) {

@ -149,8 +149,6 @@ void ScavengableNMethods::nmethods_do_and_prune(CodeBlobToOopClosure* cl) {
  nmethod* prev = NULL;
  nmethod* cur = _head;
  while (cur != NULL) {
    assert(cur->is_alive(), "Must be");

    ScavengableNMethodsData data = gc_data(cur);
    debug_only(data.clear_marked());
    assert(data.on_list(), "else shouldn't be on this list");
@ -215,7 +213,7 @@ void ScavengableNMethods::unlist_nmethod(nmethod* nm, nmethod* prev) {
#ifndef PRODUCT
// Temporarily mark nmethods that are claimed to be on the scavenge list.
void ScavengableNMethods::mark_on_list_nmethods() {
  NMethodIterator iter(NMethodIterator::only_alive);
  NMethodIterator iter(NMethodIterator::all_blobs);
  while(iter.next()) {
    nmethod* nm = iter.method();
    ScavengableNMethodsData data = gc_data(nm);
@ -228,7 +226,7 @@ void ScavengableNMethods::mark_on_list_nmethods() {
// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_on_list_nmethods is gone.
void ScavengableNMethods::verify_unlisted_nmethods(CodeBlobClosure* cl) {
  NMethodIterator iter(NMethodIterator::only_alive);
  NMethodIterator iter(NMethodIterator::all_blobs);
  while(iter.next()) {
    nmethod* nm = iter.method();

@ -63,8 +63,8 @@ bool ShenandoahBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
  // Heal oops
  ShenandoahNMethod::heal_nmethod(nm);

  // CodeCache sweeper support
  nm->mark_as_maybe_on_continuation();
  // CodeCache unloading support
  nm->mark_as_maybe_on_stack();

  // Disarm
  ShenandoahNMethod::disarm_nmethod(nm);

@ -90,13 +90,11 @@ void ShenandoahParallelCodeHeapIterator::parallel_blobs_do(CodeBlobClosure* f) {
                       (Atomic::cmpxchg(&_claimed_idx, current, current + stride, memory_order_relaxed) == current);
    }
    if (process_block) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
      f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod())
          Universe::heap()->verify_nmethod((nmethod*)cb);
      if (cb->is_nmethod())
        Universe::heap()->verify_nmethod((nmethod*)cb);
#endif
      }
    }
  }

@ -120,11 +118,6 @@ void ShenandoahCodeRoots::unregister_nmethod(nmethod* nm) {
  _nmethod_table->unregister_nmethod(nm);
}

void ShenandoahCodeRoots::flush_nmethod(nmethod* nm) {
  assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
  _nmethod_table->flush_nmethod(nm);
}

void ShenandoahCodeRoots::arm_nmethods() {
  assert(BarrierSet::barrier_set()->barrier_set_nmethod() != NULL, "Sanity");
  BarrierSet::barrier_set()->barrier_set_nmethod()->arm_all_nmethods();
@ -187,22 +180,6 @@ private:
    Atomic::store(&_failed, true);
  }

  void unlink(nmethod* nm) {
    // Unlinking of the dependencies must happen before the
    // handshake separating unlink and purge.
    nm->flush_dependencies(false /* delete_immediately */);

    // unlink_from_method will take the CompiledMethod_lock.
    // In this case we don't strictly need it when unlinking nmethods from
    // the Method, because it is only concurrently unlinked by
    // the entry barrier, which acquires the per nmethod lock.
    nm->unlink_from_method();

    if (nm->is_osr_method()) {
      // Invalidate the osr nmethod only once
      nm->invalidate_osr_method();
    }
  }
public:
  ShenandoahNMethodUnlinkClosure(bool unloading_occurred) :
    _unloading_occurred(unloading_occurred),
@ -219,13 +196,9 @@ public:
    ShenandoahNMethod* nm_data = ShenandoahNMethod::gc_data(nm);
    assert(!nm_data->is_unregistered(), "Should not see unregistered entry");

    if (!nm->is_alive()) {
      return;
    }

    if (nm->is_unloading()) {
      ShenandoahReentrantLocker locker(nm_data->lock());
      unlink(nm);
      nm->unlink();
      return;
    }

@ -235,13 +208,9 @@ public:
    if (_bs->is_armed(nm)) {
      ShenandoahEvacOOMScope oom_evac_scope;
      ShenandoahNMethod::heal_nmethod_metadata(nm_data);
      if (Continuations::enabled()) {
        // Loom needs to know about visited nmethods. Arm the nmethods to get
        // mark_as_maybe_on_continuation() callbacks when they are used again.
        _bs->arm(nm, 0);
      } else {
        _bs->disarm(nm);
      }
      // Code cache unloading needs to know about on-stack nmethods. Arm the nmethods to get
      // mark_as_maybe_on_stack() callbacks when they are used again.
      _bs->arm(nm, 0);
    }

    // Clear compiled ICs and exception caches
@ -308,44 +277,10 @@ void ShenandoahCodeRoots::unlink(WorkerThreads* workers, bool unloading_occurred
  }
}

class ShenandoahNMethodPurgeClosure : public NMethodClosure {
public:
  virtual void do_nmethod(nmethod* nm) {
    if (nm->is_alive() && nm->is_unloading()) {
      nm->make_unloaded();
    }
  }
};

class ShenandoahNMethodPurgeTask : public WorkerTask {
private:
  ShenandoahNMethodPurgeClosure _cl;
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahNMethodPurgeTask() :
    WorkerTask("Shenandoah Purge NMethods"),
    _cl(),
    _iterator(ShenandoahCodeRoots::table()) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahNMethodPurgeTask() {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    _iterator.nmethods_do(&_cl);
  }
};

void ShenandoahCodeRoots::purge(WorkerThreads* workers) {
void ShenandoahCodeRoots::purge() {
  assert(ShenandoahHeap::heap()->unload_classes(), "Only when running concurrent class unloading");

  ShenandoahNMethodPurgeTask task;
  workers->run_task(&task);
  CodeCache::flush_unlinked_nmethods();
}

ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() :

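The purge path above drops Shenandoah's dedicated purge worker task: nmethods are now unlinked during unloading and actually freed later by CodeCache::flush_unlinked_nmethods(). A rough sketch of that unlink-then-flush split, with a toy container standing in for the code cache:

#include <vector>

// Illustrative only: separates "unlink" (make an nmethod unreachable)
// from "flush" (delete it once no thread can still enter it).
struct ToyNmethod {
  bool is_unloading = false;
};

struct ToyCodeCache {
  std::vector<ToyNmethod*> methods;
  std::vector<ToyNmethod*> unlinked;

  // Phase 1: GC workers unlink dead nmethods while mutators may still run.
  void unlink_dead() {
    std::vector<ToyNmethod*> still_live;
    for (ToyNmethod* nm : methods) {
      if (nm->is_unloading) {
        unlinked.push_back(nm);   // unreachable from now on
      } else {
        still_live.push_back(nm);
      }
    }
    methods.swap(still_live);
  }

  // Phase 2: after a handshake guarantees no thread is inside them,
  // free the unlinked nmethods in one batch.
  void flush_unlinked() {
    for (ToyNmethod* nm : unlinked) {
      delete nm;
    }
    unlinked.clear();
  }
};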
@ -96,7 +96,7 @@ public:

  // Concurrent nmethod unloading support
  static void unlink(WorkerThreads* workers, bool unloading_occurred);
  static void purge(WorkerThreads* workers);
  static void purge();
  static void arm_nmethods();
  static void disarm_nmethods();
  static int disarmed_value() { return _disarmed_value; }

@ -479,6 +479,7 @@ void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_metadata_GC_clear_soft_refs ||
         cause == GCCause::_codecache_GC_aggressive ||
         cause == GCCause::_codecache_GC_threshold ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_wb_full_gc ||

@ -1915,10 +1915,6 @@ void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::unregister_nmethod(nm);
}

void ShenandoahHeap::flush_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::flush_nmethod(nm);
}

oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
  heap_region_containing(o)->record_pin();
  return o;

@ -502,7 +502,6 @@ public:
public:
  void register_nmethod(nmethod* nm);
  void unregister_nmethod(nmethod* nm);
  void flush_nmethod(nmethod* nm);
  void verify_nmethod(nmethod* nm) {}

  // ---------- Pinning hooks

@ -33,7 +33,6 @@
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "runtime/continuation.hpp"

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
  MetadataVisitingOopIterateClosure(rp),
@ -47,17 +46,15 @@ ShenandoahMark::ShenandoahMark() :
}

void ShenandoahMark::start_mark() {
  // Tell the sweeper that we start a marking cycle.
  if (!Continuations::is_gc_marking_cycle_active()) {
    Continuations::on_gc_marking_cycle_start();
  if (!CodeCache::is_gc_marking_cycle_active()) {
    CodeCache::on_gc_marking_cycle_start();
  }
}

void ShenandoahMark::end_mark() {
  // Tell the sweeper that we finished a marking cycle.
  // Unlike other GCs, we do not arm the nmethods
  // when marking terminates.
  Continuations::on_gc_marking_cycle_finish();
  CodeCache::on_gc_marking_cycle_finish();
}

void ShenandoahMark::clear() {

@ -168,7 +168,6 @@ void ShenandoahNMethod::heal_nmethod(nmethod* nm) {
    // There is possibility that GC is cancelled when it arrives final mark.
    // In this case, concurrent root phase is skipped and degenerated GC should be
    // followed, where nmethods are disarmed.
    assert(heap->cancelled_gc() || Continuations::enabled(), "What else?");
  }
}

||||
@ -300,28 +299,10 @@ void ShenandoahNMethodTable::unregister_nmethod(nmethod* nm) {
|
||||
|
||||
ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);
|
||||
assert(data != NULL, "Sanity");
|
||||
if (Thread::current()->is_Code_cache_sweeper_thread()) {
|
||||
wait_until_concurrent_iteration_done();
|
||||
}
|
||||
log_unregister_nmethod(nm);
|
||||
ShenandoahLocker locker(&_lock);
|
||||
assert(contain(nm), "Must have been registered");
|
||||
|
||||
ShenandoahReentrantLocker data_locker(data->lock());
|
||||
data->mark_unregistered();
|
||||
}
|
||||
|
||||
void ShenandoahNMethodTable::flush_nmethod(nmethod* nm) {
|
||||
assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
|
||||
assert(Thread::current()->is_Code_cache_sweeper_thread(), "Must from Sweep thread");
|
||||
ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);
|
||||
assert(data != NULL, "Sanity");
|
||||
|
||||
// Can not alter the array when iteration is in progress
|
||||
wait_until_concurrent_iteration_done();
|
||||
log_flush_nmethod(nm);
|
||||
|
||||
ShenandoahLocker locker(&_lock);
|
||||
int idx = index_of(nm);
|
||||
assert(idx >= 0 && idx < _index, "Invalid index");
|
||||
ShenandoahNMethod::attach_gc_data(nm, NULL);
|
||||
@ -348,7 +329,6 @@ int ShenandoahNMethodTable::index_of(nmethod* nm) const {
|
||||
|
||||
void ShenandoahNMethodTable::remove(int idx) {
|
||||
shenandoah_assert_locked_or_safepoint(CodeCache_lock);
|
||||
assert(!iteration_in_progress(), "Can not happen");
|
||||
assert(_index >= 0 && _index <= _list->size(), "Sanity");
|
||||
|
||||
assert(idx >= 0 && idx < _index, "Out of bound");
|
||||
@ -429,16 +409,6 @@ void ShenandoahNMethodTable::log_unregister_nmethod(nmethod* nm) {
|
||||
p2i(nm));
|
||||
}
|
||||
|
||||
void ShenandoahNMethodTable::log_flush_nmethod(nmethod* nm) {
|
||||
LogTarget(Debug, gc, nmethod) log;
|
||||
if (!log.is_enabled()) {
|
||||
return;
|
||||
}
|
||||
|
||||
ResourceMark rm;
|
||||
log.print("Flush NMethod: (" PTR_FORMAT ")", p2i(nm));
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
void ShenandoahNMethodTable::assert_nmethods_correct() {
|
||||
assert_locked_or_safepoint(CodeCache_lock);
|
||||
@ -513,11 +483,8 @@ void ShenandoahNMethodTableSnapshot::parallel_blobs_do(CodeBlobClosure *f) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// A nmethod can become a zombie before it is unregistered.
|
||||
if (nmr->nm()->is_alive()) {
|
||||
nmr->assert_correct();
|
||||
f->do_code_blob(nmr->nm());
|
||||
}
|
||||
nmr->assert_correct();
|
||||
f->do_code_blob(nmr->nm());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -150,7 +150,6 @@ public:
|
||||
|
||||
void register_nmethod(nmethod* nm);
|
||||
void unregister_nmethod(nmethod* nm);
|
||||
void flush_nmethod(nmethod* nm);
|
||||
|
||||
bool contain(nmethod* nm) const;
|
||||
int length() const { return _index; }
|
||||
@ -180,7 +179,6 @@ private:
|
||||
// Logging support
|
||||
void log_register_nmethod(nmethod* nm);
|
||||
void log_unregister_nmethod(nmethod* nm);
|
||||
void log_flush_nmethod(nmethod* nm);
|
||||
};
|
||||
|
||||
class ShenandoahConcurrentNMethodIterator {
|
||||
|
@ -76,7 +76,7 @@ public:

class ShenandoahIsUnloadingBehaviour : public IsUnloadingBehaviour {
public:
  virtual bool is_unloading(CompiledMethod* method) const {
  virtual bool has_dead_oop(CompiledMethod* method) const {
    nmethod* const nm = method->as_nmethod();
    assert(ShenandoahHeap::heap()->is_concurrent_weak_root_in_progress(), "Only for this phase");
    ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);
@ -176,7 +176,7 @@ void ShenandoahUnload::unload() {
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_class_unload_purge_coderoots);
    SuspendibleThreadSetJoiner sts;
    ShenandoahCodeRoots::purge(heap->workers());
    ShenandoahCodeRoots::purge();
  }

  {

@ -350,7 +350,7 @@ inline void ZBarrier::keep_alive_barrier_on_phantom_root_oop_field(oop* p) {
  // unlinking, to get a sense of what nmethods are alive. This will trigger
  // the keep alive barriers, but the oops are healed and the slow-paths
  // will not trigger. We have stronger checks in the slow-paths.
  assert(ZResurrection::is_blocked() || (Continuations::enabled() && CodeCache::contains((void*)p)),
  assert(ZResurrection::is_blocked() || (CodeCache::contains((void*)p)),
         "This operation is only valid when resurrection is blocked");
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);

@ -59,8 +59,8 @@ bool ZBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
  ZNMethod::nmethod_oops_barrier(nm);

  // CodeCache sweeper support
  nm->mark_as_maybe_on_continuation();
  // CodeCache unloading support
  nm->mark_as_maybe_on_stack();

  // Disarm
  disarm(nm);

@ -265,10 +265,6 @@ void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
  ZNMethod::unregister_nmethod(nm);
}

void ZCollectedHeap::flush_nmethod(nmethod* nm) {
  ZNMethod::flush_nmethod(nm);
}

void ZCollectedHeap::verify_nmethod(nmethod* nm) {
  // Does nothing
}

Some files were not shown because too many files have changed in this diff.