8306767: Concurrent repacking of extra data in MethodData is potentially unsafe
Reviewed-by: eosterlund, roland, coleenp, never
parent ffe3bb6763
commit 746a08686b
src/hotspot/share
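The whole change applies one locking discipline: every reader or writer of the MDO extra data area takes extra_data_lock with Mutex::_no_safepoint_check_flag, so a safepoint cannot break the lock while extra data entries are read or repacked, and the MethodData helpers assert that discipline via check_extra_data_locked(). The standalone sketch below only models that discipline; std::mutex and all names in it are illustrative stand-ins, not HotSpot APIs, and the sketch is not part of the patch.

#include <cassert>
#include <mutex>
#include <thread>

// Illustrative model: every reader or writer of the "extra data" area holds
// one lock for the whole operation, and each helper asserts ownership
// (compare MethodData::check_extra_data_locked() in the patch below).
class ExtraDataModel {
 public:
  void clean() {                               // like MethodData::clean_extra_data()
    std::lock_guard<std::mutex> guard(_lock);  // like MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag)
    _owner = std::this_thread::get_id();
    repack();                                  // entry offsets may shift while repacking
    _owner = std::thread::id();
  }

 private:
  void check_locked() const {                  // like check_extra_data_locked()
    assert(_owner == std::this_thread::get_id());
  }
  void repack() {
    check_locked();
    // Shift live entries left. A reader scanning the area without the lock
    // could observe half-moved entries -- the race this change removes.
  }

  std::mutex _lock;
  std::thread::id _owner;
};

int main() {
  ExtraDataModel mdo;
  mdo.clean();
  return 0;
}

The patch repeats exactly this shape at each call site below: a MutexLocker on extra_data_lock() with Mutex::_no_safepoint_check_flag before any ProfileData is touched, and check_extra_data_locked() inside the MethodData helpers themselves.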
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/klass.inline.hpp"
+#include "oops/methodData.inline.hpp"
 #include "runtime/deoptimization.hpp"
 #include "utilities/copy.hpp"
 
@@ -87,8 +88,18 @@ public:
       // Preparation finished iff all Methods* were already cached.
      return true;
    }
-    // Holding locks through safepoints is bad practice.
-    MutexUnlocker mu(_mdo->extra_data_lock());
+    // We are currently holding the extra_data_lock and ensuring
+    // no safepoint breaks the lock.
+    _mdo->check_extra_data_locked();
+
+    // We now want to cache some method data. This could cause a safepoint.
+    // We temporarily release the lock and allow safepoints, and revert that
+    // at the end of the scope. This is safe, since we currently do not hold
+    // any extra_method_data: finish is called only after clean_extra_data,
+    // and the outer scope that first aquired the lock should not hold any
+    // extra_method_data while cleaning is performed, as the offsets can change.
+    MutexUnlocker mu(_mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);
+
    for (int i = 0; i < _uncached_methods.length(); ++i) {
      if (has_safepointed()) {
        // The metadata in the growable array might contain stale
@@ -123,7 +134,10 @@ void ciMethodData::prepare_metadata() {
 
 void ciMethodData::load_remaining_extra_data() {
   MethodData* mdo = get_MethodData();
-  MutexLocker ml(mdo->extra_data_lock());
+
+  // Lock to read ProfileData, and ensure lock is not unintentionally broken by a safepoint
+  MutexLocker ml(mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);
+
   // Deferred metadata cleaning due to concurrent class unloading.
   prepare_metadata();
   // After metadata preparation, there is no stale metadata,
@@ -562,6 +576,9 @@ void ciMethodData::set_argument_type(int bci, int i, ciKlass* k) {
   VM_ENTRY_MARK;
   MethodData* mdo = get_MethodData();
   if (mdo != nullptr) {
+    // Lock to read ProfileData, and ensure lock is not broken by a safepoint
+    MutexLocker ml(mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);
+
     ProfileData* data = mdo->bci_to_data(bci);
     if (data != nullptr) {
       if (data->is_CallTypeData()) {
@@ -586,6 +603,9 @@ void ciMethodData::set_return_type(int bci, ciKlass* k) {
   VM_ENTRY_MARK;
   MethodData* mdo = get_MethodData();
   if (mdo != nullptr) {
+    // Lock to read ProfileData, and ensure lock is not broken by a safepoint
+    MutexLocker ml(mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);
+
     ProfileData* data = mdo->bci_to_data(bci);
     if (data != nullptr) {
       if (data->is_CallTypeData()) {
@@ -592,6 +592,10 @@ void BytecodePrinter::print_attributes(int bci, outputStream* st) {
 void BytecodePrinter::bytecode_epilog(int bci, outputStream* st) {
   MethodData* mdo = method()->method_data();
   if (mdo != nullptr) {
+
+    // Lock to read ProfileData, and ensure lock is not broken by a safepoint
+    MutexLocker ml(mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);
+
     ProfileData* data = mdo->bci_to_data(bci);
     if (data != nullptr) {
       st->print(" %d ", mdo->dp_to_di(data->dp()));
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -547,7 +547,12 @@ JRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThrea
 #if INCLUDE_JVMCI
   if (EnableJVMCI && h_method->method_data() != nullptr) {
     ResourceMark rm(current);
-    ProfileData* pdata = h_method->method_data()->allocate_bci_to_data(current_bci, nullptr);
+    MethodData* mdo = h_method->method_data();
+
+    // Lock to read ProfileData, and ensure lock is not broken by a safepoint
+    MutexLocker ml(mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);
+
+    ProfileData* pdata = mdo->allocate_bci_to_data(current_bci, nullptr);
     if (pdata != nullptr && pdata->is_BitData()) {
       BitData* bit_data = (BitData*) pdata;
       bit_data->set_exception_seen();
@@ -56,6 +56,10 @@ static bool mark_mdo(Method* method, int bci, JavaThread* jt) {
   assert(jt != nullptr, "invariant");
   MethodData* const mdo = get_mdo(method, jt);
   assert(mdo != nullptr, "invariant");
+
+  // Lock to access ProfileData, and ensure lock is not broken by a safepoint
+  MutexLocker ml(mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);
+
   // Get the datalayout for the invocation bci.
   BitData* const bit_data = get_bit_data(mdo, bci);
   // Returns true if this callsite is not yet linked and
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1889,7 +1889,10 @@ C2V_END
 
 C2V_VMENTRY_0(jint, methodDataExceptionSeen, (JNIEnv* env, jobject, jlong method_data_pointer, jint bci))
   MethodData* mdo = (MethodData*) method_data_pointer;
-  MutexLocker mu(mdo->extra_data_lock());
+
+  // Lock to read ProfileData, and ensure lock is not broken by a safepoint
+  MutexLocker mu(mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);
+
   DataLayout* data = mdo->extra_data_base();
   DataLayout* end = mdo->args_data_limit();
   for (;; data = mdo->next_extra(data)) {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2531,7 +2531,6 @@ void InstanceKlass::clean_method_data() {
   for (int m = 0; m < methods()->length(); m++) {
     MethodData* mdo = methods()->at(m)->method_data();
     if (mdo != nullptr) {
-      ConditionalMutexLocker ml(mdo->extra_data_lock(), !SafepointSynchronize::is_at_safepoint());
      mdo->clean_method_data(/*always_clean*/false);
    }
  }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1218,7 +1218,7 @@ void MethodData::post_initialize(BytecodeStream* stream) {
 MethodData::MethodData(const methodHandle& method)
   : _method(method()),
-    // Holds Compile_lock
-    _extra_data_lock(Mutex::safepoint-2, "MDOExtraData_lock"),
+    _extra_data_lock(Mutex::nosafepoint, "MDOExtraData_lock"),
     _compiler_counters(),
     _parameters_type_data_di(parameters_uninitialized) {
   initialize();
@@ -1379,6 +1379,8 @@ address MethodData::bci_to_dp(int bci) {
 
 // Translate a bci to its corresponding data, or null.
 ProfileData* MethodData::bci_to_data(int bci) {
+  check_extra_data_locked();
+
   DataLayout* data = data_layout_before(bci);
   for ( ; is_valid(data); data = next_data_layout(data)) {
     if (data->bci() == bci) {
@@ -1429,7 +1431,9 @@ DataLayout* MethodData::next_extra(DataLayout* dp) {
   return (DataLayout*)((address)dp + DataLayout::compute_size_in_bytes(nb_cells));
 }
 
-ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp, bool concurrent) {
+ProfileData* MethodData::bci_to_extra_data_find(int bci, Method* m, DataLayout*& dp) {
+  check_extra_data_locked();
+
   DataLayout* end = args_data_limit();
 
   for (;; dp = next_extra(dp)) {
@@ -1450,14 +1454,9 @@ ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout
     case DataLayout::speculative_trap_data_tag:
       if (m != nullptr) {
         SpeculativeTrapData* data = new SpeculativeTrapData(dp);
-        // data->method() may be null in case of a concurrent
-        // allocation. Maybe it's for the same method. Try to use that
-        // entry in that case.
         if (dp->bci() == bci) {
-          if (data->method() == nullptr) {
-            assert(concurrent, "impossible because no concurrent allocation");
-            return nullptr;
-          } else if (data->method() == m) {
+          assert(data->method() != nullptr, "method must be set");
+          if (data->method() == m) {
             return data;
           }
         }
@@ -1473,6 +1472,8 @@ ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout
 
 // Translate a bci to its corresponding extra data, or null.
 ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_missing) {
+  check_extra_data_locked();
+
   // This code assumes an entry for a SpeculativeTrapData is 2 cells
   assert(2*DataLayout::compute_size_in_bytes(BitData::static_cell_count()) ==
          DataLayout::compute_size_in_bytes(SpeculativeTrapData::static_cell_count()),
@@ -1486,23 +1487,14 @@ ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_mi
   DataLayout* dp = extra_data_base();
   DataLayout* end = args_data_limit();
 
-  // Allocation in the extra data space has to be atomic because not
-  // all entries have the same size and non atomic concurrent
-  // allocation would result in a corrupted extra data space.
-  ProfileData* result = bci_to_extra_data_helper(bci, m, dp, true);
-  if (result != nullptr) {
+  // Find if already exists
+  ProfileData* result = bci_to_extra_data_find(bci, m, dp);
+  if (result != nullptr || dp >= end) {
     return result;
   }
 
-  if (create_if_missing && dp < end) {
-    MutexLocker ml(&_extra_data_lock);
-    // Check again now that we have the lock. Another thread may
-    // have added extra data entries.
-    ProfileData* result = bci_to_extra_data_helper(bci, m, dp, false);
-    if (result != nullptr || dp >= end) {
-      return result;
-    }
-
+  if (create_if_missing) {
+    // Not found -> Allocate
     assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != nullptr), "should be free");
     assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
     u1 tag = m == nullptr ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
@@ -1729,6 +1721,8 @@ void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
 }
 
 void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
+  check_extra_data_locked();
+
   if (shift == 0) {
     return;
   }
@@ -1770,6 +1764,8 @@ public:
 // Remove SpeculativeTrapData entries that reference an unloaded or
 // redefined method
 void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
+  check_extra_data_locked();
+
   DataLayout* dp = extra_data_base();
   DataLayout* end = args_data_limit();
 
@@ -1814,6 +1810,8 @@ void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
 // Verify there's no unloaded or redefined method referenced by a
 // SpeculativeTrapData entry
 void MethodData::verify_extra_data_clean(CleanExtraDataClosure* cl) {
+  check_extra_data_locked();
+
 #ifdef ASSERT
   DataLayout* dp = extra_data_base();
   DataLayout* end = args_data_limit();
@@ -1851,6 +1849,10 @@ void MethodData::clean_method_data(bool always_clean) {
   }
 
   CleanExtraDataKlassClosure cl(always_clean);
+
+  // Lock to modify extra data, and prevent Safepoint from breaking the lock
+  MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag);
+
   clean_extra_data(&cl);
   verify_extra_data_clean(&cl);
 }
@@ -1860,6 +1862,10 @@ void MethodData::clean_method_data(bool always_clean) {
 void MethodData::clean_weak_method_links() {
   ResourceMark rm;
   CleanExtraDataMethodClosure cl;
+
+  // Lock to modify extra data, and prevent Safepoint from breaking the lock
+  MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag);
+
   clean_extra_data(&cl);
   verify_extra_data_clean(&cl);
 }
@@ -1873,3 +1879,16 @@ void MethodData::release_C_heap_structures() {
   FailedSpeculation::free_failed_speculations(get_failed_speculations_address());
 #endif
 }
+
+#ifdef ASSERT
+void MethodData::check_extra_data_locked() const {
+  // Cast const away, just to be able to verify the lock
+  // Usually we only want non-const accesses on the lock,
+  // so this here is an exception.
+  MethodData* self = (MethodData*)this;
+  assert(self->extra_data_lock()->owned_by_self(), "must have lock");
+  assert(!Thread::current()->is_Java_thread() ||
+         JavaThread::current()->is_in_no_safepoint_scope(),
+         "JavaThread must have NoSafepointVerifier inside lock scope");
+}
+#endif
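The assertion above also requires that a JavaThread is inside a no-safepoint scope (the assert message points at NoSafepointVerifier, and the matching is_in_no_safepoint_scope() accessor is added to JavaThread later in this diff). A standalone model of that debug-only counter follows; the names are illustrative only, this is not HotSpot code and not part of the patch.

#include <cassert>

// Model of the no-safepoint scope the assertion relies on: an RAII guard bumps
// a per-thread counter, and code that must not reach a safepoint asserts the
// counter is non-zero (compare NoSafepointVerifier and the new
// JavaThread::is_in_no_safepoint_scope()).
thread_local int no_safepoint_count = 0;

struct NoSafepointScope {                 // plays the role of NoSafepointVerifier
  NoSafepointScope()  { ++no_safepoint_count; }
  ~NoSafepointScope() { --no_safepoint_count; }
};

bool is_in_no_safepoint_scope() { return no_safepoint_count > 0; }

void touch_extra_data() {
  // Like check_extra_data_locked(): reaching a safepoint here could let the
  // extra data be repacked underneath us, so require an active scope.
  assert(is_in_no_safepoint_scope());
}

int main() {
  NoSafepointScope scope;   // a lock held without safepoint checks plays this role in the VM
  touch_extra_data();
  return 0;
}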
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2168,7 +2168,7 @@ private:
   // What is the index of the first data entry?
   int first_di() const { return 0; }
 
-  ProfileData* bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp, bool concurrent);
+  ProfileData* bci_to_extra_data_find(int bci, Method* m, DataLayout*& dp);
   // Find or create an extra ProfileData:
   ProfileData* bci_to_extra_data(int bci, Method* m, bool create_if_missing);
 
@@ -2310,20 +2310,12 @@ public:
   intx arg_local() { return _arg_local; }
   intx arg_stack() { return _arg_stack; }
   intx arg_returned() { return _arg_returned; }
-  uint arg_modified(int a) { ArgInfoData *aid = arg_info();
-                             assert(aid != nullptr, "arg_info must be not null");
-                             assert(a >= 0 && a < aid->number_of_args(), "valid argument number");
-                             return aid->arg_modified(a); }
-
+  uint arg_modified(int a);
   void set_eflags(intx v) { _eflags = v; }
   void set_arg_local(intx v) { _arg_local = v; }
   void set_arg_stack(intx v) { _arg_stack = v; }
   void set_arg_returned(intx v) { _arg_returned = v; }
-  void set_arg_modified(int a, uint v) { ArgInfoData *aid = arg_info();
-                                         assert(aid != nullptr, "arg_info must be not null");
-                                         assert(a >= 0 && a < aid->number_of_args(), "valid argument number");
-                                         aid->set_arg_modified(a, v); }
-
+  void set_arg_modified(int a, uint v);
   void clear_escape_info() { _eflags = _arg_local = _arg_stack = _arg_returned = 0; }
 
   // Location and size of data area
@@ -2371,6 +2363,8 @@ public:
 
   // Same, but try to create an extra_data record if one is needed:
   ProfileData* allocate_bci_to_data(int bci, Method* m) {
+    check_extra_data_locked();
+
     ProfileData* data = nullptr;
     // If m not null, try to allocate a SpeculativeTrapData entry
     if (m == nullptr) {
@@ -2397,7 +2391,10 @@ public:
 
   // Add a handful of extra data records, for trap tracking.
   // Only valid after 'set_size' is called at the end of MethodData::initialize
-  DataLayout* extra_data_base() const { return limit_data_position(); }
+  DataLayout* extra_data_base() const {
+    check_extra_data_locked();
+    return limit_data_position();
+  }
   DataLayout* extra_data_limit() const { return (DataLayout*)((address)this + size_in_bytes()); }
   // pointers to sections in extra data
   DataLayout* args_data_limit() const { return parameters_data_base(); }
@@ -2412,7 +2409,7 @@ public:
   DataLayout* exception_handler_data_base() const { return data_layout_at(_exception_handler_data_di); }
   DataLayout* exception_handler_data_limit() const { return extra_data_limit(); }
 
-  int extra_data_size() const { return (int)((address)extra_data_limit() - (address)extra_data_base()); }
+  int extra_data_size() const { return (int)((address)extra_data_limit() - (address)limit_data_position()); }
   static DataLayout* next_extra(DataLayout* dp);
 
   // Return (uint)-1 for overflow.
@@ -2529,6 +2526,7 @@ public:
   void clean_method_data(bool always_clean);
   void clean_weak_method_links();
   Mutex* extra_data_lock() { return &_extra_data_lock; }
+  void check_extra_data_locked() const NOT_DEBUG_RETURN;
 };
 
 #endif // SHARE_OOPS_METHODDATA_HPP
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 #include "oops/methodData.hpp"
 
 #include "runtime/atomic.hpp"
+#include "runtime/mutexLocker.hpp"
 
 inline void DataLayout::release_set_cell_at(int index, intptr_t value) {
   Atomic::release_store(&_cells[index], value);
@@ -53,4 +54,22 @@ inline void RetData::release_set_bci(uint row, int bci) {
   release_set_int_at(bci0_offset + row * ret_row_cell_count, bci);
 }
 
+inline uint MethodData::arg_modified(int a) {
+  // Lock and avoid breaking lock with Safepoint
+  MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag);
+  ArgInfoData* aid = arg_info();
+  assert(aid != nullptr, "arg_info must be not null");
+  assert(a >= 0 && a < aid->number_of_args(), "valid argument number");
+  return aid->arg_modified(a);
+}
+
+inline void MethodData::set_arg_modified(int a, uint v) {
+  // Lock and avoid breaking lock with Safepoint
+  MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag);
+  ArgInfoData* aid = arg_info();
+  assert(aid != nullptr, "arg_info must be not null");
+  assert(a >= 0 && a < aid->number_of_args(), "valid argument number");
+  aid->set_arg_modified(a, v);
+}
+
 #endif // SHARE_OOPS_METHODDATA_INLINE_HPP
@@ -63,6 +63,7 @@
 #include "oops/constantPool.inline.hpp"
 #include "oops/klass.inline.hpp"
 #include "oops/method.inline.hpp"
+#include "oops/methodData.inline.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "oops/objArrayOop.inline.hpp"
 #include "oops/oop.inline.hpp"
@@ -1226,7 +1227,6 @@ WB_ENTRY(void, WB_ClearMethodState(JNIEnv* env, jobject o, jobject method))
     for (int i = 0; i < arg_count; i++) {
       mdo->set_arg_modified(i, 0);
     }
-    MutexLocker mu(THREAD, mdo->extra_data_lock());
     mdo->clean_method_data(/*always_clean*/true);
   }
 
@@ -1801,6 +1801,9 @@ address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod*
     ScopeDesc* imm_scope = cvf->scope();
     MethodData* imm_mdo = get_method_data(thread, methodHandle(thread, imm_scope->method()), true);
     if (imm_mdo != nullptr) {
+      // Lock to read ProfileData, and ensure lock is not broken by a safepoint
+      MutexLocker ml(imm_mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);
+
       ProfileData* pdata = imm_mdo->allocate_bci_to_data(imm_scope->bci(), nullptr);
       if (pdata != nullptr && pdata->is_BitData()) {
         BitData* bit_data = (BitData*) pdata;
@@ -2106,7 +2109,14 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr
     // Print a bunch of diagnostics, if requested.
     if (TraceDeoptimization || LogCompilation || is_receiver_constraint_failure) {
       ResourceMark rm;
+
+      // Lock to read ProfileData, and ensure lock is not broken by a safepoint
+      // We must do this already now, since we cannot acquire this lock while
+      // holding the tty lock (lock ordering by rank).
+      MutexLocker ml(trap_mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);
+
       ttyLocker ttyl;
+
       char buf[100];
       if (xtty != nullptr) {
         xtty->begin_head("uncommon_trap thread='" UINTX_FORMAT "' %s",
@@ -2141,6 +2151,9 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr
       int dcnt = trap_mdo->trap_count(reason);
       if (dcnt != 0)
         xtty->print(" count='%d'", dcnt);
+
+      // We need to lock to read the ProfileData. But to keep the locks ordered, we need to
+      // lock extra_data_lock before the tty lock.
       ProfileData* pdata = trap_mdo->bci_to_data(trap_bci);
       int dos = (pdata == nullptr)? 0: pdata->trap_state();
       if (dos != 0) {
@@ -2316,12 +2329,18 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr
     // to use the MDO to detect hot deoptimization points and control
     // aggressive optimization.
     bool inc_recompile_count = false;
+
+    // Lock to read ProfileData, and ensure lock is not broken by a safepoint
+    ConditionalMutexLocker ml((trap_mdo != nullptr) ? trap_mdo->extra_data_lock() : nullptr,
+                              (trap_mdo != nullptr),
+                              Mutex::_no_safepoint_check_flag);
     ProfileData* pdata = nullptr;
     if (ProfileTraps && CompilerConfig::is_c2_or_jvmci_compiler_enabled() && update_trap_state && trap_mdo != nullptr) {
       assert(trap_mdo == get_method_data(current, profiled_method, false), "sanity");
       uint this_trap_count = 0;
       bool maybe_prior_trap = false;
       bool maybe_prior_recompile = false;
+
       pdata = query_update_method_data(trap_mdo, trap_bci, reason, true,
 #if INCLUDE_JVMCI
                                        nm->is_compiled_by_jvmci() && nm->is_osr_method(),
@@ -2477,6 +2496,8 @@ Deoptimization::query_update_method_data(MethodData* trap_mdo,
                                          uint& ret_this_trap_count,
                                          bool& ret_maybe_prior_trap,
                                          bool& ret_maybe_prior_recompile) {
+  trap_mdo->check_extra_data_locked();
+
   bool maybe_prior_trap = false;
   bool maybe_prior_recompile = false;
   uint this_trap_count = 0;
@@ -2559,6 +2580,10 @@ Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int tr
   assert(!reason_is_speculate(reason), "reason speculate only used by compiler");
   // JVMCI uses the total counts to determine if deoptimizations are happening too frequently -> do not adjust total counts
   bool update_total_counts = true JVMCI_ONLY( && !UseJVMCICompiler);
+
+  // Lock to read ProfileData, and ensure lock is not broken by a safepoint
+  MutexLocker ml(trap_mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);
+
   query_update_method_data(trap_mdo, trap_bci,
                            (DeoptReason)reason,
                            update_total_counts,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -258,6 +258,7 @@ class JavaThread: public Thread {
  public:
   void inc_no_safepoint_count() { _no_safepoint_count++; }
   void dec_no_safepoint_count() { _no_safepoint_count--; }
+  bool is_in_no_safepoint_scope() { return _no_safepoint_count > 0; }
 #endif // ASSERT
  public:
   // These functions check conditions before possibly going to a safepoint.
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -615,6 +615,10 @@ void SharedRuntime::throw_and_post_jvmti_exception(JavaThread* current, Handle h
   Bytecode_invoke call = Bytecode_invoke_check(method, bci);
   if (call.is_valid()) {
     ResourceMark rm(current);
+
+    // Lock to read ProfileData, and ensure lock is not broken by a safepoint
+    MutexLocker ml(trap_mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);
+
     ProfileData* pdata = trap_mdo->allocate_bci_to_data(bci, nullptr);
     if (pdata != nullptr && pdata->is_BitData()) {
       BitData* bit_data = (BitData*) pdata;