8318776: Require supports_cx8 to always be true
Reviewed-by: eosterlund, shade, dcubed
This commit is contained in: parent 14557e72ef, commit c75c38871e
@@ -203,7 +203,6 @@ JVM_SetStackWalkContinuation
 JVM_SetThreadPriority
 JVM_SleepNanos
 JVM_StartThread
-JVM_SupportsCX8
 JVM_TotalMemory
 JVM_UnloadLibrary
 JVM_WaitForReferencePendingList
@@ -1511,7 +1511,6 @@ void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
 }
 
 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
-  assert(VM_Version::supports_cx8(), "wrong machine");
   Register addr;
   if (op->addr()->is_register()) {
     addr = as_reg(op->addr());
@@ -68,7 +68,6 @@ static SpinWait get_spin_wait_desc() {
 }
 
 void VM_Version::initialize() {
-  _supports_cx8 = true;
   _supports_atomic_getset4 = true;
   _supports_atomic_getadd4 = true;
   _supports_atomic_getset8 = true;
@@ -1385,7 +1385,6 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
     __ mov(dest, 1, eq);
     __ mov(dest, 0, ne);
   } else if (op->code() == lir_cas_long) {
-    assert(VM_Version::supports_cx8(), "wrong machine");
     Register cmp_value_lo = op->cmp_value()->as_register_lo();
     Register cmp_value_hi = op->cmp_value()->as_register_hi();
     Register new_value_lo = op->new_value()->as_register_lo();
@@ -128,10 +128,16 @@ void VM_Version::early_initialize() {
   // use proper dmb instruction
   get_os_cpu_info();
 
+  // Future cleanup: if SUPPORTS_NATIVE_CX8 is defined then we should not need
+  // any alternative solutions. At present this allows for the theoretical
+  // possibility of building for ARMv7 and then running on ARMv5 or 6. If that
+  // is impossible then the ARM port folk should clean this up.
   _kuser_helper_version = *(int*)KUSER_HELPER_VERSION_ADDR;
+#ifndef SUPPORTS_NATIVE_CX8
   // armv7 has the ldrexd instruction that can be used to implement cx8
   // armv5 with linux >= 3.1 can use kernel helper routine
   _supports_cx8 = (supports_ldrexd() || supports_kuser_cmpxchg64());
+#endif
 }
 
 void VM_Version::initialize() {
@@ -278,7 +284,7 @@ void VM_Version::initialize() {
   _supports_atomic_getadd8 = supports_ldrexd();
 
 #ifdef COMPILER2
-  assert(_supports_cx8 && _supports_atomic_getset4 && _supports_atomic_getadd4
+  assert(supports_cx8() && _supports_atomic_getset4 && _supports_atomic_getadd4
          && _supports_atomic_getset8 && _supports_atomic_getadd8, "C2: atomic operations must be supported");
 #endif
   char buf[512];
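Note on the ARM hunks above: on 32-bit ARM Linux the VM probes the kernel's user helpers to decide whether a native 64-bit cmpxchg is available. A minimal standalone sketch of that probe follows; the fixed vector-page addresses and the minimum helper version (5 for __kuser_cmpxchg64) come from the Linux kernel's user-helpers documentation and are assumptions here, not part of this patch.

  #include <stdint.h>

  // Kernel-published locations in the ARM vector page (per Linux
  // Documentation/arm/kernel_user_helpers; assumed values, not JDK code).
  #define KUSER_HELPER_VERSION_ADDR 0xffff0ffcUL
  #define KUSER_CMPXCHG64_ADDR      0xffff0f60UL

  // Shape of __kuser_cmpxchg64: returns 0 if *ptr held *oldval and was
  // atomically replaced by *newval.
  typedef int (*kuser_cmpxchg64_t)(const int64_t* oldval,
                                   const int64_t* newval,
                                   volatile int64_t* ptr);

  static bool probe_kuser_cmpxchg64() {
    int version = *(int*)KUSER_HELPER_VERSION_ADDR;
    return version >= 5;  // helper version 5 introduced __kuser_cmpxchg64
  }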
@@ -200,10 +200,6 @@ void VM_Version::initialize() {
     print_features();
   }
 
-  // PPC64 supports 8-byte compare-exchange operations (see Atomic::cmpxchg)
-  // and 'atomic long memory ops' (see Unsafe_GetLongVolatile).
-  _supports_cx8 = true;
-
   // Used by C1.
   _supports_atomic_getset4 = true;
   _supports_atomic_getadd4 = true;
@@ -1251,7 +1251,6 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
 }
 
 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
-  assert(VM_Version::supports_cx8(), "wrong machine");
   Register addr;
   if (op->addr()->is_register()) {
     addr = as_reg(op->addr());
@@ -46,7 +46,6 @@ RV_FEATURE_FLAGS(ADD_RV_FEATURE_IN_LIST)
   nullptr};
 
 void VM_Version::initialize() {
-  _supports_cx8 = true;
   _supports_atomic_getset4 = true;
   _supports_atomic_getadd4 = true;
   _supports_atomic_getset8 = true;
@@ -2670,7 +2670,6 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
   Register addr = op->addr()->as_pointer_register();
   Register t1_cmp = Z_R1_scratch;
   if (op->code() == lir_cas_long) {
-    assert(VM_Version::supports_cx8(), "wrong machine");
     Register cmp_value_lo = op->cmp_value()->as_register_lo();
     Register new_value_lo = op->new_value()->as_register_lo();
     __ z_lgr(t1_cmp, cmp_value_lo);
@@ -287,11 +287,6 @@ void VM_Version::initialize() {
     FLAG_SET_DEFAULT(UsePopCountInstruction, true);
   }
 
-  // z/Architecture supports 8-byte compare-exchange operations
-  // (see Atomic::cmpxchg)
-  // and 'atomic long memory ops' (see Unsafe_GetLongVolatile).
-  _supports_cx8 = true;
-
   _supports_atomic_getadd4 = VM_Version::has_LoadAndALUAtomicV1();
   _supports_atomic_getadd8 = VM_Version::has_LoadAndALUAtomicV1();
 
@@ -1929,7 +1929,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
 
 
 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
-  if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
+  if (LP64_ONLY(false &&) op->code() == lir_cas_long) {
     assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
     assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
     assert(op->new_value()->as_register_lo() == rbx, "wrong register");
@@ -33,7 +33,6 @@ instruct compareAndSwapP_shenandoah(rRegI res,
                                     rax_RegP oldval, rRegP newval,
                                     rFlagsReg cr)
 %{
-  predicate(VM_Version::supports_cx8());
   match(Set res (ShenandoahCompareAndSwapP mem_ptr (Binary oldval newval)));
   match(Set res (ShenandoahWeakCompareAndSwapP mem_ptr (Binary oldval newval)));
   effect(TEMP tmp1, TEMP tmp2, KILL cr, KILL oldval);
@@ -95,7 +94,6 @@ instruct compareAndExchangeP_shenandoah(memory mem_ptr,
                                         rRegP tmp1, rRegP tmp2,
                                         rFlagsReg cr)
 %{
-  predicate(VM_Version::supports_cx8());
   match(Set oldval (ShenandoahCompareAndExchangeP mem_ptr (Binary oldval newval)));
   effect(KILL cr, TEMP tmp1, TEMP tmp2);
   ins_cost(1000);
@@ -816,7 +816,6 @@ void VM_Version::get_processor_features() {
     _L1_data_cache_line_size = L1_line_size();
   }
 
-  _supports_cx8 = supports_cmpxchg8();
   // xchg and xadd instructions
   _supports_atomic_getset4 = true;
   _supports_atomic_getadd4 = true;
@@ -3236,4 +3235,3 @@ bool VM_Version::is_intrinsic_supported(vmIntrinsicID id) {
   }
   return true;
 }
-
@@ -643,7 +643,6 @@ public:
   // Feature identification
   //
   static bool supports_cpuid() { return _features != 0; }
-  static bool supports_cmpxchg8() { return (_features & CPU_CX8) != 0; }
   static bool supports_cmov() { return (_features & CPU_CMOV) != 0; }
   static bool supports_fxsr() { return (_features & CPU_FXSR) != 0; }
   static bool supports_ht() { return (_features & CPU_HT) != 0; }
@@ -1510,9 +1510,6 @@ bool Matcher::match_rule_supported(int opcode) {
 #ifdef _LP64
     case Op_CompareAndSwapP:
 #endif
-      if (!VM_Version::supports_cx8()) {
-        return false;
-      }
       break;
     case Op_StrIndexOf:
       if (!UseSSE42Intrinsics) {
@@ -10119,5 +10116,3 @@ instruct DoubleClassCheck_reg_reg_vfpclass(rRegI dst, regD src, kReg ktmp, rFlag
 %}
   ins_pipe(pipe_slow);
 %}
-
-
@@ -7281,7 +7281,6 @@ instruct castDD_PR( regDPR dst ) %{
 // No flag versions for CompareAndSwap{P,I,L} because matcher can't match them
 
 instruct compareAndSwapL( rRegI res, eSIRegP mem_ptr, eADXRegL oldval, eBCXRegL newval, eFlagsReg cr ) %{
-  predicate(VM_Version::supports_cx8());
   match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval)));
   match(Set res (WeakCompareAndSwapL mem_ptr (Binary oldval newval)));
   effect(KILL cr, KILL oldval);
@@ -7350,7 +7349,6 @@ instruct compareAndSwapI( rRegI res, pRegP mem_ptr, eAXRegI oldval, eCXRegI newv
 %}
 
 instruct compareAndExchangeL( eSIRegP mem_ptr, eADXRegL oldval, eBCXRegL newval, eFlagsReg cr ) %{
-  predicate(VM_Version::supports_cx8());
   match(Set oldval (CompareAndExchangeL mem_ptr (Binary oldval newval)));
   effect(KILL cr);
   format %{ "CMPXCHG8 [$mem_ptr],$newval\t# If EDX:EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t" %}
@@ -7174,7 +7174,7 @@ instruct compareAndSwapP(rRegI res,
                          rax_RegP oldval, rRegP newval,
                          rFlagsReg cr)
 %{
-  predicate(VM_Version::supports_cx8() && n->as_LoadStore()->barrier_data() == 0);
+  predicate(n->as_LoadStore()->barrier_data() == 0);
   match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
   match(Set res (WeakCompareAndSwapP mem_ptr (Binary oldval newval)));
   effect(KILL cr, KILL oldval);
@@ -7197,7 +7197,6 @@ instruct compareAndSwapL(rRegI res,
                          rax_RegL oldval, rRegL newval,
                          rFlagsReg cr)
 %{
-  predicate(VM_Version::supports_cx8());
   match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval)));
   match(Set res (WeakCompareAndSwapL mem_ptr (Binary oldval newval)));
   effect(KILL cr, KILL oldval);
@@ -7358,7 +7357,6 @@ instruct compareAndExchangeL(
                              rax_RegL oldval, rRegL newval,
                              rFlagsReg cr)
 %{
-  predicate(VM_Version::supports_cx8());
   match(Set oldval (CompareAndExchangeL mem_ptr (Binary oldval newval)));
   effect(KILL cr);
 
@@ -7392,7 +7390,7 @@ instruct compareAndExchangeP(
                              rax_RegP oldval, rRegP newval,
                              rFlagsReg cr)
 %{
-  predicate(VM_Version::supports_cx8() && n->as_LoadStore()->barrier_data() == 0);
+  predicate(n->as_LoadStore()->barrier_data() == 0);
   match(Set oldval (CompareAndExchangeP mem_ptr (Binary oldval newval)));
   effect(KILL cr);
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -138,11 +138,13 @@ void VM_Version::initialize() {
   UNSUPPORTED_OPTION(CountCompiledCalls);
 #endif
 
+#ifndef SUPPORTS_NATIVE_CX8
   // Supports 8-byte cmpxchg with compiler built-ins.
   // These built-ins are supposed to be implemented on
   // all platforms (even if not natively), so we claim
   // the support unconditionally.
   _supports_cx8 = true;
+#endif
 }
 
 void VM_Version::initialize_cpu_information(void) {
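The Zero hunk above can claim cx8 support because the GCC/Clang __atomic built-ins are available everywhere, falling back to libatomic where the hardware has no native 8-byte CAS. A minimal standalone sketch of the built-in the comment refers to (an illustration, not part of this patch):

  #include <stdint.h>

  // 64-bit compare-and-exchange via the compiler built-in. Returns the value
  // observed at *dest; on success that equals compare_value.
  static inline int64_t cmpxchg64(volatile int64_t* dest,
                                  int64_t compare_value,
                                  int64_t exchange_value) {
    int64_t expected = compare_value;
    __atomic_compare_exchange_n(dest, &expected, exchange_value,
                                false /* strong CAS */,
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return expected;
  }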
@@ -153,7 +153,6 @@ inline int32_t reorder_cmpxchg_func(int32_t exchange_value,
 inline int64_t reorder_cmpxchg_long_func(int64_t exchange_value,
                                          int64_t volatile* dest,
                                          int64_t compare_value) {
-  assert(VM_Version::supports_cx8(), "Atomic compare and exchange int64_t not supported on this architecture!");
   // Warning: Arguments are swapped to avoid moving them for kernel call
   return (*ARMAtomicFuncs::_cmpxchg_long_func)(compare_value, exchange_value, dest);
 }
@@ -110,7 +110,6 @@ bool Compiler::is_intrinsic_supported(const methodHandle& method) {
 bool Compiler::is_intrinsic_supported(vmIntrinsics::ID id) {
   switch (id) {
   case vmIntrinsics::_compareAndSetLong:
-    if (!VM_Version::supports_cx8()) return false;
     break;
   case vmIntrinsics::_getAndAddInt:
     if (!VM_Version::supports_atomic_getadd4()) return false;
@@ -781,12 +781,6 @@ JVM_DesiredAssertionStatus(JNIEnv *env, jclass unused, jclass cls);
 JNIEXPORT jobject JNICALL
 JVM_AssertionStatusDirectives(JNIEnv *env, jclass unused);
 
-/*
- * java.util.concurrent.atomic.AtomicLong
- */
-JNIEXPORT jboolean JNICALL
-JVM_SupportsCX8(void);
-
 /*
  * java.lang.ref.Finalizer
  */
@@ -35,12 +35,10 @@
 #include "runtime/atomic.hpp"
 #include "runtime/javaThread.hpp"
 #include "runtime/jniHandles.inline.hpp"
-#include "runtime/vm_version.hpp"
 #include "utilities/growableArray.hpp"
 
 // returns updated value
 static traceid atomic_inc(traceid volatile* const dest, traceid stride = 1) {
-  assert(VM_Version::supports_cx8(), "invariant");
   traceid compare_value;
   traceid exchange_value;
   do {
@@ -294,4 +292,3 @@ void JfrTraceId::untag_jdk_jfr_event_sub(const Klass* k) {
   }
   assert(IS_NOT_AN_EVENT_SUB_KLASS(k), "invariant");
 }
-
@@ -29,14 +29,12 @@
 #include "memory/allocation.inline.hpp"
 #include "nmt/memTracker.hpp"
 #include "runtime/atomic.hpp"
-#include "runtime/vm_version.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/nativeCallStack.hpp"
 
 #ifdef ASSERT
 static jlong atomic_add_jlong(jlong value, jlong volatile* const dest) {
-  assert(VM_Version::supports_cx8(), "unsupported");
   jlong compare_value;
   jlong exchange_value;
   do {
@@ -27,8 +27,6 @@
 
 #include "runtime/javaThread.hpp"
 
-// this utility could be useful for non cx8 platforms
-
 class JfrSpinlockHelper {
  private:
   volatile int* const _lock;
@@ -28,60 +28,11 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/javaThread.inline.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/vm_version.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/vmError.hpp"
 
 namespace AccessInternal {
-  // VM_Version::supports_cx8() is a surrogate for 'supports atomic long memory ops'.
-  //
-  // On platforms which do not support atomic compare-and-swap of jlong (8 byte)
-  // values we have to use a lock-based scheme to enforce atomicity. This has to be
-  // applied to all Unsafe operations that set the value of a jlong field. Even so
-  // the compareAndSwapLong operation will not be atomic with respect to direct stores
-  // to the field from Java code. It is important therefore that any Java code that
-  // utilizes these Unsafe jlong operations does not perform direct stores. To permit
-  // direct loads of the field from Java code we must also use Atomic::store within the
-  // locked regions. And for good measure, in case there are direct stores, we also
-  // employ Atomic::load within those regions. Note that the field in question must be
-  // volatile and so must have atomic load/store accesses applied at the Java level.
-  //
-  // The locking scheme could utilize a range of strategies for controlling the locking
-  // granularity: from a lock per-field through to a single global lock. The latter is
-  // the simplest and is used for the current implementation. Note that the Java object
-  // that contains the field, can not, in general, be used for locking. To do so can lead
-  // to deadlocks as we may introduce locking into what appears to the Java code to be a
-  // lock-free path.
-  //
-  // As all the locked-regions are very short and themselves non-blocking we can treat
-  // them as leaf routines and elide safepoint checks (ie we don't perform any thread
-  // state transitions even when blocking for the lock). Note that if we do choose to
-  // add safepoint checks and thread state transitions, we must ensure that we calculate
-  // the address of the field _after_ we have acquired the lock, else the object may have
-  // been moved by the GC
-
-#ifndef SUPPORTS_NATIVE_CX8
-
-  // This is intentionally in the cpp file rather than the .inline.hpp file. It seems
-  // desirable to trade faster JDK build times (not propagating vm_version.hpp)
-  // for slightly worse runtime atomic jlong performance on 32 bit machines with
-  // support for 64 bit atomics.
-  bool wide_atomic_needs_locking() {
-    return !VM_Version::supports_cx8();
-  }
-
-  AccessLocker::AccessLocker() {
-    assert(!VM_Version::supports_cx8(), "why else?");
-    UnsafeJlong_lock->lock_without_safepoint_check();
-  }
-
-  AccessLocker::~AccessLocker() {
-    UnsafeJlong_lock->unlock();
-  }
-
-#endif
-
   // These forward copying calls to Copy without exposing the Copy type in headers unnecessarily
 
   void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length) {
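For reference, the scheme deleted above emulated 64-bit atomicity with a single global lock. A standalone sketch of the idea, with std::mutex standing in for HotSpot's UnsafeJlong_lock (an illustration of the removed design, not JDK code):

  #include <stdint.h>
  #include <mutex>

  // One global lock guards every wide access, so loads, stores and cmpxchg
  // of the same jlong field all serialize through it.
  static std::mutex g_unsafe_jlong_lock;

  static int64_t locked_cmpxchg(volatile int64_t* p,
                                int64_t compare_value, int64_t new_value) {
    std::lock_guard<std::mutex> guard(g_unsafe_jlong_lock);
    int64_t old_val = *p;
    if (old_val == compare_value) {
      *p = new_value;
    }
    return old_val;  // caller compares against compare_value to detect success
  }

  static int64_t locked_load(volatile int64_t* p) {
    std::lock_guard<std::mutex> guard(g_unsafe_jlong_lock);
    return *p;
  }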
@@ -81,15 +81,6 @@ namespace AccessInternal {
                                              reinterpret_cast<intptr_t>((void*)base) + byte_offset);
   }
 
-  // This metafunction returns whether it is possible for a type T to require
-  // locking to support wide atomics or not.
-  template <typename T>
-#ifdef SUPPORTS_NATIVE_CX8
-  struct PossiblyLockedAccess: public std::false_type {};
-#else
-  struct PossiblyLockedAccess: public std::integral_constant<bool, (sizeof(T) > 4)> {};
-#endif
-
   template <DecoratorSet decorators, typename T>
   struct AccessFunctionTypes {
     typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
@@ -140,13 +131,6 @@ namespace AccessInternal {
   template <DecoratorSet decorators, typename T, BarrierType barrier_type>
   typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();
 
-  class AccessLocker {
-  public:
-    AccessLocker();
-    ~AccessLocker();
-  };
-  bool wide_atomic_needs_locking();
-
   void* field_addr(oop base, ptrdiff_t offset);
 
   // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
@@ -281,34 +265,6 @@ protected:
     HasDecorator<ds, MO_SEQ_CST>::value, T>::type
   atomic_xchg_internal(void* addr, T new_value);
 
-  // The following *_locked mechanisms serve the purpose of handling atomic operations
-  // that are larger than a machine can handle, and then possibly opt for using
-  // a slower path using a mutex to perform the operation.
-
-  template <DecoratorSet ds, typename T>
-  static inline typename EnableIf<
-    !AccessInternal::PossiblyLockedAccess<T>::value, T>::type
-  atomic_cmpxchg_maybe_locked(void* addr, T compare_value, T new_value) {
-    return atomic_cmpxchg_internal<ds>(addr, compare_value, new_value);
-  }
-
-  template <DecoratorSet ds, typename T>
-  static typename EnableIf<
-    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
-  atomic_cmpxchg_maybe_locked(void* addr, T compare_value, T new_value);
-
-  template <DecoratorSet ds, typename T>
-  static inline typename EnableIf<
-    !AccessInternal::PossiblyLockedAccess<T>::value, T>::type
-  atomic_xchg_maybe_locked(void* addr, T new_value) {
-    return atomic_xchg_internal<ds>(addr, new_value);
-  }
-
-  template <DecoratorSet ds, typename T>
-  static typename EnableIf<
-    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
-  atomic_xchg_maybe_locked(void* addr, T new_value);
-
 public:
   template <typename T>
   static inline void store(void* addr, T value) {
@@ -322,12 +278,12 @@ public:
 
   template <typename T>
   static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
-    return atomic_cmpxchg_maybe_locked<decorators>(addr, compare_value, new_value);
+    return atomic_cmpxchg_internal<decorators>(addr, compare_value, new_value);
   }
 
   template <typename T>
   static inline T atomic_xchg(void* addr, T new_value) {
-    return atomic_xchg_maybe_locked<decorators>(addr, new_value);
+    return atomic_xchg_internal<decorators>(addr, new_value);
   }
 
   template <typename T>
@@ -214,44 +214,6 @@ RawAccessBarrier<decorators>::atomic_xchg_internal(void* addr, T new_value) {
                                   new_value);
 }
 
-// For platforms that do not have native support for wide atomics,
-// we can emulate the atomicity using a lock. So here we check
-// whether that is necessary or not.
-
-template <DecoratorSet ds>
-template <DecoratorSet decorators, typename T>
-inline typename EnableIf<
-  AccessInternal::PossiblyLockedAccess<T>::value, T>::type
-RawAccessBarrier<ds>::atomic_xchg_maybe_locked(void* addr, T new_value) {
-  if (!AccessInternal::wide_atomic_needs_locking()) {
-    return atomic_xchg_internal<ds>(addr, new_value);
-  } else {
-    AccessInternal::AccessLocker access_lock;
-    volatile T* p = reinterpret_cast<volatile T*>(addr);
-    T old_val = RawAccess<>::load(p);
-    RawAccess<>::store(p, new_value);
-    return old_val;
-  }
-}
-
-template <DecoratorSet ds>
-template <DecoratorSet decorators, typename T>
-inline typename EnableIf<
-  AccessInternal::PossiblyLockedAccess<T>::value, T>::type
-RawAccessBarrier<ds>::atomic_cmpxchg_maybe_locked(void* addr, T compare_value, T new_value) {
-  if (!AccessInternal::wide_atomic_needs_locking()) {
-    return atomic_cmpxchg_internal<ds>(addr, compare_value, new_value);
-  } else {
-    AccessInternal::AccessLocker access_lock;
-    volatile T* p = reinterpret_cast<volatile T*>(addr);
-    T old_val = RawAccess<>::load(p);
-    if (old_val == compare_value) {
-      RawAccess<>::store(p, new_value);
-    }
-    return old_val;
-  }
-}
-
 class RawAccessBarrierArrayCopy: public AllStatic {
   template<typename T> struct IsHeapWordSized: public std::integral_constant<bool, sizeof(T) == HeapWordSize> { };
 public:
@@ -3586,12 +3586,6 @@ JVM_ENTRY(jobject, JVM_NewInstanceFromConstructor(JNIEnv *env, jobject c, jobjec
   return res;
 JVM_END
 
-// Atomic ///////////////////////////////////////////////////////////////////////////////////////////
-
-JVM_LEAF(jboolean, JVM_SupportsCX8())
-  return VM_Version::supports_cx8();
-JVM_END
-
 JVM_ENTRY(void, JVM_InitializeFromArchive(JNIEnv* env, jclass cls))
   Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve(cls));
   assert(k->is_klass(), "just checking");
@@ -35,7 +35,9 @@ const char* Abstract_VM_Version::_s_internal_vm_info_string = Abstract_VM_Versio
 uint64_t Abstract_VM_Version::_features = 0;
 const char* Abstract_VM_Version::_features_string = "";
 
+#ifndef SUPPORTS_NATIVE_CX8
 bool Abstract_VM_Version::_supports_cx8 = false;
+#endif
 bool Abstract_VM_Version::_supports_atomic_getset4 = false;
 bool Abstract_VM_Version::_supports_atomic_getset8 = false;
 bool Abstract_VM_Version::_supports_atomic_getadd4 = false;
@@ -59,7 +59,9 @@ class Abstract_VM_Version: AllStatic {
   static const char* _features_string;
 
   // These are set by machine-dependent initializations
+#ifndef SUPPORTS_NATIVE_CX8
   static bool _supports_cx8;
+#endif
   static bool _supports_atomic_getset4;
   static bool _supports_atomic_getset8;
   static bool _supports_atomic_getadd4;
@@ -133,6 +135,8 @@ class Abstract_VM_Version: AllStatic {
   static void print_platform_virtualization_info(outputStream*) { }
 
   // does HW support an 8-byte compare-exchange operation?
+  // Required to be true but still dynamically checked at runtime
+  // for platforms that don't set SUPPORTS_NATIVE_CX8
   static bool supports_cx8() {
+#ifdef SUPPORTS_NATIVE_CX8
+    return true;
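The hunk above is cut off by the viewer after "return true;". Judging from the #ifdef pattern used throughout this patch, the complete accessor presumably has an #else branch returning the platform-set flag, along these lines (a sketch; the #else branch is inferred, not shown in the diff):

  static bool supports_cx8() {
  #ifdef SUPPORTS_NATIVE_CX8
    return true;
  #else
    return _supports_cx8;  // set by CPU-specific initialization; must end up true
  #endif
  }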
@@ -55,11 +55,13 @@ enum ScopedFenceType {
 
 class Atomic : AllStatic {
 public:
-  // Atomic operations on int64 types are not available on all 32-bit
-  // platforms. If atomic ops on int64 are defined here they must only
-  // be used from code that verifies they are available at runtime and
-  // can provide an alternative action if not - see supports_cx8() for
-  // a means to test availability.
+  // Atomic operations on int64 types are required to be available on
+  // all platforms. At a minimum a 64-bit cmpxchg must be available
+  // from which other atomic operations can be constructed if needed.
+  // The legacy `Abstract_VM_Version::supports_cx8()` function used to
+  // indicate if this support existed, allowing for an alternative lock-
+  // based mechanism to be used. But today this function is required
+  // to return true and in the future will be removed entirely.
 
   // The memory operations that are mentioned with each of the atomic
   // function families come from src/share/vm/runtime/orderAccess.hpp,
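The new comment's claim that other atomic operations "can be constructed" from a 64-bit cmpxchg is the standard CAS-loop idiom. A hedged sketch in HotSpot style (argument order dest/compare/exchange as in the current Atomic API; the helper name is hypothetical):

  #include "runtime/atomic.hpp"

  // Derive 64-bit fetch-and-add from cmpxchg alone: retry until no other
  // thread changed *dest between the load and the CAS. Atomic::cmpxchg
  // returns the value it observed at *dest.
  static int64_t fetch_then_add64(volatile int64_t* dest, int64_t add_value) {
    int64_t old_value;
    do {
      old_value = Atomic::load(dest);
    } while (Atomic::cmpxchg(dest, old_value, old_value + add_value) != old_value);
    return old_value;
  }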
@@ -123,9 +123,6 @@ Mutex* JfrBuffer_lock = nullptr;
 Monitor* JfrThreadSampler_lock = nullptr;
 #endif
 
-#ifndef SUPPORTS_NATIVE_CX8
-Mutex* UnsafeJlong_lock = nullptr;
-#endif
 Mutex* CodeHeapStateAnalytics_lock = nullptr;
 
 Monitor* ContinuationRelativize_lock = nullptr;
@@ -298,10 +295,6 @@ void mutex_init() {
   MUTEX_DEFN(JfrThreadSampler_lock , PaddedMonitor, nosafepoint);
 #endif
 
-#ifndef SUPPORTS_NATIVE_CX8
-  MUTEX_DEFN(UnsafeJlong_lock , PaddedMutex , nosafepoint);
-#endif
-
   MUTEX_DEFN(ContinuationRelativize_lock , PaddedMonitor, nosafepoint-3);
   MUTEX_DEFN(CodeHeapStateAnalytics_lock , PaddedMutex , safepoint);
   MUTEX_DEFN(ThreadsSMRDelete_lock , PaddedMonitor, service-2); // Holds ConcurrentHashTableResize_lock
@@ -136,10 +136,6 @@ extern Mutex* JfrBuffer_lock; // protects JFR buffer operatio
 extern Monitor* JfrThreadSampler_lock; // used to suspend/resume JFR thread sampler
 #endif
 
-#ifndef SUPPORTS_NATIVE_CX8
-extern Mutex* UnsafeJlong_lock; // provides Unsafe atomic updates to jlongs on platforms that don't support cx8
-#endif
-
 extern Mutex* Metaspace_lock; // protects Metaspace virtualspace and chunk expansions
 extern Monitor* MetaspaceCritical_lock; // synchronizes failed metaspace allocations that risk throwing metaspace OOM
 extern Mutex* ClassLoaderDataGraph_lock; // protects CLDG list, needed for concurrent unloading
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
 
 void VM_Version_init() {
   VM_Version::initialize();
-
+  guarantee(VM_Version::supports_cx8(), "Support for 64-bit atomic operations is required");
   if (log_is_enabled(Info, os, cpu)) {
     char buf[1024];
     ResourceMark rm;
@@ -55,20 +55,6 @@ import jdk.internal.misc.Unsafe;
 public class AtomicLong extends Number implements java.io.Serializable {
     private static final long serialVersionUID = 1927816293512124184L;
 
-    /**
-     * Records whether the underlying JVM supports lockless
-     * compareAndSet for longs. While the intrinsic compareAndSetLong
-     * method works in either case, some constructions should be
-     * handled at Java level to avoid locking user-visible locks.
-     */
-    static final boolean VM_SUPPORTS_LONG_CAS = VMSupportsCS8();
-
-    /**
-     * Returns whether underlying JVM supports lockless CompareAndSet
-     * for longs. Called only once and cached in VM_SUPPORTS_LONG_CAS.
-     */
-    private static native boolean VMSupportsCS8();
-
     /*
      * This class intended to be implemented using VarHandles, but there
      * are unresolved cyclic startup dependencies.
@@ -90,10 +90,7 @@ public abstract class AtomicLongFieldUpdater<T> {
     public static <U> AtomicLongFieldUpdater<U> newUpdater(Class<U> tclass,
                                                            String fieldName) {
         Class<?> caller = Reflection.getCallerClass();
-        if (AtomicLong.VM_SUPPORTS_LONG_CAS)
-            return new CASUpdater<U>(tclass, fieldName, caller);
-        else
-            return new LockedUpdater<U>(tclass, fieldName, caller);
+        return new CASUpdater<U>(tclass, fieldName, caller);
     }
 
     /**
@@ -515,126 +512,6 @@ public abstract class AtomicLongFieldUpdater<T> {
         }
     }
 
-    private static final class LockedUpdater<T> extends AtomicLongFieldUpdater<T> {
-        private static final Unsafe U = Unsafe.getUnsafe();
-        private final long offset;
-        /**
-         * if field is protected, the subclass constructing updater, else
-         * the same as tclass
-         */
-        private final Class<?> cclass;
-        /** class holding the field */
-        private final Class<T> tclass;
-
-        @SuppressWarnings("removal")
-        LockedUpdater(final Class<T> tclass, final String fieldName,
-                      final Class<?> caller) {
-            final Field field;
-            final int modifiers;
-            try {
-                field = AccessController.doPrivileged(
-                    new PrivilegedExceptionAction<Field>() {
-                        public Field run() throws NoSuchFieldException {
-                            return tclass.getDeclaredField(fieldName);
-                        }
-                    });
-                modifiers = field.getModifiers();
-                sun.reflect.misc.ReflectUtil.ensureMemberAccess(
-                    caller, tclass, null, modifiers);
-                ClassLoader cl = tclass.getClassLoader();
-                ClassLoader ccl = caller.getClassLoader();
-                if ((ccl != null) && (ccl != cl) &&
-                    ((cl == null) || !isAncestor(cl, ccl))) {
-                    sun.reflect.misc.ReflectUtil.checkPackageAccess(tclass);
-                }
-            } catch (PrivilegedActionException pae) {
-                throw new RuntimeException(pae.getException());
-            } catch (Exception ex) {
-                throw new RuntimeException(ex);
-            }
-
-            if (field.getType() != long.class)
-                throw new IllegalArgumentException("Must be long type");
-
-            if (!Modifier.isVolatile(modifiers))
-                throw new IllegalArgumentException("Must be volatile type");
-
-            // Access to protected field members is restricted to receivers only
-            // of the accessing class, or one of its subclasses, and the
-            // accessing class must in turn be a subclass (or package sibling)
-            // of the protected member's defining class.
-            // If the updater refers to a protected field of a declaring class
-            // outside the current package, the receiver argument will be
-            // narrowed to the type of the accessing class.
-            this.cclass = (Modifier.isProtected(modifiers) &&
-                           tclass.isAssignableFrom(caller) &&
-                           !isSamePackage(tclass, caller))
-                          ? caller : tclass;
-            this.tclass = tclass;
-            this.offset = U.objectFieldOffset(field);
-        }
-
-        /**
-         * Checks that target argument is instance of cclass. On
-         * failure, throws cause.
-         */
-        private final void accessCheck(T obj) {
-            if (!cclass.isInstance(obj))
-                throw accessCheckException(obj);
-        }
-
-        /**
-         * Returns access exception if accessCheck failed due to
-         * protected access, else ClassCastException.
-         */
-        private final RuntimeException accessCheckException(T obj) {
-            if (cclass == tclass)
-                return new ClassCastException();
-            else
-                return new RuntimeException(
-                    new IllegalAccessException(
-                        "Class " +
-                        cclass.getName() +
-                        " can not access a protected member of class " +
-                        tclass.getName() +
-                        " using an instance of " +
-                        obj.getClass().getName()));
-        }
-
-        public final boolean compareAndSet(T obj, long expect, long update) {
-            accessCheck(obj);
-            synchronized (this) {
-                long v = U.getLong(obj, offset);
-                if (v != expect)
-                    return false;
-                U.putLong(obj, offset, update);
-                return true;
-            }
-        }
-
-        public final boolean weakCompareAndSet(T obj, long expect, long update) {
-            return compareAndSet(obj, expect, update);
-        }
-
-        public final void set(T obj, long newValue) {
-            accessCheck(obj);
-            synchronized (this) {
-                U.putLong(obj, offset, newValue);
-            }
-        }
-
-        public final void lazySet(T obj, long newValue) {
-            set(obj, newValue);
-        }
-
-        public final long get(T obj) {
-            accessCheck(obj);
-            synchronized (this) {
-                return U.getLong(obj, offset);
-            }
-        }
-    }
-
     /**
      * Returns true if the second classloader can be found in the first
      * classloader's delegation chain.
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include <jni.h>
-#include <jvm.h>
-#include "java_util_concurrent_atomic_AtomicLong.h"
-
-JNIEXPORT jboolean JNICALL
-Java_java_util_concurrent_atomic_AtomicLong_VMSupportsCS8(JNIEnv *env, jclass cls)
-{
-    return JVM_SupportsCX8();
-}
@@ -60,9 +60,6 @@ TEST_VM(AtomicAddTest, int32) {
 }
 
 TEST_VM(AtomicAddTest, int64) {
-  // Check if 64-bit atomics are available on the machine.
-  if (!VM_Version::supports_cx8()) return;
-
   using Support = AtomicAddTestSupport<int64_t>;
   Support().test_add();
   Support().test_fetch_add();
@@ -109,9 +106,6 @@ TEST_VM(AtomicXchgTest, int32) {
 }
 
 TEST_VM(AtomicXchgTest, int64) {
-  // Check if 64-bit atomics are available on the machine.
-  if (!VM_Version::supports_cx8()) return;
-
   using Support = AtomicXchgTestSupport<int64_t>;
   Support().test();
 }
@@ -349,15 +343,9 @@ TEST_VM(AtomicBitopsTest, uint32) {
 }
 
 TEST_VM(AtomicBitopsTest, int64) {
-  // Check if 64-bit atomics are available on the machine.
-  if (!VM_Version::supports_cx8()) return;
-
   AtomicBitopsTestSupport<int64_t>()();
 }
 
 TEST_VM(AtomicBitopsTest, uint64) {
-  // Check if 64-bit atomics are available on the machine.
-  if (!VM_Version::supports_cx8()) return;
-
   AtomicBitopsTestSupport<uint64_t>()();
 }
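With supports_cx8() now required to be true, the 64-bit cases above run unconditionally. A hypothetical companion test in the same style (assuming the file's existing gtest and runtime/atomic.hpp includes; not part of this patch):

  TEST_VM(AtomicCmpxchgTest, int64_basic) {
    volatile int64_t value = 0;
    // cmpxchg returns the witnessed old value; 0 here means the CAS succeeded.
    int64_t witnessed = Atomic::cmpxchg(&value, (int64_t)0, (int64_t)42);
    ASSERT_EQ((int64_t)0, witnessed);
    ASSERT_EQ((int64_t)42, Atomic::load(&value));
  }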
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2004, 2020, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test
- * @bug 4992443 4994819
- * @modules java.base/java.util.concurrent.atomic:open
- * @run main VMSupportsCS8
- * @summary Checks that the value of VMSupportsCS8 matches system properties.
- */
-
-import java.lang.reflect.Field;
-
-public class VMSupportsCS8 {
-    public static void main(String[] args) throws Exception {
-        String isalist = System.getProperty("sun.cpu.isalist");
-        if (isalist != null && isalist.matches
-            (".*\\b(pentium_pro|ia64|amd64).*")
-            ||
-            System.getProperty("os.arch").matches
-            (".*\\b(ia64|amd64).*")) {
-
-            System.out.println("This system is known to have hardware CS8");
-
-            Class klass = Class.forName("java.util.concurrent.atomic.AtomicLong");
-            Field field = klass.getDeclaredField("VM_SUPPORTS_LONG_CAS");
-            field.setAccessible(true);
-            boolean VMSupportsCS8 = field.getBoolean(null);
-            if (! VMSupportsCS8)
-                throw new Exception("Unexpected value for VMSupportsCS8");
-        }
-    }
-}