8248817: Windows: Improving common cross-platform code
Reviewed-by: kbarrett, dholmes
parent 7685e53426
commit 257809d744

--- a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -429,38 +429,6 @@ class StubGenerator: public StubCodeGenerator {
     return start;
   }
 
-
-  //----------------------------------------------------------------------------------------------------
-  // Implementation of int32_t atomic_xchg(int32_t exchange_value, volatile int32_t* dest)
-  // used by Atomic::xchg(volatile int32_t* dest, int32_t exchange_value)
-  //
-  // xchg exists as far back as 8086, lock needed for MP only
-  // Stack layout immediately after call:
-  //
-  // 0 [ret addr ] <--- rsp
-  // 1 [  ex     ]
-  // 2 [  dest   ]
-  //
-  // Result:    *dest <- ex, return (old *dest)
-  //
-  // Note: win32 does not currently use this code
-
-  address generate_atomic_xchg() {
-    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
-    address start = __ pc();
-
-    __ push(rdx);
-    Address exchange(rsp, 2 * wordSize);
-    Address dest_addr(rsp, 3 * wordSize);
-    __ movl(rax, exchange);
-    __ movptr(rdx, dest_addr);
-    __ xchgl(rax, Address(rdx, 0));
-    __ pop(rdx);
-    __ ret(0);
-
-    return start;
-  }
-
   //----------------------------------------------------------------------------------------------------
   // Support for void verify_mxcsr()
   //
@@ -3797,9 +3765,6 @@ class StubGenerator: public StubCodeGenerator {
     // is referenced by megamorphic call
    StubRoutines::_catch_exception_entry = generate_catch_exception();
 
-    // These are currently used by Solaris/Intel
-    StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
-
     // platform dependent
     create_control_words();
 
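The 32-bit stub deleted above leaned on the fact that x86 `xchg` with a memory operand is implicitly locked, which is why the generated code carries no `lock` prefix. A minimal standalone sketch, not JDK code, of the same exchange contract using the MSVC intrinsic this change standardizes on:

    // Minimal sketch, not JDK code: the exchange contract of the removed stub,
    // expressed with the MSVC intrinsic. On x86, XCHG with a memory operand is
    // implicitly locked, so no explicit LOCK prefix is required.
    #include <intrin.h>

    inline long atomic_xchg_demo(volatile long* dest, long exchange_value) {
      // Atomically store exchange_value into *dest and return the old *dest.
      return _InterlockedExchange(dest, exchange_value);
    }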
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -552,170 +552,6 @@ class StubGenerator: public StubCodeGenerator {
     return start;
   }
 
-  // Implementation of jint atomic_xchg(jint add_value, volatile jint* dest)
-  // used by Atomic::xchg(volatile jint* dest, jint exchange_value)
-  //
-  // Arguments :
-  //    c_rarg0: exchange_value
-  //    c_rarg0: dest
-  //
-  // Result:
-  //    *dest <- ex, return (orig *dest)
-  address generate_atomic_xchg() {
-    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
-    address start = __ pc();
-
-    __ movl(rax, c_rarg0); // Copy to eax we need a return value anyhow
-    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
-    __ ret(0);
-
-    return start;
-  }
-
-  // Implementation of intptr_t atomic_xchg(jlong add_value, volatile jlong* dest)
-  // used by Atomic::xchg(volatile jlong* dest, jlong exchange_value)
-  //
-  // Arguments :
-  //    c_rarg0: exchange_value
-  //    c_rarg1: dest
-  //
-  // Result:
-  //    *dest <- ex, return (orig *dest)
-  address generate_atomic_xchg_long() {
-    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_long");
-    address start = __ pc();
-
-    __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
-    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
-    __ ret(0);
-
-    return start;
-  }
-
-  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
-  //                                         jint compare_value)
-  //
-  // Arguments :
-  //    c_rarg0: exchange_value
-  //    c_rarg1: dest
-  //    c_rarg2: compare_value
-  //
-  // Result:
-  //    if ( compare_value == *dest ) {
-  //       *dest = exchange_value
-  //       return compare_value;
-  //    else
-  //       return *dest;
-  address generate_atomic_cmpxchg() {
-    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
-    address start = __ pc();
-
-    __ movl(rax, c_rarg2);
-    __ lock();
-    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
-    __ ret(0);
-
-    return start;
-  }
-
-  // Support for int8_t atomic::atomic_cmpxchg(int8_t exchange_value, volatile int8_t* dest,
-  //                                           int8_t compare_value)
-  //
-  // Arguments :
-  //    c_rarg0: exchange_value
-  //    c_rarg1: dest
-  //    c_rarg2: compare_value
-  //
-  // Result:
-  //    if ( compare_value == *dest ) {
-  //       *dest = exchange_value
-  //       return compare_value;
-  //    else
-  //       return *dest;
-  address generate_atomic_cmpxchg_byte() {
-    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
-    address start = __ pc();
-
-    __ movsbq(rax, c_rarg2);
-    __ lock();
-    __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
-    __ ret(0);
-
-    return start;
-  }
-
-  // Support for int64_t atomic::atomic_cmpxchg(int64_t exchange_value,
-  //                                            volatile int64_t* dest,
-  //                                            int64_t compare_value)
-  // Arguments :
-  //    c_rarg0: exchange_value
-  //    c_rarg1: dest
-  //    c_rarg2: compare_value
-  //
-  // Result:
-  //    if ( compare_value == *dest ) {
-  //       *dest = exchange_value
-  //       return compare_value;
-  //    else
-  //       return *dest;
-  address generate_atomic_cmpxchg_long() {
-    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
-    address start = __ pc();
-
-    __ movq(rax, c_rarg2);
-    __ lock();
-    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
-    __ ret(0);
-
-    return start;
-  }
-
-  // Implementation of jint atomic_add(jint add_value, volatile jint* dest)
-  // used by Atomic::add(volatile jint* dest, jint add_value)
-  //
-  // Arguments :
-  //    c_rarg0: add_value
-  //    c_rarg1: dest
-  //
-  // Result:
-  //    *dest += add_value
-  //    return *dest;
-  address generate_atomic_add() {
-    StubCodeMark mark(this, "StubRoutines", "atomic_add");
-    address start = __ pc();
-
-    __ movl(rax, c_rarg0);
-    __ lock();
-    __ xaddl(Address(c_rarg1, 0), c_rarg0);
-    __ addl(rax, c_rarg0);
-    __ ret(0);
-
-    return start;
-  }
-
-  // Implementation of intptr_t atomic_add(intptr_t add_value, volatile intptr_t* dest)
-  // used by Atomic::add(volatile intptr_t* dest, intptr_t add_value)
-  //
-  // Arguments :
-  //    c_rarg0: add_value
-  //    c_rarg1: dest
-  //
-  // Result:
-  //    *dest += add_value
-  //    return *dest;
-  address generate_atomic_add_long() {
-    StubCodeMark mark(this, "StubRoutines", "atomic_add_long");
-    address start = __ pc();
-
-    __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
-    __ lock();
-    __ xaddptr(Address(c_rarg1, 0), c_rarg0);
-    __ addptr(rax, c_rarg0);
-    __ ret(0);
-
-    return start;
-  }
-
   // Support for intptr_t OrderAccess::fence()
   //
   // Arguments :
@@ -6332,13 +6168,6 @@ address generate_avx_ghash_processBlocks() {
    StubRoutines::_catch_exception_entry = generate_catch_exception();
 
    // atomic calls
-    StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
-    StubRoutines::_atomic_xchg_long_entry    = generate_atomic_xchg_long();
-    StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
-    StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
-    StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
-    StubRoutines::_atomic_add_entry          = generate_atomic_add();
-    StubRoutines::_atomic_add_long_entry     = generate_atomic_add_long();
    StubRoutines::_fence_entry               = generate_orderaccess_fence();
 
    // platform dependent
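Each cmpxchg stub removed here loads compare_value into rax, executes `lock cmpxchg`, and returns rax, which holds the previous value of *dest whether or not the swap happened. A standalone sketch, not JDK code, of that contract modeled with std::atomic:

    // Standalone sketch, not JDK code: the compare-and-swap contract of the
    // deleted generate_atomic_cmpxchg() stub, modeled with std::atomic.
    #include <atomic>
    #include <cstdint>

    inline int32_t atomic_cmpxchg_demo(int32_t exchange_value,
                                       std::atomic<int32_t>* dest,
                                       int32_t compare_value) {
      int32_t expected = compare_value;
      // On failure, 'expected' is overwritten with the value actually found;
      // on success it still holds compare_value. Either way it equals the
      // old *dest, matching what LOCK CMPXCHG leaves in RAX.
      dest->compare_exchange_strong(expected, exchange_value);
      return expected;
    }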
--- a/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp
+++ b/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #ifndef OS_CPU_WINDOWS_X86_ATOMIC_WINDOWS_X86_HPP
 #define OS_CPU_WINDOWS_X86_ATOMIC_WINDOWS_X86_HPP
 
+#include <intrin.h>
 #include "runtime/os.hpp"
 
 // Note that in MSVC, volatile memory accesses are explicitly
@@ -38,21 +39,6 @@ template<> inline void ScopedFence<RELEASE_X>::prefix() { }
 template<> inline void ScopedFence<RELEASE_X_FENCE>::prefix() { }
 template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
 
-// The following alternative implementations are needed because
-// Windows 95 doesn't support (some of) the corresponding Windows NT
-// calls. Furthermore, these versions allow inlining in the caller.
-// (More precisely: The documentation for InterlockedExchange says
-// it is supported for Windows 95. However, when single-stepping
-// through the assembly code we cannot step into the routine and
-// when looking at the routine address we see only garbage code.
-// Better safe then sorry!). Was bug 7/31/98 (gri).
-//
-// Performance note: On uniprocessors, the 'lock' prefixes are not
-// necessary (and expensive). We should generate separate cases if
-// this becomes a performance problem.
-
-#pragma warning(disable: 4035) // Disables warnings reporting missing return statement
-
 template<size_t byte_size>
 struct Atomic::PlatformAdd {
   template<typename D, typename I>
@@ -64,140 +50,70 @@ struct Atomic::PlatformAdd {
   }
 };
 
-#ifdef AMD64
-template<>
-template<typename D, typename I>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
-                                               atomic_memory_order order) const {
-  return add_using_helper<int32_t>(os::atomic_add_func, dest, add_value);
-}
-
-template<>
-template<typename D, typename I>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
-                                               atomic_memory_order order) const {
-  return add_using_helper<int64_t>(os::atomic_add_long_func, dest, add_value);
-}
-
-#define DEFINE_STUB_XCHG(ByteSize, StubType, StubName) \
-  template<> \
-  template<typename T> \
-  inline T Atomic::PlatformXchg<ByteSize>::operator()(T volatile* dest, \
-                                                      T exchange_value, \
-                                                      atomic_memory_order order) const { \
-    STATIC_ASSERT(ByteSize == sizeof(T)); \
-    return xchg_using_helper<StubType>(StubName, dest, exchange_value); \
-  }
-
-DEFINE_STUB_XCHG(4, int32_t, os::atomic_xchg_func)
-DEFINE_STUB_XCHG(8, int64_t, os::atomic_xchg_long_func)
-
-#undef DEFINE_STUB_XCHG
-
-#define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName) \
-  template<> \
-  template<typename T> \
-  inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T volatile* dest, \
-                                                         T compare_value, \
-                                                         T exchange_value, \
-                                                         atomic_memory_order order) const { \
-    STATIC_ASSERT(ByteSize == sizeof(T)); \
-    return cmpxchg_using_helper<StubType>(StubName, dest, compare_value, exchange_value); \
-  }
-
-DEFINE_STUB_CMPXCHG(1, int8_t,  os::atomic_cmpxchg_byte_func)
-DEFINE_STUB_CMPXCHG(4, int32_t, os::atomic_cmpxchg_func)
-DEFINE_STUB_CMPXCHG(8, int64_t, os::atomic_cmpxchg_long_func)
-
-#undef DEFINE_STUB_CMPXCHG
-
-#else // !AMD64
-
-template<>
-template<typename D, typename I>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
-                                               atomic_memory_order order) const {
-  STATIC_ASSERT(4 == sizeof(I));
-  STATIC_ASSERT(4 == sizeof(D));
-  __asm {
-    mov edx, dest;
-    mov eax, add_value;
-    mov ecx, eax;
-    lock xadd dword ptr [edx], eax;
-    add eax, ecx;
-  }
-}
-
-template<>
-template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
-                                             T exchange_value,
-                                             atomic_memory_order order) const {
-  STATIC_ASSERT(4 == sizeof(T));
-  // alternative for InterlockedExchange
-  __asm {
-    mov eax, exchange_value;
-    mov ecx, dest;
-    xchg eax, dword ptr [ecx];
-  }
-}
-
-template<>
-template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
-                                                T compare_value,
-                                                T exchange_value,
-                                                atomic_memory_order order) const {
-  STATIC_ASSERT(1 == sizeof(T));
-  // alternative for InterlockedCompareExchange
-  __asm {
-    mov edx, dest
-    mov cl, exchange_value
-    mov al, compare_value
-    lock cmpxchg byte ptr [edx], cl
-  }
-}
-
-template<>
-template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
-                                                T compare_value,
-                                                T exchange_value,
-                                                atomic_memory_order order) const {
-  STATIC_ASSERT(4 == sizeof(T));
-  // alternative for InterlockedCompareExchange
-  __asm {
-    mov edx, dest
-    mov ecx, exchange_value
-    mov eax, compare_value
-    lock cmpxchg dword ptr [edx], ecx
-  }
-}
-
-template<>
-template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
-                                                T compare_value,
-                                                T exchange_value,
-                                                atomic_memory_order order) const {
-  STATIC_ASSERT(8 == sizeof(T));
-  int32_t ex_lo  = (int32_t)exchange_value;
-  int32_t ex_hi  = *( ((int32_t*)&exchange_value) + 1 );
-  int32_t cmp_lo = (int32_t)compare_value;
-  int32_t cmp_hi = *( ((int32_t*)&compare_value) + 1 );
-  __asm {
-    push ebx
-    push edi
-    mov eax, cmp_lo
-    mov edx, cmp_hi
-    mov edi, dest
-    mov ebx, ex_lo
-    mov ecx, ex_hi
-    lock cmpxchg8b qword ptr [edi]
-    pop edi
-    pop ebx
-  }
-}
+// The Interlocked* APIs only take long and will not accept __int32. That is
+// acceptable on Windows, since long is a 32-bits integer type.
+
+#define DEFINE_INTRINSIC_ADD(IntrinsicName, IntrinsicType) \
+  template<> \
+  template<typename D, typename I> \
+  inline D Atomic::PlatformAdd<sizeof(IntrinsicType)>::add_and_fetch(D volatile* dest, \
+                                                                     I add_value, \
+                                                                     atomic_memory_order order) const { \
+    STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(D)); \
+    return PrimitiveConversions::cast<D>( \
+      IntrinsicName(reinterpret_cast<IntrinsicType volatile *>(dest), \
+                    PrimitiveConversions::cast<IntrinsicType>(add_value))); \
+  }
+
+DEFINE_INTRINSIC_ADD(InterlockedAdd,   long)
+DEFINE_INTRINSIC_ADD(InterlockedAdd64, __int64)
+
+#undef DEFINE_INTRINSIC_ADD
+
+#define DEFINE_INTRINSIC_XCHG(IntrinsicName, IntrinsicType) \
+  template<> \
+  template<typename T> \
+  inline T Atomic::PlatformXchg<sizeof(IntrinsicType)>::operator()(T volatile* dest, \
+                                                                   T exchange_value, \
+                                                                   atomic_memory_order order) const { \
+    STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(T)); \
+    return PrimitiveConversions::cast<T>( \
+      IntrinsicName(reinterpret_cast<IntrinsicType volatile *>(dest), \
+                    PrimitiveConversions::cast<IntrinsicType>(exchange_value))); \
+  }
+
+DEFINE_INTRINSIC_XCHG(InterlockedExchange,   long)
+DEFINE_INTRINSIC_XCHG(InterlockedExchange64, __int64)
+
+#undef DEFINE_INTRINSIC_XCHG
+
+// Note: the order of the parameters is different between
+// Atomic::PlatformCmpxchg<*>::operator() and the
+// InterlockedCompareExchange* API.
+
+#define DEFINE_INTRINSIC_CMPXCHG(IntrinsicName, IntrinsicType) \
+  template<> \
+  template<typename T> \
+  inline T Atomic::PlatformCmpxchg<sizeof(IntrinsicType)>::operator()(T volatile* dest, \
+                                                                      T compare_value, \
+                                                                      T exchange_value, \
+                                                                      atomic_memory_order order) const { \
+    STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(T)); \
+    return PrimitiveConversions::cast<T>( \
+      IntrinsicName(reinterpret_cast<IntrinsicType volatile *>(dest), \
+                    PrimitiveConversions::cast<IntrinsicType>(exchange_value), \
+                    PrimitiveConversions::cast<IntrinsicType>(compare_value))); \
+  }
+
+DEFINE_INTRINSIC_CMPXCHG(_InterlockedCompareExchange8, char) // Use the intrinsic as InterlockedCompareExchange8 does not exist
+DEFINE_INTRINSIC_CMPXCHG(InterlockedCompareExchange,   long)
+DEFINE_INTRINSIC_CMPXCHG(InterlockedCompareExchange64, __int64)
+
+#undef DEFINE_INTRINSIC_CMPXCHG
+
+#ifndef AMD64
+
+#pragma warning(disable: 4035) // Disables warnings reporting missing return statement
 
 template<>
 template<typename T>
@@ -228,11 +144,8 @@ inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
   }
 }
 
-#endif // AMD64
-
 #pragma warning(default: 4035) // Enables warnings reporting missing return statement
 
-#ifndef AMD64
 template<>
 struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
 {
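Note the argument swap called out in the hunk above: InterlockedCompareExchange takes (destination, exchange, comparand), while Atomic::PlatformCmpxchg::operator() takes compare_value before exchange_value, so the macro reverses the last two arguments. A standalone sketch, not JDK code, of that difference:

    // Standalone sketch, not JDK code: the parameter-order difference between
    // the intrinsic, (dest, exchange, comparand), and HotSpot's Atomic API,
    // which passes compare_value before exchange_value.
    #include <intrin.h>

    inline long cmpxchg_param_order_demo(volatile long* dest,
                                         long compare_value,
                                         long exchange_value) {
      // Returns the old *dest, swapping only if it equaled compare_value.
      return _InterlockedCompareExchange(dest, exchange_value, compare_value);
    }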
--- a/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp
+++ b/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp
@@ -211,138 +211,6 @@ bool os::register_code_area(char *low, char *high) {
   return true;
 }
 
-// Atomics and Stub Functions
-
-typedef int32_t  xchg_func_t         (int32_t,  volatile int32_t*);
-typedef int64_t  xchg_long_func_t    (int64_t,  volatile int64_t*);
-typedef int32_t  cmpxchg_func_t      (int32_t,  volatile int32_t*, int32_t);
-typedef int8_t   cmpxchg_byte_func_t (int8_t,   volatile int8_t*,  int8_t);
-typedef int64_t  cmpxchg_long_func_t (int64_t,  volatile int64_t*, int64_t);
-typedef int32_t  add_func_t          (int32_t,  volatile int32_t*);
-typedef int64_t  add_long_func_t     (int64_t,  volatile int64_t*);
-
-#ifdef AMD64
-
-int32_t os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t* dest) {
-  // try to use the stub:
-  xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());
-
-  if (func != NULL) {
-    os::atomic_xchg_func = func;
-    return (*func)(exchange_value, dest);
-  }
-  assert(Threads::number_of_threads() == 0, "for bootstrap only");
-
-  int32_t old_value = *dest;
-  *dest = exchange_value;
-  return old_value;
-}
-
-int64_t os::atomic_xchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest) {
-  // try to use the stub:
-  xchg_long_func_t* func = CAST_TO_FN_PTR(xchg_long_func_t*, StubRoutines::atomic_xchg_long_entry());
-
-  if (func != NULL) {
-    os::atomic_xchg_long_func = func;
-    return (*func)(exchange_value, dest);
-  }
-  assert(Threads::number_of_threads() == 0, "for bootstrap only");
-
-  int64_t old_value = *dest;
-  *dest = exchange_value;
-  return old_value;
-}
-
-
-int32_t os::atomic_cmpxchg_bootstrap(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value) {
-  // try to use the stub:
-  cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());
-
-  if (func != NULL) {
-    os::atomic_cmpxchg_func = func;
-    return (*func)(exchange_value, dest, compare_value);
-  }
-  assert(Threads::number_of_threads() == 0, "for bootstrap only");
-
-  int32_t old_value = *dest;
-  if (old_value == compare_value)
-    *dest = exchange_value;
-  return old_value;
-}
-
-int8_t os::atomic_cmpxchg_byte_bootstrap(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value) {
-  // try to use the stub:
-  cmpxchg_byte_func_t* func = CAST_TO_FN_PTR(cmpxchg_byte_func_t*, StubRoutines::atomic_cmpxchg_byte_entry());
-
-  if (func != NULL) {
-    os::atomic_cmpxchg_byte_func = func;
-    return (*func)(exchange_value, dest, compare_value);
-  }
-  assert(Threads::number_of_threads() == 0, "for bootstrap only");
-
-  int8_t old_value = *dest;
-  if (old_value == compare_value)
-    *dest = exchange_value;
-  return old_value;
-}
-
-#endif // AMD64
-
-int64_t os::atomic_cmpxchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value) {
-  // try to use the stub:
-  cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());
-
-  if (func != NULL) {
-    os::atomic_cmpxchg_long_func = func;
-    return (*func)(exchange_value, dest, compare_value);
-  }
-  assert(Threads::number_of_threads() == 0, "for bootstrap only");
-
-  int64_t old_value = *dest;
-  if (old_value == compare_value)
-    *dest = exchange_value;
-  return old_value;
-}
-
-#ifdef AMD64
-
-int32_t os::atomic_add_bootstrap(int32_t add_value, volatile int32_t* dest) {
-  // try to use the stub:
-  add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());
-
-  if (func != NULL) {
-    os::atomic_add_func = func;
-    return (*func)(add_value, dest);
-  }
-  assert(Threads::number_of_threads() == 0, "for bootstrap only");
-
-  return (*dest) += add_value;
-}
-
-int64_t os::atomic_add_long_bootstrap(int64_t add_value, volatile int64_t* dest) {
-  // try to use the stub:
-  add_long_func_t* func = CAST_TO_FN_PTR(add_long_func_t*, StubRoutines::atomic_add_long_entry());
-
-  if (func != NULL) {
-    os::atomic_add_long_func = func;
-    return (*func)(add_value, dest);
-  }
-  assert(Threads::number_of_threads() == 0, "for bootstrap only");
-
-  return (*dest) += add_value;
-}
-
-xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
-xchg_long_func_t*    os::atomic_xchg_long_func    = os::atomic_xchg_long_bootstrap;
-cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
-cmpxchg_byte_func_t* os::atomic_cmpxchg_byte_func = os::atomic_cmpxchg_byte_bootstrap;
-add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
-add_long_func_t*     os::atomic_add_long_func     = os::atomic_add_long_bootstrap;
-
-#endif // AMD64
-
-cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
-
 #ifdef AMD64
 /*
  * Windows/x64 does not use stack frames the way expected by Java:
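The deleted routines above implement a self-patching function-pointer idiom: each os::atomic_* pointer starts out aimed at a bootstrap function that looks up the generated stub, caches it in the pointer, and forwards the call; until the stubs exist, a plain non-atomic fallback is safe because the VM is still single-threaded. A generic sketch of the idiom, not JDK code (resolve_add_stub is a hypothetical stand-in for the StubRoutines lookup):

    // Generic sketch, not JDK code, of the self-patching bootstrap idiom.
    #include <cstdint>

    typedef int32_t add_func_t(int32_t, volatile int32_t*);

    static add_func_t* resolve_add_stub();  // hypothetical stub lookup
    static int32_t add_bootstrap(int32_t add_value, volatile int32_t* dest);

    // The pointer initially targets the bootstrap function.
    static add_func_t* add_func = add_bootstrap;

    static int32_t add_bootstrap(int32_t add_value, volatile int32_t* dest) {
      add_func_t* stub = resolve_add_stub();
      if (stub != nullptr) {
        add_func = stub;  // patch the pointer: later calls skip the bootstrap
        return (*stub)(add_value, dest);
      }
      // Startup-only fallback; safe while only one thread is running.
      return (*dest) += add_value;
    }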
--- a/src/hotspot/os_cpu/windows_x86/os_windows_x86.hpp
+++ b/src/hotspot/os_cpu/windows_x86/os_windows_x86.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,34 +28,6 @@
 //
 // NOTE: we are back in class os here, not win32
 //
-#ifdef AMD64
-  static int32_t  (*atomic_xchg_func)          (int32_t,  volatile int32_t*);
-  static int64_t  (*atomic_xchg_long_func)     (int64_t,  volatile int64_t*);
-
-  static int32_t  (*atomic_cmpxchg_func)       (int32_t,  volatile int32_t*, int32_t);
-  static int8_t   (*atomic_cmpxchg_byte_func)  (int8_t,   volatile int8_t*,  int8_t);
-  static int64_t  (*atomic_cmpxchg_long_func)  (int64_t,  volatile int64_t*, int64_t);
-
-  static int32_t  (*atomic_add_func)           (int32_t,  volatile int32_t*);
-  static int64_t  (*atomic_add_long_func)      (int64_t,  volatile int64_t*);
-
-  static int32_t  atomic_xchg_bootstrap        (int32_t,  volatile int32_t*);
-  static int64_t  atomic_xchg_long_bootstrap   (int64_t,  volatile int64_t*);
-
-  static int32_t  atomic_cmpxchg_bootstrap     (int32_t,  volatile int32_t*, int32_t);
-  static int8_t   atomic_cmpxchg_byte_bootstrap(int8_t,   volatile int8_t*,  int8_t);
-#else
-
-  static int64_t (*atomic_cmpxchg_long_func)   (int64_t,  volatile int64_t*, int64_t);
-
-#endif // AMD64
-
-  static int64_t atomic_cmpxchg_long_bootstrap (int64_t,  volatile int64_t*, int64_t);
-
-#ifdef AMD64
-  static int32_t  atomic_add_bootstrap         (int32_t,  volatile int32_t*);
-  static int64_t  atomic_add_long_bootstrap    (int64_t,  volatile int64_t*);
-#endif // AMD64
-
   static void setup_fpu();
   static bool supports_sse() { return true; }
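With these declarations gone, Windows atomics no longer funnel through os:: function pointers at all; a call such as Atomic::add now inlines straight to an Interlocked intrinsic. A standalone sketch, not JDK code, of add-and-fetch built on an intrinsic available on both x86 and x64:

    // Standalone sketch, not JDK code: add-and-fetch on top of
    // _InterlockedExchangeAdd, which returns the *old* value; adding
    // add_value once more yields the new value, as Atomic::add returns.
    #include <intrin.h>

    inline long atomic_add_demo(volatile long* dest, long add_value) {
      return _InterlockedExchangeAdd(dest, add_value) + add_value;
    }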