8290324: Move atomic operations outside of os_xxx.hpp

Reviewed-by: dholmes, kbarrett
Ioi Lam 2022-07-21 05:12:10 +00:00
parent e8975be94b
commit 2c73a1f39d
15 changed files with 176 additions and 188 deletions

src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp

@@ -549,6 +549,10 @@ void os::current_thread_enable_wx(WXMode mode) {
pthread_jit_write_protect_np(mode == WXExec);
}
static inline void atomic_copy64(const volatile void *src, volatile void *dst) {
*(jlong *) dst = *(const jlong *) src;
}
extern "C" {
int SpinPause() {
return 0;
@@ -582,18 +586,19 @@ extern "C" {
*(to--) = *(from--);
}
}
void _Copy_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
if (from > to) {
const jlong *end = from + count;
while (from < end)
os::atomic_copy64(from++, to++);
atomic_copy64(from++, to++);
}
else if (from < to) {
const jlong *end = from;
from += count - 1;
to += count - 1;
while (from >= end)
os::atomic_copy64(from--, to--);
atomic_copy64(from--, to--);
}
}
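
Note: the copy routine above is direction-aware so that overlapping jlong ranges are copied element-by-element without tearing any 64-bit value — it walks forward when the destination lies below the source and backward when it lies above. A minimal standalone sketch of the same logic, using int64_t in place of jlong and a local copy64 helper (both are illustration-only stand-ins, not HotSpot code):

#include <cstddef>
#include <cstdint>

// Stand-in for the platform atomic_copy64: on a 64-bit target an aligned
// 8-byte load/store is a single, non-tearing instruction.
static inline void copy64(const volatile void* src, volatile void* dst) {
  *(volatile int64_t*) dst = *(const volatile int64_t*) src;
}

// Same overlap handling as _Copy_conjoint_jlongs_atomic above.
static void conjoint_copy64(const int64_t* from, int64_t* to, std::size_t count) {
  if (from > to) {                       // destination below source: copy forward
    const int64_t* end = from + count;
    while (from < end)
      copy64(from++, to++);
  } else if (from < to) {                // destination above source: copy backward
    const int64_t* end = from;
    from += count - 1;
    to   += count - 1;
    while (from >= end)
      copy64(from--, to--);
  }
}

For example, conjoint_copy64(buf + 1, buf, 7) shifts seven elements down by one slot, and each element is moved with a single 64-bit access.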

src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -35,9 +35,4 @@
// Note: Currently only used in 64 bit Windows implementations
static bool register_code_area(char *low, char *high) { return true; }
// Atomically copy 64 bits of data
static void atomic_copy64(const volatile void *src, volatile void *dst) {
*(jlong *) dst = *(const jlong *) src;
}
#endif // OS_CPU_BSD_AARCH64_OS_BSD_AARCH64_HPP

src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -285,12 +285,31 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
return value;
}
// Atomically copy 64 bits of data
static void atomic_copy64(const volatile void *src, volatile void *dst) {
#if defined(PPC32)
double tmp;
asm volatile ("lfd %0, 0(%1)\n"
"stfd %0, 0(%2)\n"
: "=f"(tmp)
: "b"(src), "b"(dst));
#elif defined(S390) && !defined(_LP64)
double tmp;
asm volatile ("ld %0, 0(%1)\n"
"std %0, 0(%2)\n"
: "=r"(tmp)
: "a"(src), "a"(dst));
#else
*(jlong *) dst = *(const jlong *) src;
#endif
}
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T));
volatile int64_t dest;
os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
atomic_copy64(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
return PrimitiveConversions::cast<T>(dest);
}
@@ -299,7 +318,7 @@ template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
T store_value) const {
STATIC_ASSERT(8 == sizeof(T));
os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}
#endif // OS_CPU_BSD_ZERO_ATOMIC_BSD_ZERO_HPP

src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp

@@ -31,6 +31,7 @@
// no precompiled headers
#include "jvm.h"
#include "asm/assembler.inline.hpp"
#include "atomic_bsd_zero.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
@@ -295,14 +296,14 @@ extern "C" {
if (from > to) {
const jlong *end = from + count;
while (from < end)
os::atomic_copy64(from++, to++);
atomic_copy64(from++, to++);
}
else if (from < to) {
const jlong *end = from;
from += count - 1;
to += count - 1;
while (from >= end)
os::atomic_copy64(from--, to--);
atomic_copy64(from--, to--);
}
}

src/hotspot/os_cpu/bsd_zero/os_bsd_zero.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -32,23 +32,4 @@
// Note: Currently only used in 64 bit Windows implementations
static bool register_code_area(char *low, char *high) { return true; }
// Atomically copy 64 bits of data
static void atomic_copy64(const volatile void *src, volatile void *dst) {
#if defined(PPC32)
double tmp;
asm volatile ("lfd %0, 0(%1)\n"
"stfd %0, 0(%2)\n"
: "=f"(tmp)
: "b"(src), "b"(dst));
#elif defined(S390) && !defined(_LP64)
double tmp;
asm volatile ("ld %0, 0(%1)\n"
"std %0, 0(%2)\n"
: "=r"(tmp)
: "a"(src), "a"(dst));
#else
*(jlong *) dst = *(const jlong *) src;
#endif
}
#endif // OS_CPU_BSD_ZERO_OS_BSD_ZERO_HPP

src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp

@@ -387,6 +387,10 @@ int os::extra_bang_size_in_bytes() {
return 0;
}
static inline void atomic_copy64(const volatile void *src, volatile void *dst) {
*(jlong *) dst = *(const jlong *) src;
}
extern "C" {
int SpinPause() {
using spin_wait_func_ptr_t = void (*)();
@@ -433,18 +437,19 @@ extern "C" {
*(to--) = *(from--);
}
}
void _Copy_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
if (from > to) {
const jlong *end = from + count;
while (from < end)
os::atomic_copy64(from++, to++);
atomic_copy64(from++, to++);
}
else if (from < to) {
const jlong *end = from;
from += count - 1;
to += count - 1;
while (from >= end)
os::atomic_copy64(from--, to--);
atomic_copy64(from--, to--);
}
}

src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -36,9 +36,4 @@
// Note: Currently only used in 64 bit Windows implementations
static bool register_code_area(char *low, char *high) { return true; }
// Atomically copy 64 bits of data
static void atomic_copy64(const volatile void *src, volatile void *dst) {
*(jlong *) dst = *(const jlong *) src;
}
#endif // OS_CPU_LINUX_AARCH64_OS_LINUX_AARCH64_HPP

src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,11 +25,43 @@
#ifndef OS_CPU_LINUX_ARM_ATOMIC_LINUX_ARM_HPP
#define OS_CPU_LINUX_ARM_ATOMIC_LINUX_ARM_HPP
#include "memory/allStatic.hpp"
#include "runtime/os.hpp"
#include "runtime/vm_version.hpp"
// Implementation of class atomic
class ARMAtomicFuncs : AllStatic {
public:
typedef int64_t (*cmpxchg_long_func_t)(int64_t, int64_t, volatile int64_t*);
typedef int64_t (*load_long_func_t)(const volatile int64_t*);
typedef void (*store_long_func_t)(int64_t, volatile int64_t*);
typedef int32_t (*atomic_add_func_t)(int32_t add_value, volatile int32_t *dest);
typedef int32_t (*atomic_xchg_func_t)(int32_t exchange_value, volatile int32_t *dest);
typedef int32_t (*cmpxchg_func_t)(int32_t, int32_t, volatile int32_t*);
static cmpxchg_long_func_t _cmpxchg_long_func;
static load_long_func_t _load_long_func;
static store_long_func_t _store_long_func;
static atomic_add_func_t _add_func;
static atomic_xchg_func_t _xchg_func;
static cmpxchg_func_t _cmpxchg_func;
static int64_t cmpxchg_long_bootstrap(int64_t, int64_t, volatile int64_t*);
static int64_t load_long_bootstrap(const volatile int64_t*);
static void store_long_bootstrap(int64_t, volatile int64_t*);
static int32_t add_bootstrap(int32_t add_value, volatile int32_t *dest);
static int32_t xchg_bootstrap(int32_t exchange_value, volatile int32_t *dest);
static int32_t cmpxchg_bootstrap(int32_t compare_value,
int32_t exchange_value,
volatile int32_t *dest);
};
/*
* Atomic long operations on 32-bit ARM
* ARM v7 supports LDREXD/STREXD synchronization instructions so no problem.
@@ -49,7 +81,7 @@ template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T));
return PrimitiveConversions::cast<T>(
(*os::atomic_load_long_func)(reinterpret_cast<const volatile int64_t*>(src)));
(*ARMAtomicFuncs::_load_long_func)(reinterpret_cast<const volatile int64_t*>(src)));
}
template<>
@@ -57,7 +89,7 @@ template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
T store_value) const {
STATIC_ASSERT(8 == sizeof(T));
(*os::atomic_store_long_func)(
(*ARMAtomicFuncs::_store_long_func)(
PrimitiveConversions::cast<int64_t>(store_value), reinterpret_cast<volatile int64_t*>(dest));
}
@@ -83,7 +115,7 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
return add_using_helper<int32_t>(os::atomic_add_func, dest, add_value);
return add_using_helper<int32_t>(ARMAtomicFuncs::_add_func, dest, add_value);
}
@@ -93,7 +125,7 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
return xchg_using_helper<int32_t>(os::atomic_xchg_func, dest, exchange_value);
return xchg_using_helper<int32_t>(ARMAtomicFuncs::_xchg_func, dest, exchange_value);
}
@@ -108,7 +140,7 @@ inline int32_t reorder_cmpxchg_func(int32_t exchange_value,
int32_t volatile* dest,
int32_t compare_value) {
// Warning: Arguments are swapped to avoid moving them for kernel call
return (*os::atomic_cmpxchg_func)(compare_value, exchange_value, dest);
return (*ARMAtomicFuncs::_cmpxchg_func)(compare_value, exchange_value, dest);
}
inline int64_t reorder_cmpxchg_long_func(int64_t exchange_value,
@@ -116,7 +148,7 @@ inline int64_t reorder_cmpxchg_long_func(int64_t exchange_value,
int64_t compare_value) {
assert(VM_Version::supports_cx8(), "Atomic compare and exchange int64_t not supported on this architecture!");
// Warning: Arguments are swapped to avoid moving them for kernel call
return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest);
return (*ARMAtomicFuncs::_cmpxchg_long_func)(compare_value, exchange_value, dest);
}

src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp

@@ -75,6 +75,13 @@
#define SPELL_REG_SP "sp"
#ifndef __thumb__
enum {
// Offset to add to frame::_fp when dealing with non-thumb C frames
C_frame_offset = -1,
};
#endif
// Don't #define SPELL_REG_FP for thumb because it is not safe to use, so this makes sure we never fetch it.
#ifndef __thumb__
#define SPELL_REG_FP "fp"
@@ -154,7 +161,7 @@ address os::fetch_frame_from_context(const void* ucVoid,
#ifndef __thumb__
if (CodeCache::find_blob(epc) == NULL) {
// It's a C frame. We need to adjust the fp.
fp += os::C_frame_offset;
fp += C_frame_offset;
}
#endif
// Clear FP when stack walking is dangerous so that
@@ -192,7 +199,7 @@ frame os::get_sender_for_C_frame(frame* fr) {
if (! is_safe_for_fp(pc)) {
return frame(fr->sender_sp(), (intptr_t *)NULL, pc);
} else {
return frame(fr->sender_sp(), fr->link() + os::C_frame_offset, pc);
return frame(fr->sender_sp(), fr->link() + C_frame_offset, pc);
}
#endif
}
@@ -211,7 +218,7 @@ frame os::current_frame() {
#else
register intptr_t* fp __asm__ (SPELL_REG_FP);
// fp is for os::current_frame. We want the fp for our caller.
frame myframe((intptr_t*)os::current_stack_pointer(), fp + os::C_frame_offset,
frame myframe((intptr_t*)os::current_stack_pointer(), fp + C_frame_offset,
CAST_FROM_FN_PTR(address, os::current_frame));
frame caller_frame = os::get_sender_for_C_frame(&myframe);
@@ -494,16 +501,20 @@ void os::print_register_info(outputStream *st, const void *context) {
st->cr();
}
typedef int64_t cmpxchg_long_func_t(int64_t, int64_t, volatile int64_t*);
cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
ARMAtomicFuncs::cmpxchg_long_func_t ARMAtomicFuncs::_cmpxchg_long_func = ARMAtomicFuncs::cmpxchg_long_bootstrap;
ARMAtomicFuncs::load_long_func_t ARMAtomicFuncs::_load_long_func = ARMAtomicFuncs::load_long_bootstrap;
ARMAtomicFuncs::store_long_func_t ARMAtomicFuncs::_store_long_func = ARMAtomicFuncs::store_long_bootstrap;
ARMAtomicFuncs::atomic_add_func_t ARMAtomicFuncs::_add_func = ARMAtomicFuncs::add_bootstrap;
ARMAtomicFuncs::atomic_xchg_func_t ARMAtomicFuncs::_xchg_func = ARMAtomicFuncs::xchg_bootstrap;
ARMAtomicFuncs::cmpxchg_func_t ARMAtomicFuncs::_cmpxchg_func = ARMAtomicFuncs::cmpxchg_bootstrap;
int64_t os::atomic_cmpxchg_long_bootstrap(int64_t compare_value, int64_t exchange_value, volatile int64_t* dest) {
int64_t ARMAtomicFuncs::cmpxchg_long_bootstrap(int64_t compare_value, int64_t exchange_value, volatile int64_t* dest) {
// try to use the stub:
cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());
cmpxchg_long_func_t func = CAST_TO_FN_PTR(cmpxchg_long_func_t, StubRoutines::atomic_cmpxchg_long_entry());
if (func != NULL) {
os::atomic_cmpxchg_long_func = func;
_cmpxchg_long_func = func;
return (*func)(compare_value, exchange_value, dest);
}
assert(Threads::number_of_threads() == 0, "for bootstrap only");
@@ -513,16 +524,13 @@ int64_t os::atomic_cmpxchg_long_bootstrap(int64_t compare_value, int64_t exchang
*dest = exchange_value;
return old_value;
}
typedef int64_t load_long_func_t(const volatile int64_t*);
load_long_func_t* os::atomic_load_long_func = os::atomic_load_long_bootstrap;
int64_t os::atomic_load_long_bootstrap(const volatile int64_t* src) {
int64_t ARMAtomicFuncs::load_long_bootstrap(const volatile int64_t* src) {
// try to use the stub:
load_long_func_t* func = CAST_TO_FN_PTR(load_long_func_t*, StubRoutines::atomic_load_long_entry());
load_long_func_t func = CAST_TO_FN_PTR(load_long_func_t, StubRoutines::atomic_load_long_entry());
if (func != NULL) {
os::atomic_load_long_func = func;
_load_long_func = func;
return (*func)(src);
}
assert(Threads::number_of_threads() == 0, "for bootstrap only");
@@ -531,16 +539,12 @@ int64_t os::atomic_load_long_bootstrap(const volatile int64_t* src) {
return old_value;
}
typedef void store_long_func_t(int64_t, volatile int64_t*);
store_long_func_t* os::atomic_store_long_func = os::atomic_store_long_bootstrap;
void os::atomic_store_long_bootstrap(int64_t val, volatile int64_t* dest) {
void ARMAtomicFuncs::store_long_bootstrap(int64_t val, volatile int64_t* dest) {
// try to use the stub:
store_long_func_t* func = CAST_TO_FN_PTR(store_long_func_t*, StubRoutines::atomic_store_long_entry());
store_long_func_t func = CAST_TO_FN_PTR(store_long_func_t, StubRoutines::atomic_store_long_entry());
if (func != NULL) {
os::atomic_store_long_func = func;
_store_long_func = func;
return (*func)(val, dest);
}
assert(Threads::number_of_threads() == 0, "for bootstrap only");
@@ -548,15 +552,11 @@ void os::atomic_store_long_bootstrap(int64_t val, volatile int64_t* dest) {
*dest = val;
}
typedef int32_t atomic_add_func_t(int32_t add_value, volatile int32_t *dest);
atomic_add_func_t * os::atomic_add_func = os::atomic_add_bootstrap;
int32_t os::atomic_add_bootstrap(int32_t add_value, volatile int32_t *dest) {
atomic_add_func_t * func = CAST_TO_FN_PTR(atomic_add_func_t*,
StubRoutines::atomic_add_entry());
int32_t ARMAtomicFuncs::add_bootstrap(int32_t add_value, volatile int32_t *dest) {
atomic_add_func_t func = CAST_TO_FN_PTR(atomic_add_func_t,
StubRoutines::atomic_add_entry());
if (func != NULL) {
os::atomic_add_func = func;
_add_func = func;
return (*func)(add_value, dest);
}
@@ -565,15 +565,11 @@ int32_t os::atomic_add_bootstrap(int32_t add_value, volatile int32_t *dest) {
return (old_value + add_value);
}
typedef int32_t atomic_xchg_func_t(int32_t exchange_value, volatile int32_t *dest);
atomic_xchg_func_t * os::atomic_xchg_func = os::atomic_xchg_bootstrap;
int32_t os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t *dest) {
atomic_xchg_func_t * func = CAST_TO_FN_PTR(atomic_xchg_func_t*,
StubRoutines::atomic_xchg_entry());
int32_t ARMAtomicFuncs::xchg_bootstrap(int32_t exchange_value, volatile int32_t *dest) {
atomic_xchg_func_t func = CAST_TO_FN_PTR(atomic_xchg_func_t,
StubRoutines::atomic_xchg_entry());
if (func != NULL) {
os::atomic_xchg_func = func;
_xchg_func = func;
return (*func)(exchange_value, dest);
}
@@ -582,16 +578,12 @@ int32_t os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t *des
return (old_value);
}
typedef int32_t cmpxchg_func_t(int32_t, int32_t, volatile int32_t*);
cmpxchg_func_t* os::atomic_cmpxchg_func = os::atomic_cmpxchg_bootstrap;
int32_t os::atomic_cmpxchg_bootstrap(int32_t compare_value, int32_t exchange_value, volatile int32_t* dest) {
int32_t ARMAtomicFuncs::cmpxchg_bootstrap(int32_t compare_value, int32_t exchange_value, volatile int32_t* dest) {
// try to use the stub:
cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());
cmpxchg_func_t func = CAST_TO_FN_PTR(cmpxchg_func_t, StubRoutines::atomic_cmpxchg_entry());
if (func != NULL) {
os::atomic_cmpxchg_func = func;
_cmpxchg_func = func;
return (*func)(compare_value, exchange_value, dest);
}
assert(Threads::number_of_threads() == 0, "for bootstrap only");
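
Note: every ARMAtomicFuncs pointer above follows the same self-installing bootstrap pattern — the pointer initially targets a *_bootstrap function that tries to fetch the generated assembly stub, publishes it into the pointer so later calls bypass the check, and otherwise falls back to a plain read-modify-write that is only legal while the VM is still single-threaded. A condensed, self-contained sketch of that pattern; generated_add_stub and thread_count are illustration-only stand-ins for StubRoutines::atomic_add_entry() and Threads::number_of_threads():

#include <cassert>
#include <cstdint>

typedef int32_t (*add_func_t)(int32_t add_value, volatile int32_t* dest);

// Illustration-only stand-ins for the real VM queries.
static add_func_t generated_add_stub() { return nullptr; }   // "stub not generated yet"
static int thread_count() { return 0; }                      // "still single-threaded"

static int32_t add_bootstrap(int32_t add_value, volatile int32_t* dest);

// The published pointer starts out at the bootstrap shim.
static add_func_t add_func = add_bootstrap;

static int32_t add_bootstrap(int32_t add_value, volatile int32_t* dest) {
  // If the assembly stub exists, install it so subsequent calls go straight there.
  if (add_func_t stub = generated_add_stub()) {
    add_func = stub;
    return (*stub)(add_value, dest);
  }
  // Non-atomic fallback: acceptable only before any other thread can run.
  assert(thread_count() == 0 && "fallback is bootstrap-only");
  int32_t old_value = *dest;
  *dest = old_value + add_value;
  return old_value + add_value;
}

Callers always go through the pointer — (*add_func)(1, &counter) in the sketch, ARMAtomicFuncs::_add_func in the real code — so after the first call installs the stub, the bootstrap path is never taken again.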

src/hotspot/os_cpu/linux_arm/os_linux_arm.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,47 +25,10 @@
#ifndef OS_CPU_LINUX_ARM_OS_LINUX_ARM_HPP
#define OS_CPU_LINUX_ARM_OS_LINUX_ARM_HPP
#ifndef __thumb__
enum {
// Offset to add to frame::_fp when dealing with non-thumb C frames
C_frame_offset = -1,
};
#endif
static void setup_fpu();
// Used to register dynamic code cache area with the OS
// Note: Currently only used in 64 bit Windows implementations
static bool register_code_area(char *low, char *high) { return true; }
static int64_t (*atomic_cmpxchg_long_func)(int64_t compare_value,
int64_t exchange_value,
volatile int64_t *dest);
static int64_t (*atomic_load_long_func)(const volatile int64_t*);
static void (*atomic_store_long_func)(int64_t, volatile int64_t*);
static int32_t (*atomic_add_func)(int32_t add_value, volatile int32_t *dest);
static int32_t (*atomic_xchg_func)(int32_t exchange_value, volatile int32_t *dest);
static int32_t (*atomic_cmpxchg_func)(int32_t compare_value,
int32_t exchange_value,
volatile int32_t *dest);
static int64_t atomic_cmpxchg_long_bootstrap(int64_t, int64_t, volatile int64_t*);
static int64_t atomic_load_long_bootstrap(const volatile int64_t*);
static void atomic_store_long_bootstrap(int64_t, volatile int64_t*);
static int32_t atomic_add_bootstrap(int32_t add_value, volatile int32_t *dest);
static int32_t atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t *dest);
static int32_t atomic_cmpxchg_bootstrap(int32_t compare_value,
int32_t exchange_value,
volatile int32_t *dest);
#endif // OS_CPU_LINUX_ARM_OS_LINUX_ARM_HPP

src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp

@@ -395,6 +395,10 @@ int os::extra_bang_size_in_bytes() {
return 0;
}
static inline void atomic_copy64(const volatile void *src, volatile void *dst) {
*(jlong *) dst = *(const jlong *) src;
}
extern "C" {
int SpinPause() {
return 0;
@@ -430,18 +434,19 @@ extern "C" {
}
}
}
void _Copy_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
if (from > to) {
const jlong *end = from + count;
while (from < end) {
os::atomic_copy64(from++, to++);
atomic_copy64(from++, to++);
}
} else if (from < to) {
const jlong *end = from;
from += count - 1;
to += count - 1;
while (from >= end) {
os::atomic_copy64(from--, to--);
atomic_copy64(from--, to--);
}
}
}

src/hotspot/os_cpu/linux_riscv/os_linux_riscv.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -32,11 +32,6 @@
// Note: Currently only used in 64 bit Windows implementations
static bool register_code_area(char *low, char *high) { return true; }
// Atomically copy 64 bits of data
static void atomic_copy64(const volatile void *src, volatile void *dst) {
*(jlong *) dst = *(const jlong *) src;
}
// SYSCALL_RISCV_FLUSH_ICACHE is used to flush instruction cache. The "fence.i" instruction
// only work on the current hart, so kernel provides the icache flush syscall to flush icache
// on each hart. You can pass a flag to determine a global or local icache flush.

src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -27,7 +27,6 @@
#define OS_CPU_LINUX_ZERO_ATOMIC_LINUX_ZERO_HPP
#include "orderAccess_linux_zero.hpp"
#include "runtime/os.hpp"
// Implementation of class atomic
@@ -133,12 +132,55 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
return value;
}
// Atomically copy 64 bits of data
inline void atomic_copy64(const volatile void *src, volatile void *dst) {
#if defined(PPC32) && !defined(__SPE__)
double tmp;
asm volatile ("lfd %0, %2\n"
"stfd %0, %1\n"
: "=&f"(tmp), "=Q"(*(volatile double*)dst)
: "Q"(*(volatile double*)src));
#elif defined(PPC32) && defined(__SPE__)
long tmp;
asm volatile ("evldd %0, %2\n"
"evstdd %0, %1\n"
: "=&r"(tmp), "=Q"(*(volatile long*)dst)
: "Q"(*(volatile long*)src));
#elif defined(S390) && !defined(_LP64)
double tmp;
asm volatile ("ld %0, %2\n"
"std %0, %1\n"
: "=&f"(tmp), "=Q"(*(volatile double*)dst)
: "Q"(*(volatile double*)src));
#elif defined(__ARM_ARCH_7A__)
// The only way to perform the atomic 64-bit load/store
// is to use ldrexd/strexd for both reads and writes.
// For store, we need to have the matching (fake) load first.
// Put clrex between exclusive ops on src and dst for clarity.
uint64_t tmp_r, tmp_w;
uint32_t flag_w;
asm volatile ("ldrexd %[tmp_r], [%[src]]\n"
"clrex\n"
"1:\n"
"ldrexd %[tmp_w], [%[dst]]\n"
"strexd %[flag_w], %[tmp_r], [%[dst]]\n"
"cmp %[flag_w], 0\n"
"bne 1b\n"
: [tmp_r] "=&r" (tmp_r), [tmp_w] "=&r" (tmp_w),
[flag_w] "=&r" (flag_w)
: [src] "r" (src), [dst] "r" (dst)
: "cc", "memory");
#else
*(jlong *) dst = *(const jlong *) src;
#endif
}
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T));
volatile int64_t dest;
os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
atomic_copy64(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
return PrimitiveConversions::cast<T>(dest);
}
@@ -147,7 +189,7 @@ template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
T store_value) const {
STATIC_ASSERT(8 == sizeof(T));
os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}
#endif // OS_CPU_LINUX_ZERO_ATOMIC_LINUX_ZERO_HPP
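
Note: on 32-bit Zero targets a plain 64-bit C assignment may be split into two 32-bit accesses, so the PlatformLoad<8>/PlatformStore<8> specializations above route every 8-byte Atomic::load/store through atomic_copy64, which selects an instruction sequence the hardware performs indivisibly. A much-simplified sketch of that size-keyed dispatch, with plain templates and memcpy standing in for HotSpot's STATIC_ASSERT and PrimitiveConversions machinery (illustrative only, not the real headers):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Simplified stand-in for atomic_copy64: the generic #else branch above.
static inline void copy64(const volatile void* src, volatile void* dst) {
  *(volatile int64_t*) dst = *(const volatile int64_t*) src;
}

template <std::size_t byte_size> struct PlatformLoad;
template <std::size_t byte_size> struct PlatformStore;

// 8-byte accesses go through the copy helper instead of a direct move.
template <> struct PlatformLoad<8> {
  template <typename T>
  T operator()(T const volatile* src) const {
    static_assert(sizeof(T) == 8, "8-byte specialization");
    volatile int64_t staged;
    copy64(reinterpret_cast<const volatile int64_t*>(src),
           reinterpret_cast<volatile int64_t*>(&staged));
    int64_t bits = staged;              // local copy, no longer shared
    T result;
    std::memcpy(&result, &bits, sizeof(T));
    return result;
  }
};

template <> struct PlatformStore<8> {
  template <typename T>
  void operator()(T volatile* dest, T store_value) const {
    static_assert(sizeof(T) == 8, "8-byte specialization");
    copy64(reinterpret_cast<const volatile int64_t*>(&store_value),
           reinterpret_cast<volatile int64_t*>(dest));
  }
};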

src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp

@@ -26,6 +26,7 @@
// no precompiled headers
#include "jvm.h"
#include "asm/assembler.inline.hpp"
#include "atomic_linux_zero.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
@@ -339,14 +340,14 @@ extern "C" {
if (from > to) {
const jlong *end = from + count;
while (from < end)
os::atomic_copy64(from++, to++);
atomic_copy64(from++, to++);
}
else if (from < to) {
const jlong *end = from;
from += count - 1;
to += count - 1;
while (from >= end)
os::atomic_copy64(from--, to--);
atomic_copy64(from--, to--);
}
}

src/hotspot/os_cpu/linux_zero/os_linux_zero.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2010, 2018, Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -45,47 +45,4 @@
*/
static void workaround_expand_exec_shield_cs_limit();
// Atomically copy 64 bits of data
static void atomic_copy64(const volatile void *src, volatile void *dst) {
#if defined(PPC32) && !defined(__SPE__)
double tmp;
asm volatile ("lfd %0, %2\n"
"stfd %0, %1\n"
: "=&f"(tmp), "=Q"(*(volatile double*)dst)
: "Q"(*(volatile double*)src));
#elif defined(PPC32) && defined(__SPE__)
long tmp;
asm volatile ("evldd %0, %2\n"
"evstdd %0, %1\n"
: "=&r"(tmp), "=Q"(*(volatile long*)dst)
: "Q"(*(volatile long*)src));
#elif defined(S390) && !defined(_LP64)
double tmp;
asm volatile ("ld %0, %2\n"
"std %0, %1\n"
: "=&f"(tmp), "=Q"(*(volatile double*)dst)
: "Q"(*(volatile double*)src));
#elif defined(__ARM_ARCH_7A__)
// The only way to perform the atomic 64-bit load/store
// is to use ldrexd/strexd for both reads and writes.
// For store, we need to have the matching (fake) load first.
// Put clrex between exclusive ops on src and dst for clarity.
uint64_t tmp_r, tmp_w;
uint32_t flag_w;
asm volatile ("ldrexd %[tmp_r], [%[src]]\n"
"clrex\n"
"1:\n"
"ldrexd %[tmp_w], [%[dst]]\n"
"strexd %[flag_w], %[tmp_r], [%[dst]]\n"
"cmp %[flag_w], 0\n"
"bne 1b\n"
: [tmp_r] "=&r" (tmp_r), [tmp_w] "=&r" (tmp_w),
[flag_w] "=&r" (flag_w)
: [src] "r" (src), [dst] "r" (dst)
: "cc", "memory");
#else
*(jlong *) dst = *(const jlong *) src;
#endif
}
#endif // OS_CPU_LINUX_ZERO_OS_LINUX_ZERO_HPP