8229422: Taskqueue: Outdated selection of weak memory model platforms

Reviewed-by: tschatzl, dholmes, drwhite
Martin Doerr 2019-08-12 19:20:12 +02:00
parent c25e2fd8ec
commit e77e5da785
8 changed files with 24 additions and 6 deletions

src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp

@@ -34,6 +34,12 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
 
 #define SUPPORTS_NATIVE_CX8
 
+// Aarch64 was not originally defined as multi-copy-atomic, but now is.
+// See: "Simplifying ARM Concurrency: Multicopy-atomic Axiomatic and
+// Operational Models for ARMv8"
+// So we could #define CPU_MULTI_COPY_ATOMIC but historically we have
+// not done so.
+
 // According to the ARMv8 ARM, "Concurrent modification and execution
 // of instructions can lead to the resulting instruction performing
 // any behavior that can be achieved by executing any sequence of

src/hotspot/cpu/arm/globalDefinitions_arm.hpp

@@ -45,6 +45,9 @@ const bool HaveVFP = true;
 #define SUPPORTS_NATIVE_CX8
 #endif
 
+// arm32 is not specified as multi-copy-atomic
+// So we must not #define CPU_MULTI_COPY_ATOMIC
+
 #define STUBROUTINES_MD_HPP "stubRoutines_arm.hpp"
 #define INTERP_MASM_MD_HPP "interp_masm_arm.hpp"
 #define TEMPLATETABLE_MD_HPP "templateTable_arm.hpp"

src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp

@@ -41,8 +41,8 @@ const bool CCallingConventionRequiresIntsAsLongs = true;
 
 #define SUPPORTS_NATIVE_CX8
 
-// The PPC CPUs are NOT multiple-copy-atomic.
-#define CPU_NOT_MULTIPLE_COPY_ATOMIC
+// PPC64 is not specified as multi-copy-atomic
+// So we must not #define CPU_MULTI_COPY_ATOMIC
 
 // The expected size in bytes of a cache line, used to pad data structures.
 #define DEFAULT_CACHE_LINE_SIZE 128

src/hotspot/cpu/s390/globalDefinitions_s390.hpp

@@ -42,6 +42,8 @@ const int StackAlignmentInBytes = 16;
 
 #define SUPPORTS_NATIVE_CX8
 
+#define CPU_MULTI_COPY_ATOMIC
+
 // Indicates whether the C calling conventions require that
 // 32-bit integer argument values are extended to 64 bits.
 // This is the case on z/Architecture.

src/hotspot/cpu/sparc/globalDefinitions_sparc.hpp

@@ -36,6 +36,8 @@ const bool CCallingConventionRequiresIntsAsLongs = true;
 
 #define SUPPORTS_NATIVE_CX8
 
+#define CPU_MULTI_COPY_ATOMIC
+
 // The expected size in bytes of a cache line, used to pad data structures.
 #if defined(TIERED)
   // tiered, 64-bit, large machine

src/hotspot/cpu/x86/globalDefinitions_x86.hpp

@@ -33,6 +33,8 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
 
 #define SUPPORTS_NATIVE_CX8
 
+#define CPU_MULTI_COPY_ATOMIC
+
 // The expected size in bytes of a cache line, used to pad data structures.
 #if defined(TIERED)
   #ifdef _LP64

src/hotspot/share/gc/shared/taskqueue.inline.hpp

@@ -207,7 +207,7 @@ bool GenericTaskQueue<E, F, N>::pop_global(volatile E& t) {
   // Architectures with weak memory model require a barrier here
   // to guarantee that bottom is not older than age,
   // which is crucial for the correctness of the algorithm.
-#if !(defined SPARC || defined IA32 || defined AMD64)
+#ifndef CPU_MULTI_COPY_ATOMIC
   OrderAccess::fence();
 #endif
   uint localBot = OrderAccess::load_acquire(&_bottom);
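
A minimal sketch (not the actual GenericTaskQueue, which packs a tag into _age) of the guard this hunk switches to: the fence is now compiled in whenever the platform header did not claim multi-copy atomicity, rather than whenever the CPU is missing from a hard-coded strong-model list. Standard C++ atomics stand in for HotSpot's OrderAccess; the names pop_global_sketch, _age and _bottom are illustrative.

#include <atomic>

static std::atomic<unsigned> _age{0};     // steal end (real code packs a tag in here)
static std::atomic<unsigned> _bottom{0};  // owner's push/pop end

bool pop_global_sketch(unsigned& n_elems) {
  unsigned old_age = _age.load(std::memory_order_acquire);
#ifndef CPU_MULTI_COPY_ATOMIC
  // Platform header did not claim multi-copy atomicity: pay for a full
  // fence so the load of _bottom below cannot observe a value older
  // than the _age loaded above (the OrderAccess::fence() in the hunk).
  std::atomic_thread_fence(std::memory_order_seq_cst);
#endif
  unsigned local_bot = _bottom.load(std::memory_order_acquire);
  n_elems = local_bot - old_age;  // how many entries the thief believes exist
  return n_elems != 0;
}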

src/hotspot/share/utilities/globalDefinitions.hpp

@@ -481,10 +481,13 @@ const uint64_t KlassEncodingMetaspaceMax = (uint64_t(max_juint) + 1) << LogKlassAlignmentInBytes;
 // assure their ordering, instead of after volatile stores.
 // (See "A Tutorial Introduction to the ARM and POWER Relaxed Memory Models"
 // by Luc Maranget, Susmit Sarkar and Peter Sewell, INRIA/Cambridge)
-#ifdef CPU_NOT_MULTIPLE_COPY_ATOMIC
-const bool support_IRIW_for_not_multiple_copy_atomic_cpu = true;
+#ifdef CPU_MULTI_COPY_ATOMIC
+// Not needed.
+const bool support_IRIW_for_not_multiple_copy_atomic_cpu = false;
 #else
-const bool support_IRIW_for_not_multiple_copy_atomic_cpu = false;
+// From all non-multi-copy-atomic architectures, only PPC64 supports IRIW at the moment.
+// Final decision is subject to JEP 188: Java Memory Model Update.
+const bool support_IRIW_for_not_multiple_copy_atomic_cpu = PPC64_ONLY(true) NOT_PPC64(false);
 #endif
 
 // The expected size in bytes of a cache line, used to pad data structures.
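
For reference, a self-contained C++ litmus sketch of IRIW (Independent Reads of Independent Writes), the shape this constant is named after; the thread names are illustrative. The outcome r1 == 1, r2 == 0, r3 == 1, r4 == 0 would mean the two readers saw the two independent writes in opposite orders. Sequential consistency forbids it, and on a non-multi-copy-atomic CPU such as PPC64 the VM preserves that guarantee for Java volatiles by emitting an extra sync ahead of volatile loads when the constant above is true.

#include <atomic>
#include <thread>

std::atomic<int> x{0}, y{0};
int r1, r2, r3, r4;

int main() {
  std::thread w1([] { x.store(1, std::memory_order_seq_cst); });
  std::thread w2([] { y.store(1, std::memory_order_seq_cst); });
  std::thread rd1([] {  // reads x, then y
    r1 = x.load(std::memory_order_seq_cst);
    r2 = y.load(std::memory_order_seq_cst);
  });
  std::thread rd2([] {  // reads y, then x
    r3 = y.load(std::memory_order_seq_cst);
    r4 = x.load(std::memory_order_seq_cst);
  });
  w1.join(); w2.join(); rd1.join(); rd2.join();
  // Forbidden under seq_cst: r1 == 1 && r2 == 0 && r3 == 1 && r4 == 0
  // (each reader would have seen the two writes in a different order).
  return 0;
}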