8220351: Cross-modifying code

Reviewed-by: rrich, mdoerr, dholmes, eosterlund
Author: Robbin Ehn, 2019-03-28 11:08:23 +01:00
parent dbe0da648a
commit 76cdc8016f
21 changed files with 96 additions and 13 deletions
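
In brief: when one thread patches instructions that another thread may already have fetched (code patching done at safepoints and handshakes), most architectures require the executing thread to run an instruction-serializing barrier before it executes the modified code. The commit introduces OrderAccess::cross_modify_fence() for this and emits it on the paths where a JavaThread leaves a safe state. The sketch below (not HotSpot code; x86-64 with GCC/Clang inline asm, illustrative names) shows the executor-side pattern the per-platform hunks implement:

    #include <atomic>

    // cpuid is architecturally serializing on x86: it waits for all prior
    // instructions to complete and discards prefetched/speculatively decoded
    // instructions, so instruction fetch after it sees the patched bytes.
    static inline void cross_modify_fence() {
      int eax = 0;
      __asm__ volatile ("cpuid" : "+a" (eax) : : "ebx", "ecx", "edx", "memory");
    }

    std::atomic<bool> code_patched{false};  // set by the patching thread

    // Executor side of cross-modifying code: observe the flag with acquire,
    // serialize the instruction stream, then run the (possibly patched) code.
    void before_running_patched_code() {
      if (code_patched.load(std::memory_order_acquire)) {
        cross_modify_fence();
      }
    }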

src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.hpp

@@ -77,6 +77,8 @@ inline void OrderAccess::storeload() { inlasm_sync(); }
 inline void OrderAccess::acquire() { inlasm_lwsync(); }
 inline void OrderAccess::release() { inlasm_lwsync(); }
 inline void OrderAccess::fence() { inlasm_sync(); }
+inline void OrderAccess::cross_modify_fence()
+    { inlasm_isync(); }
 
 template<size_t byte_size>
 struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>

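On PPC the chosen barrier is isync: a context-synchronizing instruction that completes all prior instructions and discards anything already prefetched, so subsequently fetched instructions observe earlier code modifications (the writer side still flushes the icache with icbi/sync). The inlasm_isync() macro used above is defined elsewhere in this header; a plausible shape, shown here as an assumption for illustration:

    // Assumed definition for illustration; the real macro lives near the
    // top of the ppc orderAccess headers.
    #define inlasm_isync() __asm__ __volatile__ ("isync" : : : "memory")
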
src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.hpp

@@ -59,6 +59,11 @@ inline void OrderAccess::fence() {
   compiler_barrier();
 }
 
+inline void OrderAccess::cross_modify_fence() {
+  int idx = 0;
+  __asm__ volatile ("cpuid " : "+a" (idx) : : "ebx", "ecx", "edx", "memory");
+}
+
 template<>
 struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
 {

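cpuid is used because it is one of the few unprivileged instructions documented as fully serializing on x86. The constraint "+a" makes EAX an input/output (leaf 0, the cheap vendor-id query); ebx/ecx/edx are clobbered because cpuid overwrites them, and "memory" additionally makes the statement a compiler barrier. A roughly equivalent sketch via GCC's <cpuid.h> helper, shown only as an illustration (it is not what the commit uses, and it does not imply a compiler memory barrier):

    #include <cpuid.h>

    // Executes the same serializing cpuid, leaf 0, via the GCC/Clang helper.
    static inline void cross_modify_fence_alt() {
      unsigned int eax, ebx, ecx, edx;
      __get_cpuid(0, &eax, &ebx, &ecx, &edx);
    }
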
src/hotspot/os_cpu/bsd_zero/orderAccess_bsd_zero.hpp

@@ -73,5 +73,6 @@ inline void OrderAccess::storeload() { FULL_MEM_BARRIER; }
 inline void OrderAccess::acquire() { LIGHT_MEM_BARRIER; }
 inline void OrderAccess::release() { LIGHT_MEM_BARRIER; }
 inline void OrderAccess::fence() { FULL_MEM_BARRIER; }
+inline void OrderAccess::cross_modify_fence() { }
 
 #endif // OS_CPU_BSD_ZERO_ORDERACCESS_BSD_ZERO_HPP

src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.hpp

@@ -49,6 +49,8 @@ inline void OrderAccess::fence() {
   FULL_MEM_BARRIER;
 }
 
+inline void OrderAccess::cross_modify_fence() { }
+
 template<size_t byte_size>
 struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
 {

src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp

@@ -101,5 +101,6 @@ inline void OrderAccess::storestore() { dmb_st(); }
 inline void OrderAccess::storeload() { dmb_sy(); }
 inline void OrderAccess::release() { dmb_sy(); }
 inline void OrderAccess::fence() { dmb_sy(); }
+inline void OrderAccess::cross_modify_fence() { }
 
 #endif // OS_CPU_LINUX_ARM_ORDERACCESS_LINUX_ARM_HPP

src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.hpp

@@ -79,7 +79,8 @@ inline void OrderAccess::storeload() { inlasm_sync(); }
 inline void OrderAccess::acquire() { inlasm_lwsync(); }
 inline void OrderAccess::release() { inlasm_lwsync(); }
 inline void OrderAccess::fence() { inlasm_sync(); }
+inline void OrderAccess::cross_modify_fence()
+    { inlasm_isync(); }
 
 template<size_t byte_size>
 struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>

src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.hpp

@@ -74,6 +74,7 @@ inline void OrderAccess::storeload() { inlasm_zarch_sync(); }
 inline void OrderAccess::acquire() { inlasm_zarch_acquire(); }
 inline void OrderAccess::release() { inlasm_zarch_release(); }
 inline void OrderAccess::fence() { inlasm_zarch_sync(); }
+inline void OrderAccess::cross_modify_fence() { inlasm_zarch_sync(); }
 
 template<size_t byte_size>
 struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>

src/hotspot/os_cpu/linux_sparc/orderAccess_linux_sparc.hpp

@@ -48,4 +48,6 @@ inline void OrderAccess::fence() {
   __asm__ volatile ("membar #StoreLoad" : : : "memory");
 }
 
+inline void OrderAccess::cross_modify_fence() { }
+
 #endif // OS_CPU_LINUX_SPARC_ORDERACCESS_LINUX_SPARC_HPP

src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.hpp

@@ -55,6 +55,11 @@ inline void OrderAccess::fence() {
   compiler_barrier();
 }
 
+inline void OrderAccess::cross_modify_fence() {
+  int idx = 0;
+  __asm__ volatile ("cpuid " : "+a" (idx) : : "ebx", "ecx", "edx", "memory");
+}
+
 template<>
 struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
 {

src/hotspot/os_cpu/linux_zero/orderAccess_linux_zero.hpp

@@ -82,5 +82,6 @@ inline void OrderAccess::acquire() { LIGHT_MEM_BARRIER; }
 inline void OrderAccess::release() { LIGHT_MEM_BARRIER; }
 inline void OrderAccess::fence() { FULL_MEM_BARRIER; }
+inline void OrderAccess::cross_modify_fence() { }
 
 #endif // OS_CPU_LINUX_ZERO_ORDERACCESS_LINUX_ZERO_HPP

src/hotspot/os_cpu/solaris_sparc/orderAccess_solaris_sparc.hpp

@@ -51,4 +51,6 @@ inline void OrderAccess::fence() {
   __asm__ volatile ("membar #StoreLoad" : : : "memory");
 }
 
+inline void OrderAccess::cross_modify_fence() { }
+
 #endif // OS_CPU_SOLARIS_SPARC_ORDERACCESS_SOLARIS_SPARC_HPP

src/hotspot/os_cpu/solaris_x86/orderAccess_solaris_x86.hpp

@@ -54,4 +54,9 @@ inline void OrderAccess::fence() {
   compiler_barrier();
 }
 
+inline void OrderAccess::cross_modify_fence() {
+  int idx = 0;
+  __asm__ volatile ("cpuid " : "+a" (idx) : : "ebx", "ecx", "edx", "memory");
+}
+
 #endif // OS_CPU_SOLARIS_X86_ORDERACCESS_SOLARIS_X86_HPP

src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.hpp

@@ -69,6 +69,11 @@ inline void OrderAccess::fence() {
   compiler_barrier();
 }
 
+inline void OrderAccess::cross_modify_fence() {
+  int regs[4];
+  __cpuid(regs, 0);
+}
+
 #ifndef AMD64
 template<>
 struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>

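MSVC offers no GCC-style inline asm on x64, so the Windows variant uses the __cpuid intrinsic, which executes the same serializing instruction; regs receives EAX..EDX for the requested leaf. A standalone equivalent of the hunk above:

    #include <intrin.h>

    // Same effect as the inline-asm variants: run the serializing cpuid
    // instruction (leaf 0) and ignore the returned register values.
    static inline void cross_modify_fence_msvc() {
      int regs[4];
      __cpuid(regs, 0);
    }
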
src/hotspot/share/runtime/handshake.cpp

@@ -281,7 +281,7 @@ void HandshakeState::set_operation(JavaThread* target, HandshakeOperation* op) {
 
 void HandshakeState::clear_handshake(JavaThread* target) {
   _operation = NULL;
-  SafepointMechanism::disarm_local_poll_release(target);
+  SafepointMechanism::disarm_if_needed(target, true /* release */);
 }
 
 void HandshakeState::process_self_inner(JavaThread* thread) {

src/hotspot/share/runtime/interfaceSupport.inline.hpp

@@ -282,6 +282,7 @@ class ThreadBlockInVM : public ThreadStateTransition {
   }
   ~ThreadBlockInVM() {
     trans_and_fence(_thread_blocked, _thread_in_vm);
+    OrderAccess::cross_modify_fence();
     // We don't need to clear_walkable because it will happen automagically when we return to java
   }
 };
@@ -336,6 +337,8 @@ class ThreadBlockInVMWithDeadlockCheck : public ThreadStateTransition {
     _thread->set_thread_state(_thread_in_vm);
     CHECK_UNHANDLED_OOPS_ONLY(_thread->clear_unhandled_oops();)
+
+    OrderAccess::cross_modify_fence();
   }
 };

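Both transition helpers put the fence on the path that leaves the blocked state: while the thread was blocked, a safepoint operation may have patched code, so the instruction barrier must run before the thread can reach any patched code. A distilled RAII sketch of that placement (illustrative names, x86-64 GCC/Clang for the fence; not HotSpot code):

    // Stand-in for OrderAccess::cross_modify_fence() on x86-64.
    static inline void cmf() {
      int eax = 0;
      __asm__ volatile ("cpuid" : "+a" (eax) : : "ebx", "ecx", "edx", "memory");
    }

    // Mirrors the shape of ThreadBlockInVM: enter the safe state in the
    // constructor, leave it in the destructor, and serialize the
    // instruction stream immediately after leaving.
    class BlockedScope {
     public:
      BlockedScope()  { /* transition to the blocked (safe) state */ }
      ~BlockedScope() {
        /* transition back to the in-VM state */
        cmf();  // now safe to execute code patched while we were blocked
      }
    };
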
src/hotspot/share/runtime/orderAccess.hpp

@@ -266,6 +266,8 @@ class OrderAccess : private Atomic {
   static void release();
   static void fence();
+  static void cross_modify_fence();
+
   template <typename T>
   static T load_acquire(const volatile T* p);

src/hotspot/share/runtime/safepoint.cpp

@@ -477,7 +477,7 @@
       assert(!cur_state->is_running(), "Thread not suspended at safepoint");
       cur_state->restart(); // TSS _running
       assert(cur_state->is_running(), "safepoint state has not been reset");
-      SafepointMechanism::disarm_local_poll(current);
+      SafepointMechanism::disarm_if_needed(current, false /* NO release */);
     }
   } // ~JavaThreadIteratorWithHandle
@@ -716,8 +717,6 @@ static bool safepoint_safe_with(JavaThread *thread, JavaThreadState state) {
 }
 
 bool SafepointSynchronize::handshake_safe(JavaThread *thread) {
-  // The polls must be armed otherwise the safe state can change to unsafe at any time.
-  assert(SafepointMechanism::should_block(thread), "Must be armed");
   // This function must be called with the Threads_lock held so an externally
   // suspended thread cannot be resumed thus it is safe.
   assert(Threads_lock->owned_by_self() && Thread::current()->is_VM_thread(),
@@ -851,6 +850,9 @@ void SafepointSynchronize::block(JavaThread *thread) {
     thread->handle_special_runtime_exit_condition(
       !thread->is_at_poll_safepoint() && (state != _thread_in_native_trans));
   }
+
+  // cross_modify_fence is done by SafepointMechanism::block_if_requested_slow
+  // which is the only caller here.
 }
 
 // ------------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------------

src/hotspot/share/runtime/safepointMechanism.cpp

@@ -83,8 +83,7 @@ void SafepointMechanism::default_initialize() {
   }
 }
 
-void SafepointMechanism::block_if_requested_slow(JavaThread *thread) {
-  // local poll already checked, if used.
+void SafepointMechanism::block_or_handshake(JavaThread *thread) {
   if (global_poll()) {
     // Any load in ::block must not pass the global poll load.
     // Otherwise we might load an old safepoint counter (for example).
@@ -92,10 +91,31 @@
     SafepointSynchronize::block(thread);
   }
   if (uses_thread_local_poll() && thread->has_handshake()) {
     thread->handshake_process_by_self();
   }
 }
 
+void SafepointMechanism::block_if_requested_slow(JavaThread *thread) {
+  // Read global poll and has_handshake after local poll
+  OrderAccess::loadload();
+
+  // local poll already checked, if used.
+  block_or_handshake(thread);
+
+  OrderAccess::loadload();
+
+  if (uses_thread_local_poll() && local_poll_armed(thread)) {
+    disarm_local_poll_release(thread);
+    // We might have disarmed next safepoint/handshake
+    OrderAccess::storeload();
+    if (global_poll() || thread->has_handshake()) {
+      arm_local_poll(thread);
+    }
+  }
+
+  OrderAccess::cross_modify_fence();
+}
+
 void SafepointMechanism::initialize_header(JavaThread* thread) {
   disarm_local_poll(thread);
 }

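The disarm-then-recheck sequence in the new block_if_requested_slow closes a race: after this thread disarms its local poll, a new safepoint or handshake may begin and arm it again; without the storeload fence and recheck, the thread could miss that arming and sail past its next poll. A distilled sketch of the pattern with a simple atomic-flag model (illustrative names, not HotSpot's types):

    #include <atomic>

    std::atomic<bool> local_poll{true};    // this thread's poll word (armed)
    std::atomic<bool> global_poll{false};  // set when a safepoint/handshake starts

    // Distilled disarm/recheck from block_if_requested_slow: the seq_cst
    // fence plays the role of OrderAccess::storeload(), ordering the
    // disarming store before the reload of the global state.
    void after_blocking() {
      local_poll.store(false, std::memory_order_release);   // disarm_local_poll_release
      std::atomic_thread_fence(std::memory_order_seq_cst);  // storeload
      if (global_poll.load(std::memory_order_relaxed)) {
        // A new operation started after we disarmed: re-arm so the next
        // poll site notices it instead of silently skipping it.
        local_poll.store(true, std::memory_order_relaxed);  // arm_local_poll
      }
    }
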
src/hotspot/share/runtime/safepointMechanism.hpp

@@ -46,9 +46,13 @@ class SafepointMechanism : public AllStatic {
   static inline bool local_poll_armed(JavaThread* thread);
+  static inline void disarm_local_poll(JavaThread* thread);
+  static inline void disarm_local_poll_release(JavaThread* thread);
+
   static inline bool local_poll(Thread* thread);
   static inline bool global_poll();
+  static void block_or_handshake(JavaThread *thread);
   static void block_if_requested_slow(JavaThread *thread);
   static void default_initialize();
@@ -80,10 +84,10 @@
   // Caller is responsible for using a memory barrier if needed.
   static inline void arm_local_poll(JavaThread* thread);
-  static inline void disarm_local_poll(JavaThread* thread);
   // Release semantics
   static inline void arm_local_poll_release(JavaThread* thread);
-  static inline void disarm_local_poll_release(JavaThread* thread);
+  // Optional release
+  static inline void disarm_if_needed(JavaThread* thread, bool memory_order_release);
 
   // Setup the selected safepoint mechanism
   static void initialize();

src/hotspot/share/runtime/safepointMechanism.inline.hpp

@@ -56,7 +56,7 @@ bool SafepointMechanism::should_block(Thread* thread) {
 }
 
 void SafepointMechanism::block_if_requested(JavaThread *thread) {
-  if (uses_thread_local_poll() && !SafepointMechanism::local_poll_armed(thread)) {
+  if (uses_thread_local_poll() && !local_poll_armed(thread)) {
     return;
   }
   block_if_requested_slow(thread);
@@ -70,6 +70,19 @@ void SafepointMechanism::disarm_local_poll(JavaThread* thread) {
   thread->set_polling_page(poll_disarmed_value());
 }
 
+void SafepointMechanism::disarm_if_needed(JavaThread* thread, bool memory_order_release) {
+  JavaThreadState jts = thread->thread_state();
+  if (jts == _thread_in_native || jts == _thread_in_native_trans) {
+    // JavaThread will disarm itself and execute cross_modify_fence() before continuing
+    return;
+  }
+  if (memory_order_release) {
+    thread->set_polling_page_release(poll_disarmed_value());
+  } else {
+    thread->set_polling_page(poll_disarmed_value());
+  }
+}
+
 void SafepointMechanism::arm_local_poll_release(JavaThread* thread) {
   thread->set_polling_page_release(poll_armed_value());
 }

src/hotspot/share/runtime/thread.cpp

@@ -1836,6 +1836,10 @@
   // Thread is now sufficiently initialized to be handled by the safepoint code as being
   // in the VM. Change thread state from _thread_new to _thread_in_vm
   ThreadStateTransition::transition_and_fence(this, _thread_new, _thread_in_vm);
+  // Before a thread is on the threads list it is always safe, so after leaving the
+  // _thread_new we should emit an instruction barrier. The distance to modified code
+  // from here is probably far enough, but this is consistent and safe.
+  OrderAccess::cross_modify_fence();
 
   assert(JavaThread::current() == this, "sanity check");
   assert(!Thread::current()->owns_locks(), "sanity check");
@@ -2439,7 +2443,6 @@
       this->SR_lock()->wait(Mutex::_no_safepoint_check_flag);
     }
   }
-
   return ret;
 }
@@ -2467,6 +2470,9 @@
   set_thread_state(_thread_blocked);
   java_suspend_self();
   set_thread_state(state);
+  // Since we are not using a regular thread-state transition helper here,
+  // we must manually emit the instruction barrier after leaving a safe state.
+  OrderAccess::cross_modify_fence();
   InterfaceSupport::serialize_thread_state_with_handler(this);
   if (state != _thread_in_native) {
     SafepointMechanism::block_if_requested(this);