diff --git a/src/hotspot/cpu/aarch64/globals_aarch64.hpp b/src/hotspot/cpu/aarch64/globals_aarch64.hpp index a8b1efb65f2..dcbab0aff61 100644 --- a/src/hotspot/cpu/aarch64/globals_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/globals_aarch64.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -65,8 +65,6 @@ define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES); define_pd_global(bool, RewriteBytecodes, true); define_pd_global(bool, RewriteFrequentPairs, true); -define_pd_global(bool, UseMembar, true); - define_pd_global(bool, PreserveFramePointer, false); // GC Ergo Flags diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp index 47c93928158..96106a9f997 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp @@ -288,10 +288,6 @@ address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) { return address(((uint64_t)insn_addr + (offset << 2))); } -void MacroAssembler::serialize_memory(Register thread, Register tmp) { - dsb(Assembler::SY); -} - void MacroAssembler::safepoint_poll(Label& slow_path) { if (SafepointMechanism::uses_thread_local_poll()) { ldr(rscratch1, Address(rthread, Thread::polling_page_offset())); diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp index 6eeaa30b802..ad9df68f47f 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp @@ -975,9 +975,6 @@ public: Register tmp, int offset); - // Support for serializing memory accesses between threads - void serialize_memory(Register thread, Register tmp); - // Arithmetics void addptr(const Address &dst, int32_t src); diff --git a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp index ad94693a77c..fbe4b08636f 100644 --- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp @@ -1950,21 +1950,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, // didn't see any synchronization is progress, and escapes. __ mov(rscratch1, _thread_in_native_trans); - if (UseMembar) { - __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset())); + __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset())); - // Force this write out before the read below - __ dmb(Assembler::ISH); - } else { - __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset())); - __ stlrw(rscratch1, rscratch2); - - // Write serialization page so VM thread can do a pseudo remote membar. - // We use the current thread pointer to calculate a thread specific - // offset to write to within the page. This minimizes bus traffic - // due to cache line collision. 
- __ serialize_memory(rthread, r2); - } + // Force this write out before the read below + __ dmb(Assembler::ISH); // check for safepoint operation in progress and/or pending suspend requests Label safepoint_in_progress, safepoint_in_progress_done; diff --git a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp index 8ec91b5951b..2609974513f 100644 --- a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp @@ -1394,16 +1394,8 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset())); __ stlrw(rscratch1, rscratch2); - if (UseMembar) { - // Force this write out before the read below - __ dmb(Assembler::ISH); - } else { - // Write serialization page so VM thread can do a pseudo remote membar. - // We use the current thread pointer to calculate a thread specific - // offset to write to within the page. This minimizes bus traffic - // due to cache line collision. - __ serialize_memory(rthread, rscratch2); - } + // Force this write out before the read below + __ dmb(Assembler::ISH); // check for safepoint operation in progress and/or pending suspend requests { diff --git a/src/hotspot/cpu/arm/globals_arm.hpp b/src/hotspot/cpu/arm/globals_arm.hpp index 302339e3f90..cb58453c241 100644 --- a/src/hotspot/cpu/arm/globals_arm.hpp +++ b/src/hotspot/cpu/arm/globals_arm.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -65,8 +65,6 @@ define_pd_global(intx, InlineSmallCode, 1500); define_pd_global(bool, RewriteBytecodes, true); define_pd_global(bool, RewriteFrequentPairs, true); -define_pd_global(bool, UseMembar, true); - define_pd_global(bool, PreserveFramePointer, false); // GC Ergo Flags diff --git a/src/hotspot/cpu/ppc/globals_ppc.hpp b/src/hotspot/cpu/ppc/globals_ppc.hpp index 6d96ad05ee4..c1b024c4530 100644 --- a/src/hotspot/cpu/ppc/globals_ppc.hpp +++ b/src/hotspot/cpu/ppc/globals_ppc.hpp @@ -69,8 +69,6 @@ define_pd_global(intx, InlineSmallCode, 1500); define_pd_global(bool, RewriteBytecodes, true); define_pd_global(bool, RewriteFrequentPairs, true); -define_pd_global(bool, UseMembar, true); - define_pd_global(bool, PreserveFramePointer, false); // GC Ergo Flags diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp index d0c9d9019f6..7729d9ba592 100644 --- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp @@ -1302,35 +1302,6 @@ bool MacroAssembler::is_load_from_polling_page(int instruction, void* ucontext, #endif } -bool MacroAssembler::is_memory_serialization(int instruction, JavaThread* thread, void* ucontext) { -#ifdef LINUX - ucontext_t* uc = (ucontext_t*) ucontext; - - if (is_stwx(instruction) || is_stwux(instruction)) { - int ra = inv_ra_field(instruction); - int rb = inv_rb_field(instruction); - - // look up content of ra and rb in ucontext - address ra_val=(address)uc->uc_mcontext.regs->gpr[ra]; - long rb_val=(long)uc->uc_mcontext.regs->gpr[rb]; - return os::is_memory_serialize_page(thread, ra_val+rb_val); - } else if (is_stw(instruction) || is_stwu(instruction)) { - int ra = inv_ra_field(instruction); - int d1 = inv_d1_field(instruction); - - // look up content of ra in ucontext - address ra_val=(address)uc->uc_mcontext.regs->gpr[ra]; - return os::is_memory_serialize_page(thread, ra_val+d1); - } else { - return false; - } -#else - // workaround not needed on !LINUX :-) - ShouldNotCallThis(); - return false; -#endif -} - void MacroAssembler::bang_stack_with_offset(int offset) { // When increasing the stack, the old stack pointer will be written // to the new top of stack according to the PPC64 abi. @@ -3046,27 +3017,6 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe // flag == NE indicates failure } -// Write serialization page so VM thread can do a pseudo remote membar. -// We use the current thread pointer to calculate a thread specific -// offset to write to within the page. This minimizes bus traffic -// due to cache line collision. 
-void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) { - srdi(tmp2, thread, os::get_serialize_page_shift_count()); - - int mask = os::vm_page_size() - sizeof(int); - if (Assembler::is_simm(mask, 16)) { - andi(tmp2, tmp2, mask); - } else { - lis(tmp1, (int)((signed short) (mask >> 16))); - ori(tmp1, tmp1, mask & 0x0000ffff); - andr(tmp2, tmp2, tmp1); - } - - load_const(tmp1, (long) os::get_memory_serialize_page()); - release(); - stwx(R0, tmp1, tmp2); -} - void MacroAssembler::safepoint_poll(Label& slow_path, Register temp_reg) { if (SafepointMechanism::uses_thread_local_poll()) { ld(temp_reg, in_bytes(Thread::polling_page_offset()), R16_thread); diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp index d41a33d4e26..d4d4fc0e293 100644 --- a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp +++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp @@ -395,11 +395,6 @@ class MacroAssembler: public Assembler { static bool is_load_from_polling_page(int instruction, void* ucontext/*may be NULL*/, address* polling_address_ptr = NULL); - // Check whether instruction is a write access to the memory - // serialization page realized by one of the instructions stw, stwu, - // stwx, or stwux. - static bool is_memory_serialization(int instruction, JavaThread* thread, void* ucontext); - // Support for NULL-checks // // Generates code that causes a NULL OS exception if the content of reg is NULL. @@ -645,9 +640,6 @@ class MacroAssembler: public Assembler { Register tmp1, Register tmp2, Register tmp3, bool try_bias = UseBiasedLocking, bool use_rtm = false); - // Support for serializing memory accesses between threads - void serialize_memory(Register thread, Register tmp1, Register tmp2); - // Check if safepoint requested and if so branch void safepoint_poll(Label& slow_path, Register temp_reg); diff --git a/src/hotspot/cpu/ppc/nativeInst_ppc.hpp b/src/hotspot/cpu/ppc/nativeInst_ppc.hpp index 2399ec6f045..5d74962b488 100644 --- a/src/hotspot/cpu/ppc/nativeInst_ppc.hpp +++ b/src/hotspot/cpu/ppc/nativeInst_ppc.hpp @@ -100,12 +100,6 @@ class NativeInstruction { return MacroAssembler::is_load_from_polling_page(long_at(0), NULL); } - bool is_memory_serialization(JavaThread *thread, void *ucontext) { - // Is the current instruction a write access of thread to the - // memory serialization page? - return MacroAssembler::is_memory_serialization(long_at(0), thread, ucontext); - } - address get_stack_bang_address(void *ucontext) { // If long_at(0) is not a stack bang, return 0. Otherwise, return // banged address. diff --git a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp index 1c546c62e9c..c5470e3ae0f 100644 --- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp +++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp @@ -2430,16 +2430,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, { Label no_block, sync; - if (UseMembar) { - // Force this write out before the read below. - __ fence(); - } else { - // Write serialization page so VM thread can do a pseudo remote membar. - // We use the current thread pointer to calculate a thread specific - // offset to write to within the page. This minimizes bus traffic - // due to cache line collision. - __ serialize_memory(R16_thread, r_temp_4, r_temp_5); - } + // Force this write out before the read below. 
+ __ fence(); Register sync_state_addr = r_temp_4; Register sync_state = r_temp_5; diff --git a/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp b/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp index a2e5a05eb4f..eb084dd1c95 100644 --- a/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp +++ b/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp @@ -1486,16 +1486,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { __ li(R0/*thread_state*/, _thread_in_native_trans); __ release(); __ stw(R0/*thread_state*/, thread_(thread_state)); - if (UseMembar) { - __ fence(); - } - // Write serialization page so that the VM thread can do a pseudo remote - // membar. We use the current thread pointer to calculate a thread - // specific offset to write to within the page. This minimizes bus - // traffic due to cache line collision. - else { - __ serialize_memory(R16_thread, R11_scratch1, R12_scratch2); - } + __ fence(); // Now before we return to java we must look for a current safepoint // (a new safepoint can not start since we entered native_trans). diff --git a/src/hotspot/cpu/s390/globals_s390.hpp b/src/hotspot/cpu/s390/globals_s390.hpp index 2354e5ab045..c4607a16ec5 100644 --- a/src/hotspot/cpu/s390/globals_s390.hpp +++ b/src/hotspot/cpu/s390/globals_s390.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2018 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -71,8 +71,6 @@ define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGE define_pd_global(bool, RewriteBytecodes, true); define_pd_global(bool, RewriteFrequentPairs, true); -define_pd_global(bool, UseMembar, true); - define_pd_global(bool, PreserveFramePointer, false); // GC Ergo Flags diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp index 7494b33c48b..5e66f1f11d3 100644 --- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp @@ -2685,33 +2685,6 @@ uint MacroAssembler::get_poll_register(address instr_loc) { return 0; } -bool MacroAssembler::is_memory_serialization(int instruction, JavaThread* thread, void* ucontext) { - ShouldNotCallThis(); - return false; -} - -// Write serialization page so VM thread can do a pseudo remote membar -// We use the current thread pointer to calculate a thread specific -// offset to write to within the page. This minimizes bus traffic -// due to cache line collision. 
-void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) { - assert_different_registers(tmp1, tmp2); - z_sllg(tmp2, thread, os::get_serialize_page_shift_count()); - load_const_optimized(tmp1, (long) os::get_memory_serialize_page()); - - int mask = os::get_serialize_page_mask(); - if (Immediate::is_uimm16(mask)) { - z_nill(tmp2, mask); - z_llghr(tmp2, tmp2); - } else { - z_nilf(tmp2, mask); - z_llgfr(tmp2, tmp2); - } - - z_release(); - z_st(Z_R0, 0, tmp2, tmp1); -} - void MacroAssembler::safepoint_poll(Label& slow_path, Register temp_reg) { if (SafepointMechanism::uses_thread_local_poll()) { const Address poll_byte_addr(Z_thread, in_bytes(Thread::polling_page_offset()) + 7 /* Big Endian */); diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.hpp b/src/hotspot/cpu/s390/macroAssembler_s390.hpp index 90afb7e5f6c..21b601337c0 100644 --- a/src/hotspot/cpu/s390/macroAssembler_s390.hpp +++ b/src/hotspot/cpu/s390/macroAssembler_s390.hpp @@ -635,13 +635,6 @@ class MacroAssembler: public Assembler { // Extract poll register from instruction. static uint get_poll_register(address instr_loc); - // Check if instruction is a write access to the memory serialization page - // realized by one of the instructions stw, stwu, stwx, or stwux. - static bool is_memory_serialization(int instruction, JavaThread* thread, void* ucontext); - - // Support for serializing memory accesses between threads. - void serialize_memory(Register thread, Register tmp1, Register tmp2); - // Check if safepoint requested and if so branch void safepoint_poll(Label& slow_path, Register temp_reg); diff --git a/src/hotspot/cpu/s390/nativeInst_s390.hpp b/src/hotspot/cpu/s390/nativeInst_s390.hpp index 5a7a8a27145..68f0732aa69 100644 --- a/src/hotspot/cpu/s390/nativeInst_s390.hpp +++ b/src/hotspot/cpu/s390/nativeInst_s390.hpp @@ -104,12 +104,6 @@ class NativeInstruction { return MacroAssembler::get_poll_register(addr_at(0)); } - bool is_memory_serialization(JavaThread *thread, void *ucontext) { - // Is the current instruction a write access of thread to the - // memory serialization page? - return MacroAssembler::is_memory_serialization(long_at(0), thread, ucontext); - } - public: // The output of __ breakpoint_trap(). diff --git a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp index fb4cfa33534..33bcb034221 100644 --- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp +++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp @@ -2161,16 +2161,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, save_native_result(masm, ret_type, workspace_slot_offset); // Make Z_R2 available as work reg. - if (UseMembar) { - // Force this write out before the read below. - __ z_fence(); - } else { - // Write serialization page so VM thread can do a pseudo remote membar. - // We use the current thread pointer to calculate a thread specific - // offset to write to within the page. This minimizes bus traffic - // due to cache line collision. - __ serialize_memory(Z_thread, Z_R1, Z_R2); - } + // Force this write out before the read below. 
+ __ z_fence(); __ safepoint_poll(sync, Z_R1); diff --git a/src/hotspot/cpu/s390/templateInterpreterGenerator_s390.cpp b/src/hotspot/cpu/s390/templateInterpreterGenerator_s390.cpp index afaac928b04..0f420e2e40c 100644 --- a/src/hotspot/cpu/s390/templateInterpreterGenerator_s390.cpp +++ b/src/hotspot/cpu/s390/templateInterpreterGenerator_s390.cpp @@ -1598,15 +1598,8 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { // synchronization is progress, and escapes. __ set_thread_state(_thread_in_native_trans); - if (UseMembar) { - __ z_fence(); - } else { - // Write serialization page so VM thread can do a pseudo remote - // membar. We use the current thread pointer to calculate a thread - // specific offset to write to within the page. This minimizes bus - // traffic due to cache line collision. - __ serialize_memory(Z_thread, Z_R1, Z_R0); - } + __ z_fence(); + // Now before we return to java we must look for a current safepoint // (a new safepoint can not start since we entered native_trans). // We must check here because a current safepoint could be modifying diff --git a/src/hotspot/cpu/sparc/globals_sparc.hpp b/src/hotspot/cpu/sparc/globals_sparc.hpp index 38d2834df60..479f84bbb24 100644 --- a/src/hotspot/cpu/sparc/globals_sparc.hpp +++ b/src/hotspot/cpu/sparc/globals_sparc.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -74,8 +74,6 @@ define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES); define_pd_global(bool, RewriteBytecodes, true); define_pd_global(bool, RewriteFrequentPairs, true); -define_pd_global(bool, UseMembar, true); - define_pd_global(bool, PreserveFramePointer, false); // GC Ergo Flags diff --git a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp index 183ceda1477..e008297ed56 100644 --- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp +++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp @@ -236,24 +236,6 @@ void MacroAssembler::breakpoint_trap() { trap(ST_RESERVED_FOR_USER_0); } -// Write serialization page so VM thread can do a pseudo remote membar -// We use the current thread pointer to calculate a thread specific -// offset to write to within the page. This minimizes bus traffic -// due to cache line collision. 
-void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) { - srl(thread, os::get_serialize_page_shift_count(), tmp2); - if (Assembler::is_simm13(os::vm_page_size())) { - and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2); - } - else { - set((os::vm_page_size() - sizeof(int)), tmp1); - and3(tmp2, tmp1, tmp2); - } - set(os::get_memory_serialize_page(), tmp1); - st(G0, tmp1, tmp2); -} - - void MacroAssembler::safepoint_poll(Label& slow_path, bool a, Register thread_reg, Register temp_reg) { if (SafepointMechanism::uses_thread_local_poll()) { ldx(Address(thread_reg, Thread::polling_page_offset()), temp_reg, 0); diff --git a/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp b/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp index d795815df2b..c36a504b5de 100644 --- a/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp +++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp @@ -951,9 +951,6 @@ public: void breakpoint_trap(); void breakpoint_trap(Condition c, CC cc); - // Support for serializing memory accesses between threads - void serialize_memory(Register thread, Register tmp1, Register tmp2); - void safepoint_poll(Label& slow_path, bool a, Register thread_reg, Register temp_reg); // Stack frame creation/removal diff --git a/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp b/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp index 8b58118177e..bbb4c4a4600 100644 --- a/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp +++ b/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp @@ -2372,16 +2372,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, __ set(_thread_in_native_trans, G3_scratch); __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset()); - if (UseMembar) { - // Force this write out before the read below - __ membar(Assembler::StoreLoad); - } else { - // Write serialization page so VM thread can do a pseudo remote membar. - // We use the current thread pointer to calculate a thread specific - // offset to write to within the page. This minimizes bus traffic - // due to cache line collision. - __ serialize_memory(G2_thread, G1_scratch, G3_scratch); - } + // Force this write out before the read below + __ membar(Assembler::StoreLoad); Label L; Address suspend_state(G2_thread, JavaThread::suspend_flags_offset()); diff --git a/src/hotspot/cpu/sparc/templateInterpreterGenerator_sparc.cpp b/src/hotspot/cpu/sparc/templateInterpreterGenerator_sparc.cpp index 498c2ceaa92..0b5c0f3da86 100644 --- a/src/hotspot/cpu/sparc/templateInterpreterGenerator_sparc.cpp +++ b/src/hotspot/cpu/sparc/templateInterpreterGenerator_sparc.cpp @@ -1374,16 +1374,8 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { __ set(_thread_in_native_trans, G3_scratch); __ st(G3_scratch, thread_state); - if (UseMembar) { - // Force this write out before the read below - __ membar(Assembler::StoreLoad); - } else { - // Write serialization page so VM thread can do a pseudo remote membar. - // We use the current thread pointer to calculate a thread specific - // offset to write to within the page. This minimizes bus traffic - // due to cache line collision. 
- __ serialize_memory(G2_thread, G1_scratch, G3_scratch); - } + // Force this write out before the read below + __ membar(Assembler::StoreLoad); Label L; __ safepoint_poll(L, false, G2_thread, G3_scratch); diff --git a/src/hotspot/cpu/x86/globals_x86.hpp b/src/hotspot/cpu/x86/globals_x86.hpp index 423e1b1e478..0918662f355 100644 --- a/src/hotspot/cpu/x86/globals_x86.hpp +++ b/src/hotspot/cpu/x86/globals_x86.hpp @@ -84,8 +84,6 @@ define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES); define_pd_global(bool, RewriteBytecodes, true); define_pd_global(bool, RewriteFrequentPairs, true); -define_pd_global(bool, UseMembar, true); - // GC Ergo Flags define_pd_global(size_t, CMSYoungGenPerWorker, 64*M); // default max size of CMS young gen, per GC worker thread diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp index 2ab361d88b8..0c5190abf07 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp @@ -3517,22 +3517,6 @@ void MacroAssembler::save_rax(Register tmp) { else if (tmp != rax) mov(tmp, rax); } -// Write serialization page so VM thread can do a pseudo remote membar. -// We use the current thread pointer to calculate a thread specific -// offset to write to within the page. This minimizes bus traffic -// due to cache line collision. -void MacroAssembler::serialize_memory(Register thread, Register tmp) { - movl(tmp, thread); - shrl(tmp, os::get_serialize_page_shift_count()); - andl(tmp, (os::vm_page_size() - sizeof(int))); - - Address index(noreg, tmp, Address::times_1); - ExternalAddress page(os::get_memory_serialize_page()); - - // Size of store must match masking code above - movl(as_Address(ArrayAddress(page, index)), tmp); -} - void MacroAssembler::safepoint_poll(Label& slow_path, Register thread_reg, Register temp_reg) { if (SafepointMechanism::uses_thread_local_poll()) { #ifdef _LP64 diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.hpp b/src/hotspot/cpu/x86/macroAssembler_x86.hpp index 433f1ca5903..344a432a72c 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp @@ -644,9 +644,6 @@ class MacroAssembler: public Assembler { Register tmp, int offset); - // Support for serializing memory accesses between threads - void serialize_memory(Register thread, Register tmp); - // If thread_reg is != noreg the code assumes the register passed contains // the thread (required on 64 bit). void safepoint_poll(Label& slow_path, Register thread_reg, Register temp_reg); diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp index 782c389a2de..a2e3b10a38d 100644 --- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp +++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp @@ -2088,18 +2088,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, // didn't see any synchronization is progress, and escapes. __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans); - if (UseMembar) { - // Force this write out before the read below - __ membar(Assembler::Membar_mask_bits( - Assembler::LoadLoad | Assembler::LoadStore | - Assembler::StoreLoad | Assembler::StoreStore)); - } else { - // Write serialization page so VM thread can do a pseudo remote membar. - // We use the current thread pointer to calculate a thread specific - // offset to write to within the page. This minimizes bus traffic - // due to cache line collision. 
- __ serialize_memory(thread, rcx); - } + // Force this write out before the read below + __ membar(Assembler::Membar_mask_bits( + Assembler::LoadLoad | Assembler::LoadStore | + Assembler::StoreLoad | Assembler::StoreStore)); if (AlwaysRestoreFPU) { // Make sure the control word is correct. diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp index 466c84581d1..4ef36c52cab 100644 --- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp +++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp @@ -2560,18 +2560,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, // didn't see any synchronization is progress, and escapes. __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans); - if (UseMembar) { - // Force this write out before the read below - __ membar(Assembler::Membar_mask_bits( - Assembler::LoadLoad | Assembler::LoadStore | - Assembler::StoreLoad | Assembler::StoreStore)); - } else { - // Write serialization page so VM thread can do a pseudo remote membar. - // We use the current thread pointer to calculate a thread specific - // offset to write to within the page. This minimizes bus traffic - // due to cache line collision. - __ serialize_memory(r15_thread, rcx); - } + // Force this write out before the read below + __ membar(Assembler::Membar_mask_bits( + Assembler::LoadLoad | Assembler::LoadStore | + Assembler::StoreLoad | Assembler::StoreStore)); Label after_transition; diff --git a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp index e77456b0d3f..0754f63a4ba 100644 --- a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp +++ b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp @@ -1090,18 +1090,10 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans); - if (UseMembar) { - // Force this write out before the read below - __ membar(Assembler::Membar_mask_bits( - Assembler::LoadLoad | Assembler::LoadStore | - Assembler::StoreLoad | Assembler::StoreStore)); - } else { - // Write serialization page so VM thread can do a pseudo remote membar. - // We use the current thread pointer to calculate a thread specific - // offset to write to within the page. This minimizes bus traffic - // due to cache line collision. - __ serialize_memory(thread, rcx); - } + // Force this write out before the read below + __ membar(Assembler::Membar_mask_bits( + Assembler::LoadLoad | Assembler::LoadStore | + Assembler::StoreLoad | Assembler::StoreStore)); #ifndef _LP64 if (AlwaysRestoreFPU) { diff --git a/src/hotspot/cpu/zero/globals_zero.hpp b/src/hotspot/cpu/zero/globals_zero.hpp index d59454327f9..f30a2565de1 100644 --- a/src/hotspot/cpu/zero/globals_zero.hpp +++ b/src/hotspot/cpu/zero/globals_zero.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -69,8 +69,6 @@ define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES); define_pd_global(bool, RewriteBytecodes, true); define_pd_global(bool, RewriteFrequentPairs, true); -define_pd_global(bool, UseMembar, true); - // GC Ergo Flags define_pd_global(size_t, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread diff --git a/src/hotspot/os/posix/os_posix.hpp b/src/hotspot/os/posix/os_posix.hpp index 1c1b04e526f..176fe5b9eaa 100644 --- a/src/hotspot/os/posix/os_posix.hpp +++ b/src/hotspot/os/posix/os_posix.hpp @@ -130,11 +130,6 @@ public: #endif }; -// On POSIX platforms the signal handler is global so we just do the write. -static void write_memory_serialize_page_with_handler(JavaThread* thread) { - write_memory_serialize_page(thread); -} - /* * Crash protection for the watcher thread. Wrap the callback * with a sigsetjmp and in case of a SIGSEGV/SIGBUS we siglongjmp diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp index 0b3b9d9aadc..1ddc527f07b 100644 --- a/src/hotspot/os/windows/os_windows.cpp +++ b/src/hotspot/os/windows/os_windows.cpp @@ -2414,23 +2414,6 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { } #endif // _WIN64 - // Check to see if we caught the safepoint code in the - // process of write protecting the memory serialization page. - // It write enables the page immediately after protecting it - // so just return. - if (exception_code == EXCEPTION_ACCESS_VIOLATION) { - if (t != NULL && t->is_Java_thread()) { - JavaThread* thread = (JavaThread*) t; - PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; - address addr = (address) exceptionRecord->ExceptionInformation[1]; - if (os::is_memory_serialize_page(thread, addr)) { - // Block current thread until the memory serialize page permission restored. - os::block_on_serialize_page_trap(); - return EXCEPTION_CONTINUE_EXECUTION; - } - } - } - if ((exception_code == EXCEPTION_ACCESS_VIOLATION) && VM_Version::is_cpuinfo_segv_addr(pc)) { // Verify that OS save/restore AVX registers. @@ -5330,22 +5313,6 @@ bool os::find(address addr, outputStream* st) { return result; } -LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) { - DWORD exception_code = e->ExceptionRecord->ExceptionCode; - - if (exception_code == EXCEPTION_ACCESS_VIOLATION) { - JavaThread* thread = JavaThread::current(); - PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord; - address addr = (address) exceptionRecord->ExceptionInformation[1]; - - if (os::is_memory_serialize_page(thread, addr)) { - return EXCEPTION_CONTINUE_EXECUTION; - } - } - - return EXCEPTION_CONTINUE_SEARCH; -} - static jint initSock() { WSADATA wsadata; diff --git a/src/hotspot/os/windows/os_windows.hpp b/src/hotspot/os/windows/os_windows.hpp index b056c81b584..9d58e746ef9 100644 --- a/src/hotspot/os/windows/os_windows.hpp +++ b/src/hotspot/os/windows/os_windows.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -108,9 +108,6 @@ class win32 { static address fast_jni_accessor_wrapper(BasicType); #endif - // filter function to ignore faults on serializations page - static LONG WINAPI serialize_fault_filter(struct _EXCEPTION_POINTERS* e); - // Fast access to current thread protected: static int _thread_ptr_offset; @@ -123,21 +120,6 @@ public: static inline int get_thread_ptr_offset() { return _thread_ptr_offset; } }; -static void write_memory_serialize_page_with_handler(JavaThread* thread) { - // Due to chained nature of SEH handlers we have to be sure - // that our handler is always last handler before an attempt to write - // into serialization page - it can fault if we access this page - // right in the middle of protect/unprotect sequence by remote - // membar logic. - // __try/__except are very lightweight operations (only several - // instructions not affecting control flow directly on x86) - // so we can use it here, on very time critical path - __try { - write_memory_serialize_page(thread); - } __except (win32::serialize_fault_filter((_EXCEPTION_POINTERS*)_exception_info())) - {} -} - /* * Crash protection for the watcher thread. Wrap the callback * with a __try { call() } diff --git a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp index 8955d2fce97..9afe6d55ddb 100644 --- a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp +++ b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp @@ -469,18 +469,6 @@ JVM_handle_aix_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrec return 1; } } - - // Check to see if we caught the safepoint code in the - // process of write protecting the memory serialization page. - // It write enables the page immediately after protecting it - // so we can just return to retry the write. - if ((sig == SIGSEGV) && - os::is_memory_serialize_page(thread, addr)) { - // Synchronization problem in the pseudo memory barrier code (bug id 6546278) - // Block current thread until the memory serialize page permission restored. - os::block_on_serialize_page_trap(); - return true; - } } run_stub: diff --git a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp index 56dadda030e..80fa4f30a9a 100644 --- a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp +++ b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -674,17 +674,6 @@ JVM_handle_bsd_signal(int sig, stub = addr; } } - - // Check to see if we caught the safepoint code in the - // process of write protecting the memory serialization page. - // It write enables the page immediately after protecting it - // so we can just return to retry the write. - if ((sig == SIGSEGV || sig == SIGBUS) && - os::is_memory_serialize_page(thread, (address) info->si_addr)) { - // Block current thread until the memory serialize page permission restored. 
- os::block_on_serialize_page_trap(); - return true; - } } #ifndef AMD64 diff --git a/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp b/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp index cb992cbf760..7a559c001ad 100644 --- a/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp +++ b/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -206,17 +206,6 @@ JVM_handle_bsd_signal(int sig, stub = addr; } }*/ - - // Check to see if we caught the safepoint code in the process - // of write protecting the memory serialization page. It write - // enables the page immediately after protecting it so we can - // just return to retry the write. - if ((sig == SIGSEGV || sig == SIGBUS) && - os::is_memory_serialize_page(thread, (address) info->si_addr)) { - // Block current thread until permission is restored. - os::block_on_serialize_page_trap(); - return true; - } } // signal-chaining diff --git a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp index 13b7214af77..19c8cae55a5 100644 --- a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp +++ b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp @@ -475,17 +475,6 @@ JVM_handle_linux_signal(int sig, stub = addr; } } - - // Check to see if we caught the safepoint code in the - // process of write protecting the memory serialization page. - // It write enables the page immediately after protecting it - // so we can just return to retry the write. - if ((sig == SIGSEGV) && - os::is_memory_serialize_page(thread, (address) info->si_addr)) { - // Block current thread until the memory serialize page permission restored. - os::block_on_serialize_page_trap(); - return true; - } } if (stub != NULL) { diff --git a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp index 930d9b23a97..a2cddb188dd 100644 --- a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp +++ b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp @@ -407,16 +407,6 @@ extern "C" int JVM_handle_linux_signal(int sig, siginfo_t* info, stub = addr; } } - - // Check to see if we caught the safepoint code in the - // process of write protecting the memory serialization page. - // It write enables the page immediately after protecting it - // so we can just return to retry the write. - if (sig == SIGSEGV && os::is_memory_serialize_page(thread, (address) info->si_addr)) { - // Block current thread until the memory serialize page permission restored. - os::block_on_serialize_page_trap(); - return true; - } } if (unsafe_access && stub == NULL) { diff --git a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp index 63ca3089c0d..1d6dd990e18 100644 --- a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp +++ b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp @@ -477,20 +477,6 @@ JVM_handle_linux_signal(int sig, return true; } } - - // Check to see if we caught the safepoint code in the - // process of write protecting the memory serialization page. - // It write enables the page immediately after protecting it - // so we can just return to retry the write. - if ((sig == SIGSEGV) && - // Si_addr may not be valid due to a bug in the linux-ppc64 kernel (see comment above). 
- // Use is_memory_serialization instead of si_addr. - ((NativeInstruction*)pc)->is_memory_serialization(thread, ucVoid)) { - // Synchronization problem in the pseudo memory barrier code (bug id 6546278) - // Block current thread until the memory serialize page permission restored. - os::block_on_serialize_page_trap(); - return true; - } } if (stub != NULL) { diff --git a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp index 9d6e1d9b39e..5e877611818 100644 --- a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp +++ b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp @@ -477,19 +477,6 @@ JVM_handle_linux_signal(int sig, return true; } } - - // Check to see if we caught the safepoint code in the - // process of write protecting the memory serialization page. - // It write enables the page immediately after protecting it - // so we can just return to retry the write. - // Info->si_addr need not be the exact address, it is only - // guaranteed to be on the same page as the address that caused - // the SIGSEGV. - if ((sig == SIGSEGV) && !UseMembar && - (os::get_memory_serialize_page() == - (address)((uintptr_t)info->si_addr & ~(os::vm_page_size()-1)))) { - return true; - } } if (stub != NULL) { diff --git a/src/hotspot/os_cpu/linux_sparc/os_linux_sparc.cpp b/src/hotspot/os_cpu/linux_sparc/os_linux_sparc.cpp index 984aa7c8bee..55e7b70fccb 100644 --- a/src/hotspot/os_cpu/linux_sparc/os_linux_sparc.cpp +++ b/src/hotspot/os_cpu/linux_sparc/os_linux_sparc.cpp @@ -439,10 +439,6 @@ inline static bool checkFastJNIAccess(address pc, address* stub) { return false; } -inline static bool checkSerializePage(JavaThread* thread, address addr) { - return os::is_memory_serialize_page(thread, addr); -} - inline static bool checkZombie(sigcontext* uc, address* pc, address* stub) { if (nativeInstruction_at(*pc)->is_zombie()) { // zombie method (ld [%g0],%o7 instruction) @@ -542,16 +538,6 @@ JVM_handle_linux_signal(int sig, pc = address(SIG_PC(uc)); npc = address(SIG_NPC(uc)); - // Check to see if we caught the safepoint code in the - // process of write protecting the memory serialization page. - // It write enables the page immediately after protecting it - // so we can just return to retry the write. - if ((sig == SIGSEGV) && checkSerializePage(thread, (address)info->si_addr)) { - // Block current thread until the memory serialize page permission restored. - os::block_on_serialize_page_trap(); - return 1; - } - if (checkPrefetch(uc, pc)) { return 1; } diff --git a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp index e0de85839ba..5fbc4f7b896 100644 --- a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp +++ b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp @@ -498,17 +498,6 @@ JVM_handle_linux_signal(int sig, stub = addr; } } - - // Check to see if we caught the safepoint code in the - // process of write protecting the memory serialization page. - // It write enables the page immediately after protecting it - // so we can just return to retry the write. - if ((sig == SIGSEGV) && - os::is_memory_serialize_page(thread, (address) info->si_addr)) { - // Block current thread until the memory serialize page permission restored. 
- os::block_on_serialize_page_trap(); - return true; - } } #ifndef AMD64 diff --git a/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp b/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp index 32f496459e1..6ec6c0d87d7 100644 --- a/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp +++ b/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -220,17 +220,6 @@ JVM_handle_linux_signal(int sig, stub = addr; } }*/ - - // Check to see if we caught the safepoint code in the process - // of write protecting the memory serialization page. It write - // enables the page immediately after protecting it so we can - // just return to retry the write. - if (sig == SIGSEGV && - os::is_memory_serialize_page(thread, (address) info->si_addr)) { - // Block current thread until permission is restored. - os::block_on_serialize_page_trap(); - return true; - } } // signal-chaining diff --git a/src/hotspot/os_cpu/solaris_sparc/os_solaris_sparc.cpp b/src/hotspot/os_cpu/solaris_sparc/os_solaris_sparc.cpp index 295478ae30b..f53b7d1de3c 100644 --- a/src/hotspot/os_cpu/solaris_sparc/os_solaris_sparc.cpp +++ b/src/hotspot/os_cpu/solaris_sparc/os_solaris_sparc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -519,17 +519,6 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid, stub = addr; } } - - // Check to see if we caught the safepoint code in the - // process of write protecting the memory serialization page. - // It write enables the page immediately after protecting it - // so just return. - if ((sig == SIGSEGV) && - os::is_memory_serialize_page(thread, (address)info->si_addr)) { - // Block current thread until the memory serialize page permission restored. - os::block_on_serialize_page_trap(); - return true; - } } if (stub != NULL) { diff --git a/src/hotspot/os_cpu/solaris_x86/os_solaris_x86.cpp b/src/hotspot/os_cpu/solaris_x86/os_solaris_x86.cpp index 9a8afe35be4..67e52a009b6 100644 --- a/src/hotspot/os_cpu/solaris_x86/os_solaris_x86.cpp +++ b/src/hotspot/os_cpu/solaris_x86/os_solaris_x86.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -593,17 +593,6 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid, stub = addr; } } - - // Check to see if we caught the safepoint code in the - // process of write protecting the memory serialization page. - // It write enables the page immediately after protecting it - // so we can just return to retry the write. - if ((sig == SIGSEGV) && - os::is_memory_serialize_page(thread, (address)info->si_addr)) { - // Block current thread until the memory serialize page permission restored. 
- os::block_on_serialize_page_trap(); - return true; - } } // Execution protection violation diff --git a/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp b/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp index 688e9450734..0f0ab09610e 100644 --- a/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp +++ b/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp @@ -359,9 +359,6 @@ bool JfrThreadSampleClosure::do_sample_thread(JavaThread* thread, JfrStackFrame* bool ret = false; thread->set_trace_flag(); - if (!UseMembar) { - os::serialize_thread_states(); - } if (JAVA_SAMPLE == type) { if (thread_state_in_java(thread)) { ret = sample_thread_in_java(thread, frames, max_frames); diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp index 655aad0bbdb..e914bcf8c54 100644 --- a/src/hotspot/share/runtime/arguments.cpp +++ b/src/hotspot/share/runtime/arguments.cpp @@ -532,7 +532,7 @@ static SpecialFlag const special_jvm_flags[] = { { "MaxRAMFraction", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() }, { "MinRAMFraction", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() }, { "InitialRAMFraction", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() }, - { "UseMembar", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() }, + { "UseMembar", JDK_Version::jdk(10), JDK_Version::jdk(12), JDK_Version::undefined() }, { "CompilerThreadHintNoPreempt", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) }, { "VMThreadHintNoPreempt", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) }, diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp index 3b5895113c5..bac83f55f13 100644 --- a/src/hotspot/share/runtime/globals.hpp +++ b/src/hotspot/share/runtime/globals.hpp @@ -247,12 +247,6 @@ define_pd_global(uint64_t,MaxRAM, 1ULL*G); range(8, 256) \ constraint(ObjectAlignmentInBytesConstraintFunc,AtParse) \ \ - /* UseMembar is theoretically a temp flag used for memory barrier */ \ - /* removal testing. 
It was supposed to be removed before FCS but has */ \ - /* been re-added (see 6401008) */ \ - product_pd(bool, UseMembar, \ - "(Unstable) Issues membars on thread state transitions") \ - \ develop(bool, CleanChunkPoolAsync, true, \ "Clean the chunk pool asynchronously") \ \ diff --git a/src/hotspot/share/runtime/handshake.cpp b/src/hotspot/share/runtime/handshake.cpp index 84b8be3d99f..d22f169a81a 100644 --- a/src/hotspot/share/runtime/handshake.cpp +++ b/src/hotspot/share/runtime/handshake.cpp @@ -126,10 +126,6 @@ class VM_HandshakeOneThread: public VM_Handshake { return; } - if (!UseMembar) { - os::serialize_thread_states(); - } - log_trace(handshake)("Thread signaled, begin processing by VMThtread"); jlong start_time = os::elapsed_counter(); do { @@ -173,10 +169,6 @@ class VM_HandshakeAllThreads: public VM_Handshake { return; } - if (!UseMembar) { - os::serialize_thread_states(); - } - log_debug(handshake)("Threads signaled, begin processing blocked threads by VMThtread"); const jlong start_time = os::elapsed_counter(); int number_of_threads_completed = 0; diff --git a/src/hotspot/share/runtime/interfaceSupport.inline.hpp b/src/hotspot/share/runtime/interfaceSupport.inline.hpp index 8025bc4a893..f37950c6ca5 100644 --- a/src/hotspot/share/runtime/interfaceSupport.inline.hpp +++ b/src/hotspot/share/runtime/interfaceSupport.inline.hpp @@ -79,17 +79,7 @@ class InterfaceSupport: AllStatic { private: static void serialize_thread_state_internal(JavaThread* thread, bool needs_exception_handler) { // Make sure new state is seen by VM thread - if (UseMembar) { - // Force a fence between the write above and read below - OrderAccess::fence(); - } else { - // store to serialize page so VM thread can do pseudo remote membar - if (needs_exception_handler) { - os::write_memory_serialize_page_with_handler(thread); - } else { - os::write_memory_serialize_page(thread); - } - } + OrderAccess::fence(); } }; @@ -126,9 +116,7 @@ class ThreadStateTransition : public StackObj { // transition_and_fence must be used on any thread state transition // where there might not be a Java call stub on the stack, in // particular on Windows where the Structured Exception Handler is - // set up in the call stub. os::write_memory_serialize_page() can - // fault and we can't recover from it on Windows without a SEH in - // place. + // set up in the call stub. 
static inline void transition_and_fence(JavaThread *thread, JavaThreadState from, JavaThreadState to) { assert(thread->thread_state() == from, "coming from wrong thread state"); assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transitions states"); diff --git a/src/hotspot/share/runtime/os.cpp b/src/hotspot/share/runtime/os.cpp index 494197fc0a0..9eca2fd380f 100644 --- a/src/hotspot/share/runtime/os.cpp +++ b/src/hotspot/share/runtime/os.cpp @@ -71,8 +71,6 @@ OSThread* os::_starting_thread = NULL; address os::_polling_page = NULL; -volatile int32_t* os::_mem_serialize_page = NULL; -uintptr_t os::_serialize_page_mask = 0; volatile unsigned int os::_rand_seed = 1; int os::_processor_count = 0; int os::_initial_active_processor_count = 0; @@ -1351,49 +1349,6 @@ char** os::split_path(const char* path, int* n) { return opath; } -void os::set_memory_serialize_page(address page) { - int count = log2_intptr(sizeof(class JavaThread)) - log2_intptr(64); - _mem_serialize_page = (volatile int32_t *)page; - // We initialize the serialization page shift count here - // We assume a cache line size of 64 bytes - assert(SerializePageShiftCount == count, "JavaThread size changed; " - "SerializePageShiftCount constant should be %d", count); - set_serialize_page_mask((uintptr_t)(vm_page_size() - sizeof(int32_t))); -} - -static volatile intptr_t SerializePageLock = 0; - -// This method is called from signal handler when SIGSEGV occurs while the current -// thread tries to store to the "read-only" memory serialize page during state -// transition. -void os::block_on_serialize_page_trap() { - log_debug(safepoint)("Block until the serialize page permission restored"); - - // When VMThread is holding the SerializePageLock during modifying the - // access permission of the memory serialize page, the following call - // will block until the permission of that page is restored to rw. - // Generally, it is unsafe to manipulate locks in signal handlers, but in - // this case, it's OK as the signal is synchronous and we know precisely when - // it can occur. - Thread::muxAcquire(&SerializePageLock, "set_memory_serialize_page"); - Thread::muxRelease(&SerializePageLock); -} - -// Serialize all thread state variables -void os::serialize_thread_states() { - // On some platforms such as Solaris & Linux, the time duration of the page - // permission restoration is observed to be much longer than expected due to - // scheduler starvation problem etc. To avoid the long synchronization - // time and expensive page trap spinning, 'SerializePageLock' is used to block - // the mutator thread if such case is encountered. See bug 6546278 for details. - Thread::muxAcquire(&SerializePageLock, "serialize_thread_states"); - os::protect_memory((char *)os::get_memory_serialize_page(), - os::vm_page_size(), MEM_PROT_READ); - os::protect_memory((char *)os::get_memory_serialize_page(), - os::vm_page_size(), MEM_PROT_RW); - Thread::muxRelease(&SerializePageLock); -} - // Returns true if the current stack pointer is above the stack shadow // pages, false otherwise. 
bool os::stack_shadow_pages_available(Thread *thread, const methodHandle& method, address sp) { diff --git a/src/hotspot/share/runtime/os.hpp b/src/hotspot/share/runtime/os.hpp index b098c107f28..5e6c0a65f0d 100644 --- a/src/hotspot/share/runtime/os.hpp +++ b/src/hotspot/share/runtime/os.hpp @@ -100,8 +100,6 @@ class os: AllStatic { private: static OSThread* _starting_thread; static address _polling_page; - static volatile int32_t * _mem_serialize_page; - static uintptr_t _serialize_page_mask; public: static size_t _page_sizes[page_sizes_max]; @@ -420,54 +418,6 @@ class os: AllStatic { static bool is_readable_pointer(const void* p); static bool is_readable_range(const void* from, const void* to); - // Routines used to serialize the thread state without using membars - static void serialize_thread_states(); - - // Since we write to the serialize page from every thread, we - // want stores to be on unique cache lines whenever possible - // in order to minimize CPU cross talk. We pre-compute the - // amount to shift the thread* to make this offset unique to - // each thread. - static int get_serialize_page_shift_count() { - return SerializePageShiftCount; - } - - static void set_serialize_page_mask(uintptr_t mask) { - _serialize_page_mask = mask; - } - - static unsigned int get_serialize_page_mask() { - return _serialize_page_mask; - } - - static void set_memory_serialize_page(address page); - - static address get_memory_serialize_page() { - return (address)_mem_serialize_page; - } - - static inline void write_memory_serialize_page(JavaThread *thread) { - uintptr_t page_offset = ((uintptr_t)thread >> - get_serialize_page_shift_count()) & - get_serialize_page_mask(); - *(volatile int32_t *)((uintptr_t)_mem_serialize_page+page_offset) = 1; - } - - static bool is_memory_serialize_page(JavaThread *thread, address addr) { - if (UseMembar) return false; - // Previously this function calculated the exact address of this - // thread's serialize page, and checked if the faulting address - // was equal. However, some platforms mask off faulting addresses - // to the page size, so now we just check that the address is - // within the page. This makes the thread argument unnecessary, - // but we retain the NULL check to preserve existing behavior. - if (thread == NULL) return false; - address page = (address) _mem_serialize_page; - return addr >= page && addr < (page + os::vm_page_size()); - } - - static void block_on_serialize_page_trap(); - // threads enum ThreadType { diff --git a/src/hotspot/share/runtime/safepoint.cpp b/src/hotspot/share/runtime/safepoint.cpp index 3e99a0032b1..ca99786c814 100644 --- a/src/hotspot/share/runtime/safepoint.cpp +++ b/src/hotspot/share/runtime/safepoint.cpp @@ -213,16 +213,7 @@ void SafepointSynchronize::begin() { // writes and reads of both the safepoint state and the Java // threads state is critical. In order to guarantee that the // memory writes are serialized with respect to each other, - // the VM thread issues a memory barrier instruction - // (on MP systems). In order to avoid the overhead of issuing - // a memory barrier for each Java thread making native calls, each Java - // thread performs a write to a single memory page after changing - // the thread state. The VM thread performs a sequence of - // mprotect OS calls which forces all previous writes from all - // Java threads to be serialized. This is done in the - // os::serialize_thread_states() call. 
This has proven to be - // much more efficient than executing a membar instruction - // on every call to native code. + // the VM thread issues a memory barrier instruction. // 3. Running compiled Code // Compiled code reads a global (Safepoint Polling) page that // is set to fault if we are trying to get to a safepoint. @@ -251,11 +242,6 @@ void SafepointSynchronize::begin() { } OrderAccess::fence(); // storestore|storeload, global state -> local state - // Flush all thread states to memory - if (!UseMembar) { - os::serialize_thread_states(); - } - if (SafepointMechanism::uses_global_page_poll()) { // Make interpreter safepoint aware Interpreter::notice_safepoints(); diff --git a/src/hotspot/share/runtime/safepointMechanism.cpp b/src/hotspot/share/runtime/safepointMechanism.cpp index f16a04438cf..7ac190c9c28 100644 --- a/src/hotspot/share/runtime/safepointMechanism.cpp +++ b/src/hotspot/share/runtime/safepointMechanism.cpp @@ -87,17 +87,6 @@ void SafepointMechanism::initialize_header(JavaThread* thread) { disarm_local_poll(thread); } -void SafepointMechanism::initialize_serialize_page() { - if (!UseMembar) { - const size_t page_size = os::vm_page_size(); - char* serialize_page = os::reserve_memory(page_size, NULL, page_size); - os::commit_memory_or_exit(serialize_page, page_size, false, "Unable to commit memory serialization page"); - log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(serialize_page)); - os::set_memory_serialize_page((address)(serialize_page)); - } -} - void SafepointMechanism::initialize() { pd_initialize(); - initialize_serialize_page(); } diff --git a/src/hotspot/share/runtime/safepointMechanism.hpp b/src/hotspot/share/runtime/safepointMechanism.hpp index 1e2e9b15679..34fa204c3dd 100644 --- a/src/hotspot/share/runtime/safepointMechanism.hpp +++ b/src/hotspot/share/runtime/safepointMechanism.hpp @@ -52,7 +52,6 @@ class SafepointMechanism : public AllStatic { static inline void block_if_requested_local_poll(JavaThread *thread); static void default_initialize(); - static void initialize_serialize_page(); static void pd_initialize() NOT_AIX({ default_initialize(); }); diff --git a/src/hotspot/share/utilities/globalDefinitions.hpp b/src/hotspot/share/utilities/globalDefinitions.hpp index dd35a86ed02..601e900381a 100644 --- a/src/hotspot/share/utilities/globalDefinitions.hpp +++ b/src/hotspot/share/utilities/globalDefinitions.hpp @@ -164,15 +164,6 @@ const int BitsPerSize_t = size_tSize * BitsPerByte; // Size of a char[] needed to represent a jint as a string in decimal. const int jintAsStringSize = 12; -// In fact this should be -// log2_intptr(sizeof(class JavaThread)) - log2_intptr(64); -// see os::set_memory_serialize_page() -#ifdef _LP64 -const int SerializePageShiftCount = 4; -#else -const int SerializePageShiftCount = 3; -#endif - // An opaque struct of heap-word width, so that HeapWord* can be a generic // pointer into the heap. 
We require that object sizes be measured in // units of heap words, so that that diff --git a/test/hotspot/jtreg/runtime/CommandLine/VMDeprecatedOptions.java b/test/hotspot/jtreg/runtime/CommandLine/VMDeprecatedOptions.java index 9584c5259f1..2562e669052 100644 --- a/test/hotspot/jtreg/runtime/CommandLine/VMDeprecatedOptions.java +++ b/test/hotspot/jtreg/runtime/CommandLine/VMDeprecatedOptions.java @@ -44,7 +44,6 @@ public class VMDeprecatedOptions { {"MaxRAMFraction", "8"}, {"MinRAMFraction", "2"}, {"InitialRAMFraction", "64"}, - {"UseMembar", "true"}, {"TLABStats", "false"}, // deprecated alias flags (see also aliased_jvm_flags):