8216557: Aarch64: Add support for Concurrent Class Unloading

Co-authored-by: Andrew Haley <aph@redhat.com>
Reviewed-by: aph, eosterlund, pliden, njian
Stuart Monteith 2020-04-29 14:53:48 +08:00
parent 408bc486e0
commit 739e8e322d
16 changed files with 366 additions and 25 deletions

@@ -1635,6 +1635,20 @@ void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
st->print("mov rscratch1, #%d\n\t", framesize - 2 * wordSize);
st->print("sub sp, sp, rscratch1");
}
if (C->stub_function() == NULL && BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
st->print("\n\t");
st->print("ldr rscratch1, [guard]\n\t");
st->print("dmb ishld\n\t");
st->print("ldr rscratch2, [rthread, #thread_disarmed_offset]\n\t");
st->print("cmp rscratch1, rscratch2\n\t");
st->print("b.eq skip");
st->print("\n\t");
st->print("blr #nmethod_entry_barrier_stub\n\t");
st->print("b skip\n\t");
st->print("guard: int\n\t");
st->print("\n\t");
st->print("skip:\n\t");
}
}
#endif
@@ -1667,6 +1681,11 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
__ build_frame(framesize);
if (C->stub_function() == NULL) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->nmethod_entry_barrier(&_masm);
}
if (VerifyStackAtCalls) {
Unimplemented();
}

@@ -27,6 +27,7 @@
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/arrayOop.hpp"
@@ -336,6 +337,10 @@ void C1_MacroAssembler::build_frame(int framesize, int bang_size_in_bytes) {
// Note that we do this before doing an enter().
generate_stack_overflow_check(bang_size_in_bytes);
MacroAssembler::build_frame(framesize + 2 * wordSize);
// Insert nmethod entry barrier into frame.
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->nmethod_entry_barrier(this);
}
void C1_MacroAssembler::remove_frame(int framesize) {

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,12 +23,17 @@
*/
#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/universe.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
#define __ masm->
void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
@@ -229,3 +234,67 @@ void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm,
}
__ str(t1, Address(rthread, in_bytes(JavaThread::allocated_bytes_offset())));
}
void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm == NULL) {
return;
}
Label skip, guard;
Address thread_disarmed_addr(rthread, in_bytes(bs_nm->thread_disarmed_offset()));
__ ldrw(rscratch1, guard);
// Subsequent loads of oops must occur after load of guard value.
// BarrierSetNMethod::disarm sets guard with release semantics.
__ membar(__ LoadLoad);
__ ldrw(rscratch2, thread_disarmed_addr);
__ cmpw(rscratch1, rscratch2);
__ br(Assembler::EQ, skip);
__ mov(rscratch1, StubRoutines::aarch64::method_entry_barrier());
__ blr(rscratch1);
__ b(skip);
__ bind(guard);
__ emit_int32(0); // nmethod guard value. Skipped over in common case.
__ bind(skip);
}
void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs == NULL) {
return;
}
Label bad_call;
__ cbz(rmethod, bad_call);
// Pointer chase to the method holder to find out if the method is concurrently unloading.
Label method_live;
__ load_method_holder_cld(rscratch1, rmethod);
// Is it a strong CLD?
__ ldr(rscratch2, Address(rscratch1, ClassLoaderData::keep_alive_offset()));
__ cbnz(rscratch2, method_live);
// Is it a weak but alive CLD?
__ stp(r10, r11, Address(__ pre(sp, -2 * wordSize)));
__ ldr(r10, Address(rscratch1, ClassLoaderData::holder_offset()));
// Uses rscratch1 & rscratch2, so we must pass new temporaries.
__ resolve_weak_handle(r10, r11);
__ mov(rscratch1, r10);
__ ldp(r10, r11, Address(__ post(sp, 2 * wordSize)));
__ cbnz(rscratch1, method_live);
__ bind(bad_call);
__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
__ bind(method_live);
}
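
Taken together, the two barriers above reduce to simple checks: the entry barrier compares a per-nmethod guard word against a per-thread "disarmed" value, with acquire ordering on the guard load. A minimal standalone C++ sketch of that handshake follows; the names (nmethod_guard, thread_disarmed_value, slow_path) are illustrative stand-ins, not HotSpot code:

#include <atomic>
#include <cstdio>

// Hypothetical stand-ins for the nmethod guard word and per-thread value.
std::atomic<int> nmethod_guard{1};   // armed: differs from thread_disarmed_value
int thread_disarmed_value = 0;       // the current "good" value for this thread

void slow_path() { std::puts("barrier stub: fix oops, then disarm"); }

// Mirrors BarrierSetAssembler::nmethod_entry_barrier: the ldr + dmb ishld
// pair is modelled by a load-acquire; equal values mean "disarmed".
void nmethod_entry() {
  if (nmethod_guard.load(std::memory_order_acquire) != thread_disarmed_value) {
    slow_path();
    // Mirrors BarrierSetNMethod::disarm: release store of the good value.
    nmethod_guard.store(thread_disarmed_value, std::memory_order_release);
  }
  std::puts("method body runs with oops safely visible");
}

int main() {
  nmethod_entry();  // first entry: armed, takes the stub
  nmethod_entry();  // second entry: disarmed, fast path
}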

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,8 @@
#define CPU_AARCH64_GC_SHARED_BARRIERSETASSEMBLER_AARCH64_HPP
#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "memory/allocation.hpp"
#include "oops/access.hpp"
@@ -72,6 +74,10 @@ public:
Label& slow_case // continuation point if fast allocation fails
);
virtual void barrier_stubs_init() {}
virtual void nmethod_entry_barrier(MacroAssembler* masm);
virtual void c2i_entry_barrier(MacroAssembler* masm);
};
#endif // CPU_AARCH64_GC_SHARED_BARRIERSETASSEMBLER_AARCH64_HPP

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,18 +23,143 @@
*/
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/nativeInst.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
class NativeNMethodBarrier: public NativeInstruction {
address instruction_address() const { return addr_at(0); }
int *guard_addr() {
return reinterpret_cast<int*>(instruction_address() + 10 * 4);
}
public:
int get_value() {
return Atomic::load_acquire(guard_addr());
}
void set_value(int value) {
Atomic::release_store(guard_addr(), value);
}
void verify() const;
};
// Store the instruction bitmask, bits and name for checking the barrier.
struct CheckInsn {
uint32_t mask;
uint32_t bits;
const char *name;
};
static const struct CheckInsn barrierInsn[] = {
{ 0xff000000, 0x18000000, "ldr (literal)" },
{ 0xfffff0ff, 0xd50330bf, "dmb" },
{ 0xffc00000, 0xb9400000, "ldr"},
{ 0x7f20001f, 0x6b00001f, "cmp"},
{ 0xff00001f, 0x54000000, "b.eq"},
{ 0xff800000, 0xd2800000, "mov"},
{ 0xff800000, 0xf2800000, "movk"},
{ 0xff800000, 0xf2800000, "movk"},
{ 0xfffffc1f, 0xd63f0000, "blr"},
{ 0xfc000000, 0x14000000, "b"}
};
// The encodings must match the instructions emitted by
// BarrierSetAssembler::nmethod_entry_barrier. The matching ignores the specific
// register numbers and immediate values in the encoding.
void NativeNMethodBarrier::verify() const {
intptr_t addr = (intptr_t) instruction_address();
for (unsigned int i = 0; i < sizeof(barrierInsn)/sizeof(struct CheckInsn); i++) {
uint32_t inst = *((uint32_t*) addr);
if ((inst & barrierInsn[i].mask) != barrierInsn[i].bits) {
tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", addr, inst);
fatal("not an %s instruction.", barrierInsn[i].name);
}
addr += 4;
}
}
/* We're called from an nmethod when we need to deoptimize it. We do
this by throwing away the nmethod's frame and jumping to the
ic_miss stub. This looks like there has been an IC miss at the
entry of the nmethod, so we resolve the call, which will fall back
to the interpreter if the nmethod has been unloaded. */
void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {
-ShouldNotReachHere();
typedef struct {
intptr_t *sp; intptr_t *fp; address lr; address pc;
} frame_pointers_t;
frame_pointers_t *new_frame = (frame_pointers_t *)(return_address_ptr - 5);
JavaThread *thread = (JavaThread*)Thread::current();
RegisterMap reg_map(thread, false);
frame frame = thread->last_frame();
assert(frame.is_compiled_frame() || frame.is_native_frame(), "must be");
assert(frame.cb() == nm, "must be");
frame = frame.sender(&reg_map);
LogTarget(Trace, nmethod, barrier) out;
if (out.is_enabled()) {
Thread* thread = Thread::current();
assert(thread->is_Java_thread(), "must be JavaThread");
JavaThread* jth = (JavaThread*) thread;
ResourceMark mark;
log_trace(nmethod, barrier)("deoptimize(nmethod: %s(%p), return_addr: %p, osr: %d, thread: %p(%s), making rsp: %p) -> %p",
nm->method()->name_and_sig_as_C_string(),
nm, *(address *) return_address_ptr, nm->is_osr_method(), jth,
jth->get_thread_name(), frame.sp(), nm->verified_entry_point());
}
new_frame->sp = frame.sp();
new_frame->fp = frame.fp();
new_frame->lr = frame.pc();
new_frame->pc = SharedRuntime::get_handle_wrong_method_stub();
}
// This is the offset of the entry barrier from where the frame is completed.
// If any code changes between the end of the verified entry, where the entry
// barrier resides, and the completion of the frame, then
// NativeNMethodBarrier::verify() will immediately complain when it does not
// find the expected native instructions at this offset; the offset then needs
// updating.
// Note that this offset is invariant of PreserveFramePointer.
static const int entry_barrier_offset = -4 * 11;
static NativeNMethodBarrier* native_nmethod_barrier(nmethod* nm) {
address barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset;
NativeNMethodBarrier* barrier = reinterpret_cast<NativeNMethodBarrier*>(barrier_address);
debug_only(barrier->verify());
return barrier;
}
void BarrierSetNMethod::disarm(nmethod* nm) {
-ShouldNotReachHere();
if (!supports_entry_barrier(nm)) {
return;
}
// Disarms the nmethod guard emitted by BarrierSetAssembler::nmethod_entry_barrier.
// Symmetric "LDR; DMB ISHLD" is in the nmethod barrier.
NativeNMethodBarrier* barrier = native_nmethod_barrier(nm);
barrier->set_value(disarmed_value());
}
bool BarrierSetNMethod::is_armed(nmethod* nm) {
-ShouldNotReachHere();
-return false;
if (!supports_entry_barrier(nm)) {
return false;
}
NativeNMethodBarrier* barrier = native_nmethod_barrier(nm);
return barrier->get_value() != disarmed_value();
}
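
The verify() routine above checks each emitted word against a mask/bits pattern from the CheckInsn table. A self-contained sketch of the same matching scheme; the two sample words are hand-encoded AArch64 instructions chosen for illustration (an ldr w8 literal load and dmb ishld), not real emitted code:

#include <cstdint>
#include <cstdio>

struct CheckInsn {
  uint32_t mask;
  uint32_t bits;
  const char* name;
};

int main() {
  // Subset of the table above: "ldr (literal)" and "dmb".
  const CheckInsn pattern[] = {
    { 0xff000000, 0x18000000, "ldr (literal)" },
    { 0xfffff0ff, 0xd50330bf, "dmb" },
  };
  // Illustrative encodings: ldr w8, <pc+8> and dmb ishld.
  const uint32_t code[] = { 0x18000048, 0xd50339bf };

  for (unsigned i = 0; i < sizeof(code) / sizeof(code[0]); i++) {
    // Masking strips register numbers and immediates before comparing.
    if ((code[i] & pattern[i].mask) != pattern[i].bits) {
      std::printf("0x%08x is not a %s instruction\n", code[i], pattern[i].name);
      return 1;
    }
    std::printf("0x%08x matches %s\n", code[i], pattern[i].name);
  }
  return 0;
}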

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,12 +23,7 @@
#include "precompiled.hpp"
#include "gc/z/zArguments.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "utilities/debug.hpp"
void ZArguments::initialize_platform() {
-// Disable class unloading - we don't support concurrent class unloading yet.
-FLAG_SET_DEFAULT(ClassUnloading, false);
-FLAG_SET_DEFAULT(ClassUnloadingWithConcurrentMark, false);
+// Does nothing
}

@@ -3693,6 +3693,11 @@ void MacroAssembler::cmpoop(Register obj1, Register obj2) {
bs->obj_equals(this, obj1, obj2);
}
void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
load_method_holder(rresult, rmethod);
ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
}
void MacroAssembler::load_method_holder(Register holder, Register method) {
ldr(holder, Address(method, Method::const_offset())); // ConstMethod*
ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool*
@@ -3714,6 +3719,22 @@ void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp, noreg);
}
// ((WeakHandle)result).resolve();
void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) {
assert_different_registers(rresult, rtmp);
Label resolved;
// A null weak handle resolves to null.
cbz(rresult, resolved);
// Only 64 bit platforms support GCs that require a tmp register
// Only IN_HEAP loads require a thread_tmp register
// WeakHandle::resolve is an indirection like jweak.
access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
rresult, Address(rresult), rtmp, /*tmp_thread*/noreg);
bind(resolved);
}
void MacroAssembler::load_mirror(Register dst, Register method, Register tmp) {
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
ldr(dst, Address(rmethod, Method::const_offset()));
@@ -4108,9 +4129,9 @@ Address MacroAssembler::allocate_metadata_address(Metadata* obj) {
}
// Move an oop into a register. immediate is true if we want
-// immediate instrcutions, i.e. we are not going to patch this
-// instruction while the code is being executed by another thread. In
-// that case we can use move immediates rather than the constant pool.
+// immediate instructions and nmethod entry barriers are not enabled.
+// i.e. we are not going to patch this instruction while the code is being
+// executed by another thread.
void MacroAssembler::movoop(Register dst, jobject obj, bool immediate) {
int oop_index;
if (obj == NULL) {
@@ -4125,11 +4146,16 @@ void MacroAssembler::movoop(Register dst, jobject obj, bool immediate) {
oop_index = oop_recorder()->find_index(obj);
}
RelocationHolder rspec = oop_Relocation::spec(oop_index);
-if (! immediate) {
+// nmethod entry barriers necessitate using the constant pool. They have to be
+// ordered with respect to oop accesses.
+// Using immediate literals would necessitate ISBs.
+if (BarrierSet::barrier_set()->barrier_set_nmethod() != NULL || !immediate) {
address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address
ldr_constant(dst, Address(dummy, rspec));
} else
mov(dst, Address((address)obj, rspec));
}
// Move a metadata address into a register.
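
The movoop change matters because the entry barrier's "ldr; dmb ishld" only orders loads that come after it: oops fetched from the constant pool therefore observe values at least as new as the guard (pairing with the release store in BarrierSetNMethod::disarm), whereas patched immediates in the instruction stream would additionally require ISB-based synchronization. A standalone sketch of the release/acquire pairing being relied on, with illustrative names only:

#include <atomic>
#include <cassert>
#include <thread>

// Illustrative stand-ins: "constant_pool_oop" plays a constant-pool entry,
// "guard" plays the nmethod guard word. Not HotSpot types.
std::atomic<int> guard{0};
int constant_pool_oop = 0;

void gc_thread() {
  constant_pool_oop = 42;                     // fix up the oop first...
  guard.store(1, std::memory_order_release);  // ...then publish (disarm)
}

void method_entry() {
  // The acquire load models "ldr rscratch1, [guard]; dmb ishld": any
  // constant-pool oop read after it sees at least the published value.
  while (guard.load(std::memory_order_acquire) != 1) { /* spin: still armed */ }
  assert(constant_pool_oop == 42);
}

int main() {
  std::thread gc(gc_thread);
  std::thread mutator(method_entry);
  gc.join();
  mutator.join();
}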

@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
-* Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
+* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -176,6 +176,8 @@ class MacroAssembler: public Assembler {
using Assembler::ldr;
using Assembler::str;
using Assembler::ldrw;
using Assembler::strw;
void ldr(Register Rx, const Address &adr);
void ldrw(Register Rw, const Address &adr);
@@ -809,6 +811,7 @@ public:
// C 'boolean' to Java boolean: x == 0 ? 0 : 1
void c2bool(Register x);
void load_method_holder_cld(Register rresult, Register rmethod);
void load_method_holder(Register holder, Register method);
// oop manipulations
@@ -816,6 +819,7 @@ public:
void store_klass(Register dst, Register src);
void cmp_klass(Register oop, Register trial_klass, Register tmp);
void resolve_weak_handle(Register result, Register tmp);
void resolve_oop_handle(Register result, Register tmp = r5);
void load_mirror(Register dst, Register method, Register tmp = r5);

@@ -29,6 +29,7 @@
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "logging/log.hpp"
@@ -732,6 +733,9 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
c2i_no_clinit_check_entry = __ pc();
}
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm);
gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
__ flush();
@@ -1504,6 +1508,9 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// -2 because return address is already present and so is saved rfp
__ sub(sp, sp, stack_size - 2*wordSize);
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->nmethod_entry_barrier(masm);
// Frame is now completed as far as size and linkage.
int frame_complete = ((intptr_t)__ pc()) - start;
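
The c2i_entry_barrier wired in above (implemented earlier in this commit) decides whether the callee's class loader is still alive before letting a compiled-to-interpreter call proceed: a strong CLD is always live, otherwise the weak holder handle is resolved. A plain C++ sketch of that decision chain; the struct shapes are simplified stand-ins for the metadata graph, not HotSpot's real layouts:

#include <cstdio>

struct Oop { int dummy; };

struct ClassLoaderData {
  int  keep_alive;   // > 0 for strong CLDs (e.g. the boot loader)
  Oop* holder;       // weak handle target; null once the loader is unloaded
};

struct InstanceKlass { ClassLoaderData* cld; };
struct Method        { InstanceKlass* holder; };

// Mirrors the logic of BarrierSetAssembler::c2i_entry_barrier:
// strong CLD -> live; otherwise resolve the weak holder handle.
bool method_is_live(const Method* m) {
  if (m == nullptr) return false;                 // cbz rmethod, bad_call
  const ClassLoaderData* cld = m->holder->cld;    // load_method_holder_cld
  if (cld->keep_alive != 0) return true;          // strong CLD
  return cld->holder != nullptr;                  // weak but still alive?
}

int main() {
  ClassLoaderData dying{0, nullptr};
  InstanceKlass k{&dying};
  Method m{&k};
  std::printf("live: %d\n", method_is_live(&m));  // 0 -> handle_wrong_method
}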

@@ -1,6 +1,6 @@
/*
-* Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
-* Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
+* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -4155,6 +4155,50 @@ class StubGenerator: public StubCodeGenerator {
return entry;
}
address generate_method_entry_barrier() {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "nmethod_entry_barrier");
Label deoptimize_label;
address start = __ pc();
__ set_last_Java_frame(sp, rfp, lr, rscratch1);
__ enter();
__ add(rscratch2, sp, wordSize); // rscratch2 points to the saved lr
__ sub(sp, sp, 4 * wordSize); // four words for the returned {sp, fp, lr, pc}
__ push_call_clobbered_registers();
__ mov(c_rarg0, rscratch2);
__ call_VM_leaf
(CAST_FROM_FN_PTR
(address, BarrierSetNMethod::nmethod_stub_entry_barrier), 1);
__ reset_last_Java_frame(true);
__ mov(rscratch1, r0);
__ pop_call_clobbered_registers();
__ cbnz(rscratch1, deoptimize_label);
__ leave();
__ ret(lr);
__ BIND(deoptimize_label);
__ ldp(/* new sp */ rscratch1, rfp, Address(sp, 0 * wordSize));
__ ldp(lr, /* new pc*/ rscratch2, Address(sp, 2 * wordSize));
__ mov(sp, rscratch1);
__ br(rscratch2);
return start;
}
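
The deoptimize_label path above consumes four words that BarrierSetNMethod::deoptimize (earlier in this commit) writes at return_address_ptr - 5: the caller's sp and fp, the caller's pc as the new lr, and the handle_wrong_method stub as the new pc. A sketch of just that data handoff, using dummy addresses:

#include <cstdint>
#include <cstdio>

// Same shape as the frame_pointers_t in barrierSetNMethod_aarch64.cpp.
struct frame_pointers_t {
  intptr_t* sp;
  intptr_t* fp;
  void*     lr;
  void*     pc;
};

int main() {
  // Stands in for the four stack slots the stub reserves below its frame.
  intptr_t reserved[4] = {};
  frame_pointers_t* f = reinterpret_cast<frame_pointers_t*>(reserved);

  intptr_t caller_frame[4] = {};
  f->sp = caller_frame;                       // unwind to the caller's frame
  f->fp = caller_frame + 2;
  f->lr = reinterpret_cast<void*>(0x2000);    // dummy: caller's pc
  f->pc = reinterpret_cast<void*>(0x1000);    // dummy: handle_wrong_method stub

  // The stub then does: ldp rscratch1, rfp; ldp lr, rscratch2; mov sp; br.
  std::printf("resume at pc=%p with sp=%p\n", f->pc, (void*)f->sp);
}
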
// r0 = result
// r1 = str1
// r2 = cnt1
@@ -5745,6 +5789,10 @@ class StubGenerator: public StubCodeGenerator {
// byte_array_inflate stub for large arrays.
StubRoutines::aarch64::_large_byte_array_inflate = generate_large_byte_array_inflate();
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm != NULL) {
StubRoutines::aarch64::_method_entry_barrier = generate_method_entry_barrier();
}
#ifdef COMPILER2
if (UseMultiplyToLenIntrinsic) {
StubRoutines::_multiplyToLen = generate_multiplyToLen();

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -56,6 +56,7 @@ address StubRoutines::aarch64::_string_indexof_linear_ll = NULL;
address StubRoutines::aarch64::_string_indexof_linear_uu = NULL;
address StubRoutines::aarch64::_string_indexof_linear_ul = NULL;
address StubRoutines::aarch64::_large_byte_array_inflate = NULL;
address StubRoutines::aarch64::_method_entry_barrier = NULL;
bool StubRoutines::aarch64::_completed = false;
/**

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -69,6 +69,9 @@ class aarch64 {
static address _string_indexof_linear_uu;
static address _string_indexof_linear_ul;
static address _large_byte_array_inflate;
static address _method_entry_barrier;
static bool _completed;
public:
@@ -171,6 +174,10 @@ class aarch64 {
return _large_byte_array_inflate;
}
static address method_entry_barrier() {
return _method_entry_barrier;
}
static bool complete() {
return _completed;
}

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -62,6 +62,16 @@ int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
assert(!nm->is_osr_method(), "Should not reach here");
// Called upon first entry after being armed
bool may_enter = bs_nm->nmethod_entry_barrier(nm);
// Diagnostic option to force deoptimization 1 in 3 times. It is otherwise
// a very rare event.
if (DeoptimizeNMethodBarriersALot) {
static volatile uint32_t counter=0;
if (Atomic::add(&counter, 1u) % 3 == 0) {
may_enter = false;
}
}
if (!may_enter) {
log_trace(nmethod, barrier)("Deoptimizing nmethod: " PTR_FORMAT, p2i(nm));
bs_nm->deoptimize(nm, return_address_ptr);
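
The DeoptimizeNMethodBarriersALot path above forces roughly every third armed entry through the deoptimization route. The counting trick in isolation, as a sketch with std::atomic standing in for HotSpot's Atomic::add (which returns the new value):

#include <atomic>
#include <cstdint>
#include <cstdio>

std::atomic<uint32_t> counter{0};

// Returns true when this entry should be forced to deoptimize.
bool force_deopt() {
  // fetch_add returns the old value; HotSpot's Atomic::add returns the new
  // one, hence the +1 to keep the same 1-in-3 cadence.
  return (counter.fetch_add(1) + 1) % 3 == 0;
}

int main() {
  for (int i = 0; i < 6; i++) {
    std::printf("entry %d: %s\n", i, force_deopt() ? "deopt" : "enter");
  }
}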

@@ -2483,7 +2483,8 @@ const size_t minimumSymbolTableSize = 1024;
product(bool, UseEmptySlotsInSupers, true, \
"Allow allocating fields in empty slots of super-classes") \
\
diagnostic(bool, DeoptimizeNMethodBarriersALot, false, \
"Make nmethod barriers deoptimise a lot.") \
// Interface macros
#define DECLARE_PRODUCT_FLAG(type, name, value, doc) extern "C" type name;

@@ -2851,6 +2851,12 @@ void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
CodeBuffer buffer(buf);
double locs_buf[20];
buffer.insts()->initialize_shared_locs((relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
#if defined(AARCH64)
// On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
// in the constant pool to ensure ordering between the barrier and oop
// accesses. For native wrappers we therefore need a constant pool.
buffer.initialize_consts_size(8);
#endif
MacroAssembler _masm(&buffer);
// Fill in the signature array, for the calling-convention call.

@@ -35,6 +35,18 @@ import java.io.IOException;
* @summary Stress ZGC
* @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx384m -server -XX:+UseZGC gc.stress.gcbasher.TestGCBasherWithZ 120000
*/
/*
* @test TestGCBasherDeoptWithZ
* @key gc stress
* @library /
* @requires vm.gc.Z
* @requires vm.flavor == "server" & !vm.emulatedClient & !vm.graal.enabled & vm.opt.ClassUnloading != false
* @summary Stress ZGC with nmethod barrier forced deoptimization enabled.
* @run main/othervm/timeout=200 -Xlog:gc*=info,nmethod+barrier=trace -Xmx384m -server -XX:+UseZGC
* -XX:+UnlockDiagnosticVMOptions -XX:+DeoptimizeNMethodBarriersALot -XX:-Inline
* gc.stress.gcbasher.TestGCBasherWithZ 120000
*/
public class TestGCBasherWithZ {
public static void main(String[] args) throws IOException {
TestGCBasher.main(args);