8325821: [REDO] use "dmb.ishst+dmb.ishld" for release barrier

Reviewed-by: shade, aph
Authored by Kuai Wei on 2024-06-10 12:57:03 +00:00, committed by Aleksey Shipilev
parent e7dc76b577
commit 2a242db01e
9 changed files with 523 additions and 13 deletions

View File

@@ -7780,7 +7780,7 @@ instruct membar_acquire() %{
   ins_cost(VOLATILE_REF_COST);

   format %{ "membar_acquire\n\t"
-            "dmb ish" %}
+            "dmb ishld" %}

   ins_encode %{
     __ block_comment("membar_acquire");
@@ -7834,11 +7834,13 @@ instruct membar_release() %{
   ins_cost(VOLATILE_REF_COST);

   format %{ "membar_release\n\t"
-            "dmb ish" %}
+            "dmb ishst\n\tdmb ishld" %}

   ins_encode %{
     __ block_comment("membar_release");
-    __ membar(Assembler::LoadStore|Assembler::StoreStore);
+    // These will be merged if AlwaysMergeDMB is enabled.
+    __ membar(Assembler::StoreStore);
+    __ membar(Assembler::LoadStore);
   %}
   ins_pipe(pipe_serial);
 %}
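For context: a release barrier only has to order earlier loads and stores ahead of later stores (LoadStore|StoreStore). A full "dmb ish" additionally orders stores against later loads (StoreLoad), which is the expensive ordering this split avoids. A minimal standalone C++ sketch of the publication pattern such a barrier protects (illustrative only, not HotSpot code):

#include <atomic>

// Release ordering needs LoadStore|StoreStore, which "dmb ishst; dmb ishld"
// covers; it does not need the StoreLoad ordering a full "dmb ish" provides.
int payload;
std::atomic<bool> published{false};

void writer() {
  payload = 42;                                         // plain store
  std::atomic_thread_fence(std::memory_order_release);  // the release barrier
  published.store(true, std::memory_order_relaxed);     // publishing store
}

bool reader() {
  if (published.load(std::memory_order_acquire)) {
    return payload == 42;                               // guaranteed to hold
  }
  return false;
}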

View File

@@ -124,6 +124,8 @@ define_pd_global(intx, InlineSmallCode, 1000);
           range(1, 99) \
   product(ccstr, UseBranchProtection, "none", \
           "Branch Protection to use: none, standard, pac-ret") \
+  product(bool, AlwaysMergeDMB, true, DIAGNOSTIC, \
+          "Always merge DMB instructions in code emission") \
 // end of ARCH_FLAGS
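Because the flag is declared DIAGNOSTIC, toggling it on a product build requires unlocking diagnostic options first, e.g. java -XX:+UnlockDiagnosticVMOptions -XX:-AlwaysMergeDMB. The default of true keeps the previous always-merge behavior unless a CPU-specific check (see the vm_version hunk below) turns it off.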

View File

@@ -2350,14 +2350,36 @@ void MacroAssembler::membar(Membar_mask_bits order_constraint) {
   address last = code()->last_insn();
   if (last != nullptr && nativeInstruction_at(last)->is_Membar() && prev == last) {
     NativeMembar *bar = NativeMembar_at(prev);
-    // We are merging two memory barrier instructions. On AArch64 we
-    // can do this simply by ORing them together.
-    bar->set_kind(bar->get_kind() | order_constraint);
-    BLOCK_COMMENT("merged membar");
-  } else {
-    code()->set_last_insn(pc());
-    dmb(Assembler::barrier(order_constraint));
-  }
+    if (AlwaysMergeDMB) {
+      bar->set_kind(bar->get_kind() | order_constraint);
+      BLOCK_COMMENT("merged membar(always)");
+      return;
+    }
+    // Don't promote DMB ST|DMB LD to DMB (a full barrier) because
+    // doing so would introduce a StoreLoad which the caller did not
+    // intend.
+    if (bar->get_kind() == order_constraint
+        || bar->get_kind() == AnyAny
+        || order_constraint == AnyAny) {
+      // We are merging two memory barrier instructions. On AArch64 we
+      // can do this simply by ORing them together.
+      bar->set_kind(bar->get_kind() | order_constraint);
+      BLOCK_COMMENT("merged membar");
+      return;
+    } else {
+      // For a special case like "DMB ST; DMB LD; DMB ST", the last DMB can be skipped.
+      // We need to check the last two instructions.
+      address prev2 = prev - NativeMembar::instruction_size;
+      if (last != code()->last_label() && nativeInstruction_at(prev2)->is_Membar()) {
+        NativeMembar *bar2 = NativeMembar_at(prev2);
+        assert(bar2->get_kind() == order_constraint, "it should have been merged before");
+        BLOCK_COMMENT("merged membar(elided)");
+        return;
+      }
+    }
+  }
+  code()->set_last_insn(pc());
+  dmb(Assembler::barrier(order_constraint));
 }

 bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) {
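The merge policy above condenses into a single predicate: OR-merging two barriers is only safe when the result does not pick up ordering the caller never asked for. A standalone sketch of that rule (names and values are illustrative, not HotSpot's actual encodings):

#include <cstdio>

// Illustrative kinds in the spirit of Assembler::Membar_mask_bits. On AArch64
// the merge ORs the DMB kind fields, and ORing the ishld and ishst encodings
// yields the full-barrier encoding ish, silently adding StoreLoad ordering.
// Hence: merge only equal kinds, or pairs involving a full barrier.
enum Kind { LoadLoad = 1, StoreStore = 2, LoadStore = 4, AnyAny = 0xF };

bool can_or_merge(int prev, int next) {
  return prev == next || prev == AnyAny || next == AnyAny;
}

int main() {
  printf("%d\n", can_or_merge(StoreStore, StoreStore)); // 1: same kind merges
  printf("%d\n", can_or_merge(LoadStore, AnyAny));      // 1: a full barrier absorbs it
  printf("%d\n", can_or_merge(StoreStore, LoadStore));  // 0: would promote to a full dmb
}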

View File

@@ -150,6 +150,7 @@ class MacroAssembler: public Assembler {
   void bind(Label& L) {
     Assembler::bind(L);
     code()->clear_last_insn();
+    code()->set_last_label(pc());
   }
   void membar(Membar_mask_bits order_constraint);
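Recording the bind address is what lets membar() above refuse to merge across a label: a label is a potential jump target, so control may enter at it without having executed a barrier emitted before it, and both barriers must therefore survive. The merge_dmb_block_by_label gtest below exercises exactly this case.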

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2015, 2020, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -230,6 +230,9 @@ void VM_Version::initialize() {
     if (FLAG_IS_DEFAULT(OnSpinWaitInstCount)) {
       FLAG_SET_DEFAULT(OnSpinWaitInstCount, 1);
     }
+    if (FLAG_IS_DEFAULT(AlwaysMergeDMB)) {
+      FLAG_SET_DEFAULT(AlwaysMergeDMB, false);
+    }
   }

   if (_cpu == CPU_ARM) {
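Judging by the surrounding context (the OnSpinWaitInstCount tuning above), this override sits in a CPU-specific branch, so merging remains the default everywhere and only cores known to handle back-to-back barriers well opt out; on those, the split "dmb ishst; dmb ishld" pair is presumably cheaper than a merged full "dmb ish".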

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -928,6 +928,10 @@ void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
   // Move all the code and relocations to the new blob:
   relocate_code_to(&cb);

+  // Some internal addresses (_last_insn, _last_label) are used during code
+  // emission; adjust them on expansion.
+  adjust_internal_address(insts_begin(), cb.insts_begin());
+
   // Copy the temporary code buffer into the current code buffer.
   // Basically, do {*this = cb}, except for some control information.
   this->take_over_code_from(&cb);
@@ -949,6 +953,15 @@ void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
 #endif //PRODUCT
 }

+void CodeBuffer::adjust_internal_address(address from, address to) {
+  if (_last_insn != nullptr) {
+    _last_insn += to - from;
+  }
+  if (_last_label != nullptr) {
+    _last_label += to - from;
+  }
+}
+
 void CodeBuffer::take_over_code_from(CodeBuffer* cb) {
   // Must already have disposed of the old blob somehow.
   assert(blob() == nullptr, "must be empty");
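The adjustment itself is plain pointer rebasing: a cached address into the old buffer stays meaningful only if shifted by the displacement the contents moved. A self-contained illustration (names are mine, not HotSpot's):

#include <cstdio>

// When a buffer's contents move from `from` to `to`, a cached raw pointer
// into it must be shifted by the same displacement to stay valid. This
// mirrors what adjust_internal_address() does for _last_insn/_last_label.
static char* rebase(char* cached, char* from, char* to) {
  return cached == nullptr ? nullptr : cached + (to - from);
}

int main() {
  char old_buf[16];
  char new_buf[32];
  char* last_insn = old_buf + 8;                   // cached address into old_buf
  last_insn = rebase(last_insn, old_buf, new_buf);
  printf("offset preserved: %d\n", (int)(last_insn - new_buf)); // prints 8
}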

View File

@@ -433,6 +433,7 @@ class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
   Arena* _overflow_arena;
   address _last_insn;  // used to merge consecutive memory barriers, loads or stores.
+  address _last_label; // record the last bound label's address; it is also the start of the current basic block.
   SharedStubToInterpRequests* _shared_stub_to_interp_requests; // used to collect requests for shared interpreter stubs
   SharedTrampolineRequests* _shared_trampoline_requests; // used to collect requests for shared trampolines
@@ -457,6 +458,7 @@ class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
     _oop_recorder = nullptr;
     _overflow_arena = nullptr;
     _last_insn = nullptr;
+    _last_label = nullptr;
     _finalize_stubs = false;
     _shared_stub_to_interp_requests = nullptr;
     _shared_trampoline_requests = nullptr;
@@ -510,6 +512,9 @@ class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
   // moves code sections to new buffer (assumes relocs are already in there)
   void relocate_code_to(CodeBuffer* cb) const;

+  // adjust some internal addresses during expansion
+  void adjust_internal_address(address from, address to);
+
   // set up a model of the final layout of my contents
   void compute_final_layout(CodeBuffer* dest) const;
@@ -679,6 +684,9 @@ class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
   void set_last_insn(address a) { _last_insn = a; }
   void clear_last_insn() { set_last_insn(nullptr); }

+  address last_label() const { return _last_label; }
+  void set_last_label(address a) { _last_label = a; }
+
 #ifndef PRODUCT
   AsmRemarks &asm_remarks() { return _asm_remarks; }
   DbgStrings &dbg_strings() { return _dbg_strings; }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2020, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -28,8 +28,10 @@
 #include "asm/assembler.hpp"
 #include "asm/assembler.inline.hpp"
 #include "asm/macroAssembler.hpp"
+#include "compiler/disassembler.hpp"
+#include "memory/resourceArea.hpp"
 #include "nativeInst_aarch64.hpp"
 #include "unittest.hpp"

 #define __ _masm.
@@ -81,4 +83,376 @@ TEST_VM(AssemblerAArch64, validate) {
BufferBlob::free(b);
}
constexpr uint32_t test_encode_dmb_ld = 0xd50339bf;
constexpr uint32_t test_encode_dmb_st = 0xd5033abf;
constexpr uint32_t test_encode_dmb = 0xd5033bbf;
constexpr uint32_t test_encode_nop = 0xd503201f;
static void asm_dump(address start, address end) {
ResourceMark rm;
stringStream ss;
ss.print_cr("Insns:");
Disassembler::decode(start, end, &ss);
printf("%s\n", ss.as_string());
}
void test_merge_dmb() {
BufferBlob* b = BufferBlob::create("aarch64Test", 400);
CodeBuffer code(b);
MacroAssembler _masm(&code);
{
// merge with same type
__ membar(Assembler::Membar_mask_bits::StoreStore);
__ membar(Assembler::Membar_mask_bits::StoreStore);
__ membar(Assembler::Membar_mask_bits::StoreStore);
__ nop();
__ membar(Assembler::Membar_mask_bits::LoadStore);
__ membar(Assembler::Membar_mask_bits::LoadStore);
__ membar(Assembler::Membar_mask_bits::LoadStore);
__ membar(Assembler::Membar_mask_bits::LoadStore);
__ nop();
// merge with high rank
__ membar(Assembler::Membar_mask_bits::LoadStore);
__ membar(Assembler::Membar_mask_bits::LoadStore);
__ membar(Assembler::Membar_mask_bits::AnyAny);
__ membar(Assembler::Membar_mask_bits::StoreStore);
__ membar(Assembler::Membar_mask_bits::StoreStore);
__ nop();
// merge with different type
__ membar(Assembler::Membar_mask_bits::LoadStore);
__ membar(Assembler::Membar_mask_bits::StoreStore);
__ membar(Assembler::Membar_mask_bits::LoadStore);
__ membar(Assembler::Membar_mask_bits::StoreStore);
}
asm_dump(code.insts()->start(), code.insts()->end());
// AlwaysMergeDMB
static const unsigned int insns1[] = {
test_encode_dmb_st,
test_encode_nop,
test_encode_dmb_ld,
test_encode_nop,
test_encode_dmb,
test_encode_nop,
test_encode_dmb,
};
// !AlwaysMergeDMB
static const unsigned int insns2[] = {
test_encode_dmb_st,
test_encode_nop,
test_encode_dmb_ld,
test_encode_nop,
test_encode_dmb,
test_encode_nop,
test_encode_dmb_ld,
test_encode_dmb_st,
};
if (AlwaysMergeDMB) {
EXPECT_EQ(code.insts()->size(), (CodeSection::csize_t)(sizeof insns1));
asm_check((const unsigned int *)code.insts()->start(), insns1, sizeof insns1 / sizeof insns1[0]);
} else {
EXPECT_EQ(code.insts()->size(), (CodeSection::csize_t)(sizeof insns2));
asm_check((const unsigned int *)code.insts()->start(), insns2, sizeof insns2 / sizeof insns2[0]);
}
BufferBlob::free(b);
}
TEST_VM(AssemblerAArch64, merge_dmb_1) {
FlagSetting fs(AlwaysMergeDMB, true);
test_merge_dmb();
}
TEST_VM(AssemblerAArch64, merge_dmb_2) {
FlagSetting fs(AlwaysMergeDMB, false);
test_merge_dmb();
}
TEST_VM(AssemblerAArch64, merge_dmb_block_by_label) {
BufferBlob* b = BufferBlob::create("aarch64Test", 400);
CodeBuffer code(b);
MacroAssembler _masm(&code);
{
Label l;
// merging cannot cross the label
__ membar(Assembler::Membar_mask_bits::StoreStore);
__ bind(l);
__ membar(Assembler::Membar_mask_bits::StoreStore);
}
asm_dump(code.insts()->start(), code.insts()->end());
static const unsigned int insns[] = {
0xd5033abf, // dmb.ishst
0xd5033abf, // dmb.ishst
};
EXPECT_EQ(code.insts()->size(), (CodeSection::csize_t)(sizeof insns));
asm_check((const unsigned int *)code.insts()->start(), insns, sizeof insns / sizeof insns[0]);
BufferBlob::free(b);
}
TEST_VM(AssemblerAArch64, merge_dmb_after_expand) {
ResourceMark rm;
BufferBlob* b = BufferBlob::create("aarch64Test", 400);
CodeBuffer code(b);
code.set_blob(b);
MacroAssembler _masm(&code);
{
__ membar(Assembler::Membar_mask_bits::StoreStore);
code.insts()->maybe_expand_to_ensure_remaining(50000);
__ membar(Assembler::Membar_mask_bits::StoreStore);
}
asm_dump(code.insts()->start(), code.insts()->end());
static const unsigned int insns[] = {
0xd5033abf, // dmb.ishst
};
EXPECT_EQ(code.insts()->size(), (CodeSection::csize_t)(sizeof insns));
asm_check((const unsigned int *)code.insts()->start(), insns, sizeof insns / sizeof insns[0]);
}
void expect_dmbld(void* addr) {
if (*((uint32_t *) addr) != test_encode_dmb_ld) {
tty->print_cr("Expected dmb.ld");
FAIL();
}
}
void expect_dmbst(void* addr) {
if (*((uint32_t *) addr) != test_encode_dmb_st) {
tty->print_cr("Expected dmb.st");
FAIL();
}
}
void expect_dmb(void* addr) {
if (*((uint32_t *) addr) != test_encode_dmb) {
tty->print_cr("Expected dmb");
FAIL();
}
}
void expect_any_dmb(void* addr) {
uint32_t encode = *((uint32_t *) addr);
if (encode != test_encode_dmb && encode != test_encode_dmb_ld && encode != test_encode_dmb_st) {
tty->print_cr("Expected a dmb.* instruction");
FAIL();
}
}
void expect_different_dmb_kind(void* addr) {
uint32_t pos1 = *((uint32_t *) addr);
uint32_t pos2 = *(((uint32_t *) addr) + 1);
if (pos1 == pos2) {
tty->print_cr("Expected different dmb kind");
FAIL();
}
}
void expect_dmb_at_least_one(void* addr) {
uint32_t pos1 = *((uint32_t *) addr);
uint32_t pos2 = *(((uint32_t *) addr) + 1);
if (pos1 != test_encode_dmb && pos2 != test_encode_dmb) {
tty->print_cr("Expected at least one dmb");
FAIL();
}
}
void expect_dmb_none(void* addr) {
uint32_t pos1 = *((uint32_t *) addr);
uint32_t pos2 = *(((uint32_t *) addr) + 1);
if (pos1 == test_encode_dmb || pos2 == test_encode_dmb) {
tty->print_cr("Expected no dmb");
FAIL();
}
}
void test_merge_dmb_all_kinds() {
BufferBlob* b = BufferBlob::create("aarch64Test", 20000);
CodeBuffer code(b);
MacroAssembler _masm(&code);
constexpr int count = 5;
struct {
const char* label;
Assembler::Membar_mask_bits flavor;
// Two groups of three bits describing the ordering; they can be OR-ed to figure out composite semantics.
// The first group describes ops before the barrier, the second group describes ops after the barrier.
// "001" means "load", "010" means "store", "100" means "any".
int mask;
} kind[count] = {
{"storestore", Assembler::StoreStore, 0b010010},
{"loadstore", Assembler::LoadStore, 0b001010},
{"loadload", Assembler::LoadLoad, 0b001001},
{"storeload", Assembler::StoreLoad, 0b100100}, // quirk: StoreLoad is as powerful as AnyAny
{"anyany", Assembler::AnyAny, 0b100100},
};
for (int b1 = 0; b1 < count; b1++) {
for (int b2 = 0; b2 < count; b2++) {
for (int b3 = 0; b3 < count; b3++) {
for (int b4 = 0; b4 < count; b4++) {
// tty->print_cr("%s + %s + %s + %s", kind[b1].label, kind[b2].label, kind[b3].label, kind[b4].label);
address start = __ pc();
__ membar(kind[b1].flavor);
__ membar(kind[b2].flavor);
__ membar(kind[b3].flavor);
__ membar(kind[b4].flavor);
address end = __ pc();
__ nop();
size_t size = pointer_delta(end, start, 1);
if (AlwaysMergeDMB) {
// Expect only a single barrier.
EXPECT_EQ(size, (size_t) NativeMembar::instruction_size);
} else {
EXPECT_LE(size, (size_t) NativeMembar::instruction_size * 2);
}
// Composite ordering for this group of barriers.
int composite_mask = kind[b1].mask | kind[b2].mask | kind[b3].mask | kind[b4].mask;
if (size == NativeMembar::instruction_size) {
// If there is a single barrier, we can easily test its type.
switch (composite_mask) {
case 0b001001:
case 0b001010:
case 0b001011:
case 0b001101:
case 0b001110:
case 0b001111:
// Any combination of Load(Load|Store|Any) gets dmb.ld
expect_dmbld(start);
break;
case 0b010010:
// Only StoreStore gets dmb.st
expect_dmbst(start);
break;
default:
// Everything else gets folded into full dmb
expect_dmb(start);
break;
}
} else if (size == 2 * NativeMembar::instruction_size) {
// There are two barriers. Make a few sanity checks.
// They must be different kind
expect_any_dmb(start);
expect_any_dmb(start + NativeMembar::instruction_size);
expect_different_dmb_kind(start);
if ((composite_mask & 0b100100) != 0) {
// There was "any" barrier in the group, a full dmb is expected
expect_dmb_at_least_one(start);
} else {
// Otherwise expect no full dmb
expect_dmb_none(start);
}
} else {
// Merging code does not produce this result.
FAIL();
}
}
}
}
}
BufferBlob::free(b);
}
TEST_VM(AssemblerAArch64, merge_dmb_all_kinds_1) {
FlagSetting fs(AlwaysMergeDMB, true);
test_merge_dmb_all_kinds();
}
TEST_VM(AssemblerAArch64, merge_dmb_all_kinds_2) {
FlagSetting fs(AlwaysMergeDMB, false);
test_merge_dmb_all_kinds();
}
TEST_VM(AssemblerAArch64, merge_ldst) {
BufferBlob* b = BufferBlob::create("aarch64Test", 400);
CodeBuffer code(b);
MacroAssembler _masm(&code);
{
Label l;
// merge ld/st into ldp/stp
__ ldr(r0, Address(sp, 8));
__ ldr(r1, Address(sp, 0));
__ nop();
__ str(r0, Address(sp, 0));
__ str(r1, Address(sp, 8));
__ nop();
__ ldrw(r0, Address(sp, 0));
__ ldrw(r1, Address(sp, 4));
__ nop();
__ strw(r0, Address(sp, 4));
__ strw(r1, Address(sp, 0));
__ nop();
// cannot merge
__ ldrw(r0, Address(sp, 4));
__ ldr(r1, Address(sp, 8));
__ nop();
__ ldrw(r0, Address(sp, 0));
__ ldrw(r1, Address(sp, 8));
__ nop();
__ str(r0, Address(sp, 0));
__ bind(l); // block by label
__ str(r1, Address(sp, 8));
__ nop();
}
asm_dump(code.insts()->start(), code.insts()->end());
static const unsigned int insns1[] = {
0xa94003e1, // ldp x1, x0, [sp]
0xd503201f, // nop
0xa90007e0, // stp x0, x1, [sp]
0xd503201f, // nop
0x294007e0, // ldp w0, w1, [sp]
0xd503201f, // nop
0x290003e1, // stp w1, w0, [sp]
0xd503201f, // nop
0xb94007e0, // ldr w0, [sp, 4]
0xf94007e1, // ldr x1, [sp, 8]
0xd503201f, // nop
0xb94003e0, // ldr w0, [sp]
0xb9400be1, // ldr w1, [sp, 8]
0xd503201f, // nop
0xf90003e0, // str x0, [sp]
0xf90007e1, // str x1, [sp, 8]
0xd503201f, // nop
};
EXPECT_EQ(code.insts()->size(), (CodeSection::csize_t)(sizeof insns1));
asm_check((const unsigned int *)code.insts()->start(), insns1, sizeof insns1 / sizeof insns1[0]);
BufferBlob::free(b);
}
TEST_VM(AssemblerAArch64, merge_ldst_after_expand) {
ResourceMark rm;
BufferBlob* b = BufferBlob::create("aarch64Test", 400);
CodeBuffer code(b);
code.set_blob(b);
MacroAssembler _masm(&code);
{
__ ldr(r0, Address(sp, 8));
code.insts()->maybe_expand_to_ensure_remaining(10000);
__ ldr(r1, Address(sp, 0));
__ nop();
__ str(r0, Address(sp, 0));
code.insts()->maybe_expand_to_ensure_remaining(100000);
__ str(r1, Address(sp, 8));
__ nop();
}
asm_dump(code.insts()->start(), code.insts()->end());
static const unsigned int insns[] = {
0xa94003e1, // ldp x1, x0, [sp]
0xd503201f, // nop
0xa90007e0, // stp x0, x1, [sp]
0xd503201f, // nop
};
EXPECT_EQ(code.insts()->size(), (CodeSection::csize_t)(sizeof insns));
asm_check((const unsigned int *)code.insts()->start(), insns, sizeof insns / sizeof insns[0]);
}
#endif // AARCH64
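The test_encode_* constants at the top of the file are raw A64 instruction words; the DMB flavor sits in the four-bit option field at bits 8-11, where 0b1001 is ishld, 0b1010 is ishst and 0b1011 is ish. A small standalone decoder for cross-checking those expected encodings (illustrative, not part of the patch):

#include <cstdint>
#include <cstdio>

// Decode the DMB "option" field (bits 8..11) of an A64 instruction word.
static const char* dmb_option(uint32_t insn) {
  switch ((insn >> 8) & 0xF) {
    case 0b1011: return "ish";    // full barrier
    case 0b1010: return "ishst";  // store-store
    case 0b1001: return "ishld";  // load-load / load-store
    default:     return "other";
  }
}

int main() {
  printf("%s %s %s\n",
         dmb_option(0xd50339bf),   // ishld
         dmb_option(0xd5033abf),   // ishst
         dmb_option(0xd5033bbf));  // ish
}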

View File

@@ -0,0 +1,85 @@
/*
* Copyright (c) 2024, Alibaba Group Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.openjdk.bench.vm.compiler;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.*;
import java.util.concurrent.TimeUnit;
import org.openjdk.jmh.infra.Blackhole;
/* Test allocation speed of objects with and without a final field. */
@BenchmarkMode(Mode.Throughput)
@OutputTimeUnit(TimeUnit.SECONDS)
@State(Scope.Benchmark)
@Warmup(iterations = 5, time = 3, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 3, time = 3, timeUnit = TimeUnit.SECONDS)
@Fork(value = 3)
public class FinalFieldInitialize {
final static int LEN = 100_000;
Object arr[] = null;
@Setup
public void setup(){
arr = new Object[LEN];
}
@Benchmark
public void testAlloc(Blackhole bh) {
for (int i=0; i<LEN; i++) {
arr[i] = new TObj();
}
bh.consume(arr);
}
@Benchmark
public void testAllocWithFinal(Blackhole bh) {
for (int i=0; i<LEN; i++) {
arr[i] = new TObjWithFinal();
}
bh.consume(arr);
}
}
class TObj {
private int i;
private long l;
private boolean b;
public TObj() {
i = 10;
l = 100L;
b = true;
}
}
class TObjWithFinal {
private int i;
private long l;
private final boolean b;
public TObjWithFinal() {
i = 10;
l = 100L;
b = true;
}
}
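As a usage note: in a JDK checkout this microbenchmark can typically be run with make test TEST="micro:vm.compiler.FinalFieldInitialize". testAllocWithFinal is the interesting case, since the final field makes C2 emit the release barrier this patch splits at the end of the constructor, while testAlloc provides the barrier-free baseline.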