8238355: Update Graal

Reviewed-by: kvn
This commit is contained in:
Igor Veresov 2020-02-20 10:11:07 -08:00
parent 2d93a28447
commit 23458bf4c5
375 changed files with 8543 additions and 3581 deletions

View File

@ -40,15 +40,17 @@ void NativeInstruction::wrote(int offset) {
ICache::invalidate_word(addr_at(offset));
}
#ifdef ASSERT
void NativeLoadGot::report_and_fail() const {
tty->print_cr("Addr: " INTPTR_FORMAT, p2i(instruction_address()));
tty->print_cr("Addr: " INTPTR_FORMAT " Code: %x %x %x", p2i(instruction_address()),
(has_rex ? ubyte_at(0) : 0), ubyte_at(rex_size), ubyte_at(rex_size + 1));
fatal("not a indirect rip mov to rbx");
}
void NativeLoadGot::verify() const {
if (has_rex) {
int rex = ubyte_at(0);
if (rex != rex_prefix) {
if (rex != rex_prefix && rex != rex_b_prefix) {
report_and_fail();
}
}
@ -62,6 +64,7 @@ void NativeLoadGot::verify() const {
report_and_fail();
}
}
#endif
intptr_t NativeLoadGot::data() const {
return *(intptr_t *) got_address();
@ -149,14 +152,30 @@ address NativeGotJump::destination() const {
return *got_entry;
}
#ifdef ASSERT
void NativeGotJump::report_and_fail() const {
tty->print_cr("Addr: " INTPTR_FORMAT " Code: %x %x %x", p2i(instruction_address()),
(has_rex() ? ubyte_at(0) : 0), ubyte_at(rex_size()), ubyte_at(rex_size() + 1));
fatal("not a indirect rip jump");
}
void NativeGotJump::verify() const {
int inst = ubyte_at(0);
if (has_rex()) {
int rex = ubyte_at(0);
if (rex != rex_prefix) {
report_and_fail();
}
}
int inst = ubyte_at(rex_size());
if (inst != instruction_code) {
tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
inst);
fatal("not a indirect rip jump");
report_and_fail();
}
int modrm = ubyte_at(rex_size() + 1);
if (modrm != modrm_code) {
report_and_fail();
}
}
#endif
void NativeCall::verify() {
// Make sure code pattern is actually a call imm32 instruction.

View File

@ -455,9 +455,10 @@ class NativeLoadGot: public NativeInstruction {
static const bool has_rex = false;
static const int rex_size = 0;
#endif
public:
enum Intel_specific_constants {
rex_prefix = 0x48,
rex_b_prefix = 0x49,
instruction_code = 0x8b,
modrm_rbx_code = 0x1d,
modrm_rax_code = 0x05,
@ -465,11 +466,16 @@ public:
offset_offset = 2 + rex_size
};
address instruction_address() const { return addr_at(0); }
address rip_offset_address() const { return addr_at(offset_offset); }
int rip_offset() const { return int_at(offset_offset); }
address return_address() const { return addr_at(instruction_length); }
address got_address() const { return return_address() + rip_offset(); }
#ifdef ASSERT
void report_and_fail() const;
address instruction_address() const { return addr_at(0); }
#endif
public:
address next_instruction_address() const { return return_address(); }
intptr_t data() const;
void set_data(intptr_t data) {
@ -477,9 +483,7 @@ public:
*addr = data;
}
void verify() const;
private:
void report_and_fail() const;
DEBUG_ONLY( void verify() const );
};
inline NativeLoadGot* nativeLoadGot_at(address addr) {
@ -607,27 +611,37 @@ inline NativeGeneralJump* nativeGeneralJump_at(address address) {
}
class NativeGotJump: public NativeInstruction {
public:
enum Intel_specific_constants {
rex_prefix = 0x41,
instruction_code = 0xff,
instruction_offset = 0,
modrm_code = 0x25,
instruction_size = 6,
rip_offset = 2
};
void verify() const;
address instruction_address() const { return addr_at(instruction_offset); }
address destination() const;
address return_address() const { return addr_at(instruction_size); }
int got_offset() const { return (jint) int_at(rip_offset); }
address got_address() const { return return_address() + got_offset(); }
address next_instruction_address() const { return addr_at(instruction_size); }
bool is_GotJump() const { return ubyte_at(0) == instruction_code; }
bool has_rex() const { return ubyte_at(0) == rex_prefix; }
int rex_size() const { return has_rex() ? 1 : 0; }
address return_address() const { return addr_at(instruction_size + rex_size()); }
int got_offset() const { return (jint) int_at(rip_offset + rex_size()); }
#ifdef ASSERT
void report_and_fail() const;
address instruction_address() const { return addr_at(0); }
#endif
public:
address got_address() const { return return_address() + got_offset(); }
address next_instruction_address() const { return return_address(); }
bool is_GotJump() const { return ubyte_at(rex_size()) == instruction_code; }
address destination() const;
void set_jump_destination(address dest) {
address *got_entry = (address *) got_address();
*got_entry = dest;
}
DEBUG_ONLY( void verify() const; )
};
inline NativeGotJump* nativeGotJump_at(address addr) {

View File

@ -337,34 +337,43 @@ public final class BinaryContainer implements SymbolTable {
private void recordConfiguration(GraalHotSpotVMConfig graalHotSpotVMConfig, GraphBuilderConfiguration graphBuilderConfig, int gc) {
// @Checkstyle: stop
// @formatter:off
boolean[] booleanFlags = { graalHotSpotVMConfig.cAssertions, // Debug VM
graalHotSpotVMConfig.useCompressedOops,
graalHotSpotVMConfig.useCompressedClassPointers,
graalHotSpotVMConfig.useTLAB,
graalHotSpotVMConfig.useBiasedLocking,
TieredAOT.getValue(graalOptions),
graalHotSpotVMConfig.enableContended,
graalHotSpotVMConfig.restrictContended,
graphBuilderConfig.omitAssertions(),
};
ArrayList<Boolean> booleanFlagsList = new ArrayList<>();
booleanFlagsList.addAll(Arrays.asList(graalHotSpotVMConfig.cAssertions, // Debug VM
graalHotSpotVMConfig.useCompressedOops,
graalHotSpotVMConfig.useCompressedClassPointers));
if (JavaVersionUtil.JAVA_SPEC < 15) {
// See JDK-8236224. FieldsAllocationStyle and CompactFields flags were removed in JDK15.
booleanFlagsList.add(graalHotSpotVMConfig.compactFields);
}
booleanFlagsList.addAll(Arrays.asList(graalHotSpotVMConfig.useTLAB,
graalHotSpotVMConfig.useBiasedLocking,
TieredAOT.getValue(graalOptions),
graalHotSpotVMConfig.enableContended,
graalHotSpotVMConfig.restrictContended,
graphBuilderConfig.omitAssertions()));
if (JavaVersionUtil.JAVA_SPEC < 14) {
// See JDK-8220049. Thread local handshakes are on by default since JDK14, the command line option has been removed.
booleanFlagsList.add(graalHotSpotVMConfig.threadLocalHandshakes);
}
ArrayList<Integer> intFlagsList = new ArrayList<>();
intFlagsList.addAll(Arrays.asList(graalHotSpotVMConfig.getOopEncoding().getShift(),
graalHotSpotVMConfig.getKlassEncoding().getShift(),
graalHotSpotVMConfig.contendedPaddingWidth));
if (JavaVersionUtil.JAVA_SPEC < 15) {
// See JDK-8236224. FieldsAllocationStyle and CompactFields flags were removed in JDK15.
intFlagsList.add(graalHotSpotVMConfig.fieldsAllocationStyle);
}
intFlagsList.addAll(Arrays.asList(1 << graalHotSpotVMConfig.logMinObjAlignment(),
graalHotSpotVMConfig.codeSegmentSize,
gc));
int[] intFlags = { graalHotSpotVMConfig.getOopEncoding().getShift(),
graalHotSpotVMConfig.getKlassEncoding().getShift(),
graalHotSpotVMConfig.contendedPaddingWidth,
1 << graalHotSpotVMConfig.logMinObjAlignment(),
graalHotSpotVMConfig.codeSegmentSize,
gc
};
// @formatter:on
// @Checkstyle: resume
if (JavaVersionUtil.JAVA_SPEC < 14) {
// See JDK-8220049. Thread local handshakes are on by default since JDK14, the command line option has been removed.
booleanFlags = Arrays.copyOf(booleanFlags, booleanFlags.length + 1);
booleanFlags[booleanFlags.length - 1] = graalHotSpotVMConfig.threadLocalHandshakes;
}
byte[] booleanFlagsAsBytes = flagsToByteArray(booleanFlags);
byte[] booleanFlagsAsBytes = booleanListToByteArray(booleanFlagsList);
int[] intFlags = intFlagsList.stream().mapToInt(i -> i).toArray();
int size0 = configContainer.getByteStreamSize();
// @formatter:off
@ -381,10 +390,10 @@ public final class BinaryContainer implements SymbolTable {
assert size == computedSize;
}
private static byte[] flagsToByteArray(boolean[] flags) {
byte[] byteArray = new byte[flags.length];
for (int i = 0; i < flags.length; ++i) {
byteArray[i] = boolToByte(flags[i]);
/**
 * Encodes a list of boolean flags as a byte array, one byte per flag.
 *
 * @param list the flags to encode, in order
 * @return a new array where element i is the byte encoding of {@code list.get(i)}
 */
private static byte[] booleanListToByteArray(ArrayList<Boolean> list) {
    byte[] encoded = new byte[list.size()];
    int index = 0;
    // Iterate the list directly; iteration order matches positional order for ArrayList.
    for (Boolean flag : list) {
        encoded[index] = boolToByte(flag);
        index++;
    }
    return encoded;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@ import java.util.ArrayList;
import org.graalvm.compiler.code.CompilationResult;
import org.graalvm.compiler.hotspot.HotSpotForeignCallLinkage;
import org.graalvm.compiler.options.OptionValues;
import jdk.tools.jaotc.binformat.BinaryContainer;
import jdk.tools.jaotc.binformat.CodeContainer;
@ -42,6 +43,8 @@ import jdk.vm.ci.meta.ResolvedJavaMethod;
final class CodeSectionProcessor {
private final OptionValues optionValues;
private final TargetDescription target;
private final BinaryContainer binaryContainer;
@ -49,6 +52,7 @@ final class CodeSectionProcessor {
CodeSectionProcessor(DataBuilder dataBuilder) {
this.target = dataBuilder.getBackend().getTarget();
this.binaryContainer = dataBuilder.getBinaryContainer();
this.optionValues = dataBuilder.getBackend().getRuntime().getOptions();
}
/**
@ -131,7 +135,7 @@ final class CodeSectionProcessor {
private StubInformation addCallStub(boolean isVirtualCall) {
final int startOffset = binaryContainer.getCodeContainer().getByteStreamSize();
StubInformation stub = new StubInformation(startOffset, isVirtualCall);
ELFMacroAssembler masm = ELFMacroAssembler.getELFMacroAssembler(target);
ELFMacroAssembler masm = ELFMacroAssembler.getELFMacroAssembler(target, optionValues);
byte[] code;
if (isVirtualCall) {
code = masm.getPLTVirtualEntryCode(stub);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,8 @@
package jdk.tools.jaotc;
import org.graalvm.compiler.options.OptionValues;
import jdk.tools.jaotc.aarch64.AArch64ELFMacroAssembler;
import jdk.tools.jaotc.amd64.AMD64ELFMacroAssembler;
import jdk.vm.ci.aarch64.AArch64;
@ -34,10 +36,10 @@ import jdk.vm.ci.code.TargetDescription;
public interface ELFMacroAssembler {
static ELFMacroAssembler getELFMacroAssembler(TargetDescription target) {
static ELFMacroAssembler getELFMacroAssembler(TargetDescription target, OptionValues optionValues) {
Architecture architecture = target.arch;
if (architecture instanceof AMD64) {
return new AMD64ELFMacroAssembler(target);
return new AMD64ELFMacroAssembler(target, optionValues);
} else if (architecture instanceof AArch64) {
return new AArch64ELFMacroAssembler(target);
} else {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -62,7 +62,7 @@ final class ForeignGotCallSiteRelocationSymbol extends CallSiteRelocationSymbol
}
private static int addPltJump(DataBuilder dataBuilder) {
ELFMacroAssembler masm = ELFMacroAssembler.getELFMacroAssembler(dataBuilder.getBackend().getTarget());
ELFMacroAssembler masm = ELFMacroAssembler.getELFMacroAssembler(dataBuilder.getBackend().getTarget(), dataBuilder.getBackend().getRuntime().getOptions());
byte[] code = masm.getPLTJumpCode(); // It includes alignment nops.
int size = masm.currentEndOfInstruction();
dataBuilder.getBinaryContainer().appendCodeBytes(code, 0, code.length);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -98,7 +98,9 @@ final class MetadataBuilder {
byte[] scopeDesc = metaData.scopesDescBytes();
byte[] relocationInfo = metaData.relocBytes();
byte[] oopMapInfo = metaData.oopMaps();
// this may be null as the field does not exist before JDK 13
byte[] implicitExceptionBytes = HotSpotGraalServices.getImplicitExceptionBytes(metaData);
byte[] exceptionBytes = metaData.exceptionBytes();
// create a global symbol at this position for this method
NativeOrderOutputStream metadataStream = new NativeOrderOutputStream();
@ -160,7 +162,7 @@ final class MetadataBuilder {
metadataStream.put(relocationInfo).align(8);
exceptionOffset.set(metadataStream.position());
metadataStream.put(metaData.exceptionBytes()).align(8);
metadataStream.put(exceptionBytes).align(8);
if (implicitExceptionBytes != null) {
implictTableOffset.set(metadataStream.position());

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,13 +35,14 @@ import org.graalvm.compiler.asm.amd64.AMD64Address;
import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
import jdk.vm.ci.code.TargetDescription;
import org.graalvm.compiler.options.OptionValues;
public final class AMD64ELFMacroAssembler extends AMD64MacroAssembler implements ELFMacroAssembler {
private int currentEndOfInstruction;
public AMD64ELFMacroAssembler(TargetDescription target) {
super(target);
public AMD64ELFMacroAssembler(TargetDescription target, OptionValues optionValues) {
super(target, optionValues);
}
@Override

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -71,6 +71,14 @@ public final class GraalDirectives {
public static void controlFlowAnchor() {
}
/**
* A call to this method will assume a stable dimension array if {@code t} is a constant array
* and {@code i} a constant integer.
*
* NOTE(review): as written this is a plain identity function returning {@code t} unchanged;
* presumably the compiler recognizes calls to this directive and attaches the stable-dimension
* assumption during compilation — confirm against the compiler's directive handling.
*
* @param t the value (typically an array) returned unchanged
* @param i the dimension depth declared stable; unused at run time
* @return {@code t}
*/
public static <T> T assumeStableDimension(T t, @SuppressWarnings("unused") int i) {
return t;
}
/**
* A call to this method will force the compiler to assume this instruction has a visible memory
* effect killing all memory locations.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -46,7 +46,7 @@ import jdk.vm.ci.code.Architecture;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.TargetDescription;
public class AArch64MacroAssemblerTest extends GraalTest {
public class AArch64AddressingModeTest extends GraalTest {
private AArch64MacroAssembler masm;
private TestProtectedAssembler asm;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, Arm Limited. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*

View File

@ -0,0 +1,227 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, Arm Limited and affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.graalvm.compiler.asm.aarch64.test;
import jdk.vm.ci.aarch64.AArch64;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.TargetDescription;
import jdk.vm.ci.runtime.JVMCI;
import org.graalvm.compiler.asm.aarch64.AArch64Assembler;
import org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler;
import org.graalvm.compiler.test.GraalTest;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertArrayEquals;
/**
 * Checks that the macro assembler's {@code mov(dst, imm)} emits the expected instruction
 * sequence for each shape of 32-bit and 64-bit constant (zero, logical immediates, all-zero or
 * all-one 16-bit chunks, and the general case), by comparing its output byte-for-byte against
 * hand-written assembly built with the plain assembler.
 */
public class AArch64MoveConstantTest extends GraalTest {
// masm produces the sequence under test; asm holds the hand-written expected sequence.
private AArch64MacroAssembler masm;
private TestProtectedAssembler asm;
// Destination register receiving each constant, and the zero register.
private Register dst;
private Register zr;
@Before
public void setupEnvironment() {
// Setup AArch64 MacroAssembler and Assembler.
TargetDescription target = JVMCI.getRuntime().getHostJVMCIBackend().getTarget();
masm = new AArch64MacroAssembler(target);
asm = new TestProtectedAssembler(target);
dst = AArch64.r10;
zr = AArch64.zr;
}
/**
* MacroAssembler behavior test for 32-bit constant move.
*/
@Test
public void testMoveIntZero() {
masm.mov(dst, 0); // zero is specially handled by OR(dst, zr, zr).
asm.orr(32, dst, zr, zr, AArch64Assembler.ShiftType.LSL, 0);
compareAssembly();
}
@Test
public void testMoveIntLogicalImm() {
masm.mov(dst, 0x5555_5555); // 0b01010101...0101 is a 32-bit logical immediate.
asm.orr(32, dst, zr, 0x5555_5555);
compareAssembly();
}
@Test
public void testMoveIntMinusOne() {
// All ones: expected to collapse to a single MOVN.
masm.mov(dst, -1);
asm.movn(32, dst, 0, 0);
compareAssembly();
}
@Test
public void testMoveIntHighZero() {
// High half zero: single MOVZ of the low half.
masm.mov(dst, 0x0000_1234);
asm.movz(32, dst, 0x1234, 0);
compareAssembly();
}
@Test
public void testMoveIntLowZero() {
// Low half zero: single MOVZ of the high half (shift 16).
masm.mov(dst, 0x5678_0000);
asm.movz(32, dst, 0x5678, 16);
compareAssembly();
}
@Test
public void testMoveIntHighNeg() {
// High half all ones: MOVN of the inverted low half.
masm.mov(dst, 0xFFFF_CAFE);
asm.movn(32, dst, 0xCAFE ^ 0xFFFF, 0);
compareAssembly();
}
@Test
public void testMoveIntLowNeg() {
// Low half all ones: MOVN of the inverted high half (shift 16).
masm.mov(dst, 0xBABE_FFFF);
asm.movn(32, dst, 0xBABE ^ 0xFFFF, 16);
compareAssembly();
}
@Test
public void testMoveIntCommon() {
// General case: MOVZ low half, then MOVK high half.
masm.mov(dst, 0x1357_BEEF);
asm.movz(32, dst, 0xBEEF, 0);
asm.movk(32, dst, 0x1357, 16);
compareAssembly();
}
/**
* MacroAssembler behavior test for 64-bit constant move.
*/
@Test
public void testMoveLongZero() {
masm.mov(dst, 0L); // zero is specially handled by OR(dst, zr, zr).
asm.orr(64, dst, zr, zr, AArch64Assembler.ShiftType.LSL, 0);
compareAssembly();
}
@Test
public void testMoveLongLogicalImm() {
masm.mov(dst, 0x3333_3333_3333_3333L); // 0b00110011...0011 is a 64-bit logical immediate.
asm.orr(64, dst, zr, 0x3333_3333_3333_3333L);
compareAssembly();
}
@Test
public void testMoveLongSignExtendedLogicalImm() {
masm.mov(dst, 0xFFFF_FFFF_8888_8888L); // 0x88888888 is a 32-bit logical immediate.
// Expected: 32-bit ORR of the logical immediate, then SBFM to sign-extend to 64 bits.
asm.orr(32, dst, zr, 0x8888_8888);
asm.sbfm(64, dst, dst, 0, 31);
compareAssembly();
}
@Test
public void testMoveLongWithTwoZeros() {
// Two zero 16-bit chunks: only the non-zero chunks are materialized.
masm.mov(dst, 0x1357_0000_ABCD_0000L);
asm.movz(64, dst, 0xABCD, 16);
asm.movk(64, dst, 0x1357, 48);
compareAssembly();
}
@Test
public void testMoveLongWithTwoNegs() {
// Two all-ones chunks: MOVN the first non-all-ones chunk (inverted), MOVK the other.
masm.mov(dst, 0x2222_FFFF_FFFF_7777L);
asm.movn(64, dst, 0x7777 ^ 0xFFFF, 0);
asm.movk(64, dst, 0x2222, 48);
compareAssembly();
}
@Test
public void testMoveLongWithOneZero() {
// One zero chunk: MOVZ the first non-zero chunk, MOVK the rest.
masm.mov(dst, 0x0000_6666_5555_4444L);
asm.movz(64, dst, 0x4444, 0);
asm.movk(64, dst, 0x5555, 16);
asm.movk(64, dst, 0x6666, 32);
compareAssembly();
}
@Test
public void testMoveLongWithOneNeg() {
// One all-ones chunk: MOVN the first non-all-ones chunk (inverted), MOVK the rest.
masm.mov(dst, 0xDDDD_CCCC_BBBB_FFFFL);
asm.movn(64, dst, 0xBBBB ^ 0xFFFF, 16);
asm.movk(64, dst, 0xCCCC, 32);
asm.movk(64, dst, 0xDDDD, 48);
compareAssembly();
}
@Test
public void testMoveLongCommon() {
// General case: MOVZ the lowest chunk, then MOVK each remaining chunk in order.
masm.mov(dst, 0x3D38_2A05_B001_1942L);
asm.movz(64, dst, 0x1942, 0);
asm.movk(64, dst, 0xB001, 16);
asm.movk(64, dst, 0x2A05, 32);
asm.movk(64, dst, 0x3D38, 48);
compareAssembly();
}
/**
* Compares assembly generated by the macro assembler to the hand-generated assembly.
*/
private void compareAssembly() {
byte[] expected = asm.close(true);
byte[] actual = masm.close(true);
assertArrayEquals(expected, actual);
}
/**
* Compare constant values with corresponding hex strings.
*/
@Test
public void testConstantHexRepresentation() {
checkInt(0, "0");
checkInt(-1, "FFFFFFFF");
checkInt(0x4B95_0000, "4B950000");
checkInt(0xEE2A, "EE2A");
checkInt(0x31C2_FFFF, "31C2FFFF");
checkInt(0xFFFF_5678, "FFFF5678");
checkInt(0xB39F_01CC, "B39F01CC");
checkLong(0L, "0");
checkLong(-1L, "FFFFFFFFFFFFFFFF");
checkLong(0x94DDL, "94DD");
checkLong(0x351C_0000_7B7BL, "351C00007B7B");
checkLong(0x9012_ABCD_3333_0000L, "9012ABCD33330000");
checkLong(0xFFFFL, "FFFF");
checkLong(0xFFFF_0001L, "FFFF0001");
checkLong(0xFFFF_9302_FFFF_CDEFL, "FFFF9302FFFFCDEF");
checkLong(0x102A_FFFF_FFFF_FFFFL, "102AFFFFFFFFFFFF");
checkLong(0x9E8C_3A50_0BC9_44F8L, "9E8C3A500BC944F8");
}
// Asserts that the uppercase hex rendering of value equals hexString.
private static void checkInt(int value, String hexString) {
assertTrue(Integer.toHexString(value).toUpperCase().equals(hexString), "Expected: " + hexString);
}
private static void checkLong(long value, String hexString) {
assertTrue(Long.toHexString(value).toUpperCase().equals(hexString), "Expected: " + hexString);
}
}

View File

@ -323,8 +323,8 @@ class TestProtectedAssembler extends AArch64Assembler {
}
@Override
protected void ror(int size, Register dst, Register src1, Register src2) {
super.ror(size, dst, src1, src2);
protected void rorv(int size, Register dst, Register src1, Register src2) {
super.rorv(size, dst, src1, src2);
}
@Override

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -82,6 +82,7 @@ import static org.graalvm.compiler.asm.aarch64.AArch64Assembler.Instruction.FSQR
import static org.graalvm.compiler.asm.aarch64.AArch64Assembler.Instruction.FSUB;
import static org.graalvm.compiler.asm.aarch64.AArch64Assembler.Instruction.HINT;
import static org.graalvm.compiler.asm.aarch64.AArch64Assembler.Instruction.HLT;
import static org.graalvm.compiler.asm.aarch64.AArch64Assembler.Instruction.ISB;
import static org.graalvm.compiler.asm.aarch64.AArch64Assembler.Instruction.LDADD;
import static org.graalvm.compiler.asm.aarch64.AArch64Assembler.Instruction.LDAR;
import static org.graalvm.compiler.asm.aarch64.AArch64Assembler.Instruction.LDAXR;
@ -676,6 +677,7 @@ public abstract class AArch64Assembler extends Assembler {
MRS(0xD5300000),
MSR(0xD5100000),
DC(0xD5087000),
ISB(0x000000C0),
BLR_NATIVE(0xc0000000),
@ -2178,14 +2180,14 @@ public abstract class AArch64Assembler extends Assembler {
}
/**
* dst = rotateRight(src1, (src2 & log2(size))).
* dst = rotateRight(src1, (src2 & (size - 1))).
*
* @param size register size. Has to be 32 or 64.
* @param dst general purpose register. May not be null or stackpointer.
* @param src1 general purpose register. May not be null or stackpointer.
* @param src2 general purpose register. May not be null or stackpointer.
*/
protected void ror(int size, Register dst, Register src1, Register src2) {
protected void rorv(int size, Register dst, Register src1, Register src2) {
dataProcessing2SourceOp(RORV, dst, src1, src2, generalFromSize(size));
}
@ -2989,7 +2991,8 @@ public abstract class AArch64Assembler extends Assembler {
LOAD_LOAD(0x9, "ISHLD"),
LOAD_STORE(0x9, "ISHLD"),
STORE_STORE(0xA, "ISHST"),
ANY_ANY(0xB, "ISH");
ANY_ANY(0xB, "ISH"),
SYSTEM(0xF, "SYS");
public final int encoding;
public final String optionName;
@ -3009,6 +3012,13 @@ public abstract class AArch64Assembler extends Assembler {
emitInt(DMB.encoding | BarrierOp | barrierKind.encoding << BarrierKindOffset);
}
/**
* Instruction Synchronization Barrier.
*/
public void isb() {
emitInt(ISB.encoding | BarrierOp | BarrierKind.SYSTEM.encoding << BarrierKindOffset);
}
public void mrs(Register dst, SystemRegister systemRegister) {
emitInt(MRS.encoding | systemRegister.encoding() | rt(dst));
}
@ -3027,13 +3037,56 @@ public abstract class AArch64Assembler extends Assembler {
}
}
void annotateImmediateMovSequence(int pos, int numInstrs) {
if (codePatchingAnnotationConsumer != null) {
codePatchingAnnotationConsumer.accept(new MovSequenceAnnotation(pos, numInstrs));
public abstract static class PatchableCodeAnnotation extends CodeAnnotation {
PatchableCodeAnnotation(int instructionStartPosition) {
super(instructionStartPosition);
}
abstract void patch(int codePos, int relative, byte[] code);
}
/**
 * Contains methods used for patching instruction(s) within AArch64.
 */
public static class PatcherUtil {
    /**
     * Patches a little-endian run of bytes with the bits of a given value.
     *
     * Starting at {@code code[codePos]}, byte i receives the next {@code bitsUsed[i]} bits of
     * the value, placed at bit position {@code offsets[i]} within that byte; all other bits of
     * each byte are preserved. Entries with {@code bitsUsed[i] == 0} are skipped and consume no
     * bits of the value.
     *
     * @param code the array of bytes in which patch is to be performed
     * @param codePos where in the array the patch should be performed
     * @param value the value to be added to the series of bytes
     * @param bitsUsed the number of bits to patch within each byte
     * @param offsets where with the bytes the value should be added
     */
    public static void writeBitSequence(byte[] code, int codePos, int value, int[] bitsUsed, int[] offsets) {
        assert bitsUsed.length == offsets.length : "bitsUsed and offsets parameter arrays do not match";
        int remaining = value;
        for (int i = 0; i < bitsUsed.length; i++) {
            int nbits = bitsUsed[i];
            if (nbits == 0) {
                // Nothing to write into this byte; no bits of the value are consumed either.
                continue;
            }
            int shift = offsets[i];
            int mask = (1 << nbits) - 1;
            // Clear the target bit-field, then OR in the low nbits of the remaining value.
            int kept = code[codePos + i] & ((~(mask << shift)) & 0xFF);
            int patch = ((remaining & mask) << shift) & 0xFF;
            code[codePos + i] = (byte) (kept | patch);
            remaining = remaining >> nbits;
        }
    }

    /**
     * Returns the difference, in pages, between a target position and the current position.
     * NOTE(review): uses truncating integer division — assumes non-negative positions; confirm
     * callers never pass negative values.
     *
     * @param target the target byte position
     * @param curPos the current byte position
     * @param pageSize the page size in bytes
     * @return the signed page-number difference {@code target/pageSize - curPos/pageSize}
     */
    public static int computeRelativePageDifference(int target, int curPos, int pageSize) {
        return target / pageSize - curPos / pageSize;
    }
}
public static class SingleInstructionAnnotation extends CodeAnnotation {
public static class SingleInstructionAnnotation extends PatchableCodeAnnotation {
/**
* The size of the operand, in bytes.
@ -3050,18 +3103,44 @@ public abstract class AArch64Assembler extends Assembler {
this.shift = shift;
this.instruction = instruction;
}
}
public static class MovSequenceAnnotation extends CodeAnnotation {
@Override
public String toString() {
return "SINGLE_INSTRUCTION";
}
/**
* The size of the operand, in bytes.
*/
public final int numInstrs;
@Override
public void patch(int codePos, int relative, byte[] code) {
int curValue = relative;
assert (curValue & ((1 << shift) - 1)) == 0 : "relative offset has incorrect alignment";
curValue = curValue >> shift;
MovSequenceAnnotation(int instructionPosition, int numInstrs) {
super(instructionPosition);
this.numInstrs = numInstrs;
// right this is only BL instructions are being patched here
assert instruction == AArch64Assembler.Instruction.BL : "trying to patch an unexpected instruction";
GraalError.guarantee(NumUtil.isSignedNbit(operandSizeBits, curValue), "value too large to fit into space");
// fill in immediate operand of operandSizeBits starting at offsetBits within
// instruction
int bitsRemaining = operandSizeBits;
int offsetRemaining = offsetBits;
int[] bitsUsed = new int[4];
int[] offsets = new int[4];
for (int i = 0; i < 4; ++i) {
if (offsetRemaining >= 8) {
offsetRemaining -= 8;
continue;
}
offsets[i] = offsetRemaining;
// number of bits to be filled within this byte
int bits = Math.min(8 - offsetRemaining, bitsRemaining);
bitsUsed[i] = bits;
bitsRemaining -= bits;
offsetRemaining = 0;
}
PatcherUtil.writeBitSequence(code, instructionPosition, curValue, bitsUsed, offsets);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,6 +43,7 @@ import static org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler.AddressGene
import org.graalvm.compiler.asm.BranchTargetOutOfBoundsException;
import org.graalvm.compiler.asm.Label;
import org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler.MovSequenceAnnotation.MovAction;
import org.graalvm.compiler.core.common.NumUtil;
import org.graalvm.compiler.debug.GraalError;
@ -502,37 +503,196 @@ public class AArch64MacroAssembler extends AArch64Assembler {
}
}
/**
 * Generates a 32-bit immediate move code sequence.
 *
 * Emits at most two instructions: when either 16-bit half of the constant is all-zeros or
 * all-ones a single MOVZ or MOVN suffices; otherwise a MOVZ/MOVK pair is used.
 *
 * @param dst general purpose register. May not be null, stackpointer or zero-register.
 * @param imm the value to move into the register.
 * @param needsImmAnnotation Flag denoting if annotation should be added.
 */
private void mov32(Register dst, int imm, boolean needsImmAnnotation) {
    MovAction[] includeSet = {MovAction.SKIPPED, MovAction.SKIPPED};
    int pos = position();

    // Decompose the constant into its low and high 16-bit halves.
    int lo16 = imm & 0xFFFF;
    int hi16 = (imm >>> 16) & 0xFFFF;

    if (hi16 == 0) {
        // High half zero: MOVZ materializes the low half directly.
        movz(32, dst, lo16, 0);
        includeSet[0] = MovAction.USED;
    } else if (hi16 == 0xFFFF) {
        // High half all-ones: MOVN writes the inverted low half.
        movn(32, dst, lo16 ^ 0xFFFF, 0);
        includeSet[0] = MovAction.NEGATED;
    } else if (lo16 == 0) {
        // Low half zero: MOVZ the high half into bits 16..31.
        movz(32, dst, hi16, 16);
        includeSet[1] = MovAction.USED;
    } else if (lo16 == 0xFFFF) {
        // Low half all-ones: MOVN the inverted high half into bits 16..31.
        movn(32, dst, hi16 ^ 0xFFFF, 16);
        includeSet[1] = MovAction.NEGATED;
    } else {
        // Neither half is all-0s or all-1s: MOVZ low half, then MOVK high half.
        movz(32, dst, lo16, 0);
        movk(32, dst, hi16, 16);
        includeSet[0] = MovAction.USED;
        includeSet[1] = MovAction.USED;
    }

    if (needsImmAnnotation) {
        annotateImmediateMovSequence(pos, includeSet);
    }
}
/**
* Generates a 64-bit immediate move code sequence.
*
* @param dst general purpose register. May not be null, stackpointer or zero-register.
* @param imm the value to move into the register
* @param annotateImm Flag denoting if annotation should be added.
* @param needsImmAnnotation Flag denoting if annotation should be added.
*/
private void mov64(Register dst, long imm, boolean annotateImm) {
// We have to move all non zero parts of the immediate in 16-bit chunks
int numMovs = 0;
private void mov64(Register dst, long imm, boolean needsImmAnnotation) {
MovAction[] includeSet = {MovAction.SKIPPED, MovAction.SKIPPED, MovAction.SKIPPED, MovAction.SKIPPED};
int pos = position();
boolean firstMove = true;
for (int offset = 0; offset < 64; offset += 16) {
int chunk = (int) (imm >> offset) & NumUtil.getNbitNumberInt(16);
int[] chunks = new int[4];
int zeroCount = 0;
int negCount = 0;
// Split 64-bit imm into 4 chunks and count the numbers of all-0 and all-1 chunks.
for (int i = 0; i < 4; i++) {
int chunk = (int) ((imm >>> (i * 16)) & 0xFFFFL);
if (chunk == 0) {
continue;
zeroCount++;
} else if (chunk == 0xFFFF) {
negCount++;
}
if (firstMove) {
movz(64, dst, chunk, offset);
firstMove = false;
} else {
movk(64, dst, chunk, offset);
chunks[i] = chunk;
}
// Generate code sequence with a combination of MOVZ or MOVN with MOVK.
if (zeroCount == 4) {
// Generate only one MOVZ.
movz(64, dst, 0, 0);
includeSet[0] = MovAction.USED;
} else if (negCount == 4) {
// Generate only one MOVN.
movn(64, dst, 0, 0);
includeSet[0] = MovAction.NEGATED;
} else if (zeroCount == 3) {
// Generate only one MOVZ.
for (int i = 0; i < 4; i++) {
if (chunks[i] != 0) {
movz(64, dst, chunks[i], i * 16);
includeSet[i] = MovAction.USED;
break;
}
}
++numMovs;
} else if (negCount == 3) {
// Generate only one MOVN.
for (int i = 0; i < 4; i++) {
if (chunks[i] != 0xFFFF) {
movn(64, dst, chunks[i] ^ 0xFFFF, i * 16);
includeSet[i] = MovAction.NEGATED;
break;
}
}
} else if (zeroCount == 2) {
// Generate one MOVZ and one MOVK.
int i;
for (i = 0; i < 4; i++) {
if (chunks[i] != 0) {
movz(64, dst, chunks[i], i * 16);
includeSet[i] = MovAction.USED;
break;
}
}
for (int k = i + 1; k < 4; k++) {
if (chunks[k] != 0) {
movk(64, dst, chunks[k], k * 16);
includeSet[k] = MovAction.USED;
break;
}
}
} else if (negCount == 2) {
// Generate one MOVN and one MOVK.
int i;
for (i = 0; i < 4; i++) {
if (chunks[i] != 0xFFFF) {
movn(64, dst, chunks[i] ^ 0xFFFF, i * 16);
includeSet[i] = MovAction.NEGATED;
break;
}
}
for (int k = i + 1; k < 4; k++) {
if (chunks[k] != 0xFFFF) {
movk(64, dst, chunks[k], k * 16);
includeSet[k] = MovAction.USED;
break;
}
}
} else if (zeroCount == 1) {
// Generate one MOVZ and two MOVKs.
int i;
for (i = 0; i < 4; i++) {
if (chunks[i] != 0) {
movz(64, dst, chunks[i], i * 16);
includeSet[i] = MovAction.USED;
break;
}
}
int numMovks = 0;
for (int k = i + 1; k < 4; k++) {
if (chunks[k] != 0) {
movk(64, dst, chunks[k], k * 16);
includeSet[k] = MovAction.USED;
numMovks++;
}
}
assert numMovks == 2;
} else if (negCount == 1) {
// Generate one MOVN and two MOVKs.
int i;
for (i = 0; i < 4; i++) {
if (chunks[i] != 0xFFFF) {
movn(64, dst, chunks[i] ^ 0xFFFF, i * 16);
includeSet[i] = MovAction.NEGATED;
break;
}
}
int numMovks = 0;
for (int k = i + 1; k < 4; k++) {
if (chunks[k] != 0xFFFF) {
movk(64, dst, chunks[k], k * 16);
includeSet[k] = MovAction.USED;
numMovks++;
}
}
assert numMovks == 2;
} else {
// Generate one MOVZ and three MOVKs
movz(64, dst, chunks[0], 0);
movk(64, dst, chunks[1], 16);
movk(64, dst, chunks[2], 32);
movk(64, dst, chunks[3], 48);
includeSet[0] = MovAction.USED;
includeSet[1] = MovAction.USED;
includeSet[2] = MovAction.USED;
includeSet[3] = MovAction.USED;
}
assert !firstMove;
if (annotateImm) {
annotateImmediateMovSequence(pos, numMovs);
if (needsImmAnnotation) {
annotateImmediateMovSequence(pos, includeSet);
}
}
/**
* Loads immediate into register.
*
* @param dst general purpose register. May not be null, zero-register or stackpointer.
* @param imm immediate loaded into register.
*/
public void mov(Register dst, int imm) {
mov(dst, imm, false);
}
/**
* Loads immediate into register.
*
@ -548,13 +708,30 @@ public class AArch64MacroAssembler extends AArch64Assembler {
*
* @param dst general purpose register. May not be null, zero-register or stackpointer.
* @param imm immediate loaded into register.
* @param annotateImm Flag to signal of the immediate value should be annotated.
* @param needsImmAnnotation Flag to signal of the immediate value should be annotated.
*/
public void mov(Register dst, long imm, boolean annotateImm) {
public void mov(Register dst, int imm, boolean needsImmAnnotation) {
if (imm == 0) {
mov(32, dst, zr);
} else if (isLogicalImmediate(imm)) {
or(32, dst, zr, imm);
} else {
mov32(dst, imm, needsImmAnnotation);
}
}
/**
* Loads immediate into register.
*
* @param dst general purpose register. May not be null, zero-register or stackpointer.
* @param imm immediate loaded into register.
* @param needsImmAnnotation Flag to signal of the immediate value should be annotated.
*/
public void mov(Register dst, long imm, boolean needsImmAnnotation) {
assert dst.getRegisterCategory().equals(CPU);
if (imm == 0L) {
movx(dst, zr);
} else if (LogicalImmediateTable.isRepresentable(true, imm) != LogicalImmediateTable.Representable.NO) {
} else if (isLogicalImmediate(imm)) {
or(64, dst, zr, imm);
} else if (imm >> 32 == -1L && (int) imm < 0 && LogicalImmediateTable.isRepresentable((int) imm) != LogicalImmediateTable.Representable.NO) {
// If the higher 32-bit are 1s and the sign bit of the lower 32-bits is set *and* we can
@ -564,20 +741,10 @@ public class AArch64MacroAssembler extends AArch64Assembler {
mov(dst, (int) imm);
sxt(64, 32, dst, dst);
} else {
mov64(dst, imm, annotateImm);
mov64(dst, imm, needsImmAnnotation);
}
}
/**
* Loads immediate into register.
*
* @param dst general purpose register. May not be null, zero-register or stackpointer.
* @param imm immediate loaded into register.
*/
public void mov(Register dst, int imm) {
mov(dst, imm & 0xFFFF_FFFFL);
}
/**
* Generates a 48-bit immediate move code sequence. The immediate may later be updated by
* HotSpot.
@ -601,9 +768,9 @@ public class AArch64MacroAssembler extends AArch64Assembler {
*
* @param dst general purpose register. May not be null, stackpointer or zero-register.
* @param imm The immediate address
* @param annotateImm Flag to signal of the immediate value should be annotated.
* @param needsImmAnnotation Flag to signal of the immediate value should be annotated.
*/
public void movNativeAddress(Register dst, long imm, boolean annotateImm) {
public void movNativeAddress(Register dst, long imm, boolean needsImmAnnotation) {
assert (imm & 0xFFFF_0000_0000_0000L) == 0;
// We have to move all non zero parts of the immediate in 16-bit chunks
boolean firstMove = true;
@ -617,8 +784,9 @@ public class AArch64MacroAssembler extends AArch64Assembler {
movk(64, dst, chunk, offset);
}
}
if (annotateImm) {
annotateImmediateMovSequence(pos, 3);
if (needsImmAnnotation) {
MovAction[] includeSet = {MovAction.USED, MovAction.USED, MovAction.USED};
annotateImmediateMovSequence(pos, includeSet);
}
assert !firstMove;
}
@ -1259,6 +1427,34 @@ public class AArch64MacroAssembler extends AArch64Assembler {
super.asr(size, dst, src, shift);
}
/**
* Rotate right (register). dst = rotateRight(src1, (src2 & (size - 1))).
*
* @param size register size. Has to be 32 or 64.
* @param dst general purpose register. May not be null or stackpointer.
* @param src1 general purpose register. May not be null or stackpointer.
* @param src2 general purpose register. It holds a shift amount from 0 to (size - 1) in its
* bottom 5 bits. May not be null or stackpointer.
*/
@Override
public void rorv(int size, Register dst, Register src1, Register src2) {
super.rorv(size, dst, src1, src2);
}
/**
* Rotate right (immediate). dst = rotateRight(src1, shift).
*
* @param size register size. Has to be 32 or 64.
* @param dst general purpose register. May not be null or stackpointer.
* @param src general purpose register. May not be null or stackpointer.
* @param shift amount by which src is rotated. The value depends on the instruction variant, it
* can be 0 to (size - 1).
*/
public void ror(int size, Register dst, Register src, int shift) {
assert (0 <= shift && shift <= (size - 1));
super.extr(size, dst, src, src, shift);
}
/**
* Clamps shiftAmt into range 0 <= shiftamt < size according to JLS.
*
@ -1320,6 +1516,42 @@ public class AArch64MacroAssembler extends AArch64Assembler {
super.orr(size, dst, src, bimm);
}
/**
* dst = src1 & (~src2).
*
* @param size register size. Has to be 32 or 64.
* @param dst general purpose register. May not be null or stackpointer.
* @param src1 general purpose register. May not be null or stackpointer.
* @param src2 general purpose register. May not be null or stackpointer.
*/
public void bic(int size, Register dst, Register src1, Register src2) {
super.bic(size, dst, src1, src2, ShiftType.LSL, 0);
}
/**
* dst = src1 ^ (~src2).
*
* @param size register size. Has to be 32 or 64.
* @param dst general purpose register. May not be null or stackpointer.
* @param src1 general purpose register. May not be null or stackpointer.
* @param src2 general purpose register. May not be null or stackpointer.
*/
public void eon(int size, Register dst, Register src1, Register src2) {
super.eon(size, dst, src1, src2, ShiftType.LSL, 0);
}
/**
* dst = src1 | (~src2).
*
* @param size register size. Has to be 32 or 64.
* @param dst general purpose register. May not be null or stackpointer.
* @param src1 general purpose register. May not be null or stackpointer.
* @param src2 general purpose register. May not be null or stackpointer.
*/
public void orn(int size, Register dst, Register src1, Register src2) {
super.orn(size, dst, src1, src2, ShiftType.LSL, 0);
}
/**
* dst = ~src.
*
@ -2059,24 +2291,23 @@ public class AArch64MacroAssembler extends AArch64Assembler {
umov(fixedSize, dst, 0, vreg);
}
public interface MacroInstruction {
void patch(int codePos, int relative, byte[] code);
}
/**
* Emits elf patchable adrp ldr sequence.
*/
public void adrpLdr(int srcSize, Register result, AArch64Address a) {
if (codePatchingAnnotationConsumer != null) {
codePatchingAnnotationConsumer.accept(new AdrpLdrMacroInstruction(position()));
codePatchingAnnotationConsumer.accept(new AdrpLdrMacroInstruction(position(), srcSize));
}
super.adrp(a.getBase());
this.ldr(srcSize, result, a);
}
public static class AdrpLdrMacroInstruction extends CodeAnnotation implements MacroInstruction {
public AdrpLdrMacroInstruction(int position) {
public static class AdrpLdrMacroInstruction extends AArch64Assembler.PatchableCodeAnnotation {
public final int srcSize;
public AdrpLdrMacroInstruction(int position, int srcSize) {
super(position);
this.srcSize = srcSize;
}
@Override
@ -2086,11 +2317,50 @@ public class AArch64MacroAssembler extends AArch64Assembler {
@Override
public void patch(int codePos, int relative, byte[] code) {
throw GraalError.unimplemented();
int shiftSize = 0;
switch (srcSize) {
case 64:
shiftSize = 3;
break;
case 32:
shiftSize = 2;
break;
case 16:
shiftSize = 1;
break;
case 8:
shiftSize = 0;
break;
default:
assert false : "srcSize must be either 8, 16, 32, or 64";
}
int pos = instructionPosition;
int targetAddress = pos + relative;
assert shiftSize == 0 || (targetAddress & ((1 << shiftSize) - 1)) == 0 : "shift bits must be zero";
int relativePageDifference = PatcherUtil.computeRelativePageDifference(targetAddress, pos, 1 << 12);
// adrp imm_hi bits
int curValue = (relativePageDifference >> 2) & 0x7FFFF;
int[] adrHiBits = {3, 8, 8};
int[] adrHiOffsets = {5, 0, 0};
PatcherUtil.writeBitSequence(code, pos, curValue, adrHiBits, adrHiOffsets);
// adrp imm_lo bits
curValue = relativePageDifference & 0x3;
int[] adrLoBits = {2};
int[] adrLoOffsets = {5};
PatcherUtil.writeBitSequence(code, pos + 3, curValue, adrLoBits, adrLoOffsets);
// ldr bits
curValue = (targetAddress >> shiftSize) & 0x1FF;
int[] ldrBits = {6, 6};
int[] ldrOffsets = {2, 0};
PatcherUtil.writeBitSequence(code, pos + 5, curValue, ldrBits, ldrOffsets);
}
}
public static class AdrpAddMacroInstruction extends CodeAnnotation implements MacroInstruction {
public static class AdrpAddMacroInstruction extends AArch64Assembler.PatchableCodeAnnotation {
public AdrpAddMacroInstruction(int position) {
super(position);
}
@ -2102,7 +2372,94 @@ public class AArch64MacroAssembler extends AArch64Assembler {
@Override
public void patch(int codePos, int relative, byte[] code) {
throw GraalError.unimplemented();
int pos = instructionPosition;
int targetAddress = pos + relative;
int relativePageDifference = PatcherUtil.computeRelativePageDifference(targetAddress, pos, 1 << 12);
// adrp imm_hi bits
int curValue = (relativePageDifference >> 2) & 0x7FFFF;
int[] adrHiBits = {3, 8, 8};
int[] adrHiOffsets = {5, 0, 0};
PatcherUtil.writeBitSequence(code, pos, curValue, adrHiBits, adrHiOffsets);
// adrp imm_lo bits
curValue = relativePageDifference & 0x3;
int[] adrLoBits = {2};
int[] adrLoOffsets = {5};
PatcherUtil.writeBitSequence(code, pos + 3, curValue, adrLoBits, adrLoOffsets);
// add bits
curValue = targetAddress & 0xFFF;
int[] addBits = {6, 6};
int[] addOffsets = {2, 0};
PatcherUtil.writeBitSequence(code, pos + 5, curValue, addBits, addOffsets);
}
}
private void annotateImmediateMovSequence(int pos, MovSequenceAnnotation.MovAction[] includeSet) {
if (codePatchingAnnotationConsumer != null) {
codePatchingAnnotationConsumer.accept(new MovSequenceAnnotation(pos, includeSet));
}
}
public static class MovSequenceAnnotation extends AArch64Assembler.PatchableCodeAnnotation {
/**
* An enum to indicate how each 16-bit immediate chunk is represented within a sequence of
* mov instructions.
*/
public enum MovAction {
USED, // mov instruction is in place for this chunk.
SKIPPED, // no mov instruction is in place for this chunk.
NEGATED; // movn instruction is in place for this chunk.
}
/**
* The size of the operand, in bytes.
*/
public final MovAction[] includeSet;
MovSequenceAnnotation(int instructionPosition, MovAction[] includeSet) {
super(instructionPosition);
this.includeSet = includeSet;
}
@Override
public String toString() {
return "MOV_SEQ";
}
@Override
public void patch(int codePos, int relative, byte[] code) {
/*
* Each move has a 16 bit immediate operand. We use a series of shifted moves to
* represent immediate values larger than 16 bits.
*/
int curValue = relative;
int[] bitsUsed = {3, 8, 5};
int[] offsets = {5, 0, 0};
int siteOffset = 0;
boolean containsNegatedMov = false;
for (MovAction include : includeSet) {
if (include == MovAction.NEGATED) {
containsNegatedMov = true;
break;
}
}
for (int i = 0; i < includeSet.length; i++) {
int value = curValue & 0xFFFF;
curValue = curValue >> 16;
switch (includeSet[i]) {
case USED:
break;
case SKIPPED:
assert value == (containsNegatedMov ? 0xFFFF : 0) : "Unable to patch this value.";
continue;
case NEGATED:
value = value ^ 0xFFFF;
break;
}
int bytePosition = instructionPosition + siteOffset;
PatcherUtil.writeBitSequence(code, bytePosition, value, bitsUsed, offsets);
siteOffset += 4;
}
}
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2010, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,9 +24,11 @@
package org.graalvm.compiler.asm.amd64;
import jdk.vm.ci.code.Register;
import org.graalvm.compiler.asm.AbstractAddress;
import org.graalvm.compiler.debug.GraalError;
import jdk.vm.ci.code.CodeUtil;
import jdk.vm.ci.code.Register;
/**
* Represents an address in target machine memory, specified via some combination of a base
@ -126,6 +128,11 @@ public final class AMD64Address extends AbstractAddress {
*/
public final int log2;
/**
* Creates a {@link Scale} for the scaling factor in {@code scale}.
*
* @throws IllegalArgumentException if {@code scale} is an unsupported scaling factor
*/
public static Scale fromInt(int scale) {
switch (scale) {
case 1:
@ -137,10 +144,15 @@ public final class AMD64Address extends AbstractAddress {
case 8:
return Times8;
default:
return null;
throw new IllegalArgumentException("Unsupported SIB addressing mode scaling factor: " + scale);
}
}
/**
* Creates a {@link Scale} for the log2 scaling factor {@code shift}.
*
* @throws IllegalArgumentException if {@code shift} is an unsupported scaling factor
*/
public static Scale fromShift(int shift) {
switch (shift) {
case 0:
@ -152,9 +164,23 @@ public final class AMD64Address extends AbstractAddress {
case 3:
return Times8;
default:
return null;
throw GraalError.shouldNotReachHere("Unsupported SIB addressing mode scaling factor: " + (1 << shift));
}
}
/**
* Determines if the scaling factor {@code scale} is supported.
*/
public static boolean isScaleSupported(int scale) {
return CodeUtil.isPowerOf2(scale) && scale <= 8;
}
/**
* Determines if the log2 scaling factor {@code shift} is supported.
*/
public static boolean isScaleShiftSupported(int shift) {
return shift >= 0 && shift <= 3;
}
}
@Override

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2009, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,10 @@ package org.graalvm.compiler.asm.amd64;
import static jdk.vm.ci.amd64.AMD64.CPU;
import static jdk.vm.ci.amd64.AMD64.MASK;
import static jdk.vm.ci.amd64.AMD64.XMM;
import static jdk.vm.ci.amd64.AMD64.r12;
import static jdk.vm.ci.amd64.AMD64.r13;
import static jdk.vm.ci.amd64.AMD64.rbp;
import static jdk.vm.ci.amd64.AMD64.rsp;
import static jdk.vm.ci.amd64.AMD64.CPUFeature.AVX512BW;
import static jdk.vm.ci.amd64.AMD64.CPUFeature.AVX512CD;
import static jdk.vm.ci.amd64.AMD64.CPUFeature.AVX512DQ;
@ -84,6 +88,10 @@ import org.graalvm.compiler.asm.amd64.AMD64Address.Scale;
import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize;
import org.graalvm.compiler.core.common.calc.Condition;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.options.Option;
import org.graalvm.compiler.options.OptionKey;
import org.graalvm.compiler.options.OptionType;
import org.graalvm.compiler.options.OptionValues;
import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.amd64.AMD64.CPUFeature;
@ -96,11 +104,38 @@ import jdk.vm.ci.code.TargetDescription;
*/
public class AMD64Assembler extends AMD64BaseAssembler {
public static class Options {
// @formatter:off
@Option(help = "Force branch instructions to align with 32-bytes boundary, to mitigate the jcc erratum. " +
"See https://www.intel.com/content/dam/support/us/en/documents/processors/mitigations-jump-conditional-code-erratum.pdf for more details.", type = OptionType.User)
public static final OptionKey<Boolean> UseBranchesWithin32ByteBoundary = new OptionKey<>(false);
// @formatter:on
}
private final boolean useBranchesWithin32ByteBoundary;
public interface CodePatchShifter {
void shift(int pos, int bytesToShift);
}
protected CodePatchShifter codePatchShifter = null;
public AMD64Assembler(TargetDescription target) {
super(target);
useBranchesWithin32ByteBoundary = false;
}
/**
* Constructs an assembler for the AMD64 architecture.
*/
public AMD64Assembler(TargetDescription target) {
public AMD64Assembler(TargetDescription target, OptionValues optionValues) {
super(target);
useBranchesWithin32ByteBoundary = Options.UseBranchesWithin32ByteBoundary.getValue(optionValues);
}
public void setCodePatchShifter(CodePatchShifter codePatchShifter) {
assert this.codePatchShifter == null : "overwriting existing value";
this.codePatchShifter = codePatchShifter;
}
/**
@ -251,8 +286,8 @@ public class AMD64Assembler extends AMD64BaseAssembler {
protected final int prefix2;
protected final int op;
private final boolean dstIsByte;
private final boolean srcIsByte;
final boolean dstIsByte;
final boolean srcIsByte;
private final OpAssertion assertion;
private final CPUFeature feature;
@ -359,11 +394,11 @@ public class AMD64Assembler extends AMD64BaseAssembler {
}
}
protected final int immediateSize(OperandSize size) {
public final int immediateSize(OperandSize size) {
if (immIsByte) {
return 1;
} else {
return size.getBytes();
return size.immediateSize();
}
}
}
@ -605,7 +640,7 @@ public class AMD64Assembler extends AMD64BaseAssembler {
/**
* Opcodes with operand order of M.
*/
public static class AMD64MOp extends AMD64Op {
public static final class AMD64MOp extends AMD64Op {
// @formatter:off
public static final AMD64MOp NOT = new AMD64MOp("NOT", 0xF7, 2);
public static final AMD64MOp NEG = new AMD64MOp("NEG", 0xF7, 3);
@ -622,11 +657,7 @@ public class AMD64Assembler extends AMD64BaseAssembler {
private final int ext;
protected AMD64MOp(String opcode, int op, int ext) {
this(opcode, 0, op, ext);
}
protected AMD64MOp(String opcode, int prefix, int op, int ext) {
this(opcode, prefix, op, ext, OpAssertion.WordOrLargerAssertion);
this(opcode, 0, op, ext, OpAssertion.WordOrLargerAssertion);
}
protected AMD64MOp(String opcode, int op, int ext, OpAssertion assertion) {
@ -638,13 +669,13 @@ public class AMD64Assembler extends AMD64BaseAssembler {
this.ext = ext;
}
public final void emit(AMD64Assembler asm, OperandSize size, Register dst) {
public void emit(AMD64Assembler asm, OperandSize size, Register dst) {
assert verify(asm, size, dst, null);
emitOpcode(asm, size, getRXB(null, dst), 0, dst.encoding);
asm.emitModRM(ext, dst);
}
public final void emit(AMD64Assembler asm, OperandSize size, AMD64Address dst) {
public void emit(AMD64Assembler asm, OperandSize size, AMD64Address dst) {
assert verify(asm, size, null, null);
emitOpcode(asm, size, getRXB(null, dst), 0, 0);
asm.emitOperandHelper(ext, dst, 0);
@ -1915,9 +1946,7 @@ public class AMD64Assembler extends AMD64BaseAssembler {
}
public final void decl(AMD64Address dst) {
prefix(dst);
emitByte(0xFF);
emitOperandHelper(1, dst, 0);
DEC.emit(this, DWORD, dst);
}
public final void divsd(Register dst, Register src) {
@ -1937,26 +1966,98 @@ public class AMD64Assembler extends AMD64BaseAssembler {
}
public final void incl(AMD64Address dst) {
prefix(dst);
emitByte(0xFF);
emitOperandHelper(0, dst, 0);
INC.emit(this, DWORD, dst);
}
public static final int JCC_ERRATUM_MITIGATION_BOUNDARY = 0x20;
public static final int OPCODE_IN_BYTES = 1;
public static final int MODRM_IN_BYTES = 1;
protected static int getPrefixInBytes(OperandSize size, Register dst, boolean dstIsByte) {
boolean needsRex = needsRex(dst, dstIsByte);
if (size == WORD) {
return needsRex ? 2 : 1;
}
return size == QWORD || needsRex ? 1 : 0;
}
protected static int getPrefixInBytes(OperandSize size, AMD64Address src) {
boolean needsRex = needsRex(src.getBase()) || needsRex(src.getIndex());
if (size == WORD) {
return needsRex ? 2 : 1;
}
return size == QWORD || needsRex ? 1 : 0;
}
protected static int getPrefixInBytes(OperandSize size, Register dst, boolean dstIsByte, Register src, boolean srcIsByte) {
boolean needsRex = needsRex(dst, dstIsByte) || needsRex(src, srcIsByte);
if (size == WORD) {
return needsRex ? 2 : 1;
}
return size == QWORD || needsRex ? 1 : 0;
}
protected static int getPrefixInBytes(OperandSize size, Register dst, boolean dstIsByte, AMD64Address src) {
boolean needsRex = needsRex(dst, dstIsByte) || needsRex(src.getBase()) || needsRex(src.getIndex());
if (size == WORD) {
return needsRex ? 2 : 1;
}
return size == QWORD || needsRex ? 1 : 0;
}
protected boolean mayCrossBoundary(int opStart, int opEnd) {
return (opStart / JCC_ERRATUM_MITIGATION_BOUNDARY) != ((opEnd - 1) / JCC_ERRATUM_MITIGATION_BOUNDARY) || (opEnd % JCC_ERRATUM_MITIGATION_BOUNDARY) == 0;
}
private static int bytesUntilBoundary(int pos) {
return JCC_ERRATUM_MITIGATION_BOUNDARY - (pos % JCC_ERRATUM_MITIGATION_BOUNDARY);
}
protected boolean ensureWithinBoundary(int opStart) {
if (useBranchesWithin32ByteBoundary) {
assert !mayCrossBoundary(opStart, position());
}
return true;
}
protected final void testAndAlign(int bytesToEmit) {
if (useBranchesWithin32ByteBoundary) {
int beforeNextOp = position();
int afterNextOp = beforeNextOp + bytesToEmit;
if (mayCrossBoundary(beforeNextOp, afterNextOp)) {
int bytesToShift = bytesUntilBoundary(beforeNextOp);
nop(bytesToShift);
if (codePatchShifter != null) {
codePatchShifter.shift(beforeNextOp, bytesToShift);
}
}
}
}
public void jcc(ConditionFlag cc, int jumpTarget, boolean forceDisp32) {
int shortSize = 2;
int longSize = 6;
final int shortSize = 2;
final int longSize = 6;
long disp = jumpTarget - position();
if (!forceDisp32 && isByte(disp - shortSize)) {
// 0111 tttn #8-bit disp
emitByte(0x70 | cc.getValue());
emitByte((int) ((disp - shortSize) & 0xFF));
} else {
// 0000 1111 1000 tttn #32-bit disp
assert isInt(disp - longSize) : "must be 32bit offset (call4)";
emitByte(0x0F);
emitByte(0x80 | cc.getValue());
emitInt((int) (disp - longSize));
testAndAlign(shortSize);
// After alignment, isByte(disp - shortSize) might not hold. Need to check again.
disp = jumpTarget - position();
if (isByte(disp - shortSize)) {
// 0111 tttn #8-bit disp
emitByte(0x70 | cc.getValue());
emitByte((int) ((disp - shortSize) & 0xFF));
return;
}
}
// 0000 1111 1000 tttn #32-bit disp
assert forceDisp32 || isInt(disp - longSize) : "must be 32bit offset (call4)";
testAndAlign(longSize);
disp = jumpTarget - position();
emitByte(0x0F);
emitByte(0x80 | cc.getValue());
emitInt((int) (disp - longSize));
}
public final void jcc(ConditionFlag cc, Label l) {
@ -1964,6 +2065,7 @@ public class AMD64Assembler extends AMD64BaseAssembler {
if (l.isBound()) {
jcc(cc, l.position(), false);
} else {
testAndAlign(6);
// Note: could eliminate cond. jumps to this jump if condition
// is the same however, seems to be rather unlikely case.
// Note: use jccb() if label to be bound is very close to get
@ -1973,14 +2075,14 @@ public class AMD64Assembler extends AMD64BaseAssembler {
emitByte(0x80 | cc.getValue());
emitInt(0);
}
}
public final void jccb(ConditionFlag cc, Label l) {
final int shortSize = 2;
testAndAlign(shortSize);
if (l.isBound()) {
int shortSize = 2;
int entry = l.position();
assert isByte(entry - (position() + shortSize)) : "Dispacement too large for a short jmp";
assert isByte(entry - (position() + shortSize)) : "Displacement too large for a short jmp";
long disp = entry - position();
// 0111 tttn #8-bit disp
emitByte(0x70 | cc.getValue());
@ -1992,19 +2094,48 @@ public class AMD64Assembler extends AMD64BaseAssembler {
}
}
public final void jmp(int jumpTarget, boolean forceDisp32) {
int shortSize = 2;
int longSize = 5;
long disp = jumpTarget - position();
if (!forceDisp32 && isByte(disp - shortSize)) {
emitByte(0xEB);
emitByte((int) ((disp - shortSize) & 0xFF));
public final void jcc(ConditionFlag cc, Label branchTarget, boolean isShortJmp) {
if (branchTarget == null) {
// jump to placeholder
jcc(cc, 0, true);
} else if (isShortJmp) {
jccb(cc, branchTarget);
} else {
emitByte(0xE9);
emitInt((int) (disp - longSize));
jcc(cc, branchTarget);
}
}
/**
* Emit a jmp instruction given a known target address.
*
* @return the position where the jmp instruction starts.
*/
public final int jmp(int jumpTarget, boolean forceDisp32) {
final int shortSize = 2;
final int longSize = 5;
// For long jmp, the jmp instruction will cross the jcc-erratum-mitigation-boundary when the
// current position is between [0x1b, 0x1f]. For short jmp [0x1e, 0x1f], which is covered by
// the long jmp triggering range.
if (!forceDisp32) {
// We first align the next jmp assuming it will be short jmp.
testAndAlign(shortSize);
int pos = position();
long disp = jumpTarget - pos;
if (isByte(disp - shortSize)) {
emitByte(0xEB);
emitByte((int) ((disp - shortSize) & 0xFF));
return pos;
}
}
testAndAlign(longSize);
int pos = position();
long disp = jumpTarget - pos;
emitByte(0xE9);
emitInt((int) (disp - longSize));
return pos;
}
@Override
public final void jmp(Label l) {
if (l.isBound()) {
@ -2014,28 +2145,84 @@ public class AMD64Assembler extends AMD64BaseAssembler {
// we can't yet know where the label will be bound. If you're sure that
// the forward jump will not run beyond 256 bytes, use jmpb to
// force an 8-bit displacement.
testAndAlign(5);
l.addPatchAt(position(), this);
emitByte(0xE9);
emitInt(0);
}
}
public final void jmp(Register entry) {
protected final void jmpWithoutAlignment(Register entry) {
prefix(entry);
emitByte(0xFF);
emitModRM(4, entry);
}
public final void jmp(Register entry) {
int bytesToEmit = needsRex(entry) ? 3 : 2;
testAndAlign(bytesToEmit);
int beforeJmp = position();
jmpWithoutAlignment(entry);
assert beforeJmp + bytesToEmit == position();
}
public final void jmp(AMD64Address adr) {
int bytesToEmit = getPrefixInBytes(DWORD, adr) + OPCODE_IN_BYTES + addressInBytes(adr);
testAndAlign(bytesToEmit);
int beforeJmp = position();
prefix(adr);
emitByte(0xFF);
emitOperandHelper(AMD64.rsp, adr, 0);
assert beforeJmp + bytesToEmit == position();
}
/**
* This method should be synchronized with
* {@link AMD64BaseAssembler#emitOperandHelper(Register, AMD64Address, int)}}.
*/
protected static int addressInBytes(AMD64Address addr) {
Register base = addr.getBase();
Register index = addr.getIndex();
int disp = addr.getDisplacement();
if (base.equals(AMD64.rip)) {
return 5;
} else if (base.isValid()) {
final boolean isZeroDisplacement = disp == 0 && !base.equals(rbp) && !base.equals(r13);
if (index.isValid()) {
if (isZeroDisplacement) {
return 2;
} else if (isByte(disp)) {
return 3;
} else {
return 6;
}
} else if (base.equals(rsp) || base.equals(r12)) {
if (disp == 0) {
return 2;
} else if (isByte(disp)) {
return 3;
} else {
return 6;
}
} else {
if (isZeroDisplacement) {
return 1;
} else if (isByte(disp)) {
return 2;
} else {
return 5;
}
}
} else {
return 6;
}
}
public final void jmpb(Label l) {
final int shortSize = 2;
testAndAlign(shortSize);
if (l.isBound()) {
int shortSize = 2;
// Displacement is relative to byte just after jmpb instruction
int displacement = l.position() - position() - shortSize;
GraalError.guarantee(isByte(displacement), "Displacement too large to be encoded as a byte: %d", displacement);
@ -2998,8 +3185,10 @@ public class AMD64Assembler extends AMD64BaseAssembler {
public final void ret(int imm16) {
if (imm16 == 0) {
testAndAlign(1);
emitByte(0xC3);
} else {
testAndAlign(3);
emitByte(0xC2);
emitShort(imm16);
}
@ -3089,24 +3278,18 @@ public class AMD64Assembler extends AMD64BaseAssembler {
// 8bit operands
if (dst.encoding == 0) {
emitByte(0xA9);
emitInt(imm32);
} else {
prefix(dst);
emitByte(0xF7);
emitModRM(0, dst);
AMD64MIOp.TEST.emit(this, DWORD, dst, imm32);
}
emitInt(imm32);
}
public final void testl(Register dst, Register src) {
prefix(dst, src);
emitByte(0x85);
emitModRM(dst, src);
AMD64RMOp.TEST.emit(this, DWORD, dst, src);
}
public final void testl(Register dst, AMD64Address src) {
prefix(src, dst);
emitByte(0x85);
emitOperandHelper(dst, src, 0);
AMD64RMOp.TEST.emit(this, DWORD, dst, src);
}
public final void unpckhpd(Register dst, Register src) {
@ -3141,16 +3324,12 @@ public class AMD64Assembler extends AMD64BaseAssembler {
public final void decl(Register dst) {
// Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
prefix(dst);
emitByte(0xFF);
emitModRM(1, dst);
DEC.emit(this, DWORD, dst);
}
public final void incl(Register dst) {
// Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
prefix(dst);
emitByte(0xFF);
emitModRM(0, dst);
INC.emit(this, DWORD, dst);
}
public final void addq(Register dst, int imm32) {
@ -3267,9 +3446,7 @@ public class AMD64Assembler extends AMD64BaseAssembler {
public final void decq(Register dst) {
// Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
prefixq(dst);
emitByte(0xFF);
emitModRM(1, dst);
DEC.emit(this, QWORD, dst);
}
public final void decq(AMD64Address dst) {
@ -3286,9 +3463,7 @@ public class AMD64Assembler extends AMD64BaseAssembler {
public final void incq(Register dst) {
// Don't use it directly. Use Macroincrementq() instead.
// Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
prefixq(dst);
emitByte(0xFF);
emitModRM(0, dst);
INC.emit(this, QWORD, dst);
}
public final void incq(AMD64Address dst) {
@ -3486,9 +3661,7 @@ public class AMD64Assembler extends AMD64BaseAssembler {
}
public final void testq(Register dst, Register src) {
prefixq(dst, src);
emitByte(0x85);
emitModRM(dst, src);
AMD64RMOp.TEST.emit(this, QWORD, dst, src);
}
public final void btrq(Register src, int imm8) {
@ -3964,6 +4137,14 @@ public class AMD64Assembler extends AMD64BaseAssembler {
emitOperandHelper(dst, src, 0, EVEXTuple.FVM.getDisp8ScalingFactor(AVXSize.ZMM));
}
public final void evmovdqu64(AMD64Address dst, Register src) {
assert supports(CPUFeature.AVX512F);
assert inRC(XMM, src);
evexPrefix(src, Register.None, Register.None, dst, AVXSize.ZMM, P_F3, M_0F, W1, Z0, B0);
emitByte(0x7F);
emitOperandHelper(src, dst, 0, EVEXTuple.FVM.getDisp8ScalingFactor(AVXSize.ZMM));
}
// Insn: VPMOVZXBW zmm1, m256
public final void evpmovzxbw(Register dst, AMD64Address src) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -291,11 +291,16 @@ public abstract class AMD64BaseAssembler extends Assembler {
emitByte(Prefix.REXW);
}
/**
 * Tells whether {@code reg} has no valid ModRM/REX machine encoding: either the
 * artificial {@link Register#None} placeholder or {@code rip}, which is only
 * reachable through RIP-relative addressing.
 */
private static boolean isInvalidEncoding(Register reg) {
    if (Register.None.equals(reg)) {
        return true;
    }
    return AMD64.rip.equals(reg);
}
// Emits a REX prefix for reg if one is needed, treating the operand as a
// non-byte register (delegates to the two-argument overload with byteinst=false).
protected final void prefix(Register reg) {
prefix(reg, false);
}
protected final void prefix(Register reg, boolean byteinst) {
assert !isInvalidEncoding(reg);
int regEnc = reg.encoding;
if (regEnc >= 8) {
emitByte(Prefix.REXB);
@ -305,6 +310,7 @@ public abstract class AMD64BaseAssembler extends Assembler {
}
protected final void prefixq(Register reg) {
assert !isInvalidEncoding(reg);
if (reg.encoding < 8) {
emitByte(Prefix.REXW);
} else {
@ -317,6 +323,7 @@ public abstract class AMD64BaseAssembler extends Assembler {
}
protected final void prefix(Register dst, boolean dstIsByte, Register src, boolean srcIsByte) {
assert !isInvalidEncoding(dst) && !isInvalidEncoding(src);
int dstEnc = dst.encoding;
int srcEnc = src.encoding;
if (dstEnc < 8) {
@ -339,6 +346,7 @@ public abstract class AMD64BaseAssembler extends Assembler {
* in the prefix.
*/
protected final void prefixq(Register reg, Register rm) {
assert !isInvalidEncoding(reg) && !isInvalidEncoding(rm);
int regEnc = reg.encoding;
int rmEnc = rm.encoding;
if (regEnc < 8) {
@ -356,10 +364,15 @@ public abstract class AMD64BaseAssembler extends Assembler {
}
}
private static boolean needsRex(Register reg) {
protected static boolean needsRex(Register reg) {
// rip is excluded implicitly.
return reg.encoding >= MinEncodingNeedsRex;
}
protected static boolean needsRex(Register src, boolean srcIsByte) {
return srcIsByte ? src.encoding >= 4 : needsRex(src);
}
protected final void prefix(AMD64Address adr) {
if (needsRex(adr.getBase())) {
if (needsRex(adr.getIndex())) {
@ -399,6 +412,7 @@ public abstract class AMD64BaseAssembler extends Assembler {
}
protected void prefix(AMD64Address adr, Register reg, boolean byteinst) {
assert !isInvalidEncoding(reg);
if (reg.encoding < 8) {
if (needsRex(adr.getBase())) {
if (needsRex(adr.getIndex())) {
@ -431,6 +445,7 @@ public abstract class AMD64BaseAssembler extends Assembler {
}
protected void prefixq(AMD64Address adr, Register src) {
assert !isInvalidEncoding(src);
if (src.encoding < 8) {
if (needsRex(adr.getBase())) {
if (needsRex(adr.getIndex())) {
@ -468,6 +483,7 @@ public abstract class AMD64BaseAssembler extends Assembler {
* field. The X bit must be 0.
*/
protected static int getRXB(Register reg, Register rm) {
assert !isInvalidEncoding(rm) && !isInvalidEncoding(reg);
int rxb = (reg == null ? 0 : reg.encoding & 0x08) >> 1;
rxb |= (rm == null ? 0 : rm.encoding & 0x08) >> 3;
return rxb;
@ -481,11 +497,12 @@ public abstract class AMD64BaseAssembler extends Assembler {
* There is an SIB byte: In that case, X extends SIB.index and B extends SIB.base.
*/
protected static int getRXB(Register reg, AMD64Address rm) {
assert !isInvalidEncoding(reg);
int rxb = (reg == null ? 0 : reg.encoding & 0x08) >> 1;
if (!rm.getIndex().equals(Register.None)) {
if (!isInvalidEncoding(rm.getIndex())) {
rxb |= (rm.getIndex().encoding & 0x08) >> 2;
}
if (!rm.getBase().equals(Register.None)) {
if (!isInvalidEncoding(rm.getBase())) {
rxb |= (rm.getBase().encoding & 0x08) >> 3;
}
return rxb;
@ -498,6 +515,7 @@ public abstract class AMD64BaseAssembler extends Assembler {
*/
// Emits a ModRM byte in register-direct form: [ 11 reg r/m ]. The caller must
// pass reg already masked to its low 3 bits; the high (extension) bit, if any,
// belongs in the REX/VEX/EVEX prefix emitted separately.
protected final void emitModRM(int reg, Register rm) {
assert (reg & 0x07) == reg;
assert !isInvalidEncoding(rm);
emitByte(0xC0 | (reg << 3) | (rm.encoding & 0x07));
}
@ -507,6 +525,7 @@ public abstract class AMD64BaseAssembler extends Assembler {
* Format: [ 11 reg r/m ]
*/
// Register-flavored convenience overload: only the low 3 bits of reg's encoding
// go into the ModRM byte (bit 3 is carried by the prefix, not checked here).
protected final void emitModRM(Register reg, Register rm) {
assert !isInvalidEncoding(reg);
emitModRM(reg.encoding & 0x07, rm);
}
@ -518,7 +537,7 @@ public abstract class AMD64BaseAssembler extends Assembler {
* @param force4Byte use 4 byte encoding for displacements that would normally fit in a byte
*/
protected final void emitOperandHelper(Register reg, AMD64Address addr, boolean force4Byte, int additionalInstructionSize) {
assert !reg.equals(Register.None);
assert !isInvalidEncoding(reg);
emitOperandHelper(encode(reg), addr, force4Byte, additionalInstructionSize, DEFAULT_DISP8_SCALE);
}
@ -527,12 +546,12 @@ public abstract class AMD64BaseAssembler extends Assembler {
}
protected final void emitOperandHelper(Register reg, AMD64Address addr, int additionalInstructionSize) {
assert !reg.equals(Register.None);
assert !isInvalidEncoding(reg);
emitOperandHelper(encode(reg), addr, false, additionalInstructionSize, DEFAULT_DISP8_SCALE);
}
protected final void emitOperandHelper(Register reg, AMD64Address addr, int additionalInstructionSize, int evexDisp8Scale) {
assert !reg.equals(Register.None);
assert !isInvalidEncoding(reg);
emitOperandHelper(encode(reg), addr, false, additionalInstructionSize, evexDisp8Scale);
}
@ -559,7 +578,7 @@ public abstract class AMD64BaseAssembler extends Assembler {
int disp = addr.getDisplacement();
if (base.equals(AMD64.rip)) { // also matches addresses returned by getPlaceholder()
// [00 000 101] disp32
// [00 reg 101] disp32
assert index.equals(Register.None) : "cannot use RIP relative addressing with index register";
emitByte(0x05 | regenc);
if (codePatchingAnnotationConsumer != null && addr.instructionStartPosition >= 0) {
@ -1037,6 +1056,17 @@ public abstract class AMD64BaseAssembler extends Assembler {
}
}
// imm8 predicate values for EVEX-encoded compare instructions (e.g. VPCMP*);
// the 3-bit code selects the comparison — values per the Intel SDM predicate
// table (NOTE(review): confirm against the SDM when adding new users).
public static final class EVEXComparisonPredicate {
public static final int EQ = 0;
public static final int LT = 1;
public static final int LE = 2;
public static final int FALSE = 3;
public static final int NEQ = 4;
public static final int NLT = 5;
public static final int NLE = 6;
public static final int TRUE = 7;
}
// @formatter:off
//
// Instruction Format and EVEX illustrated below (optional []):

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2009, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,9 +27,23 @@ package org.graalvm.compiler.asm.amd64;
import static org.graalvm.compiler.asm.amd64.AMD64AsmOptions.UseIncDec;
import static org.graalvm.compiler.asm.amd64.AMD64AsmOptions.UseXmmLoadAndClearUpper;
import static org.graalvm.compiler.asm.amd64.AMD64AsmOptions.UseXmmRegToRegMoveAll;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.ADD;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.AND;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.CMP;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.SUB;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp.DEC;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp.INC;
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.DWORD;
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.QWORD;
import static org.graalvm.compiler.core.common.NumUtil.isByte;
import java.util.function.IntConsumer;
import java.util.function.Supplier;
import org.graalvm.compiler.asm.Label;
import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize;
import org.graalvm.compiler.core.common.NumUtil;
import org.graalvm.compiler.options.OptionValues;
import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.amd64.AMD64Kind;
@ -45,6 +59,10 @@ public class AMD64MacroAssembler extends AMD64Assembler {
super(target);
}
public AMD64MacroAssembler(TargetDescription target, OptionValues optionValues) {
super(target, optionValues);
}
public final void decrementq(Register reg, int value) {
if (value == Integer.MIN_VALUE) {
subq(reg, value);
@ -371,4 +389,275 @@ public class AMD64MacroAssembler extends AMD64Assembler {
addq(AMD64.rsp, AMD64Kind.DOUBLE.getSizeInBytes());
}
/**
* Emit a direct call to a fixed address, which will be patched later during code installation.
*
* @param align indicates whether the displacement bytes (offset by
* {@code callDisplacementOffset}) of this call instruction should be aligned to
* {@code wordSize}.
* @return where the actual call instruction starts.
*/
public final int directCall(boolean align, int callDisplacementOffset, int wordSize) {
emitAlignmentForDirectCall(align, callDisplacementOffset, wordSize);
// 5 bytes = opcode + 32-bit displacement of the direct call emitted below; pad
// so the whole instruction stays within the boundary testAndAlign guards.
testAndAlign(5);
// After padding to mitigate JCC erratum, the displacement may be unaligned again. The
// previous pass is essential because JCC erratum padding may not trigger without the
// displacement alignment.
emitAlignmentForDirectCall(align, callDisplacementOffset, wordSize);
int beforeCall = position();
call();
return beforeCall;
}
/**
 * Pads with nops so that the call's displacement word — located
 * {@code callDisplacementOffset} bytes past the current position — starts on a
 * {@code wordSize} boundary. Does nothing when {@code align} is false.
 */
private void emitAlignmentForDirectCall(boolean align, int callDisplacementOffset, int wordSize) {
    if (!align) {
        return;
    }
    // Position the displacement word would occupy if the call were emitted now.
    int displacementPos = position() + callDisplacementOffset;
    int misalignment = displacementPos % wordSize;
    if (misalignment != 0) {
        nop(wordSize - misalignment);
    }
}
/**
 * Emits an indirect call through {@code callReg}, pre-padded via
 * {@code testAndAlign} so the instruction is not split across the guarded boundary.
 *
 * @return the position at which the call instruction starts
 */
public final int indirectCall(Register callReg) {
// Indirect call = opcode + ModRM (2 bytes), plus 1 when the register needs a REX prefix.
int bytesToEmit = needsRex(callReg) ? 3 : 2;
testAndAlign(bytesToEmit);
int beforeCall = position();
call(callReg);
assert beforeCall + bytesToEmit == position();
return beforeCall;
}
/**
 * Emits {@code movq scratch, address} followed by an indirect call through
 * {@code scratch}, padded as one unit.
 *
 * @return the position at which the mov/call sequence starts
 */
public final int directCall(long address, Register scratch) {
// 12 = 10-byte mov r64, imm64 (always REX.W-prefixed) + 2-byte indirect call;
// +1 when the call itself also needs a REX prefix for the scratch register.
int bytesToEmit = needsRex(scratch) ? 13 : 12;
testAndAlign(bytesToEmit);
int beforeCall = position();
movq(scratch, address);
call(scratch);
assert beforeCall + bytesToEmit == position();
return beforeCall;
}
/**
 * Emits {@code movq scratch, address} followed by an indirect jump through
 * {@code scratch}, padded as one unit (the jump itself adds no further alignment).
 *
 * @return the position at which the mov/jmp sequence starts
 */
public final int directJmp(long address, Register scratch) {
// Same byte accounting as directCall(long, Register): 10-byte mov + 2-byte
// indirect jmp, +1 when the jmp needs a REX prefix.
int bytesToEmit = needsRex(scratch) ? 13 : 12;
testAndAlign(bytesToEmit);
int beforeJmp = position();
movq(scratch, address);
jmpWithoutAlignment(scratch);
assert beforeJmp + bytesToEmit == position();
return beforeJmp;
}
// This should guarantee that the alignment in AMD64Assembler.jcc methods will be not triggered.
/**
 * Pads (if necessary) so that an instruction of {@code prevOpInBytes} bytes plus the
 * jcc that follows it fit together within the alignment boundary checked by
 * {@code testAndAlign}. A short jcc is assumed to be 2 bytes and a near jcc 6 bytes
 * (the {@code + 2} / {@code + 6} below); when the target is bound we first try the
 * short form and fall back to the near form if the displacement no longer fits.
 */
private void alignFusedPair(Label branchTarget, boolean isShortJmp, int prevOpInBytes) {
// 26 + 6-byte near jcc = 32 = 0x20, the boundary size.
assert prevOpInBytes < 26 : "Fused pair may be longer than 0x20 bytes.";
if (branchTarget == null) {
testAndAlign(prevOpInBytes + 6);
} else if (isShortJmp) {
testAndAlign(prevOpInBytes + 2);
} else if (!branchTarget.isBound()) {
// Unbound target: conservatively assume the 6-byte near form.
testAndAlign(prevOpInBytes + 6);
} else {
long disp = branchTarget.position() - (position() + prevOpInBytes);
// assuming short jump first
if (isByte(disp - 2)) {
testAndAlign(prevOpInBytes + 2);
// After alignment, isByte(disp - shortSize) might not hold. Need to check
// again.
disp = branchTarget.position() - (position() + prevOpInBytes);
if (isByte(disp - 2)) {
return;
}
}
testAndAlign(prevOpInBytes + 6);
}
}
/**
 * Emits {@code op src, imm32} immediately followed by {@code jcc}, pre-padded via
 * {@link #alignFusedPair} so the pair is not split across the alignment boundary.
 * The byte count (prefix + opcode + ModRM + immediate) must exactly predict what
 * {@code op.emit} produces — the assert below enforces this.
 */
private void applyMIOpAndJcc(AMD64MIOp op, OperandSize size, Register src, int imm32, ConditionFlag cc, Label branchTarget, boolean isShortJmp, boolean annotateImm,
IntConsumer applyBeforeFusedPair) {
final int bytesToEmit = getPrefixInBytes(size, src, op.srcIsByte) + OPCODE_IN_BYTES + MODRM_IN_BYTES + op.immediateSize(size);
alignFusedPair(branchTarget, isShortJmp, bytesToEmit);
final int beforeFusedPair = position();
if (applyBeforeFusedPair != null) {
// Callback sees the final (post-padding) start position of the pair.
applyBeforeFusedPair.accept(beforeFusedPair);
}
op.emit(this, size, src, imm32, annotateImm);
assert beforeFusedPair + bytesToEmit == position();
jcc(cc, branchTarget, isShortJmp);
assert ensureWithinBoundary(beforeFusedPair);
}
/**
 * Memory-operand variant of {@link #applyMIOpAndJcc}: emits {@code op [src], imm32}
 * fused with the following {@code jcc}. Predicted size uses {@code addressInBytes}
 * for the ModRM/SIB/displacement portion of the memory operand.
 */
private void applyMIOpAndJcc(AMD64MIOp op, OperandSize size, AMD64Address src, int imm32, ConditionFlag cc, Label branchTarget, boolean isShortJmp, boolean annotateImm,
IntConsumer applyBeforeFusedPair) {
final int bytesToEmit = getPrefixInBytes(size, src) + OPCODE_IN_BYTES + addressInBytes(src) + op.immediateSize(size);
alignFusedPair(branchTarget, isShortJmp, bytesToEmit);
final int beforeFusedPair = position();
if (applyBeforeFusedPair != null) {
applyBeforeFusedPair.accept(beforeFusedPair);
}
op.emit(this, size, src, imm32, annotateImm);
assert beforeFusedPair + bytesToEmit == position();
jcc(cc, branchTarget, isShortJmp);
assert ensureWithinBoundary(beforeFusedPair);
}
/**
 * Emits {@code op src1, src2} (register-register) fused with the following
 * {@code jcc}, pre-padded so the pair is not split across the alignment boundary.
 *
 * @return the position right after the ALU op, i.e. where the jcc starts
 */
private int applyRMOpAndJcc(AMD64RMOp op, OperandSize size, Register src1, Register src2, ConditionFlag cc, Label branchTarget, boolean isShortJmp) {
final int bytesToEmit = getPrefixInBytes(size, src1, op.dstIsByte, src2, op.srcIsByte) + OPCODE_IN_BYTES + MODRM_IN_BYTES;
alignFusedPair(branchTarget, isShortJmp, bytesToEmit);
final int beforeFusedPair = position();
op.emit(this, size, src1, src2);
final int beforeJcc = position();
assert beforeFusedPair + bytesToEmit == beforeJcc;
jcc(cc, branchTarget, isShortJmp);
assert ensureWithinBoundary(beforeFusedPair);
return beforeJcc;
}
/**
 * Memory-operand variant of {@link #applyRMOpAndJcc}: emits {@code op src1, [src2]}
 * fused with the following {@code jcc}.
 *
 * @return the position right after the ALU op, i.e. where the jcc starts
 */
private int applyRMOpAndJcc(AMD64RMOp op, OperandSize size, Register src1, AMD64Address src2, ConditionFlag cc, Label branchTarget, boolean isShortJmp, IntConsumer applyBeforeFusedPair) {
final int bytesToEmit = getPrefixInBytes(size, src1, op.dstIsByte, src2) + OPCODE_IN_BYTES + addressInBytes(src2);
alignFusedPair(branchTarget, isShortJmp, bytesToEmit);
final int beforeFusedPair = position();
if (applyBeforeFusedPair != null) {
// Callback sees the final (post-padding) start position of the pair.
applyBeforeFusedPair.accept(beforeFusedPair);
}
op.emit(this, size, src1, src2);
final int beforeJcc = position();
assert beforeFusedPair + bytesToEmit == beforeJcc;
jcc(cc, branchTarget, isShortJmp);
assert ensureWithinBoundary(beforeFusedPair);
return beforeJcc;
}
/**
 * Emits a single-operand op (e.g. INC/DEC) on {@code dst} fused with the
 * following {@code jcc}, pre-padded so the pair is not split across the
 * alignment boundary.
 */
public void applyMOpAndJcc(AMD64MOp op, OperandSize size, Register dst, ConditionFlag cc, Label branchTarget, boolean isShortJmp) {
final int bytesToEmit = getPrefixInBytes(size, dst, op.srcIsByte) + OPCODE_IN_BYTES + MODRM_IN_BYTES;
alignFusedPair(branchTarget, isShortJmp, bytesToEmit);
final int beforeFusedPair = position();
op.emit(this, size, dst);
assert beforeFusedPair + bytesToEmit == position();
jcc(cc, branchTarget, isShortJmp);
assert ensureWithinBoundary(beforeFusedPair);
}
// ---------------------------------------------------------------------------
// Fused ALU + Jcc convenience wrappers. Each method below delegates to one of
// the apply*AndJcc helpers, which pad so the instruction pair is not split
// across the alignment boundary. The l/q name suffixes fix the operand size to
// DWORD/QWORD; the b suffix selects the byte form. Methods returning int report
// the position where the jcc starts.
// ---------------------------------------------------------------------------
public final void testAndJcc(OperandSize size, Register src, int imm32, ConditionFlag cc, Label branchTarget, boolean isShortJmp) {
applyMIOpAndJcc(AMD64MIOp.TEST, size, src, imm32, cc, branchTarget, isShortJmp, false, null);
}
public final void testlAndJcc(Register src, int imm32, ConditionFlag cc, Label branchTarget, boolean isShortJmp) {
applyMIOpAndJcc(AMD64MIOp.TEST, DWORD, src, imm32, cc, branchTarget, isShortJmp, false, null);
}
public final void testAndJcc(OperandSize size, AMD64Address src, int imm32, ConditionFlag cc, Label branchTarget, boolean isShortJmp, IntConsumer applyBeforeFusedPair) {
applyMIOpAndJcc(AMD64MIOp.TEST, size, src, imm32, cc, branchTarget, isShortJmp, false, applyBeforeFusedPair);
}
public final void testAndJcc(OperandSize size, Register src1, Register src2, ConditionFlag cc, Label branchTarget, boolean isShortJmp) {
applyRMOpAndJcc(AMD64RMOp.TEST, size, src1, src2, cc, branchTarget, isShortJmp);
}
public final void testlAndJcc(Register src1, Register src2, ConditionFlag cc, Label branchTarget, boolean isShortJmp) {
applyRMOpAndJcc(AMD64RMOp.TEST, DWORD, src1, src2, cc, branchTarget, isShortJmp);
}
public final int testqAndJcc(Register src1, Register src2, ConditionFlag cc, Label branchTarget, boolean isShortJmp) {
return applyRMOpAndJcc(AMD64RMOp.TEST, QWORD, src1, src2, cc, branchTarget, isShortJmp);
}
public final void testAndJcc(OperandSize size, Register src1, AMD64Address src2, ConditionFlag cc, Label branchTarget, boolean isShortJmp, IntConsumer applyBeforeFusedPair) {
applyRMOpAndJcc(AMD64RMOp.TEST, size, src1, src2, cc, branchTarget, isShortJmp, applyBeforeFusedPair);
}
public final void testbAndJcc(Register src1, Register src2, ConditionFlag cc, Label branchTarget, boolean isShortJmp) {
applyRMOpAndJcc(AMD64RMOp.TESTB, OperandSize.BYTE, src1, src2, cc, branchTarget, isShortJmp);
}
public final void testbAndJcc(Register src1, AMD64Address src2, ConditionFlag cc, Label branchTarget, boolean isShortJmp) {
applyRMOpAndJcc(AMD64RMOp.TESTB, OperandSize.BYTE, src1, src2, cc, branchTarget, isShortJmp, null);
}
// CMP variants: getMIOpcode picks the sign-extended-imm8 form when the
// immediate fits in a byte.
public final void cmpAndJcc(OperandSize size, Register src, int imm32, ConditionFlag cc, Label branchTarget, boolean isShortJmp, boolean annotateImm, IntConsumer applyBeforeFusedPair) {
applyMIOpAndJcc(CMP.getMIOpcode(size, isByte(imm32)), size, src, imm32, cc, branchTarget, isShortJmp, annotateImm, applyBeforeFusedPair);
}
public final void cmplAndJcc(Register src, int imm32, ConditionFlag cc, Label branchTarget, boolean isShortJmp) {
applyMIOpAndJcc(CMP.getMIOpcode(DWORD, isByte(imm32)), DWORD, src, imm32, cc, branchTarget, isShortJmp, false, null);
}
public final void cmpqAndJcc(Register src, int imm32, ConditionFlag cc, Label branchTarget, boolean isShortJmp) {
applyMIOpAndJcc(CMP.getMIOpcode(QWORD, isByte(imm32)), QWORD, src, imm32, cc, branchTarget, isShortJmp, false, null);
}
public final void cmpAndJcc(OperandSize size, AMD64Address src, int imm32, ConditionFlag cc, Label branchTarget, boolean isShortJmp, boolean annotateImm, IntConsumer applyBeforeFusedPair) {
applyMIOpAndJcc(CMP.getMIOpcode(size, NumUtil.isByte(imm32)), size, src, imm32, cc, branchTarget, isShortJmp, annotateImm, applyBeforeFusedPair);
}
public final void cmpAndJcc(OperandSize size, Register src1, Register src2, ConditionFlag cc, Label branchTarget, boolean isShortJmp) {
applyRMOpAndJcc(CMP.getRMOpcode(size), size, src1, src2, cc, branchTarget, isShortJmp);
}
public final void cmplAndJcc(Register src1, Register src2, ConditionFlag cc, Label branchTarget, boolean isShortJmp) {
applyRMOpAndJcc(CMP.getRMOpcode(DWORD), DWORD, src1, src2, cc, branchTarget, isShortJmp);
}
public final int cmpqAndJcc(Register src1, Register src2, ConditionFlag cc, Label branchTarget, boolean isShortJmp) {
return applyRMOpAndJcc(CMP.getRMOpcode(QWORD), QWORD, src1, src2, cc, branchTarget, isShortJmp);
}
public final void cmpAndJcc(OperandSize size, Register src1, AMD64Address src2, ConditionFlag cc, Label branchTarget, boolean isShortJmp, IntConsumer applyBeforeFusedPair) {
applyRMOpAndJcc(CMP.getRMOpcode(size), size, src1, src2, cc, branchTarget, isShortJmp, applyBeforeFusedPair);
}
public final void cmplAndJcc(Register src1, AMD64Address src2, ConditionFlag cc, Label branchTarget, boolean isShortJmp) {
applyRMOpAndJcc(CMP.getRMOpcode(DWORD), DWORD, src1, src2, cc, branchTarget, isShortJmp, null);
}
public final int cmpqAndJcc(Register src1, AMD64Address src2, ConditionFlag cc, Label branchTarget, boolean isShortJmp) {
return applyRMOpAndJcc(CMP.getRMOpcode(QWORD), QWORD, src1, src2, cc, branchTarget, isShortJmp, null);
}
// Lazy-address variant: the size is predicted from a placeholder, padding is
// applied first, and only then is the real address materialized — so its
// displacement reflects the final position. Always uses the near (non-short) jcc.
public final void cmpAndJcc(OperandSize size, Register src1, Supplier<AMD64Address> src2, ConditionFlag cc, Label branchTarget) {
AMD64Address placeHolder = getPlaceholder(position());
final AMD64RMOp op = CMP.getRMOpcode(size);
final int bytesToEmit = getPrefixInBytes(size, src1, op.dstIsByte, placeHolder) + OPCODE_IN_BYTES + addressInBytes(placeHolder);
alignFusedPair(branchTarget, false, bytesToEmit);
final int beforeFusedPair = position();
AMD64Address src2AsAddress = src2.get();
op.emit(this, size, src1, src2AsAddress);
assert beforeFusedPair + bytesToEmit == position();
jcc(cc, branchTarget, false);
assert ensureWithinBoundary(beforeFusedPair);
}
// Arithmetic + Jcc: these modify dst (unlike TEST/CMP above).
public final void andlAndJcc(Register dst, int imm32, ConditionFlag cc, Label branchTarget, boolean isShortJmp) {
applyMIOpAndJcc(AND.getMIOpcode(DWORD, isByte(imm32)), DWORD, dst, imm32, cc, branchTarget, isShortJmp, false, null);
}
public final void addqAndJcc(Register dst, int imm32, ConditionFlag cc, Label branchTarget, boolean isShortJmp) {
applyMIOpAndJcc(ADD.getMIOpcode(QWORD, isByte(imm32)), QWORD, dst, imm32, cc, branchTarget, isShortJmp, false, null);
}
public final void sublAndJcc(Register dst, Register src, ConditionFlag cc, Label branchTarget, boolean isShortJmp) {
applyRMOpAndJcc(SUB.getRMOpcode(DWORD), DWORD, dst, src, cc, branchTarget, isShortJmp);
}
public final void subqAndJcc(Register dst, Register src, ConditionFlag cc, Label branchTarget, boolean isShortJmp) {
applyRMOpAndJcc(SUB.getRMOpcode(QWORD), QWORD, dst, src, cc, branchTarget, isShortJmp);
}
public final void sublAndJcc(Register dst, int imm32, ConditionFlag cc, Label branchTarget, boolean isShortJmp) {
applyMIOpAndJcc(SUB.getMIOpcode(DWORD, isByte(imm32)), DWORD, dst, imm32, cc, branchTarget, isShortJmp, false, null);
}
public final void subqAndJcc(Register dst, int imm32, ConditionFlag cc, Label branchTarget, boolean isShortJmp) {
applyMIOpAndJcc(SUB.getMIOpcode(QWORD, isByte(imm32)), QWORD, dst, imm32, cc, branchTarget, isShortJmp, false, null);
}
public final void incqAndJcc(Register dst, ConditionFlag cc, Label branchTarget, boolean isShortJmp) {
applyMOpAndJcc(INC, QWORD, dst, cc, branchTarget, isShortJmp);
}
public final void decqAndJcc(Register dst, ConditionFlag cc, Label branchTarget, boolean isShortJmp) {
applyMOpAndJcc(DEC, QWORD, dst, cc, branchTarget, isShortJmp);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2009, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,6 +34,7 @@ import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.function.Function;
import jdk.internal.vm.compiler.collections.EconomicSet;
import org.graalvm.compiler.core.common.CompilationIdentifier;
@ -70,7 +71,7 @@ public class CompilationResult {
*/
public abstract static class CodeAnnotation {
public final int position;
private int position;
public CodeAnnotation(int position) {
this.position = position;
@ -88,6 +89,14 @@ public class CompilationResult {
@Override
public abstract boolean equals(Object obj);
public int getPosition() {
return position;
}
void setPosition(int position) {
this.position = position;
}
}
/**
@ -109,7 +118,7 @@ public class CompilationResult {
}
if (obj instanceof CodeComment) {
CodeComment that = (CodeComment) obj;
if (this.position == that.position && this.value.equals(that.value)) {
if (this.getPosition() == that.getPosition() && this.value.equals(that.value)) {
return true;
}
}
@ -118,7 +127,7 @@ public class CompilationResult {
@Override
public String toString() {
return getClass().getSimpleName() + "@" + position + ": " + value;
return getClass().getSimpleName() + "@" + getPosition() + ": " + value;
}
}
@ -162,7 +171,7 @@ public class CompilationResult {
}
if (obj instanceof JumpTable) {
JumpTable that = (JumpTable) obj;
if (this.position == that.position && this.entrySize == that.entrySize && this.low == that.low && this.high == that.high) {
if (this.getPosition() == that.getPosition() && this.entrySize == that.entrySize && this.low == that.low && this.high == that.high) {
return true;
}
}
@ -171,7 +180,7 @@ public class CompilationResult {
@Override
public String toString() {
return getClass().getSimpleName() + "@" + position + ": [" + low + " .. " + high + "]";
return getClass().getSimpleName() + "@" + getPosition() + ": [" + low + " .. " + high + "]";
}
}
@ -767,4 +776,35 @@ public class CompilationResult {
dataSection.close();
closed = true;
}
/**
 * Shifts all code metadata (infopoints, data patches, exception handlers, marks and
 * code annotations) whose position is at or after {@code pos} forward by
 * {@code bytesToShift} bytes — used when bytes are inserted into already-emitted code.
 * Site objects are immutable and are replaced; annotations are mutated in place.
 */
public void shiftCodePatch(int pos, int bytesToShift) {
    iterateAndReplace(infopoints, pos, site -> {
        if (site instanceof Call) {
            // Calls must stay Calls so call-specific fields survive the shift.
            Call call = (Call) site;
            return new Call(call.target, site.pcOffset + bytesToShift, call.size, call.direct, call.debugInfo);
        } else {
            return new Infopoint(site.pcOffset + bytesToShift, site.debugInfo, site.reason);
        }
    });
    iterateAndReplace(dataPatches, pos, site -> new DataPatch(site.pcOffset + bytesToShift, site.reference, site.note));
    iterateAndReplace(exceptionHandlers, pos, site -> new ExceptionHandler(site.pcOffset + bytesToShift, site.handlerPos));
    iterateAndReplace(marks, pos, site -> new Mark(site.pcOffset + bytesToShift, site.id));
    if (annotations != null) {
        for (CodeAnnotation annotation : annotations) {
            // Use the accessor rather than the private field, for consistency with
            // every other position read on CodeAnnotation.
            int annotationPos = annotation.getPosition();
            if (pos <= annotationPos) {
                annotation.setPosition(annotationPos + bytesToShift);
            }
        }
    }
}
/**
 * In-place rewrite of {@code sites}: every element whose {@code pcOffset} is at or
 * past {@code pos} is replaced by {@code replacement.apply(element)}; earlier
 * elements are left untouched.
 */
private static <T extends Site> void iterateAndReplace(List<T> sites, int pos, Function<T, T> replacement) {
    for (int index = 0; index < sites.size(); index++) {
        T current = sites.get(index);
        if (current.pcOffset < pos) {
            continue;
        }
        sites.set(index, replacement.apply(current));
    }
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2009, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -173,7 +173,7 @@ public class HexCodeFile {
ps.printf("HexCode %x %s %s%n", startAddress, HexCodeFile.hexCodeString(code), SECTION_DELIM);
for (JumpTable table : jumpTables) {
ps.printf("JumpTable %d %d %d %d %s%n", table.position, table.entrySize, table.low, table.high, SECTION_DELIM);
ps.printf("JumpTable %d %d %d %d %s%n", table.getPosition(), table.entrySize, table.low, table.high, SECTION_DELIM);
}
for (Map.Entry<Integer, List<String>> e : comments.entrySet()) {
@ -247,7 +247,7 @@ public class HexCodeFile {
hcf.jumpTables.add(table);
} else if (a instanceof CodeComment) {
CodeComment comment = (CodeComment) a;
hcf.addComment(comment.position, comment.value);
hcf.addComment(comment.getPosition(), comment.value);
}
}
}

View File

@ -0,0 +1,225 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, Arm Limited. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.graalvm.compiler.core.aarch64.test;
import org.graalvm.compiler.api.directives.GraalDirectives;
import org.graalvm.compiler.lir.LIRInstruction;
import org.graalvm.compiler.lir.aarch64.AArch64ArithmeticOp.ExtendedAddShiftOp;
import org.junit.Test;
import java.util.ArrayDeque;
import java.util.HashSet;
import java.util.Set;
import java.util.function.Predicate;
/**
 * Checks that array accesses of every element kind are compiled using AArch64's
 * extended-register addressing mode: each test runs a snippet and then asserts
 * (via checkLIR/checkLIRforAll) that the expected number of ExtendedAddShiftOp
 * LIR instructions was matched.
 */
public class AArch64ArrayAddressTest extends AArch64MatchRuleTest {
// Matches the LIR op the extended-add-shift match rule should have produced.
private static final Predicate<LIRInstruction> predicate = op -> (op instanceof ExtendedAddShiftOp);
public static byte loadByte(byte[] arr, int n) {
return arr[n];
}
@Test
public void testLoadByte() {
byte[] arr = {3, 4, 5, 6, 7, 8};
test("loadByte", arr, 5);
checkLIR("loadByte", predicate, 1, 1);
}
public static char loadChar(char[] arr, int n) {
return arr[n];
}
@Test
public void testLoadChar() {
char[] arr = {'a', 'b', 'c', 'd', 'e', 'f', 'g'};
test("loadChar", arr, 5);
checkLIR("loadChar", predicate, 1, 1);
}
public static short loadShort(short[] arr, int n) {
return arr[n];
}
@Test
public void testLoadShort() {
short[] arr = {3, 4, 5, 6, 7, 8};
test("loadShort", arr, 5);
checkLIR("loadShort", predicate, 1, 1);
}
public static int loadInt(int[] arr, int n) {
return arr[n];
}
@Test
public void testLoadInt() {
int[] arr = {3, 4, 5, 6, 7, 8};
test("loadInt", arr, 5);
checkLIR("loadInt", predicate, 1, 1);
}
public static long loadLong(long[] arr, int n) {
return arr[n];
}
@Test
public void testLoadLong() {
long[] arr = {3L, 4L, 5L, 6L, 7L, 8L};
test("loadLong", arr, 5);
checkLIR("loadLong", predicate, 1, 1);
}
public static float loadFloat(float[] arr, int n) {
return arr[n];
}
@Test
public void testLoadFloat() {
float[] arr = {3.0F, 4.0F, 5.0F, 6.0F, 7.0F, 8.0F};
test("loadFloat", arr, 5);
checkLIR("loadFloat", predicate, 1, 1);
}
public static double loadDouble(double[] arr, int n) {
return arr[n];
}
@Test
public void testLoadDouble() {
double[] arr = {3.0, 4.0, 5.0, 6.0, 7.0, 8.0};
test("loadDouble", arr, 5);
checkLIR("loadDouble", predicate, 1, 1);
}
public static String loadObject(String[] arr, int n) {
return arr[n];
}
@Test
public void testLoadObject() {
String[] arr = {"ac", "ad", "ew", "asf", "sdad", "aff"};
test("loadObject", arr, 5);
// Object accesses may compile differently (e.g. compressed oops), hence
// checkLIRforAll rather than the positional checkLIR used above.
checkLIRforAll("loadObject", predicate, 1);
}
public static int storeInt(int[] arr, int n) {
arr[n] = n * n;
return arr[n];
}
@Test
public void testStoreInt() {
int[] arr = {3, 4, 5, 6, 7, 8};
test("storeInt", arr, 5);
checkLIRforAll("storeInt", predicate, 1);
}
public static Integer loadAndStoreObject(Integer[] arr, int i) {
if (arr[i] > 0) {
return 0;
}
arr[i] += 3;
return arr[i];
}
@Test
public void testLoadAndStoreObject() {
Integer[] arr = new Integer[]{1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
test("loadAndStoreObject", arr, 2);
checkLIRforAll("loadAndStoreObject", predicate, 2);
}
public static int useArrayInLoop(int[] arr) {
int ret = 0;
for (int i = 0; i < arr.length; i++) {
// opaque() keeps the loop body from being optimized away/vectorized.
ret += GraalDirectives.opaque(arr[i]);
}
return ret;
}
@Test
public void testUseArrayInLoop() {
int[] arr = {1, 2, 3, 4, 5, 6, 7, 8};
test("useArrayInLoop", arr);
checkLIRforAll("useArrayInLoop", predicate, 1);
}
public static int useArrayDeque(ArrayDeque<Integer> ad) {
ad.addFirst(4);
return ad.removeFirst();
}
@Test
public void testUseArrayDeque() {
ArrayDeque<Integer> ad = new ArrayDeque<>();
test("useArrayDeque", ad);
}
// Array load where the index is first narrowed to a byte.
private static class Frame {
int index;
Frame(int index) {
this.index = index;
}
}
private static final Frame[] frameCache = new Frame[256];
private static Frame newFrame(byte data) {
// Mask to an unsigned byte so the index stays within the 256-entry cache.
return frameCache[data & 255];
}
public static int getFrameIndex(int n) {
return newFrame((byte) n).index;
}
@Test
public void testGetFrameIndex() {
for (int i = 0; i < 256; i++) {
frameCache[i] = new Frame(i * i);
}
// 258 wraps to byte value 2, exercising the narrowing path.
test("getFrameIndex", 258);
checkLIRforAll("getFrameIndex", predicate, 1);
}
static Set<Long> allBarcodes = new HashSet<>();
static Set<Long> localBarcodes = new HashSet<>();
public static long useConstReferenceAsBase(long l) {
localBarcodes.add(l);
allBarcodes.add(l);
return l;
}
@Test
public void testUseConstReferenceAsBase() {
test("useConstReferenceAsBase", 2L);
// Second run with a value derived from the first, so the sets are non-empty.
int l = localBarcodes.size() + allBarcodes.size();
test("useConstReferenceAsBase", (long) l);
}
}

View File

@ -0,0 +1,145 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, Arm Limited. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.graalvm.compiler.core.aarch64.test;
import org.graalvm.compiler.lir.LIRInstruction;
import org.graalvm.compiler.lir.aarch64.AArch64ArithmeticOp;
import org.graalvm.compiler.lir.aarch64.AArch64ArithmeticOp.BinaryOp;
import org.junit.Test;
import java.util.function.Predicate;
/**
 * Checks that a bitwise AND/OR/XOR whose one operand is logically negated is
 * matched to the corresponding single AArch64 instruction: BIC (and-not),
 * ORN (or-not) and EON (xor-not). Each fixture method below is compiled and
 * the emitted LIR is scanned for exactly one instruction of the expected name.
 *
 * NOTE(review): the exact source shape of each fixture (operand order, the
 * spelling of NOT as {@code ~x}, {@code x ^ -1} or {@code x ^ 0xFFFFFFFF}) is
 * part of what the match rules are exercised against — do not "simplify" them.
 */
public class AArch64BitwiseLogicalNotTest extends AArch64MatchRuleTest {
    // Expected LIR op names, taken directly from the arithmetic-op enum.
    private static final String BIC = AArch64ArithmeticOp.BIC.name();
    private static final String ORN = AArch64ArithmeticOp.ORN.name();
    private static final String EON = AArch64ArithmeticOp.EON.name();

    /**
     * Runs {@code methodName} with {@code args}, then asserts that exactly one
     * binary LIR instruction named {@code opName} was emitted for it.
     */
    private void test(String methodName, String opName, Object... args) {
        test(methodName, args);
        Predicate<LIRInstruction> predicate = op -> {
            if (op instanceof BinaryOp && op.name().equalsIgnoreCase(opName)) {
                return true;
            }
            return false;
        };
        checkLIR(methodName, predicate, 1);
    }

    // Tests for and not.
    public int andNotInt1(int m, int n) {
        return n & (~m);
    }

    @Test
    public void testAndNotInt1() {
        test("andNotInt1", BIC, 5, 6);
    }

    // XOR with all-ones is an alternative spelling of bitwise NOT.
    public int andNotInt2(int m, int n) {
        return n & (m ^ 0XFFFFFFFF);
    }

    @Test
    public void testAndNotInt2() {
        test("andNotInt2", BIC, 325, -1);
    }

    public long andNotLong(long m, long n) {
        return m & (n ^ -1L);
    }

    @Test
    public void testAndNotLong() {
        test("andNotLong", BIC, 3L, 425L);
    }

    // Tests for or not.
    public int orNotInt(int m, int n) {
        return (n ^ 0XFFFFFFFF) | m;
    }

    @Test
    public void testOrNotInt() {
        test("orNotInt", ORN, -1, Integer.MAX_VALUE);
    }

    public long orNotLong(long m, long n) {
        return m | (~n);
    }

    @Test
    public void testOrNotLong() {
        test("orNotLong", ORN, 23L, -1L);
    }

    // Tests for xor not.
    public int xorNotInt(int m, int n) {
        return (~n) ^ m;
    }

    @Test
    public void testXorNotInt() {
        test("xorNotInt", EON, 4132, 24);
    }

    public long xorNotLong(long m, long n) {
        return m ^ (~n);
    }

    @Test
    public void testXorNotLong() {
        test("xorNotLong", EON, Long.MIN_VALUE, Long.MAX_VALUE);
    }

    // Tests for not xor: ~(a ^ b) is also matched to EON.
    public int notXorInt1(int m, int n) {
        return ~(m ^ n);
    }

    @Test
    public void testNotXorInt1() {
        test("notXorInt1", EON, 235, 98);
    }

    public int notXorInt2(int m, int n) {
        return (m ^ n) ^ 0XFFFFFFFF;
    }

    @Test
    public void testNotXorInt2() {
        test("notXorInt2", EON, 245, 34654);
    }

    public long notXorLong(long m, long n) {
        return ~(m ^ n);
    }

    @Test
    public void testNotXorLong() {
        test("notXorLong", EON, 324L, 235L);
    }
}

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, Arm Limited. All rights reserved.
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2020, Arm Limited. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -67,7 +67,7 @@ public class AArch64ElideL2ITest extends AArch64MatchRuleTest {
@Test
public void testAddWithTwoNarrow() {
test("addWithTwoNarrow", 0x80000000L, 6L);
checkLIR("addWithTwoNarrow", predicate, 1);
checkLIR("addWithTwoNarrow", predicate, 0);
}
public int subSingleL2I(int m, long n) {

View File

@ -0,0 +1,52 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, Arm Limited. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.graalvm.compiler.core.aarch64.test;
import org.graalvm.compiler.lir.LIRInstruction;
import org.junit.Test;
import java.util.function.Predicate;
/**
 * Verifies that {@code (float) Math.sqrt(f)} on a float argument is matched to
 * a single float SQRT instruction, with no float&lt;-&gt;double conversion
 * instructions left in the generated LIR.
 */
public class AArch64FloatSqrtTest extends AArch64MatchRuleTest {
    /** Matches the square-root LIR instruction. */
    private static final Predicate<LIRInstruction> SQRT_PRED = op -> op.name().equals("SQRT");
    /** Matches a float/double conversion LIR instruction. */
    private static final Predicate<LIRInstruction> CONVERT_PRED = op -> op.name().equals("AArch64FloatConvert");

    // Compiled fixture: the widen/sqrt/narrow sequence the match rule folds.
    public float floatSqrt(float f) {
        return (float) Math.sqrt(f);
    }

    // Edge cases: negatives, signed zeros, float extremes, NaN and infinities.
    private static final float[] TEST_VALUES = {-1, 0f, -0f, Float.MAX_VALUE, Float.MIN_NORMAL, Float.MIN_VALUE, Float.NaN, Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY};

    @Test
    public void testFloatSqrt() {
        for (int i = 0; i < TEST_VALUES.length; i++) {
            test("floatSqrt", TEST_VALUES[i]);
            checkLIR("floatSqrt", SQRT_PRED, 1);
            checkLIR("floatSqrt", CONVERT_PRED, 0);
        }
    }
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, Arm Limited. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*

View File

@ -0,0 +1,89 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Arm Limited. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.graalvm.compiler.core.aarch64.test;
import org.graalvm.compiler.lir.LIRInstruction;
import org.graalvm.compiler.lir.aarch64.AArch64ArithmeticOp.BinaryConstOp;
import org.junit.Test;
import java.util.function.Predicate;
/**
 * Checks that a narrow-then-extend sequence on AArch64 is folded away rather
 * than emitted as separate operations.
 *
 * Each {@code l2X2Y} fixture narrows a long to a smaller type and immediately
 * widens it again; the emitted LIR is then scanned for AND-with-constant
 * instructions: the sign-extending merges are expected to need no masking AND
 * (count 0), while the zero-extending char merges keep exactly one (count 1).
 */
public class AArch64MergeNarrowWithExtendTest extends AArch64MatchRuleTest {
    // Matches an AND whose second operand is a constant — the mask a
    // zero-extend would otherwise require.
    private static final Predicate<LIRInstruction> PRED_AND = op -> (op instanceof BinaryConstOp && op.name().toUpperCase().equals("AND"));

    // Edge values: all-ones, zero, mixed bit patterns and the long extremes.
    private static final long[] VALUES = {-1L, 0L, 0x1234567812345678L, 0xFFFFFFFFL, 0x12L, 0x1234L, Long.MIN_VALUE, Long.MAX_VALUE};

    public long l2i2l(long x) {
        return (int) x;
    }

    public long l2s2l(long x) {
        return (short) x;
    }

    public int l2s2i(long x) {
        return (short) x;
    }

    public long l2b2l(long x) {
        return (byte) x;
    }

    public int l2b2i(long x) {
        return (byte) x;
    }

    @Test
    public void testSignedExtendedNarrow() {
        // One entry per fixture; the previous list accidentally named
        // "l2i2l" twice, re-running the same case redundantly.
        String[] testCases = {"l2i2l", "l2s2l", "l2s2i", "l2b2l", "l2b2i"};
        for (String fname : testCases) {
            for (long value : VALUES) {
                test(fname, value);
                checkLIR(fname, PRED_AND, 0);
            }
        }
    }

    // char is Java's only unsigned primitive: narrowing to char then widening
    // is a zero-extend, so exactly one masking AND remains.
    public long l2c2l(long x) {
        return (char) x;
    }

    public int l2c2i(long x) {
        return (char) x;
    }

    @Test
    public void testZeroExtendedNarrow() {
        String[] testCases = {"l2c2l", "l2c2i"};
        for (String fname : testCases) {
            for (long value : VALUES) {
                test(fname, value);
                checkLIR(fname, PRED_AND, 1);
            }
        }
    }
}

View File

@ -0,0 +1,239 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, Arm Limited. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.graalvm.compiler.core.aarch64.test;
import org.graalvm.compiler.lir.LIRInstruction;
import org.junit.Test;
import java.util.function.Predicate;
/**
 * Checks that rotate-left / rotate-right idioms written with shifts and OR
 * (or ADD) are matched to the AArch64 ROR (constant rotate) and RORV
 * (variable rotate) instructions.
 *
 * Note on naming: Java masks shift distances (JLS 15.19), so {@code x << -s}
 * equals {@code x << (32 - s)} for int (and 64 - s for long). A "rol" fixture
 * built on negated/offset shifts therefore rotates *right*, and vice versa —
 * which is why some rol* method names appear in the ror test lists below and
 * the other way around. The exact source shape of each fixture is what the
 * match rules are exercised against; do not "simplify" the expressions.
 */
public class AArch64RotationTest extends AArch64MatchRuleTest {
    // LIR op-name predicates: ROR for constant rotates, RORV for variable
    // rotates, NEG for the negated shift amount a rotate-left expands to.
    private static final Predicate<LIRInstruction> ROR_PRED = op -> op.name().equals("ROR");
    private static final Predicate<LIRInstruction> RORV_PRED = op -> op.name().equals("RORV");
    private static final Predicate<LIRInstruction> NEG_PRED = op -> op.name().equals("NEG");

    // Constant rotate distance used by the *Const fixtures.
    private static final int CONST = 10;

    // Inputs covering sign bits, small bit patterns and the integer extremes.
    private static final int[] intInput = {-1, 0, 0x12, 0x23, 0x34, 0x45, 0xf1, 0xf2, Integer.MAX_VALUE, Integer.MIN_VALUE};
    private static final long[] longInput = {-1, 0, 0x1234, 0x2345, 0x3456, 0xdead, 0xbeaf, Long.MAX_VALUE, Long.MIN_VALUE};

    // ror expander
    public int rorIntC0(int x, int shift) {
        // same as `x >>> shift | x << (0 - shift)`
        return x >>> shift | x << (-shift);
    }

    public int rorIntC32(int x, int shift) {
        return x >>> shift | x << (32 - shift);
    }

    // Negated/offset shift amounts: after masking this rotates *left*.
    public int rorIntC32Add(int x, int shift) {
        return x >>> -shift | x << (32 + shift);
    }

    public long rorLongC0(long x, int shift) {
        return x >>> shift | x << (-shift);
    }

    public long rorLongC64(long x, int shift) {
        return x >>> shift | x << (64 - shift);
    }

    // As with rorIntC32Add: effectively a rotate left for longs.
    public long rorLongC64Add(long x, int shift) {
        return x >>> -shift | x << (64 + shift);
    }

    @Test
    public void testRorExpand() {
        // rolIntC32Add / rolLongC64Add rotate right (see class comment),
        // so they are verified here alongside the ror* fixtures.
        final String[] intCases = {"rorIntC0", "rorIntC32", "rolIntC32Add"};
        for (String name : intCases) {
            for (int shift = 0; shift <= Integer.SIZE; shift++) {
                for (int value : intInput) {
                    test(name, value, shift);
                    checkLIR(name, RORV_PRED, 1);
                }
            }
        }
        final String[] longCases = {"rorLongC0", "rorLongC64", "rolLongC64Add"};
        for (String name : longCases) {
            for (int shift = 0; shift <= Long.SIZE; shift++) {
                for (long value : longInput) {
                    test(name, value, shift);
                    checkLIR(name, RORV_PRED, 1);
                }
            }
        }
    }

    // rol expander
    public int rolIntC0(int x, int shift) {
        return x << shift | x >>> (-shift);
    }

    public int rolIntC32(int x, int shift) {
        return x << shift | x >>> (32 - shift);
    }

    // Negated/offset shift amounts: after masking this rotates *right*.
    public int rolIntC32Add(int x, int shift) {
        return x << -shift | x >>> (32 + shift);
    }

    public long rolLongC0(long x, int shift) {
        return x << shift | x >>> (-shift);
    }

    public long rolLongC64(long x, int shift) {
        return x << shift | x >>> (64 - shift);
    }

    // As with rolIntC32Add: effectively a rotate right for longs.
    public long rolLongC64Add(long x, int shift) {
        return x << -shift | x >>> (64 + shift);
    }

    @Test
    public void testRolExpand() {
        // A variable rotate-left is emitted as a NEG of the shift amount
        // followed by RORV, so both predicates must match exactly once.
        final String[] intCases = {"rolIntC0", "rolIntC32", "rorIntC32Add"};
        for (String name : intCases) {
            for (int shift = 0; shift <= Integer.SIZE; shift++) {
                for (int value : intInput) {
                    test(name, value, shift);
                    checkLIR(name, RORV_PRED, 1);
                    checkLIR(name, NEG_PRED, 1);
                }
            }
        }
        final String[] longCases = {"rolLongC0", "rolLongC64", "rorLongC64Add"};
        for (String name : longCases) {
            for (int shift = 0; shift <= Long.SIZE; shift++) {
                for (long value : longInput) {
                    test(name, value, shift);
                    checkLIR(name, RORV_PRED, 1);
                    checkLIR(name, NEG_PRED, 1);
                }
            }
        }
    }

    // rotation const: every fixture below rotates by the fixed CONST amount,
    // combining the two shifts with either OR or ADD (both forms must match).
    public int rorInt0Const(int x) {
        return x >>> CONST | x << (0 - CONST);
    }

    public int rorInt0ConstAdd(int x) {
        return (x >>> CONST) + (x << (0 - CONST));
    }

    public int rorInt32Const(int x) {
        return x >>> CONST | x << (32 - CONST);
    }

    public int rorInt32ConstAdd(int x) {
        return (x >>> CONST) + (x << (32 - CONST));
    }

    public int rolInt0Const(int x) {
        return x << CONST | x >>> (0 - CONST);
    }

    public int rolInt0ConstAdd(int x) {
        return (x << CONST) + (x >>> (0 - CONST));
    }

    public int rolInt32Const(int x) {
        return x << CONST | x >>> (32 - CONST);
    }

    public int rolInt32ConstAdd(int x) {
        return (x << CONST) + (x >>> (32 - CONST));
    }

    public long rolLong0Const(long x) {
        return x << CONST | x >>> (0 - CONST);
    }

    public long rolLong0ConstAdd(long x) {
        return (x << CONST) + (x >>> (0 - CONST));
    }

    public long rolLong64Const(long x) {
        return x << CONST | x >>> (64 - CONST);
    }

    public long rolLong64ConstAdd(long x) {
        return (x << CONST) + (x >>> (64 - CONST));
    }

    public long rorLong0Const(long x) {
        return x >>> CONST | x << (0 - CONST);
    }

    public long rorLong0ConstAdd(long x) {
        return (x >>> CONST) + (x << (0 - CONST));
    }

    public long rorLong64Const(long x) {
        return x >>> CONST | x << (64 - CONST);
    }

    public long rorLong64ConstAdd(long x) {
        return (x >>> CONST) + (x << (64 - CONST));
    }

    @Test
    public void testRotationConst() {
        // Constant rotations (left or right, OR or ADD combination) all map
        // to a single ROR instruction.
        final String[] intCases = {"rolInt0Const",
                        "rolInt0ConstAdd",
                        "rolInt32Const",
                        "rolInt32ConstAdd",
                        "rorInt0Const",
                        "rorInt0ConstAdd",
                        "rorInt32Const",
                        "rorInt32ConstAdd"};
        for (String name : intCases) {
            for (int value : intInput) {
                test(name, value);
                checkLIR(name, ROR_PRED, 1);
            }
        }
        final String[] longCases = {"rolLong0Const",
                        "rolLong0ConstAdd",
                        "rolLong64Const",
                        "rolLong64ConstAdd",
                        "rorLong0Const",
                        "rorLong0ConstAdd",
                        "rorLong64Const",
                        "rorLong64ConstAdd"};
        for (String name : longCases) {
            for (long value : longInput) {
                test(name, value);
                checkLIR(name, ROR_PRED, 1);
            }
        }
    }
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -26,12 +26,14 @@
package org.graalvm.compiler.core.aarch64;
import org.graalvm.compiler.asm.aarch64.AArch64Address;
import org.graalvm.compiler.asm.aarch64.AArch64Address.AddressingMode;
import org.graalvm.compiler.core.common.LIRKind;
import org.graalvm.compiler.core.common.NumUtil;
import org.graalvm.compiler.core.common.type.Stamp;
import org.graalvm.compiler.nodes.ValueNode;
import org.graalvm.compiler.nodes.calc.AddNode;
import org.graalvm.compiler.nodes.calc.LeftShiftNode;
import org.graalvm.compiler.nodes.calc.ZeroExtendNode;
import org.graalvm.compiler.nodes.memory.address.AddressNode;
import org.graalvm.compiler.nodes.memory.address.OffsetAddressNode;
import org.graalvm.compiler.phases.common.AddressLoweringByUsePhase;
@ -41,9 +43,15 @@ import jdk.vm.ci.meta.JavaConstant;
public class AArch64AddressLoweringByUse extends AddressLoweringByUsePhase.AddressLoweringByUse {
private AArch64LIRKindTool kindtool;
private boolean supportsDerivedReference;
public AArch64AddressLoweringByUse(AArch64LIRKindTool kindtool) {
this(kindtool, true);
}
public AArch64AddressLoweringByUse(AArch64LIRKindTool kindtool, boolean supportsDerivedReference) {
this.kindtool = kindtool;
this.supportsDerivedReference = supportsDerivedReference;
}
@Override
@ -76,8 +84,8 @@ public class AArch64AddressLoweringByUse extends AddressLoweringByUsePhase.Addre
return base.graph().unique(ret);
}
protected boolean improve(AArch64Kind kind, AArch64AddressNode ret) {
AArch64Address.AddressingMode mode = ret.getAddressingMode();
private boolean improve(AArch64Kind kind, AArch64AddressNode ret) {
AddressingMode mode = ret.getAddressingMode();
// if we have already set a displacement or set to base only mode then we are done
if (isDisplacementMode(mode) || isBaseOnlyMode(mode)) {
return false;
@ -128,8 +136,8 @@ public class AArch64AddressLoweringByUse extends AddressLoweringByUsePhase.Addre
ValueNode child = add.getX();
if (child.isJavaConstant() && child.asJavaConstant().getJavaKind().isNumericInteger()) {
long newDisp = disp + child.asJavaConstant().asLong();
AArch64Address.AddressingMode newMode = immediateMode(kind, newDisp);
if (newMode != AArch64Address.AddressingMode.REGISTER_OFFSET) {
AddressingMode newMode = immediateMode(kind, newDisp);
if (newMode != AddressingMode.REGISTER_OFFSET) {
disp = newDisp;
mode = newMode;
base = add.getY();
@ -140,8 +148,8 @@ public class AArch64AddressLoweringByUse extends AddressLoweringByUsePhase.Addre
child = add.getY();
if (child.isJavaConstant() && child.asJavaConstant().getJavaKind().isNumericInteger()) {
long newDisp = disp + child.asJavaConstant().asLong();
AArch64Address.AddressingMode newMode = immediateMode(kind, newDisp);
if (newMode != AArch64Address.AddressingMode.REGISTER_OFFSET) {
AddressingMode newMode = immediateMode(kind, newDisp);
if (newMode != AddressingMode.REGISTER_OFFSET) {
disp = newDisp;
mode = newMode;
base = add.getX();
@ -159,12 +167,54 @@ public class AArch64AddressLoweringByUse extends AddressLoweringByUsePhase.Addre
} else {
// reset to base register only
ret.setIndex(null);
ret.setDisplacement(0, 1, AArch64Address.AddressingMode.BASE_REGISTER_ONLY);
ret.setDisplacement(0, 1, AddressingMode.BASE_REGISTER_ONLY);
}
return true;
}
}
}
// We try to convert (OffsetAddress base (Add (LeftShift (Ext i) k) #imm))
// to (AArch64AddressNode (AArch64PointerAdd (base (LeftShift (Ext i) k)) #imm)
if (supportsDerivedReference && index != null && index instanceof AddNode && index.getStackKind().isNumericInteger()) {
ValueNode x = ((AddNode) index).getX();
ValueNode y = ((AddNode) index).getY();
ValueNode objHeadOffset = null;
ValueNode scaledIndex = null;
if (x.isConstant()) {
objHeadOffset = x;
scaledIndex = y;
} else if (y.isConstant()) {
objHeadOffset = y;
scaledIndex = x;
}
if (scaledIndex == null || objHeadOffset == null) {
return false;
}
ZeroExtendNode wordIndex = null;
if (scaledIndex instanceof LeftShiftNode) {
ValueNode var = ((LeftShiftNode) scaledIndex).getX();
ValueNode amount = ((LeftShiftNode) scaledIndex).getY();
if (amount.isConstant() && var instanceof ZeroExtendNode) {
int s = amount.asJavaConstant().asInt();
if (s >= 0 && s <= 4) {
wordIndex = (ZeroExtendNode) var;
}
}
} else if (scaledIndex instanceof ZeroExtendNode) {
wordIndex = (ZeroExtendNode) scaledIndex;
}
if (wordIndex != null) {
AArch64PointerAddNode addP = base.graph().unique(new AArch64PointerAddNode(base, scaledIndex));
ret.setBase(addP);
ret.setIndex(objHeadOffset);
return true;
}
}
// nope cannot improve this any more
return false;
}
@ -180,7 +230,7 @@ public class AArch64AddressLoweringByUse extends AddressLoweringByUsePhase.Addre
return (AArch64Kind) lirKind.getPlatformKind();
}
private static AArch64Address.AddressingMode immediateMode(AArch64Kind kind, long value) {
private static AddressingMode immediateMode(AArch64Kind kind, long value) {
if (kind != null) {
int size = kind.getSizeInBytes();
// this next test should never really fail
@ -189,32 +239,32 @@ public class AArch64AddressLoweringByUse extends AddressLoweringByUsePhase.Addre
// assert value % size == 0
// we can try for a 12 bit scaled offset
if (NumUtil.isUnsignedNbit(12, encodedValue)) {
return AArch64Address.AddressingMode.IMMEDIATE_SCALED;
return AddressingMode.IMMEDIATE_SCALED;
}
}
}
// we can try for a 9 bit unscaled offset
if (NumUtil.isSignedNbit(9, value)) {
return AArch64Address.AddressingMode.IMMEDIATE_UNSCALED;
return AddressingMode.IMMEDIATE_UNSCALED;
}
// nope this index needs to be passed via offset register
return AArch64Address.AddressingMode.REGISTER_OFFSET;
return AddressingMode.REGISTER_OFFSET;
}
private static int computeScaleFactor(AArch64Kind kind, AArch64Address.AddressingMode mode) {
if (mode == AArch64Address.AddressingMode.IMMEDIATE_SCALED) {
private static int computeScaleFactor(AArch64Kind kind, AddressingMode mode) {
if (mode == AddressingMode.IMMEDIATE_SCALED) {
return kind.getSizeInBytes();
}
return 1;
}
boolean isBaseOnlyMode(AArch64Address.AddressingMode addressingMode) {
return addressingMode == AArch64Address.AddressingMode.BASE_REGISTER_ONLY;
boolean isBaseOnlyMode(AddressingMode addressingMode) {
return addressingMode == AddressingMode.BASE_REGISTER_ONLY;
}
private static boolean isDisplacementMode(AArch64Address.AddressingMode addressingMode) {
private static boolean isDisplacementMode(AddressingMode addressingMode) {
switch (addressingMode) {
case IMMEDIATE_POST_INDEXED:
case IMMEDIATE_PRE_INDEXED:

View File

@ -436,7 +436,8 @@ public class AArch64ArithmeticLIRGenerator extends ArithmeticLIRGenerator implem
@Override
public Value emitMathSqrt(Value input) {
assert input.getPlatformKind() == AArch64Kind.DOUBLE;
assert input.getPlatformKind() == AArch64Kind.DOUBLE ||
input.getPlatformKind() == AArch64Kind.SINGLE;
return emitUnary(AArch64ArithmeticOp.SQRT, input);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,19 +25,18 @@
package org.graalvm.compiler.core.aarch64;
import jdk.vm.ci.aarch64.AArch64Kind;
import jdk.vm.ci.code.CodeUtil;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.Value;
import jdk.internal.vm.compiler.collections.EconomicMap;
import jdk.internal.vm.compiler.collections.Equivalence;
import org.graalvm.compiler.asm.aarch64.AArch64Assembler.ExtendType;
import org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler;
import org.graalvm.compiler.core.common.LIRKind;
import org.graalvm.compiler.core.common.calc.CanonicalCondition;
import org.graalvm.compiler.core.common.calc.FloatConvert;
import org.graalvm.compiler.core.gen.NodeMatchRules;
import org.graalvm.compiler.core.match.ComplexMatchResult;
import org.graalvm.compiler.core.match.MatchRule;
import org.graalvm.compiler.core.match.MatchableNode;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.lir.LIRFrameState;
import org.graalvm.compiler.lir.LabelRef;
import org.graalvm.compiler.lir.Variable;
@ -54,6 +53,7 @@ import org.graalvm.compiler.nodes.ValueNode;
import org.graalvm.compiler.nodes.calc.AddNode;
import org.graalvm.compiler.nodes.calc.AndNode;
import org.graalvm.compiler.nodes.calc.BinaryNode;
import org.graalvm.compiler.nodes.calc.FloatConvertNode;
import org.graalvm.compiler.nodes.calc.IntegerLessThanNode;
import org.graalvm.compiler.nodes.calc.LeftShiftNode;
import org.graalvm.compiler.nodes.calc.MulNode;
@ -62,16 +62,26 @@ import org.graalvm.compiler.nodes.calc.NegateNode;
import org.graalvm.compiler.nodes.calc.NotNode;
import org.graalvm.compiler.nodes.calc.OrNode;
import org.graalvm.compiler.nodes.calc.RightShiftNode;
import org.graalvm.compiler.nodes.calc.SignExtendNode;
import org.graalvm.compiler.nodes.calc.SubNode;
import org.graalvm.compiler.nodes.calc.UnaryNode;
import org.graalvm.compiler.nodes.calc.UnsignedRightShiftNode;
import org.graalvm.compiler.nodes.calc.XorNode;
import org.graalvm.compiler.nodes.memory.Access;
import org.graalvm.compiler.nodes.calc.ZeroExtendNode;
import org.graalvm.compiler.nodes.memory.MemoryAccess;
import jdk.vm.ci.aarch64.AArch64Kind;
import jdk.vm.ci.code.CodeUtil;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.Value;
@MatchableNode(nodeClass = AArch64PointerAddNode.class, inputs = {"base", "offset"})
public class AArch64NodeMatchRules extends NodeMatchRules {
private static final EconomicMap<Class<? extends BinaryNode>, AArch64ArithmeticOp> binaryOpMap;
private static final EconomicMap<Class<? extends BinaryNode>, AArch64BitFieldOp.BitFieldOpCode> bitFieldOpMap;
private static final EconomicMap<Class<? extends BinaryNode>, AArch64MacroAssembler.ShiftType> shiftTypeMap;
private static final EconomicMap<Class<? extends BinaryNode>, AArch64ArithmeticOp> logicalNotOpMap;
static {
binaryOpMap = EconomicMap.create(Equivalence.IDENTITY, 9);
@ -89,6 +99,11 @@ public class AArch64NodeMatchRules extends NodeMatchRules {
bitFieldOpMap.put(UnsignedRightShiftNode.class, AArch64BitFieldOp.BitFieldOpCode.UBFX);
bitFieldOpMap.put(LeftShiftNode.class, AArch64BitFieldOp.BitFieldOpCode.UBFIZ);
logicalNotOpMap = EconomicMap.create(Equivalence.IDENTITY, 3);
logicalNotOpMap.put(AndNode.class, AArch64ArithmeticOp.BIC);
logicalNotOpMap.put(OrNode.class, AArch64ArithmeticOp.ORN);
logicalNotOpMap.put(XorNode.class, AArch64ArithmeticOp.EON);
shiftTypeMap = EconomicMap.create(Equivalence.IDENTITY, 3);
shiftTypeMap.put(LeftShiftNode.class, AArch64MacroAssembler.ShiftType.LSL);
shiftTypeMap.put(RightShiftNode.class, AArch64MacroAssembler.ShiftType.ASR);
@ -99,15 +114,31 @@ public class AArch64NodeMatchRules extends NodeMatchRules {
super(gen);
}
protected LIRFrameState getState(Access access) {
protected LIRFrameState getState(MemoryAccess access) {
if (access instanceof DeoptimizingNode) {
return state((DeoptimizingNode) access);
}
return null;
}
protected AArch64Kind getMemoryKind(Access access) {
return (AArch64Kind) gen.getLIRKind(access.asNode().stamp(NodeView.DEFAULT)).getPlatformKind();
protected AArch64Kind getMemoryKind(MemoryAccess access) {
return (AArch64Kind) gen.getLIRKind(((ValueNode) access).stamp(NodeView.DEFAULT)).getPlatformKind();
}
private static ExtendType getZeroExtendType(int fromBits) {
switch (fromBits) {
case Byte.SIZE:
return ExtendType.UXTB;
case Short.SIZE:
return ExtendType.UXTH;
case Integer.SIZE:
return ExtendType.UXTW;
case Long.SIZE:
return ExtendType.UXTX;
default:
GraalError.shouldNotReachHere("extended from " + fromBits + "bits is not supported!");
return null;
}
}
private AllocatableValue moveSp(AllocatableValue value) {
@ -127,8 +158,7 @@ public class AArch64NodeMatchRules extends NodeMatchRules {
};
}
private ComplexMatchResult emitBinaryShift(AArch64ArithmeticOp op, ValueNode value, BinaryNode shift,
boolean isShiftNot) {
private ComplexMatchResult emitBinaryShift(AArch64ArithmeticOp op, ValueNode value, BinaryNode shift) {
AArch64MacroAssembler.ShiftType shiftType = shiftTypeMap.get(shift.getClass());
assert shiftType != null;
assert value.getStackKind().isNumericInteger();
@ -142,7 +172,7 @@ public class AArch64NodeMatchRules extends NodeMatchRules {
AllocatableValue x = moveSp(gen.asAllocatable(a));
AllocatableValue y = moveSp(gen.asAllocatable(b));
int shiftAmount = shift.getY().asJavaConstant().asInt();
gen.append(new AArch64ArithmeticOp.BinaryShiftOp(op, result, x, y, shiftType, shiftAmount, isShiftNot));
gen.append(new AArch64ArithmeticOp.BinaryShiftOp(op, result, x, y, shiftType, shiftAmount));
return result;
};
}
@ -163,6 +193,46 @@ public class AArch64NodeMatchRules extends NodeMatchRules {
return narrow.getInputBits() == 64 && narrow.getResultBits() == 32;
}
@MatchRule("(AArch64PointerAdd=addP base ZeroExtend)")
@MatchRule("(AArch64PointerAdd=addP base (LeftShift ZeroExtend Constant))")
public ComplexMatchResult extendedPointerAddShift(AArch64PointerAddNode addP) {
ValueNode offset = addP.getOffset();
ZeroExtendNode zeroExtend;
int shiftNum;
if (offset instanceof ZeroExtendNode) {
zeroExtend = (ZeroExtendNode) offset;
shiftNum = 0;
} else {
LeftShiftNode shift = (LeftShiftNode) offset;
zeroExtend = (ZeroExtendNode) shift.getX();
shiftNum = shift.getY().asJavaConstant().asInt();
}
int fromBits = zeroExtend.getInputBits();
int toBits = zeroExtend.getResultBits();
if (toBits != 64) {
return null;
}
assert fromBits <= toBits;
ExtendType extendType = getZeroExtendType(fromBits);
if (shiftNum >= 0 && shiftNum <= 4) {
ValueNode base = addP.getBase();
return builder -> {
AllocatableValue x = gen.asAllocatable(operand(base));
AllocatableValue y = gen.asAllocatable(operand(zeroExtend.getValue()));
AllocatableValue baseReference = LIRKind.derivedBaseFromValue(x);
LIRKind kind = LIRKind.combineDerived(gen.getLIRKind(addP.stamp(NodeView.DEFAULT)),
baseReference, null);
Variable result = gen.newVariable(kind);
gen.append(new AArch64ArithmeticOp.ExtendedAddShiftOp(result, x, moveSp(y),
extendType, shiftNum));
return result;
};
}
return null;
}
@MatchRule("(And (UnsignedRightShift=shift a Constant=b) Constant=c)")
@MatchRule("(LeftShift=shift (And a Constant=c) Constant=b)")
public ComplexMatchResult unsignedBitField(BinaryNode shift, ValueNode a, ConstantNode b, ConstantNode c) {
@ -197,6 +267,60 @@ public class AArch64NodeMatchRules extends NodeMatchRules {
return emitBitField(op, a, distance, width);
}
@MatchRule("(Or=op (LeftShift=x src Constant=shiftAmt1) (UnsignedRightShift src Constant=shiftAmt2))")
@MatchRule("(Or=op (UnsignedRightShift=x src Constant=shiftAmt1) (LeftShift src Constant=shiftAmt2))")
@MatchRule("(Add=op (LeftShift=x src Constant=shiftAmt1) (UnsignedRightShift src Constant=shiftAmt2))")
@MatchRule("(Add=op (UnsignedRightShift=x src Constant=shiftAmt1) (LeftShift src Constant=shiftAmt2))")
public ComplexMatchResult rotationConstant(ValueNode op, ValueNode x, ValueNode src, ConstantNode shiftAmt1, ConstantNode shiftAmt2) {
assert src.getStackKind().isNumericInteger();
assert shiftAmt1.getStackKind().getBitCount() == 32;
assert shiftAmt2.getStackKind().getBitCount() == 32;
int shift1 = shiftAmt1.asJavaConstant().asInt();
int shift2 = shiftAmt2.asJavaConstant().asInt();
if (op instanceof AddNode && (0 == shift1 || 0 == shift2)) {
return null;
}
if ((0 == shift1 + shift2) || (src.getStackKind().getBitCount() == shift1 + shift2)) {
return builder -> {
Value a = operand(src);
Value b = x instanceof LeftShiftNode ? operand(shiftAmt2) : operand(shiftAmt1);
return getArithmeticLIRGenerator().emitBinary(LIRKind.combine(a, b), AArch64ArithmeticOp.ROR, false, a, b);
};
}
return null;
}
@MatchRule("(Or (LeftShift=x src shiftAmount) (UnsignedRightShift src (Sub=y Constant shiftAmount)))")
@MatchRule("(Or (UnsignedRightShift=x src shiftAmount) (LeftShift src (Sub=y Constant shiftAmount)))")
@MatchRule("(Or (LeftShift=x src (Negate shiftAmount)) (UnsignedRightShift src (Add=y Constant shiftAmount)))")
@MatchRule("(Or (UnsignedRightShift=x src (Negate shiftAmount)) (LeftShift src (Add=y Constant shiftAmount)))")
@MatchRule("(Or (LeftShift=x src shiftAmount) (UnsignedRightShift src (Negate=y shiftAmount)))")
@MatchRule("(Or (UnsignedRightShift=x src shiftAmount) (LeftShift src (Negate=y shiftAmount)))")
public ComplexMatchResult rotationExpander(ValueNode src, ValueNode shiftAmount, ValueNode x, ValueNode y) {
assert src.getStackKind().isNumericInteger();
assert shiftAmount.getStackKind().getBitCount() == 32;
if (y instanceof SubNode || y instanceof AddNode) {
BinaryNode binary = (BinaryNode) y;
ConstantNode delta = (ConstantNode) (binary.getX() instanceof ConstantNode ? binary.getX() : binary.getY());
if (delta.asJavaConstant().asInt() != src.getStackKind().getBitCount()) {
return null;
}
}
return builder -> {
Value a = operand(src);
Value b;
if (y instanceof AddNode) {
b = x instanceof LeftShiftNode ? operand(shiftAmount) : getArithmeticLIRGenerator().emitNegate(operand(shiftAmount));
} else {
b = x instanceof LeftShiftNode ? getArithmeticLIRGenerator().emitNegate(operand(shiftAmount)) : operand(shiftAmount);
}
return getArithmeticLIRGenerator().emitBinary(LIRKind.combine(a, b), AArch64ArithmeticOp.RORV, false, a, b);
};
}
@MatchRule("(Add=binary a (LeftShift=shift b Constant))")
@MatchRule("(Add=binary a (RightShift=shift b Constant))")
@MatchRule("(Add=binary a (UnsignedRightShift=shift b Constant))")
@ -206,7 +330,7 @@ public class AArch64NodeMatchRules extends NodeMatchRules {
public ComplexMatchResult addSubShift(BinaryNode binary, ValueNode a, BinaryNode shift) {
AArch64ArithmeticOp op = binaryOpMap.get(binary.getClass());
assert op != null;
return emitBinaryShift(op, a, shift, false);
return emitBinaryShift(op, a, shift);
}
@MatchRule("(And=binary a (LeftShift=shift b Constant))")
@ -228,11 +352,44 @@ public class AArch64NodeMatchRules extends NodeMatchRules {
@MatchRule("(Xor=binary a (Not (RightShift=shift b Constant)))")
@MatchRule("(Xor=binary a (Not (UnsignedRightShift=shift b Constant)))")
public ComplexMatchResult logicShift(BinaryNode binary, ValueNode a, BinaryNode shift) {
AArch64ArithmeticOp op = binaryOpMap.get(binary.getClass());
assert op != null;
AArch64ArithmeticOp op;
ValueNode operand = binary.getX() == a ? binary.getY() : binary.getX();
boolean isShiftNot = operand instanceof NotNode;
return emitBinaryShift(op, a, shift, isShiftNot);
if (operand instanceof NotNode) {
op = logicalNotOpMap.get(binary.getClass());
} else {
op = binaryOpMap.get(binary.getClass());
}
assert op != null;
return emitBinaryShift(op, a, shift);
}
@MatchRule("(And=logic value1 (Not=not value2))")
@MatchRule("(Or=logic value1 (Not=not value2))")
@MatchRule("(Xor=logic value1 (Not=not value2))")
public ComplexMatchResult bitwiseLogicNot(BinaryNode logic, NotNode not) {
assert logic.getStackKind().isNumericInteger();
AArch64ArithmeticOp op = logicalNotOpMap.get(logic.getClass());
assert op != null;
ValueNode src1 = logic.getX() == not ? logic.getY() : logic.getX();
ValueNode src2 = not.getValue();
return builder -> {
Value a = operand(src1);
Value b = operand(src2);
LIRKind resultKind = LIRKind.combine(a, b);
return getArithmeticLIRGenerator().emitBinary(resultKind, op, false, a, b);
};
}
@MatchRule("(Not=not (Xor value1 value2))")
public ComplexMatchResult bitwiseNotXor(NotNode not) {
assert not.getStackKind().isNumericInteger();
return builder -> {
XorNode xor = (XorNode) not.getValue();
Value a = operand(xor.getX());
Value b = operand(xor.getY());
LIRKind resultKind = LIRKind.combine(a, b);
return getArithmeticLIRGenerator().emitBinary(resultKind, AArch64ArithmeticOp.EON, false, a, b);
};
}
@MatchRule("(Add=binary (Mul (SignExtend a) (SignExtend b)) c)")
@ -385,6 +542,28 @@ public class AArch64NodeMatchRules extends NodeMatchRules {
return null;
}
@MatchRule("(FloatConvert=a (Sqrt (FloatConvert=b c)))")
public ComplexMatchResult floatSqrt(FloatConvertNode a, FloatConvertNode b, ValueNode c) {
if (c.getStackKind().isNumericFloat() && a.getStackKind().isNumericFloat()) {
if (a.getFloatConvert() == FloatConvert.D2F && b.getFloatConvert() == FloatConvert.F2D) {
return builder -> getArithmeticLIRGenerator().emitMathSqrt(operand(c));
}
}
return null;
}
@MatchRule("(SignExtend=extend (Narrow value))")
@MatchRule("(ZeroExtend=extend (Narrow value))")
public ComplexMatchResult mergeNarrowExtend(UnaryNode extend, ValueNode value) {
if (extend instanceof SignExtendNode) {
SignExtendNode sxt = (SignExtendNode) extend;
return builder -> getArithmeticLIRGenerator().emitSignExtend(operand(value), sxt.getInputBits(), sxt.getResultBits());
}
assert extend instanceof ZeroExtendNode;
ZeroExtendNode zxt = (ZeroExtendNode) extend;
return builder -> getArithmeticLIRGenerator().emitZeroExtend(operand(value), zxt.getInputBits(), zxt.getResultBits());
}
@Override
public AArch64LIRGenerator getLIRGeneratorTool() {
return (AArch64LIRGenerator) gen;

View File

@ -0,0 +1,83 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, Arm Limited. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.graalvm.compiler.core.aarch64;
import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_1;
import static org.graalvm.compiler.nodeinfo.NodeSize.SIZE_1;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.Value;
import org.graalvm.compiler.core.common.LIRKind;
import org.graalvm.compiler.core.common.type.AbstractPointerStamp;
import org.graalvm.compiler.core.common.type.IntegerStamp;
import org.graalvm.compiler.core.common.type.StampFactory;
import org.graalvm.compiler.graph.NodeClass;
import org.graalvm.compiler.lir.aarch64.AArch64ArithmeticOp;
import org.graalvm.compiler.lir.gen.ArithmeticLIRGeneratorTool;
import org.graalvm.compiler.lir.gen.LIRGeneratorTool;
import org.graalvm.compiler.nodeinfo.NodeInfo;
import org.graalvm.compiler.nodes.NodeView;
import org.graalvm.compiler.nodes.ValueNode;
import org.graalvm.compiler.nodes.calc.FloatingNode;
import org.graalvm.compiler.nodes.spi.ArithmeticLIRLowerable;
import org.graalvm.compiler.nodes.spi.NodeLIRBuilderTool;
/**
 * A floating node representing a pointer-plus-integer-offset addition that is lowered
 * directly to an AArch64 ADD LIR operation, preserving the derived-reference
 * information of the base pointer.
 */
@NodeInfo(nameTemplate = "AArch64PointerAdd", cycles = CYCLES_1, size = SIZE_1)
public class AArch64PointerAddNode extends FloatingNode implements ArithmeticLIRLowerable {

    public static final NodeClass<AArch64PointerAddNode> TYPE = NodeClass.create(AArch64PointerAddNode.class);

    // Base pointer (or a 64-bit integer treated as an address) of the addition.
    @Input ValueNode base;
    // Integer offset added to the base.
    @Input ValueNode offset;

    /**
     * @param base the base address; must have a pointer stamp or be a 64-bit integer
     * @param offset the integer offset to add
     */
    public AArch64PointerAddNode(ValueNode base, ValueNode offset) {
        super(TYPE, StampFactory.pointer());
        this.base = base;
        this.offset = offset;
        assert base != null && (base.stamp(NodeView.DEFAULT) instanceof AbstractPointerStamp ||
                        IntegerStamp.getBits(base.stamp(NodeView.DEFAULT)) == 64);
        assert offset != null && offset.getStackKind().isNumericInteger();
    }

    public ValueNode getBase() {
        return base;
    }

    public ValueNode getOffset() {
        return offset;
    }

    @Override
    public void generate(NodeLIRBuilderTool builder, ArithmeticLIRGeneratorTool gen) {
        LIRGeneratorTool tool = builder.getLIRGeneratorTool();
        Value x = builder.operand(base);
        Value y = builder.operand(offset);
        AllocatableValue baseValue = tool.asAllocatable(x);
        // Track the base reference so the result keeps its derived-pointer kind for GC maps.
        AllocatableValue baseReference = LIRKind.derivedBaseFromValue(baseValue);
        LIRKind kind = LIRKind.combineDerived(tool.getLIRKind(stamp(NodeView.DEFAULT)), baseReference, null);
        builder.setResult(this, ((AArch64ArithmeticLIRGenerator) gen).emitBinary(kind, AArch64ArithmeticOp.ADD, true, x, y));
    }
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -49,7 +49,6 @@ import jdk.internal.vm.compiler.word.LocationIdentity;
* AArch64-specific subclass of ReadNode that knows how to merge ZeroExtend and SignExtend into the
* read.
*/
@NodeInfo
public class AArch64ReadNode extends ReadNode {
public static final NodeClass<AArch64ReadNode> TYPE = NodeClass.create(AArch64ReadNode.class);
@ -73,7 +72,7 @@ public class AArch64ReadNode extends ReadNode {
}
@Override
public Stamp getAccessStamp() {
public Stamp getAccessStamp(NodeView view) {
return accessStamp;
}
@ -89,7 +88,7 @@ public class AArch64ReadNode extends ReadNode {
ValueNode usage = (ValueNode) readNode.usages().first();
boolean isSigned = usage instanceof SignExtendNode;
IntegerStamp accessStamp = ((IntegerStamp) readNode.getAccessStamp());
IntegerStamp accessStamp = ((IntegerStamp) readNode.getAccessStamp(NodeView.DEFAULT));
AddressNode address = readNode.getAddress();
LocationIdentity location = readNode.getLocationIdentity();

View File

@ -46,6 +46,11 @@ public class AArch64SuitesCreator extends DefaultSuitesCreator {
this.insertReadReplacementBeforePositions = insertReadReplacementBeforePositions;
}
/**
 * Creates a suites creator without explicit compilation plugins (delegates to the
 * single-argument superclass constructor).
 *
 * @param insertReadReplacementBeforePositions phases before which the read replacement
 *            phase should be inserted
 */
public AArch64SuitesCreator(CompilerConfiguration compilerConfiguration, List<Class<? extends Phase>> insertReadReplacementBeforePositions) {
    super(compilerConfiguration);
    this.insertReadReplacementBeforePositions = insertReadReplacementBeforePositions;
}
@Override
public Suites createSuites(OptionValues options) {
Suites suites = super.createSuites(options);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,8 +31,11 @@ import org.graalvm.compiler.core.test.MatchRuleTest;
import org.graalvm.compiler.lir.LIR;
import org.graalvm.compiler.lir.LIRInstruction;
import org.graalvm.compiler.lir.amd64.AMD64Binary;
import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer.MemoryConstOp;
import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer.ConstOp;
import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer.MemoryConstOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CmpBranchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CmpConstBranchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CmpDataBranchOp;
import org.graalvm.compiler.lir.amd64.AMD64Unary;
import org.junit.Before;
import org.junit.Test;
@ -68,6 +71,10 @@ public class AMD64MatchRuleTest extends MatchRuleTest {
assertFalse("MemoryConstOp expected only once in first block", found);
found = true;
}
if (ins instanceof CmpConstBranchOp || ins instanceof CmpBranchOp || ins instanceof CmpDataBranchOp) {
assertFalse("CMP expected only once in first block", found);
found = true;
}
}
assertTrue("Memory compare must be in the LIR", found);
}
@ -129,6 +136,10 @@ public class AMD64MatchRuleTest extends MatchRuleTest {
assertFalse("CMP expected only once in first block", found);
found = true;
}
if (ins instanceof CmpConstBranchOp || ins instanceof CmpBranchOp || ins instanceof CmpDataBranchOp) {
assertFalse("CMP expected only once in first block", found);
found = true;
}
}
assertTrue("CMP must be in the LIR", found);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -93,10 +93,9 @@ public class AMD64AddressLowering extends AddressLowering {
LeftShiftNode shift = (LeftShiftNode) ret.getIndex();
if (shift.getY().isConstant()) {
int amount = ret.getScale().log2 + shift.getY().asJavaConstant().asInt();
Scale scale = Scale.fromShift(amount);
if (scale != null) {
if (Scale.isScaleShiftSupported(amount)) {
ret.setIndex(shift.getX());
ret.setScale(scale);
ret.setScale(Scale.fromShift(amount));
return true;
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2009, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -794,8 +794,9 @@ public class AMD64ArithmeticLIRGenerator extends ArithmeticLIRGenerator implemen
* Conversions between integer to floating point types require moves between CPU and FPU
* registers.
*/
AMD64Kind fromKind = (AMD64Kind) from.getPlatformKind();
switch ((AMD64Kind) to.getPlatformKind()) {
AMD64Kind fromKind = scalarKind((AMD64Kind) from.getPlatformKind());
AMD64Kind toKind = scalarKind((AMD64Kind) to.getPlatformKind());
switch (toKind) {
case DWORD:
switch (fromKind) {
case SINGLE:
@ -821,7 +822,21 @@ public class AMD64ArithmeticLIRGenerator extends ArithmeticLIRGenerator implemen
}
break;
}
throw GraalError.shouldNotReachHere();
throw GraalError.shouldNotReachHere(toKind + " " + fromKind);
}
/**
 * Maps a vector XMM kind to the scalar XMM kind of the same element size (SINGLE or
 * DOUBLE); scalar and integer kinds are returned unchanged. Used so integer/FP
 * conversions of values held in vector-register kinds are emitted as scalar operations.
 *
 * @param kind the platform kind to normalize
 * @return {@code kind} itself, or the equally sized scalar XMM kind
 */
private static AMD64Kind scalarKind(AMD64Kind kind) {
    if (kind.isXMM() && kind.getVectorLength() > 1) {
        if (kind.getSizeInBytes() == AMD64Kind.SINGLE.getSizeInBytes()) {
            return AMD64Kind.SINGLE;
        } else if (kind.getSizeInBytes() == AMD64Kind.DOUBLE.getSizeInBytes()) {
            return AMD64Kind.DOUBLE;
        } else {
            // shouldNotReachHere always throws; the explicit 'throw' (matching the idiom
            // used elsewhere in this file) makes that clear to readers and to static
            // analysis, instead of appearing to fall through and return a vector kind.
            throw GraalError.shouldNotReachHere("no equal size scalar kind for " + kind);
        }
    }
    return kind;
}
@Override
@ -1263,11 +1278,15 @@ public class AMD64ArithmeticLIRGenerator extends ArithmeticLIRGenerator implemen
}
}
private boolean mustReplaceNullWithNullRegister(Constant nullConstant) {
public boolean mustReplaceNullWithNullRegister(Constant nullConstant) {
/* Uncompressed null pointers only */
return nullRegisterValue != null && JavaConstant.NULL_POINTER.equals(nullConstant);
}
/**
 * Returns the register value that holds the null pointer, used when a null constant
 * must be materialized in a register for comparison; {@code null} when no such
 * register is configured.
 */
public AllocatableValue getNullRegisterValue() {
    return nullRegisterValue;
}
@Override
public void emitCompareOp(AMD64Kind cmpKind, Variable left, Value right) {
OperandSize size;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2009, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,7 @@ package org.graalvm.compiler.core.amd64;
import static jdk.vm.ci.code.ValueUtil.asRegister;
import static jdk.vm.ci.code.ValueUtil.isAllocatableValue;
import static jdk.vm.ci.code.ValueUtil.isRegister;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.CMP;
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.BYTE;
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.DWORD;
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PD;
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PS;
@ -78,6 +78,9 @@ import org.graalvm.compiler.lir.amd64.AMD64ByteSwapOp;
import org.graalvm.compiler.lir.amd64.AMD64Call;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.BranchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CmpBranchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CmpConstBranchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CmpDataBranchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CondMoveOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CondSetOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatBranchOp;
@ -87,6 +90,9 @@ import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.HashTableSwitchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.ReturnOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.StrategySwitchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.TableSwitchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.TestBranchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.TestByteBranchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.TestConstBranchOp;
import org.graalvm.compiler.lir.amd64.AMD64LFenceOp;
import org.graalvm.compiler.lir.amd64.AMD64Move;
import org.graalvm.compiler.lir.amd64.AMD64Move.CompareAndSwapOp;
@ -111,6 +117,7 @@ import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.RegisterValue;
import jdk.vm.ci.code.StackSlot;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.Constant;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.PlatformKind;
@ -311,22 +318,90 @@ public abstract class AMD64LIRGenerator extends LIRGenerator {
@Override
public void emitCompareBranch(PlatformKind cmpKind, Value left, Value right, Condition cond, boolean unorderedIsTrue, LabelRef trueLabel, LabelRef falseLabel, double trueLabelProbability) {
Condition finalCondition = emitCompare(cmpKind, left, right, cond);
if (cmpKind == AMD64Kind.SINGLE || cmpKind == AMD64Kind.DOUBLE) {
Condition finalCondition = emitCompare(cmpKind, left, right, cond);
append(new FloatBranchOp(finalCondition, unorderedIsTrue, trueLabel, falseLabel, trueLabelProbability));
} else {
append(new BranchOp(finalCondition, trueLabel, falseLabel, trueLabelProbability));
return;
}
if (LIRValueUtil.isVariable(right)) {
emitRawCompareBranch(OperandSize.get(cmpKind), load(right), loadNonConst(left), cond.mirror(), trueLabel, falseLabel, trueLabelProbability);
} else {
emitRawCompareBranch(OperandSize.get(cmpKind), load(left), loadNonConst(right), cond, trueLabel, falseLabel, trueLabelProbability);
}
}
/**
 * Emits a fused compare-and-branch for an integer comparison, picking the most compact
 * LIR op based on the right operand: TEST against self for null/zero constants,
 * CMP-with-immediate for 32-bit constants, data-section or inline compares for VM
 * constants, and a plain register CMP as the fallback.
 */
private void emitRawCompareBranch(OperandSize size, Variable left, Value right, Condition cond, LabelRef trueLabel, LabelRef falseLabel, double trueLabelProbability) {
    if (isConstantValue(right)) {
        Constant c = LIRValueUtil.asConstant(right);
        if (JavaConstant.isNull(c)) {
            AMD64ArithmeticLIRGenerator arithmeticLIRGenerator = (AMD64ArithmeticLIRGenerator) arithmeticLIRGen;
            if (arithmeticLIRGenerator.mustReplaceNullWithNullRegister(c)) {
                // Null must be compared against a dedicated null-register value.
                append(new CmpBranchOp(size, left, arithmeticLIRGenerator.getNullRegisterValue(), null, cond, trueLabel, falseLabel, trueLabelProbability));
            } else {
                // Comparing against null/zero: TEST left,left avoids an immediate.
                append(new TestBranchOp(size, left, left, null, cond, trueLabel, falseLabel, trueLabelProbability));
            }
            return;
        } else if (c instanceof VMConstant) {
            VMConstant vc = (VMConstant) c;
            if (size == DWORD && !GeneratePIC.getValue(getResult().getLIR().getOptions()) && target().inlineObjects) {
                // VM constant can be patched inline as a 32-bit immediate.
                append(new CmpConstBranchOp(DWORD, left, vc, null, cond, trueLabel, falseLabel, trueLabelProbability));
            } else {
                // Otherwise compare against the constant placed in the data section.
                append(new CmpDataBranchOp(size, left, vc, cond, trueLabel, falseLabel, trueLabelProbability));
            }
            return;
        } else if (c instanceof JavaConstant) {
            JavaConstant jc = (JavaConstant) c;
            if (jc.isDefaultForKind()) {
                // Default value (zero): use TEST; byte-sized compares get the byte form.
                if (size == BYTE) {
                    append(new TestByteBranchOp(left, left, cond, trueLabel, falseLabel, trueLabelProbability));
                } else {
                    append(new TestBranchOp(size, left, left, null, cond, trueLabel, falseLabel, trueLabelProbability));
                }
                return;
            } else if (NumUtil.is32bit(jc.asLong())) {
                // Constant fits in imm32: compare directly against the immediate.
                append(new CmpConstBranchOp(size, left, (int) jc.asLong(), null, cond, trueLabel, falseLabel, trueLabelProbability));
                return;
            }
        }
    }
    // fallback: load the right operand into a register, then compare
    append(new CmpBranchOp(size, left, asAllocatable(right), null, cond, trueLabel, falseLabel, trueLabelProbability));
}
public void emitCompareBranchMemory(AMD64Kind cmpKind, Value left, AMD64AddressValue right, LIRFrameState state, Condition cond, boolean unorderedIsTrue, LabelRef trueLabel, LabelRef falseLabel,
double trueLabelProbability) {
boolean mirrored = emitCompareMemory(cmpKind, left, right, state);
Condition finalCondition = mirrored ? cond.mirror() : cond;
if (cmpKind.isXMM()) {
append(new FloatBranchOp(finalCondition, unorderedIsTrue, trueLabel, falseLabel, trueLabelProbability));
if (cmpKind == AMD64Kind.SINGLE) {
append(new AMD64BinaryConsumer.MemoryRMOp(SSEOp.UCOMIS, PS, asAllocatable(left), right, state));
append(new FloatBranchOp(cond, unorderedIsTrue, trueLabel, falseLabel, trueLabelProbability));
} else if (cmpKind == AMD64Kind.DOUBLE) {
append(new AMD64BinaryConsumer.MemoryRMOp(SSEOp.UCOMIS, PD, asAllocatable(left), right, state));
append(new FloatBranchOp(cond, unorderedIsTrue, trueLabel, falseLabel, trueLabelProbability));
} else {
throw GraalError.shouldNotReachHere("unexpected kind: " + cmpKind);
}
} else {
append(new BranchOp(finalCondition, trueLabel, falseLabel, trueLabelProbability));
OperandSize size = OperandSize.get(cmpKind);
if (isConstantValue(left)) {
ConstantValue a = asConstantValue(left);
if (JavaConstant.isNull(a.getConstant())) {
append(new CmpConstBranchOp(size, right, 0, state, cond.mirror(), trueLabel, falseLabel, trueLabelProbability));
return;
} else if (a.getConstant() instanceof VMConstant && size == DWORD && target().inlineObjects) {
VMConstant vc = (VMConstant) a.getConstant();
append(new CmpConstBranchOp(size, right, vc, state, cond.mirror(), trueLabel, falseLabel, trueLabelProbability));
return;
} else if (a.getConstant() instanceof JavaConstant && a.getJavaConstant().getJavaKind() != JavaKind.Object) {
long value = a.getJavaConstant().asLong();
if (NumUtil.is32bit(value)) {
append(new CmpConstBranchOp(size, right, (int) value, state, cond.mirror(), trueLabel, falseLabel, trueLabelProbability));
return;
}
}
}
append(new CmpBranchOp(size, asAllocatable(left), right, state, cond, trueLabel, falseLabel, trueLabelProbability));
}
}
@ -337,8 +412,22 @@ public abstract class AMD64LIRGenerator extends LIRGenerator {
@Override
public void emitIntegerTestBranch(Value left, Value right, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
emitIntegerTest(left, right);
append(new BranchOp(Condition.EQ, trueDestination, falseDestination, trueDestinationProbability));
if (left.getPlatformKind().getVectorLength() > 1) {
append(new AMD64VectorCompareOp(VexRMOp.VPTEST, getRegisterSize(left), asAllocatable(left), asAllocatable(right)));
append(new BranchOp(Condition.EQ, trueDestination, falseDestination, trueDestinationProbability));
} else {
assert ((AMD64Kind) left.getPlatformKind()).isInteger();
OperandSize size = left.getPlatformKind() == AMD64Kind.QWORD ? QWORD : DWORD;
if (isJavaConstant(right) && NumUtil.is32bit(asJavaConstant(right).asLong())) {
append(new TestConstBranchOp(size, asAllocatable(left), (int) asJavaConstant(right).asLong(), null, Condition.EQ, trueDestination, falseDestination, trueDestinationProbability));
} else if (isJavaConstant(left) && NumUtil.is32bit(asJavaConstant(left).asLong())) {
append(new TestConstBranchOp(size, asAllocatable(right), (int) asJavaConstant(left).asLong(), null, Condition.EQ, trueDestination, falseDestination, trueDestinationProbability));
} else if (isAllocatableValue(right)) {
append(new TestBranchOp(size, asAllocatable(right), asAllocatable(left), null, Condition.EQ, trueDestination, falseDestination, trueDestinationProbability));
} else {
append(new TestBranchOp(size, asAllocatable(left), asAllocatable(right), null, Condition.EQ, trueDestination, falseDestination, trueDestinationProbability));
}
}
}
@Override
@ -436,71 +525,6 @@ public abstract class AMD64LIRGenerator extends LIRGenerator {
}
}
/**
 * This method emits the compare against memory instruction, and may reorder the operands. It
 * returns true if it did so.
 *
 * @param cmpKind the platform kind of the comparison
 * @param a the left operand of the comparison (may be a constant)
 * @param b the right operand of the comparison (a memory address)
 * @param state the LIR frame state attached to the emitted memory access, may be null
 * @return true if the left and right operands were switched, false otherwise
 */
private boolean emitCompareMemory(AMD64Kind cmpKind, Value a, AMD64AddressValue b, LIRFrameState state) {
    OperandSize size;
    switch (cmpKind) {
        case BYTE:
            size = OperandSize.BYTE;
            break;
        case WORD:
            size = OperandSize.WORD;
            break;
        case DWORD:
            size = OperandSize.DWORD;
            break;
        case QWORD:
            size = OperandSize.QWORD;
            break;
        case SINGLE:
            // Floating-point compares use UCOMIS directly; operands are never switched.
            append(new AMD64BinaryConsumer.MemoryRMOp(SSEOp.UCOMIS, PS, asAllocatable(a), b, state));
            return false;
        case DOUBLE:
            append(new AMD64BinaryConsumer.MemoryRMOp(SSEOp.UCOMIS, PD, asAllocatable(a), b, state));
            return false;
        default:
            throw GraalError.shouldNotReachHere("unexpected kind: " + cmpKind);
    }
    if (isConstantValue(a)) {
        // Constant-vs-memory may be emitted as memory-vs-immediate (operands switched).
        return emitCompareMemoryConOp(size, asConstantValue(a), b, state);
    } else {
        return emitCompareRegMemoryOp(size, asAllocatable(a), b, state);
    }
}
/**
 * Emits a compare of a constant against memory, switching the operands to the
 * memory-vs-immediate form when the constant can be encoded (null, inline-able VM
 * constant, or a 32-bit primitive); otherwise loads the constant into a register.
 *
 * @return true if the operands were switched, false otherwise
 */
protected boolean emitCompareMemoryConOp(OperandSize size, ConstantValue a, AMD64AddressValue b, LIRFrameState state) {
    if (JavaConstant.isNull(a.getConstant())) {
        // Null compares as immediate 0 against the memory operand.
        append(new AMD64BinaryConsumer.MemoryConstOp(CMP, size, b, 0, state));
        return true;
    } else if (a.getConstant() instanceof VMConstant && size == DWORD && target().inlineObjects) {
        // A 32-bit VM constant can be patched inline as the immediate.
        VMConstant vc = (VMConstant) a.getConstant();
        append(new AMD64BinaryConsumer.MemoryVMConstOp(CMP.getMIOpcode(size, false), b, vc, state));
        return true;
    } else {
        if (a.getConstant() instanceof JavaConstant && a.getJavaConstant().getJavaKind() != JavaKind.Object) {
            long value = a.getJavaConstant().asLong();
            if (NumUtil.is32bit(value)) {
                // Primitive constant fits in imm32: compare memory against the immediate.
                append(new AMD64BinaryConsumer.MemoryConstOp(CMP, size, b, (int) value, state));
                return true;
            }
        }
        // Constant cannot be encoded as an immediate: materialize it in a register.
        return emitCompareRegMemoryOp(size, asAllocatable(a), b, state);
    }
}
/**
 * Emits a register-vs-memory CMP; the operand order is preserved.
 *
 * @return false, since the operands are never switched
 */
private boolean emitCompareRegMemoryOp(OperandSize size, AllocatableValue a, AMD64AddressValue b, LIRFrameState state) {
    AMD64RMOp op = CMP.getRMOpcode(size);
    append(new AMD64BinaryConsumer.MemoryRMOp(op, size, a, b, state));
    return false;
}
/**
* This method emits the compare instruction, and may reorder the operands. It returns true if
* it did so.
@ -559,7 +583,7 @@ public abstract class AMD64LIRGenerator extends LIRGenerator {
RegisterValue cnt2 = AMD64.rdx.asValue(length2.getValueKind());
emitMove(cnt1, length1);
emitMove(cnt2, length2);
append(new AMD64ArrayCompareToOp(this, kind1, kind2, raxRes, array1, array2, cnt1, cnt2));
append(new AMD64ArrayCompareToOp(this, getAVX3Threshold(), kind1, kind2, raxRes, array1, array2, cnt1, cnt2));
Variable result = newVariable(resultKind);
emitMove(result, raxRes);
return result;
@ -587,6 +611,13 @@ public abstract class AMD64LIRGenerator extends LIRGenerator {
return -1;
}
/**
 * Return the minimal array size (in bytes) for using AVX3 instructions.
 * Subclasses may override to tune when the AVX-512 code paths of the array/string
 * intrinsics are taken.
 */
protected int getAVX3Threshold() {
    return 4096;
}
@Override
public Variable emitArrayIndexOf(JavaKind arrayKind, JavaKind valueKind, boolean findTwoConsecutive, Value arrayPointer, Value arrayLength, Value fromIndex, Value... searchValues) {
Variable result = newVariable(LIRKind.value(AMD64Kind.DWORD));
@ -605,7 +636,7 @@ public abstract class AMD64LIRGenerator extends LIRGenerator {
emitMove(rdst, dst);
emitMove(rlen, len);
append(new AMD64StringLatin1InflateOp(this, rsrc, rdst, rlen));
append(new AMD64StringLatin1InflateOp(this, getAVX3Threshold(), rsrc, rdst, rlen));
}
@Override
@ -621,7 +652,7 @@ public abstract class AMD64LIRGenerator extends LIRGenerator {
LIRKind reskind = LIRKind.value(AMD64Kind.DWORD);
RegisterValue rres = AMD64.rax.asValue(reskind);
append(new AMD64StringUTF16CompressOp(this, rres, rsrc, rdst, rlen));
append(new AMD64StringUTF16CompressOp(this, getAVX3Threshold(), rres, rsrc, rdst, rlen));
Variable res = newVariable(reskind);
emitMove(res, rres);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2009, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -45,7 +45,6 @@ import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.SD;
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.SS;
import org.graalvm.compiler.asm.amd64.AMD64Assembler;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp;
import org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize;
@ -62,8 +61,8 @@ import org.graalvm.compiler.lir.LIRFrameState;
import org.graalvm.compiler.lir.LIRValueUtil;
import org.graalvm.compiler.lir.LabelRef;
import org.graalvm.compiler.lir.amd64.AMD64AddressValue;
import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.BranchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.TestBranchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.TestConstBranchOp;
import org.graalvm.compiler.lir.gen.LIRGeneratorTool;
import org.graalvm.compiler.nodes.ConstantNode;
import org.graalvm.compiler.nodes.DeoptimizingNode;
@ -80,8 +79,9 @@ import org.graalvm.compiler.nodes.calc.UnsignedRightShiftNode;
import org.graalvm.compiler.nodes.calc.ZeroExtendNode;
import org.graalvm.compiler.nodes.java.LogicCompareAndSwapNode;
import org.graalvm.compiler.nodes.java.ValueCompareAndSwapNode;
import org.graalvm.compiler.nodes.memory.Access;
import org.graalvm.compiler.nodes.memory.AddressableMemoryAccess;
import org.graalvm.compiler.nodes.memory.LIRLowerableAccess;
import org.graalvm.compiler.nodes.memory.MemoryAccess;
import org.graalvm.compiler.nodes.memory.WriteNode;
import org.graalvm.compiler.nodes.util.GraphUtil;
@ -101,7 +101,7 @@ public class AMD64NodeMatchRules extends NodeMatchRules {
super(gen);
}
protected LIRFrameState getState(Access access) {
protected LIRFrameState getState(MemoryAccess access) {
if (access instanceof DeoptimizingNode) {
return state((DeoptimizingNode) access);
}
@ -113,7 +113,7 @@ public class AMD64NodeMatchRules extends NodeMatchRules {
}
protected LIRKind getLirKind(LIRLowerableAccess access) {
return gen.getLIRKind(access.getAccessStamp());
return gen.getLIRKind(access.getAccessStamp(NodeView.DEFAULT));
}
protected OperandSize getMemorySize(LIRLowerableAccess access) {
@ -192,29 +192,27 @@ public class AMD64NodeMatchRules extends NodeMatchRules {
double trueLabelProbability = x.probability(x.trueSuccessor());
AMD64Kind kind = getMemoryKind(access);
OperandSize size = kind == AMD64Kind.QWORD ? QWORD : DWORD;
if (value.isConstant()) {
if (value.isJavaConstant()) {
JavaConstant constant = value.asJavaConstant();
if (constant != null && kind == AMD64Kind.QWORD && !NumUtil.isInt(constant.asLong())) {
if (kind == AMD64Kind.QWORD && !NumUtil.isInt(constant.asLong())) {
// Only imm32 as long
return null;
}
return builder -> {
AMD64AddressValue address = (AMD64AddressValue) operand(access.getAddress());
gen.append(new AMD64BinaryConsumer.MemoryConstOp(AMD64MIOp.TEST, size, address, (int) constant.asLong(), getState(access)));
gen.append(new BranchOp(Condition.EQ, trueLabel, falseLabel, trueLabelProbability));
gen.append(new TestConstBranchOp(size, address, (int) constant.asLong(), getState(access), Condition.EQ, trueLabel, falseLabel, trueLabelProbability));
return null;
};
} else {
return builder -> {
AMD64AddressValue address = (AMD64AddressValue) operand(access.getAddress());
gen.append(new AMD64BinaryConsumer.MemoryRMOp(AMD64RMOp.TEST, size, gen.asAllocatable(operand(value)), address, getState(access)));
gen.append(new BranchOp(Condition.EQ, trueLabel, falseLabel, trueLabelProbability));
gen.append(new TestBranchOp(size, gen.asAllocatable(operand(value)), address, getState(access), Condition.EQ, trueLabel, falseLabel, trueLabelProbability));
return null;
};
}
}
protected ComplexMatchResult emitConvertMemoryOp(PlatformKind kind, AMD64RMOp op, OperandSize size, Access access, ValueKind<?> addressKind) {
protected ComplexMatchResult emitConvertMemoryOp(PlatformKind kind, AMD64RMOp op, OperandSize size, AddressableMemoryAccess access, ValueKind<?> addressKind) {
return builder -> {
AMD64AddressValue address = (AMD64AddressValue) operand(access.getAddress());
LIRFrameState state = getState(access);
@ -225,11 +223,11 @@ public class AMD64NodeMatchRules extends NodeMatchRules {
};
}
protected ComplexMatchResult emitConvertMemoryOp(PlatformKind kind, AMD64RMOp op, OperandSize size, Access access) {
protected ComplexMatchResult emitConvertMemoryOp(PlatformKind kind, AMD64RMOp op, OperandSize size, AddressableMemoryAccess access) {
return emitConvertMemoryOp(kind, op, size, access, null);
}
private ComplexMatchResult emitSignExtendMemory(Access access, int fromBits, int toBits, ValueKind<?> addressKind) {
private ComplexMatchResult emitSignExtendMemory(AddressableMemoryAccess access, int fromBits, int toBits, ValueKind<?> addressKind) {
assert fromBits <= toBits && toBits <= 64;
AMD64Kind kind = null;
AMD64RMOp op;
@ -276,7 +274,7 @@ public class AMD64NodeMatchRules extends NodeMatchRules {
return null;
}
private Value emitReinterpretMemory(LIRKind to, Access access) {
private Value emitReinterpretMemory(LIRKind to, AddressableMemoryAccess access) {
AMD64AddressValue address = (AMD64AddressValue) operand(access.getAddress());
LIRFrameState state = getState(access);
return getArithmeticLIRGenerator().emitLoad(to, address, state);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,6 +37,10 @@ public class AMD64SuitesCreator extends DefaultSuitesCreator {
super(compilerConfiguration, plugins);
}
public AMD64SuitesCreator(CompilerConfiguration compilerConfiguration) {
super(compilerConfiguration);
}
@Override
public LIRSuites createLIRSuites(OptionValues options) {
LIRSuites lirSuites = super.createLIRSuites(options);

View File

@ -27,6 +27,7 @@ package org.graalvm.compiler.core.common;
import static org.graalvm.compiler.serviceprovider.GraalUnsafeAccess.getUnsafe;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import org.graalvm.compiler.debug.GraalError;
@ -146,11 +147,42 @@ public class Fields {
}
} else {
Object obj = UNSAFE.getObject(from, offset);
if (obj != null && type.isArray()) {
if (type.getComponentType().isPrimitive()) {
obj = copyObjectAsArray(obj);
} else {
obj = ((Object[]) obj).clone();
}
}
UNSAFE.putObject(to, offset, trans == null ? obj : trans.apply(index, obj));
}
}
}
/**
 * Returns a shallow copy of {@code obj}, which must be an array with a primitive component
 * type. For such arrays {@code clone()} produces exactly the same copy that
 * {@code Arrays.copyOf(array, array.length)} would.
 */
private static Object copyObjectAsArray(Object obj) {
    if (obj instanceof boolean[]) {
        return ((boolean[]) obj).clone();
    }
    if (obj instanceof byte[]) {
        return ((byte[]) obj).clone();
    }
    if (obj instanceof char[]) {
        return ((char[]) obj).clone();
    }
    if (obj instanceof short[]) {
        return ((short[]) obj).clone();
    }
    if (obj instanceof int[]) {
        return ((int[]) obj).clone();
    }
    if (obj instanceof long[]) {
        return ((long[]) obj).clone();
    }
    if (obj instanceof float[]) {
        return ((float[]) obj).clone();
    }
    if (obj instanceof double[]) {
        return ((double[]) obj).clone();
    }
    // Reference arrays are handled by the caller; anything else is unexpected.
    throw GraalError.shouldNotReachHere();
}
/**
* Gets the value of a field for a given object.
*

View File

@ -87,15 +87,6 @@ public final class GraalOptions {
@Option(help = "The maximum length of an array that will be escape analyzed.", type = OptionType.Expert)
public static final OptionKey<Integer> MaximumEscapeAnalysisArrayLength = new OptionKey<>(128);
@Option(help = "", type = OptionType.Debug)
public static final OptionKey<Boolean> PEAInliningHints = new OptionKey<>(false);
@Option(help = "", type = OptionType.Expert)
public static final OptionKey<Double> TailDuplicationProbability = new OptionKey<>(0.5);
@Option(help = "", type = OptionType.Expert)
public static final OptionKey<Integer> TailDuplicationTrivialSize = new OptionKey<>(1);
@Option(help = "", type = OptionType.Expert)
public static final OptionKey<Integer> DeoptsToDisableOptimisticOptimization = new OptionKey<>(40);
@ -185,9 +176,6 @@ public final class GraalOptions {
@Option(help = "", type = OptionType.Debug)
public static final OptionKey<Boolean> OmitHotExceptionStacktrace = new OptionKey<>(false);
@Option(help = "", type = OptionType.Debug)
public static final OptionKey<Boolean> GenSafepoints = new OptionKey<>(true);
@Option(help = "", type = OptionType.Debug)
public static final OptionKey<Boolean> GenLoopSafepoints = new OptionKey<>(true);
@ -244,12 +232,6 @@ public final class GraalOptions {
@Option(help = "", type = OptionType.Debug)
public static final OptionKey<Boolean> OptFloatingReads = new OptionKey<>(true);
@Option(help = "", type = OptionType.Debug)
public static final OptionKey<Boolean> OptEliminatePartiallyRedundantGuards = new OptionKey<>(true);
@Option(help = "", type = OptionType.Debug)
public static final OptionKey<Boolean> OptFilterProfiledTypes = new OptionKey<>(true);
@Option(help = "", type = OptionType.Debug)
public static final OptionKey<Boolean> OptDevirtualizeInvokesOptimistically = new OptionKey<>(true);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1511,7 +1511,8 @@ public final class IntegerStamp extends PrimitiveStamp {
return StampFactory.forInteger(resultBits).empty();
}
IntegerStamp stamp = (IntegerStamp) input;
assert inputBits == stamp.getBits();
assert inputBits == stamp.getBits() : "Input bits" + inputBits + " stamp bits " +
stamp.getBits() + " result bits " + resultBits;
assert inputBits <= resultBits;
if (inputBits == resultBits) {

View File

@ -519,6 +519,9 @@ public class MatchProcessor extends AbstractProcessor {
out.println("import org.graalvm.compiler.core.gen.NodeMatchRules;");
out.println("import org.graalvm.compiler.graph.Position;");
for (String p : info.requiredPackages) {
if (p.equals(pkg)) {
continue;
}
out.println("import " + p + ".*;");
}
out.println("");
@ -774,7 +777,7 @@ public class MatchProcessor extends AbstractProcessor {
if (mirror != null) {
matchableNodeAnnotations = getAnnotationValueList(mirror, "value", AnnotationMirror.class);
} else {
mirror = getAnnotation(element, getType(MATCHABLE_NODES_CLASS_NAME));
mirror = getAnnotation(element, getType(MATCHABLE_NODE_CLASS_NAME));
if (mirror != null) {
matchableNodeAnnotations = Collections.singletonList(mirror);
} else {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -46,7 +46,7 @@ import jdk.vm.ci.meta.JavaConstant;
* is not 32 or 64 bit into either a 32 or 64 bit compare by sign extending the input values.
*
* Why we do this in the HIR instead in the LIR? This enables the pattern matcher
* {@link SPARCNodeMatchRules#signExtend(SignExtendNode, org.graalvm.compiler.nodes.memory.Access)}
* {@link SPARCNodeMatchRules#signExtend(SignExtendNode, org.graalvm.compiler.nodes.memory.AddressableMemoryAccess)}
* to do it's job and replace loads with sign extending ones.
*/
public class SPARCIntegerCompareCanonicalizationPhase extends Phase {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2009, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,13 +43,15 @@ import org.graalvm.compiler.lir.gen.LIRGeneratorTool;
import org.graalvm.compiler.lir.sparc.SPARCAddressValue;
import org.graalvm.compiler.nodes.DeoptimizingNode;
import org.graalvm.compiler.nodes.IfNode;
import org.graalvm.compiler.nodes.NodeView;
import org.graalvm.compiler.nodes.ValueNode;
import org.graalvm.compiler.nodes.calc.CompareNode;
import org.graalvm.compiler.nodes.calc.SignExtendNode;
import org.graalvm.compiler.nodes.calc.ZeroExtendNode;
import org.graalvm.compiler.nodes.java.LogicCompareAndSwapNode;
import org.graalvm.compiler.nodes.memory.Access;
import org.graalvm.compiler.nodes.memory.AddressableMemoryAccess;
import org.graalvm.compiler.nodes.memory.LIRLowerableAccess;
import org.graalvm.compiler.nodes.memory.MemoryAccess;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.Value;
@ -64,7 +66,7 @@ public class SPARCNodeMatchRules extends NodeMatchRules {
super(gen);
}
protected LIRFrameState getState(Access access) {
protected LIRFrameState getState(MemoryAccess access) {
if (access instanceof DeoptimizingNode) {
return state((DeoptimizingNode) access);
}
@ -72,10 +74,10 @@ public class SPARCNodeMatchRules extends NodeMatchRules {
}
protected LIRKind getLirKind(LIRLowerableAccess access) {
return gen.getLIRKind(access.getAccessStamp());
return gen.getLIRKind(access.getAccessStamp(NodeView.DEFAULT));
}
private ComplexMatchResult emitSignExtendMemory(Access access, int fromBits, int toBits) {
private ComplexMatchResult emitSignExtendMemory(AddressableMemoryAccess access, int fromBits, int toBits) {
assert fromBits <= toBits && toBits <= 64;
SPARCKind toKind = null;
SPARCKind fromKind = null;
@ -103,7 +105,7 @@ public class SPARCNodeMatchRules extends NodeMatchRules {
};
}
private ComplexMatchResult emitZeroExtendMemory(Access access, int fromBits, int toBits) {
private ComplexMatchResult emitZeroExtendMemory(AddressableMemoryAccess access, int fromBits, int toBits) {
assert fromBits <= toBits && toBits <= 64;
SPARCKind toKind = null;
SPARCKind fromKind = null;
@ -135,14 +137,14 @@ public class SPARCNodeMatchRules extends NodeMatchRules {
@MatchRule("(SignExtend Read=access)")
@MatchRule("(SignExtend FloatingRead=access)")
@MatchRule("(SignExtend VolatileRead=access)")
public ComplexMatchResult signExtend(SignExtendNode root, Access access) {
public ComplexMatchResult signExtend(SignExtendNode root, AddressableMemoryAccess access) {
return emitSignExtendMemory(access, root.getInputBits(), root.getResultBits());
}
@MatchRule("(ZeroExtend Read=access)")
@MatchRule("(ZeroExtend FloatingRead=access)")
@MatchRule("(ZeroExtend VolatileRead=access)")
public ComplexMatchResult zeroExtend(ZeroExtendNode root, Access access) {
public ComplexMatchResult zeroExtend(ZeroExtendNode root, AddressableMemoryAccess access) {
return emitZeroExtendMemory(access, root.getInputBits(), root.getResultBits());
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -29,13 +29,18 @@ import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.lang.annotation.Annotation;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
@ -48,6 +53,7 @@ import org.graalvm.compiler.api.replacements.Snippet.ConstantParameter;
import org.graalvm.compiler.api.replacements.Snippet.NonNullParameter;
import org.graalvm.compiler.api.replacements.Snippet.VarargsParameter;
import org.graalvm.compiler.api.test.Graal;
import org.graalvm.compiler.api.test.ModuleSupport;
import org.graalvm.compiler.bytecode.BridgeMethodUtils;
import org.graalvm.compiler.core.CompilerThreadFactory;
import org.graalvm.compiler.core.common.LIRKind;
@ -66,8 +72,13 @@ import org.graalvm.compiler.nodes.StructuredGraph.AllowAssumptions;
import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderConfiguration;
import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderConfiguration.Plugins;
import org.graalvm.compiler.nodes.graphbuilderconf.InvocationPlugins;
import org.graalvm.compiler.nodes.java.LoadFieldNode;
import org.graalvm.compiler.nodes.spi.CoreProviders;
import org.graalvm.compiler.options.Option;
import org.graalvm.compiler.options.OptionDescriptor;
import org.graalvm.compiler.options.OptionDescriptors;
import org.graalvm.compiler.options.OptionValues;
import org.graalvm.compiler.options.OptionsParser;
import org.graalvm.compiler.phases.OptimisticOptimizations;
import org.graalvm.compiler.phases.PhaseSuite;
import org.graalvm.compiler.phases.VerifyPhase;
@ -78,7 +89,6 @@ import org.graalvm.compiler.phases.util.Providers;
import org.graalvm.compiler.runtime.RuntimeProvider;
import org.graalvm.compiler.serviceprovider.JavaVersionUtil;
import org.graalvm.compiler.test.AddExports;
import org.graalvm.compiler.api.test.ModuleSupport;
import jdk.internal.vm.compiler.word.LocationIdentity;
import org.junit.Assert;
import org.junit.Assume;
@ -91,6 +101,7 @@ import jdk.vm.ci.meta.JavaField;
import jdk.vm.ci.meta.JavaMethod;
import jdk.vm.ci.meta.JavaType;
import jdk.vm.ci.meta.MetaAccessProvider;
import jdk.vm.ci.meta.ResolvedJavaField;
import jdk.vm.ci.meta.ResolvedJavaMethod;
import jdk.vm.ci.meta.ResolvedJavaType;
import jdk.vm.ci.meta.Value;
@ -197,10 +208,31 @@ public class CheckGraalInvariants extends GraalCompilerTest {
@SuppressWarnings("unused")
protected void updateVerifiers(List<VerifyPhase<CoreProviders>> verifiers) {
}
/**
 * Determines if {@code option} should be checked to ensure it has at least one usage.
 */
public boolean shouldCheckUsage(OptionDescriptor option) {
    String declaringClassName = option.getDeclaringClass().getName();
    // Options declared in SharedTruffleCompilerOptions are aliases for Truffle runtime
    // options whose usages are not on the class path used when running
    // CheckGraalInvariants, so a usage check would always fail for them.
    boolean isTruffleAlias = declaringClassName.equals("org.graalvm.compiler.truffle.compiler.SharedTruffleCompilerOptions");
    if (isTruffleAlias) {
        return false;
    }
    // An anonymous OptionKey subclass is probably a derived option such as
    // org.graalvm.compiler.debug.DebugOptions.PrintGraphFile.
    boolean isDerivedOption = option.getOptionKey().getClass().isAnonymousClass();
    return !isDerivedOption;
}
}
@Test
@SuppressWarnings("try")
public void test() {
assumeManagementLibraryIsLoadable();
runTest(new InvariantsTool());
@ -330,6 +362,10 @@ public class CheckGraalInvariants extends GraalCompilerTest {
}
}
}
Map<ResolvedJavaField, Set<ResolvedJavaMethod>> optionFieldUsages = initOptionFieldUsagesMap(tool, metaAccess, errors);
ResolvedJavaType optionDescriptorsType = metaAccess.lookupJavaType(OptionDescriptors.class);
if (errors.isEmpty()) {
// Order outer classes before the inner classes
classNames.sort((String a, String b) -> a.compareTo(b));
@ -373,6 +409,7 @@ public class CheckGraalInvariants extends GraalCompilerTest {
graphBuilderSuite.apply(graph, context);
// update phi stamps
graph.getNodes().filter(PhiNode.class).forEach(PhiNode::inferStamp);
collectOptionFieldUsages(optionFieldUsages, optionDescriptorsType, method, graph);
checkGraph(verifiers, context, graph);
} catch (VerificationError e) {
errors.add(e.getMessage());
@ -411,6 +448,9 @@ public class CheckGraalInvariants extends GraalCompilerTest {
}
}
}
checkOptionFieldUsages(errors, optionFieldUsages);
if (!errors.isEmpty()) {
StringBuilder msg = new StringBuilder();
String nl = String.format("%n");
@ -424,6 +464,52 @@ public class CheckGraalInvariants extends GraalCompilerTest {
}
}
/**
 * Initializes a map from each option field (i.e., a field annotated by {@link Option}) to an
 * initially empty set that will later collect the methods that load the field.
 *
 * @param tool filters which options are subject to the usage check (see
 *            {@code InvariantsTool.shouldCheckUsage})
 * @param metaAccess resolves each reflective option field to a {@link ResolvedJavaField} map key
 * @param errors sink for one message per option field that cannot be found reflectively
 * @return the map whose value sets are filled by {@code collectOptionFieldUsages}
 */
private static Map<ResolvedJavaField, Set<ResolvedJavaMethod>> initOptionFieldUsagesMap(InvariantsTool tool, MetaAccessProvider metaAccess, List<String> errors) {
    Map<ResolvedJavaField, Set<ResolvedJavaMethod>> optionFields = new HashMap<>();
    // Walk every option descriptor known to the service-based options loader.
    for (OptionDescriptors set : OptionsParser.getOptionsLoader()) {
        for (OptionDescriptor option : set) {
            if (tool.shouldCheckUsage(option)) {
                Class<?> declaringClass = option.getDeclaringClass();
                try {
                    Field javaField = declaringClass.getDeclaredField(option.getFieldName());
                    optionFields.put(metaAccess.lookupJavaField(javaField), new HashSet<>());
                } catch (NoSuchFieldException e) {
                    // Descriptor names a non-existent field: report it rather than abort the scan.
                    errors.add(e.toString());
                }
            }
        }
    }
    return optionFields;
}
/**
 * Records the method of {@code graph} as a usage for every option field loaded by
 * {@code graph}. Graphs of methods declared in an {@link OptionDescriptors} subclass are
 * skipped entirely (NOTE(review): presumably so a descriptor's own read of its option field
 * does not count as a real usage — confirm against the descriptor generator).
 */
private static void collectOptionFieldUsages(Map<ResolvedJavaField, Set<ResolvedJavaMethod>> optionFields, ResolvedJavaType optionDescriptorsType, ResolvedJavaMethod method,
                StructuredGraph graph) {
    if (!optionDescriptorsType.isAssignableFrom(method.getDeclaringClass())) {
        for (LoadFieldNode lfn : graph.getNodes().filter(LoadFieldNode.class)) {
            ResolvedJavaField field = lfn.field();
            Set<ResolvedJavaMethod> loads = optionFields.get(field);
            if (loads != null) {
                // Only fields pre-registered by initOptionFieldUsagesMap (i.e. option
                // fields) have a non-null set and are recorded.
                loads.add(graph.method());
            }
        }
    }
}
/**
 * Adds an error for every option field that was never observed being loaded, i.e. whose
 * collected usage set is still empty.
 */
private static void checkOptionFieldUsages(List<String> errors, Map<ResolvedJavaField, Set<ResolvedJavaMethod>> optionFieldUsages) {
    optionFieldUsages.forEach((field, usages) -> {
        if (usages.isEmpty()) {
            errors.add("No uses found for " + field.format("%H.%n"));
        }
    });
}
/**
 * Determines whether {@code className} lies under the {@code org.graalvm.nativeimage}
 * package prefix.
 */
private static boolean isInNativeImage(String className) {
    final String nativeImagePackagePrefix = "org.graalvm.nativeimage";
    return className.startsWith(nativeImagePackagePrefix);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,9 +44,11 @@ import org.graalvm.compiler.nodes.graphbuilderconf.InvocationPlugins.Registratio
import org.graalvm.compiler.nodes.spi.LIRLowerable;
import org.graalvm.compiler.nodes.spi.NodeLIRBuilderTool;
import org.graalvm.compiler.nodes.util.GraphUtil;
import org.graalvm.compiler.options.OptionValues;
import org.graalvm.compiler.phases.OptimisticOptimizations;
import org.junit.Test;
import jdk.vm.ci.code.InstalledCode;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.ResolvedJavaMethod;
@ -647,8 +649,8 @@ public class CountedLoopTest extends GraalCompilerTest {
@Override
protected OptimisticOptimizations getOptimisticOptimizations() {
// Don't convert unreached paths into Guard
return OptimisticOptimizations.ALL.remove(OptimisticOptimizations.Optimization.RemoveNeverExecutedCode);
// Disable profile based optimizations
return OptimisticOptimizations.NONE;
}
private Object[] argsToBind;
@ -675,6 +677,12 @@ public class CountedLoopTest extends GraalCompilerTest {
testCounted(true, snippetName, start, limit, step);
}
@Override
protected InstalledCode getCode(ResolvedJavaMethod installedCodeOwner, StructuredGraph graph, boolean forceCompile, boolean installAsDefault, OptionValues options) {
installedCodeOwner.reprofile();
return super.getCode(installedCodeOwner, graph, forceCompile, installAsDefault, options);
}
public void testCounted(boolean removable, String snippetName, Object start, Object limit, Object step) {
this.loopCanBeRemoved = removable;
Object[] args = {start, limit, step};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -0,0 +1,45 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.graalvm.compiler.core.test;
import org.graalvm.compiler.nodes.StructuredGraph;
import org.graalvm.compiler.nodes.calc.SignedRemNode;
import org.junit.Test;
/**
 * Tests that a {@code SignedRemNode} made redundant by surrounding arithmetic is
 * canonicalized away.
 */
public class IntegerDivRemCanonicalizationTest extends GraalCompilerTest {

    // Snippet where the remainder is redundant: (a - a % b) / b equals a / b under Java's
    // truncated integer division, so canonicalization can drop the '%'.
    public static int redundantRemNode(int a, int b) {
        int r = (a - a % b) / b;
        return r;
    }

    @Test
    public void testRedundantRemNode() {
        StructuredGraph graph = parseForCompile(getResolvedJavaMethod("redundantRemNode"));
        createCanonicalizerPhase().apply(graph, getProviders());
        // We expect the remainder to be canonicalized away.
        assertTrue(graph.getNodes().filter(SignedRemNode.class).count() == 0);
    }
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,6 +24,7 @@
package org.graalvm.compiler.core.test;
import org.graalvm.compiler.core.common.cfg.AbstractBlockBase;
import org.graalvm.compiler.lir.LIR;
import org.graalvm.compiler.lir.LIRInstruction;
import org.graalvm.compiler.lir.gen.LIRGenerationResult;
@ -59,9 +60,13 @@ public abstract class MatchRuleTest extends GraalCompilerTest {
}
protected void checkLIR(String methodName, Predicate<LIRInstruction> predicate, int expected) {
checkLIR(methodName, predicate, 0, expected);
}
protected void checkLIR(String methodName, Predicate<LIRInstruction> predicate, int blockIndex, int expected) {
compile(getResolvedJavaMethod(methodName), null);
int actualOpNum = 0;
for (LIRInstruction ins : lir.getLIRforBlock(lir.codeEmittingOrder()[0])) {
for (LIRInstruction ins : lir.getLIRforBlock(lir.codeEmittingOrder()[blockIndex])) {
if (predicate.test(ins)) {
actualOpNum++;
}
@ -69,4 +74,19 @@ public abstract class MatchRuleTest extends GraalCompilerTest {
Assert.assertEquals(expected, actualOpNum);
}
protected void checkLIRforAll(String methodName, Predicate<LIRInstruction> predicate, int expected) {
compile(getResolvedJavaMethod(methodName), null);
int actualOpNum = 0;
for (AbstractBlockBase<?> block : lir.codeEmittingOrder()) {
if (block == null) {
continue;
}
for (LIRInstruction ins : lir.getLIRforBlock(block)) {
if (predicate.test(ins)) {
actualOpNum++;
}
}
}
Assert.assertEquals(expected, actualOpNum);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,39 +24,18 @@
package org.graalvm.compiler.core.test;
import java.lang.reflect.Field;
import org.graalvm.compiler.nodes.NamedLocationIdentity;
import org.graalvm.compiler.nodes.memory.ReadNode;
import org.junit.Assert;
import org.junit.Test;
import jdk.vm.ci.meta.JavaKind;
import sun.misc.Unsafe;
/**
* Tests that off-heap memory writes don't prevent optimization of on-heap accesses.
*/
public class OffHeapUnsafeAccessTest extends GraalCompilerTest {
static final Unsafe UNSAFE = initUnsafe();
private static Unsafe initUnsafe() {
try {
// Fast path when we are trusted.
return Unsafe.getUnsafe();
} catch (SecurityException se) {
// Slow path when we are not trusted.
try {
Field theUnsafe = Unsafe.class.getDeclaredField("theUnsafe");
theUnsafe.setAccessible(true);
return (Unsafe) theUnsafe.get(Unsafe.class);
} catch (Exception e) {
throw new RuntimeException("exception while trying to get Unsafe", e);
}
}
}
public byte unboxByteAndStore(long memory, byte[] box) {
byte val = box[0];
UNSAFE.putByte(memory, val);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,14 +28,14 @@ import java.lang.ref.Reference;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.WeakReference;
import org.junit.Test;
import org.graalvm.compiler.graph.Node;
import org.graalvm.compiler.loop.LoopEx;
import org.graalvm.compiler.loop.LoopsData;
import org.graalvm.compiler.nodes.FieldLocationIdentity;
import org.graalvm.compiler.nodes.StructuredGraph;
import org.graalvm.compiler.nodes.memory.Access;
import org.graalvm.compiler.nodes.memory.MemoryAccess;
import jdk.internal.vm.compiler.word.LocationIdentity;
import org.junit.Test;
import jdk.vm.ci.meta.ResolvedJavaField;
@ -47,8 +47,8 @@ public class ReferenceGetLoopTest extends GraalCompilerTest {
boolean found = false;
for (LoopEx loop : loops.loops()) {
for (Node node : loop.inside().nodes()) {
if (node instanceof Access) {
Access access = (Access) node;
if (node instanceof MemoryAccess) {
MemoryAccess access = (MemoryAccess) node;
LocationIdentity location = access.getLocationIdentity();
if (location instanceof FieldLocationIdentity) {
ResolvedJavaField field = ((FieldLocationIdentity) location).getField();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,92 +24,485 @@
package org.graalvm.compiler.core.test;
import java.lang.reflect.Field;
import java.util.Arrays;
import org.graalvm.compiler.api.directives.GraalDirectives;
import org.graalvm.compiler.core.test.ea.EATestBase.TestClassInt;
import org.graalvm.compiler.nodes.StructuredGraph;
import org.graalvm.compiler.nodes.StructuredGraph.AllowAssumptions;
import org.graalvm.compiler.nodes.extended.RawLoadNode;
import org.graalvm.compiler.nodes.extended.RawStoreNode;
import org.graalvm.compiler.nodes.spi.CoreProviders;
import org.graalvm.compiler.options.OptionValues;
import org.graalvm.compiler.phases.common.CanonicalizerPhase;
import org.graalvm.compiler.virtual.phases.ea.PartialEscapePhase;
import org.junit.Assume;
import org.junit.Test;
import jdk.vm.ci.code.InstalledCode;
import jdk.vm.ci.meta.ResolvedJavaMethod;
import sun.misc.Unsafe;
public class UnsafeVirtualizationTest extends GraalCompilerTest {
public static int unsafeSnippet1(double i1) {
TestClassInt a = new TestClassInt();
UNSAFE.putDouble(a, TestClassInt.fieldOffset1, i1);
return UNSAFE.getInt(a, TestClassInt.fieldOffset1) + UNSAFE.getInt(a, TestClassInt.fieldOffset2);
private static boolean[] FT = new boolean[]{false, true};
public static class Base {
    /*
     * This padding ensures that the size of the Base class ends up as a multiple of 8, which
     * makes the first field of the subclass 8-byte aligned.
     */
    double padding;
}
public static long unsafeSnippet2a(int i1) {
TestClassInt a = new TestClassInt();
UNSAFE.putDouble(a, TestClassInt.fieldOffset1, i1);
a.setFirstField(i1);
return UNSAFE.getLong(a, TestClassInt.fieldOffset1);
/**
 * Subclass whose two {@code int} fields are written/read both normally and via
 * {@code Unsafe} in the snippets below; thanks to {@link Base}'s padding, {@code f1} is
 * expected to be 8-byte aligned.
 */
public static class A extends Base {
    int f1;
    int f2;
}
public static long unsafeSnippet2b(int i1) {
TestClassInt a = new TestClassInt();
UNSAFE.putDouble(a, TestClassInt.fieldOffset1, i1);
a.setSecondField(i1);
return UNSAFE.getLong(a, TestClassInt.fieldOffset1);
// Unsafe field offsets of A.f1 and A.f2, resolved once during class initialization.
private static final long AF1Offset;
private static final long AF2Offset;
static {
    long o1 = -1;
    long o2 = -1;
    try {
        Field f1 = A.class.getDeclaredField("f1");
        Field f2 = A.class.getDeclaredField("f2");
        o1 = UNSAFE.objectFieldOffset(f1);
        o2 = UNSAFE.objectFieldOffset(f2);
    } catch (NoSuchFieldException | SecurityException e) {
        // Reflection over our own declared fields cannot legitimately fail.
        throw new AssertionError(e);
    }
    AF1Offset = o1;
    AF2Offset = o2;
}
public static long unsafeSnippet3a(int i1) {
TestClassInt a = new TestClassInt();
UNSAFE.putDouble(a, TestClassInt.fieldOffset1, i1);
UNSAFE.putInt(a, TestClassInt.fieldOffset1, i1);
return UNSAFE.getLong(a, TestClassInt.fieldOffset1);
// Side effect to create a deopt point, after possible virtualization.
static int sideEffectField;
// Writes a static field; called from the snippets between the unsafe writes and reads.
private static void sideEffect() {
sideEffectField = 5;
}
public static long unsafeSnippet3b(int i1) {
TestClassInt a = new TestClassInt();
UNSAFE.putDouble(a, TestClassInt.fieldOffset1, i1);
UNSAFE.putInt(a, TestClassInt.fieldOffset2, i1);
return UNSAFE.getLong(a, TestClassInt.fieldOffset1);
// Stores a double across both int fields of a fresh A (one 8-byte write at f1's offset),
// then reads the two 4-byte halves back as ints. 'c' triggers an explicit deopt after the
// side effect, giving PEA a materialization point.
public static int unsafeSnippet1(double i1, boolean c) {
A a = new A();
UNSAFE.putDouble(a, AF1Offset, i1);
sideEffect();
if (c) {
GraalDirectives.deoptimize();
}
return UNSAFE.getInt(a, AF1Offset) + UNSAFE.getInt(a, AF2Offset);
}
public static int unsafeSnippet4(double i1) {
TestClassInt a = new TestClassInt();
UNSAFE.putDouble(a, TestClassInt.fieldOffset1, i1);
UNSAFE.putDouble(a, TestClassInt.fieldOffset1, i1);
return UNSAFE.getInt(a, TestClassInt.fieldOffset1) + UNSAFE.getInt(a, TestClassInt.fieldOffset2);
// Unsafe 8-byte write over f1+f2, then a normal field store to f1, then an 8-byte
// unsafe read — mixes unsafe and regular accesses to the same memory.
public static long unsafeSnippet2a(int i1, boolean c) {
A a = new A();
UNSAFE.putDouble(a, AF1Offset, i1);
a.f1 = i1;
sideEffect();
if (c) {
GraalDirectives.deoptimize();
}
return UNSAFE.getLong(a, AF1Offset);
}
// Same as unsafeSnippet2a but the regular field store hits f2 (the upper half of the
// 8-byte unsafe-written region) instead of f1.
public static long unsafeSnippet2b(int i1, boolean c) {
A a = new A();
UNSAFE.putDouble(a, AF1Offset, i1);
a.f2 = i1;
sideEffect();
if (c) {
GraalDirectives.deoptimize();
}
return UNSAFE.getLong(a, AF1Offset);
}
// Overlapping unsafe writes of different sizes at the same offset: an 8-byte double
// write followed by a 4-byte int write, then an 8-byte read.
public static long unsafeSnippet3a(int i1, boolean c) {
A a = new A();
UNSAFE.putDouble(a, AF1Offset, i1);
UNSAFE.putInt(a, AF1Offset, i1);
sideEffect();
if (c) {
GraalDirectives.deoptimize();
}
return UNSAFE.getLong(a, AF1Offset);
}
// Like unsafeSnippet3a, but the 4-byte int write lands at f2's offset (the second half
// of the previously double-written region).
public static long unsafeSnippet3b(int i1, boolean c) {
A a = new A();
UNSAFE.putDouble(a, AF1Offset, i1);
UNSAFE.putInt(a, AF2Offset, i1);
sideEffect();
if (c) {
GraalDirectives.deoptimize();
}
return UNSAFE.getLong(a, AF1Offset);
}
// Two identical back-to-back 8-byte writes (the second is redundant), then the two
// 4-byte halves are read back as ints.
public static int unsafeSnippet4(double i1, boolean c) {
A a = new A();
UNSAFE.putDouble(a, AF1Offset, i1);
UNSAFE.putDouble(a, AF1Offset, i1);
sideEffect();
if (c) {
GraalDirectives.deoptimize();
}
return UNSAFE.getInt(a, AF1Offset) + UNSAFE.getInt(a, AF2Offset);
}
// 8-byte write spanning both elements of an int[2], then a 2-byte read at byte offset 6
// into the payload (ARRAY_BYTE_INDEX_SCALE is the byte element size, i.e. a raw byte offset).
public static int unsafeSnippet5(long i1, boolean c) {
int[] t = new int[2];
UNSAFE.putLong(t, (long) Unsafe.ARRAY_INT_BASE_OFFSET, i1);
sideEffect();
if (c) {
GraalDirectives.deoptimize();
}
return UNSAFE.getShort(t, (long) Unsafe.ARRAY_BYTE_INDEX_SCALE * 6 + Unsafe.ARRAY_INT_BASE_OFFSET);
}
// 8-byte write into a byte[8], then a 2-byte read at byte offset 6.
// NOTE(review): the read uses ARRAY_INT_BASE_OFFSET on a byte[] — this appears to rely on
// the int and byte array base offsets being equal; confirm this is intentional.
public static int unsafeSnippet6(long i1, boolean c) {
byte[] b = new byte[8];
UNSAFE.putLong(b, (long) Unsafe.ARRAY_BYTE_BASE_OFFSET, i1);
sideEffect();
if (c) {
GraalDirectives.deoptimize();
}
return UNSAFE.getShort(b, (long) Unsafe.ARRAY_BYTE_INDEX_SCALE * 6 + Unsafe.ARRAY_INT_BASE_OFFSET);
}
// 4-byte write into a byte[4], then a 2-byte read at byte offset 0.
// NOTE(review): as in unsafeSnippet6, the read uses ARRAY_INT_BASE_OFFSET on a byte[] —
// confirm the base-offset mix is intentional.
public static int unsafeSnippet7(int i1, boolean c) {
byte[] b = new byte[4];
UNSAFE.putInt(b, (long) Unsafe.ARRAY_BYTE_BASE_OFFSET, i1);
sideEffect();
if (c) {
GraalDirectives.deoptimize();
}
return UNSAFE.getShort(b, (long) Unsafe.ARRAY_BYTE_INDEX_SCALE * 0 + Unsafe.ARRAY_INT_BASE_OFFSET);
}
// 8-byte write, then a 4-byte write overwriting bytes 4..7, then a 2-byte read at
// offset 2 (a region touched only by the first write).
public static int unsafeSnippet8(long i1, int i2, boolean c) {
byte[] b = new byte[8];
UNSAFE.putLong(b, (long) Unsafe.ARRAY_BYTE_BASE_OFFSET, i1);
UNSAFE.putInt(b, (long) Unsafe.ARRAY_BYTE_BASE_OFFSET + 4 * Unsafe.ARRAY_BYTE_INDEX_SCALE, i2);
sideEffect();
if (c) {
GraalDirectives.deoptimize();
}
return UNSAFE.getShort(b, (long) Unsafe.ARRAY_BYTE_INDEX_SCALE * 2 + Unsafe.ARRAY_BYTE_BASE_OFFSET);
}
// 8-byte write, then a 2-byte write overwriting bytes 4..5, then a 2-byte read at
// offset 6 (still covered by the original long write).
public static int unsafeSnippet9(long i1, short i2, boolean c) {
byte[] b = new byte[8];
UNSAFE.putLong(b, (long) Unsafe.ARRAY_BYTE_BASE_OFFSET, i1);
UNSAFE.putShort(b, (long) Unsafe.ARRAY_BYTE_BASE_OFFSET + 4 * Unsafe.ARRAY_BYTE_INDEX_SCALE, i2);
sideEffect();
if (c) {
GraalDirectives.deoptimize();
}
return UNSAFE.getShort(b, (long) Unsafe.ARRAY_BYTE_INDEX_SCALE * 6 + Unsafe.ARRAY_BYTE_BASE_OFFSET);
}
// Type punning: a double's bit pattern is written into a byte[8], then a 2-byte slice
// at offset 2 is read back as a short.
public static int unsafeSnippet10(double i1, boolean c) {
byte[] b = new byte[8];
UNSAFE.putDouble(b, (long) Unsafe.ARRAY_BYTE_BASE_OFFSET, i1);
sideEffect();
if (c) {
GraalDirectives.deoptimize();
}
return UNSAFE.getShort(b, (long) Unsafe.ARRAY_BYTE_INDEX_SCALE * 2 + Unsafe.ARRAY_BYTE_BASE_OFFSET);
}
// Type punning: double bits written into a byte[8]; the upper 4 bytes (offset 4) are
// read back as a float.
public static float unsafeSnippet11(double i1, boolean c) {
byte[] b = new byte[8];
UNSAFE.putDouble(b, (long) Unsafe.ARRAY_BYTE_BASE_OFFSET, i1);
sideEffect();
if (c) {
GraalDirectives.deoptimize();
}
return UNSAFE.getFloat(b, (long) Unsafe.ARRAY_BYTE_INDEX_SCALE * 4 + Unsafe.ARRAY_BYTE_BASE_OFFSET);
}
// Full-width type punning: double bits written into a byte[8] and read back as a long
// at the same offset (same size, different kind).
public static long unsafeSnippet12(double i1, boolean c) {
byte[] b = new byte[8];
UNSAFE.putDouble(b, (long) Unsafe.ARRAY_BYTE_BASE_OFFSET, i1);
sideEffect();
if (c) {
GraalDirectives.deoptimize();
}
return UNSAFE.getLong(b, (long) Unsafe.ARRAY_BYTE_BASE_OFFSET);
}
// Simple round trip: a 2-byte write and a 2-byte read at the same offset and size.
public static short unsafeSnippet13(short i1, boolean c) {
byte[] b = new byte[8];
UNSAFE.putShort(b, (long) Unsafe.ARRAY_BYTE_BASE_OFFSET, i1);
sideEffect();
if (c) {
GraalDirectives.deoptimize();
}
return UNSAFE.getShort(b, (long) Unsafe.ARRAY_BYTE_BASE_OFFSET);
}
// Control-flow merge with differently-sized writes: one branch writes 8 bytes, the
// other 4; after the merge a 2-byte read at byte offset 6 observes data written by
// only one of the branches.
public static int unsafeSnippet14(long l, int i, boolean c) {
int[] t = new int[2];
if (i < l) {
UNSAFE.putLong(t, (long) Unsafe.ARRAY_INT_BASE_OFFSET, l);
} else {
UNSAFE.putInt(t, (long) Unsafe.ARRAY_INT_BASE_OFFSET, i);
}
sideEffect();
if (c) {
GraalDirectives.deoptimize();
}
return UNSAFE.getShort(t, (long) Unsafe.ARRAY_BYTE_INDEX_SCALE * 6 + Unsafe.ARRAY_INT_BASE_OFFSET);
}
// Like unsafeSnippet13's shape, but the read offset is produced by the helper
// getUnsafeByteArrayOffset(0) rather than written inline.
public static int unsafeSnippet15(long i1, boolean c) {
byte[] b = new byte[8];
UNSAFE.putLong(b, (long) Unsafe.ARRAY_BYTE_BASE_OFFSET, i1);
sideEffect();
if (c) {
GraalDirectives.deoptimize();
}
return UNSAFE.getShort(b, getUnsafeByteArrayOffset(0));
}
/**
 * Absolute unsafe offset of element {@code i} in a {@code byte[]}: the array base
 * offset plus {@code i} scaled by the byte element size (computed in long arithmetic).
 */
private static long getUnsafeByteArrayOffset(int i) {
return Unsafe.ARRAY_BYTE_BASE_OFFSET + (long) Unsafe.ARRAY_BYTE_INDEX_SCALE * i;
}
// Several non-overlapping writes of mixed sizes/kinds (long@8, int@20, short@26,
// double@32, float@44) into a byte[128]; the array itself escapes via the return value.
public static byte[] unsafeSnippet16(long l, int i, short s, double d, float f, boolean c) {
byte[] b = new byte[128];
UNSAFE.putLong(b, getUnsafeByteArrayOffset(8), l);
UNSAFE.putInt(b, getUnsafeByteArrayOffset(20), i);
UNSAFE.putShort(b, getUnsafeByteArrayOffset(26), s);
UNSAFE.putDouble(b, getUnsafeByteArrayOffset(32), d);
UNSAFE.putFloat(b, getUnsafeByteArrayOffset(44), f);
sideEffect();
if (c) {
GraalDirectives.deoptimize();
}
return b;
}
// Same-size long write/read round trip through a byte[8].
public static long unsafeSnippet17(long i1, boolean c) {
byte[] t = new byte[8];
UNSAFE.putLong(t, (long) Unsafe.ARRAY_BYTE_BASE_OFFSET, i1);
sideEffect();
if (c) {
GraalDirectives.deoptimize();
}
return UNSAFE.getLong(t, (long) Unsafe.ARRAY_BYTE_BASE_OFFSET);
}
// Unaligned access: a 4-byte write at byte offset 3, then an 8-byte read at offset 0.
// The corresponding test only runs when the target supports unaligned memory access.
public static long unsafeSnippet18(int i1, boolean c) {
byte[] t = new byte[8];
UNSAFE.putInt(t, getUnsafeByteArrayOffset(3), i1);
sideEffect();
if (c) {
GraalDirectives.deoptimize();
}
return UNSAFE.getLong(t, (long) Unsafe.ARRAY_BYTE_BASE_OFFSET);
}
// Merge where both branches write the same size/kind (long) at the same offset; the
// read after the merge sees a well-defined 8-byte value either way.
public static long unsafeSnippet19(long l1, long l2, boolean c) {
byte[] t = new byte[16];
if (l1 < l2) {
UNSAFE.putLong(t, getUnsafeByteArrayOffset(0), l1);
} else {
UNSAFE.putLong(t, getUnsafeByteArrayOffset(0), l2);
}
if (c) {
GraalDirectives.deoptimize();
}
return UNSAFE.getLong(t, getUnsafeByteArrayOffset(0));
}
// Merge with mismatched write sizes: one branch writes 8 bytes, the other 4, but the
// read after the merge is 8 bytes wide.
public static long unsafeSnippet20(long l1, int i2, boolean c) {
byte[] t = new byte[16];
if (l1 < i2) {
UNSAFE.putLong(t, getUnsafeByteArrayOffset(0), l1);
} else {
UNSAFE.putInt(t, getUnsafeByteArrayOffset(0), i2);
}
if (c) {
GraalDirectives.deoptimize();
}
return UNSAFE.getLong(t, getUnsafeByteArrayOffset(0));
}
// Merge where only one branch writes; on the other path the read observes the
// zero-initialized array contents.
public static long unsafeSnippet21(long l1, boolean c) {
byte[] t = new byte[16];
if (l1 < 0) {
UNSAFE.putLong(t, getUnsafeByteArrayOffset(0), l1);
} else {
sideEffect();
}
if (c) {
GraalDirectives.deoptimize();
}
return UNSAFE.getLong(t, getUnsafeByteArrayOffset(0));
}
// Merge with same-size but different-kind writes (long vs. double bits); the read is a
// long, so one path is a type-punned observation.
public static long unsafeSnippet22(long l1, double d1, boolean c) {
byte[] t = new byte[16];
if (l1 < d1) {
UNSAFE.putLong(t, getUnsafeByteArrayOffset(0), l1);
} else {
UNSAFE.putDouble(t, getUnsafeByteArrayOffset(0), d1);
}
if (c) {
GraalDirectives.deoptimize();
}
return UNSAFE.getLong(t, getUnsafeByteArrayOffset(0));
}
// The array escapes (stored into box[0]) after a conditional short write; a float write
// then happens on the already-escaped object before the final 8-byte read.
public static long unsafeSnippet23(long l1, short s1, float f1, byte[][] box, boolean c) {
byte[] t = new byte[16];
UNSAFE.putLong(t, getUnsafeByteArrayOffset(0), l1);
if (s1 < f1) {
UNSAFE.putShort(t, getUnsafeByteArrayOffset(12), (short) 0);
}
// escape
box[0] = t;
UNSAFE.putFloat(t, getUnsafeByteArrayOffset(12), f1);
sideEffect();
if (c) {
GraalDirectives.deoptimize();
}
return UNSAFE.getLong(t, getUnsafeByteArrayOffset(0));
}
// NOTE(review): the three-argument testPartialEscapeReadElimination(...) calls below do
// not match the five-argument overload defined later in this file — this looks like
// diff/merge residue mixing the old and new test bodies; confirm against upstream.
@Test
public void testUnsafePEA01() {
testPartialEscapeReadElimination("unsafeSnippet1", false, 1.0);
testPartialEscapeReadElimination("unsafeSnippet1", true, 1.0);
performTest("unsafeSnippet1", false, true, 1.0);
}
@Test
public void testUnsafePEA02() {
testPartialEscapeReadElimination("unsafeSnippet2a", false, 1);
testPartialEscapeReadElimination("unsafeSnippet2a", true, 1);
performTest("unsafeSnippet2a", false, true, 1);
testPartialEscapeReadElimination("unsafeSnippet2b", false, 1);
testPartialEscapeReadElimination("unsafeSnippet2b", true, 1);
performTest("unsafeSnippet2b", false, true, 1);
}
@Test
public void testUnsafePEA03() {
testPartialEscapeReadElimination("unsafeSnippet3a", false, 1);
testPartialEscapeReadElimination("unsafeSnippet3a", true, 1);
performTest("unsafeSnippet3a", false, true, 1);
testPartialEscapeReadElimination("unsafeSnippet3b", false, 1);
testPartialEscapeReadElimination("unsafeSnippet3b", true, 1);
performTest("unsafeSnippet3b", false, true, 1);
}
@Test
public void testUnsafePEA04() {
testPartialEscapeReadElimination("unsafeSnippet4", false, 1.0);
testPartialEscapeReadElimination("unsafeSnippet4", true, 1.0);
performTest("unsafeSnippet4", false, true, 1.0);
}
public void testPartialEscapeReadElimination(String snippet, boolean canonicalizeBefore, Object... args) {
// Each testUnsafePEAxx drives performTest(snippet, shouldEscapeRead, shouldEscapeWrite,
// args...): the two booleans state whether PEA is expected to remove RawLoadNode /
// RawStoreNode instances for that snippet (asserted in testPartialEscapeReadElimination).
@Test
public void testUnsafePEA05() {
performTest("unsafeSnippet5", false, true, 0x0102030405060708L);
}
@Test
public void testUnsafePEA06() {
performTest("unsafeSnippet6", false, true, 0x0102030405060708L);
}
@Test
public void testUnsafePEA07() {
performTest("unsafeSnippet7", false, true, 0x01020304);
}
@Test
public void testUnsafePEA08() {
performTest("unsafeSnippet8", false, true, 0x0102030405060708L, 0x01020304);
}
@Test
public void testUnsafePEA09() {
performTest("unsafeSnippet9", false, true, 0x0102030405060708L, (short) 0x0102);
}
@Test
public void testUnsafePEA10() {
performTest("unsafeSnippet10", false, true, Double.longBitsToDouble(0x0102030405060708L));
}
@Test
public void testUnsafePEA11() {
performTest("unsafeSnippet11", false, true, Double.longBitsToDouble(0x0102030405060708L));
}
@Test
public void testUnsafePEA12() {
performTest("unsafeSnippet12", false, true, Double.longBitsToDouble(0x0102030405060708L));
}
// Same-size/kind round trip: reads are expected to be eliminated too.
@Test
public void testUnsafePEA13() {
performTest("unsafeSnippet13", true, true, (short) 0x0102);
}
@Test
public void testUnsafePEA14() {
performTest("unsafeSnippet14", false, true, 0x0102030405060708L, 0x01020304);
}
@Test
public void testUnsafePEA15() {
performTest("unsafeSnippet15", false, true, 0x0102030405060708L);
}
@Test
public void testUnsafePEA16() {
performTest("unsafeSnippet16", false, true, 0x0102030405060708L, 0x01020304, (short) 0x0102, Double.longBitsToDouble(0x0102030405060708L), Float.intBitsToFloat(0x01020304));
}
@Test
public void testUnsafePEA17() {
performTest("unsafeSnippet17", true, true, 0x0102030405060708L);
}
// Unaligned write: only meaningful on targets with unaligned memory access;
// neither reads nor writes are expected to be eliminated.
@Test
public void testUnsafePEA18() {
Assume.assumeTrue(getBackend().getTarget().arch.supportsUnalignedMemoryAccess());
performTest("unsafeSnippet18", false, false, 0x01020304);
}
@Test
public void testUnsafePEA19() {
performTest("unsafeSnippet19", true, true, 0x0102030405060708L, 0x0102030405060708L);
}
@Test
public void testUnsafePEA20() {
performTest("unsafeSnippet20", false, false, 0x0102030405060708L, 0x01020304);
}
@Test
public void testUnsafePEA21() {
performTest("unsafeSnippet21", true, true, 0x0102030405060708L);
}
@Test
public void testUnsafePEA22() {
performTest("unsafeSnippet22", false, false, 0x0102030405060708L, Double.longBitsToDouble(0x0506070801020304L));
}
// The array escapes mid-snippet, so no elimination is expected.
@Test
public void testUnsafePEA23() {
performTest("unsafeSnippet23", false, false, 0x0102030405060708L, (short) 0x0102, Float.intBitsToFloat(0x01020304), new byte[1][]);
}
/**
 * Runs {@code testPartialEscapeReadElimination} for the snippet under every combination
 * of (canonicalize-before, deopt-flag); the deopt flag is appended as the snippet's last
 * argument.
 */
private void performTest(String snippet, boolean shouldEscapeRead, boolean shouldEscapeWrite, Object... args) {
Object[] argsWithDeoptFlag = Arrays.copyOf(args, args.length + 1);
for (boolean canonicalize : FT) {
for (boolean deopt : FT) {
argsWithDeoptFlag[args.length] = deopt;
testPartialEscapeReadElimination(snippet, canonicalize, shouldEscapeRead, shouldEscapeWrite, argsWithDeoptFlag);
}
}
}
public void testPartialEscapeReadElimination(String snippet, boolean canonicalizeBefore, boolean shouldEscapeRead, boolean shouldEscapeWrite, Object... args) {
assert TestClassInt.fieldOffset1 % 8 == 0 : "First of the two int-fields must be 8-byte aligned";
ResolvedJavaMethod method = getResolvedJavaMethod(snippet);
@ -121,7 +514,25 @@ public class UnsafeVirtualizationTest extends GraalCompilerTest {
canonicalizer.apply(graph, context);
}
Result r = executeExpected(method, null, args);
int readCount = 0;
int writeCount = 0;
boolean escapeReads = shouldEscapeRead && context.getPlatformConfigurationProvider().canVirtualizeLargeByteArrayAccess();
boolean escapeWrites = shouldEscapeWrite && context.getPlatformConfigurationProvider().canVirtualizeLargeByteArrayAccess();
if (escapeReads) {
readCount = graph.getNodes().filter(RawLoadNode.class).count();
}
if (escapeWrites) {
writeCount = graph.getNodes().filter(RawStoreNode.class).count();
}
new PartialEscapePhase(true, true, canonicalizer, null, options).apply(graph, context);
if (escapeReads) {
int newCount = graph.getNodes().filter(RawLoadNode.class).count();
assertTrue(readCount > newCount, "PEA did not escape reads. before: " + readCount + ", after " + newCount);
}
if (escapeWrites) {
int newCount = graph.getNodes().filter(RawStoreNode.class).count();
assertTrue(writeCount > newCount, "PEA did not escape writes, before: " + writeCount + ", after: " + newCount);
}
try {
InstalledCode code = getCode(method, graph);
Object result = code.executeVarargs(args);

View File

@ -96,6 +96,11 @@ public class VerifySystemPropertyUsage extends VerifyPhase<CoreProviders> {
// * its JDK substitutions to mimic required JDK semantics
// * native-image for config info
return;
} else if (packageName.startsWith("jdk.jfr")) {
// JFR for SVM must read system properties in:
// * its JDK substitutions to mimic required JDK semantics
// * native-image for config info
return;
} else if (packageName.startsWith("jdk.tools.jaotc")) {
// Workaround since jdk.internal.vm.ci/jdk.vm.ci.services is not exported to jdk.aot.
// The jaotc launcher dynamically adds these exports.

View File

@ -0,0 +1,91 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
*/
package org.graalvm.compiler.core.test.deopt;
import static org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderConfiguration.BytecodeExceptionMode.CheckAll;
import org.graalvm.compiler.api.directives.GraalDirectives;
import org.graalvm.compiler.core.phases.HighTier;
import org.graalvm.compiler.core.test.GraalCompilerTest;
import org.graalvm.compiler.nodes.ValueNode;
import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderConfiguration;
import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderContext;
import org.graalvm.compiler.nodes.graphbuilderconf.InlineInvokePlugin;
import org.graalvm.compiler.options.OptionValues;
import org.graalvm.compiler.phases.tiers.Suites;
import org.junit.Test;
import jdk.vm.ci.meta.ResolvedJavaMethod;
public final class RethrowExceptionLoopTest extends GraalCompilerTest {
// Rethrows the argument when it is one of the two recognized throwable types,
// otherwise returns it unchanged.
static Object method(Object object) {
if (object instanceof SecurityException) {
throw (SecurityException) object;
}
if (object instanceof IllegalAccessError) {
throw (IllegalAccessError) object;
}
return object;
}
// Test subject: a SecurityException from method() hits a handler that deoptimizes and
// then rethrows as IllegalArgumentException; an IllegalAccessError is swallowed and the
// loop retries.
public static Object catchInLoop(Object object) {
for (;;) {
try {
return method(object);
} catch (SecurityException e) {
GraalDirectives.deoptimize();
throw new IllegalArgumentException();
} catch (IllegalAccessError e) {
}
}
}
// Disable high-tier inlining so method() stays a real call site.
@Override
@SuppressWarnings("try")
protected Suites createSuites(OptionValues options) {
return super.createSuites(new OptionValues(options, HighTier.Options.Inline, false));
}
// Keep invokes out-of-line with explicit exception edges during bytecode parsing.
@Override
protected InlineInvokePlugin.InlineInfo bytecodeParserShouldInlineInvoke(GraphBuilderContext b, ResolvedJavaMethod method, ValueNode[] args) {
return InlineInvokePlugin.InlineInfo.DO_NOT_INLINE_WITH_EXCEPTION;
}
// Force explicit bytecode exception checks everywhere.
@Override
protected GraphBuilderConfiguration editGraphBuilderConfiguration(GraphBuilderConfiguration conf) {
return super.editGraphBuilderConfiguration(conf).withBytecodeExceptionMode(CheckAll);
}
/**
 * Check that a deoptimize in an exception handler resumes execution properly.
 */
@Test
public void testCatchInLoop() {
test("catchInLoop", new SecurityException());
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,37 +24,21 @@
package org.graalvm.compiler.core.test.ea;
import java.lang.reflect.Field;
import org.graalvm.compiler.core.test.GraalCompilerTest;
import org.junit.Test;
import sun.misc.Unsafe;
/**
* Exercise a mix of unsafe and normal reads ands writes in situations where EA might attempt to
* fold the operations.
*/
public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
private static final Unsafe unsafe = initUnsafe();
private static Unsafe initUnsafe() {
try {
Field theUnsafe = Unsafe.class.getDeclaredField("theUnsafe");
theUnsafe.setAccessible(true);
return (Unsafe) theUnsafe.get(Unsafe.class);
} catch (Exception e) {
throw new RuntimeException("exception while trying to get Unsafe", e);
}
}
private static final long byteArrayBaseOffset = unsafe.arrayBaseOffset(byte[].class);
private static final long byteArrayBaseOffset = UNSAFE.arrayBaseOffset(byte[].class);
private static byte byteValue = 0x61;
public static byte[] testByteArrayWithCharStoreSnippet(char v) {
byte[] b = new byte[8];
unsafe.putChar(b, byteArrayBaseOffset, v);
UNSAFE.putChar(b, byteArrayBaseOffset, v);
return b;
}
@ -65,7 +49,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static byte[] testByteArrayWithShortStoreSnippet(short v) {
byte[] b = new byte[8];
unsafe.putShort(b, byteArrayBaseOffset, v);
UNSAFE.putShort(b, byteArrayBaseOffset, v);
return b;
}
@ -76,7 +60,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static byte[] testByteArrayWithIntStoreSnippet(int v) {
byte[] b = new byte[8];
unsafe.putInt(b, byteArrayBaseOffset, v);
UNSAFE.putInt(b, byteArrayBaseOffset, v);
return b;
}
@ -87,7 +71,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static byte[] testByteArrayWithLongStoreSnippet(long v) {
byte[] b = new byte[8];
unsafe.putLong(b, byteArrayBaseOffset, v);
UNSAFE.putLong(b, byteArrayBaseOffset, v);
return b;
}
@ -98,7 +82,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static byte[] testByteArrayWithFloatStoreSnippet(float v) {
byte[] b = new byte[8];
unsafe.putFloat(b, byteArrayBaseOffset, v);
UNSAFE.putFloat(b, byteArrayBaseOffset, v);
return b;
}
@ -109,7 +93,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static byte[] testByteArrayWithDoubleStoreSnippet(double v) {
byte[] b = new byte[8];
unsafe.putDouble(b, byteArrayBaseOffset, v);
UNSAFE.putDouble(b, byteArrayBaseOffset, v);
return b;
}
@ -118,12 +102,12 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
test("testByteArrayWithDoubleStoreSnippet", doubleValue);
}
private static final long charArrayBaseOffset = unsafe.arrayBaseOffset(char[].class);
private static final long charArrayBaseOffset = UNSAFE.arrayBaseOffset(char[].class);
private static char charValue = 0x4142;
public static char[] testCharArrayWithByteStoreSnippet(byte v) {
char[] b = new char[4];
unsafe.putByte(b, charArrayBaseOffset, v);
UNSAFE.putByte(b, charArrayBaseOffset, v);
return b;
}
@ -134,7 +118,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static char[] testCharArrayWithShortStoreSnippet(short v) {
char[] b = new char[4];
unsafe.putShort(b, charArrayBaseOffset, v);
UNSAFE.putShort(b, charArrayBaseOffset, v);
return b;
}
@ -145,7 +129,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static char[] testCharArrayWithIntStoreSnippet(int v) {
char[] b = new char[4];
unsafe.putInt(b, charArrayBaseOffset, v);
UNSAFE.putInt(b, charArrayBaseOffset, v);
return b;
}
@ -156,7 +140,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static char[] testCharArrayWithLongStoreSnippet(long v) {
char[] b = new char[4];
unsafe.putLong(b, charArrayBaseOffset, v);
UNSAFE.putLong(b, charArrayBaseOffset, v);
return b;
}
@ -167,7 +151,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static char[] testCharArrayWithFloatStoreSnippet(float v) {
char[] b = new char[4];
unsafe.putFloat(b, charArrayBaseOffset, v);
UNSAFE.putFloat(b, charArrayBaseOffset, v);
return b;
}
@ -178,7 +162,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static char[] testCharArrayWithDoubleStoreSnippet(double v) {
char[] b = new char[4];
unsafe.putDouble(b, charArrayBaseOffset, v);
UNSAFE.putDouble(b, charArrayBaseOffset, v);
return b;
}
@ -187,12 +171,12 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
test("testCharArrayWithDoubleStoreSnippet", doubleValue);
}
private static final long shortArrayBaseOffset = unsafe.arrayBaseOffset(short[].class);
private static final long shortArrayBaseOffset = UNSAFE.arrayBaseOffset(short[].class);
private static short shortValue = 0x1112;
public static short[] testShortArrayWithByteStoreSnippet(byte v) {
short[] b = new short[4];
unsafe.putByte(b, shortArrayBaseOffset, v);
UNSAFE.putByte(b, shortArrayBaseOffset, v);
return b;
}
@ -203,7 +187,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static short[] testShortArrayWithCharStoreSnippet(char v) {
short[] b = new short[4];
unsafe.putChar(b, shortArrayBaseOffset, v);
UNSAFE.putChar(b, shortArrayBaseOffset, v);
return b;
}
@ -214,7 +198,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static short[] testShortArrayWithIntStoreSnippet(int v) {
short[] b = new short[4];
unsafe.putInt(b, shortArrayBaseOffset, v);
UNSAFE.putInt(b, shortArrayBaseOffset, v);
return b;
}
@ -225,7 +209,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static short[] testShortArrayWithLongStoreSnippet(long v) {
short[] b = new short[4];
unsafe.putLong(b, shortArrayBaseOffset, v);
UNSAFE.putLong(b, shortArrayBaseOffset, v);
return b;
}
@ -236,7 +220,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static short[] testShortArrayWithFloatStoreSnippet(float v) {
short[] b = new short[4];
unsafe.putFloat(b, shortArrayBaseOffset, v);
UNSAFE.putFloat(b, shortArrayBaseOffset, v);
return b;
}
@ -247,7 +231,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static short[] testShortArrayWithDoubleStoreSnippet(double v) {
short[] b = new short[4];
unsafe.putDouble(b, shortArrayBaseOffset, v);
UNSAFE.putDouble(b, shortArrayBaseOffset, v);
return b;
}
@ -256,12 +240,12 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
test("testShortArrayWithDoubleStoreSnippet", doubleValue);
}
private static final long intArrayBaseOffset = unsafe.arrayBaseOffset(int[].class);
private static final long intArrayBaseOffset = UNSAFE.arrayBaseOffset(int[].class);
private static int intValue = 0x01020304;
public static int[] testIntArrayWithByteStoreSnippet(byte v) {
int[] b = new int[4];
unsafe.putByte(b, intArrayBaseOffset, v);
UNSAFE.putByte(b, intArrayBaseOffset, v);
return b;
}
@ -272,7 +256,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static int[] testIntArrayWithCharStoreSnippet(char v) {
int[] b = new int[4];
unsafe.putChar(b, intArrayBaseOffset, v);
UNSAFE.putChar(b, intArrayBaseOffset, v);
return b;
}
@ -283,7 +267,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static int[] testIntArrayWithShortStoreSnippet(short v) {
int[] b = new int[4];
unsafe.putShort(b, intArrayBaseOffset, v);
UNSAFE.putShort(b, intArrayBaseOffset, v);
return b;
}
@ -294,7 +278,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static int[] testIntArrayWithLongStoreSnippet(long v) {
int[] b = new int[4];
unsafe.putLong(b, intArrayBaseOffset, v);
UNSAFE.putLong(b, intArrayBaseOffset, v);
return b;
}
@ -305,7 +289,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static int[] testIntArrayWithFloatStoreSnippet(float v) {
int[] b = new int[4];
unsafe.putFloat(b, intArrayBaseOffset, v);
UNSAFE.putFloat(b, intArrayBaseOffset, v);
return b;
}
@ -316,7 +300,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static int[] testIntArrayWithDoubleStoreSnippet(double v) {
int[] b = new int[4];
unsafe.putDouble(b, intArrayBaseOffset, v);
UNSAFE.putDouble(b, intArrayBaseOffset, v);
return b;
}
@ -325,12 +309,12 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
test("testIntArrayWithDoubleStoreSnippet", doubleValue);
}
private static final long longArrayBaseOffset = unsafe.arrayBaseOffset(long[].class);
private static final long longArrayBaseOffset = UNSAFE.arrayBaseOffset(long[].class);
private static long longValue = 0x31323334353637L;
public static long[] testLongArrayWithByteStoreSnippet(byte v) {
long[] b = new long[4];
unsafe.putByte(b, longArrayBaseOffset, v);
UNSAFE.putByte(b, longArrayBaseOffset, v);
return b;
}
@ -341,7 +325,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static long[] testLongArrayWithCharStoreSnippet(char v) {
long[] b = new long[4];
unsafe.putChar(b, longArrayBaseOffset, v);
UNSAFE.putChar(b, longArrayBaseOffset, v);
return b;
}
@ -352,7 +336,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static long[] testLongArrayWithShortStoreSnippet(short v) {
long[] b = new long[4];
unsafe.putShort(b, longArrayBaseOffset, v);
UNSAFE.putShort(b, longArrayBaseOffset, v);
return b;
}
@ -363,7 +347,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static long[] testLongArrayWithIntStoreSnippet(int v) {
long[] b = new long[4];
unsafe.putInt(b, longArrayBaseOffset, v);
UNSAFE.putInt(b, longArrayBaseOffset, v);
return b;
}
@ -374,7 +358,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static long[] testLongArrayWithFloatStoreSnippet(float v) {
long[] b = new long[4];
unsafe.putFloat(b, longArrayBaseOffset, v);
UNSAFE.putFloat(b, longArrayBaseOffset, v);
return b;
}
@ -385,7 +369,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static long[] testLongArrayWithDoubleStoreSnippet(double v) {
long[] b = new long[4];
unsafe.putDouble(b, longArrayBaseOffset, v);
UNSAFE.putDouble(b, longArrayBaseOffset, v);
return b;
}
@ -394,12 +378,12 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
test("testLongArrayWithDoubleStoreSnippet", doubleValue);
}
private static final long floatArrayBaseOffset = unsafe.arrayBaseOffset(float[].class);
private static final long floatArrayBaseOffset = UNSAFE.arrayBaseOffset(float[].class);
private static float floatValue = Float.NaN;
public static float[] testFloatArrayWithByteStoreSnippet(byte v) {
float[] b = new float[4];
unsafe.putByte(b, floatArrayBaseOffset, v);
UNSAFE.putByte(b, floatArrayBaseOffset, v);
return b;
}
@ -410,7 +394,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static float[] testFloatArrayWithCharStoreSnippet(char v) {
float[] b = new float[4];
unsafe.putChar(b, floatArrayBaseOffset, v);
UNSAFE.putChar(b, floatArrayBaseOffset, v);
return b;
}
@ -421,7 +405,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static float[] testFloatArrayWithShortStoreSnippet(short v) {
float[] b = new float[4];
unsafe.putShort(b, floatArrayBaseOffset, v);
UNSAFE.putShort(b, floatArrayBaseOffset, v);
return b;
}
@ -432,7 +416,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static float[] testFloatArrayWithIntStoreSnippet(int v) {
float[] b = new float[4];
unsafe.putInt(b, floatArrayBaseOffset, v);
UNSAFE.putInt(b, floatArrayBaseOffset, v);
return b;
}
@ -443,7 +427,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static float[] testFloatArrayWithLongStoreSnippet(long v) {
float[] b = new float[4];
unsafe.putLong(b, floatArrayBaseOffset, v);
UNSAFE.putLong(b, floatArrayBaseOffset, v);
return b;
}
@ -454,7 +438,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static float[] testFloatArrayWithDoubleStoreSnippet(double v) {
float[] b = new float[4];
unsafe.putDouble(b, floatArrayBaseOffset, v);
UNSAFE.putDouble(b, floatArrayBaseOffset, v);
return b;
}
@ -463,7 +447,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
test("testFloatArrayWithDoubleStoreSnippet", doubleValue);
}
private static final long doubleArrayBaseOffset = unsafe.arrayBaseOffset(double[].class);
private static final long doubleArrayBaseOffset = UNSAFE.arrayBaseOffset(double[].class);
private static double doubleValue = Double.NaN;
private static final int byteSize = 1;
private static final int charSize = 2;
@ -475,7 +459,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static double[] testDoubleArrayWithByteStoreSnippet(byte v) {
double[] b = new double[4];
unsafe.putByte(b, doubleArrayBaseOffset, v);
UNSAFE.putByte(b, doubleArrayBaseOffset, v);
return b;
}
@ -486,7 +470,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static double[] testDoubleArrayWithCharStoreSnippet(char v) {
double[] b = new double[4];
unsafe.putChar(b, doubleArrayBaseOffset, v);
UNSAFE.putChar(b, doubleArrayBaseOffset, v);
return b;
}
@ -497,7 +481,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static double[] testDoubleArrayWithShortStoreSnippet(short v) {
double[] b = new double[4];
unsafe.putShort(b, doubleArrayBaseOffset, v);
UNSAFE.putShort(b, doubleArrayBaseOffset, v);
return b;
}
@ -508,7 +492,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static double[] testDoubleArrayWithIntStoreSnippet(int v) {
double[] b = new double[4];
unsafe.putInt(b, doubleArrayBaseOffset, v);
UNSAFE.putInt(b, doubleArrayBaseOffset, v);
return b;
}
@ -519,7 +503,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static double[] testDoubleArrayWithLongStoreSnippet(long v) {
double[] b = new double[4];
unsafe.putLong(b, doubleArrayBaseOffset, v);
UNSAFE.putLong(b, doubleArrayBaseOffset, v);
return b;
}
@ -530,7 +514,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static double[] testDoubleArrayWithFloatStoreSnippet(float v) {
double[] b = new double[4];
unsafe.putFloat(b, doubleArrayBaseOffset, v);
UNSAFE.putFloat(b, doubleArrayBaseOffset, v);
return b;
}
@ -541,7 +525,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static byte testByteArrayWithCharStoreAndReadSnippet(char v) {
byte[] b = new byte[4];
unsafe.putChar(b, byteArrayBaseOffset, v);
UNSAFE.putChar(b, byteArrayBaseOffset, v);
return b[(byteSize / charSize) + 1];
}
@ -552,7 +536,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static byte testByteArrayWithShortStoreAndReadSnippet(short v) {
byte[] b = new byte[4];
unsafe.putShort(b, byteArrayBaseOffset, v);
UNSAFE.putShort(b, byteArrayBaseOffset, v);
return b[(byteSize / shortSize) + 1];
}
@ -563,7 +547,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static byte testByteArrayWithIntStoreAndReadSnippet(int v) {
byte[] b = new byte[4];
unsafe.putInt(b, byteArrayBaseOffset, v);
UNSAFE.putInt(b, byteArrayBaseOffset, v);
return b[(byteSize / intSize) + 1];
}
@ -574,7 +558,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static byte testByteArrayWithLongStoreAndReadSnippet(long v) {
byte[] b = new byte[4];
unsafe.putLong(b, byteArrayBaseOffset, v);
UNSAFE.putLong(b, byteArrayBaseOffset, v);
return b[(byteSize / longSize) + 1];
}
@ -585,7 +569,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static byte testByteArrayWithFloatStoreAndReadSnippet(float v) {
byte[] b = new byte[4];
unsafe.putFloat(b, byteArrayBaseOffset, v);
UNSAFE.putFloat(b, byteArrayBaseOffset, v);
return b[(byteSize / floatSize) + 1];
}
@ -596,7 +580,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static byte testByteArrayWithDoubleStoreAndReadSnippet(double v) {
byte[] b = new byte[4];
unsafe.putDouble(b, byteArrayBaseOffset, v);
UNSAFE.putDouble(b, byteArrayBaseOffset, v);
return b[(byteSize / doubleSize) + 1];
}
@ -607,7 +591,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static char testCharArrayWithByteStoreAndReadSnippet(byte v) {
char[] b = new char[4];
unsafe.putByte(b, charArrayBaseOffset, v);
UNSAFE.putByte(b, charArrayBaseOffset, v);
return b[(charSize / byteSize) + 1];
}
@ -618,7 +602,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static char testCharArrayWithShortStoreAndReadSnippet(short v) {
char[] b = new char[4];
unsafe.putShort(b, charArrayBaseOffset, v);
UNSAFE.putShort(b, charArrayBaseOffset, v);
return b[(charSize / shortSize) + 1];
}
@ -629,7 +613,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static char testCharArrayWithIntStoreAndReadSnippet(int v) {
char[] b = new char[4];
unsafe.putInt(b, charArrayBaseOffset, v);
UNSAFE.putInt(b, charArrayBaseOffset, v);
return b[(charSize / intSize) + 1];
}
@ -640,7 +624,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static char testCharArrayWithLongStoreAndReadSnippet(long v) {
char[] b = new char[4];
unsafe.putLong(b, charArrayBaseOffset, v);
UNSAFE.putLong(b, charArrayBaseOffset, v);
return b[(charSize / longSize) + 1];
}
@ -651,7 +635,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static char testCharArrayWithFloatStoreAndReadSnippet(float v) {
char[] b = new char[4];
unsafe.putFloat(b, charArrayBaseOffset, v);
UNSAFE.putFloat(b, charArrayBaseOffset, v);
return b[(charSize / floatSize) + 1];
}
@ -662,7 +646,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static char testCharArrayWithDoubleStoreAndReadSnippet(double v) {
char[] b = new char[4];
unsafe.putDouble(b, charArrayBaseOffset, v);
UNSAFE.putDouble(b, charArrayBaseOffset, v);
return b[(charSize / doubleSize) + 1];
}
@ -673,7 +657,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static short testShortArrayWithByteStoreAndReadSnippet(byte v) {
short[] b = new short[4];
unsafe.putByte(b, shortArrayBaseOffset, v);
UNSAFE.putByte(b, shortArrayBaseOffset, v);
return b[(shortSize / byteSize) + 1];
}
@ -684,7 +668,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static short testShortArrayWithCharStoreAndReadSnippet(char v) {
short[] b = new short[4];
unsafe.putChar(b, shortArrayBaseOffset, v);
UNSAFE.putChar(b, shortArrayBaseOffset, v);
return b[(shortSize / charSize) + 1];
}
@ -695,7 +679,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static short testShortArrayWithIntStoreAndReadSnippet(int v) {
short[] b = new short[4];
unsafe.putInt(b, shortArrayBaseOffset, v);
UNSAFE.putInt(b, shortArrayBaseOffset, v);
return b[(shortSize / intSize) + 1];
}
@ -706,7 +690,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static short testShortArrayWithLongStoreAndReadSnippet(long v) {
short[] b = new short[4];
unsafe.putLong(b, shortArrayBaseOffset, v);
UNSAFE.putLong(b, shortArrayBaseOffset, v);
return b[(shortSize / longSize) + 1];
}
@ -717,7 +701,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static short testShortArrayWithFloatStoreAndReadSnippet(float v) {
short[] b = new short[4];
unsafe.putFloat(b, shortArrayBaseOffset, v);
UNSAFE.putFloat(b, shortArrayBaseOffset, v);
return b[(shortSize / floatSize) + 1];
}
@ -728,7 +712,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static short testShortArrayWithDoubleStoreAndReadSnippet(double v) {
short[] b = new short[4];
unsafe.putDouble(b, shortArrayBaseOffset, v);
UNSAFE.putDouble(b, shortArrayBaseOffset, v);
return b[(shortSize / doubleSize) + 1];
}
@ -739,7 +723,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static int testIntArrayWithByteStoreAndReadSnippet(byte v) {
int[] b = new int[4];
unsafe.putByte(b, intArrayBaseOffset, v);
UNSAFE.putByte(b, intArrayBaseOffset, v);
return b[(intSize / byteSize) + 1];
}
@ -750,7 +734,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static int testIntArrayWithCharStoreAndReadSnippet(char v) {
int[] b = new int[4];
unsafe.putChar(b, intArrayBaseOffset, v);
UNSAFE.putChar(b, intArrayBaseOffset, v);
return b[(intSize / charSize) + 1];
}
@ -761,7 +745,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static int testIntArrayWithShortStoreAndReadSnippet(short v) {
int[] b = new int[4];
unsafe.putShort(b, intArrayBaseOffset, v);
UNSAFE.putShort(b, intArrayBaseOffset, v);
return b[(intSize / shortSize) + 1];
}
@ -772,7 +756,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static int testIntArrayWithLongStoreAndReadSnippet(long v) {
int[] b = new int[4];
unsafe.putLong(b, intArrayBaseOffset, v);
UNSAFE.putLong(b, intArrayBaseOffset, v);
return b[(intSize / longSize) + 1];
}
@ -783,7 +767,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static int testIntArrayWithFloatStoreAndReadSnippet(float v) {
int[] b = new int[4];
unsafe.putFloat(b, intArrayBaseOffset, v);
UNSAFE.putFloat(b, intArrayBaseOffset, v);
return b[(intSize / floatSize) + 1];
}
@ -794,7 +778,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static int testIntArrayWithDoubleStoreAndReadSnippet(double v) {
int[] b = new int[4];
unsafe.putDouble(b, intArrayBaseOffset, v);
UNSAFE.putDouble(b, intArrayBaseOffset, v);
return b[(intSize / doubleSize) + 1];
}
@ -805,7 +789,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static long testLongArrayWithByteStoreAndReadSnippet(byte v) {
long[] b = new long[4];
unsafe.putByte(b, longArrayBaseOffset, v);
UNSAFE.putByte(b, longArrayBaseOffset, v);
return b[(longSize / byteSize) + 1];
}
@ -816,7 +800,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static long testLongArrayWithCharStoreAndReadSnippet(char v) {
long[] b = new long[4];
unsafe.putChar(b, longArrayBaseOffset, v);
UNSAFE.putChar(b, longArrayBaseOffset, v);
return b[(longSize / charSize) + 1];
}
@ -827,7 +811,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static long testLongArrayWithShortStoreAndReadSnippet(short v) {
long[] b = new long[4];
unsafe.putShort(b, longArrayBaseOffset, v);
UNSAFE.putShort(b, longArrayBaseOffset, v);
return b[(longSize / shortSize) + 1];
}
@ -838,7 +822,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static long testLongArrayWithIntStoreAndReadSnippet(int v) {
long[] b = new long[4];
unsafe.putInt(b, longArrayBaseOffset, v);
UNSAFE.putInt(b, longArrayBaseOffset, v);
return b[(longSize / intSize) + 1];
}
@ -849,7 +833,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static long testLongArrayWithFloatStoreAndReadSnippet(float v) {
long[] b = new long[4];
unsafe.putFloat(b, longArrayBaseOffset, v);
UNSAFE.putFloat(b, longArrayBaseOffset, v);
return b[(longSize / floatSize) + 1];
}
@ -860,7 +844,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static long testLongArrayWithDoubleStoreAndReadSnippet(double v) {
long[] b = new long[4];
unsafe.putDouble(b, longArrayBaseOffset, v);
UNSAFE.putDouble(b, longArrayBaseOffset, v);
return b[(longSize / doubleSize) + 1];
}
@ -871,7 +855,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static float testFloatArrayWithByteStoreAndReadSnippet(byte v) {
float[] b = new float[4];
unsafe.putByte(b, floatArrayBaseOffset, v);
UNSAFE.putByte(b, floatArrayBaseOffset, v);
return b[(floatSize / byteSize) + 1];
}
@ -882,7 +866,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static float testFloatArrayWithCharStoreAndReadSnippet(char v) {
float[] b = new float[4];
unsafe.putChar(b, floatArrayBaseOffset, v);
UNSAFE.putChar(b, floatArrayBaseOffset, v);
return b[(floatSize / charSize) + 1];
}
@ -893,7 +877,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static float testFloatArrayWithShortStoreAndReadSnippet(short v) {
float[] b = new float[4];
unsafe.putShort(b, floatArrayBaseOffset, v);
UNSAFE.putShort(b, floatArrayBaseOffset, v);
return b[(floatSize / shortSize) + 1];
}
@ -904,7 +888,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static float testFloatArrayWithIntStoreAndReadSnippet(int v) {
float[] b = new float[4];
unsafe.putInt(b, floatArrayBaseOffset, v);
UNSAFE.putInt(b, floatArrayBaseOffset, v);
return b[(floatSize / intSize) + 1];
}
@ -915,7 +899,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static float testFloatArrayWithLongStoreAndReadSnippet(long v) {
float[] b = new float[4];
unsafe.putLong(b, floatArrayBaseOffset, v);
UNSAFE.putLong(b, floatArrayBaseOffset, v);
return b[(floatSize / longSize) + 1];
}
@ -926,7 +910,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static float testFloatArrayWithDoubleStoreAndReadSnippet(double v) {
float[] b = new float[4];
unsafe.putDouble(b, floatArrayBaseOffset, v);
UNSAFE.putDouble(b, floatArrayBaseOffset, v);
return b[(floatSize / doubleSize) + 1];
}
@ -937,7 +921,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static double testDoubleArrayWithByteStoreAndReadSnippet(byte v) {
double[] b = new double[4];
unsafe.putByte(b, doubleArrayBaseOffset, v);
UNSAFE.putByte(b, doubleArrayBaseOffset, v);
return b[(doubleSize / byteSize) + 1];
}
@ -948,7 +932,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static double testDoubleArrayWithCharStoreAndReadSnippet(char v) {
double[] b = new double[4];
unsafe.putChar(b, doubleArrayBaseOffset, v);
UNSAFE.putChar(b, doubleArrayBaseOffset, v);
return b[(doubleSize / charSize) + 1];
}
@ -959,7 +943,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static double testDoubleArrayWithShortStoreAndReadSnippet(short v) {
double[] b = new double[4];
unsafe.putShort(b, doubleArrayBaseOffset, v);
UNSAFE.putShort(b, doubleArrayBaseOffset, v);
return b[(doubleSize / shortSize) + 1];
}
@ -970,7 +954,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static double testDoubleArrayWithIntStoreAndReadSnippet(int v) {
double[] b = new double[4];
unsafe.putInt(b, doubleArrayBaseOffset, v);
UNSAFE.putInt(b, doubleArrayBaseOffset, v);
return b[(doubleSize / intSize) + 1];
}
@ -981,7 +965,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static double testDoubleArrayWithLongStoreAndReadSnippet(long v) {
double[] b = new double[4];
unsafe.putLong(b, doubleArrayBaseOffset, v);
UNSAFE.putLong(b, doubleArrayBaseOffset, v);
return b[(doubleSize / longSize) + 1];
}
@ -992,7 +976,7 @@ public class PartialEscapeUnsafeStoreTest extends GraalCompilerTest {
public static double testDoubleArrayWithFloatStoreAndReadSnippet(float v) {
double[] b = new double[4];
unsafe.putFloat(b, doubleArrayBaseOffset, v);
UNSAFE.putFloat(b, doubleArrayBaseOffset, v);
return b[(doubleSize / floatSize) + 1];
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2009, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -181,11 +181,9 @@ public class GraalCompiler {
}
if (crashLabel == null) {
ResolvedJavaMethod method = graph.method();
MethodFilter[] filters = MethodFilter.parse(methodPattern);
for (MethodFilter filter : filters) {
if (filter.matches(method)) {
crashLabel = method.format("%H.%n(%p)");
}
MethodFilter filter = MethodFilter.parse(methodPattern);
if (filter.matches(method)) {
crashLabel = method.format("%H.%n(%p)");
}
}
if (crashLabel != null) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -140,9 +140,23 @@ public class DebugInfoBuilder {
} else {
assert value.getStackKind() == JavaKind.Illegal;
ValueNode previousValue = currentField.values().get(i - 1);
assert (previousValue != null && previousValue.getStackKind().needsTwoSlots()) : vobjNode + " " + i +
assert (previousValue != null && (previousValue.getStackKind().needsTwoSlots()) || vobjNode.isVirtualByteArray()) : vobjNode + " " + i +
" " + previousValue + " " + currentField.values().snapshot();
if (previousValue == null || !previousValue.getStackKind().needsTwoSlots()) {
if (vobjNode.isVirtualByteArray()) {
/*
* Let Illegals pass through to help knowing the number of bytes to
* write. For example, writing a short to index 2 of a byte array of
* size 6 would look like, in debug info:
*
* {b0, b1, INT(...), ILLEGAL, b4, b5}
*
* Thus, from the VM, we can simply count the number of illegals to
* restore the byte count.
*/
values[pos] = Value.ILLEGAL;
slotKinds[pos] = JavaKind.Illegal;
pos++;
} else if (previousValue == null || !previousValue.getStackKind().needsTwoSlots()) {
// Don't allow the IllegalConstant to leak into the debug info
JavaKind entryKind = vobjNode.entryKind(i);
values[pos] = JavaConstant.defaultForKind(entryKind.getStackKind());
@ -203,7 +217,9 @@ public class DebugInfoBuilder {
}
} else {
for (int i = 0; i < values.length; i++) {
assert slotKinds[i] == componentKind || componentKind.getBitCount() >= slotKinds[i].getBitCount() ||
assert slotKinds[i] == componentKind ||
(slotKinds[i] == JavaKind.Illegal && storageKind(type.getComponentType()) == JavaKind.Byte) ||
componentKind.getBitCount() >= slotKinds[i].getBitCount() ||
(componentKind == JavaKind.Int && slotKinds[i].getBitCount() >= JavaKind.Int.getBitCount()) : slotKinds[i] + " != " + componentKind;
}
}

View File

@ -27,9 +27,9 @@ package org.graalvm.compiler.core.gen;
import static jdk.vm.ci.code.ValueUtil.asRegister;
import static jdk.vm.ci.code.ValueUtil.isLegal;
import static jdk.vm.ci.code.ValueUtil.isRegister;
import static org.graalvm.compiler.core.common.GraalOptions.MatchExpressions;
import static org.graalvm.compiler.core.common.SpeculativeExecutionAttacksMitigations.AllTargets;
import static org.graalvm.compiler.core.common.SpeculativeExecutionAttacksMitigations.Options.MitigateSpeculativeExecutionAttacks;
import static org.graalvm.compiler.core.common.GraalOptions.MatchExpressions;
import static org.graalvm.compiler.debug.DebugOptions.LogVerbose;
import static org.graalvm.compiler.lir.LIR.verifyBlock;
@ -544,39 +544,22 @@ public abstract class NodeLIRBuilder implements NodeLIRBuilderTool, LIRGeneratio
public void emitBranch(LogicNode node, LabelRef trueSuccessor, LabelRef falseSuccessor, double trueSuccessorProbability) {
if (node instanceof IsNullNode) {
emitNullCheckBranch((IsNullNode) node, trueSuccessor, falseSuccessor, trueSuccessorProbability);
LIRKind kind = gen.getLIRKind(((IsNullNode) node).getValue().stamp(NodeView.DEFAULT));
Value nullValue = gen.emitConstant(kind, ((IsNullNode) node).nullConstant());
gen.emitCompareBranch(kind.getPlatformKind(), operand(((IsNullNode) node).getValue()), nullValue, Condition.EQ, false, trueSuccessor, falseSuccessor, trueSuccessorProbability);
} else if (node instanceof CompareNode) {
emitCompareBranch((CompareNode) node, trueSuccessor, falseSuccessor, trueSuccessorProbability);
PlatformKind kind = gen.getLIRKind(((CompareNode) node).getX().stamp(NodeView.DEFAULT)).getPlatformKind();
gen.emitCompareBranch(kind, operand(((CompareNode) node).getX()), operand(((CompareNode) node).getY()), ((CompareNode) node).condition().asCondition(),
((CompareNode) node).unorderedIsTrue(), trueSuccessor, falseSuccessor, trueSuccessorProbability);
} else if (node instanceof LogicConstantNode) {
emitConstantBranch(((LogicConstantNode) node).getValue(), trueSuccessor, falseSuccessor);
gen.emitJump(((LogicConstantNode) node).getValue() ? trueSuccessor : falseSuccessor);
} else if (node instanceof IntegerTestNode) {
emitIntegerTestBranch((IntegerTestNode) node, trueSuccessor, falseSuccessor, trueSuccessorProbability);
gen.emitIntegerTestBranch(operand(((IntegerTestNode) node).getX()), operand(((IntegerTestNode) node).getY()), trueSuccessor, falseSuccessor, trueSuccessorProbability);
} else {
throw GraalError.unimplemented(node.toString());
}
}
private void emitNullCheckBranch(IsNullNode node, LabelRef trueSuccessor, LabelRef falseSuccessor, double trueSuccessorProbability) {
LIRKind kind = gen.getLIRKind(node.getValue().stamp(NodeView.DEFAULT));
Value nullValue = gen.emitConstant(kind, node.nullConstant());
gen.emitCompareBranch(kind.getPlatformKind(), operand(node.getValue()), nullValue, Condition.EQ, false, trueSuccessor, falseSuccessor, trueSuccessorProbability);
}
public void emitCompareBranch(CompareNode compare, LabelRef trueSuccessor, LabelRef falseSuccessor, double trueSuccessorProbability) {
PlatformKind kind = gen.getLIRKind(compare.getX().stamp(NodeView.DEFAULT)).getPlatformKind();
gen.emitCompareBranch(kind, operand(compare.getX()), operand(compare.getY()), compare.condition().asCondition(), compare.unorderedIsTrue(), trueSuccessor, falseSuccessor,
trueSuccessorProbability);
}
public void emitIntegerTestBranch(IntegerTestNode test, LabelRef trueSuccessor, LabelRef falseSuccessor, double trueSuccessorProbability) {
gen.emitIntegerTestBranch(operand(test.getX()), operand(test.getY()), trueSuccessor, falseSuccessor, trueSuccessorProbability);
}
public void emitConstantBranch(boolean value, LabelRef trueSuccessorBlock, LabelRef falseSuccessorBlock) {
LabelRef block = value ? trueSuccessorBlock : falseSuccessorBlock;
gen.emitJump(block);
}
@Override
public void emitConditional(ConditionalNode conditional) {
Value tVal = operand(conditional.trueValue());

View File

@ -55,6 +55,7 @@ import org.graalvm.compiler.nodes.calc.PointerEqualsNode;
import org.graalvm.compiler.nodes.calc.ReinterpretNode;
import org.graalvm.compiler.nodes.calc.RightShiftNode;
import org.graalvm.compiler.nodes.calc.SignExtendNode;
import org.graalvm.compiler.nodes.calc.SqrtNode;
import org.graalvm.compiler.nodes.calc.SubNode;
import org.graalvm.compiler.nodes.calc.UnsignedRightShiftNode;
import org.graalvm.compiler.nodes.calc.XorNode;
@ -101,6 +102,7 @@ import jdk.vm.ci.meta.Value;
@MatchableNode(nodeClass = LogicCompareAndSwapNode.class, inputs = {"address", "expectedValue", "newValue"})
@MatchableNode(nodeClass = ValueCompareAndSwapNode.class, inputs = {"address", "expectedValue", "newValue"})
@MatchableNode(nodeClass = RightShiftNode.class, inputs = {"x", "y"}, ignoresSideEffects = true)
@MatchableNode(nodeClass = SqrtNode.class, inputs = {"value"}, ignoresSideEffects = true)
public abstract class NodeMatchRules {
NodeLIRBuilder lirBuilder;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,7 +34,7 @@ import org.graalvm.compiler.phases.common.CanonicalizerPhase;
public class BaseTier<C> extends PhaseSuite<C> {
public LoopPolicies createLoopPolicies() {
public LoopPolicies createLoopPolicies(@SuppressWarnings("unused") OptionValues options) {
return new DefaultLoopPolicies();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -90,7 +90,7 @@ public class HighTier extends BaseTier<HighTierContext> {
appendPhase(new IterativeConditionalEliminationPhase(canonicalizer, false));
}
LoopPolicies loopPolicies = createLoopPolicies();
LoopPolicies loopPolicies = createLoopPolicies(options);
appendPhase(new LoopFullUnrollPhase(canonicalizer, loopPolicies));
if (LoopPeeling.getValue(options)) {
@ -117,7 +117,7 @@ public class HighTier extends BaseTier<HighTierContext> {
}
@Override
public LoopPolicies createLoopPolicies() {
public LoopPolicies createLoopPolicies(OptionValues options) {
return new DefaultLoopPolicies();
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -96,7 +96,7 @@ public class MidTier extends BaseTier<MidTierContext> {
appendPhase(new FrameStateAssignmentPhase());
if (PartialUnroll.getValue(options)) {
LoopPolicies loopPolicies = createLoopPolicies();
LoopPolicies loopPolicies = createLoopPolicies(options);
appendPhase(new LoopPartialUnrollPhase(loopPolicies, canonicalizer));
}
@ -114,7 +114,7 @@ public class MidTier extends BaseTier<MidTierContext> {
}
@Override
public LoopPolicies createLoopPolicies() {
public LoopPolicies createLoopPolicies(OptionValues options) {
return new DefaultLoopPolicies();
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -50,7 +50,7 @@ final class DebugConfigImpl implements DebugConfig {
private final DebugFilter timerFilter;
private final DebugFilter dumpFilter;
private final DebugFilter verifyFilter;
private final MethodFilter[] methodFilter;
private final MethodFilter methodFilter;
private final List<DebugDumpHandler> dumpHandlers;
private final List<DebugVerifyHandler> verifyHandlers;
private final PrintStream output;
@ -193,7 +193,7 @@ final class DebugConfigImpl implements DebugConfig {
JavaMethod method = DebugConfig.asJavaMethod(o);
if (method != null) {
if (!DebugOptions.MethodFilterRootOnly.getValue(options)) {
if (org.graalvm.compiler.debug.MethodFilter.matches(methodFilter, method)) {
if (methodFilter.matches(method)) {
return true;
}
} else {
@ -207,7 +207,7 @@ final class DebugConfigImpl implements DebugConfig {
}
}
}
if (lastMethod != null && org.graalvm.compiler.debug.MethodFilter.matches(methodFilter, lastMethod)) {
if (lastMethod != null && methodFilter.matches(lastMethod)) {
return true;
}
return false;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,7 @@ import static java.util.FormattableFlags.LEFT_JUSTIFY;
import static java.util.FormattableFlags.UPPERCASE;
import static org.graalvm.compiler.debug.DebugOptions.Count;
import static org.graalvm.compiler.debug.DebugOptions.Counters;
import static org.graalvm.compiler.debug.DebugOptions.DisableIntercept;
import static org.graalvm.compiler.debug.DebugOptions.Dump;
import static org.graalvm.compiler.debug.DebugOptions.DumpOnError;
import static org.graalvm.compiler.debug.DebugOptions.DumpOnPhaseChange;
@ -447,7 +448,7 @@ public final class DebugContext implements AutoCloseable {
}
}
currentConfig = new DebugConfigImpl(options, logStream, dumpHandlers, verifyHandlers);
currentScope = new ScopeImpl(this, Thread.currentThread());
currentScope = new ScopeImpl(this, Thread.currentThread(), DisableIntercept.getValue(options));
currentScope.updateFlags(currentConfig);
metricsEnabled = true;
} else {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -118,12 +118,12 @@ public class DebugOptions {
"otherwise a more human readable format is used. If not specified, metrics are dumped to the console.", type = OptionType.Debug)
public static final OptionKey<String> AggregatedMetricsFile = new OptionKey<>(null);
@Option(help = "Only report metrics for threads whose name matches the regular expression.", type = OptionType.Debug)
public static final OptionKey<String> MetricsThreadFilter = new OptionKey<>(null);
@Option(help = "Enable debug output for stub code generation and snippet preparation.", type = OptionType.Debug)
public static final OptionKey<Boolean> DebugStubsAndSnippets = new OptionKey<>(false);
@Option(help = "Send compiler IR to dump handlers on error.", type = OptionType.Debug)
public static final OptionKey<Boolean> DumpOnError = new OptionKey<>(false);
@Option(help = "Disable intercepting exceptions in debug scopes.", type = OptionType.Debug)
public static final OptionKey<Boolean> DisableIntercept = new OptionKey<>(false);
@Option(help = "Intercept also bailout exceptions", type = OptionType.Debug)
public static final OptionKey<Boolean> InterceptBailout = new OptionKey<>(false);
@Option(help = "Enable more verbose log output when available", type = OptionType.Debug)
@ -166,9 +166,6 @@ public class DebugOptions {
@Option(help = "Schedule graphs as they are dumped.", type = OptionType.Debug)
public static final OptionKey<Boolean> PrintGraphWithSchedule = new OptionKey<>(false);
@Option(help = "Enable dumping Truffle ASTs to the IdealGraphVisualizer.", type = OptionType.Debug)
public static final OptionKey<Boolean> PrintTruffleTrees = new OptionKey<>(true);
@Option(help = "Treat any exceptions during dumping as fatal.", type = OptionType.Debug)
public static final OptionKey<Boolean> DumpingErrorsAreFatal = new OptionKey<>(false);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,8 +24,11 @@
package org.graalvm.compiler.debug;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.function.Predicate;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import jdk.vm.ci.meta.JavaMethod;
import jdk.vm.ci.meta.JavaType;
@ -33,82 +36,140 @@ import jdk.vm.ci.meta.Signature;
/**
* This class implements a method filter that can filter based on class name, method name and
* parameters. The syntax for a filter is explained <a href="MethodFilterHelp.txt">here</a>.
* parameters. This filter is a collection of "base filters", each of which may be negated. The
* syntax for a filter is explained <a href="file:doc-files/MethodFilterHelp.txt">here</a>.
*/
public class MethodFilter {
public final class MethodFilter {
private final Pattern clazz;
private final Pattern methodName;
private final Pattern[] signature;
private final ArrayList<BaseFilter> positiveFilters;
private final ArrayList<BaseFilter> negativeFilters;
private MethodFilter(ArrayList<BaseFilter> positiveFilters, ArrayList<BaseFilter> negativeFilters) {
this.positiveFilters = positiveFilters;
this.negativeFilters = negativeFilters;
}
/**
* Parses a string containing list of comma separated filter patterns into an array of
* {@link MethodFilter}s.
* Parses a string containing a list of comma separated, possibly negated filter patterns into a
* filter.
*/
public static MethodFilter[] parse(String commaSeparatedPatterns) {
public static MethodFilter parse(String commaSeparatedPatterns) {
String[] filters = commaSeparatedPatterns.split(",");
MethodFilter[] methodFilters = new MethodFilter[filters.length];
ArrayList<BaseFilter> positiveFilters = new ArrayList<>();
ArrayList<BaseFilter> negativeFilters = new ArrayList<>();
for (int i = 0; i < filters.length; i++) {
methodFilters[i] = new MethodFilter(filters[i]);
String pattern = filters[i].trim();
boolean positive = true;
if (pattern.startsWith("~")) {
positive = false;
pattern = pattern.substring(1);
}
BaseFilter filter = new BaseFilter(pattern);
if (positive) {
positiveFilters.add(filter);
} else {
negativeFilters.add(filter);
}
}
return methodFilters;
return new MethodFilter(positiveFilters, negativeFilters);
}
/**
* Determines if a given method is matched by a given array of filters.
* Cached instances matching nothing or everything, respectively.
*/
public static boolean matches(MethodFilter[] filters, JavaMethod method) {
for (MethodFilter filter : filters) {
if (filter.matches(method)) {
return true;
}
private static MethodFilter matchNothingInstance = null;
private static MethodFilter matchAllInstance = null;
/**
* Creates a MethodFilter instance that does not match anything.
*/
public static MethodFilter matchNothing() {
if (matchNothingInstance == null) {
matchNothingInstance = new MethodFilter(new ArrayList<>(), new ArrayList<>());
}
return false;
return matchNothingInstance;
}
/**
* Determines if a given class name is matched by a given array of filters.
* Creates a MethodFilter instance that matches everything.
*/
public static boolean matchesClassName(MethodFilter[] filters, String className) {
for (MethodFilter filter : filters) {
if (filter.matchesClassName(className)) {
public static MethodFilter matchAll() {
if (matchAllInstance == null) {
ArrayList<BaseFilter> matchAllFilter = new ArrayList<>();
matchAllFilter.add(new BaseFilter("*"));
matchAllInstance = new MethodFilter(matchAllFilter, new ArrayList<>());
}
return matchAllInstance;
}
/**
* Determines whether this is an empty filter that does not match anything.
*/
public boolean matchesNothing() {
return this.positiveFilters.isEmpty() && this.negativeFilters.isEmpty();
}
/**
* Returns a string representation of all the base filters in this filter set.
*/
@Override
public String toString() {
String positive = positiveFilters.stream().map(BaseFilter::toString).collect(Collectors.joining(", "));
String negative = negativeFilters.stream().map(filter -> filter.toString(false)).collect(Collectors.joining(", "));
if (positiveFilters.isEmpty()) {
return negative;
} else if (negativeFilters.isEmpty()) {
return positive;
} else {
return positive + ", " + negative;
}
}
/**
* Determines if a given method is matched by this filter.
*/
public boolean matches(JavaMethod method) {
return matches(baseFilter -> baseFilter.matches(method));
}
/**
* Determines if a given method with a given class and signature is matched by this filter.
*/
public boolean matches(String javaClassName, String name, Signature sig) {
return matches(baseFilter -> baseFilter.matches(javaClassName, name, sig));
}
/**
* Determines if a given class name is matched by this filter.
*/
public boolean matchesClassName(String className) {
return matches(baseFilter -> baseFilter.matchesClassName(className));
}
private boolean matches(Predicate<BaseFilter> predicate) {
// No match if any negative filter matches.
for (BaseFilter negative : negativeFilters) {
if (predicate.test(negative)) {
return false;
}
}
// At least one positive filter should normally match. But as a special case, if there are
// only negative filters (and none of them matched), consider this a match.
if (!negativeFilters.isEmpty() && positiveFilters.isEmpty()) {
return true;
}
// Otherwise, match if there is at least one matching positive filter.
for (BaseFilter positive : positiveFilters) {
if (predicate.test(positive)) {
return true;
}
}
return false;
}
public MethodFilter(String sourcePattern) {
String pattern = sourcePattern.trim();
// extract parameter part
int pos = pattern.indexOf('(');
if (pos != -1) {
if (pattern.charAt(pattern.length() - 1) != ')') {
throw new IllegalArgumentException("missing ')' at end of method filter pattern: " + pattern);
}
String[] signatureClasses = pattern.substring(pos + 1, pattern.length() - 1).split(";", -1);
signature = new Pattern[signatureClasses.length];
for (int i = 0; i < signatureClasses.length; i++) {
signature[i] = createClassGlobPattern(signatureClasses[i].trim());
}
pattern = pattern.substring(0, pos);
} else {
signature = null;
}
// If there is at least one "." then everything before the last "." is the class name.
// Otherwise, the pattern contains only the method name.
pos = pattern.lastIndexOf('.');
if (pos != -1) {
clazz = createClassGlobPattern(pattern.substring(0, pos));
methodName = Pattern.compile(createGlobString(pattern.substring(pos + 1)));
} else {
clazz = null;
methodName = Pattern.compile(createGlobString(pattern));
}
}
public static String createGlobString(String pattern) {
return Pattern.quote(pattern).replace("?", "\\E.\\Q").replace("*", "\\E.*\\Q");
}
@ -123,73 +184,114 @@ public class MethodFilter {
}
}
public boolean hasSignature() {
return signature != null;
}
private static final class BaseFilter {
private final Pattern clazz;
private final Pattern methodName;
private final Pattern[] signature;
/**
* Determines if the class part of this filter matches a given class name.
*/
public boolean matchesClassName(String className) {
return clazz == null || clazz.matcher(className).matches();
}
private BaseFilter(String sourcePattern) {
String pattern = sourcePattern.trim();
public boolean matches(JavaMethod o) {
// check method name first, since MetaUtil.toJavaName is expensive
if (methodName != null && !methodName.matcher(o.getName()).matches()) {
return false;
}
if (clazz != null && !clazz.matcher(o.getDeclaringClass().toJavaName()).matches()) {
return false;
}
return matchesSignature(o.getSignature());
}
// extract parameter part
int pos = pattern.indexOf('(');
if (pos != -1) {
if (pattern.charAt(pattern.length() - 1) != ')') {
throw new IllegalArgumentException("missing ')' at end of method filter pattern: " + pattern);
}
String[] signatureClasses = pattern.substring(pos + 1, pattern.length() - 1).split(";", -1);
signature = new Pattern[signatureClasses.length];
for (int i = 0; i < signatureClasses.length; i++) {
signature[i] = createClassGlobPattern(signatureClasses[i].trim());
}
pattern = pattern.substring(0, pos);
} else {
signature = null;
}
private boolean matchesSignature(Signature sig) {
if (signature == null) {
return true;
}
if (sig.getParameterCount(false) != signature.length) {
return false;
}
for (int i = 0; i < signature.length; i++) {
JavaType type = sig.getParameterType(i, null);
String javaName = type.toJavaName();
if (signature[i] != null && !signature[i].matcher(javaName).matches()) {
return false;
// If there is at least one "." then everything before the last "." is the class name.
// Otherwise, the pattern contains only the method name.
pos = pattern.lastIndexOf('.');
if (pos != -1) {
clazz = createClassGlobPattern(pattern.substring(0, pos));
methodName = Pattern.compile(createGlobString(pattern.substring(pos + 1)));
} else {
clazz = null;
methodName = Pattern.compile(createGlobString(pattern));
}
}
return true;
}
public boolean matches(String javaClassName, String name, Signature sig) {
assert sig != null || signature == null;
// check method name first, since MetaUtil.toJavaName is expensive
if (methodName != null && !methodName.matcher(name).matches()) {
return false;
/**
* Determines if the class part of this filter matches a given class name.
*/
private boolean matchesClassName(String className) {
return clazz == null || clazz.matcher(className).matches();
}
if (clazz != null && !clazz.matcher(javaClassName).matches()) {
return false;
}
return matchesSignature(sig);
}
@Override
public String toString() {
StringBuilder buf = new StringBuilder("MethodFilter[");
String sep = "";
if (clazz != null) {
buf.append(sep).append("clazz=").append(clazz);
sep = ", ";
private boolean matches(JavaMethod o) {
// check method name first, since MetaUtil.toJavaName is expensive
if (methodName != null && !methodName.matcher(o.getName()).matches()) {
return false;
}
if (clazz != null && !clazz.matcher(o.getDeclaringClass().toJavaName()).matches()) {
return false;
}
return matchesSignature(o.getSignature());
}
if (methodName != null) {
buf.append(sep).append("methodName=").append(methodName);
sep = ", ";
private boolean matchesSignature(Signature sig) {
if (signature == null) {
return true;
}
if (sig.getParameterCount(false) != signature.length) {
return false;
}
for (int i = 0; i < signature.length; i++) {
JavaType type = sig.getParameterType(i, null);
String javaName = type.toJavaName();
if (signature[i] != null && !signature[i].matcher(javaName).matches()) {
return false;
}
}
return true;
}
if (signature != null) {
buf.append(sep).append("signature=").append(Arrays.toString(signature));
sep = ", ";
private boolean matches(String javaClassName, String name, Signature sig) {
assert sig != null || signature == null;
// check method name first, since MetaUtil.toJavaName is expensive
if (methodName != null && !methodName.matcher(name).matches()) {
return false;
}
if (clazz != null && !clazz.matcher(javaClassName).matches()) {
return false;
}
return matchesSignature(sig);
}
@Override
public String toString() {
return toString(true);
}
private String toString(boolean positive) {
StringBuilder buf = new StringBuilder("MethodFilter[");
String sep = "";
if (!positive) {
buf.append(sep).append("NOT");
sep = ", ";
}
if (clazz != null) {
buf.append(sep).append("clazz=").append(clazz);
sep = ", ";
}
if (methodName != null) {
buf.append(sep).append("methodName=").append(methodName);
sep = ", ";
}
if (signature != null) {
buf.append(sep).append("signature=").append(Arrays.toString(signature));
sep = ", ";
}
return buf.append("]").toString();
}
return buf.append("]").toString();
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -147,19 +147,19 @@ public final class ScopeImpl implements DebugContext.Scope {
private PrintStream output;
private boolean interceptDisabled;
ScopeImpl(DebugContext owner, Thread thread) {
this(owner, thread.getName(), null, false);
ScopeImpl(DebugContext owner, Thread thread, boolean interceptDisabled) {
this(owner, thread.getName(), null, false, interceptDisabled);
}
private ScopeImpl(DebugContext owner, String unqualifiedName, ScopeImpl parent, boolean sandbox, Object... context) {
private ScopeImpl(DebugContext owner, String unqualifiedName, ScopeImpl parent, boolean sandbox, boolean interceptDisabled, Object... context) {
this.owner = owner;
this.parent = parent;
this.sandbox = sandbox;
this.context = context;
this.unqualifiedName = unqualifiedName;
this.interceptDisabled = interceptDisabled;
if (parent != null) {
emptyScope = unqualifiedName.equals("");
this.interceptDisabled = parent.interceptDisabled;
} else {
if (unqualifiedName.isEmpty()) {
throw new IllegalArgumentException("root scope name must be non-empty");
@ -258,7 +258,7 @@ public final class ScopeImpl implements DebugContext.Scope {
public ScopeImpl scope(CharSequence name, DebugConfig sandboxConfig, Object... newContextObjects) {
ScopeImpl newScope = null;
if (sandboxConfig != null) {
newScope = new ScopeImpl(owner, name.toString(), this, true, newContextObjects);
newScope = new ScopeImpl(owner, name.toString(), this, true, this.interceptDisabled, newContextObjects);
} else {
newScope = this.createChild(name.toString(), newContextObjects);
}
@ -376,7 +376,7 @@ public final class ScopeImpl implements DebugContext.Scope {
}
private ScopeImpl createChild(String newName, Object[] newContext) {
return new ScopeImpl(owner, newName, this, false, newContext);
return new ScopeImpl(owner, newName, this, false, this.interceptDisabled, newContext);
}
@Override

View File

@ -1,40 +1,62 @@
Pattern for filtering debug scope output based on method context.
The syntax for a pattern is:
Pattern for matching methods. The syntax for a pattern is:
SourcePatterns = SourcePattern ["," SourcePatterns] .
SourcePattern = [ Class "." ] method [ "(" [ Parameter { ";" Parameter } ] ")" ] .
SourcePattern = [ "~" ] [ Class "." ] method [ "(" [ Parameter { ";" Parameter } ] ")" ] .
Parameter = Class | "int" | "long" | "float" | "double" | "short" | "char" | "boolean" .
Class = { package "." } class .
Glob pattern matching (*, ?) is allowed in all parts of the source pattern.
The "~" prefix negates the pattern.
Positive patterns are joined by an "or" operator: "A,B" matches anything
matched by "A" or "B". Negative patterns are joined by "and not": "~A,~B"
matches anything not matched by "A" and not matched by "B". "A,~B,~C,D"
matches anything matched by "A" or "D" and not matched by "B" and not
matched by "C".
A set of patterns containing negative patterns but no positive ones contains
an implicit positive "*" pattern: "~A,~B" is equivalent to "*,~A,~B".
Examples of method filters:
---------
visit(Argument;BlockScope)
Matches all methods named "visit", with the first parameter of
type "Argument", and the second parameter of type "BlockScope".
---------
*
Matches all methods in all classes.
---------
canonical(CanonicalizerTool;LogicNode;LogicNode)
Matches all methods named "canonical", with the first parameter of type
"CanonicalizerTool", and the second and third parameters of type
"LogicNode".
The packages of the parameter types are irrelevant.
---------
arraycopy(Object;;;;)
Matches all methods named "arraycopy", with the first parameter
of type "Object", and four more parameters of any type. The
packages of the parameter types are irrelevant.
---------
org.graalvm.compiler.core.graph.PostOrderNodeIterator.*
Matches all methods in the class "org.graalvm.compiler.core.graph.PostOrderNodeIterator".
---------
*
Matches all methods in all classes
---------
org.graalvm.compiler.core.graph.*.visit
Matches all methods named "visit" in classes in the package
"org.graalvm.compiler.core.graph".
org.graalvm.compiler.nodes.PhiNode.*
Matches all methods in the class "org.graalvm.compiler.nodes.PhiNode".
---------
org.graalvm.compiler.nodes.*.canonical
Matches all methods named "canonical" in classes in the package
"org.graalvm.compiler.nodes".
---------
arraycopy,toString
Matches all methods named "arraycopy" or "toString", meaning that ',' acts as an or operator.
Matches all methods named "arraycopy" or "toString", meaning that ',' acts
as an "or" operator.
---------
java.util.*.*.,~java.util.*Array*.*
java.util.*.*.,~*Array*.*
These patterns are equivalent and match all methods in the package
"java.util" except for classes that have "Array" in their name.
---------
~java.util.*.*
Matches all methods in all classes in all packages except for anything in
the "java.util" package.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1184,7 +1184,7 @@ public class Graph {
*
* @return the number of node ids generated so far
*/
int nodeIdCount() {
public int nodeIdCount() {
return nodesSize;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -308,6 +308,17 @@ public final class NodeClass<T> extends FieldIntrospection<T> {
assert size != null;
debug.log("Node cost for node of type __| %s |_, cycles:%s,size:%s", clazz, cycles, size);
}
assert verifyMemoryEdgeInvariant(fs) : "Nodes participating in the memory graph should have at most 1 optional memory input.";
}
private static boolean verifyMemoryEdgeInvariant(NodeFieldsScanner fs) {
int optionalMemoryInputs = 0;
for (InputInfo info : fs.inputs) {
if (info.optional && info.inputType == InputType.Memory) {
optionalMemoryInputs++;
}
}
return optionalMemoryInputs <= 1;
}
private final NodeCycles cycles;

Some files were not shown because too many files have changed in this diff Show More