8233841: Update Graal

Reviewed-by: kvn
Dean Long 2019-11-14 12:21:00 -08:00
parent 51abc89ab6
commit 609bd95209
47 changed files with 816 additions and 283 deletions

View File

@ -944,7 +944,7 @@ public abstract class AMD64BaseAssembler extends Assembler {
}
public final boolean vexPrefix(Register dst, Register nds, Register src, AVXSize size, int pp, int mmmmm, int w, int wEvex, boolean checkAVX) {
if (isAVX512Register(dst) || isAVX512Register(nds) || isAVX512Register(src)) {
if (isAVX512Register(dst) || isAVX512Register(nds) || isAVX512Register(src) || size == AVXSize.ZMM) {
evexPrefix(dst, Register.None, nds, src, size, pp, mmmmm, wEvex, Z0, B0);
return true;
}
@ -953,7 +953,7 @@ public abstract class AMD64BaseAssembler extends Assembler {
}
public final boolean vexPrefix(Register dst, Register nds, AMD64Address src, AVXSize size, int pp, int mmmmm, int w, int wEvex, boolean checkAVX) {
if (isAVX512Register(dst) || isAVX512Register(nds)) {
if (isAVX512Register(dst) || isAVX512Register(nds) || size == AVXSize.ZMM) {
evexPrefix(dst, Register.None, nds, src, size, pp, mmmmm, wEvex, Z0, B0);
return true;
}

View File

@ -0,0 +1,142 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, Arm Limited. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.graalvm.compiler.core.aarch64.test;
import org.graalvm.compiler.lir.LIRInstruction;
import org.graalvm.compiler.lir.aarch64.AArch64ArithmeticOp.BinaryConstOp;
import org.junit.Test;
import java.util.function.Predicate;
public class AArch64ElideL2ITest extends AArch64MatchRuleTest {
private static final Predicate<LIRInstruction> predicate = op -> {
if (op instanceof BinaryConstOp && op.name().toUpperCase().equals("AND")) {
return true;
}
return false;
};
public int addWithSingleL2I(long m) {
return (int) m + 100;
}
@Test
public void testAddWithSingleL2I() {
test("addWithSingleL2I", 5L);
checkLIR("addWithSingleL2I", predicate, 0);
}
public int addWithTwoL2I(long m, long n) {
return (int) m + (int) n;
}
@Test
public void testAddWithTwoL2I() {
test("addWithTwoL2I", 5L, 0x1FFFFFFFFL);
checkLIR("addWithTwoL2I", predicate, 0);
}
public int addWithTwoNarrow(long m, long n) {
return (int) m + (short) n;
}
@Test
public void testAddWithTwoNarrow() {
test("addWithTwoNarrow", 0x80000000L, 6L);
checkLIR("addWithTwoNarrow", predicate, 1);
}
public int subSingleL2I(int m, long n) {
return m - (int) n;
}
@Test
public void testSubSingleL2I() {
test("subSingleL2I", 13, 40L);
checkLIR("subSingleL2I", predicate, 0);
}
public int shiftWithSingleL2I(long m) {
return ((int) m) >> 5;
}
@Test
public void testShiftWithSingleL2I() {
test("shiftWithSingleL2I", 234L);
checkLIR("shiftWithSingleL2I", predicate, 0);
}
public int shiftWithTwoL2I(long m, long n) {
return (int) m << (int) n;
}
@Test
public void testShiftWithTwoL2I() {
test("shiftWithTwoL2I", 234L, 3L);
checkLIR("shiftWithTwoL2I", predicate, 0);
}
public long shiftLongWithL2I(long a, int m) {
return a + ((m & 0xFFFFFFFFL) << (int) a);
}
@Test
public void testShiftLongWithL2I() {
test("shiftLongWithL2I", 0xFFFFFFFFL, 123);
checkLIR("shiftLongWithL2I", predicate, 1);
}
public int logicWithTwoL2I(long m, long n) {
return (int) m | (int) n;
}
@Test
public void testLogicWithTwoL2I() {
test("logicWithTwoL2I", 234L, 3L);
checkLIR("logicWithTwoL2I", predicate, 0);
}
public int negateL2I(long m) {
return -((int) m);
}
@Test
public void testNegateL2I() {
test("negateL2I", 0xFFFFFFFFL);
checkLIR("negateL2I", predicate, 0);
}
public int notL2I(long m) {
return ~((int) m);
}
@Test
public void testNotL2I() {
test("notL2I", 0xFFFFFFFFL);
checkLIR("notL2I", predicate, 0);
}
}

View File

@ -38,7 +38,6 @@ import org.graalvm.compiler.core.common.calc.CanonicalCondition;
import org.graalvm.compiler.core.gen.NodeMatchRules;
import org.graalvm.compiler.core.match.ComplexMatchResult;
import org.graalvm.compiler.core.match.MatchRule;
import org.graalvm.compiler.graph.Node;
import org.graalvm.compiler.lir.LIRFrameState;
import org.graalvm.compiler.lir.LabelRef;
import org.graalvm.compiler.lir.Variable;
@ -58,26 +57,33 @@ import org.graalvm.compiler.nodes.calc.BinaryNode;
import org.graalvm.compiler.nodes.calc.IntegerLessThanNode;
import org.graalvm.compiler.nodes.calc.LeftShiftNode;
import org.graalvm.compiler.nodes.calc.MulNode;
import org.graalvm.compiler.nodes.calc.NarrowNode;
import org.graalvm.compiler.nodes.calc.NegateNode;
import org.graalvm.compiler.nodes.calc.NotNode;
import org.graalvm.compiler.nodes.calc.OrNode;
import org.graalvm.compiler.nodes.calc.RightShiftNode;
import org.graalvm.compiler.nodes.calc.SubNode;
import org.graalvm.compiler.nodes.calc.UnaryNode;
import org.graalvm.compiler.nodes.calc.UnsignedRightShiftNode;
import org.graalvm.compiler.nodes.calc.XorNode;
import org.graalvm.compiler.nodes.memory.Access;
public class AArch64NodeMatchRules extends NodeMatchRules {
private static final EconomicMap<Class<? extends Node>, AArch64ArithmeticOp> nodeOpMap;
private static final EconomicMap<Class<? extends BinaryNode>, AArch64ArithmeticOp> binaryOpMap;
private static final EconomicMap<Class<? extends BinaryNode>, AArch64BitFieldOp.BitFieldOpCode> bitFieldOpMap;
private static final EconomicMap<Class<? extends BinaryNode>, AArch64MacroAssembler.ShiftType> shiftTypeMap;
static {
nodeOpMap = EconomicMap.create(Equivalence.IDENTITY, 5);
nodeOpMap.put(AddNode.class, AArch64ArithmeticOp.ADD);
nodeOpMap.put(SubNode.class, AArch64ArithmeticOp.SUB);
nodeOpMap.put(AndNode.class, AArch64ArithmeticOp.AND);
nodeOpMap.put(OrNode.class, AArch64ArithmeticOp.OR);
nodeOpMap.put(XorNode.class, AArch64ArithmeticOp.XOR);
binaryOpMap = EconomicMap.create(Equivalence.IDENTITY, 9);
binaryOpMap.put(AddNode.class, AArch64ArithmeticOp.ADD);
binaryOpMap.put(SubNode.class, AArch64ArithmeticOp.SUB);
binaryOpMap.put(MulNode.class, AArch64ArithmeticOp.MUL);
binaryOpMap.put(AndNode.class, AArch64ArithmeticOp.AND);
binaryOpMap.put(OrNode.class, AArch64ArithmeticOp.OR);
binaryOpMap.put(XorNode.class, AArch64ArithmeticOp.XOR);
binaryOpMap.put(LeftShiftNode.class, AArch64ArithmeticOp.SHL);
binaryOpMap.put(RightShiftNode.class, AArch64ArithmeticOp.ASHR);
binaryOpMap.put(UnsignedRightShiftNode.class, AArch64ArithmeticOp.LSHR);
bitFieldOpMap = EconomicMap.create(Equivalence.IDENTITY, 2);
bitFieldOpMap.put(UnsignedRightShiftNode.class, AArch64BitFieldOp.BitFieldOpCode.UBFX);
@ -153,6 +159,10 @@ public class AArch64NodeMatchRules extends NodeMatchRules {
};
}
private static boolean isNarrowingLongToInt(NarrowNode narrow) {
return narrow.getInputBits() == 64 && narrow.getResultBits() == 32;
}
@MatchRule("(And (UnsignedRightShift=shift a Constant=b) Constant=c)")
@MatchRule("(LeftShift=shift (And a Constant=c) Constant=b)")
public ComplexMatchResult unsignedBitField(BinaryNode shift, ValueNode a, ConstantNode b, ConstantNode c) {
@ -194,7 +204,7 @@ public class AArch64NodeMatchRules extends NodeMatchRules {
@MatchRule("(Sub=binary a (RightShift=shift b Constant))")
@MatchRule("(Sub=binary a (UnsignedRightShift=shift b Constant))")
public ComplexMatchResult addSubShift(BinaryNode binary, ValueNode a, BinaryNode shift) {
AArch64ArithmeticOp op = nodeOpMap.get(binary.getClass());
AArch64ArithmeticOp op = binaryOpMap.get(binary.getClass());
assert op != null;
return emitBinaryShift(op, a, shift, false);
}
@ -218,7 +228,7 @@ public class AArch64NodeMatchRules extends NodeMatchRules {
@MatchRule("(Xor=binary a (Not (RightShift=shift b Constant)))")
@MatchRule("(Xor=binary a (Not (UnsignedRightShift=shift b Constant)))")
public ComplexMatchResult logicShift(BinaryNode binary, ValueNode a, BinaryNode shift) {
AArch64ArithmeticOp op = nodeOpMap.get(binary.getClass());
AArch64ArithmeticOp op = binaryOpMap.get(binary.getClass());
assert op != null;
ValueNode operand = binary.getX() == a ? binary.getY() : binary.getX();
boolean isShiftNot = operand instanceof NotNode;
@ -252,6 +262,75 @@ public class AArch64NodeMatchRules extends NodeMatchRules {
resultKind, AArch64ArithmeticOp.SMULL, true, operand(a), operand(b));
}
@MatchRule("(Add=binary (Narrow=narrow a) (Narrow b))")
@MatchRule("(Sub=binary (Narrow=narrow a) (Narrow b))")
@MatchRule("(Mul=binary (Narrow=narrow a) (Narrow b))")
@MatchRule("(And=binary (Narrow=narrow a) (Narrow b))")
@MatchRule("(Or=binary (Narrow=narrow a) (Narrow b))")
@MatchRule("(Xor=binary (Narrow=narrow a) (Narrow b))")
@MatchRule("(LeftShift=binary (Narrow=narrow a) (Narrow b))")
@MatchRule("(RightShift=binary (Narrow=narrow a) (Narrow b))")
@MatchRule("(UnsignedRightShift=binary (Narrow=narrow a) (Narrow b))")
@MatchRule("(Add=binary a (Narrow=narrow b))")
@MatchRule("(Sub=binary a (Narrow=narrow b))")
@MatchRule("(Mul=binary a (Narrow=narrow b))")
@MatchRule("(And=binary a (Narrow=narrow b))")
@MatchRule("(Or=binary a (Narrow=narrow b))")
@MatchRule("(Xor=binary a (Narrow=narrow b))")
@MatchRule("(LeftShift=binary a (Narrow=narrow b))")
@MatchRule("(RightShift=binary a (Narrow=narrow b))")
@MatchRule("(UnsignedRightShift=binary a (Narrow=narrow b))")
@MatchRule("(Sub=binary (Narrow=narrow a) b)")
@MatchRule("(LeftShift=binary (Narrow=narrow a) b)")
@MatchRule("(RightShift=binary (Narrow=narrow a) b)")
@MatchRule("(UnsignedRightShift=binary (Narrow=narrow a) b)")
public ComplexMatchResult elideL2IForBinary(BinaryNode binary, NarrowNode narrow) {
assert binary.getStackKind().isNumericInteger();
ValueNode a = narrow;
ValueNode b = binary.getX() == narrow ? binary.getY() : binary.getX();
boolean isL2Ia = isNarrowingLongToInt((NarrowNode) a);
boolean isL2Ib = (b instanceof NarrowNode) && isNarrowingLongToInt((NarrowNode) b);
if (!isL2Ia && !isL2Ib) {
return null;
}
// Get the value of L2I NarrowNode as the src value.
ValueNode src1 = isL2Ia ? ((NarrowNode) a).getValue() : a;
ValueNode src2 = isL2Ib ? ((NarrowNode) b).getValue() : b;
AArch64ArithmeticOp op = binaryOpMap.get(binary.getClass());
assert op != null;
boolean commutative = binary.getNodeClass().isCommutative();
LIRKind resultKind = LIRKind.fromJavaKind(gen.target().arch, binary.getStackKind());
// Must keep the right operand order for non-commutative binary operations.
if (a == binary.getX()) {
return builder -> getArithmeticLIRGenerator().emitBinary(
resultKind, op, commutative, operand(src1), operand(src2));
}
return builder -> getArithmeticLIRGenerator().emitBinary(
resultKind, op, commutative, operand(src2), operand(src1));
}
@MatchRule("(Negate=unary (Narrow=narrow value))")
@MatchRule("(Not=unary (Narrow=narrow value))")
public ComplexMatchResult elideL2IForUnary(UnaryNode unary, NarrowNode narrow) {
assert unary.getStackKind().isNumericInteger();
if (!isNarrowingLongToInt(narrow)) {
return null;
}
AArch64ArithmeticOp op = unary instanceof NegateNode ? AArch64ArithmeticOp.NEG
: AArch64ArithmeticOp.NOT;
return builder -> {
AllocatableValue input = gen.asAllocatable(operand(narrow.getValue()));
LIRKind resultKind = LIRKind.fromJavaKind(gen.target().arch, unary.getStackKind());
Variable result = gen.newVariable(resultKind);
gen.append(new AArch64ArithmeticOp.UnaryOp(op, result, moveSp(input)));
return result;
};
}
@MatchRule("(Mul (Negate a) b)")
@MatchRule("(Negate (Mul a b))")
public ComplexMatchResult multiplyNegate(ValueNode a, ValueNode b) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -73,4 +73,13 @@ public interface ForeignCallLinkage extends InvokeTarget {
* the VM to be able to inspect the thread's execution state.
*/
boolean needsDebugInfo();
/**
* Returns true if further cleanup on the float registers is needed after performing the foreign
* call. This is critical on AMD64 as there is a performance penalty switching between legacy
* SSE and AVX instructions while the upper halves of the xmm registers are not zero.
*/
default boolean needsClearUpperVectorRegisters() {
return false;
}
}
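
A minimal sketch of how a backend consumes this hint, mirroring the AMD64HotSpotReturnOp change later in this commit (masm is the AMD64 macro assembler and crb the CompilationResultBuilder in scope; this illustrates the pattern, not the exact emitted sequence):

    if (masm.supports(CPUFeature.AVX) && crb.needsClearUpperVectorRegisters()) {
        // vzeroupper clears the upper halves of the xmm/ymm registers and thereby
        // avoids the SSE/AVX transition penalty described in the javadoc above.
        masm.vzeroupper();
    }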

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,7 +52,7 @@ public class HashMapGetTest extends GraalCompilerTest {
for (IfNode ifNode : lastCompiledGraph.getNodes(IfNode.TYPE)) {
LogicNode condition = ifNode.condition();
if (ifNode.getTrueSuccessorProbability() < 0.4 && condition instanceof ObjectEqualsNode) {
assertTrue(ifNode.trueSuccessor().next() instanceof ReturnNode, "Expected return.", ifNode.trueSuccessor(), ifNode.trueSuccessor().next());
assertTrue(ifNode.trueSuccessor().next() instanceof ReturnNode, "Expected return but got %s (trueSuccessor: %s)", ifNode.trueSuccessor().next(), ifNode.trueSuccessor());
}
}
}

View File

@ -24,8 +24,7 @@
package org.graalvm.compiler.core.test;
import java.lang.reflect.Field;
import org.graalvm.compiler.core.test.ea.EATestBase.TestClassInt;
import org.graalvm.compiler.nodes.StructuredGraph;
import org.graalvm.compiler.nodes.StructuredGraph.AllowAssumptions;
import org.graalvm.compiler.nodes.spi.CoreProviders;
@ -39,75 +38,45 @@ import jdk.vm.ci.meta.ResolvedJavaMethod;
public class UnsafeVirtualizationTest extends GraalCompilerTest {
public static class Base {
/*
* This padding ensures that the size of the Base class ends up as a multiple of 8, which
* makes the first field of the subclass 8-byte aligned.
*/
double padding;
}
public static class A extends Base {
int f1;
int f2;
}
private static final long AF1Offset;
private static final long AF2Offset;
static {
long o1 = -1;
long o2 = -1;
try {
Field f1 = A.class.getDeclaredField("f1");
Field f2 = A.class.getDeclaredField("f2");
o1 = UNSAFE.objectFieldOffset(f1);
o2 = UNSAFE.objectFieldOffset(f2);
} catch (NoSuchFieldException | SecurityException e) {
throw new AssertionError(e);
}
AF1Offset = o1;
AF2Offset = o2;
}
public static int unsafeSnippet1(double i1) {
A a = new A();
UNSAFE.putDouble(a, AF1Offset, i1);
return UNSAFE.getInt(a, AF1Offset) + UNSAFE.getInt(a, AF2Offset);
TestClassInt a = new TestClassInt();
UNSAFE.putDouble(a, TestClassInt.fieldOffset1, i1);
return UNSAFE.getInt(a, TestClassInt.fieldOffset1) + UNSAFE.getInt(a, TestClassInt.fieldOffset2);
}
public static long unsafeSnippet2a(int i1) {
A a = new A();
UNSAFE.putDouble(a, AF1Offset, i1);
a.f1 = i1;
return UNSAFE.getLong(a, AF1Offset);
TestClassInt a = new TestClassInt();
UNSAFE.putDouble(a, TestClassInt.fieldOffset1, i1);
a.setFirstField(i1);
return UNSAFE.getLong(a, TestClassInt.fieldOffset1);
}
public static long unsafeSnippet2b(int i1) {
A a = new A();
UNSAFE.putDouble(a, AF1Offset, i1);
a.f2 = i1;
return UNSAFE.getLong(a, AF1Offset);
TestClassInt a = new TestClassInt();
UNSAFE.putDouble(a, TestClassInt.fieldOffset1, i1);
a.setSecondField(i1);
return UNSAFE.getLong(a, TestClassInt.fieldOffset1);
}
public static long unsafeSnippet3a(int i1) {
A a = new A();
UNSAFE.putDouble(a, AF1Offset, i1);
UNSAFE.putInt(a, AF1Offset, i1);
return UNSAFE.getLong(a, AF1Offset);
TestClassInt a = new TestClassInt();
UNSAFE.putDouble(a, TestClassInt.fieldOffset1, i1);
UNSAFE.putInt(a, TestClassInt.fieldOffset1, i1);
return UNSAFE.getLong(a, TestClassInt.fieldOffset1);
}
public static long unsafeSnippet3b(int i1) {
A a = new A();
UNSAFE.putDouble(a, AF1Offset, i1);
UNSAFE.putInt(a, AF2Offset, i1);
return UNSAFE.getLong(a, AF1Offset);
TestClassInt a = new TestClassInt();
UNSAFE.putDouble(a, TestClassInt.fieldOffset1, i1);
UNSAFE.putInt(a, TestClassInt.fieldOffset2, i1);
return UNSAFE.getLong(a, TestClassInt.fieldOffset1);
}
public static int unsafeSnippet4(double i1) {
A a = new A();
UNSAFE.putDouble(a, AF1Offset, i1);
UNSAFE.putDouble(a, AF1Offset, i1);
return UNSAFE.getInt(a, AF1Offset) + UNSAFE.getInt(a, AF2Offset);
TestClassInt a = new TestClassInt();
UNSAFE.putDouble(a, TestClassInt.fieldOffset1, i1);
UNSAFE.putDouble(a, TestClassInt.fieldOffset1, i1);
return UNSAFE.getInt(a, TestClassInt.fieldOffset1) + UNSAFE.getInt(a, TestClassInt.fieldOffset2);
}
@Test
@ -141,7 +110,7 @@ public class UnsafeVirtualizationTest extends GraalCompilerTest {
}
public void testPartialEscapeReadElimination(String snippet, boolean canonicalizeBefore, Object... args) {
assert AF1Offset % 8 == 0 : "First of the two int-fields must be 8-byte aligned";
assert TestClassInt.fieldOffset1 % 8 == 0 : "First of the two int-fields must be 8-byte aligned";
ResolvedJavaMethod method = getResolvedJavaMethod(snippet);
StructuredGraph graph = parseEager(snippet, AllowAssumptions.NO);

View File

@ -87,6 +87,46 @@ public class EATestBase extends GraalCompilerTest {
public int hashCode() {
return x + 13 * y;
}
public static final long fieldOffset1;
public static final long fieldOffset2;
public static final boolean firstFieldIsX;
static {
try {
long localFieldOffset1 = UNSAFE.objectFieldOffset(EATestBase.TestClassInt.class.getField("x"));
// Make the fields 8-byte aligned (required for testing setLong on architectures
// which do not support unaligned memory access)
if (localFieldOffset1 % 8 == 0) {
fieldOffset1 = localFieldOffset1;
fieldOffset2 = UNSAFE.objectFieldOffset(EATestBase.TestClassInt.class.getField("y"));
firstFieldIsX = true;
} else {
fieldOffset1 = UNSAFE.objectFieldOffset(EATestBase.TestClassInt.class.getField("y"));
fieldOffset2 = UNSAFE.objectFieldOffset(EATestBase.TestClassInt.class.getField("z"));
firstFieldIsX = false;
}
assert fieldOffset2 == fieldOffset1 + 4;
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public void setFirstField(int v) {
if (firstFieldIsX) {
x = v;
} else {
y = v;
}
}
public void setSecondField(int v) {
if (firstFieldIsX) {
y = v;
} else {
z = v;
}
}
}
public static class TestClassObject {

View File

@ -48,27 +48,6 @@ public class UnsafeEATest extends EATestBase {
public static int zero = 0;
private static final long fieldOffset1;
private static final long fieldOffset2;
static {
try {
long localFieldOffset1 = UNSAFE.objectFieldOffset(TestClassInt.class.getField("x"));
// Make the fields 8-byte aligned (required for testing setLong on architectures which
// do not support unaligned memory access)
if (localFieldOffset1 % 8 == 0) {
fieldOffset1 = localFieldOffset1;
fieldOffset2 = UNSAFE.objectFieldOffset(TestClassInt.class.getField("y"));
} else {
fieldOffset1 = UNSAFE.objectFieldOffset(TestClassInt.class.getField("y"));
fieldOffset2 = UNSAFE.objectFieldOffset(TestClassInt.class.getField("z"));
}
assert fieldOffset2 == fieldOffset1 + 4;
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@Override
protected void testEscapeAnalysis(String snippet, JavaConstant expectedConstantResult, boolean iterativeEscapeAnalysis) {
// Exercise both a graph containing UnsafeAccessNodes and one which has been possibly been
@ -134,8 +113,8 @@ public class UnsafeEATest extends EATestBase {
public static int testSimpleIntSnippet() {
TestClassInt x = new TestClassInt();
UNSAFE.putInt(x, fieldOffset1, 101);
return UNSAFE.getInt(x, fieldOffset1);
UNSAFE.putInt(x, TestClassInt.fieldOffset1, 101);
return UNSAFE.getInt(x, TestClassInt.fieldOffset1);
}
@Test
@ -145,7 +124,7 @@ public class UnsafeEATest extends EATestBase {
public static TestClassInt testMaterializedIntSnippet() {
TestClassInt x = new TestClassInt();
UNSAFE.putInt(x, fieldOffset1, 101);
UNSAFE.putInt(x, TestClassInt.fieldOffset1, 101);
return x;
}
@ -156,8 +135,8 @@ public class UnsafeEATest extends EATestBase {
public static double testSimpleDoubleSnippet() {
TestClassInt x = new TestClassInt();
UNSAFE.putDouble(x, fieldOffset1, 10.1);
return UNSAFE.getDouble(x, fieldOffset1);
UNSAFE.putDouble(x, TestClassInt.fieldOffset1, 10.1);
return UNSAFE.getDouble(x, TestClassInt.fieldOffset1);
}
@Test
@ -167,9 +146,9 @@ public class UnsafeEATest extends EATestBase {
public static int testSimpleDoubleOverwriteWithIntSnippet() {
TestClassInt x = new TestClassInt();
UNSAFE.putDouble(x, fieldOffset1, 10.1);
UNSAFE.putInt(x, fieldOffset1, 10);
return UNSAFE.getInt(x, fieldOffset1);
UNSAFE.putDouble(x, TestClassInt.fieldOffset1, 10.1);
UNSAFE.putInt(x, TestClassInt.fieldOffset1, 10);
return UNSAFE.getInt(x, TestClassInt.fieldOffset1);
}
@Test
@ -183,9 +162,9 @@ public class UnsafeEATest extends EATestBase {
public static int testSimpleDoubleOverwriteWithSecondIntSnippet() {
TestClassInt x = new TestClassInt();
UNSAFE.putDouble(x, fieldOffset1, 10.1);
UNSAFE.putInt(x, fieldOffset1, 10);
return UNSAFE.getInt(x, fieldOffset2);
UNSAFE.putDouble(x, TestClassInt.fieldOffset1, 10.1);
UNSAFE.putInt(x, TestClassInt.fieldOffset1, 10);
return UNSAFE.getInt(x, TestClassInt.fieldOffset2);
}
@Test
@ -199,9 +178,9 @@ public class UnsafeEATest extends EATestBase {
public static int testSimpleDoubleOverwriteWithFirstIntSnippet() {
TestClassInt x = new TestClassInt();
UNSAFE.putDouble(x, fieldOffset1, 10.1);
UNSAFE.putInt(x, fieldOffset2, 10);
return UNSAFE.getInt(x, fieldOffset1);
UNSAFE.putDouble(x, TestClassInt.fieldOffset1, 10.1);
UNSAFE.putInt(x, TestClassInt.fieldOffset2, 10);
return UNSAFE.getInt(x, TestClassInt.fieldOffset1);
}
@Test
@ -215,9 +194,9 @@ public class UnsafeEATest extends EATestBase {
public static int testSimpleLongOverwriteWithSecondIntSnippet() {
TestClassInt x = new TestClassInt();
UNSAFE.putLong(x, fieldOffset1, 0x1122334455667788L);
UNSAFE.putInt(x, fieldOffset1, 10);
return UNSAFE.getInt(x, fieldOffset2);
UNSAFE.putLong(x, TestClassInt.fieldOffset1, 0x1122334455667788L);
UNSAFE.putInt(x, TestClassInt.fieldOffset1, 10);
return UNSAFE.getInt(x, TestClassInt.fieldOffset2);
}
@Test
@ -231,9 +210,9 @@ public class UnsafeEATest extends EATestBase {
public static int testSimpleLongOverwriteWithFirstIntSnippet() {
TestClassInt x = new TestClassInt();
UNSAFE.putLong(x, fieldOffset1, 0x1122334455667788L);
UNSAFE.putInt(x, fieldOffset2, 10);
return UNSAFE.getInt(x, fieldOffset1);
UNSAFE.putLong(x, TestClassInt.fieldOffset1, 0x1122334455667788L);
UNSAFE.putInt(x, TestClassInt.fieldOffset2, 10);
return UNSAFE.getInt(x, TestClassInt.fieldOffset1);
}
@Test
@ -250,12 +229,12 @@ public class UnsafeEATest extends EATestBase {
TestClassInt x;
if (a) {
x = new TestClassInt(0, 0);
UNSAFE.putDouble(x, fieldOffset1, doubleField);
UNSAFE.putDouble(x, TestClassInt.fieldOffset1, doubleField);
} else {
x = new TestClassInt();
UNSAFE.putDouble(x, fieldOffset1, doubleField2);
UNSAFE.putDouble(x, TestClassInt.fieldOffset1, doubleField2);
}
return UNSAFE.getDouble(x, fieldOffset1);
return UNSAFE.getDouble(x, TestClassInt.fieldOffset1);
}
static class ExtendedTestClassInt extends TestClassInt {
@ -271,14 +250,14 @@ public class UnsafeEATest extends EATestBase {
TestClassInt x;
if (value == 1) {
x = new TestClassInt();
UNSAFE.putDouble(x, fieldOffset1, 10);
UNSAFE.putDouble(x, TestClassInt.fieldOffset1, 10);
} else {
x = new TestClassInt();
UNSAFE.putInt(x, fieldOffset1, 0);
UNSAFE.putInt(x, TestClassInt.fieldOffset1, 0);
}
UNSAFE.putInt(x, fieldOffset1, 0);
UNSAFE.putInt(x, TestClassInt.fieldOffset1, 0);
if (value == 2) {
UNSAFE.putInt(x, fieldOffset2, 0);
UNSAFE.putInt(x, TestClassInt.fieldOffset2, 0);
}
GraalDirectives.deoptimizeAndInvalidate();
return x;
@ -291,7 +270,7 @@ public class UnsafeEATest extends EATestBase {
public static TestClassInt testMaterializedDoubleSnippet() {
TestClassInt x = new TestClassInt();
UNSAFE.putDouble(x, fieldOffset1, 10.1);
UNSAFE.putDouble(x, TestClassInt.fieldOffset1, 10.1);
return x;
}
@ -305,10 +284,10 @@ public class UnsafeEATest extends EATestBase {
public static TestClassInt testDeoptDoubleVarSnippet() {
TestClassInt x = new TestClassInt();
UNSAFE.putDouble(x, fieldOffset1, doubleField);
UNSAFE.putDouble(x, TestClassInt.fieldOffset1, doubleField);
doubleField2 = 123;
try {
doubleField = ((int) UNSAFE.getDouble(x, fieldOffset1)) / zero;
doubleField = ((int) UNSAFE.getDouble(x, TestClassInt.fieldOffset1)) / zero;
} catch (RuntimeException e) {
return x;
}
@ -322,10 +301,10 @@ public class UnsafeEATest extends EATestBase {
public static TestClassInt testDeoptDoubleConstantSnippet() {
TestClassInt x = new TestClassInt();
UNSAFE.putDouble(x, fieldOffset1, 10.123);
UNSAFE.putDouble(x, TestClassInt.fieldOffset1, 10.123);
doubleField2 = 123;
try {
doubleField = ((int) UNSAFE.getDouble(x, fieldOffset1)) / zero;
doubleField = ((int) UNSAFE.getDouble(x, TestClassInt.fieldOffset1)) / zero;
} catch (RuntimeException e) {
return x;
}
@ -342,10 +321,10 @@ public class UnsafeEATest extends EATestBase {
public static TestClassInt testDeoptLongVarSnippet() {
TestClassInt x = new TestClassInt();
UNSAFE.putLong(x, fieldOffset1, longField);
UNSAFE.putLong(x, TestClassInt.fieldOffset1, longField);
longField2 = 123;
try {
longField = UNSAFE.getLong(x, fieldOffset1) / zero;
longField = UNSAFE.getLong(x, TestClassInt.fieldOffset1) / zero;
} catch (RuntimeException e) {
return x;
}
@ -359,10 +338,10 @@ public class UnsafeEATest extends EATestBase {
public static TestClassInt testDeoptLongConstantSnippet() {
TestClassInt x = new TestClassInt();
UNSAFE.putLong(x, fieldOffset1, 0x2222222210123L);
UNSAFE.putLong(x, TestClassInt.fieldOffset1, 0x2222222210123L);
longField2 = 123;
try {
longField = UNSAFE.getLong(x, fieldOffset1) / zero;
longField = UNSAFE.getLong(x, TestClassInt.fieldOffset1) / zero;
} catch (RuntimeException e) {
return x;
}

View File

@ -243,6 +243,9 @@ public abstract class CompilationWrapper<T> {
String message;
ByteArrayOutputStream baos = new ByteArrayOutputStream();
try (PrintStream ps = new PrintStream(baos)) {
// This output is used by external tools to detect compilation failures.
ps.println("[[[Graal compilation failure]]]");
ps.printf("%s: Compilation of %s failed:%n", Thread.currentThread(), this);
cause.printStackTrace(ps);
ps.printf("To disable compilation failure notifications, set %s to %s (e.g., -Dgraal.%s=%s).%n",

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -168,6 +168,14 @@ public class GraalError extends Error {
super(cause);
}
/**
* This constructor creates a {@link GraalError} for a given causing Throwable instance with
* a detailed error message.
*/
public GraalError(Throwable cause, String msg, Object... args) {
super(format(msg, args), cause);
}
/**
* This constructor creates a {@link GraalError} and adds all the
* {@linkplain #addContext(String) context} of another {@link GraalError}.
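
The new GraalError(Throwable, String, Object...) constructor is exercised later in this commit by HotSpotGraphBuilderPlugins.registerAndCheckMismatch; a representative call, with placeholder variable names, looks like:

    // Wrap the original error and attach a formatted, user-readable explanation.
    throw new GraalError(e, "Found method named '%s' instead of '%s' in class '%s'.",
                    foundName, expectedName, declaringClassName);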

View File

@ -26,6 +26,7 @@ package org.graalvm.compiler.hotspot.amd64;
import static jdk.vm.ci.amd64.AMD64.r10;
import static jdk.vm.ci.amd64.AMD64.rax;
import static jdk.vm.ci.amd64.AMD64.rbp;
import static jdk.vm.ci.amd64.AMD64.rsp;
import static jdk.vm.ci.code.ValueUtil.asRegister;
import static org.graalvm.compiler.core.common.GraalOptions.CanOmitFrame;
@ -93,7 +94,7 @@ public class AMD64HotSpotBackend extends HotSpotHostBackend implements LIRGenera
@Override
protected FrameMapBuilder newFrameMapBuilder(RegisterConfig registerConfig) {
RegisterConfig registerConfigNonNull = registerConfig == null ? getCodeCache().getRegisterConfig() : registerConfig;
FrameMap frameMap = new AMD64FrameMap(getCodeCache(), registerConfigNonNull, this);
FrameMap frameMap = new AMD64FrameMap(getCodeCache(), registerConfigNonNull, this, config.preserveFramePointer);
return new AMD64FrameMapBuilder(frameMap, getCodeCache(), registerConfigNonNull);
}
@ -130,10 +131,12 @@ public class AMD64HotSpotBackend extends HotSpotHostBackend implements LIRGenera
final boolean isStub;
final boolean omitFrame;
final boolean useStandardFrameProlog;
HotSpotFrameContext(boolean isStub, boolean omitFrame) {
HotSpotFrameContext(boolean isStub, boolean omitFrame, boolean useStandardFrameProlog) {
this.isStub = isStub;
this.omitFrame = omitFrame;
this.useStandardFrameProlog = useStandardFrameProlog;
}
@Override
@ -157,6 +160,11 @@ public class AMD64HotSpotBackend extends HotSpotHostBackend implements LIRGenera
// assert asm.position() - verifiedEntryPointOffset >=
// PATCHED_VERIFIED_ENTRY_POINT_INSTRUCTION_SIZE;
}
if (useStandardFrameProlog) {
// Stack-walking friendly instructions
asm.push(rbp);
asm.movq(rbp, rsp);
}
if (!isStub && asm.position() == verifiedEntryPointOffset) {
asm.subqWide(rsp, frameSize);
assert asm.position() - verifiedEntryPointOffset >= PATCHED_VERIFIED_ENTRY_POINT_INSTRUCTION_SIZE;
@ -180,7 +188,12 @@ public class AMD64HotSpotBackend extends HotSpotHostBackend implements LIRGenera
assert crb.frameMap.getRegisterConfig().getCalleeSaveRegisters() == null;
int frameSize = crb.frameMap.frameSize();
asm.incrementq(rsp, frameSize);
if (useStandardFrameProlog) {
asm.movq(rsp, rbp);
asm.pop(rbp);
} else {
asm.incrementq(rsp, frameSize);
}
}
}
}
@ -202,7 +215,7 @@ public class AMD64HotSpotBackend extends HotSpotHostBackend implements LIRGenera
Stub stub = gen.getStub();
Assembler masm = new AMD64MacroAssembler(getTarget());
HotSpotFrameContext frameContext = new HotSpotFrameContext(stub != null, omitFrame);
HotSpotFrameContext frameContext = new HotSpotFrameContext(stub != null, omitFrame, config.preserveFramePointer);
DataBuilder dataBuilder = new HotSpotDataBuilder(getCodeCache().getTarget());
CompilationResultBuilder crb = factory.createBuilder(getCodeCache(), getForeignCalls(), frameMap, masm, dataBuilder, frameContext, options, debug, compilationResult, Register.None);
crb.setTotalFrameSize(frameMap.totalFrameSize());
@ -330,7 +343,7 @@ public class AMD64HotSpotBackend extends HotSpotHostBackend implements LIRGenera
@Override
public RegisterAllocationConfig newRegisterAllocationConfig(RegisterConfig registerConfig, String[] allocationRestrictedTo) {
RegisterConfig registerConfigNonNull = registerConfig == null ? getCodeCache().getRegisterConfig() : registerConfig;
return new AMD64HotSpotRegisterAllocationConfig(registerConfigNonNull, allocationRestrictedTo);
return new AMD64HotSpotRegisterAllocationConfig(registerConfigNonNull, allocationRestrictedTo, config.preserveFramePointer);
}
@Override

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,6 +24,7 @@
package org.graalvm.compiler.hotspot.amd64;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.ILLEGAL;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK;
@ -33,6 +34,7 @@ import org.graalvm.compiler.lir.amd64.AMD64BlockEndOp;
import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.Value;
/**
* @see AMD64HotSpotEpilogueOp
@ -43,7 +45,7 @@ abstract class AMD64HotSpotEpilogueBlockEndOp extends AMD64BlockEndOp implements
super(c);
}
@Use({REG, STACK}) protected AllocatableValue savedRbp = PLACEHOLDER;
@Use({REG, STACK, ILLEGAL}) protected AllocatableValue savedRbp = Value.ILLEGAL;
protected void leaveFrameAndRestoreRbp(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
AMD64HotSpotEpilogueOp.leaveFrameAndRestoreRbp(savedRbp, crb, masm);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,20 +24,23 @@
package org.graalvm.compiler.hotspot.amd64;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK;
import static jdk.vm.ci.amd64.AMD64.rbp;
import static jdk.vm.ci.code.ValueUtil.asRegister;
import static jdk.vm.ci.code.ValueUtil.isStackSlot;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.ILLEGAL;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK;
import org.graalvm.compiler.asm.amd64.AMD64Address;
import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
import org.graalvm.compiler.lir.LIRInstructionClass;
import org.graalvm.compiler.lir.amd64.AMD64FrameMap;
import org.graalvm.compiler.lir.amd64.AMD64LIRInstruction;
import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.Value;
/**
* Superclass for operations that use the value of RBP saved in a method's prologue.
@ -48,14 +51,17 @@ abstract class AMD64HotSpotEpilogueOp extends AMD64LIRInstruction implements AMD
super(c);
}
@Use({REG, STACK}) private AllocatableValue savedRbp = PLACEHOLDER;
@Use({REG, STACK, ILLEGAL}) private AllocatableValue savedRbp = Value.ILLEGAL;
protected void leaveFrameAndRestoreRbp(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
leaveFrameAndRestoreRbp(savedRbp, crb, masm);
}
static void leaveFrameAndRestoreRbp(AllocatableValue savedRbp, CompilationResultBuilder crb, AMD64MacroAssembler masm) {
if (isStackSlot(savedRbp)) {
if (Value.ILLEGAL.equals(savedRbp)) {
// RBP will be restored in FrameContext.leave(..). Nothing to do here.
assert ((AMD64FrameMap) crb.frameMap).useStandardFrameProlog() : "savedRbp is not initialized.";
} else if (isStackSlot(savedRbp)) {
// Restoring RBP from the stack must be done before the frame is removed
masm.movq(rbp, (AMD64Address) crb.asAddress(savedRbp));
} else {

View File

@ -153,7 +153,7 @@ public class AMD64HotSpotLIRGenerator extends AMD64LIRGenerator implements HotSp
SaveRbp(NoOp placeholder) {
this.placeholder = placeholder;
AMD64FrameMapBuilder frameMapBuilder = (AMD64FrameMapBuilder) getResult().getFrameMapBuilder();
this.reservedSlot = frameMapBuilder.allocateRBPSpillSlot();
this.reservedSlot = config.preserveFramePointer ? null : frameMapBuilder.allocateRBPSpillSlot();
}
/**
@ -162,6 +162,7 @@ public class AMD64HotSpotLIRGenerator extends AMD64LIRGenerator implements HotSp
* @param useStack specifies if rbp must be saved to the stack
*/
public AllocatableValue finalize(boolean useStack) {
assert !config.preserveFramePointer : "rbp has been pushed onto the stack";
AllocatableValue dst;
if (useStack) {
dst = reservedSlot;
@ -173,6 +174,10 @@ public class AMD64HotSpotLIRGenerator extends AMD64LIRGenerator implements HotSp
placeholder.replace(getResult().getLIR(), new MoveFromRegOp(AMD64Kind.QWORD, dst, rbp.asValue(LIRKind.value(AMD64Kind.QWORD))));
return dst;
}
public void remove() {
placeholder.remove(getResult().getLIR());
}
}
private SaveRbp saveRbp;
@ -183,10 +188,6 @@ public class AMD64HotSpotLIRGenerator extends AMD64LIRGenerator implements HotSp
saveRbp = new SaveRbp(placeholder);
}
protected SaveRbp getSaveRbp() {
return saveRbp;
}
/**
* Helper instruction to reserve a stack slot for the whole method. Note that the actual users
* of the stack slot might be inserted after stack slot allocation. This dummy instruction
@ -547,16 +548,21 @@ public class AMD64HotSpotLIRGenerator extends AMD64LIRGenerator implements HotSp
public void beforeRegisterAllocation() {
super.beforeRegisterAllocation();
boolean hasDebugInfo = getResult().getLIR().hasDebugInfo();
AllocatableValue savedRbp = saveRbp.finalize(hasDebugInfo);
if (config.preserveFramePointer) {
saveRbp.remove();
} else {
AllocatableValue savedRbp = saveRbp.finalize(hasDebugInfo);
for (AMD64HotSpotRestoreRbpOp op : epilogueOps) {
op.setSavedRbp(savedRbp);
}
}
if (hasDebugInfo) {
getResult().setDeoptimizationRescueSlot(((AMD64FrameMapBuilder) getResult().getFrameMapBuilder()).allocateDeoptimizationRescueSlot());
}
getResult().setMaxInterpreterFrameSize(debugInfoBuilder.maxInterpreterFrameSize());
for (AMD64HotSpotRestoreRbpOp op : epilogueOps) {
op.setSavedRbp(savedRbp);
}
if (BenchmarkCounters.enabled) {
// ensure that the rescue slot is available
LIRInstruction op = getOrInitRescueSlotOp();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -83,8 +83,11 @@ class AMD64HotSpotRegisterAllocationConfig extends RegisterAllocationConfig {
};
// @formatter:on
AMD64HotSpotRegisterAllocationConfig(RegisterConfig registerConfig, String[] allocationRestrictedTo) {
private final boolean useStandardFrameProlog;
AMD64HotSpotRegisterAllocationConfig(RegisterConfig registerConfig, String[] allocationRestrictedTo, boolean useStandardFrameProlog) {
super(registerConfig, allocationRestrictedTo);
this.useStandardFrameProlog = useStandardFrameProlog;
}
@Override
@ -93,6 +96,9 @@ class AMD64HotSpotRegisterAllocationConfig extends RegisterAllocationConfig {
for (Register reg : registers) {
regMap.set(reg.number);
}
if (useStandardFrameProlog) {
regMap.clear(rbp.number);
}
ArrayList<Register> allocatableRegisters = new ArrayList<>(registers.size());
for (Register reg : registerAllocationOrder) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,20 +24,9 @@
package org.graalvm.compiler.hotspot.amd64;
import org.graalvm.compiler.core.common.LIRKind;
import org.graalvm.compiler.lir.Variable;
import jdk.vm.ci.amd64.AMD64Kind;
import jdk.vm.ci.meta.AllocatableValue;
public interface AMD64HotSpotRestoreRbpOp {
/**
* The type of location (i.e., stack or register) in which RBP is saved is not known until
* initial LIR generation is finished. Until then, we use a placeholder variable so that LIR
* verification is successful.
*/
Variable PLACEHOLDER = new Variable(LIRKind.value(AMD64Kind.QWORD), Integer.MAX_VALUE);
void setSavedRbp(AllocatableValue value);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -111,10 +111,14 @@ final class AMD64HotSpotReturnOp extends AMD64HotSpotEpilogueBlockEndOp implemen
* live value at this point should be the return value in either rax, or in xmm0 with
* the upper half of the register unused, so we don't destroy any value here.
*/
if (masm.supports(CPUFeature.AVX)) {
if (masm.supports(CPUFeature.AVX) && crb.needsClearUpperVectorRegisters()) {
// If we decide to perform vzeroupper also for stubs (like what JDK9+ C2 does for
// intrinsics that employ AVX2 instructions), we need to be careful that it kills all
// the xmm registers (at least the upper halves).
masm.vzeroupper();
}
}
masm.ret(0);
}
}

View File

@ -158,11 +158,15 @@ public class CheckGraalIntrinsics extends GraalTest {
private static Collection<String> add(Collection<String> c, String... elements) {
String[] sorted = elements.clone();
Arrays.sort(sorted);
for (int i = 0; i < elements.length; i++) {
if (!elements[i].equals(sorted[i])) {
// Let's keep the list sorted for easier visual inspection
fail("Element %d is out of order, \"%s\"", i, elements[i]);
if (!Arrays.equals(elements, sorted)) {
int width = 2 + Arrays.asList(elements).stream().map(String::length).reduce(0, Integer::max);
Formatter fmt = new Formatter();
fmt.format("%-" + width + "s | sorted%n", "original");
fmt.format("%s%n", new String(new char[width * 2 + 2]).replace('\0', '='));
for (int i = 0; i < elements.length; i++) {
fmt.format("%-" + width + "s | %s%n", elements[i], sorted[i]);
}
fail("Elements not sorted alphabetically:%n%s", fmt);
}
c.addAll(Arrays.asList(elements));
return c;
@ -517,8 +521,8 @@ public class CheckGraalIntrinsics extends GraalTest {
// AES intrinsics
if (!config.useAESIntrinsics) {
add(ignore,
"com/sun/crypto/provider/AESCrypt." + aesEncryptName + "([BI[BI)V",
"com/sun/crypto/provider/AESCrypt." + aesDecryptName + "([BI[BI)V",
"com/sun/crypto/provider/AESCrypt." + aesEncryptName + "([BI[BI)V",
"com/sun/crypto/provider/CipherBlockChaining." + cbcDecryptName + "([BII[BI)I",
"com/sun/crypto/provider/CipherBlockChaining." + cbcEncryptName + "([BII[BI)I");
}

View File

@ -122,6 +122,12 @@ public class CompilationWrapperTest extends GraalCompilerTest {
public void testVMCompilation3() throws IOException, InterruptedException {
assumeManagementLibraryIsLoadable();
final int maxProblems = 2;
Probe failurePatternProbe = new Probe("[[[Graal compilation failure]]]", maxProblems) {
@Override
String test() {
return actualOccurrences > 0 && actualOccurrences <= maxProblems ? null : String.format("expected occurrences to be in [1 .. %d]", maxProblems);
}
};
Probe retryingProbe = new Probe("Retrying compilation of", maxProblems) {
@Override
String test() {
@ -140,6 +146,7 @@ public class CompilationWrapperTest extends GraalCompilerTest {
}
};
Probe[] probes = {
failurePatternProbe,
retryingProbe,
adjustmentProbe
};

View File

@ -66,7 +66,7 @@ public class CompileTheWorldTest extends GraalCompilerTest {
excludeMethodFilters,
verbose,
harnessOptions,
new OptionValues(initialOptions, HighTier.Options.Inline, false));
new OptionValues(initialOptions, HighTier.Options.Inline, false, CompilationFailureAction, ExceptionAction.Silent));
ctw.compile();
assert CompilationBailoutAsFailure.getValue(initialOptions) == originalBailoutAction;
assert CompilationFailureAction.getValue(initialOptions) == originalFailureAction;

View File

@ -111,6 +111,8 @@ public class GraalHotSpotVMConfig extends GraalHotSpotVMConfigBase {
public final boolean useVectorizedMismatchIntrinsic = getFlag("UseVectorizedMismatchIntrinsic", Boolean.class, false);
public final boolean useFMAIntrinsics = getFlag("UseFMA", Boolean.class, false);
public final boolean preserveFramePointer = getFlag("PreserveFramePointer", Boolean.class, false);
/*
* These are methods because in some JDKs the flags are visible but the stubs themselves haven't
* been exported so we have to check both if the flag is on and if we have the stub.
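
The comment above describes a flag-plus-stub check: a capability is only reported as usable when the VM flag is set and the corresponding stub entry point was actually exported. A hypothetical sketch of such an accessor (the field and method names are illustrative, not actual GraalHotSpotVMConfig members):

    public boolean useSomeStubIntrinsic() {
        // Require both the flag and a non-null exported stub address.
        return useSomeStubIntrinsicFlag && someStubRoutineAddress != 0L;
    }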

View File

@ -304,4 +304,9 @@ public class HotSpotForeignCallLinkageImpl extends HotSpotForeignCallTarget impl
public String getSymbol() {
return stub == null ? null : stub.toString();
}
@Override
public boolean needsClearUpperVectorRegisters() {
return isCompiledStub() && mayContainFP();
}
}

View File

@ -43,7 +43,7 @@ import java.util.regex.Pattern;
*/
public final class JVMCIVersionCheck {
private static final Version JVMCI8_MIN_VERSION = new Version3(19, 3, 2);
private static final Version JVMCI_MIN_VERSION = new Version3(19, 3, 4);
public interface Version {
boolean isLessThan(Version other);
@ -145,7 +145,7 @@ public final class JVMCIVersionCheck {
}
}
private static void failVersionCheck(Map<String, String> props, boolean exit, String reason, Object... args) {
private void failVersionCheck(boolean exit, String reason, Object... args) {
Formatter errorMessage = new Formatter().format(reason, args);
String javaHome = props.get("java.home");
String vmName = props.get("java.vm.name");
@ -153,10 +153,14 @@ public final class JVMCIVersionCheck {
errorMessage.format("this error or to \"warn\" to emit a warning and continue execution.%n");
errorMessage.format("Currently used Java home directory is %s.%n", javaHome);
errorMessage.format("Currently used VM configuration is: %s%n", vmName);
if (props.get("java.specification.version").compareTo("1.9") < 0) {
if (javaSpecVersion.compareTo("1.9") < 0) {
errorMessage.format("Download the latest JVMCI JDK 8 from https://github.com/graalvm/openjdk8-jvmci-builder/releases");
} else {
errorMessage.format("Download JDK 11 or later.");
if (javaSpecVersion.compareTo("11") == 0 && vmVersion.contains("-jvmci-")) {
errorMessage.format("Download the latest Labs OpenJDK 11 from https://github.com/graalvm/labs-openjdk-11/releases");
} else {
errorMessage.format("Download JDK 11 or later.");
}
}
String value = System.getenv("JVMCI_VERSION_CHECK");
if ("warn".equals(value)) {
@ -183,7 +187,7 @@ public final class JVMCIVersionCheck {
static void check(Map<String, String> props, boolean exitOnFailure) {
JVMCIVersionCheck checker = new JVMCIVersionCheck(props, props.get("java.specification.version"), props.get("java.vm.version"));
checker.run(exitOnFailure, JVMCI8_MIN_VERSION);
checker.run(exitOnFailure, JVMCI_MIN_VERSION);
}
/**
@ -202,14 +206,14 @@ public final class JVMCIVersionCheck {
Version v = Version.parse(vmVersion);
if (v != null) {
if (v.isLessThan(minVersion)) {
failVersionCheck(props, exitOnFailure, "The VM does not support the minimum JVMCI API version required by Graal: %s < %s.%n", v, minVersion);
failVersionCheck(exitOnFailure, "The VM does not support the minimum JVMCI API version required by Graal: %s < %s.%n", v, minVersion);
}
return;
}
failVersionCheck(props, exitOnFailure, "The VM does not support the minimum JVMCI API version required by Graal.%n" +
failVersionCheck(exitOnFailure, "The VM does not support the minimum JVMCI API version required by Graal.%n" +
"Cannot read JVMCI version from java.vm.version property: %s.%n", vmVersion);
} else if (javaSpecVersion.compareTo("11") < 0) {
failVersionCheck(props, exitOnFailure, "Graal is not compatible with the JVMCI API in JDK 9 and 10.%n");
failVersionCheck(exitOnFailure, "Graal is not compatible with the JVMCI API in JDK 9 and 10.%n");
} else {
if (vmVersion.contains("SNAPSHOT")) {
return;
@ -218,28 +222,16 @@ public final class JVMCIVersionCheck {
// Allow local builds
return;
}
if (vmVersion.startsWith("11-ea+")) {
String buildString = vmVersion.substring("11-ea+".length());
try {
int build = Integer.parseInt(buildString);
if (build < 20) {
failVersionCheck(props, exitOnFailure, "Graal requires build 20 or later of JDK 11 early access binary, got build %d.%n", build);
return;
}
} catch (NumberFormatException e) {
failVersionCheck(props, exitOnFailure, "Could not parse the JDK 11 early access build number from java.vm.version property: %s.%n", vmVersion);
return;
}
} else if (vmVersion.contains("-jvmci-")) {
if (vmVersion.contains("-jvmci-")) {
// A "labsjdk"
Version v = Version.parse(vmVersion);
if (v != null) {
if (v.isLessThan(minVersion)) {
failVersionCheck(props, exitOnFailure, "The VM does not support the minimum JVMCI API version required by Graal: %s < %s.%n", v, minVersion);
failVersionCheck(exitOnFailure, "The VM does not support the minimum JVMCI API version required by Graal: %s < %s.%n", v, minVersion);
}
return;
}
failVersionCheck(props, exitOnFailure, "The VM does not support the minimum JVMCI API version required by Graal.%n" +
failVersionCheck(exitOnFailure, "The VM does not support the minimum JVMCI API version required by Graal.%n" +
"Cannot read JVMCI version from java.vm.version property: %s.%n", vmVersion);
} else {
// Graal is compatible with all JDK versions as of 11 GA.

View File

@ -35,9 +35,11 @@ import java.lang.invoke.ConstantCallSite;
import java.lang.invoke.MutableCallSite;
import java.lang.invoke.VolatileCallSite;
import java.lang.reflect.Array;
import java.lang.reflect.Type;
import java.math.BigInteger;
import java.util.zip.CRC32;
import jdk.internal.vm.compiler.collections.Pair;
import org.graalvm.compiler.api.replacements.SnippetReflectionProvider;
import org.graalvm.compiler.core.common.spi.ForeignCallsProvider;
import org.graalvm.compiler.core.common.type.ObjectStamp;
@ -115,6 +117,7 @@ import jdk.vm.ci.meta.DeoptimizationAction;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.MetaAccessProvider;
import jdk.vm.ci.meta.ResolvedJavaMethod;
import jdk.vm.ci.services.Services;
import sun.misc.Unsafe;
/**
@ -461,21 +464,29 @@ public class HotSpotGraphBuilderPlugins {
}
public static String lookupIntrinsicName(GraalHotSpotVMConfig config, String className, String name1, String name2) {
return selectIntrinsicName(config, className, name1, name2).getLeft();
}
/**
* Returns a pair of Strings where the left one represents the matched intrinsic name and the
* right one represents the mismatched intrinsic name.
*/
public static Pair<String, String> selectIntrinsicName(GraalHotSpotVMConfig config, String className, String name1, String name2) {
boolean foundName1 = false;
boolean foundName2 = false;
String name = name1;
for (VMIntrinsicMethod intrinsic : config.getStore().getIntrinsics()) {
if (className.equals(intrinsic.declaringClass)) {
if (name1.equals(intrinsic.name)) {
foundName1 = true;
} else if (name2.equals(intrinsic.name)) {
foundName2 = true;
name = name2;
}
}
}
if (foundName1 != foundName2) {
return name;
if (foundName1 && !foundName2) {
return Pair.create(name1, name2);
} else if (foundName2 && !foundName1) {
return Pair.create(name2, name1);
}
throw GraalError.shouldNotReachHere();
}
@ -500,19 +511,41 @@ public class HotSpotGraphBuilderPlugins {
String arch = config.osArch;
String decryptSuffix = arch.equals("sparc") ? "WithOriginalKey" : "";
String cbcEncryptName = lookupIntrinsicName(config, "com/sun/crypto/provider/CipherBlockChaining", "implEncrypt", "encrypt");
String cbcDecryptName = lookupIntrinsicName(config, "com/sun/crypto/provider/CipherBlockChaining", "implDecrypt", "decrypt");
Registration r = new Registration(plugins, "com.sun.crypto.provider.CipherBlockChaining", replacements);
r.registerMethodSubstitution(CipherBlockChainingSubstitutions.class, cbcEncryptName, Receiver.class, byte[].class, int.class, int.class, byte[].class, int.class);
r.registerMethodSubstitution(CipherBlockChainingSubstitutions.class, cbcDecryptName, cbcDecryptName + decryptSuffix, Receiver.class, byte[].class, int.class, int.class, byte[].class,
int.class);
String aesEncryptName = lookupIntrinsicName(config, "com/sun/crypto/provider/AESCrypt", "implEncryptBlock", "encryptBlock");
String aesDecryptName = lookupIntrinsicName(config, "com/sun/crypto/provider/AESCrypt", "implDecryptBlock", "decryptBlock");
Pair<String, String> cbcEncryptName = selectIntrinsicName(config, "com/sun/crypto/provider/CipherBlockChaining", "implEncrypt", "encrypt");
registerAndCheckMismatch(r, CipherBlockChainingSubstitutions.class, cbcEncryptName, Receiver.class, byte[].class, int.class, int.class,
byte[].class, int.class);
Pair<String, String> cbcDecryptName = selectIntrinsicName(config, "com/sun/crypto/provider/CipherBlockChaining", "implDecrypt", "decrypt");
registerAndCheckMismatch(r, CipherBlockChainingSubstitutions.class, cbcDecryptName, cbcDecryptName.getLeft() + decryptSuffix, Receiver.class, byte[].class, int.class, int.class,
byte[].class, int.class);
r = new Registration(plugins, "com.sun.crypto.provider.AESCrypt", replacements);
r.registerMethodSubstitution(AESCryptSubstitutions.class, aesEncryptName, Receiver.class, byte[].class, int.class, byte[].class, int.class);
r.registerMethodSubstitution(AESCryptSubstitutions.class, aesDecryptName, aesDecryptName + decryptSuffix, Receiver.class, byte[].class, int.class, byte[].class, int.class);
Pair<String, String> aesEncryptName = selectIntrinsicName(config, "com/sun/crypto/provider/AESCrypt", "implEncryptBlock", "encryptBlock");
registerAndCheckMismatch(r, AESCryptSubstitutions.class, aesEncryptName, Receiver.class, byte[].class, int.class, byte[].class, int.class);
Pair<String, String> aesDecryptName = selectIntrinsicName(config, "com/sun/crypto/provider/AESCrypt", "implDecryptBlock", "decryptBlock");
registerAndCheckMismatch(r, AESCryptSubstitutions.class, aesDecryptName, aesDecryptName.getLeft() + decryptSuffix, Receiver.class, byte[].class, int.class, byte[].class, int.class);
}
}
private static void registerAndCheckMismatch(Registration r, Class<?> substitutionClass, Pair<String, String> intrinsicNames, Type... argumentTypes) {
try {
r.registerMethodSubstitution(substitutionClass, intrinsicNames.getLeft(), argumentTypes);
} catch (NoSuchMethodError e) {
throw new GraalError(e, "Found method named '%s' instead of '%s' in class '%s'. This is most likely because the JVMCI JDK in %s was built on an incompatible base JDK.",
intrinsicNames.getRight(), intrinsicNames.getLeft(), r.getDeclaringType().getTypeName(), Services.getSavedProperties().get("java.home"));
}
}
private static void registerAndCheckMismatch(Registration r, Class<?> substitutionClass, Pair<String, String> intrinsicNames, String substituteName, Type... argumentTypes) {
try {
r.registerMethodSubstitution(substitutionClass, intrinsicNames.getLeft(), substituteName, argumentTypes);
} catch (NoSuchMethodError e) {
throw new GraalError(e, "Found method named '%s' instead of '%s' in class '%s'. This is most likely because the JVMCI JDK in %s was built on an incompatible base JDK.",
intrinsicNames.getRight(), intrinsicNames.getLeft(), r.getDeclaringType().getTypeName(), Services.getSavedProperties().get("java.home"));
}
}
@ -544,21 +577,21 @@ public class HotSpotGraphBuilderPlugins {
r.registerMethodSubstitution(DigestBaseSubstitutions.class, "implCompressMultiBlock0", Receiver.class, byte[].class, int.class, int.class);
}
String implCompressName = lookupIntrinsicName(config, "sun/security/provider/SHA", "implCompress", "implCompress0");
Pair<String, String> implCompressName = selectIntrinsicName(config, "sun/security/provider/SHA", "implCompress", "implCompress0");
if (useSha1) {
assert config.sha1ImplCompress != 0L;
Registration r = new Registration(plugins, "sun.security.provider.SHA", replacements);
r.registerMethodSubstitution(SHASubstitutions.class, implCompressName, "implCompress0", Receiver.class, byte[].class, int.class);
registerAndCheckMismatch(r, SHASubstitutions.class, implCompressName, "implCompress0", Receiver.class, byte[].class, int.class);
}
if (useSha256) {
assert config.sha256ImplCompress != 0L;
Registration r = new Registration(plugins, "sun.security.provider.SHA2", replacements);
r.registerMethodSubstitution(SHA2Substitutions.class, implCompressName, "implCompress0", Receiver.class, byte[].class, int.class);
registerAndCheckMismatch(r, SHA2Substitutions.class, implCompressName, "implCompress0", Receiver.class, byte[].class, int.class);
}
if (useSha512) {
assert config.sha512ImplCompress != 0L;
Registration r = new Registration(plugins, "sun.security.provider.SHA5", replacements);
r.registerMethodSubstitution(SHA5Substitutions.class, implCompressName, "implCompress0", Receiver.class, byte[].class, int.class);
registerAndCheckMismatch(r, SHA5Substitutions.class, implCompressName, "implCompress0", Receiver.class, byte[].class, int.class);
}
}

View File

@ -27,6 +27,7 @@ package org.graalvm.compiler.hotspot.stubs;
import static java.util.Collections.singletonList;
import static org.graalvm.compiler.core.GraalCompiler.emitFrontEnd;
import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC;
import static org.graalvm.compiler.core.common.GraalOptions.RegisterPressure;
import static org.graalvm.compiler.debug.DebugContext.DEFAULT_LOG_STREAM;
import static org.graalvm.compiler.debug.DebugOptions.DebugStubsAndSnippets;
import static org.graalvm.compiler.hotspot.HotSpotHostBackend.UNCOMMON_TRAP_HANDLER;
@ -76,7 +77,7 @@ import jdk.vm.ci.meta.TriState;
/**
* Base class for implementing some low level code providing the out-of-line slow path for a snippet
* and/or a callee saved call to a HotSpot C/C++ runtime function or even a another compiled Java
* and/or a callee saved call to a HotSpot C/C++ runtime function or even another compiled Java
* method.
*/
public abstract class Stub {
@ -135,7 +136,9 @@ public abstract class Stub {
*/
public Stub(OptionValues options, HotSpotProviders providers, HotSpotForeignCallLinkage linkage) {
this.linkage = linkage;
this.options = new OptionValues(options, GraalOptions.TraceInlining, GraalOptions.TraceInliningForStubsAndSnippets.getValue(options));
// The RegisterPressure flag can be ignored by a compilation that runs out of registers, so
// the stub compilation must ignore the flag so that all allocatable registers are saved.
this.options = new OptionValues(options, GraalOptions.TraceInlining, GraalOptions.TraceInliningForStubsAndSnippets.getValue(options), RegisterPressure, null);
this.providers = providers;
}
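For readers unfamiliar with Graal's option layering, the override above can be read as in the following minimal sketch; it is not the actual Stub code. OptionValues and the statically imported RegisterPressure key are the classes already imported above, while baseOptions and the wrapper class are placeholders introduced only for illustration.

import static org.graalvm.compiler.core.common.GraalOptions.RegisterPressure;

import org.graalvm.compiler.options.OptionValues;

final class StubOptionsSketch {
    // Layer an explicit pair over the parent option set; a null value means the stub
    // compilation sees no RegisterPressure restriction, so the register allocator may use
    // (and the stub therefore saves) every allocatable register.
    static OptionValues withoutRegisterPressure(OptionValues baseOptions) {
        return new OptionValues(baseOptions, RegisterPressure, null);
    }
}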

View File

@ -579,4 +579,9 @@ public final class AMD64ArrayCompareToOp extends AMD64LIRInstruction {
masm.movzwl(elem2, new AMD64Address(str2, index, scale2, 0));
}
}
@Override
public boolean needsClearUpperVectorRegisters() {
return true;
}
}

View File

@ -864,4 +864,9 @@ public final class AMD64ArrayEqualsOp extends AMD64LIRInstruction {
throw new IllegalStateException();
}
}
@Override
public boolean needsClearUpperVectorRegisters() {
return true;
}
}

View File

@ -644,4 +644,9 @@ public final class AMD64ArrayIndexOfOp extends AMD64LIRInstruction {
private static boolean supports(LIRGeneratorTool tool, CPUFeature cpuFeature) {
return ((AMD64) tool.target().arch).getFeatures().contains(cpuFeature);
}
@Override
public boolean needsClearUpperVectorRegisters() {
return true;
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -152,6 +152,11 @@ public class AMD64Call {
public boolean destroysCallerSavedRegisters() {
return callTarget.destroysRegisters();
}
@Override
public boolean needsClearUpperVectorRegisters() {
return callTarget.needsClearUpperVectorRegisters();
}
}
@Opcode("NEAR_FOREIGN_CALL")

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -78,17 +78,15 @@ import jdk.vm.ci.code.StackSlot;
*/
public class AMD64FrameMap extends FrameMap {
private final boolean useStandardFrameProlog;
private StackSlot rbpSpillSlot;
public AMD64FrameMap(CodeCacheProvider codeCache, RegisterConfig registerConfig, ReferenceMapBuilderFactory referenceMapFactory) {
this(codeCache, registerConfig, referenceMapFactory, false);
}
public AMD64FrameMap(CodeCacheProvider codeCache, RegisterConfig registerConfig, ReferenceMapBuilderFactory referenceMapFactory, boolean useBasePointer) {
public AMD64FrameMap(CodeCacheProvider codeCache, RegisterConfig registerConfig, ReferenceMapBuilderFactory referenceMapFactory, boolean useStandardFrameProlog) {
super(codeCache, registerConfig, referenceMapFactory);
// (negative) offset relative to sp + total frame size
initialSpillSize = returnAddressSize() + (useBasePointer ? getTarget().arch.getWordSize() : 0);
spillSize = initialSpillSize;
this.useStandardFrameProlog = useStandardFrameProlog;
this.initialSpillSize = returnAddressSize() + (useStandardFrameProlog ? getTarget().arch.getWordSize() : 0);
this.spillSize = initialSpillSize;
}
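For example, on AMD64 the return address and a machine word are each 8 bytes, so a frame map created with the standard frame prologue starts at initialSpillSize = 8 + 8 = 16 bytes (return address plus the saved RBP), while one created without it starts at 8 bytes.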
@Override
@ -141,4 +139,8 @@ public class AMD64FrameMap extends FrameMap {
spillSlotSize(LIRKind.value(AMD64Kind.QWORD)) : "Deoptimization rescue slot must be the first or second (if there is an RBP spill slot) stack slot";
return allocateSpillSlot(LIRKind.value(AMD64Kind.QWORD));
}
public boolean useStandardFrameProlog() {
return useStandardFrameProlog;
}
}

View File

@ -275,4 +275,8 @@ public final class AMD64StringLatin1InflateOp extends AMD64LIRInstruction {
masm.bind(labelDone);
}
@Override
public boolean needsClearUpperVectorRegisters() {
return true;
}
}

View File

@ -340,4 +340,8 @@ public final class AMD64StringUTF16CompressOp extends AMD64LIRInstruction {
masm.bind(labelDone);
}
@Override
public boolean needsClearUpperVectorRegisters() {
return true;
}
}

View File

@ -37,6 +37,7 @@ import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize;
import org.graalvm.compiler.lir.LIRInstructionClass;
import org.graalvm.compiler.lir.Opcode;
import org.graalvm.compiler.lir.amd64.vector.AMD64VectorInstruction;
import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
import jdk.vm.ci.meta.AllocatableValue;
@ -49,11 +50,10 @@ public class AMD64Ternary {
/**
* Instruction that has three {@link AllocatableValue} operands.
*/
public static class ThreeOp extends AMD64LIRInstruction {
public static class ThreeOp extends AMD64VectorInstruction {
public static final LIRInstructionClass<ThreeOp> TYPE = LIRInstructionClass.create(ThreeOp.class);
@Opcode private final VexRVMOp opcode;
private final AVXSize size;
@Def({REG, HINT}) protected AllocatableValue result;
@Use({REG}) protected AllocatableValue x;
@ -65,10 +65,8 @@ public class AMD64Ternary {
@Alive({REG, STACK}) protected AllocatableValue z;
public ThreeOp(VexRVMOp opcode, AVXSize size, AllocatableValue result, AllocatableValue x, AllocatableValue y, AllocatableValue z) {
super(TYPE);
super(TYPE, size);
this.opcode = opcode;
this.size = size;
this.result = result;
this.x = x;
this.y = y;

View File

@ -32,6 +32,8 @@ import java.util.BitSet;
import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
import org.graalvm.compiler.lir.LIRInstructionClass;
import org.graalvm.compiler.lir.amd64.AMD64Call.ForeignCallOp;
import org.graalvm.compiler.lir.amd64.vector.AMD64VectorInstruction;
import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
import jdk.vm.ci.amd64.AMD64;
@ -40,6 +42,48 @@ import jdk.vm.ci.code.RegisterConfig;
import jdk.vm.ci.code.RegisterValue;
import jdk.vm.ci.meta.Value;
/**
* vzeroupper is essential to avoid the performance penalty of an SSE-AVX transition. Specifically,
* once we have executed instructions that modify the upper bits (i.e., bits 128 and above) of the
* YMM registers, we need to execute vzeroupper to transition the state back to 128 bits before
* executing any legacy SSE instructions. We don't need to place vzeroupper between VEX-encoded SSE
* instructions and legacy SSE instructions, nor between AVX instructions and VEX-encoded SSE
* instructions.
*
* When running Graal on HotSpot, we emit a vzeroupper LIR operation (i.e., an instance of this
* class) before a foreign call to a runtime function about which Graal has no knowledge. The
* underlying reason is that HotSpot is SSE-compiled so as to support older CPUs. We also emit a
* vzeroupper instruction (see {@code AMD64HotSpotReturnOp.emitCode}) upon returning, if the current
* LIR graph contains LIR operations that touch the upper bits of the YMM registers, including but
* not limited to {@link AMD64VectorInstruction}, {@link AMD64ArrayCompareToOp},
* {@link AMD64ArrayEqualsOp}, {@link AMD64ArrayIndexOfOp}, and {@link ForeignCallOp} that invokes
* Graal-compiled stubs. For the last case, since Graal-compiled stubs are under our control, we
* don't emit vzeroupper upon return from the stub, but rather upon return from the current method.
*
* On JDK 8, C2 does not emit many vzeroupper instructions, potentially because YMM registers are
* not heavily employed (C2 vectorization starts using YMM registers in JDK 9, see
* https://cr.openjdk.java.net/~vlivanov/talks/2017_Vectorization_in_HotSpot_JVM.pdf) and thus less
* care has been taken to place these instructions. One example is that many intrinsics employ YMM
* registers starting from https://bugs.openjdk.java.net/browse/JDK-8005419, but do not properly
* place vzeroupper upon return from the intrinsic stub or from the caller of the stub.
*
* Most vzeroupper instructions were added in JDK 10 (https://bugs.openjdk.java.net/browse/JDK-8178811)
* and their placement was later restricted on Haswell Xeon due to a performance regression
* (https://bugs.openjdk.java.net/browse/JDK-8190934). The actual condition for placing vzeroupper
* is at http://hg.openjdk.java.net/jdk/jdk/file/c7d9df2e470c/src/hotspot/cpu/x86/x86_64.ad#l428. To
* summarize, if an nmethod employs YMM registers (or calls intrinsics that use them, search for
* clear_upper_avx() in opto/library_call.cpp), vzeroupper will be generated on the nmethod's exit
* and before any calls in the nmethod, because the code being returned to or called into, even if
* it is itself compiled, may use only SSE instructions.
*
* This means that if a Java method calls an intrinsic that employs YMM registers, C2-compiled code
* will place a vzeroupper before the call, upon exit of the stub, and upon exit of this method.
* Graal places only the last of these, because it ensures that Graal-compiled Java methods and
* stubs are consistent in using VEX encoding.
*
* In SubstrateVM, since the whole image is compiled consistently with or without VEX encoding (the
* latter is the default behavior, see {@code NativeImageGenerator.createTarget}), there is no need
* for vzeroupper. For dynamic compilation on a SubstrateVM image, if the image is SSE-compiled, we
* then need vzeroupper when returning from the dynamically compiled code to the pre-built image
* code.
*/
public class AMD64VZeroUpper extends AMD64LIRInstruction {
public static final LIRInstructionClass<AMD64VZeroUpper> TYPE = LIRInstructionClass.create(AMD64VZeroUpper.class);
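The emit side of this op is not part of the hunk above; the following is only a rough sketch of what it amounts to, assuming the macro assembler exposes a vzeroupper() method, and omitting the temp registers the real class models.

@Override
public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
    // Zero bits 128 and above of all vector registers, leaving the XMM halves intact, so the
    // SSE-compiled callee does not pay the AVX-SSE transition penalty.
    masm.vzeroupper();
}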

View File

@ -40,7 +40,6 @@ import org.graalvm.compiler.lir.LIRFrameState;
import org.graalvm.compiler.lir.LIRInstructionClass;
import org.graalvm.compiler.lir.Opcode;
import org.graalvm.compiler.lir.amd64.AMD64AddressValue;
import org.graalvm.compiler.lir.amd64.AMD64LIRInstruction;
import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
import jdk.vm.ci.amd64.AMD64Kind;
@ -48,20 +47,18 @@ import jdk.vm.ci.meta.AllocatableValue;
public class AMD64VectorBinary {
public static final class AVXBinaryOp extends AMD64LIRInstruction {
public static final class AVXBinaryOp extends AMD64VectorInstruction {
public static final LIRInstructionClass<AVXBinaryOp> TYPE = LIRInstructionClass.create(AVXBinaryOp.class);
@Opcode private final VexRVMOp opcode;
private final AVXKind.AVXSize size;
@Def({REG}) protected AllocatableValue result;
@Use({REG}) protected AllocatableValue x;
@Use({REG, STACK}) protected AllocatableValue y;
public AVXBinaryOp(VexRVMOp opcode, AVXKind.AVXSize size, AllocatableValue result, AllocatableValue x, AllocatableValue y) {
super(TYPE);
super(TYPE, size);
this.opcode = opcode;
this.size = size;
this.result = result;
this.x = x;
this.y = y;
@ -77,22 +74,20 @@ public class AMD64VectorBinary {
}
}
public static final class AVXBinaryConstOp extends AMD64LIRInstruction {
public static final class AVXBinaryConstOp extends AMD64VectorInstruction {
public static final LIRInstructionClass<AVXBinaryConstOp> TYPE = LIRInstructionClass.create(AVXBinaryConstOp.class);
@Opcode private final VexRRIOp opcode;
private final AVXKind.AVXSize size;
@Def({REG}) protected AllocatableValue result;
@Use({REG}) protected AllocatableValue x;
protected int y;
public AVXBinaryConstOp(VexRRIOp opcode, AVXKind.AVXSize size, AllocatableValue result, AllocatableValue x, int y) {
super(TYPE);
super(TYPE, size);
assert (y & 0xFF) == y;
this.opcode = opcode;
this.size = size;
this.result = result;
this.x = x;
this.y = y;
@ -104,22 +99,20 @@ public class AMD64VectorBinary {
}
}
public static final class AVXBinaryConstFloatOp extends AMD64LIRInstruction {
public static final class AVXBinaryConstFloatOp extends AMD64VectorInstruction {
public static final LIRInstructionClass<AVXBinaryConstFloatOp> TYPE = LIRInstructionClass.create(AVXBinaryConstFloatOp.class);
@Opcode private final VexRVMOp opcode;
private final AVXKind.AVXSize size;
@Def({REG}) protected AllocatableValue result;
@Use({REG}) protected AllocatableValue x;
protected ConstantValue y;
public AVXBinaryConstFloatOp(VexRVMOp opcode, AVXKind.AVXSize size, AllocatableValue result, AllocatableValue x, ConstantValue y) {
super(TYPE);
super(TYPE, size);
assert y.getPlatformKind() == AMD64Kind.SINGLE || y.getPlatformKind() == AMD64Kind.DOUBLE;
this.opcode = opcode;
this.size = size;
this.result = result;
this.x = x;
this.y = y;
@ -136,11 +129,10 @@ public class AMD64VectorBinary {
}
}
public static final class AVXBinaryMemoryOp extends AMD64LIRInstruction {
public static final class AVXBinaryMemoryOp extends AMD64VectorInstruction {
public static final LIRInstructionClass<AVXBinaryMemoryOp> TYPE = LIRInstructionClass.create(AVXBinaryMemoryOp.class);
@Opcode private final VexRVMOp opcode;
private final AVXKind.AVXSize size;
@Def({REG}) protected AllocatableValue result;
@Use({REG}) protected AllocatableValue x;
@ -148,9 +140,8 @@ public class AMD64VectorBinary {
@State protected LIRFrameState state;
public AVXBinaryMemoryOp(VexRVMOp opcode, AVXKind.AVXSize size, AllocatableValue result, AllocatableValue x, AMD64AddressValue y, LIRFrameState state) {
super(TYPE);
super(TYPE, size);
this.opcode = opcode;
this.size = size;
this.result = result;
this.x = x;
this.y = y;

View File

@ -35,16 +35,14 @@ import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize;
import org.graalvm.compiler.lir.LIRInstructionClass;
import org.graalvm.compiler.lir.Opcode;
import org.graalvm.compiler.lir.amd64.AMD64LIRInstruction;
import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
import jdk.vm.ci.meta.AllocatableValue;
public final class AMD64VectorCompareOp extends AMD64LIRInstruction {
public final class AMD64VectorCompareOp extends AMD64VectorInstruction {
public static final LIRInstructionClass<AMD64VectorCompareOp> TYPE = LIRInstructionClass.create(AMD64VectorCompareOp.class);
@Opcode private final VexRMOp opcode;
private final AVXSize size;
@Use({REG}) protected AllocatableValue x;
@Use({REG, STACK}) protected AllocatableValue y;
@ -53,9 +51,8 @@ public final class AMD64VectorCompareOp extends AMD64LIRInstruction {
}
public AMD64VectorCompareOp(VexRMOp opcode, AVXSize size, AllocatableValue x, AllocatableValue y) {
super(TYPE);
super(TYPE, size);
this.opcode = opcode;
this.size = size;
this.x = x;
this.y = y;
}

View File

@ -0,0 +1,72 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.graalvm.compiler.lir.amd64.vector;
import static jdk.vm.ci.code.ValueUtil.asRegister;
import static jdk.vm.ci.code.ValueUtil.isRegister;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK;
import org.graalvm.compiler.asm.amd64.AMD64Address;
import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexFloatCompareOp;
import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize;
import org.graalvm.compiler.lir.LIRInstructionClass;
import org.graalvm.compiler.lir.Opcode;
import org.graalvm.compiler.lir.amd64.AMD64LIRInstruction;
import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
import jdk.vm.ci.meta.AllocatableValue;
public class AMD64VectorFloatCompareOp extends AMD64LIRInstruction {
public static final LIRInstructionClass<AMD64VectorFloatCompareOp> TYPE = LIRInstructionClass.create(AMD64VectorFloatCompareOp.class);
@Opcode private final VexFloatCompareOp opcode;
private final AVXSize size;
@Def({REG}) protected AllocatableValue result;
@Use({REG}) protected AllocatableValue x;
@Use({REG, STACK}) protected AllocatableValue y;
private final VexFloatCompareOp.Predicate predicate;
public AMD64VectorFloatCompareOp(VexFloatCompareOp opcode, AVXSize size, AllocatableValue result, AllocatableValue x, AllocatableValue y, VexFloatCompareOp.Predicate predicate) {
super(TYPE);
this.opcode = opcode;
this.size = size;
this.result = result;
this.x = x;
this.y = y;
this.predicate = predicate;
}
@Override
public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
if (isRegister(y)) {
opcode.emit(masm, size, asRegister(result), asRegister(x), asRegister(y), predicate);
} else {
opcode.emit(masm, size, asRegister(result), asRegister(x), (AMD64Address) crb.asAddress(y), predicate);
}
}
}
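As a hedged illustration (not code from this change) of how a LIR generator might create this op: the opcode and predicate follow the VexFloatCompareOp naming used above but should be treated as assumed, and gen, result, x and y are placeholders.

// Ordered, non-signalling packed-single equality compare over YMM-sized operands.
gen.append(new AMD64VectorFloatCompareOp(VexFloatCompareOp.VCMPPS, AVXSize.YMM,
                result, x, y, VexFloatCompareOp.Predicate.EQ_OQ));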

View File

@ -0,0 +1,46 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.graalvm.compiler.lir.amd64.vector;
import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize;
import org.graalvm.compiler.lir.LIRInstructionClass;
import org.graalvm.compiler.lir.amd64.AMD64LIRInstruction;
public abstract class AMD64VectorInstruction extends AMD64LIRInstruction {
public static final LIRInstructionClass<AMD64VectorInstruction> TYPE = LIRInstructionClass.create(AMD64VectorInstruction.class);
protected final AVXSize size;
public AMD64VectorInstruction(LIRInstructionClass<? extends AMD64VectorInstruction> c, AVXSize size) {
super(c);
this.size = size;
}
@Override
public boolean needsClearUpperVectorRegisters() {
return size == AVXSize.YMM || size == AVXSize.ZMM;
}
}
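A minimal, hypothetical subclass following the same pattern as the ops converted below; ExampleVectorOp does not exist in this change and only illustrates how the recorded size feeds needsClearUpperVectorRegisters (imports mirror those of the converted ops: AMD64MacroAssembler, CompilationResultBuilder, LIRInstructionClass).

public class ExampleVectorOp extends AMD64VectorInstruction {
    public static final LIRInstructionClass<ExampleVectorOp> TYPE = LIRInstructionClass.create(ExampleVectorOp.class);

    public ExampleVectorOp(AVXSize size) {
        // Recording the size here is what lets needsClearUpperVectorRegisters() report
        // YMM/ZMM usage without any per-op override.
        super(TYPE, size);
    }

    @Override
    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
        // emit a vector instruction of the recorded size here
    }
}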

View File

@ -203,17 +203,15 @@ public class AMD64VectorMove {
}
}
public abstract static class VectorMemOp extends AMD64LIRInstruction {
public abstract static class VectorMemOp extends AMD64VectorInstruction {
protected final AVXSize size;
protected final VexMoveOp op;
@Use({COMPOSITE}) protected AMD64AddressValue address;
@State protected LIRFrameState state;
protected VectorMemOp(LIRInstructionClass<? extends VectorMemOp> c, AVXSize size, VexMoveOp op, AMD64AddressValue address, LIRFrameState state) {
super(c);
this.size = size;
super(c, size);
this.op = op;
this.address = address;
this.state = state;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -50,19 +50,17 @@ import jdk.vm.ci.meta.Value;
public class AMD64VectorUnary {
public static final class AVXUnaryOp extends AMD64LIRInstruction {
public static final class AVXUnaryOp extends AMD64VectorInstruction {
public static final LIRInstructionClass<AVXUnaryOp> TYPE = LIRInstructionClass.create(AVXUnaryOp.class);
@Opcode private final VexRMOp opcode;
private final AVXKind.AVXSize size;
@Def({REG}) protected AllocatableValue result;
@Use({REG, STACK}) protected AllocatableValue input;
public AVXUnaryOp(VexRMOp opcode, AVXKind.AVXSize size, AllocatableValue result, AllocatableValue input) {
super(TYPE);
super(TYPE, size);
this.opcode = opcode;
this.size = size;
this.result = result;
this.input = input;
}
@ -77,20 +75,18 @@ public class AMD64VectorUnary {
}
}
public static final class AVXUnaryMemoryOp extends AMD64LIRInstruction {
public static final class AVXUnaryMemoryOp extends AMD64VectorInstruction {
public static final LIRInstructionClass<AVXUnaryMemoryOp> TYPE = LIRInstructionClass.create(AVXUnaryMemoryOp.class);
@Opcode private final VexRMOp opcode;
private final AVXKind.AVXSize size;
@Def({REG}) protected AllocatableValue result;
@Use({COMPOSITE}) protected AMD64AddressValue input;
@State protected LIRFrameState state;
public AVXUnaryMemoryOp(VexRMOp opcode, AVXKind.AVXSize size, AllocatableValue result, AMD64AddressValue input, LIRFrameState state) {
super(TYPE);
super(TYPE, size);
this.opcode = opcode;
this.size = size;
this.result = result;
this.input = input;
this.state = state;
@ -105,19 +101,17 @@ public class AMD64VectorUnary {
}
}
public static final class AVXBroadcastOp extends AMD64LIRInstruction {
public static final class AVXBroadcastOp extends AMD64VectorInstruction {
public static final LIRInstructionClass<AVXBroadcastOp> TYPE = LIRInstructionClass.create(AVXBroadcastOp.class);
@Opcode private final VexRMOp opcode;
private final AVXKind.AVXSize size;
@Def({REG}) protected AllocatableValue result;
@Use({REG, STACK, CONST}) protected Value input;
public AVXBroadcastOp(VexRMOp opcode, AVXKind.AVXSize size, AllocatableValue result, Value input) {
super(TYPE);
super(TYPE, size);
this.opcode = opcode;
this.size = size;
this.result = result;
this.input = input;
}
@ -136,20 +130,18 @@ public class AMD64VectorUnary {
}
}
public static final class AVXConvertMemoryOp extends AMD64LIRInstruction {
public static final class AVXConvertMemoryOp extends AMD64VectorInstruction {
public static final LIRInstructionClass<AVXConvertMemoryOp> TYPE = LIRInstructionClass.create(AVXConvertMemoryOp.class);
@Opcode private final VexRVMOp opcode;
private final AVXKind.AVXSize size;
@Def({REG}) protected AllocatableValue result;
@Use({COMPOSITE}) protected AMD64AddressValue input;
@State protected LIRFrameState state;
public AVXConvertMemoryOp(VexRVMOp opcode, AVXKind.AVXSize size, AllocatableValue result, AMD64AddressValue input, LIRFrameState state) {
super(TYPE);
super(TYPE, size);
this.opcode = opcode;
this.size = size;
this.result = result;
this.input = input;
this.state = state;
@ -180,6 +172,8 @@ public class AMD64VectorUnary {
@Override
public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
// Note that we assume only XMM-size instructions are emitted here. Loosening this
// restriction would require informing AMD64HotSpotReturnOp when emitting vzeroupper.
if (isRegister(input)) {
if (!asRegister(input).equals(asRegister(result))) {
// clear result register to avoid unnecessary dependency

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -458,4 +458,8 @@ public abstract class LIRInstruction {
public int hashCode() {
return id;
}
public boolean needsClearUpperVectorRegisters() {
return false;
}
}

View File

@ -692,4 +692,18 @@ public class CompilationResultBuilder {
public void setConservativeLabelRanges() {
this.conservativeLabelOffsets = true;
}
public final boolean needsClearUpperVectorRegisters() {
for (AbstractBlockBase<?> block : lir.codeEmittingOrder()) {
if (block == null) {
continue;
}
for (LIRInstruction op : lir.getLIRforBlock(block)) {
if (op.needsClearUpperVectorRegisters()) {
return true;
}
}
}
return false;
}
}
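To connect this with the vzeroupper discussion above: a consumer such as a return op can query this flag when emitting its epilogue. The following is a hedged sketch, loosely modeled on what the AMD64VZeroUpper comment attributes to AMD64HotSpotReturnOp.emitCode; the real condition may additionally depend on whether the compiled code is a stub and on the VM's SSE/AVX configuration.

@Override
public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
    if (crb.needsClearUpperVectorRegisters()) {
        // Some op in this LIR touched the YMM/ZMM uppers, so clean them up before
        // returning to potentially SSE-compiled code.
        masm.vzeroupper();
    }
    // ... emit the epilogue and the actual return ...
}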

View File

@ -354,4 +354,24 @@ public abstract class CompareNode extends BinaryOpLogicNode implements Canonical
return comparison;
}
public static LogicNode createFloatCompareNode(StructuredGraph graph, CanonicalCondition condition, ValueNode x, ValueNode y, boolean unorderedIsTrue, NodeView view) {
LogicNode result = createFloatCompareNode(condition, x, y, unorderedIsTrue, view);
return (result.graph() == null ? graph.addOrUniqueWithInputs(result) : result);
}
public static LogicNode createFloatCompareNode(CanonicalCondition condition, ValueNode x, ValueNode y, boolean unorderedIsTrue, NodeView view) {
assert x.getStackKind() == y.getStackKind();
assert x.getStackKind().isNumericFloat();
LogicNode comparison;
if (condition == CanonicalCondition.EQ) {
comparison = FloatEqualsNode.create(x, y, view);
} else {
assert condition == CanonicalCondition.LT;
comparison = FloatLessThanNode.create(x, y, unorderedIsTrue, view);
}
return comparison;
}
}
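A hedged usage sketch of the new graph-adding overload; graph, a and b are placeholders for whatever StructuredGraph and ValueNode operands a caller has, not names from this change.

// Build a float less-than that treats an unordered comparison (a NaN operand) as true, adding
// it and its inputs to the graph unless it is already part of one.
LogicNode lessThan = CompareNode.createFloatCompareNode(graph, CanonicalCondition.LT, a, b,
                true /* unorderedIsTrue */, NodeView.DEFAULT);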

View File

@ -1401,7 +1401,7 @@ public class InvocationPlugins {
if (declaringType instanceof ResolvedJavaSymbol) {
return checkResolvable(isOptional, ((ResolvedJavaSymbol) declaringType).getResolved(), binding);
}
Class<?> declaringClass = InvocationPlugins.resolveType(declaringType, isOptional);
Class<?> declaringClass = resolveType(declaringType, isOptional);
if (declaringClass == null) {
return true;
}
@ -1411,7 +1411,7 @@ public class InvocationPlugins {
}
} else {
if (resolveMethod(declaringClass, binding) == null && !isOptional) {
throw new AssertionError(String.format("Method not found: %s.%s%s", declaringClass.getName(), binding.name, binding.argumentsDescriptor));
throw new NoSuchMethodError(String.format("%s.%s%s", declaringClass.getName(), binding.name, binding.argumentsDescriptor));
}
}
return true;

View File

@ -132,12 +132,19 @@ public class OptionsParser {
throw new IllegalArgumentException(msg.toString());
}
Object value = parseOptionValue(desc, uncheckedValue);
desc.getOptionKey().update(values, value);
}
/** Parses a given option value with a known descriptor. */
public static Object parseOptionValue(OptionDescriptor desc, Object uncheckedValue) {
Class<?> optionType = desc.getOptionValueType();
Object value;
if (!(uncheckedValue instanceof String)) {
if (optionType != uncheckedValue.getClass()) {
String type = optionType.getSimpleName();
throw new IllegalArgumentException(type + " option '" + name + "' must have " + type + " value, not " + uncheckedValue.getClass() + " [toString: " + uncheckedValue + "]");
throw new IllegalArgumentException(type + " option '" + desc.getName() + "' must have " + type + " value, not " + uncheckedValue.getClass() + " [toString: " + uncheckedValue + "]");
}
value = uncheckedValue;
} else {
@ -148,7 +155,7 @@ public class OptionsParser {
} else if ("false".equals(valueString)) {
value = Boolean.FALSE;
} else {
throw new IllegalArgumentException("Boolean option '" + name + "' must have value \"true\" or \"false\", not \"" + uncheckedValue + "\"");
throw new IllegalArgumentException("Boolean option '" + desc.getName() + "' must have value \"true\" or \"false\", not \"" + uncheckedValue + "\"");
}
} else if (optionType == String.class) {
value = valueString;
@ -156,7 +163,7 @@ public class OptionsParser {
value = ((EnumOptionKey<?>) desc.getOptionKey()).valueOf(valueString);
} else {
if (valueString.isEmpty()) {
throw new IllegalArgumentException("Non empty value required for option '" + name + "'");
throw new IllegalArgumentException("Non empty value required for option '" + desc.getName() + "'");
}
try {
if (optionType == Float.class) {
@ -168,15 +175,14 @@ public class OptionsParser {
} else if (optionType == Long.class) {
value = Long.valueOf(parseLong(valueString));
} else {
throw new IllegalArgumentException("Wrong value for option '" + name + "'");
throw new IllegalArgumentException("Wrong value for option '" + desc.getName() + "'");
}
} catch (NumberFormatException nfe) {
throw new IllegalArgumentException("Value for option '" + name + "' has invalid number format: " + valueString);
throw new IllegalArgumentException("Value for option '" + desc.getName() + "' has invalid number format: " + valueString);
}
}
}
desc.getOptionKey().update(values, value);
return value;
}
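A hedged usage sketch of the newly exposed helper; descriptorFor(...) and values are hypothetical placeholders for however the caller obtains the OptionDescriptor and the map of option values.

OptionDescriptor desc = descriptorFor("TraceInlining");       // hypothetical lookup
Object value = OptionsParser.parseOptionValue(desc, "true");  // a Boolean option, so Boolean.TRUE
desc.getOptionKey().update(values, value);                    // the same update parseOption performs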
private static long parseLong(String v) {

View File

@ -1107,11 +1107,20 @@ public class StandardGraphBuilderPlugins {
@Override
public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver unsafe, ValueNode object, ValueNode offset) {
// Opaque mode does not directly impose any ordering constraints with respect to other
// variables beyond Plain mode.
if (accessKind == AccessKind.OPAQUE && StampTool.isPointerAlwaysNull(object)) {
// OFF_HEAP_LOCATION accesses are not floatable => no membars needed for opaque.
return apply(b, targetMethod, unsafe, offset);
}
// Emits a null-check for the otherwise unused receiver
unsafe.get();
if (accessKind.emitBarriers) {
b.add(new MembarNode(accessKind.preReadBarriers));
}
// Raw accesses can be turned into floatable field accesses; the membars preserve the
// access mode. In the case of opaque access, and only for opaque, the location of the
// wrapping membars can be refined to the field location.
createUnsafeAccess(object, b, (obj, loc) -> new RawLoadNode(obj, offset, unsafeAccessKind, loc));
if (accessKind.emitBarriers) {
b.add(new MembarNode(accessKind.postReadBarriers));
@ -1144,12 +1153,21 @@ public class StandardGraphBuilderPlugins {
@Override
public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver unsafe, ValueNode object, ValueNode offset, ValueNode value) {
// Opaque mode does not directly impose any ordering constraints with respect to other
// variables beyond Plain mode.
if (accessKind == AccessKind.OPAQUE && StampTool.isPointerAlwaysNull(object)) {
// OFF_HEAP_LOCATION accesses are not floatable => no membars needed for opaque.
return apply(b, targetMethod, unsafe, offset, value);
}
// Emits a null-check for the otherwise unused receiver
unsafe.get();
if (accessKind.emitBarriers) {
b.add(new MembarNode(accessKind.preWriteBarriers));
}
ValueNode maskedValue = b.maskSubWordValue(value, unsafeAccessKind);
// Raw accesses can be turned into floatable field accesses; the membars preserve the
// access mode. In the case of opaque access, and only for opaque, the location of the
// wrapping membars can be refined to the field location.
createUnsafeAccess(object, b, (obj, loc) -> new RawStoreNode(obj, offset, maskedValue, unsafeAccessKind, loc));
if (accessKind.emitBarriers) {
b.add(new MembarNode(accessKind.postWriteBarriers));

View File

@ -259,7 +259,7 @@ public final class MethodHandleNode extends MacroStateSplitNode implements Simpl
Assumptions assumptions = adder.getAssumptions();
ResolvedJavaMethod realTarget = null;
if (target.canBeStaticallyBound()) {
if (target.canBeStaticallyBound() || intrinsicMethod == IntrinsicMethod.LINK_TO_SPECIAL) {
realTarget = target;
} else {
ResolvedJavaType targetType = target.getDeclaringClass();