Phil Race 2018-06-25 10:21:50 -07:00
commit b6dba38ce4
632 changed files with 32844 additions and 5035 deletions

@ -491,3 +491,5 @@ a11c1cb542bbd1671d25b85efe7d09b983c48525 jdk-11+15
64e4b1686141e57a681936a8283983341484676e jdk-11+17
e1b3def126240d5433902f3cb0e91a4c27f6db50 jdk-11+18
fb8b3f4672774e15654958295558a1af1b576919 jdk-11+19
36ca515343e00b021dcfc902e986d26ec994a2e5 jdk-11+19

@ -1,6 +1,6 @@
#!/bin/sh
#
# Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -75,7 +75,6 @@ fi
#
# see above - already in place, copy the flags down here to disable
ENABLE_FLIGHT_RECORDER_FLAGS="\
-XX:+UnlockCommercialFeatures \
-XX:+FlightRecorder \
-XX:FlightRecorderOptions=defaultrecording=true,disk=true,dumponexit=true,dumponexitpath=$JFR_FILENAME,stackdepth=1024"

@ -81,7 +81,7 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
elif test "x$TOOLCHAIN_TYPE" = xclang; then
BASIC_LDFLAGS_JVM_ONLY="-mno-omit-leaf-frame-pointer -mstack-alignment=16 \
-stdlib=libc++ -fPIC"
-fPIC"
elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
BASIC_LDFLAGS="-Wl,-z,defs"

@ -832,7 +832,7 @@ var getJibProfilesDependencies = function (input, common) {
var devkit_platform_revisions = {
linux_x64: "gcc7.3.0-OEL6.4+1.0",
macosx_x64: "Xcode6.3-MacOSX10.9+1.0",
macosx_x64: "Xcode9.4-MacOSX10.13+1.0",
solaris_x64: "SS12u4-Solaris11u1+1.0",
solaris_sparcv9: "SS12u4-Solaris11u1+1.1",
windows_x64: "VS2017-15.5.5+1.0",

@ -395,8 +395,8 @@ JDWP "Java(tm) Debug Wire Protocol"
"Can the VM add methods when redefining "
"classes?")
(boolean canUnrestrictedlyRedefineClasses
"Can the VM redefine classes"
"in arbitrary ways?")
"Can the VM redefine classes "
"in ways that are normally restricted?")
(boolean canPopFrames
"Can the VM pop stack frames?")
(boolean canUseInstanceFilters
@ -460,12 +460,23 @@ JDWP "Java(tm) Debug Wire Protocol"
"<a href=\"#JDWP_StackFrame_PopFrames\">PopFrames</a> command can be used "
"to pop frames with obsolete methods."
"<p>"
"Unless the canUnrestrictedlyRedefineClasses capability is present the following "
"redefinitions are restricted: "
"<ul>"
"<li>changing the schema (the fields)</li>"
"<li>changing the hierarchy (superclasses, interfaces)</li>"
"<li>deleting a method</li>"
"<li>changing class modifiers</li>"
"<li>changing method modifiers</li>"
"<li>changing the <code>NestHost</code> or <code>NestMembers</code> class attributes</li>"
"</ul>"
"<p>"
"Requires canRedefineClasses capability - see "
"<a href=\"#JDWP_VirtualMachine_CapabilitiesNew\">CapabilitiesNew</a>. "
"In addition to the canRedefineClasses capability, the target VM must "
"have the canAddMethod capability to add methods when redefining classes, "
"or the canUnrestrictedlyRedefineClasses to redefine classes in arbitrary "
"ways."
"or the canUnrestrictedlyRedefineClasses capability to redefine classes in ways "
"that are normally restricted."
(Out
(Repeat classes "Number of reference types that follow."
(Group ClassDef
@ -496,6 +507,7 @@ JDWP "Java(tm) Debug Wire Protocol"
(Error DELETE_METHOD_NOT_IMPLEMENTED)
(Error CLASS_MODIFIERS_CHANGE_NOT_IMPLEMENTED)
(Error METHOD_MODIFIERS_CHANGE_NOT_IMPLEMENTED)
(Error CLASS_ATTRIBUTE_CHANGE_NOT_IMPLEMENTED)
(Error VM_DEAD)
)
)
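As an editorial aside (not part of this commit): the operation these capabilities and error codes describe is the one a JVMTI agent invokes natively. A minimal hedged sketch follows, in which the new class-file bytes and all error handling are assumptions, not anything this commit defines:

#include <jvmti.h>

// Sketch only: redefine one class. Structural changes (added/deleted methods,
// schema or hierarchy changes, modifier or NestHost/NestMembers changes) fail
// with JVMTI_ERROR_UNSUPPORTED_REDEFINITION_* errors unless the VM supports
// them, which JDWP surfaces as the *_NOT_IMPLEMENTED errors listed above.
static jvmtiError redefine_class(jvmtiEnv* jvmti, jclass klass,
                                 const unsigned char* new_bytes, jint new_len) {
  jvmtiCapabilities caps = {0};
  caps.can_redefine_classes = 1;            // JDWP: canRedefineClasses
  jvmtiError err = jvmti->AddCapabilities(&caps);
  if (err != JVMTI_ERROR_NONE) return err;

  jvmtiClassDefinition def = {klass, new_len, new_bytes};
  return jvmti->RedefineClasses(1, &def);   // the operation RedefineClasses maps to
}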
@ -3148,12 +3160,16 @@ JDWP "Java(tm) Debug Wire Protocol"
"different from the name in the old class object.")
(Constant CLASS_MODIFIERS_CHANGE_NOT_IMPLEMENTED
=70 "The new class version has different modifiers and "
"and canUnrestrictedlyRedefineClasses is false.")
"canUnrestrictedlyRedefineClasses is false.")
(Constant METHOD_MODIFIERS_CHANGE_NOT_IMPLEMENTED
=71 "A method in the new class version has "
"different modifiers "
"than its counterpart in the old class version and "
"and canUnrestrictedlyRedefineClasses is false.")
"canUnrestrictedlyRedefineClasses is false.")
(Constant CLASS_ATTRIBUTE_CHANGE_NOT_IMPLEMENTED
=72 "The new class version has different NestHost or "
"NestMembers class attribute and "
"canUnrestrictedlyRedefineClasses is false.")
(Constant NOT_IMPLEMENTED =99 "The functionality is not implemented in "
"this virtual machine.")
(Constant NULL_POINTER =100 "Invalid pointer.")

@ -1,5 +1,5 @@
#
# Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,8 @@
#
-class name jdk/nashorn/api/scripting/AbstractJSObject
-class name jdk/nashorn/api/scripting/ClassFilter
-class name jdk/nashorn/api/scripting/JSObject
-class name jdk/nashorn/api/scripting/NashornException

@ -1,5 +1,5 @@
#
# Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -27,7 +27,7 @@
# ##########################################################
#
class name jdk/nashorn/api/scripting/AbstractJSObject
header extends java/lang/Object implements jdk/nashorn/api/scripting/JSObject flags 421
header extends java/lang/Object implements jdk/nashorn/api/scripting/JSObject flags 421 runtimeAnnotations @Ljdk/Exported;
method name <init> descriptor ()V flags 1
method name call descriptor (Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object; flags 81
method name newObject descriptor ([Ljava/lang/Object;)Ljava/lang/Object; flags 81
@ -47,10 +47,16 @@ method name getClassName descriptor ()Ljava/lang/String; flags 1
method name isFunction descriptor ()Z flags 1
method name isStrictFunction descriptor ()Z flags 1
method name isArray descriptor ()Z flags 1
method name toNumber descriptor ()D flags 1
method name toNumber descriptor ()D flags 1 deprecated true runtimeAnnotations @Ljava/lang/Deprecated;
method name getDefaultValue descriptor (Ljava/lang/Class;)Ljava/lang/Object; flags 1 signature (Ljava/lang/Class<*>;)Ljava/lang/Object;
method name getDefaultValue descriptor (Ljdk/nashorn/api/scripting/JSObject;Ljava/lang/Class;)Ljava/lang/Object; flags 9 signature (Ljdk/nashorn/api/scripting/JSObject;Ljava/lang/Class<*>;)Ljava/lang/Object;
class name jdk/nashorn/api/scripting/ClassFilter
header extends java/lang/Object flags 601 runtimeAnnotations @Ljdk/Exported;
method name exposeToScripts descriptor (Ljava/lang/String;)Z flags 401
class name jdk/nashorn/api/scripting/JSObject
header extends java/lang/Object flags 601
header extends java/lang/Object flags 601 runtimeAnnotations @Ljdk/Exported;
method name call descriptor (Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object; flags 481
method name newObject descriptor ([Ljava/lang/Object;)Ljava/lang/Object; flags 481
method name eval descriptor (Ljava/lang/String;)Ljava/lang/Object; flags 401
@ -69,22 +75,28 @@ method name getClassName descriptor ()Ljava/lang/String; flags 401
method name isFunction descriptor ()Z flags 401
method name isStrictFunction descriptor ()Z flags 401
method name isArray descriptor ()Z flags 401
method name toNumber descriptor ()D flags 401
method name toNumber descriptor ()D flags 401 deprecated true runtimeAnnotations @Ljava/lang/Deprecated;
class name jdk/nashorn/api/scripting/NashornException
header extends java/lang/RuntimeException flags 421
field name ENGINE_SCRIPT_SOURCE_NAME descriptor Ljava/lang/String; constantValue nashorn:engine/resources/engine.js flags 19
header extends java/lang/RuntimeException flags 421 runtimeAnnotations @Ljdk/Exported;
method name <init> descriptor (Ljava/lang/String;Ljava/lang/String;II)V flags 4
method name <init> descriptor (Ljava/lang/String;Ljava/lang/Throwable;Ljava/lang/String;II)V flags 4
method name <init> descriptor (Ljava/lang/String;Ljava/lang/Throwable;)V flags 4
method name getFileName descriptor ()Ljava/lang/String; flags 11
method name setFileName descriptor (Ljava/lang/String;)V flags 11
method name getLineNumber descriptor ()I flags 11
method name setLineNumber descriptor (I)V flags 11
method name getColumnNumber descriptor ()I flags 11
method name setColumnNumber descriptor (I)V flags 11
method name getScriptFrames descriptor (Ljava/lang/Throwable;)[Ljava/lang/StackTraceElement; flags 9
method name getScriptStackString descriptor (Ljava/lang/Throwable;)Ljava/lang/String; flags 9
method name getThrown descriptor ()Ljava/lang/Object; flags 4
method name initEcmaError descriptor (Ljdk/nashorn/internal/runtime/ScriptObject;)Ljdk/nashorn/api/scripting/NashornException; flags 4
method name getEcmaError descriptor ()Ljava/lang/Object; flags 1
method name setEcmaError descriptor (Ljava/lang/Object;)V flags 1
class name jdk/nashorn/api/scripting/NashornScriptEngine
header extends javax/script/AbstractScriptEngine implements javax/script/Compilable,javax/script/Invocable flags 31
header extends javax/script/AbstractScriptEngine implements javax/script/Compilable,javax/script/Invocable flags 31 runtimeAnnotations @Ljdk/Exported;
innerclass innerClass java/lang/invoke/MethodHandles$Lookup outerClass java/lang/invoke/MethodHandles innerClassName Lookup flags 19
field name NASHORN_GLOBAL descriptor Ljava/lang/String; constantValue nashorn.global flags 19
method name eval descriptor (Ljava/io/Reader;Ljavax/script/ScriptContext;)Ljava/lang/Object; thrownTypes javax/script/ScriptException flags 1
@ -97,10 +109,9 @@ method name invokeFunction descriptor (Ljava/lang/String;[Ljava/lang/Object;)Lja
method name invokeMethod descriptor (Ljava/lang/Object;Ljava/lang/String;[Ljava/lang/Object;)Ljava/lang/Object; thrownTypes javax/script/ScriptException,java/lang/NoSuchMethodException flags 81
method name getInterface descriptor (Ljava/lang/Class;)Ljava/lang/Object; flags 1 signature <T:Ljava/lang/Object;>(Ljava/lang/Class<TT;>;)TT;
method name getInterface descriptor (Ljava/lang/Object;Ljava/lang/Class;)Ljava/lang/Object; flags 1 signature <T:Ljava/lang/Object;>(Ljava/lang/Object;Ljava/lang/Class<TT;>;)TT;
method name __noSuchProperty__ descriptor (Ljava/lang/Object;Ljavax/script/ScriptContext;Ljava/lang/String;)Ljava/lang/Object; flags 1
class name jdk/nashorn/api/scripting/NashornScriptEngineFactory
header extends java/lang/Object implements javax/script/ScriptEngineFactory flags 31
header extends java/lang/Object implements javax/script/ScriptEngineFactory flags 31 runtimeAnnotations @Ljdk/Exported;
method name <init> descriptor ()V flags 1
method name getEngineName descriptor ()Ljava/lang/String; flags 1
method name getEngineVersion descriptor ()Ljava/lang/String; flags 1
@ -115,11 +126,13 @@ method name getParameter descriptor (Ljava/lang/String;)Ljava/lang/Object; flags
method name getProgram descriptor ([Ljava/lang/String;)Ljava/lang/String; flags 81
method name getScriptEngine descriptor ()Ljavax/script/ScriptEngine; flags 1
method name getScriptEngine descriptor (Ljava/lang/ClassLoader;)Ljavax/script/ScriptEngine; flags 1
method name getScriptEngine descriptor ([Ljava/lang/String;)Ljavax/script/ScriptEngine; flags 1
method name getScriptEngine descriptor (Ljdk/nashorn/api/scripting/ClassFilter;)Ljavax/script/ScriptEngine; flags 1
method name getScriptEngine descriptor ([Ljava/lang/String;)Ljavax/script/ScriptEngine; flags 81
method name getScriptEngine descriptor ([Ljava/lang/String;Ljava/lang/ClassLoader;)Ljavax/script/ScriptEngine; flags 1
method name getScriptEngine descriptor ([Ljava/lang/String;Ljava/lang/ClassLoader;Ljdk/nashorn/api/scripting/ClassFilter;)Ljavax/script/ScriptEngine; flags 1
class name jdk/nashorn/api/scripting/ScriptObjectMirror
header extends jdk/nashorn/api/scripting/AbstractJSObject implements javax/script/Bindings flags 31
header extends jdk/nashorn/api/scripting/AbstractJSObject implements javax/script/Bindings flags 31 runtimeAnnotations @Ljdk/Exported;
innerclass innerClass java/util/Map$Entry outerClass java/util/Map innerClassName Entry flags 609
method name equals descriptor (Ljava/lang/Object;)Z flags 1
method name hashCode descriptor ()I flags 1
@ -135,6 +148,7 @@ method name hasSlot descriptor (I)Z flags 1
method name removeMember descriptor (Ljava/lang/String;)V flags 1
method name setMember descriptor (Ljava/lang/String;Ljava/lang/Object;)V flags 1
method name setSlot descriptor (ILjava/lang/Object;)V flags 1
method name setIndexedPropertiesToExternalArrayData descriptor (Ljava/nio/ByteBuffer;)V flags 1
method name isInstance descriptor (Ljava/lang/Object;)Z flags 1
method name getClassName descriptor ()Ljava/lang/String; flags 1
method name isFunction descriptor ()Z flags 1
@ -166,25 +180,28 @@ method name isFrozen descriptor ()Z flags 1
method name isUndefined descriptor (Ljava/lang/Object;)Z flags 9
method name to descriptor (Ljava/lang/Class;)Ljava/lang/Object; flags 1 signature <T:Ljava/lang/Object;>(Ljava/lang/Class<TT;>;)TT;
method name wrap descriptor (Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object; flags 9
method name wrapAsJSONCompatible descriptor (Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object; flags 9
method name unwrap descriptor (Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object; flags 9
method name wrapArray descriptor ([Ljava/lang/Object;Ljava/lang/Object;)[Ljava/lang/Object; flags 9
method name unwrapArray descriptor ([Ljava/lang/Object;Ljava/lang/Object;)[Ljava/lang/Object; flags 9
method name toNumber descriptor ()D flags 1
method name identical descriptor (Ljava/lang/Object;Ljava/lang/Object;)Z flags 9
method name toNumber descriptor ()D flags 1 deprecated true runtimeAnnotations @Ljava/lang/Deprecated;
method name getDefaultValue descriptor (Ljava/lang/Class;)Ljava/lang/Object; flags 1 signature (Ljava/lang/Class<*>;)Ljava/lang/Object;
method name put descriptor (Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object; flags 1041
class name jdk/nashorn/api/scripting/ScriptUtils
header extends java/lang/Object flags 31
header extends java/lang/Object flags 31 runtimeAnnotations @Ljdk/Exported;
method name parse descriptor (Ljava/lang/String;Ljava/lang/String;Z)Ljava/lang/String; flags 9
method name format descriptor (Ljava/lang/String;[Ljava/lang/Object;)Ljava/lang/String; flags 9
method name makeSynchronizedFunction descriptor (Ljdk/nashorn/internal/runtime/ScriptFunction;Ljava/lang/Object;)Ljava/lang/Object; flags 9
method name wrap descriptor (Ljava/lang/Object;)Ljava/lang/Object; flags 9
method name makeSynchronizedFunction descriptor (Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object; flags 9
method name wrap descriptor (Ljava/lang/Object;)Ljdk/nashorn/api/scripting/ScriptObjectMirror; flags 9
method name unwrap descriptor (Ljava/lang/Object;)Ljava/lang/Object; flags 9
method name wrapArray descriptor ([Ljava/lang/Object;)[Ljava/lang/Object; flags 9
method name unwrapArray descriptor ([Ljava/lang/Object;)[Ljava/lang/Object; flags 9
method name convert descriptor (Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object; flags 9
class name jdk/nashorn/api/scripting/URLReader
header extends java/io/Reader flags 31
header extends java/io/Reader flags 31 runtimeAnnotations @Ljdk/Exported;
method name <init> descriptor (Ljava/net/URL;)V flags 1
method name <init> descriptor (Ljava/net/URL;Ljava/lang/String;)V flags 1
method name <init> descriptor (Ljava/net/URL;Ljava/nio/charset/Charset;)V flags 1

@ -1,5 +1,5 @@
#
# Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -30,15 +30,16 @@ module name jdk.scripting.nashorn
header exports jdk/nashorn/api/scripting,jdk/nashorn/api/tree requires name\u0020;jdk.dynalink\u0020;flags\u0020;0,name\u0020;java.logging\u0020;flags\u0020;0,name\u0020;java.base\u0020;flags\u0020;8000,name\u0020;java.scripting\u0020;flags\u0020;20 provides interface\u0020;javax/script/ScriptEngineFactory\u0020;impls\u0020;jdk/nashorn/api/scripting/NashornScriptEngineFactory,interface\u0020;jdk/dynalink/linker/GuardingDynamicLinkerExporter\u0020;impls\u0020;jdk/nashorn/api/linker/NashornLinkerExporter flags 8000
class name jdk/nashorn/api/scripting/AbstractJSObject
-method name toNumber descriptor ()D
method name toNumber descriptor ()D flags 1 deprecated true runtimeAnnotations @Ljava/lang/Deprecated;
header extends java/lang/Object implements jdk/nashorn/api/scripting/JSObject flags 421
-method name getDefaultValue descriptor (Ljava/lang/Class;)Ljava/lang/Object;
-method name getDefaultValue descriptor (Ljdk/nashorn/api/scripting/JSObject;Ljava/lang/Class;)Ljava/lang/Object;
method name getDefaultValue descriptor (Ljdk/nashorn/api/scripting/JSObject;Ljava/lang/Class;)Ljava/lang/Object; flags 9 deprecated true signature (Ljdk/nashorn/api/scripting/JSObject;Ljava/lang/Class<*>;)Ljava/lang/Object; runtimeAnnotations @Ljava/lang/Deprecated;
class name jdk/nashorn/api/scripting/ClassFilter
header extends java/lang/Object flags 601
method name exposeToScripts descriptor (Ljava/lang/String;)Z flags 401
class name jdk/nashorn/api/scripting/JSObject
header extends java/lang/Object flags 601
-method name toNumber descriptor ()D
method name toNumber descriptor ()D flags 1 deprecated true runtimeAnnotations @Ljava/lang/Deprecated;
method name getDefaultValue descriptor (Ljava/lang/Class;)Ljava/lang/Object; thrownTypes java/lang/UnsupportedOperationException flags 1 signature (Ljava/lang/Class<*>;)Ljava/lang/Object;
@ -46,41 +47,26 @@ method name getDefaultValue descriptor (Ljava/lang/Class;)Ljava/lang/Object; thr
class name jdk/nashorn/api/scripting/NashornException
header extends java/lang/RuntimeException flags 421
innerclass innerClass java/lang/invoke/MethodHandles$Lookup outerClass java/lang/invoke/MethodHandles innerClassName Lookup flags 19
-field name ENGINE_SCRIPT_SOURCE_NAME descriptor Ljava/lang/String;
method name setFileName descriptor (Ljava/lang/String;)V flags 11
method name setLineNumber descriptor (I)V flags 11
method name setColumnNumber descriptor (I)V flags 11
method name getThrown descriptor ()Ljava/lang/Object; flags 4
method name getEcmaError descriptor ()Ljava/lang/Object; flags 1
method name setEcmaError descriptor (Ljava/lang/Object;)V flags 1
-method name initEcmaError descriptor (Ljdk/nashorn/internal/runtime/ScriptObject;)Ljdk/nashorn/api/scripting/NashornException;
class name jdk/nashorn/api/scripting/NashornScriptEngine
-method name __noSuchProperty__ descriptor (Ljava/lang/Object;Ljavax/script/ScriptContext;Ljava/lang/String;)Ljava/lang/Object;
header extends javax/script/AbstractScriptEngine implements javax/script/Compilable,javax/script/Invocable flags 31
innerclass innerClass java/lang/invoke/MethodHandles$Lookup outerClass java/lang/invoke/MethodHandles innerClassName Lookup flags 19
class name jdk/nashorn/api/scripting/NashornScriptEngineFactory
header extends java/lang/Object implements javax/script/ScriptEngineFactory flags 31
innerclass innerClass java/lang/invoke/MethodHandles$Lookup outerClass java/lang/invoke/MethodHandles innerClassName Lookup flags 19
-method name getScriptEngine descriptor ([Ljava/lang/String;)Ljavax/script/ScriptEngine;
method name getScriptEngine descriptor (Ljdk/nashorn/api/scripting/ClassFilter;)Ljavax/script/ScriptEngine; flags 1
method name getScriptEngine descriptor ([Ljava/lang/String;)Ljavax/script/ScriptEngine; flags 81
method name getScriptEngine descriptor ([Ljava/lang/String;Ljava/lang/ClassLoader;Ljdk/nashorn/api/scripting/ClassFilter;)Ljavax/script/ScriptEngine; flags 1
class name jdk/nashorn/api/scripting/ScriptObjectMirror
header extends jdk/nashorn/api/scripting/AbstractJSObject implements javax/script/Bindings flags 31
innerclass innerClass java/util/Map$Entry outerClass java/util/Map innerClassName Entry flags 609
innerclass innerClass java/lang/invoke/MethodHandles$Lookup outerClass java/lang/invoke/MethodHandles innerClassName Lookup flags 19
-method name toNumber descriptor ()D
method name setIndexedPropertiesToExternalArrayData descriptor (Ljava/nio/ByteBuffer;)V flags 1
method name wrapAsJSONCompatible descriptor (Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object; flags 9
method name identical descriptor (Ljava/lang/Object;Ljava/lang/Object;)Z flags 9
method name toNumber descriptor ()D flags 1 deprecated true runtimeAnnotations @Ljava/lang/Deprecated;
method name getDefaultValue descriptor (Ljava/lang/Class;)Ljava/lang/Object; flags 1 signature (Ljava/lang/Class<*>;)Ljava/lang/Object;
class name jdk/nashorn/api/scripting/ScriptUtils
-method name makeSynchronizedFunction descriptor (Ljdk/nashorn/internal/runtime/ScriptFunction;Ljava/lang/Object;)Ljava/lang/Object;
-method name wrap descriptor (Ljava/lang/Object;)Ljava/lang/Object;
method name makeSynchronizedFunction descriptor (Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object; flags 9
method name wrap descriptor (Ljava/lang/Object;)Ljdk/nashorn/api/scripting/ScriptObjectMirror; flags 9
header extends java/lang/Object flags 31
class name jdk/nashorn/api/scripting/URLReader
header extends java/io/Reader flags 31
class name jdk/nashorn/api/tree/ArrayAccessTree
header extends java/lang/Object implements jdk/nashorn/api/tree/ExpressionTree flags 601

@ -22,6 +22,7 @@
#
JVM_ActiveProcessorCount
JVM_AreNestMates
JVM_ArrayCopy
JVM_AssertionStatusDirectives
JVM_BeforeHalt
@ -118,6 +119,8 @@ JVM_GetMethodIxSignatureUTF
JVM_GetMethodParameters
JVM_GetMethodTypeAnnotations
JVM_GetNanoTimeAdjustment
JVM_GetNestHost
JVM_GetNestMembers
JVM_GetPrimitiveArrayElement
JVM_GetProtectionDomain
JVM_GetSimpleBinaryName

@ -35,7 +35,7 @@ ifeq ($(OPENJDK_TARGET_OS), windows)
CFLAGS := $(CFLAGS_JDKLIB), \
LDFLAGS := $(LDFLAGS_JDKLIB) $(LDFLAGS_CXX_JDK) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
LIBS := crypt32.lib advapi32.lib, \
LIBS := crypt32.lib advapi32.lib ncrypt.lib, \
))
TARGETS += $(BUILD_LIBSUNMSCAPI)

@ -351,7 +351,7 @@ test.src.dir=test/nashorn/src
run.test.xmx=2G
run.test.xms=2G
# uncomment this jfr.args to enable light recordings. the stack needs to be cranked up to 1024 frames,
# uncomment this jfr.args to enable flight recordings. the stack needs to be cranked up to 1024 frames,
# or everything will, as of now, drown in lambda forms and be cut off.
#
#jfr.args=-XX:StartFlightRecording=disk=true,dumponexit=true,dumponexitpath="test_suite.jfr",stackdepth=1024

File diff suppressed because it is too large

@ -1638,12 +1638,14 @@ void mvnw(Register Rd, Register Rm,
#undef INSN
// Conditional compare (both kinds)
void conditional_compare(unsigned op, int o2, int o3,
void conditional_compare(unsigned op, int o1, int o2, int o3,
Register Rn, unsigned imm5, unsigned nzcv,
unsigned cond) {
starti;
f(op, 31, 29);
f(0b11010010, 28, 21);
f(cond, 15, 12);
f(o1, 11);
f(o2, 10);
f(o3, 4);
f(nzcv, 3, 0);
@ -1652,15 +1654,12 @@ void mvnw(Register Rd, Register Rm,
#define INSN(NAME, op) \
void NAME(Register Rn, Register Rm, int imm, Condition cond) { \
starti; \
f(0, 11); \
conditional_compare(op, 0, 0, Rn, (uintptr_t)Rm, imm, cond); \
int regNumber = (Rm == zr ? 31 : (uintptr_t)Rm); \
conditional_compare(op, 0, 0, 0, Rn, regNumber, imm, cond); \
} \
\
void NAME(Register Rn, int imm5, int imm, Condition cond) { \
starti; \
f(1, 11); \
conditional_compare(op, 0, 0, Rn, imm5, imm, cond); \
void NAME(Register Rn, int imm5, int imm, Condition cond) { \
conditional_compare(op, 1, 0, 0, Rn, imm5, imm, cond); \
}
INSN(ccmnw, 0b001);
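// Editorial worked example (values illustrative, not in the source):
// ccmpw(r1, r2, 0b0100, EQ) takes the register form, so o1 = 0 is emitted
// at bit 11 and bits 20..16 hold r2's register number, while
// ccmpw(r1, 5, 0b0100, EQ) takes the immediate form, passing o1 = 1 so
// that bit 11 selects CCMP (immediate) with imm5 = 5 in bits 20..16; the
// two ARMv8 encodings differ only in that bit.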
@ -2025,6 +2024,57 @@ public:
fmovd(Vn, zr);
}
// Floating-point rounding
// type: half-precision = 11
// single = 00
// double = 01
// rmode: A = Away = 100
// I = current = 111
// M = MinusInf = 010
// N = eveN = 000
// P = PlusInf = 001
// X = eXact = 110
// Z = Zero = 011
void float_round(unsigned type, unsigned rmode, FloatRegister Rd, FloatRegister Rn) {
starti;
f(0b00011110, 31, 24);
f(type, 23, 22);
f(0b1001, 21, 18);
f(rmode, 17, 15);
f(0b10000, 14, 10);
rf(Rn, 5), rf(Rd, 0);
}
#define INSN(NAME, type, rmode) \
void NAME(FloatRegister Vd, FloatRegister Vn) { \
float_round(type, rmode, Vd, Vn); \
}
public:
INSN(frintah, 0b11, 0b100);
INSN(frintih, 0b11, 0b111);
INSN(frintmh, 0b11, 0b010);
INSN(frintnh, 0b11, 0b000);
INSN(frintph, 0b11, 0b001);
INSN(frintxh, 0b11, 0b110);
INSN(frintzh, 0b11, 0b011);
INSN(frintas, 0b00, 0b100);
INSN(frintis, 0b00, 0b111);
INSN(frintms, 0b00, 0b010);
INSN(frintns, 0b00, 0b000);
INSN(frintps, 0b00, 0b001);
INSN(frintxs, 0b00, 0b110);
INSN(frintzs, 0b00, 0b011);
INSN(frintad, 0b01, 0b100);
INSN(frintid, 0b01, 0b111);
INSN(frintmd, 0b01, 0b010);
INSN(frintnd, 0b01, 0b000);
INSN(frintpd, 0b01, 0b001);
INSN(frintxd, 0b01, 0b110);
INSN(frintzd, 0b01, 0b011);
#undef INSN
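// Editorial worked example of the type/rmode table above: frintmd(v0, v1)
// expands to float_round(0b01, 0b010, v0, v1) -- type 01 = double
// precision, rmode 010 = MinusInf -- assembling to "frintm d0, d1",
// i.e. round v1 toward minus infinity into v0.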
/* SIMD extensions
*
* We just use FloatRegister in the following. They are exactly the same
@ -2294,6 +2344,42 @@ public:
#undef INSN
#define INSN(NAME, op1, op2) \
void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm, int index = 0) { \
starti; \
assert(T == T2S || T == T4S || T == T2D, "invalid arrangement"); \
assert(index >= 0 && ((T == T2D && index <= 1) || (T != T2D && index <= 3)), "invalid index"); \
f(0, 31), f((int)T & 1, 30), f(op1, 29); f(0b011111, 28, 23); \
f(T == T2D ? 1 : 0, 22), f(T == T2D ? 0 : index & 1, 21), rf(Vm, 16); \
f(op2, 15, 12), f(T == T2D ? index : (index >> 1), 11), f(0, 10); \
rf(Vn, 5), rf(Vd, 0); \
}
// FMLA/FMLS - Vector - Scalar
INSN(fmlavs, 0, 0b0001);
INSN(fmlsvs, 0, 0b0001);
// FMULX - Vector - Scalar
INSN(fmulxvs, 1, 0b1001);
#undef INSN
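// Editorial worked example of the index packing (values illustrative):
// fmlavs(v0, T4S, v1, v2, 3) selects element v2.s[3]; for non-T2D
// arrangements the low index bit is emitted at bit 21 and the high bit at
// bit 11, so both are 1 here. For T2D the single index bit goes to bit 11
// and bit 21 stays 0.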
// Floating-point Reciprocal Estimate
void frecpe(FloatRegister Vd, FloatRegister Vn, SIMD_RegVariant type) {
assert(type == D || type == S, "Wrong type for frecpe");
starti;
f(0b010111101, 31, 23);
f(type == D ? 1 : 0, 22);
f(0b100001110110, 21, 10);
rf(Vn, 5), rf(Vd, 0);
}
// (double) {a, b} -> (a + b)
void faddpd(FloatRegister Vd, FloatRegister Vn) {
starti;
f(0b0111111001110000110110, 31, 10);
rf(Vn, 5), rf(Vd, 0);
}
void ins(FloatRegister Vd, SIMD_RegVariant T, FloatRegister Vn, int didx, int sidx) {
starti;
assert(T != Q, "invalid register variant");

@ -745,6 +745,14 @@ LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
}
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
assert(x->number_of_arguments() == 1 || (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow), "wrong type");
if (x->id() == vmIntrinsics::_dexp || x->id() == vmIntrinsics::_dlog ||
x->id() == vmIntrinsics::_dpow || x->id() == vmIntrinsics::_dcos ||
x->id() == vmIntrinsics::_dsin || x->id() == vmIntrinsics::_dtan ||
x->id() == vmIntrinsics::_dlog10) {
do_LibmIntrinsic(x);
return;
}
switch (x->id()) {
case vmIntrinsics::_dabs:
case vmIntrinsics::_dsqrt: {
@ -754,63 +762,102 @@ void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
LIR_Opr dst = rlock_result(x);
switch (x->id()) {
case vmIntrinsics::_dsqrt: {
__ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
break;
case vmIntrinsics::_dsqrt: {
__ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
break;
}
case vmIntrinsics::_dabs: {
__ abs(value.result(), dst, LIR_OprFact::illegalOpr);
break;
}
}
case vmIntrinsics::_dabs: {
__ abs(value.result(), dst, LIR_OprFact::illegalOpr);
break;
}
}
break;
}
case vmIntrinsics::_dlog10: // fall through
case vmIntrinsics::_dlog: // fall through
case vmIntrinsics::_dsin: // fall through
case vmIntrinsics::_dtan: // fall through
case vmIntrinsics::_dcos: // fall through
case vmIntrinsics::_dexp: {
assert(x->number_of_arguments() == 1, "wrong type");
address runtime_entry = NULL;
switch (x->id()) {
case vmIntrinsics::_dsin:
runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
break;
case vmIntrinsics::_dcos:
runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
break;
case vmIntrinsics::_dtan:
runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
break;
case vmIntrinsics::_dlog:
runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
break;
case vmIntrinsics::_dlog10:
runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
break;
case vmIntrinsics::_dexp:
runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
break;
default:
ShouldNotReachHere();
}
LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
set_result(x, result);
break;
}
case vmIntrinsics::_dpow: {
assert(x->number_of_arguments() == 2, "wrong type");
address runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), NULL);
set_result(x, result);
break;
}
}
}
void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
LIRItem value(x->argument_at(0), this);
value.set_destroys_register();
LIR_Opr calc_result = rlock_result(x);
LIR_Opr result_reg = result_register_for(x->type());
CallingConvention* cc = NULL;
if (x->id() == vmIntrinsics::_dpow) {
LIRItem value1(x->argument_at(1), this);
value1.set_destroys_register();
BasicTypeList signature(2);
signature.append(T_DOUBLE);
signature.append(T_DOUBLE);
cc = frame_map()->c_calling_convention(&signature);
value.load_item_force(cc->at(0));
value1.load_item_force(cc->at(1));
} else {
BasicTypeList signature(1);
signature.append(T_DOUBLE);
cc = frame_map()->c_calling_convention(&signature);
value.load_item_force(cc->at(0));
}
switch (x->id()) {
case vmIntrinsics::_dexp:
if (StubRoutines::dexp() != NULL) {
__ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
} else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
}
break;
case vmIntrinsics::_dlog:
if (StubRoutines::dlog() != NULL) {
__ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
} else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
}
break;
case vmIntrinsics::_dlog10:
if (StubRoutines::dlog10() != NULL) {
__ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
} else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
}
break;
case vmIntrinsics::_dpow:
if (StubRoutines::dpow() != NULL) {
__ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
} else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
}
break;
case vmIntrinsics::_dsin:
if (StubRoutines::dsin() != NULL) {
__ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
} else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
}
break;
case vmIntrinsics::_dcos:
if (StubRoutines::dcos() != NULL) {
__ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
} else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
}
break;
case vmIntrinsics::_dtan:
if (StubRoutines::dtan() != NULL) {
__ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
} else {
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
}
break;
default: ShouldNotReachHere();
}
__ move(result_reg, calc_result);
}
void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
assert(x->number_of_arguments() == 5, "wrong type");

@ -71,9 +71,20 @@ bool frame::safe_for_sender(JavaThread *thread) {
return false;
}
// unextended sp must be within the stack and above or equal sp
bool unextended_sp_safe = (unextended_sp < thread->stack_base()) &&
(unextended_sp >= sp);
// When we are running interpreted code the machine stack pointer, SP, is
// set low enough so that the Java expression stack can grow and shrink
// without ever exceeding the machine stack bounds. So, ESP >= SP.
// When we call out of an interpreted method, SP is incremented so that
// the space between SP and ESP is removed. The SP saved in the callee's
// frame is the SP *before* this increment. So, when we walk a stack of
// interpreter frames the sender's SP saved in a frame might be less than
// the SP at the point of call.
// So unextended sp must be within the stack, but we need not check
// that unextended sp >= sp
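// Editorial illustration of the above (addresses made up): an interpreted
// caller may sit with SP = 0x6f00 and expression-stack top ESP = 0x6f40.
// At a call, SP is bumped up to 0x6f40, but the sender SP recorded in the
// callee's frame is the pre-bump 0x6f00, so the saved unextended SP can be
// below the SP in effect at the call site, and "unextended_sp >= sp" would
// reject a perfectly valid frame.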
bool unextended_sp_safe = (unextended_sp < thread->stack_base());
if (!unextended_sp_safe) {
return false;

@ -43,7 +43,7 @@
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, RegSet saved_regs) {
bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
if (!dest_uninitialized) {
__ push(saved_regs, sp);
if (count == c_rarg0) {

@ -37,14 +37,14 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators,
bool in_heap = (decorators & IN_HEAP) != 0;
bool in_native = (decorators & IN_NATIVE) != 0;
bool oop_not_null = (decorators & OOP_NOT_NULL) != 0;
bool is_not_null = (decorators & IS_NOT_NULL) != 0;
switch (type) {
case T_OBJECT:
case T_ARRAY: {
if (in_heap) {
if (UseCompressedOops) {
__ ldrw(dst, src);
if (oop_not_null) {
if (is_not_null) {
__ decode_heap_oop_not_null(dst);
} else {
__ decode_heap_oop(dst);

@ -91,9 +91,9 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2) {
bool in_heap = (decorators & IN_HEAP) != 0;
bool on_array = (decorators & IN_HEAP_ARRAY) != 0;
bool is_array = (decorators & IS_ARRAY) != 0;
bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
bool precise = on_array || on_anonymous;
bool precise = is_array || on_anonymous;
bool needs_post_barrier = val != noreg && in_heap;
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, noreg, noreg);

File diff suppressed because it is too large

@ -1212,8 +1212,8 @@ public:
void string_compare(Register str1, Register str2,
Register cnt1, Register cnt2, Register result,
Register tmp1,
FloatRegister vtmp, FloatRegister vtmpZ, int ae);
Register tmp1, Register tmp2, FloatRegister vtmp1,
FloatRegister vtmp2, FloatRegister vtmp3, int ae);
void has_negatives(Register ary1, Register len, Register result);
@ -1247,11 +1247,25 @@ public:
Register cnt1, Register cnt2,
Register tmp1, Register tmp2,
Register tmp3, Register tmp4,
Register tmp5, Register tmp6,
int int_cnt1, Register result, int ae);
void string_indexof_char(Register str1, Register cnt1,
Register ch, Register result,
Register tmp1, Register tmp2, Register tmp3);
private:
void fast_log(FloatRegister vtmp0, FloatRegister vtmp1, FloatRegister vtmp2,
FloatRegister vtmp3, FloatRegister vtmp4, FloatRegister vtmp5,
FloatRegister tmpC1, FloatRegister tmpC2, FloatRegister tmpC3,
FloatRegister tmpC4, Register tmp1, Register tmp2,
Register tmp3, Register tmp4, Register tmp5);
void generate_dsin_dcos(bool isCos, address npio2_hw, address two_over_pi,
address pio2, address dsin_coef, address dcos_coef);
private:
// begin trigonometric functions support block
void generate__ieee754_rem_pio2(address npio2_hw, address two_over_pi, address pio2);
void generate__kernel_rem_pio2(address two_over_pi, address pio2);
void generate_kernel_sin(FloatRegister x, bool iyIsOne, address dsin_coef);
void generate_kernel_cos(FloatRegister x, address dcos_coef);
// end trigonometric functions support block
void add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
Register src1, Register src2);
void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {

@ -0,0 +1,365 @@
/* Copyright (c) 2018, Cavium. All rights reserved. (By BELLSOFT)
* Copyright (c) 2016, Intel Corporation.
* Intel Math Library (LIBM) Source Code
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "macroAssembler_aarch64.hpp"
// Algorithm idea is taken from x86 hotspot intrinsic and adapted for AARCH64.
//
// For mathematical background please refer to the following literature:
//
// Tang, Ping-Tak Peter.
// Table-driven implementation of the logarithm function
// in IEEE floating-point arithmetic.
// ACM Transactions on Mathematical Software (TOMS) 16, no. 4, 1990: 378-400.
/******************************************************************************/
// ALGORITHM DESCRIPTION - LOG()
// ---------------------
//
// x=2^k * mx, mx in [1,2)
//
// Get B~1/mx based on the output of frecpe instruction (B0)
// B = int((B0*2^7+0.5))/2^7
//
// Reduced argument: r=B*mx-1.0 (computed accurately in high and low parts)
//
// Result: k*log(2) - log(B) + p(r) if |x-1| >= small value (2^-6) and
// p(r) is a degree 7 polynomial
// -log(B) read from data table (high, low parts)
// Result is formed from high and low parts
//
// Special cases:
// 1. log(NaN) = quiet NaN
// 2. log(+INF) = that INF
// 3. log(0) = -INF
// 4. log(1) = +0
// 5. log(x) = NaN if x < -0, including -INF
//
/******************************************************************************/
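// Editorial note (not in the source): the reduction works because, with
//   x = 2^k * mx, mx in [1,2), and B ~ 1/mx,
//   log(x) = k*log(2) + log(mx)
//          = k*log(2) - log(B) + log(B*mx)
//          = k*log(2) - log(B) + log(1+r),  where r = B*mx - 1.
// Since B agrees with 1/mx to about 7 fractional bits, |r| <~ 2^-7, which is
// small enough for the degree-7 polynomial p(r) ~ log(1+r) plus the tabulated
// -log(B) values (high and low parts) to deliver the final result.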
// Table with p(r) polynomial coefficients
// and table representation of logarithm values (hi and low parts)
__attribute__ ((aligned(64))) juint _L_tbl[] =
{
// coefficients of p(r) polynomial:
// _coeff[]
0x00000000UL, 0xbfd00000UL, // C1_0 = -0.25
0x92492492UL, 0x3fc24924UL, // C1_1 = 0.14285714285714285
0x55555555UL, 0x3fd55555UL, // C2_0 = 0.3333333333333333
0x3d6fb175UL, 0xbfc5555eUL, // C2_1 = -0.16666772842235003
0x00000000UL, 0xbfe00000UL, // C3_0 = -0.5
0x9999999aUL, 0x3fc99999UL, // C3_1 = 0.2
// _log2[]
0xfefa3800UL, 0x3fa62e42UL, // C4_0 = 0.043321698784993146
0x93c76730UL, 0x3ceef357UL, // C4_1 = 3.436201886692732e-15
// _L_tbl[] with logarithm values (hi and low parts)
0xfefa3800UL, 0x3fe62e42UL, 0x93c76730UL, 0x3d2ef357UL, 0xaa241800UL,
0x3fe5ee82UL, 0x0cda46beUL, 0x3d220238UL, 0x5c364800UL, 0x3fe5af40UL,
0xac10c9fbUL, 0x3d2dfa63UL, 0x26bb8c00UL, 0x3fe5707aUL, 0xff3303ddUL,
0x3d09980bUL, 0x26867800UL, 0x3fe5322eUL, 0x5d257531UL, 0x3d05ccc4UL,
0x835a5000UL, 0x3fe4f45aUL, 0x6d93b8fbUL, 0xbd2e6c51UL, 0x6f970c00UL,
0x3fe4b6fdUL, 0xed4c541cUL, 0x3cef7115UL, 0x27e8a400UL, 0x3fe47a15UL,
0xf94d60aaUL, 0xbd22cb6aUL, 0xf2f92400UL, 0x3fe43d9fUL, 0x481051f7UL,
0xbcfd984fUL, 0x2125cc00UL, 0x3fe4019cUL, 0x30f0c74cUL, 0xbd26ce79UL,
0x0c36c000UL, 0x3fe3c608UL, 0x7cfe13c2UL, 0xbd02b736UL, 0x17197800UL,
0x3fe38ae2UL, 0xbb5569a4UL, 0xbd218b7aUL, 0xad9d8c00UL, 0x3fe35028UL,
0x9527e6acUL, 0x3d10b83fUL, 0x44340800UL, 0x3fe315daUL, 0xc5a0ed9cUL,
0xbd274e93UL, 0x57b0e000UL, 0x3fe2dbf5UL, 0x07b9dc11UL, 0xbd17a6e5UL,
0x6d0ec000UL, 0x3fe2a278UL, 0xe797882dUL, 0x3d206d2bUL, 0x1134dc00UL,
0x3fe26962UL, 0x05226250UL, 0xbd0b61f1UL, 0xd8bebc00UL, 0x3fe230b0UL,
0x6e48667bUL, 0x3d12fc06UL, 0x5fc61800UL, 0x3fe1f863UL, 0xc9fe81d3UL,
0xbd2a7242UL, 0x49ae6000UL, 0x3fe1c078UL, 0xed70e667UL, 0x3cccacdeUL,
0x40f23c00UL, 0x3fe188eeUL, 0xf8ab4650UL, 0x3d14cc4eUL, 0xf6f29800UL,
0x3fe151c3UL, 0xa293ae49UL, 0xbd2edd97UL, 0x23c75c00UL, 0x3fe11af8UL,
0xbb9ddcb2UL, 0xbd258647UL, 0x8611cc00UL, 0x3fe0e489UL, 0x07801742UL,
0x3d1c2998UL, 0xe2d05400UL, 0x3fe0ae76UL, 0x887e7e27UL, 0x3d1f486bUL,
0x0533c400UL, 0x3fe078bfUL, 0x41edf5fdUL, 0x3d268122UL, 0xbe760400UL,
0x3fe04360UL, 0xe79539e0UL, 0xbd04c45fUL, 0xe5b20800UL, 0x3fe00e5aUL,
0xb1727b1cUL, 0xbd053ba3UL, 0xaf7a4800UL, 0x3fdfb358UL, 0x3c164935UL,
0x3d0085faUL, 0xee031800UL, 0x3fdf4aa7UL, 0x6f014a8bUL, 0x3d12cde5UL,
0x56b41000UL, 0x3fdee2a1UL, 0x5a470251UL, 0x3d2f27f4UL, 0xc3ddb000UL,
0x3fde7b42UL, 0x5372bd08UL, 0xbd246550UL, 0x1a272800UL, 0x3fde148aUL,
0x07322938UL, 0xbd1326b2UL, 0x484c9800UL, 0x3fddae75UL, 0x60dc616aUL,
0xbd1ea42dUL, 0x46def800UL, 0x3fdd4902UL, 0xe9a767a8UL, 0x3d235bafUL,
0x18064800UL, 0x3fdce42fUL, 0x3ec7a6b0UL, 0xbd0797c3UL, 0xc7455800UL,
0x3fdc7ff9UL, 0xc15249aeUL, 0xbd29b6ddUL, 0x693fa000UL, 0x3fdc1c60UL,
0x7fe8e180UL, 0x3d2cec80UL, 0x1b80e000UL, 0x3fdbb961UL, 0xf40a666dUL,
0x3d27d85bUL, 0x04462800UL, 0x3fdb56faUL, 0x2d841995UL, 0x3d109525UL,
0x5248d000UL, 0x3fdaf529UL, 0x52774458UL, 0xbd217cc5UL, 0x3c8ad800UL,
0x3fda93edUL, 0xbea77a5dUL, 0x3d1e36f2UL, 0x0224f800UL, 0x3fda3344UL,
0x7f9d79f5UL, 0x3d23c645UL, 0xea15f000UL, 0x3fd9d32bUL, 0x10d0c0b0UL,
0xbd26279eUL, 0x43135800UL, 0x3fd973a3UL, 0xa502d9f0UL, 0xbd152313UL,
0x635bf800UL, 0x3fd914a8UL, 0x2ee6307dUL, 0xbd1766b5UL, 0xa88b3000UL,
0x3fd8b639UL, 0xe5e70470UL, 0xbd205ae1UL, 0x776dc800UL, 0x3fd85855UL,
0x3333778aUL, 0x3d2fd56fUL, 0x3bd81800UL, 0x3fd7fafaUL, 0xc812566aUL,
0xbd272090UL, 0x687cf800UL, 0x3fd79e26UL, 0x2efd1778UL, 0x3d29ec7dUL,
0x76c67800UL, 0x3fd741d8UL, 0x49dc60b3UL, 0x3d2d8b09UL, 0xe6af1800UL,
0x3fd6e60eUL, 0x7c222d87UL, 0x3d172165UL, 0x3e9c6800UL, 0x3fd68ac8UL,
0x2756eba0UL, 0x3d20a0d3UL, 0x0b3ab000UL, 0x3fd63003UL, 0xe731ae00UL,
0xbd2db623UL, 0xdf596000UL, 0x3fd5d5bdUL, 0x08a465dcUL, 0xbd0a0b2aUL,
0x53c8d000UL, 0x3fd57bf7UL, 0xee5d40efUL, 0x3d1fadedUL, 0x0738a000UL,
0x3fd522aeUL, 0x8164c759UL, 0x3d2ebe70UL, 0x9e173000UL, 0x3fd4c9e0UL,
0x1b0ad8a4UL, 0xbd2e2089UL, 0xc271c800UL, 0x3fd4718dUL, 0x0967d675UL,
0xbd2f27ceUL, 0x23d5e800UL, 0x3fd419b4UL, 0xec90e09dUL, 0x3d08e436UL,
0x77333000UL, 0x3fd3c252UL, 0xb606bd5cUL, 0x3d183b54UL, 0x76be1000UL,
0x3fd36b67UL, 0xb0f177c8UL, 0x3d116ecdUL, 0xe1d36000UL, 0x3fd314f1UL,
0xd3213cb8UL, 0xbd28e27aUL, 0x7cdc9000UL, 0x3fd2bef0UL, 0x4a5004f4UL,
0x3d2a9cfaUL, 0x1134d800UL, 0x3fd26962UL, 0xdf5bb3b6UL, 0x3d2c93c1UL,
0x6d0eb800UL, 0x3fd21445UL, 0xba46baeaUL, 0x3d0a87deUL, 0x635a6800UL,
0x3fd1bf99UL, 0x5147bdb7UL, 0x3d2ca6edUL, 0xcbacf800UL, 0x3fd16b5cUL,
0xf7a51681UL, 0x3d2b9acdUL, 0x8227e800UL, 0x3fd1178eUL, 0x63a5f01cUL,
0xbd2c210eUL, 0x67616000UL, 0x3fd0c42dUL, 0x163ceae9UL, 0x3d27188bUL,
0x604d5800UL, 0x3fd07138UL, 0x16ed4e91UL, 0x3cf89cdbUL, 0x5626c800UL,
0x3fd01eaeUL, 0x1485e94aUL, 0xbd16f08cUL, 0x6cb3b000UL, 0x3fcf991cUL,
0xca0cdf30UL, 0x3d1bcbecUL, 0xe4dd0000UL, 0x3fcef5adUL, 0x65bb8e11UL,
0xbcca2115UL, 0xffe71000UL, 0x3fce530eUL, 0x6041f430UL, 0x3cc21227UL,
0xb0d49000UL, 0x3fcdb13dUL, 0xf715b035UL, 0xbd2aff2aUL, 0xf2656000UL,
0x3fcd1037UL, 0x75b6f6e4UL, 0xbd084a7eUL, 0xc6f01000UL, 0x3fcc6ffbUL,
0xc5962bd2UL, 0xbcf1ec72UL, 0x383be000UL, 0x3fcbd087UL, 0x595412b6UL,
0xbd2d4bc4UL, 0x575bd000UL, 0x3fcb31d8UL, 0x4eace1aaUL, 0xbd0c358dUL,
0x3c8ae000UL, 0x3fca93edUL, 0x50562169UL, 0xbd287243UL, 0x07089000UL,
0x3fc9f6c4UL, 0x6865817aUL, 0x3d29904dUL, 0xdcf70000UL, 0x3fc95a5aUL,
0x58a0ff6fUL, 0x3d07f228UL, 0xeb390000UL, 0x3fc8beafUL, 0xaae92cd1UL,
0xbd073d54UL, 0x6551a000UL, 0x3fc823c1UL, 0x9a631e83UL, 0x3d1e0ddbUL,
0x85445000UL, 0x3fc7898dUL, 0x70914305UL, 0xbd1c6610UL, 0x8b757000UL,
0x3fc6f012UL, 0xe59c21e1UL, 0xbd25118dUL, 0xbe8c1000UL, 0x3fc6574eUL,
0x2c3c2e78UL, 0x3d19cf8bUL, 0x6b544000UL, 0x3fc5bf40UL, 0xeb68981cUL,
0xbd127023UL, 0xe4a1b000UL, 0x3fc527e5UL, 0xe5697dc7UL, 0x3d2633e8UL,
0x8333b000UL, 0x3fc4913dUL, 0x54fdb678UL, 0x3d258379UL, 0xa5993000UL,
0x3fc3fb45UL, 0x7e6a354dUL, 0xbd2cd1d8UL, 0xb0159000UL, 0x3fc365fcUL,
0x234b7289UL, 0x3cc62fa8UL, 0x0c868000UL, 0x3fc2d161UL, 0xcb81b4a1UL,
0x3d039d6cUL, 0x2a49c000UL, 0x3fc23d71UL, 0x8fd3df5cUL, 0x3d100d23UL,
0x7e23f000UL, 0x3fc1aa2bUL, 0x44389934UL, 0x3d2ca78eUL, 0x8227e000UL,
0x3fc1178eUL, 0xce2d07f2UL, 0x3d21ef78UL, 0xb59e4000UL, 0x3fc08598UL,
0x7009902cUL, 0xbd27e5ddUL, 0x39dbe000UL, 0x3fbfe891UL, 0x4fa10afdUL,
0xbd2534d6UL, 0x830a2000UL, 0x3fbec739UL, 0xafe645e0UL, 0xbd2dc068UL,
0x63844000UL, 0x3fbda727UL, 0x1fa71733UL, 0x3d1a8940UL, 0x01bc4000UL,
0x3fbc8858UL, 0xc65aacd3UL, 0x3d2646d1UL, 0x8dad6000UL, 0x3fbb6ac8UL,
0x2bf768e5UL, 0xbd139080UL, 0x40b1c000UL, 0x3fba4e76UL, 0xb94407c8UL,
0xbd0e42b6UL, 0x5d594000UL, 0x3fb9335eUL, 0x3abd47daUL, 0x3d23115cUL,
0x2f40e000UL, 0x3fb8197eUL, 0xf96ffdf7UL, 0x3d0f80dcUL, 0x0aeac000UL,
0x3fb700d3UL, 0xa99ded32UL, 0x3cec1e8dUL, 0x4d97a000UL, 0x3fb5e95aUL,
0x3c5d1d1eUL, 0xbd2c6906UL, 0x5d208000UL, 0x3fb4d311UL, 0x82f4e1efUL,
0xbcf53a25UL, 0xa7d1e000UL, 0x3fb3bdf5UL, 0xa5db4ed7UL, 0x3d2cc85eUL,
0xa4472000UL, 0x3fb2aa04UL, 0xae9c697dUL, 0xbd20b6e8UL, 0xd1466000UL,
0x3fb1973bUL, 0x560d9e9bUL, 0xbd25325dUL, 0xb59e4000UL, 0x3fb08598UL,
0x7009902cUL, 0xbd17e5ddUL, 0xc006c000UL, 0x3faeea31UL, 0x4fc93b7bUL,
0xbd0e113eUL, 0xcdddc000UL, 0x3faccb73UL, 0x47d82807UL, 0xbd1a68f2UL,
0xd0fb0000UL, 0x3faaaef2UL, 0x353bb42eUL, 0x3d20fc1aUL, 0x149fc000UL,
0x3fa894aaUL, 0xd05a267dUL, 0xbd197995UL, 0xf2d4c000UL, 0x3fa67c94UL,
0xec19afa2UL, 0xbd029efbUL, 0xd42e0000UL, 0x3fa466aeUL, 0x75bdfd28UL,
0xbd2c1673UL, 0x2f8d0000UL, 0x3fa252f3UL, 0xe021b67bUL, 0x3d283e9aUL,
0x89e74000UL, 0x3fa0415dUL, 0x5cf1d753UL, 0x3d0111c0UL, 0xec148000UL,
0x3f9c63d2UL, 0x3f9eb2f3UL, 0x3d2578c6UL, 0x28c90000UL, 0x3f984925UL,
0x325a0c34UL, 0xbd2aa0baUL, 0x25980000UL, 0x3f9432a9UL, 0x928637feUL,
0x3d098139UL, 0x58938000UL, 0x3f902056UL, 0x06e2f7d2UL, 0xbd23dc5bUL,
0xa3890000UL, 0x3f882448UL, 0xda74f640UL, 0xbd275577UL, 0x75890000UL,
0x3f801015UL, 0x999d2be8UL, 0xbd10c76bUL, 0x59580000UL, 0x3f700805UL,
0xcb31c67bUL, 0x3d2166afUL, 0x00000000UL, 0x00000000UL, 0x00000000UL,
0x80000000UL
};
// BEGIN dlog PSEUDO CODE:
// double dlog(double X) {
// // p(r) polynomial coefficients initialized from _L_tbl table
// double C1_0 = _L_tbl[0];
// double C1_1 = _L_tbl[1];
// double C2_0 = _L_tbl[2];
// double C2_1 = _L_tbl[3];
// double C3_0 = _L_tbl[4];
// double C3_1 = _L_tbl[5];
// double C4_0 = _L_tbl[6];
// double C4_1 = _L_tbl[7];
// // NOTE: operations with coefficients above are mostly vectorized in assembly
// // Check corner cases first
// if (X == 1.0d || AS_LONG_BITS(X) + 0x0010000000000000 <= 0x0010000000000000) {
// // NOTE: AS_LONG_BITS(X) + 0x0010000000000000 <= 0x0010000000000000 means
// // that X < 0 or X >= 0x7FF0000000000000 (0x7FF* is NaN or INF)
// if (X < 0 || X is NaN) return NaN;
// if (X == 1.0d) return 0.0d;
// if (X == 0.0d) return -INFINITY;
// if (X is INFINITY) return INFINITY;
// }
// // double representation is 2^exponent * mantissa
// // split X into two multipliers: 2^exponent and 1.0 * mantissa
// // pseudo function: zeroExponent(X) return value of X with exponent == 0
// float vtmp5 = 1/(float)(zeroExponent(X)); // reciprocal estimate
// // pseudo function: HI16(X) returns high 16 bits of double value
// int hiWord = HI16(X);
// double vtmp1 = (double) 0x77F0 << 48 | mantissa(X);
// hiWord -= 16;
// if (AS_LONG_BITS(hiWord) > 0x8000) {
// // SMALL_VALUE branch
// vtmp0 = vtmp1 = vtmp0 * AS_DOUBLE_BITS(0x47F0000000000000);
// hiWord = HI16(vtmp1);
// vtmp0 = AS_DOUBLE_BITS(AS_LONG_BITS(vtmp0) |= 0x3FF0000000000000);
// vtmp5 = (double) (1/(float)vtmp0);
// vtmp1 <<= 12;
// vtmp1 >>= 12;
// }
// // MAIN branch
// double vtmp3 = AS_LONG_BITS(vtmp1) & 0xffffe00000000000; // hi part
// int intB0 = AS_INT_BITS(vtmp5) + 0x8000;
// double vtmp0 = AS_DOUBLE_BITS(0xffffe00000000000 & (intB0<<29));
// int index = (intB0 >> 16) & 0xFF;
// double hiTableValue = _L_tbl[8+index]; // vtmp2[0]
// double lowTableValue = _L_tbl[16+index]; // vtmp2[1]
// vtmp5 = AS_DOUBLE_BITS(hiWord & 0x7FF0 - 0x3FE0); // 0x3FE = 1023 << 4
// vtmp1 -= vtmp3; // low part
// vtmp3 = vtmp3*vtmp0 - 1.0;
// hiTableValue += C4_0 * vtmp5;
// lowTableValue += C4_1 * vtmp5;
// double r = vtmp1 * vtmp0 + vtmp3; // r = B*mx-1.0, computed in hi and low parts
// vtmp0 = hiTableValue + r;
// hiTableValue -= vtmp0;
// double r2 = r*r;
// double r3 = r2*r;
// double p7 = C3_0*r2 + C2_0*r3 + C1_0*r2*r2 + C3_1*r3*r2 + C2_1*r3*r3
// + C1_1*r3*r2*r2; // degree 7 polynomial
// return p7 + (vtmp0 + ((r + hiTableValue) + lowTableValue));
// }
//
// END dlog PSEUDO CODE
// Generate log(X). X passed in register v0. Return log(X) into v0.
// Generator parameters: 10 temporary FPU registers and temporary general
// purpose registers
void MacroAssembler::fast_log(FloatRegister vtmp0, FloatRegister vtmp1,
FloatRegister vtmp2, FloatRegister vtmp3,
FloatRegister vtmp4, FloatRegister vtmp5,
FloatRegister C1, FloatRegister C2,
FloatRegister C3, FloatRegister C4,
Register tmp1, Register tmp2, Register tmp3,
Register tmp4, Register tmp5) {
Label DONE, CHECK_CORNER_CASES, SMALL_VALUE, MAIN,
CHECKED_CORNER_CASES, RETURN_MINF_OR_NAN;
const long INF_OR_NAN_PREFIX = 0x7FF0;
const long MINF_OR_MNAN_PREFIX = 0xFFF0;
const long ONE_PREFIX = 0x3FF0;
movz(tmp2, ONE_PREFIX, 48);
movz(tmp4, 0x0010, 48);
fmovd(rscratch1, v0); // rscratch1 = AS_LONG_BITS(X)
lea(rscratch2, ExternalAddress((address)_L_tbl));
movz(tmp5, 0x7F);
add(tmp1, rscratch1, tmp4);
cmp(tmp2, rscratch1);
lsr(tmp3, rscratch1, 29);
ccmp(tmp1, tmp4, 0b1101 /* LE */, NE);
bfm(tmp3, tmp5, 41, 8);
fmovs(vtmp5, tmp3);
// Load coefficients from table. All coefficients are organized to be
// in specific order, because load below will load it in vectors to be used
// later in vector instructions. Load will be performed in parallel while
// branches are taken. C1 will contain vector of {C1_0, C1_1}, C2 =
// {C2_0, C2_1}, C3 = {C3_0, C3_1}, C4 = {C4_0, C4_1}
ld1(C1, C2, C3, C4, T2D, post(rscratch2, 64));
br(LE, CHECK_CORNER_CASES);
bind(CHECKED_CORNER_CASES);
// all corner cases are handled
frecpe(vtmp5, vtmp5, S); // vtmp5 ~= 1/vtmp5
lsr(tmp2, rscratch1, 48);
movz(tmp4, 0x77f0, 48);
fmovd(vtmp4, 1.0d);
movz(tmp1, INF_OR_NAN_PREFIX, 48);
bfm(tmp4, rscratch1, 0, 51); // tmp4 = 0x77F0 << 48 | mantissa(X)
// vtmp1 = AS_DOUBLE_BITS(0x77F0 << 48 | mantissa(X)) == mx
fmovd(vtmp1, tmp4);
subw(tmp2, tmp2, 16);
cmp(tmp2, 0x8000);
br(GE, SMALL_VALUE);
bind(MAIN);
fmovs(tmp3, vtmp5); // int intB0 = AS_INT_BITS(B);
mov(tmp5, 0x3FE0);
mov(rscratch1, 0xffffe00000000000);
andr(tmp2, tmp2, tmp1, LSR, 48); // hiWord & 0x7FF0
sub(tmp2, tmp2, tmp5); // tmp2 = hiWord & 0x7FF0 - 0x3FE0
scvtfwd(vtmp5, tmp2); // vtmp5 = (double)tmp2;
addw(tmp3, tmp3, 0x8000); // tmp3 = B
andr(tmp4, tmp4, rscratch1); // tmp4 == hi_part(mx)
andr(rscratch1, rscratch1, tmp3, LSL, 29); // rscratch1 = hi_part(B)
ubfm(tmp3, tmp3, 16, 23); // int index = (intB0 >> 16) && 0xFF
ldrq(vtmp2, Address(rscratch2, tmp3, Address::lsl(4))); // vtmp2 = _L_tbl[index]
// AS_LONG_BITS(vtmp1) & 0xffffe00000000000 // hi_part(mx)
fmovd(vtmp3, tmp4);
fmovd(vtmp0, rscratch1); // vtmp0 = hi_part(B)
fsubd(vtmp1, vtmp1, vtmp3); // vtmp1 -= vtmp3; // low_part(mx)
fnmsub(vtmp3, vtmp3, vtmp0, vtmp4); // vtmp3 = vtmp3*vtmp0 - vtmp4
fmlavs(vtmp2, T2D, C4, vtmp5, 0); // vtmp2 += {C4} * vtmp5
// vtmp1 = r = vtmp1 * vtmp0 + vtmp3 == low_part(mx) * hi_part(B) + (hi_part(mx)*hi_part(B) - 1.0)
fmaddd(vtmp1, vtmp1, vtmp0, vtmp3);
ins(vtmp5, D, vtmp2, 0, 1); // vtmp5 = vtmp2[1];
faddd(vtmp0, vtmp2, vtmp1); // vtmp0 = vtmp2 + vtmp1
fmlavs(C3, T2D, C2, vtmp1, 0); // {C3} += {C2}*vtmp1
fsubd(vtmp2, vtmp2, vtmp0); // vtmp2 -= vtmp0
fmuld(vtmp3, vtmp1, vtmp1); // vtmp3 = vtmp1*vtmp1
faddd(C4, vtmp1, vtmp2); // C4[0] = vtmp1 + vtmp2
fmlavs(C3, T2D, C1, vtmp3, 0); // {C3} += {C1}*vtmp3
faddd(C4, C4, vtmp5); // C4 += vtmp5
fmuld(vtmp4, vtmp3, vtmp1); // vtmp4 = vtmp3*vtmp1
faddd(vtmp0, vtmp0, C4); // vtmp0 += C4
fmlavs(C3, T2D, vtmp4, C3, 1); // {C3} += {vtmp4}*C3[1]
fmaddd(vtmp0, C3, vtmp3, vtmp0); // vtmp0 = C3 * vtmp3 + vtmp0
ret(lr);
block_comment("if (AS_LONG_BITS(hiWord) > 0x8000)"); {
bind(SMALL_VALUE);
movz(tmp2, 0x47F0, 48);
fmovd(vtmp1, tmp2);
fmuld(vtmp0, vtmp1, v0);
fmovd(vtmp1, vtmp0);
umov(tmp2, vtmp1, S, 3);
orr(vtmp0, T16B, vtmp0, vtmp4);
ushr(vtmp5, T2D, vtmp0, 27);
ushr(vtmp5, T4S, vtmp5, 2);
frecpe(vtmp5, vtmp5, S);
shl(vtmp1, T2D, vtmp1, 12);
ushr(vtmp1, T2D, vtmp1, 12);
b(MAIN);
}
block_comment("Corner cases"); {
bind(RETURN_MINF_OR_NAN);
movz(tmp1, MINF_OR_MNAN_PREFIX, 48);
orr(rscratch1, rscratch1, tmp1);
fmovd(v0, rscratch1);
ret(lr);
bind(CHECK_CORNER_CASES);
movz(tmp1, INF_OR_NAN_PREFIX, 48);
cmp(rscratch1, zr);
br(LE, RETURN_MINF_OR_NAN);
cmp(rscratch1, tmp1);
br(GE, DONE);
cmp(rscratch1, tmp2);
br(NE, CHECKED_CORNER_CASES);
fmovd(v0, 0.0d);
}
bind(DONE);
ret(lr);
}

File diff suppressed because it is too large

@ -1351,9 +1351,9 @@ class StubGenerator: public StubCodeGenerator {
BLOCK_COMMENT("Entry:");
}
DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY | ARRAYCOPY_DISJOINT;
DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
decorators |= IS_DEST_UNINITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
@ -1425,9 +1425,9 @@ class StubGenerator: public StubCodeGenerator {
__ cmp(rscratch1, count, Assembler::LSL, exact_log2(size));
__ br(Assembler::HS, nooverlap_target);
DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY;
DecoratorSet decorators = IN_HEAP | IS_ARRAY;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
decorators |= IS_DEST_UNINITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
@ -1789,10 +1789,10 @@ class StubGenerator: public StubCodeGenerator {
}
#endif //ASSERT
DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY | ARRAYCOPY_CHECKCAST;
DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_CHECKCAST;
bool is_oop = true;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
decorators |= IS_DEST_UNINITIALIZED;
}
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
@ -3990,6 +3990,701 @@ class StubGenerator: public StubCodeGenerator {
return entry;
}
address generate_dsin_dcos(bool isCos) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", isCos ? "libmDcos" : "libmDsin");
address start = __ pc();
__ generate_dsin_dcos(isCos, (address)StubRoutines::aarch64::_npio2_hw,
(address)StubRoutines::aarch64::_two_over_pi,
(address)StubRoutines::aarch64::_pio2,
(address)StubRoutines::aarch64::_dsin_coef,
(address)StubRoutines::aarch64::_dcos_coef);
return start;
}
address generate_dlog() {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "dlog");
address entry = __ pc();
FloatRegister vtmp0 = v0, vtmp1 = v1, vtmp2 = v2, vtmp3 = v3, vtmp4 = v4,
vtmp5 = v5, tmpC1 = v16, tmpC2 = v17, tmpC3 = v18, tmpC4 = v19;
Register tmp1 = r0, tmp2 = r1, tmp3 = r2, tmp4 = r3, tmp5 = r4;
__ fast_log(vtmp0, vtmp1, vtmp2, vtmp3, vtmp4, vtmp5, tmpC1, tmpC2, tmpC3,
tmpC4, tmp1, tmp2, tmp3, tmp4, tmp5);
return entry;
}
// code for comparing 16 bytes of strings with same encoding
void compare_string_16_bytes_same(Label &DIFF1, Label &DIFF2) {
Register result = r0, str1 = r1, cnt1 = r2, str2 = r3, tmp1 = r10, tmp2 = r11;
__ ldr(rscratch1, Address(__ post(str1, 8)));
__ eor(rscratch2, tmp1, tmp2);
__ ldr(cnt1, Address(__ post(str2, 8)));
__ cbnz(rscratch2, DIFF1);
__ ldr(tmp1, Address(__ post(str1, 8)));
__ eor(rscratch2, rscratch1, cnt1);
__ ldr(tmp2, Address(__ post(str2, 8)));
__ cbnz(rscratch2, DIFF2);
}
// code for comparing 16 characters of strings with Latin1 and Utf16 encoding
void compare_string_16_x_LU(Register tmpL, Register tmpU, Label &DIFF1,
Label &DIFF2) {
Register cnt1 = r2, tmp1 = r10, tmp2 = r11, tmp3 = r12;
FloatRegister vtmp = v1, vtmpZ = v0, vtmp3 = v2;
__ ldrq(vtmp, Address(__ post(tmp2, 16)));
__ ldr(tmpU, Address(__ post(cnt1, 8)));
__ zip1(vtmp3, __ T16B, vtmp, vtmpZ);
// now we have 32 bytes of characters (converted to U) in vtmp:vtmp3
__ fmovd(tmpL, vtmp3);
__ eor(rscratch2, tmp3, tmpL);
__ cbnz(rscratch2, DIFF2);
__ ldr(tmp3, Address(__ post(cnt1, 8)));
__ umov(tmpL, vtmp3, __ D, 1);
__ eor(rscratch2, tmpU, tmpL);
__ cbnz(rscratch2, DIFF1);
__ zip2(vtmp, __ T16B, vtmp, vtmpZ);
__ ldr(tmpU, Address(__ post(cnt1, 8)));
__ fmovd(tmpL, vtmp);
__ eor(rscratch2, tmp3, tmpL);
__ cbnz(rscratch2, DIFF2);
__ ldr(tmp3, Address(__ post(cnt1, 8)));
__ umov(tmpL, vtmp, __ D, 1);
__ eor(rscratch2, tmpU, tmpL);
__ cbnz(rscratch2, DIFF1);
}
// r0 = result
// r1 = str1
// r2 = cnt1
// r3 = str2
// r4 = cnt2
// r10 = tmp1
// r11 = tmp2
address generate_compare_long_string_different_encoding(bool isLU) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", isLU
? "compare_long_string_different_encoding LU"
: "compare_long_string_different_encoding UL");
address entry = __ pc();
Label SMALL_LOOP, TAIL, TAIL_LOAD_16, LOAD_LAST, DIFF1, DIFF2,
DONE, CALCULATE_DIFFERENCE, LARGE_LOOP_PREFETCH, SMALL_LOOP_ENTER,
LARGE_LOOP_PREFETCH_REPEAT1, LARGE_LOOP_PREFETCH_REPEAT2;
Register result = r0, str1 = r1, cnt1 = r2, str2 = r3, cnt2 = r4,
tmp1 = r10, tmp2 = r11, tmp3 = r12, tmp4 = r14;
FloatRegister vtmpZ = v0, vtmp = v1, vtmp3 = v2;
RegSet spilled_regs = RegSet::of(tmp3, tmp4);
int prefetchLoopExitCondition = MAX(32, SoftwarePrefetchHintDistance/2);
__ eor(vtmpZ, __ T16B, vtmpZ, vtmpZ);
// cnt2 == number of characters left to compare
// Check the first 4 symbols, which are already loaded (vtmp and tmp2 (LU) / tmp1 (UL))
__ zip1(vtmp, __ T8B, vtmp, vtmpZ);
__ add(str1, str1, isLU ? wordSize/2 : wordSize);
__ add(str2, str2, isLU ? wordSize : wordSize/2);
__ fmovd(isLU ? tmp1 : tmp2, vtmp);
__ subw(cnt2, cnt2, 8); // Already loaded 4 symbols. The last 4 are a special case.
__ add(str1, str1, cnt2, __ LSL, isLU ? 0 : 1);
__ eor(rscratch2, tmp1, tmp2);
__ add(str2, str2, cnt2, __ LSL, isLU ? 1 : 0);
__ mov(rscratch1, tmp2);
__ cbnz(rscratch2, CALCULATE_DIFFERENCE);
Register strU = isLU ? str2 : str1,
strL = isLU ? str1 : str2,
tmpU = isLU ? rscratch1 : tmp1, // where to keep U for comparison
tmpL = isLU ? tmp1 : rscratch1; // where to keep L for comparison
__ push(spilled_regs, sp);
__ sub(tmp2, strL, cnt2); // strL pointer to load from
__ sub(cnt1, strU, cnt2, __ LSL, 1); // strU pointer to load from
__ ldr(tmp3, Address(__ post(cnt1, 8)));
if (SoftwarePrefetchHintDistance >= 0) {
__ cmp(cnt2, prefetchLoopExitCondition);
__ br(__ LT, SMALL_LOOP);
__ bind(LARGE_LOOP_PREFETCH);
__ prfm(Address(tmp2, SoftwarePrefetchHintDistance));
__ mov(tmp4, 2);
__ prfm(Address(cnt1, SoftwarePrefetchHintDistance));
__ bind(LARGE_LOOP_PREFETCH_REPEAT1);
compare_string_16_x_LU(tmpL, tmpU, DIFF1, DIFF2);
__ subs(tmp4, tmp4, 1);
__ br(__ GT, LARGE_LOOP_PREFETCH_REPEAT1);
__ prfm(Address(cnt1, SoftwarePrefetchHintDistance));
__ mov(tmp4, 2);
__ bind(LARGE_LOOP_PREFETCH_REPEAT2);
compare_string_16_x_LU(tmpL, tmpU, DIFF1, DIFF2);
__ subs(tmp4, tmp4, 1);
__ br(__ GT, LARGE_LOOP_PREFETCH_REPEAT2);
__ sub(cnt2, cnt2, 64);
__ cmp(cnt2, prefetchLoopExitCondition);
__ br(__ GE, LARGE_LOOP_PREFETCH);
}
__ cbz(cnt2, LOAD_LAST); // no characters left except last load
__ subs(cnt2, cnt2, 16);
__ br(__ LT, TAIL);
__ b(SMALL_LOOP_ENTER);
__ bind(SMALL_LOOP); // smaller loop
__ subs(cnt2, cnt2, 16);
__ bind(SMALL_LOOP_ENTER);
compare_string_16_x_LU(tmpL, tmpU, DIFF1, DIFF2);
__ br(__ GE, SMALL_LOOP);
__ cbz(cnt2, LOAD_LAST);
__ bind(TAIL); // 1..15 characters left
__ cmp(cnt2, -8);
__ br(__ GT, TAIL_LOAD_16);
__ ldrd(vtmp, Address(tmp2));
__ zip1(vtmp3, __ T8B, vtmp, vtmpZ);
__ ldr(tmpU, Address(__ post(cnt1, 8)));
__ fmovd(tmpL, vtmp3);
__ eor(rscratch2, tmp3, tmpL);
__ cbnz(rscratch2, DIFF2);
__ umov(tmpL, vtmp3, __ D, 1);
__ eor(rscratch2, tmpU, tmpL);
__ cbnz(rscratch2, DIFF1);
__ b(LOAD_LAST);
__ bind(TAIL_LOAD_16);
__ ldrq(vtmp, Address(tmp2));
__ ldr(tmpU, Address(__ post(cnt1, 8)));
__ zip1(vtmp3, __ T16B, vtmp, vtmpZ);
__ zip2(vtmp, __ T16B, vtmp, vtmpZ);
__ fmovd(tmpL, vtmp3);
__ eor(rscratch2, tmp3, tmpL);
__ cbnz(rscratch2, DIFF2);
__ ldr(tmp3, Address(__ post(cnt1, 8)));
__ umov(tmpL, vtmp3, __ D, 1);
__ eor(rscratch2, tmpU, tmpL);
__ cbnz(rscratch2, DIFF1);
__ ldr(tmpU, Address(__ post(cnt1, 8)));
__ fmovd(tmpL, vtmp);
__ eor(rscratch2, tmp3, tmpL);
__ cbnz(rscratch2, DIFF2);
__ umov(tmpL, vtmp, __ D, 1);
__ eor(rscratch2, tmpU, tmpL);
__ cbnz(rscratch2, DIFF1);
__ b(LOAD_LAST);
__ bind(DIFF2);
__ mov(tmpU, tmp3);
__ bind(DIFF1);
__ pop(spilled_regs, sp);
__ b(CALCULATE_DIFFERENCE);
__ bind(LOAD_LAST);
__ pop(spilled_regs, sp);
__ ldrs(vtmp, Address(strL));
__ ldr(tmpU, Address(strU));
__ zip1(vtmp, __ T8B, vtmp, vtmpZ);
__ fmovd(tmpL, vtmp);
__ eor(rscratch2, tmpU, tmpL);
__ cbz(rscratch2, DONE);
// Find the first differing characters in the longwords and
// compute their difference (a scalar sketch follows this stub).
__ bind(CALCULATE_DIFFERENCE);
__ rev(rscratch2, rscratch2);
__ clz(rscratch2, rscratch2);
__ andr(rscratch2, rscratch2, -16);
__ lsrv(tmp1, tmp1, rscratch2);
__ uxthw(tmp1, tmp1);
__ lsrv(rscratch1, rscratch1, rscratch2);
__ uxthw(rscratch1, rscratch1);
__ subw(result, tmp1, rscratch1);
__ bind(DONE);
__ ret(lr);
return entry;
}
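// Scalar view of the CALCULATE_DIFFERENCE sequence above (an illustration,
// assuming a GCC/Clang builtin): on the XOR of two words, rev + clz locates
// the first differing byte counted from the low end, and anding with -16
// rounds that bit index down to a 16-bit character boundary; the same
// boundary falls out of a count-trailing-zeros masked with ~15.
#include <cstdint>
static inline int char_difference_sketch(uint64_t w1, uint64_t w2) {
  uint64_t diff = w1 ^ w2;                // non-zero on entry to CALCULATE_DIFFERENCE
  int bit = __builtin_ctzll(diff) & ~15;  // same boundary as clz(rev(diff)) & -16
  uint16_t c1 = (uint16_t)(w1 >> bit);    // lsrv + uxthw
  uint16_t c2 = (uint16_t)(w2 >> bit);    // lsrv + uxthw
  return (int)c1 - (int)c2;               // subw(result, tmp1, rscratch1)
}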
// r0 = result
// r1 = str1
// r2 = cnt1
// r3 = str2
// r4 = cnt2
// r10 = tmp1
// r11 = tmp2
address generate_compare_long_string_same_encoding(bool isLL) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", isLL
? "compare_long_string_same_encoding LL"
: "compare_long_string_same_encoding UU");
address entry = __ pc();
Register result = r0, str1 = r1, cnt1 = r2, str2 = r3, cnt2 = r4,
tmp1 = r10, tmp2 = r11;
Label SMALL_LOOP, LARGE_LOOP_PREFETCH, CHECK_LAST, DIFF2, TAIL,
LENGTH_DIFF, DIFF, LAST_CHECK_AND_LENGTH_DIFF,
DIFF_LAST_POSITION, DIFF_LAST_POSITION2;
// exit the large loop when fewer than 64 bytes are left to read or when the
// next prefetch would reach beyond the array bounds
int largeLoopExitCondition = MAX(64, SoftwarePrefetchHintDistance)/(isLL ? 1 : 2);
// cnt1/cnt2 contain the number of characters to compare; cnt1 can be re-used
// update the cnt2 counter for the 8 bytes already loaded
__ sub(cnt2, cnt2, wordSize/(isLL ? 1 : 2));
// update pointers, because of previous read
__ add(str1, str1, wordSize);
__ add(str2, str2, wordSize);
if (SoftwarePrefetchHintDistance >= 0) {
__ bind(LARGE_LOOP_PREFETCH);
__ prfm(Address(str1, SoftwarePrefetchHintDistance));
__ prfm(Address(str2, SoftwarePrefetchHintDistance));
compare_string_16_bytes_same(DIFF, DIFF2);
compare_string_16_bytes_same(DIFF, DIFF2);
__ sub(cnt2, cnt2, isLL ? 64 : 32);
compare_string_16_bytes_same(DIFF, DIFF2);
__ cmp(cnt2, largeLoopExitCondition);
compare_string_16_bytes_same(DIFF, DIFF2);
__ br(__ GT, LARGE_LOOP_PREFETCH);
__ cbz(cnt2, LAST_CHECK_AND_LENGTH_DIFF); // no more chars left?
// less than 16 bytes left?
__ subs(cnt2, cnt2, isLL ? 16 : 8);
__ br(__ LT, TAIL);
}
__ bind(SMALL_LOOP);
compare_string_16_bytes_same(DIFF, DIFF2);
__ subs(cnt2, cnt2, isLL ? 16 : 8);
__ br(__ GE, SMALL_LOOP);
__ bind(TAIL);
__ adds(cnt2, cnt2, isLL ? 16 : 8);
__ br(__ EQ, LAST_CHECK_AND_LENGTH_DIFF);
__ subs(cnt2, cnt2, isLL ? 8 : 4);
__ br(__ LE, CHECK_LAST);
__ eor(rscratch2, tmp1, tmp2);
__ cbnz(rscratch2, DIFF);
__ ldr(tmp1, Address(__ post(str1, 8)));
__ ldr(tmp2, Address(__ post(str2, 8)));
__ sub(cnt2, cnt2, isLL ? 8 : 4);
__ bind(CHECK_LAST);
if (!isLL) {
__ add(cnt2, cnt2, cnt2); // now in bytes
}
__ eor(rscratch2, tmp1, tmp2);
__ cbnz(rscratch2, DIFF);
__ ldr(rscratch1, Address(str1, cnt2));
__ ldr(cnt1, Address(str2, cnt2));
__ eor(rscratch2, rscratch1, cnt1);
__ cbz(rscratch2, LENGTH_DIFF);
// Find the first differing characters in the longwords and
// compute their difference.
__ bind(DIFF2);
__ rev(rscratch2, rscratch2);
__ clz(rscratch2, rscratch2);
__ andr(rscratch2, rscratch2, isLL ? -8 : -16);
__ lsrv(rscratch1, rscratch1, rscratch2);
if (isLL) {
__ lsrv(cnt1, cnt1, rscratch2);
__ uxtbw(rscratch1, rscratch1);
__ uxtbw(cnt1, cnt1);
} else {
__ lsrv(cnt1, cnt1, rscratch2);
__ uxthw(rscratch1, rscratch1);
__ uxthw(cnt1, cnt1);
}
__ subw(result, rscratch1, cnt1);
__ b(LENGTH_DIFF);
__ bind(DIFF);
__ rev(rscratch2, rscratch2);
__ clz(rscratch2, rscratch2);
__ andr(rscratch2, rscratch2, isLL ? -8 : -16);
__ lsrv(tmp1, tmp1, rscratch2);
if (isLL) {
__ lsrv(tmp2, tmp2, rscratch2);
__ uxtbw(tmp1, tmp1);
__ uxtbw(tmp2, tmp2);
} else {
__ lsrv(tmp2, tmp2, rscratch2);
__ uxthw(tmp1, tmp1);
__ uxthw(tmp2, tmp2);
}
__ subw(result, tmp1, tmp2);
__ b(LENGTH_DIFF);
__ bind(LAST_CHECK_AND_LENGTH_DIFF);
__ eor(rscratch2, tmp1, tmp2);
__ cbnz(rscratch2, DIFF);
__ bind(LENGTH_DIFF);
__ ret(lr);
return entry;
}
void generate_compare_long_strings() {
StubRoutines::aarch64::_compare_long_string_LL
= generate_compare_long_string_same_encoding(true);
StubRoutines::aarch64::_compare_long_string_UU
= generate_compare_long_string_same_encoding(false);
StubRoutines::aarch64::_compare_long_string_LU
= generate_compare_long_string_different_encoding(true);
StubRoutines::aarch64::_compare_long_string_UL
= generate_compare_long_string_different_encoding(false);
}
// R0 = result
// R1 = str2
// R2 = cnt1
// R3 = str1
// R4 = cnt2
// This generic linear code uses a few additional ideas that make it faster:
// 1) we can safely keep at least the first register of the pattern (since
// length >= 8) in order to skip the initial load (helps on systems with a
// single load pipeline)
// 2) we can use a "fast" algorithm for finding a single character, checking
// each loaded register with one branch instead of one branch per symbol;
// this is where constants like 0x0101...01, 0x00010001...0001, 0x7f7f...7f,
// 0x7fff7fff...7fff come from (see the scalar sketch after this comment)
// 3) after loading and analyzing the first register of the source string, it
// can be used to search for every occurrence of the first character, saving
// a few loads compared with a simpler-but-slower implementation
// 4) to avoid lots of push/pop operations, the code below heavily re-uses,
// re-initializes and compresses register values, which makes the code larger
// and a bit less readable; however, most of the extra operations are issued
// during loads or branches, so the penalty is minimal
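// The "fast" single-character search from point 2), as a scalar sketch
// (illustration only; the name is hypothetical). Broadcasting the first
// pattern character across a word and XORing turns matching bytes into zero
// bytes; the classic SWAR test (v - 0x01..01) & ~v & 0x80..80 then flags
// those zero bytes, giving one branch per loaded register instead of one per
// character. The Latin1 (byte) case is shown; the UTF-16 case uses the
// 16-bit analogues of the constants.
#include <cstdint>
static inline uint64_t match_bytes_sketch(uint64_t word, uint8_t first_char) {
  const uint64_t ones  = 0x0101010101010101ULL;  // the 0x0101...01 constant
  const uint64_t highs = 0x7f7f7f7f7f7f7f7fULL;  // the 0x7f7f...7f constant
  uint64_t v = word ^ (ones * first_char);       // matching bytes become 0x00
  return (v - ones) & ~(v | highs);              // high bit set in each matching byte
}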
address generate_string_indexof_linear(bool str1_isL, bool str2_isL) {
const char* stubName = str1_isL
? (str2_isL ? "indexof_linear_ll" : "indexof_linear_ul")
: "indexof_linear_uu";
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", stubName);
address entry = __ pc();
int str1_chr_size = str1_isL ? 1 : 2;
int str2_chr_size = str2_isL ? 1 : 2;
int str1_chr_shift = str1_isL ? 0 : 1;
int str2_chr_shift = str2_isL ? 0 : 1;
bool isL = str1_isL && str2_isL;
// parameters
Register result = r0, str2 = r1, cnt1 = r2, str1 = r3, cnt2 = r4;
// temporary registers
Register tmp1 = r20, tmp2 = r21, tmp3 = r22, tmp4 = r23;
RegSet spilled_regs = RegSet::range(tmp1, tmp4);
// redefinitions
Register ch1 = rscratch1, ch2 = rscratch2, first = tmp3;
__ push(spilled_regs, sp);
Label L_LOOP, L_LOOP_PROCEED, L_SMALL, L_HAS_ZERO, L_SMALL_MATCH_LOOP,
L_HAS_ZERO_LOOP, L_CMP_LOOP, L_CMP_LOOP_NOMATCH, L_SMALL_PROCEED,
L_SMALL_HAS_ZERO_LOOP, L_SMALL_CMP_LOOP_NOMATCH, L_SMALL_CMP_LOOP,
L_POST_LOOP, L_CMP_LOOP_LAST_CMP, L_HAS_ZERO_LOOP_NOMATCH,
L_SMALL_CMP_LOOP_LAST_CMP, L_SMALL_CMP_LOOP_LAST_CMP2,
L_CMP_LOOP_LAST_CMP2, DONE, NOMATCH;
// Read a whole register from str1. This is safe because length >= 8 here
__ ldr(ch1, Address(str1));
// Read a whole register from str2. This is safe because length >= 8 here
__ ldr(ch2, Address(str2));
__ andr(first, ch1, str1_isL ? 0xFF : 0xFFFF);
if (str1_isL != str2_isL) {
__ eor(v0, __ T16B, v0, v0);
}
__ mov(tmp1, str2_isL ? 0x0101010101010101 : 0x0001000100010001);
__ mul(first, first, tmp1);
// check whether fewer than one register's worth of characters remains
__ subs(cnt2, cnt2, wordSize/str2_chr_size - 1);
if (str1_isL != str2_isL) {
__ fmovd(v1, ch1);
}
__ br(__ LE, L_SMALL);
__ eor(ch2, first, ch2);
if (str1_isL != str2_isL) {
__ zip1(v1, __ T16B, v1, v0);
}
__ sub(tmp2, ch2, tmp1);
__ orr(ch2, ch2, str2_isL ? 0x7f7f7f7f7f7f7f7f : 0x7fff7fff7fff7fff);
__ bics(tmp2, tmp2, ch2);
if (str1_isL != str2_isL) {
__ fmovd(ch1, v1);
}
__ br(__ NE, L_HAS_ZERO);
__ subs(cnt2, cnt2, wordSize/str2_chr_size);
__ add(result, result, wordSize/str2_chr_size);
__ add(str2, str2, wordSize);
__ br(__ LT, L_POST_LOOP);
__ BIND(L_LOOP);
__ ldr(ch2, Address(str2));
__ eor(ch2, first, ch2);
__ sub(tmp2, ch2, tmp1);
__ orr(ch2, ch2, str2_isL ? 0x7f7f7f7f7f7f7f7f : 0x7fff7fff7fff7fff);
__ bics(tmp2, tmp2, ch2);
__ br(__ NE, L_HAS_ZERO);
__ BIND(L_LOOP_PROCEED);
__ subs(cnt2, cnt2, wordSize/str2_chr_size);
__ add(str2, str2, wordSize);
__ add(result, result, wordSize/str2_chr_size);
__ br(__ GE, L_LOOP);
__ BIND(L_POST_LOOP);
__ cmp(cnt2, -wordSize/str2_chr_size); // no extra characters to check
__ br(__ LE, NOMATCH);
__ ldr(ch2, Address(str2));
__ sub(cnt2, zr, cnt2, __ LSL, LogBitsPerByte + str2_chr_shift);
__ eor(ch2, first, ch2);
__ sub(tmp2, ch2, tmp1);
__ orr(ch2, ch2, str2_isL ? 0x7f7f7f7f7f7f7f7f : 0x7fff7fff7fff7fff);
__ mov(tmp4, -1); // all bits set
__ b(L_SMALL_PROCEED);
__ align(OptoLoopAlignment);
__ BIND(L_SMALL);
__ sub(cnt2, zr, cnt2, __ LSL, LogBitsPerByte + str2_chr_shift);
__ eor(ch2, first, ch2);
if (str1_isL != str2_isL) {
__ zip1(v1, __ T16B, v1, v0);
}
__ sub(tmp2, ch2, tmp1);
__ mov(tmp4, -1); // all bits set
__ orr(ch2, ch2, str2_isL ? 0x7f7f7f7f7f7f7f7f : 0x7fff7fff7fff7fff);
if (str1_isL != str2_isL) {
__ fmovd(ch1, v1); // move converted 4 symbols
}
__ BIND(L_SMALL_PROCEED);
__ lsrv(tmp4, tmp4, cnt2); // mask. zeroes on useless bits.
__ bic(tmp2, tmp2, ch2);
__ ands(tmp2, tmp2, tmp4); // clear useless bits and check
__ rbit(tmp2, tmp2);
__ br(__ EQ, NOMATCH);
__ BIND(L_SMALL_HAS_ZERO_LOOP);
__ clz(tmp4, tmp2); // potentially long. Up to 4 cycles on some CPUs
__ cmp(cnt1, wordSize/str2_chr_size);
__ br(__ LE, L_SMALL_CMP_LOOP_LAST_CMP2);
if (str2_isL) { // LL
__ add(str2, str2, tmp4, __ LSR, LogBitsPerByte); // address of "index"
__ ldr(ch2, Address(str2)); // read whole register of str2. Safe.
__ lslv(tmp2, tmp2, tmp4); // shift off leading zeroes from match info
__ add(result, result, tmp4, __ LSR, LogBitsPerByte);
__ lsl(tmp2, tmp2, 1); // shift off leading "1" from match info
} else {
__ mov(ch2, 0xE); // all bits in byte set except last one
__ andr(ch2, ch2, tmp4, __ LSR, LogBitsPerByte); // byte shift amount
__ ldr(ch2, Address(str2, ch2)); // read whole register of str2. Safe.
__ lslv(tmp2, tmp2, tmp4);
__ add(result, result, tmp4, __ LSR, LogBitsPerByte + str2_chr_shift);
__ add(str2, str2, tmp4, __ LSR, LogBitsPerByte + str2_chr_shift);
__ lsl(tmp2, tmp2, 1); // shift off leading "1" from match info
__ add(str2, str2, tmp4, __ LSR, LogBitsPerByte + str2_chr_shift);
}
__ cmp(ch1, ch2);
__ mov(tmp4, wordSize/str2_chr_size);
__ br(__ NE, L_SMALL_CMP_LOOP_NOMATCH);
__ BIND(L_SMALL_CMP_LOOP);
str1_isL ? __ ldrb(first, Address(str1, tmp4, Address::lsl(str1_chr_shift)))
: __ ldrh(first, Address(str1, tmp4, Address::lsl(str1_chr_shift)));
str2_isL ? __ ldrb(ch2, Address(str2, tmp4, Address::lsl(str2_chr_shift)))
: __ ldrh(ch2, Address(str2, tmp4, Address::lsl(str2_chr_shift)));
__ add(tmp4, tmp4, 1);
__ cmp(tmp4, cnt1);
__ br(__ GE, L_SMALL_CMP_LOOP_LAST_CMP);
__ cmp(first, ch2);
__ br(__ EQ, L_SMALL_CMP_LOOP);
__ BIND(L_SMALL_CMP_LOOP_NOMATCH);
__ cbz(tmp2, NOMATCH); // no more matches. exit
__ clz(tmp4, tmp2);
__ add(result, result, 1); // advance index
__ add(str2, str2, str2_chr_size); // advance pointer
__ b(L_SMALL_HAS_ZERO_LOOP);
__ align(OptoLoopAlignment);
__ BIND(L_SMALL_CMP_LOOP_LAST_CMP);
__ cmp(first, ch2);
__ br(__ NE, L_SMALL_CMP_LOOP_NOMATCH);
__ b(DONE);
__ align(OptoLoopAlignment);
__ BIND(L_SMALL_CMP_LOOP_LAST_CMP2);
if (str2_isL) { // LL
__ add(str2, str2, tmp4, __ LSR, LogBitsPerByte); // address of "index"
__ ldr(ch2, Address(str2)); // read whole register of str2. Safe.
__ lslv(tmp2, tmp2, tmp4); // shift off leading zeroes from match info
__ add(result, result, tmp4, __ LSR, LogBitsPerByte);
__ lsl(tmp2, tmp2, 1); // shift off leading "1" from match info
} else {
__ mov(ch2, 0xE); // all bits in byte set except last one
__ andr(ch2, ch2, tmp4, __ LSR, LogBitsPerByte); // byte shift amount
__ ldr(ch2, Address(str2, ch2)); // read whole register of str2. Safe.
__ lslv(tmp2, tmp2, tmp4);
__ add(result, result, tmp4, __ LSR, LogBitsPerByte + str2_chr_shift);
__ add(str2, str2, tmp4, __ LSR, LogBitsPerByte + str2_chr_shift);
__ lsl(tmp2, tmp2, 1); // shift off leading "1" from match info
__ add(str2, str2, tmp4, __ LSR, LogBitsPerByte + str2_chr_shift);
}
__ cmp(ch1, ch2);
__ br(__ NE, L_SMALL_CMP_LOOP_NOMATCH);
__ b(DONE);
__ align(OptoLoopAlignment);
__ BIND(L_HAS_ZERO);
__ rbit(tmp2, tmp2);
__ clz(tmp4, tmp2); // potentially long. Up to 4 cycles on some CPUs
// Now compress the counters (cnt2 and cnt1) into one register. This is fine
// because both counters are 32-bit and are not changed in this loop; they are
// simply restored on exit. So cnt1 can be re-used in this loop.
__ orr(cnt2, cnt2, cnt1, __ LSL, BitsPerByte * wordSize / 2);
__ sub(result, result, 1);
__ BIND(L_HAS_ZERO_LOOP);
__ mov(cnt1, wordSize/str2_chr_size);
__ cmp(cnt1, cnt2, __ LSR, BitsPerByte * wordSize / 2);
__ br(__ GE, L_CMP_LOOP_LAST_CMP2); // case of 8 bytes only to compare
if (str2_isL) {
__ lsr(ch2, tmp4, LogBitsPerByte + str2_chr_shift); // char index
__ ldr(ch2, Address(str2, ch2)); // read whole register of str2. Safe.
__ lslv(tmp2, tmp2, tmp4);
__ add(str2, str2, tmp4, __ LSR, LogBitsPerByte + str2_chr_shift);
__ add(tmp4, tmp4, 1);
__ add(result, result, tmp4, __ LSR, LogBitsPerByte + str2_chr_shift);
__ lsl(tmp2, tmp2, 1);
__ mov(tmp4, wordSize/str2_chr_size);
} else {
__ mov(ch2, 0xE);
__ andr(ch2, ch2, tmp4, __ LSR, LogBitsPerByte); // byte shift amount
__ ldr(ch2, Address(str2, ch2)); // read whole register of str2. Safe.
__ lslv(tmp2, tmp2, tmp4);
__ add(tmp4, tmp4, 1);
__ add(result, result, tmp4, __ LSR, LogBitsPerByte + str2_chr_shift);
__ add(str2, str2, tmp4, __ LSR, LogBitsPerByte);
__ lsl(tmp2, tmp2, 1);
__ mov(tmp4, wordSize/str2_chr_size);
__ sub(str2, str2, str2_chr_size);
}
__ cmp(ch1, ch2);
__ mov(tmp4, wordSize/str2_chr_size);
__ br(__ NE, L_CMP_LOOP_NOMATCH);
__ BIND(L_CMP_LOOP);
str1_isL ? __ ldrb(cnt1, Address(str1, tmp4, Address::lsl(str1_chr_shift)))
: __ ldrh(cnt1, Address(str1, tmp4, Address::lsl(str1_chr_shift)));
str2_isL ? __ ldrb(ch2, Address(str2, tmp4, Address::lsl(str2_chr_shift)))
: __ ldrh(ch2, Address(str2, tmp4, Address::lsl(str2_chr_shift)));
__ add(tmp4, tmp4, 1);
__ cmp(tmp4, cnt2, __ LSR, BitsPerByte * wordSize / 2);
__ br(__ GE, L_CMP_LOOP_LAST_CMP);
__ cmp(cnt1, ch2);
__ br(__ EQ, L_CMP_LOOP);
__ BIND(L_CMP_LOOP_NOMATCH);
// reached when the current candidate did not match
__ cbz(tmp2, L_HAS_ZERO_LOOP_NOMATCH); // no more matches. Proceed to main loop
__ clz(tmp4, tmp2);
__ add(str2, str2, str2_chr_size); // advance pointer
__ b(L_HAS_ZERO_LOOP);
__ align(OptoLoopAlignment);
__ BIND(L_CMP_LOOP_LAST_CMP);
__ cmp(cnt1, ch2);
__ br(__ NE, L_CMP_LOOP_NOMATCH);
__ b(DONE);
__ align(OptoLoopAlignment);
__ BIND(L_CMP_LOOP_LAST_CMP2);
if (str2_isL) {
__ lsr(ch2, tmp4, LogBitsPerByte + str2_chr_shift); // char index
__ ldr(ch2, Address(str2, ch2)); // read whole register of str2. Safe.
__ lslv(tmp2, tmp2, tmp4);
__ add(str2, str2, tmp4, __ LSR, LogBitsPerByte + str2_chr_shift);
__ add(tmp4, tmp4, 1);
__ add(result, result, tmp4, __ LSR, LogBitsPerByte + str2_chr_shift);
__ lsl(tmp2, tmp2, 1);
} else {
__ mov(ch2, 0xE);
__ andr(ch2, ch2, tmp4, __ LSR, LogBitsPerByte); // byte shift amount
__ ldr(ch2, Address(str2, ch2)); // read whole register of str2. Safe.
__ lslv(tmp2, tmp2, tmp4);
__ add(tmp4, tmp4, 1);
__ add(result, result, tmp4, __ LSR, LogBitsPerByte + str2_chr_shift);
__ add(str2, str2, tmp4, __ LSR, LogBitsPerByte);
__ lsl(tmp2, tmp2, 1);
__ sub(str2, str2, str2_chr_size);
}
__ cmp(ch1, ch2);
__ br(__ NE, L_CMP_LOOP_NOMATCH);
__ b(DONE);
__ align(OptoLoopAlignment);
__ BIND(L_HAS_ZERO_LOOP_NOMATCH);
// 1) Restore the "result" index. The index was wordSize/str2_chr_size * N
// until the L_HAS_ZERO block. One byte octet was analyzed in
// L_HAS_ZERO_LOOP, so result was increased by at most
// wordSize/str2_chr_size - 1 and the respective higher bits were not
// changed. L_LOOP_PROCEED will increase result by the number of analyzed
// characters, so we can simply reset the lower bits of result here: clear
// the 2 lower bits for UU/UL and 3 bits for LL (a scalar sketch follows
// this stub).
// 2) Restore the cnt1 and cnt2 values from the "compressed" cnt2.
// 3) Rewind str2 to the start of the current octet: result & 7 (LL) or
// result & 3 (UU/UL) is the index of the last analyzed substring inside the
// current octet, so str2 is rewound by that amount and L_LOOP_PROCEED then
// advances it to the next octet.
__ andr(tmp2, result, wordSize/str2_chr_size - 1); // symbols analyzed
__ lsr(cnt1, cnt2, BitsPerByte * wordSize / 2);
__ bfm(result, zr, 0, 2 - str2_chr_shift);
__ sub(str2, str2, tmp2, __ LSL, str2_chr_shift); // restore str2
__ movw(cnt2, cnt2);
__ b(L_LOOP_PROCEED);
__ align(OptoLoopAlignment);
__ BIND(NOMATCH);
__ mov(result, -1);
__ BIND(DONE);
__ pop(spilled_regs, sp);
__ ret(lr);
return entry;
}
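// Scalar sketch of the index/pointer restore done in L_HAS_ZERO_LOOP_NOMATCH
// above (illustration only; names are hypothetical): clearing the low bits of
// result rolls the index back to the start of the current octet, and those
// same low bits say how far str2 had advanced inside it.
#include <cstdint>
static inline void restore_octet_position_sketch(uint64_t& result, const char*& str2,
                                                 int str2_chr_size /* 1 or 2 */) {
  uint64_t per_word = 8 / str2_chr_size;        // wordSize / str2_chr_size
  uint64_t analyzed = result & (per_word - 1);  // symbols analyzed in this octet
  result &= ~(per_word - 1);                    // bfm(result, zr, 0, 2 - str2_chr_shift)
  str2 -= analyzed * str2_chr_size;             // sub(str2, str2, tmp2, LSL, str2_chr_shift)
}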
void generate_string_indexof_stubs() {
StubRoutines::aarch64::_string_indexof_linear_ll = generate_string_indexof_linear(true, true);
StubRoutines::aarch64::_string_indexof_linear_uu = generate_string_indexof_linear(false, false);
StubRoutines::aarch64::_string_indexof_linear_ul = generate_string_indexof_linear(true, false);
}
void inflate_and_store_2_fp_registers(bool generatePrfm,
FloatRegister src1, FloatRegister src2) {
Register dst = r1;
__ zip1(v1, __ T16B, src1, v0);
__ zip2(v2, __ T16B, src1, v0);
if (generatePrfm) {
__ prfm(Address(dst, SoftwarePrefetchHintDistance), PSTL1STRM);
}
__ zip1(v3, __ T16B, src2, v0);
__ zip2(v4, __ T16B, src2, v0);
__ st1(v1, v2, v3, v4, __ T16B, Address(__ post(dst, 64)));
}
// R0 = src
// R1 = dst
// R2 = len
// R3 = len >> 3
// V0 = 0
// v1 = loaded 8 bytes
address generate_large_byte_array_inflate() {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "large_byte_array_inflate");
address entry = __ pc();
Label LOOP, LOOP_START, LOOP_PRFM, LOOP_PRFM_START, DONE;
Register src = r0, dst = r1, len = r2, octetCounter = r3;
const int large_loop_threshold = MAX(64, SoftwarePrefetchHintDistance)/8 + 4;
// do one more 8-byte read so that the address is 16-byte aligned in most
// cases, and a single store instruction can be used
__ ldrd(v2, __ post(src, 8));
__ sub(octetCounter, octetCounter, 2);
__ zip1(v1, __ T16B, v1, v0);
__ zip1(v2, __ T16B, v2, v0);
__ st1(v1, v2, __ T16B, __ post(dst, 32));
__ ld1(v3, v4, v5, v6, __ T16B, Address(__ post(src, 64)));
__ cmp(octetCounter, large_loop_threshold);
__ br(__ LE, LOOP_START);
__ b(LOOP_PRFM_START);
__ bind(LOOP_PRFM);
__ ld1(v3, v4, v5, v6, __ T16B, Address(__ post(src, 64)));
__ bind(LOOP_PRFM_START);
__ prfm(Address(src, SoftwarePrefetchHintDistance));
__ sub(octetCounter, octetCounter, 8);
__ cmp(octetCounter, large_loop_threshold);
inflate_and_store_2_fp_registers(true, v3, v4);
inflate_and_store_2_fp_registers(true, v5, v6);
__ br(__ GT, LOOP_PRFM);
__ cmp(octetCounter, 8);
__ br(__ LT, DONE);
__ bind(LOOP);
__ ld1(v3, v4, v5, v6, __ T16B, Address(__ post(src, 64)));
__ bind(LOOP_START);
__ sub(octetCounter, octetCounter, 8);
__ cmp(octetCounter, 8);
inflate_and_store_2_fp_registers(false, v3, v4);
inflate_and_store_2_fp_registers(false, v5, v6);
__ br(__ GE, LOOP);
__ bind(DONE);
__ ret(lr);
return entry;
}
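// What the stub computes, as a scalar sketch (illustration only; the name is
// hypothetical): every Latin1 byte is zero-extended to a UTF-16 character.
// The stub produces 32 characters per main-loop iteration using zip1/zip2
// against a zero vector, plus software prefetch for large arrays.
#include <cstdint>
#include <cstddef>
static void byte_array_inflate_sketch(const uint8_t* src, uint16_t* dst, size_t len) {
  for (size_t i = 0; i < len; i++) {
    dst[i] = src[i];  // zero-extension: the scalar form of zip1(v, T16B, v, v0)
  }
}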
/**
* Arguments:
@ -5044,6 +5739,18 @@ class StubGenerator: public StubCodeGenerator {
if (UseCRC32CIntrinsics) {
StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
}
if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
StubRoutines::_dlog = generate_dlog();
}
if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
StubRoutines::_dsin = generate_dsin_dcos(/* isCos = */ false);
}
if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
StubRoutines::_dcos = generate_dsin_dcos(/* isCos = */ true);
}
}
void generate_all() {
@ -5078,6 +5785,13 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::aarch64::_large_array_equals = generate_large_array_equals();
}
generate_compare_long_strings();
generate_string_indexof_stubs();
// byte_array_inflate stub for large arrays.
StubRoutines::aarch64::_large_byte_array_inflate = generate_large_byte_array_inflate();
if (UseMultiplyToLenIntrinsic) {
StubRoutines::_multiplyToLen = generate_multiplyToLen();
}

@ -48,6 +48,14 @@ address StubRoutines::aarch64::_zero_blocks = NULL;
address StubRoutines::aarch64::_has_negatives = NULL;
address StubRoutines::aarch64::_has_negatives_long = NULL;
address StubRoutines::aarch64::_large_array_equals = NULL;
address StubRoutines::aarch64::_compare_long_string_LL = NULL;
address StubRoutines::aarch64::_compare_long_string_UU = NULL;
address StubRoutines::aarch64::_compare_long_string_LU = NULL;
address StubRoutines::aarch64::_compare_long_string_UL = NULL;
address StubRoutines::aarch64::_string_indexof_linear_ll = NULL;
address StubRoutines::aarch64::_string_indexof_linear_uu = NULL;
address StubRoutines::aarch64::_string_indexof_linear_ul = NULL;
address StubRoutines::aarch64::_large_byte_array_inflate = NULL;
bool StubRoutines::aarch64::_completed = false;
/**
@ -278,3 +286,87 @@ juint StubRoutines::aarch64::_crc_table[] ATTRIBUTE_ALIGNED(4096) =
0x02D578EDUL, 0x7DAEED62UL, // word swap
0xD502ED78UL, 0xAE7D62EDUL, // byte swap of word swap
};
juint StubRoutines::aarch64::_npio2_hw[] __attribute__ ((aligned(64))) = {
// first, various coefficient values: 0.5, invpio2, pio2_1, pio2_1t, pio2_2,
// pio2_2t, pio2_3, pio2_3t
// This is a small optimization which keeps the double[8] values in the int[]
// table so that fewer address-calculation instructions are needed
// (a sketch of how these constants are used follows the table)
//
// invpio2: 53 bits of 2/pi (enough when the trigonometric argument is small)
// pio2_1: first 33 bits of pi/2
// pio2_1t: pi/2 - pio2_1
// pio2_2: second 33 bits of pi/2
// pio2_2t: pi/2 - (pio2_1+pio2_2)
// pio2_3: third 33 bits of pi/2
// pio2_3t: pi/2 - (pio2_1+pio2_2+pio2_3)
0x00000000, 0x3fe00000, // 0.5
0x6DC9C883, 0x3FE45F30, // invpio2 = 6.36619772367581382433e-01
0x54400000, 0x3FF921FB, // pio2_1 = 1.57079632673412561417e+00
0x1A626331, 0x3DD0B461, // pio2_1t = 6.07710050650619224932e-11
0x1A600000, 0x3DD0B461, // pio2_2 = 6.07710050630396597660e-11
0x2E037073, 0x3BA3198A, // pio2_2t = 2.02226624879595063154e-21
0x2E000000, 0x3BA3198A, // pio2_3 = 2.02226624871116645580e-21
0x252049C1, 0x397B839A, // pio2_3t = 8.47842766036889956997e-32
// now, npio2_hw itself
0x3FF921FB, 0x400921FB, 0x4012D97C, 0x401921FB, 0x401F6A7A, 0x4022D97C,
0x4025FDBB, 0x402921FB, 0x402C463A, 0x402F6A7A, 0x4031475C, 0x4032D97C,
0x40346B9C, 0x4035FDBB, 0x40378FDB, 0x403921FB, 0x403AB41B, 0x403C463A,
0x403DD85A, 0x403F6A7A, 0x40407E4C, 0x4041475C, 0x4042106C, 0x4042D97C,
0x4043A28C, 0x40446B9C, 0x404534AC, 0x4045FDBB, 0x4046C6CB, 0x40478FDB,
0x404858EB, 0x404921FB
};
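// How the coefficients above are typically used: a sketch of the classic
// fdlibm-style split reduction (an illustration of the scheme, not the stub
// code itself, which lives in macroAssembler_aarch64_trig.cpp). Because
// pio2_1 holds only the top 33 bits of pi/2, n * pio2_1 can be subtracted
// from x without rounding error for moderate n, and pio2_1t then folds in
// the low part; pio2_2/pio2_2t and pio2_3/pio2_3t extend the same idea when
// more precision is needed.
#include <cmath>
static double reduce_medium_sketch(double x, int& n) {
  const double invpio2 = 6.36619772367581382433e-01;  // 53 bits of 2/pi (table above)
  const double pio2_1  = 1.57079632673412561417e+00;  // first 33 bits of pi/2
  const double pio2_1t = 6.07710050650619224932e-11;  // pi/2 - pio2_1
  n = (int)std::nearbyint(x * invpio2);               // nearest multiple of pi/2
  double r = x - n * pio2_1;                          // exact for moderate n
  return r - n * pio2_1t;                             // fold in the low bits of pi/2
}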
// Coefficients for the sin(x) polynomial approximation: S1..S6.
// See the kernel_sin comments in macroAssembler_aarch64_trig.cpp for details
// (a Horner-form sketch follows the cos table below).
jdouble StubRoutines::aarch64::_dsin_coef[] __attribute__ ((aligned(64))) = {
-1.66666666666666324348e-01, // 0xBFC5555555555549
8.33333333332248946124e-03, // 0x3F8111111110F8A6
-1.98412698298579493134e-04, // 0xBF2A01A019C161D5
2.75573137070700676789e-06, // 0x3EC71DE357B1FE7D
-2.50507602534068634195e-08, // 0xBE5AE5E68A2B9CEB
1.58969099521155010221e-10 // 0x3DE5D93A5ACFD57C
};
// Coefficients for cos(x) polynomial approximation: C1..C6.
// See kernel_cos comments in macroAssembler_aarch64_trig.cpp for details
jdouble StubRoutines::aarch64::_dcos_coef[] __attribute__ ((aligned(64))) = {
4.16666666666666019037e-02, // 0x3FA555555555554C
-1.38888888888741095749e-03, // 0xBF56C16C16C15177
2.48015872894767294178e-05, // 0x3EFA01A019CB1590
-2.75573143513906633035e-07, // 0xBE927E4F809C52AD
2.08757232129817482790e-09, // 0x3E21EE9EBDB4B1C4
-1.13596475577881948265e-11 // 0xBDA8FAE9BE8838D4
};
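// A sketch of how S1..S6 enter a kernel_sin-style evaluation for |x| < pi/4
// (the standard fdlibm Horner form; an illustration, not the stub code,
// which lives in macroAssembler_aarch64_trig.cpp):
static double kernel_sin_sketch(double x) {
  const double S1 = -1.66666666666666324348e-01, S2 =  8.33333333332248946124e-03,
               S3 = -1.98412698298579493134e-04, S4 =  2.75573137070700676789e-06,
               S5 = -2.50507602534068634195e-08, S6 =  1.58969099521155010221e-10;
  double z = x * x;
  double v = z * x;
  double r = S2 + z * (S3 + z * (S4 + z * (S5 + z * S6)));
  return x + v * (S1 + z * r);  // sin(x) ~ x + S1*x^3 + ... + S6*x^13
}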
// Table of constants for 2/pi: 396 hex digits (476 decimal) of 2/pi, used
// for very large arguments. 396 hex digits are enough for the required
// precision.
// Converted to double to avoid an unnecessary conversion in code.
// NOTE: the table looks like the original int table {0xA2F983, 0x6E4E44, ...}
// with only the (double) conversion added.
jdouble StubRoutines::aarch64::_two_over_pi[] __attribute__ ((aligned(64))) = {
(double)0xA2F983, (double)0x6E4E44, (double)0x1529FC, (double)0x2757D1, (double)0xF534DD, (double)0xC0DB62,
(double)0x95993C, (double)0x439041, (double)0xFE5163, (double)0xABDEBB, (double)0xC561B7, (double)0x246E3A,
(double)0x424DD2, (double)0xE00649, (double)0x2EEA09, (double)0xD1921C, (double)0xFE1DEB, (double)0x1CB129,
(double)0xA73EE8, (double)0x8235F5, (double)0x2EBB44, (double)0x84E99C, (double)0x7026B4, (double)0x5F7E41,
(double)0x3991D6, (double)0x398353, (double)0x39F49C, (double)0x845F8B, (double)0xBDF928, (double)0x3B1FF8,
(double)0x97FFDE, (double)0x05980F, (double)0xEF2F11, (double)0x8B5A0A, (double)0x6D1F6D, (double)0x367ECF,
(double)0x27CB09, (double)0xB74F46, (double)0x3F669E, (double)0x5FEA2D, (double)0x7527BA, (double)0xC7EBE5,
(double)0xF17B3D, (double)0x0739F7, (double)0x8A5292, (double)0xEA6BFB, (double)0x5FB11F, (double)0x8D5D08,
(double)0x560330, (double)0x46FC7B, (double)0x6BABF0, (double)0xCFBC20, (double)0x9AF436, (double)0x1DA9E3,
(double)0x91615E, (double)0xE61B08, (double)0x659985, (double)0x5F14A0, (double)0x68408D, (double)0xFFD880,
(double)0x4D7327, (double)0x310606, (double)0x1556CA, (double)0x73A8C9, (double)0x60E27B, (double)0xC08C6B,
};
// Pi over 2, split into chunks of decreasing magnitude (a usage sketch
// follows the table)
jdouble StubRoutines::aarch64::_pio2[] __attribute__ ((aligned(64))) = {
1.57079625129699707031e+00, // 0x3FF921FB40000000
7.54978941586159635335e-08, // 0x3E74442D00000000
5.39030252995776476554e-15, // 0x3CF8469880000000
3.28200341580791294123e-22, // 0x3B78CC5160000000
1.27065575308067607349e-29, // 0x39F01B8380000000
1.22933308981111328932e-36, // 0x387A252040000000
2.73370053816464559624e-44, // 0x36E3822280000000
2.16741683877804819444e-51, // 0x3569F31D00000000
};
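// A sketch of the general multi-term scheme such a split table supports (an
// illustration of the idea, not the stub code): each entry carries the next
// slice of pi/2's bits, so n times pi/2 can be subtracted slice by slice,
// keeping x - n*pi/2 accurate even when a single double could not represent
// n*pi/2 exactly.
static double reduce_with_chunks_sketch(double x, double n, const double pio2[8]) {
  double r = x;
  for (int i = 0; i < 8; i++) {
    r -= n * pio2[i];  // remove the next slice of n*pi/2
  }
  return r;
}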

@ -41,7 +41,7 @@ static bool returns_to_call_stub(address return_pc) {
enum platform_dependent_constants {
code_size1 = 19000, // simply increase if too small (assembler will crash if too small)
code_size2 = 22000 // simply increase if too small (assembler will crash if too small)
code_size2 = 28000 // simply increase if too small (assembler will crash if too small)
};
class aarch64 {
@ -66,6 +66,14 @@ class aarch64 {
static address _has_negatives;
static address _has_negatives_long;
static address _large_array_equals;
static address _compare_long_string_LL;
static address _compare_long_string_LU;
static address _compare_long_string_UL;
static address _compare_long_string_UU;
static address _string_indexof_linear_ll;
static address _string_indexof_linear_uu;
static address _string_indexof_linear_ul;
static address _large_byte_array_inflate;
static bool _completed;
public:
@ -136,6 +144,38 @@ class aarch64 {
return _large_array_equals;
}
static address compare_long_string_LL() {
return _compare_long_string_LL;
}
static address compare_long_string_LU() {
return _compare_long_string_LU;
}
static address compare_long_string_UL() {
return _compare_long_string_UL;
}
static address compare_long_string_UU() {
return _compare_long_string_UU;
}
static address string_indexof_linear_ul() {
return _string_indexof_linear_ul;
}
static address string_indexof_linear_ll() {
return _string_indexof_linear_ll;
}
static address string_indexof_linear_uu() {
return _string_indexof_linear_uu;
}
static address large_byte_array_inflate() {
return _large_byte_array_inflate;
}
static bool complete() {
return _completed;
}
@ -146,7 +186,13 @@ class aarch64 {
private:
static juint _crc_table[];
// begin trigonometric tables block. See comments in .cpp file
static juint _npio2_hw[];
static jdouble _two_over_pi[];
static jdouble _pio2[];
static jdouble _dsin_coef[];
static jdouble _dcos_coef[];
// end trigonometric tables block
};
#endif // CPU_AARCH64_VM_STUBROUTINES_AARCH64_HPP

@ -247,26 +247,54 @@ void TemplateInterpreterGenerator::generate_transcendental_entry(AbstractInterpr
address fn;
switch (kind) {
case Interpreter::java_lang_math_sin :
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
if (StubRoutines::dsin() == NULL) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dsin());
}
break;
case Interpreter::java_lang_math_cos :
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
if (StubRoutines::dcos() == NULL) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dcos());
}
break;
case Interpreter::java_lang_math_tan :
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
if (StubRoutines::dtan() == NULL) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dtan());
}
break;
case Interpreter::java_lang_math_log :
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
if (StubRoutines::dlog() == NULL) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog());
}
break;
case Interpreter::java_lang_math_log10 :
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
if (StubRoutines::dlog10() == NULL) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog10());
}
break;
case Interpreter::java_lang_math_exp :
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
if (StubRoutines::dexp() == NULL) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dexp());
}
break;
case Interpreter::java_lang_math_pow :
fpargs = 2;
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
if (StubRoutines::dpow() == NULL) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dpow());
}
break;
default:
ShouldNotReachHere();

@ -761,7 +761,7 @@ void TemplateTable::iaload()
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
__ access_load_at(T_INT, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
__ access_load_at(T_INT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
}
void TemplateTable::laload()
@ -773,7 +773,7 @@ void TemplateTable::laload()
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
__ access_load_at(T_LONG, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
__ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
}
void TemplateTable::faload()
@ -785,7 +785,7 @@ void TemplateTable::faload()
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
__ access_load_at(T_FLOAT, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
__ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
}
void TemplateTable::daload()
@ -797,7 +797,7 @@ void TemplateTable::daload()
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
__ access_load_at(T_DOUBLE, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
__ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
}
void TemplateTable::aaload()
@ -812,7 +812,7 @@ void TemplateTable::aaload()
do_oop_load(_masm,
Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)),
r0,
IN_HEAP_ARRAY);
IS_ARRAY);
}
void TemplateTable::baload()
@ -824,7 +824,7 @@ void TemplateTable::baload()
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
__ access_load_at(T_BYTE, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(0)), noreg, noreg);
__ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(0)), noreg, noreg);
}
void TemplateTable::caload()
@ -836,7 +836,7 @@ void TemplateTable::caload()
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
__ access_load_at(T_CHAR, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
__ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}
// iload followed by caload frequent pair
@ -853,7 +853,7 @@ void TemplateTable::fast_icaload()
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
__ access_load_at(T_CHAR, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
__ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}
void TemplateTable::saload()
@ -865,7 +865,7 @@ void TemplateTable::saload()
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_SHORT) >> 1);
__ access_load_at(T_SHORT, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
__ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}
void TemplateTable::iload(int n)
@ -1059,7 +1059,7 @@ void TemplateTable::iastore() {
// r3: array
index_check(r3, r1); // prefer index in r1
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
__ access_store_at(T_INT, IN_HEAP | IN_HEAP_ARRAY, Address(r3, r1, Address::uxtw(2)), r0, noreg, noreg);
__ access_store_at(T_INT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), r0, noreg, noreg);
}
void TemplateTable::lastore() {
@ -1071,7 +1071,7 @@ void TemplateTable::lastore() {
// r3: array
index_check(r3, r1); // prefer index in r1
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
__ access_store_at(T_LONG, IN_HEAP | IN_HEAP_ARRAY, Address(r3, r1, Address::uxtw(3)), r0, noreg, noreg);
__ access_store_at(T_LONG, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), r0, noreg, noreg);
}
void TemplateTable::fastore() {
@ -1083,7 +1083,7 @@ void TemplateTable::fastore() {
// r3: array
index_check(r3, r1); // prefer index in r1
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
__ access_store_at(T_FLOAT, IN_HEAP | IN_HEAP_ARRAY, Address(r3, r1, Address::uxtw(2)), noreg /* ftos */, noreg, noreg);
__ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(2)), noreg /* ftos */, noreg, noreg);
}
void TemplateTable::dastore() {
@ -1095,7 +1095,7 @@ void TemplateTable::dastore() {
// r3: array
index_check(r3, r1); // prefer index in r1
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
__ access_store_at(T_DOUBLE, IN_HEAP | IN_HEAP_ARRAY, Address(r3, r1, Address::uxtw(3)), noreg /* dtos */, noreg, noreg);
__ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), noreg /* dtos */, noreg, noreg);
}
void TemplateTable::aastore() {
@ -1136,7 +1136,7 @@ void TemplateTable::aastore() {
// Get the value we will store
__ ldr(r0, at_tos());
// Now store using the appropriate barrier
do_oop_store(_masm, element_address, r0, IN_HEAP_ARRAY);
do_oop_store(_masm, element_address, r0, IS_ARRAY);
__ b(done);
// Have a NULL in r0, r3=array, r2=index. Store NULL at ary[idx]
@ -1144,7 +1144,7 @@ void TemplateTable::aastore() {
__ profile_null_seen(r2);
// Store a NULL
do_oop_store(_masm, element_address, noreg, IN_HEAP_ARRAY);
do_oop_store(_masm, element_address, noreg, IS_ARRAY);
// Pop stack arguments
__ bind(done);
@ -1172,7 +1172,7 @@ void TemplateTable::bastore()
__ bind(L_skip);
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
__ access_store_at(T_BYTE, IN_HEAP | IN_HEAP_ARRAY, Address(r3, r1, Address::uxtw(0)), r0, noreg, noreg);
__ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(0)), r0, noreg, noreg);
}
void TemplateTable::castore()
@ -1185,7 +1185,7 @@ void TemplateTable::castore()
// r3: array
index_check(r3, r1); // prefer index in r1
__ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
__ access_store_at(T_CHAR, IN_HEAP | IN_HEAP_ARRAY, Address(r3, r1, Address::uxtw(1)), r0, noreg, noreg);
__ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(1)), r0, noreg, noreg);
}
void TemplateTable::sastore()
@ -3362,22 +3362,45 @@ void TemplateTable::invokeinterface(int byte_no) {
// r2: receiver
// r3: flags
// First check for Object case, then private interface method,
// then regular interface method.
// Special case of invokeinterface called for virtual method of
// java.lang.Object. See cpCacheOop.cpp for details.
// This code isn't produced by javac, but could be produced by
// another compliant java compiler.
Label notMethod;
__ tbz(r3, ConstantPoolCacheEntry::is_forced_virtual_shift, notMethod);
// java.lang.Object. See cpCache.cpp for details.
Label notObjectMethod;
__ tbz(r3, ConstantPoolCacheEntry::is_forced_virtual_shift, notObjectMethod);
invokevirtual_helper(rmethod, r2, r3);
__ bind(notMethod);
__ bind(notObjectMethod);
Label no_such_interface;
// Check for private method invocation - indicated by vfinal
Label notVFinal;
__ tbz(r3, ConstantPoolCacheEntry::is_vfinal_shift, notVFinal);
// Get receiver klass into r3 - also a null check
__ null_check(r2, oopDesc::klass_offset_in_bytes());
__ load_klass(r3, r2);
Label subtype;
__ check_klass_subtype(r3, r0, r4, subtype);
// If we get here the typecheck failed
__ b(no_such_interface);
__ bind(subtype);
__ profile_final_call(r0);
__ profile_arguments_type(r0, rmethod, r4, true);
__ jump_from_interpreted(rmethod, r0);
__ bind(notVFinal);
// Get receiver klass into r3 - also a null check
__ restore_locals();
__ null_check(r2, oopDesc::klass_offset_in_bytes());
__ load_klass(r3, r2);
Label no_such_interface, no_such_method;
Label no_such_method;
// Preserve method for throw_AbstractMethodErrorVerbose.
__ mov(r16, rmethod);

@ -53,7 +53,7 @@
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, int callee_saved_regs) {
bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
if (!dest_uninitialized) {
assert( addr->encoding() < callee_saved_regs, "addr must be saved");
assert(count->encoding() < callee_saved_regs, "count must be saved");

@ -75,9 +75,9 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address obj, Register new_val, Register tmp1, Register tmp2, Register tmp3, bool is_null) {
bool on_array = (decorators & IN_HEAP_ARRAY) != 0;
bool is_array = (decorators & IS_ARRAY) != 0;
bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
bool precise = on_array || on_anonymous;
bool precise = is_array || on_anonymous;
if (is_null) {
BarrierSetAssembler::store_at(masm, decorators, type, obj, new_val, tmp1, tmp2, tmp3, true);

@ -2945,7 +2945,7 @@ class StubGenerator: public StubCodeGenerator {
__ push(LR);
#endif // AARCH64
DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY;
DecoratorSet decorators = IN_HEAP | IS_ARRAY;
if (disjoint) {
decorators |= ARRAYCOPY_DISJOINT;
}
@ -3217,7 +3217,7 @@ class StubGenerator: public StubCodeGenerator {
pushed+=1;
#endif // AARCH64
DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY | ARRAYCOPY_CHECKCAST;
DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_CHECKCAST;
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, true, to, count, callee_saved_regs);

@ -943,7 +943,7 @@ void TemplateTable::aaload() {
const Register Rindex = R0_tos;
index_check(Rarray, Rindex);
do_oop_load(_masm, R0_tos, get_array_elem_addr(T_OBJECT, Rarray, Rindex, Rtemp), IN_HEAP_ARRAY);
do_oop_load(_masm, R0_tos, get_array_elem_addr(T_OBJECT, Rarray, Rindex, Rtemp), IS_ARRAY);
}
@ -1328,7 +1328,7 @@ void TemplateTable::aastore() {
__ add(Raddr_1, Raddr_1, AsmOperand(Rindex_4, lsl, LogBytesPerHeapOop));
// Now store using the appropriate barrier
do_oop_store(_masm, Raddr_1, Rvalue_2, Rtemp, R0_tmp, R3_tmp, false, IN_HEAP_ARRAY);
do_oop_store(_masm, Raddr_1, Rvalue_2, Rtemp, R0_tmp, R3_tmp, false, IS_ARRAY);
__ b(done);
__ bind(throw_array_store);
@ -1344,7 +1344,7 @@ void TemplateTable::aastore() {
__ profile_null_seen(R0_tmp);
// Store a NULL
do_oop_store(_masm, Address::indexed_oop(Raddr_1, Rindex_4), Rvalue_2, Rtemp, R0_tmp, R3_tmp, true, IN_HEAP_ARRAY);
do_oop_store(_masm, Address::indexed_oop(Raddr_1, Rindex_4), Rvalue_2, Rtemp, R0_tmp, R3_tmp, true, IS_ARRAY);
// Pop stack arguments
__ bind(done);
@ -4276,25 +4276,41 @@ void TemplateTable::invokeinterface(int byte_no) {
const Register Rinterf = R5_tmp;
const Register Rindex = R4_tmp;
const Register Rflags = R3_tmp;
const Register Rklass = R3_tmp;
const Register Rklass = R2_tmp; // Note! Same register as Rrecv
prepare_invoke(byte_no, Rinterf, Rmethod, Rrecv, Rflags);
// Special case of invokeinterface called for virtual method of
// java.lang.Object. See cpCacheOop.cpp for details.
// This code isn't produced by javac, but could be produced by
// another compliant java compiler.
Label notMethod;
__ tbz(Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift, notMethod);
// First check for Object case, then private interface method,
// then regular interface method.
// Special case of invokeinterface called for virtual method of
// java.lang.Object. See cpCache.cpp for details.
Label notObjectMethod;
__ tbz(Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift, notObjectMethod);
invokevirtual_helper(Rmethod, Rrecv, Rflags);
__ bind(notMethod);
__ bind(notObjectMethod);
// Get receiver klass into Rklass - also a null check
__ load_klass(Rklass, Rrecv);
// Check for private method invocation - indicated by vfinal
Label no_such_interface;
Label notVFinal;
__ tbz(Rflags, ConstantPoolCacheEntry::is_vfinal_shift, notVFinal);
Label subtype;
__ check_klass_subtype(Rklass, Rinterf, R1_tmp, R3_tmp, noreg, subtype);
// If we get here the typecheck failed
__ b(no_such_interface);
__ bind(subtype);
// do the call
__ profile_final_call(R0_tmp);
__ jump_from_interpreted(Rmethod);
__ bind(notVFinal);
// Receiver subtype check against REFC.
__ lookup_interface_method(// inputs: rec. class, interface
Rklass, Rinterf, noreg,

@ -44,7 +44,7 @@
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register from, Register to, Register count,
Register preserve1, Register preserve2) {
bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
// With G1, don't generate the call if we statically know that the target is uninitialized
if (!dest_uninitialized) {
int spill_slots = 3;
@ -107,7 +107,7 @@ void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* mas
void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, DecoratorSet decorators, Register obj, RegisterOrConstant ind_or_offs, Register pre_val,
Register tmp1, Register tmp2, bool needs_frame) {
bool not_null = (decorators & OOP_NOT_NULL) != 0,
bool not_null = (decorators & IS_NOT_NULL) != 0,
preloaded = obj == noreg;
Register nv_save = noreg;
@ -205,7 +205,7 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, Decorator
void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, DecoratorSet decorators, Register store_addr, Register new_val,
Register tmp1, Register tmp2, Register tmp3) {
bool not_null = (decorators & OOP_NOT_NULL) != 0;
bool not_null = (decorators & IS_NOT_NULL) != 0;
Label runtime, filtered;
assert_different_registers(store_addr, new_val, tmp1, tmp2);
@ -279,9 +279,9 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register base, RegisterOrConstant ind_or_offs, Register val,
Register tmp1, Register tmp2, Register tmp3, bool needs_frame) {
bool on_array = (decorators & IN_HEAP_ARRAY) != 0;
bool is_array = (decorators & IS_ARRAY) != 0;
bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
bool precise = on_array || on_anonymous;
bool precise = is_array || on_anonymous;
// Load and record the previous value.
g1_write_barrier_pre(masm, decorators, base, ind_or_offs,
tmp1, tmp2, tmp3, needs_frame);
@ -318,7 +318,7 @@ void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorator
// these parameters the pre-barrier does not generate
// the load of the previous value
// We only reach here if value is not null.
g1_write_barrier_pre(masm, decorators | OOP_NOT_NULL, noreg /* obj */, (intptr_t)0, dst /* pre_val */,
g1_write_barrier_pre(masm, decorators | IS_NOT_NULL, noreg /* obj */, (intptr_t)0, dst /* pre_val */,
tmp1, tmp2, needs_frame);
}
__ bind(done);

@ -35,7 +35,7 @@ void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators
Register tmp1, Register tmp2, Register tmp3, bool needs_frame) {
bool in_heap = (decorators & IN_HEAP) != 0;
bool in_native = (decorators & IN_NATIVE) != 0;
bool not_null = (decorators & OOP_NOT_NULL) != 0;
bool not_null = (decorators & IS_NOT_NULL) != 0;
assert(in_heap || in_native, "where?");
assert_different_registers(base, val, tmp1, tmp2, R0);
@ -68,7 +68,7 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators,
Register tmp1, Register tmp2, bool needs_frame, Label *L_handle_null) {
bool in_heap = (decorators & IN_HEAP) != 0;
bool in_native = (decorators & IN_NATIVE) != 0;
bool not_null = (decorators & OOP_NOT_NULL) != 0;
bool not_null = (decorators & IS_NOT_NULL) != 0;
assert(in_heap || in_native, "where?");
assert_different_registers(ind_or_offs.register_or_noreg(), dst, R0);

@ -93,9 +93,9 @@ void CardTableBarrierSetAssembler::card_write_barrier_post(MacroAssembler* masm,
void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register base, RegisterOrConstant ind_or_offs, Register val,
Register tmp1, Register tmp2, Register tmp3, bool needs_frame) {
bool on_array = (decorators & IN_HEAP_ARRAY) != 0;
bool is_array = (decorators & IS_ARRAY) != 0;
bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
bool precise = on_array || on_anonymous;
bool precise = is_array || on_anonymous;
BarrierSetAssembler::store_at(masm, decorators, type, base, ind_or_offs, val, tmp1, tmp2, tmp3, needs_frame);

@ -2046,7 +2046,7 @@ void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_re
assert_different_registers(mtype_reg, mh_reg, temp_reg);
// Compare method type against that of the receiver.
load_heap_oop(temp_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg), mh_reg,
noreg, noreg, false, OOP_NOT_NULL);
noreg, noreg, false, IS_NOT_NULL);
cmpd(CCR0, temp_reg, mtype_reg);
bne(CCR0, wrong_method_type);
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -329,7 +329,7 @@ inline void MacroAssembler::null_check(Register a, int offset, Label *Lis_null)
inline void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs, Register val,
Register tmp1, Register tmp2, Register tmp3, bool needs_frame) {
assert((decorators & ~(AS_RAW | IN_HEAP | IN_HEAP_ARRAY | IN_NATIVE | OOP_NOT_NULL |
assert((decorators & ~(AS_RAW | IN_HEAP | IN_NATIVE | IS_ARRAY | IS_NOT_NULL |
ON_UNKNOWN_OOP_REF)) == 0, "unsupported decorator");
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bool as_raw = (decorators & AS_RAW) != 0;
@ -348,7 +348,7 @@ inline void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorat
inline void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
Register base, RegisterOrConstant ind_or_offs, Register dst,
Register tmp1, Register tmp2, bool needs_frame, Label *L_handle_null) {
assert((decorators & ~(AS_RAW | IN_HEAP | IN_HEAP_ARRAY | IN_NATIVE | OOP_NOT_NULL |
assert((decorators & ~(AS_RAW | IN_HEAP | IN_NATIVE | IS_ARRAY | IS_NOT_NULL |
ON_PHANTOM_OOP_REF | ON_WEAK_OOP_REF)) == 0, "unsupported decorator");
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
decorators = AccessInternal::decorator_fixup(decorators);

@ -174,13 +174,13 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
// Load the invoker, as MH -> MH.form -> LF.vmentry
__ verify_oop(recv);
__ load_heap_oop(method_temp, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes()), recv,
temp2, noreg, false, OOP_NOT_NULL);
temp2, noreg, false, IS_NOT_NULL);
__ verify_oop(method_temp);
__ load_heap_oop(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes()), method_temp,
temp2, noreg, false, OOP_NOT_NULL);
temp2, noreg, false, IS_NOT_NULL);
__ verify_oop(method_temp);
__ load_heap_oop(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()), method_temp,
temp2, noreg, false, OOP_NOT_NULL);
temp2, noreg, false, IS_NOT_NULL);
__ verify_oop(method_temp);
__ ld(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()), method_temp);
@ -342,7 +342,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
Label L_ok;
Register temp2_defc = temp2;
__ load_heap_oop(temp2_defc, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()), member_reg,
temp3, noreg, false, OOP_NOT_NULL);
temp3, noreg, false, IS_NOT_NULL);
load_klass_from_Class(_masm, temp2_defc, temp3, temp4);
__ verify_klass_ptr(temp2_defc);
__ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, temp4, L_ok);
@ -370,7 +370,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp2);
}
__ load_heap_oop(R19_method, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()), member_reg,
temp3, noreg, false, OOP_NOT_NULL);
temp3, noreg, false, IS_NOT_NULL);
__ ld(R19_method, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()), R19_method);
break;
@ -379,7 +379,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp2);
}
__ load_heap_oop(R19_method, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()), member_reg,
temp3, noreg, false, OOP_NOT_NULL);
temp3, noreg, false, IS_NOT_NULL);
__ ld(R19_method, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()), R19_method);
break;
@ -422,7 +422,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
Register temp2_intf = temp2;
__ load_heap_oop(temp2_intf, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()), member_reg,
temp3, noreg, false, OOP_NOT_NULL);
temp3, noreg, false, IS_NOT_NULL);
load_klass_from_Class(_masm, temp2_intf, temp3, temp4);
__ verify_klass_ptr(temp2_intf);

@ -2024,9 +2024,9 @@ class StubGenerator: public StubCodeGenerator {
STUB_ENTRY(arrayof_oop_disjoint_arraycopy) :
STUB_ENTRY(oop_disjoint_arraycopy);
DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY;
DecoratorSet decorators = IN_HEAP | IS_ARRAY;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
decorators |= IS_DEST_UNINITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
@ -2063,9 +2063,9 @@ class StubGenerator: public StubCodeGenerator {
address start = __ function_entry();
assert_positive_int(R5_ARG3);
DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY | ARRAYCOPY_DISJOINT;
DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
decorators |= IS_DEST_UNINITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
@ -2159,9 +2159,9 @@ class StubGenerator: public StubCodeGenerator {
}
#endif
DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY | ARRAYCOPY_CHECKCAST;
DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_CHECKCAST;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
decorators |= IS_DEST_UNINITIALIZED;
}
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();

@ -688,7 +688,7 @@ void TemplateTable::aaload() {
Rtemp2 = R31;
__ index_check(Rarray, R17_tos /* index */, UseCompressedOops ? 2 : LogBytesPerWord, Rtemp, Rload_addr);
do_oop_load(_masm, Rload_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos, Rtemp, Rtemp2,
IN_HEAP_ARRAY);
IS_ARRAY);
__ verify_oop(R17_tos);
//__ dcbt(R17_tos); // prefetch
}
@ -1015,14 +1015,14 @@ void TemplateTable::aastore() {
__ bind(Lis_null);
do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), noreg /* 0 */,
Rscratch, Rscratch2, Rscratch3, IN_HEAP_ARRAY);
Rscratch, Rscratch2, Rscratch3, IS_ARRAY);
__ profile_null_seen(Rscratch, Rscratch2);
__ b(Ldone);
// Store is OK.
__ bind(Lstore_ok);
do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos /* value */,
Rscratch, Rscratch2, Rscratch3, IN_HEAP_ARRAY | OOP_NOT_NULL);
Rscratch, Rscratch2, Rscratch3, IS_ARRAY | IS_NOT_NULL);
__ bind(Ldone);
// Adjust sp (pops array, index and value).
@ -3583,14 +3583,46 @@ void TemplateTable::invokeinterface(int byte_no) {
prepare_invoke(byte_no, Rinterface_klass, Rret_addr, Rmethod, Rreceiver, Rflags, Rscratch1);
// Get receiver klass.
// First check for Object case, then private interface method,
// then regular interface method.
// Get receiver klass - this is also a null check
__ null_check_throw(Rreceiver, oopDesc::klass_offset_in_bytes(), Rscratch2);
__ load_klass(Rrecv_klass, Rreceiver);
// Check corner case object method.
Label LobjectMethod, L_no_such_interface, Lthrow_ame;
// Special case of invokeinterface called for virtual method of
// java.lang.Object. See ConstantPoolCacheEntry::set_method() for details:
// The invokeinterface was rewritten to an invokevirtual, hence we have
// to handle this corner case.
Label LnotObjectMethod, Lthrow_ame;
__ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift);
__ btrue(CCR0, LobjectMethod);
__ bfalse(CCR0, LnotObjectMethod);
invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rmethod, Rscratch1, Rscratch2);
__ bind(LnotObjectMethod);
// Check for private method invocation - indicated by vfinal
Label LnotVFinal, L_no_such_interface, L_subtype;
__ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
__ bfalse(CCR0, LnotVFinal);
__ check_klass_subtype(Rrecv_klass, Rinterface_klass, Rscratch1, Rscratch2, L_subtype);
// If we get here the typecheck failed
__ b(L_no_such_interface);
__ bind(L_subtype);
// do the call
Register Rscratch = Rflags; // Rflags is dead now.
__ profile_final_call(Rscratch1, Rscratch);
__ profile_arguments_type(Rindex, Rscratch, Rrecv_klass /* scratch */, true);
__ call_from_interpreter(Rindex, Rret_addr, Rscratch, Rrecv_klass /* scratch */);
__ bind(LnotVFinal);
__ lookup_interface_method(Rrecv_klass, Rinterface_klass, noreg, noreg, Rscratch1, Rscratch2,
L_no_such_interface, /*return_method=*/false);
@ -3631,14 +3663,6 @@ void TemplateTable::invokeinterface(int byte_no) {
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose),
Rrecv_klass, Rinterface_klass);
DEBUG_ONLY( __ should_not_reach_here(); )
// Special case of invokeinterface called for virtual method of
// java.lang.Object. See ConstantPoolCacheEntry::set_method() for details:
// The invokeinterface was rewritten to a invokevirtual, hence we have
// to handle this corner case. This code isn't produced by javac, but could
// be produced by another compliant java compiler.
__ bind(LobjectMethod);
invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rmethod, Rscratch1, Rscratch2);
}
void TemplateTable::invokedynamic(int byte_no) {

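The rewritten invokeinterface above adds a third dispatch case, and the same three-way structure recurs in the s390, SPARC, and x86 templates later in this change. A plain-C++ sketch of the decision tree (the shift names mirror ConstantPoolCacheEntry, but the bit positions here are illustrative, not the real ones):

    #include <cstdint>

    const int is_forced_virtual_shift = 0;  // illustrative bit positions
    const int is_vfinal_shift         = 1;

    enum DispatchKind { OBJECT_METHOD, PRIVATE_INTERFACE_METHOD, ITABLE_LOOKUP };

    DispatchKind classify_invokeinterface(uint32_t flags) {
      if (flags & (1u << is_forced_virtual_shift)) {
        // A java.lang.Object method invoked through an interface: the call
        // was rewritten to invokevirtual, so use vtable dispatch.
        return OBJECT_METHOD;
      }
      if (flags & (1u << is_vfinal_shift)) {
        // The new case: a private interface method. Check that the receiver
        // is a subtype of the declaring interface, then call the resolved
        // Method* directly.
        return PRIVATE_INTERFACE_METHOD;
      }
      // Regular interface method: itable lookup, which may raise
      // IncompatibleClassChangeError or AbstractMethodError.
      return ITABLE_LOOKUP;
    }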
@ -46,7 +46,7 @@
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count) {
bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
// With G1, don't generate the call if we statically know that the target is uninitialized.
if (!dest_uninitialized) {
@ -108,7 +108,7 @@ void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorator
if (on_oop && on_reference) {
// Generate the G1 pre-barrier code to log the value of
// the referent field in an SATB buffer.
g1_write_barrier_pre(masm, decorators | OOP_NOT_NULL,
g1_write_barrier_pre(masm, decorators | IS_NOT_NULL,
NULL /* obj */,
dst /* pre_val */,
noreg/* preserve */ ,
@ -127,7 +127,7 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, Decorator
bool pre_val_needed // Save Rpre_val across runtime call, caller uses it.
) {
bool not_null = (decorators & OOP_NOT_NULL) != 0,
bool not_null = (decorators & IS_NOT_NULL) != 0,
preloaded = obj == NULL;
const Register Robj = obj ? obj->base() : noreg,
@ -260,7 +260,7 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, Decorator
void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, DecoratorSet decorators, Register Rstore_addr, Register Rnew_val,
Register Rtmp1, Register Rtmp2, Register Rtmp3) {
bool not_null = (decorators & OOP_NOT_NULL) != 0;
bool not_null = (decorators & IS_NOT_NULL) != 0;
assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2); // Most probably, Rnew_val == Rtmp3.
@ -372,9 +372,9 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
const Address& dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
bool on_array = (decorators & IN_HEAP_ARRAY) != 0;
bool is_array = (decorators & IS_ARRAY) != 0;
bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
bool precise = on_array || on_anonymous;
bool precise = is_array || on_anonymous;
// Load and record the previous value.
g1_write_barrier_pre(masm, decorators, &dst, tmp3, val, tmp1, tmp2, false);

@ -39,7 +39,7 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators,
const Address& addr, Register dst, Register tmp1, Register tmp2, Label *L_handle_null) {
bool in_heap = (decorators & IN_HEAP) != 0;
bool in_native = (decorators & IN_NATIVE) != 0;
bool not_null = (decorators & OOP_NOT_NULL) != 0;
bool not_null = (decorators & IS_NOT_NULL) != 0;
assert(in_heap || in_native, "where?");
switch (type) {
@ -69,7 +69,7 @@ void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators
const Address& addr, Register val, Register tmp1, Register tmp2, Register tmp3) {
bool in_heap = (decorators & IN_HEAP) != 0;
bool in_native = (decorators & IN_NATIVE) != 0;
bool not_null = (decorators & OOP_NOT_NULL) != 0;
bool not_null = (decorators & IS_NOT_NULL) != 0;
assert(in_heap || in_native, "where?");
assert_different_registers(val, tmp1, tmp2);

@ -156,9 +156,9 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register st
void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
const Address& dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
bool on_array = (decorators & IN_HEAP_ARRAY) != 0;
bool is_array = (decorators & IS_ARRAY) != 0;
bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
bool precise = on_array || on_anonymous;
bool precise = is_array || on_anonymous;
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);

@ -4051,7 +4051,7 @@ void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybeNULL
void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
const Address& addr, Register val,
Register tmp1, Register tmp2, Register tmp3) {
assert((decorators & ~(AS_RAW | IN_HEAP | IN_HEAP_ARRAY | IN_NATIVE | OOP_NOT_NULL |
assert((decorators & ~(AS_RAW | IN_HEAP | IN_NATIVE | IS_ARRAY | IS_NOT_NULL |
ON_UNKNOWN_OOP_REF)) == 0, "unsupported decorator");
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
decorators = AccessInternal::decorator_fixup(decorators);
@ -4070,7 +4070,7 @@ void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
const Address& addr, Register dst,
Register tmp1, Register tmp2, Label *is_null) {
assert((decorators & ~(AS_RAW | IN_HEAP | IN_HEAP_ARRAY | IN_NATIVE | OOP_NOT_NULL |
assert((decorators & ~(AS_RAW | IN_HEAP | IN_NATIVE | IS_ARRAY | IS_NOT_NULL |
ON_PHANTOM_OOP_REF | ON_WEAK_OOP_REF)) == 0, "unsupported decorator");
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
decorators = AccessInternal::decorator_fixup(decorators);

@ -198,17 +198,17 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
__ load_heap_oop(method_temp,
Address(recv,
NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())),
noreg, noreg, OOP_NOT_NULL);
noreg, noreg, IS_NOT_NULL);
__ verify_oop(method_temp);
__ load_heap_oop(method_temp,
Address(method_temp,
NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())),
noreg, noreg, OOP_NOT_NULL);
noreg, noreg, IS_NOT_NULL);
__ verify_oop(method_temp);
__ load_heap_oop(method_temp,
Address(method_temp,
NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())),
noreg, noreg, OOP_NOT_NULL);
noreg, noreg, IS_NOT_NULL);
__ verify_oop(method_temp);
__ z_lg(method_temp,
Address(method_temp,
@ -409,7 +409,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
Register temp2_defc = temp2;
__ load_heap_oop(temp2_defc, member_clazz,
noreg, noreg, OOP_NOT_NULL);
noreg, noreg, IS_NOT_NULL);
load_klass_from_Class(_masm, temp2_defc, temp3, temp4);
__ verify_klass_ptr(temp2_defc);
__ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, temp4, L_ok);
@ -436,7 +436,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
}
__ load_heap_oop(Z_method, member_vmtarget,
noreg, noreg, OOP_NOT_NULL);
noreg, noreg, IS_NOT_NULL);
__ z_lg(Z_method, vmtarget_method);
method_is_live = true;
break;
@ -446,7 +446,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
}
__ load_heap_oop(Z_method, member_vmtarget,
noreg, noreg, OOP_NOT_NULL);
noreg, noreg, IS_NOT_NULL);
__ z_lg(Z_method, vmtarget_method);
method_is_live = true;
break;
@ -488,7 +488,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
Register temp3_intf = temp3;
__ load_heap_oop(temp3_intf, member_clazz,
noreg, noreg, OOP_NOT_NULL);
noreg, noreg, IS_NOT_NULL);
load_klass_from_Class(_masm, temp3_intf, temp2, temp4);
Register Z_index = Z_method;

@ -1300,9 +1300,9 @@ class StubGenerator: public StubCodeGenerator {
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
unsigned int size = UseCompressedOops ? 4 : 8;
DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY | ARRAYCOPY_DISJOINT;
DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
decorators |= IS_DEST_UNINITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
@ -1392,9 +1392,9 @@ class StubGenerator: public StubCodeGenerator {
// Branch to disjoint_copy (if applicable) before pre_barrier to avoid double pre_barrier.
array_overlap_test(nooverlap_target, shift); // Branch away to nooverlap_target if disjoint.
DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY;
DecoratorSet decorators = IN_HEAP | IS_ARRAY;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
decorators |= IS_DEST_UNINITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;

@ -853,7 +853,7 @@ void TemplateTable::aaload() {
index_check(Z_tmp_1, index, shift);
// Now load array element.
do_oop_load(_masm, Address(Z_tmp_1, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT)), Z_tos,
Z_tmp_2, Z_tmp_3, IN_HEAP_ARRAY);
Z_tmp_2, Z_tmp_3, IS_ARRAY);
__ verify_oop(Z_tos);
}
@ -1197,7 +1197,7 @@ void TemplateTable::aastore() {
// Store a NULL.
do_oop_store(_masm, Address(Rstore_addr, (intptr_t)0), noreg,
tmp3, tmp2, tmp1, IN_HEAP_ARRAY);
tmp3, tmp2, tmp1, IS_ARRAY);
__ z_bru(done);
// Come here on success.
@ -1205,7 +1205,7 @@ void TemplateTable::aastore() {
// Now store using the appropriate barrier.
do_oop_store(_masm, Address(Rstore_addr, (intptr_t)0), Rvalue,
tmp3, tmp2, tmp1, IN_HEAP_ARRAY | OOP_NOT_NULL);
tmp3, tmp2, tmp1, IS_ARRAY | IS_NOT_NULL);
// Pop stack arguments.
__ bind(done);
@ -3610,20 +3610,43 @@ void TemplateTable::invokeinterface(int byte_no) {
BLOCK_COMMENT("invokeinterface {");
prepare_invoke(byte_no, interface, method, // Get f1 klassOop, f2 itable index.
prepare_invoke(byte_no, interface, method, // Get f1 klassOop, f2 Method*.
receiver, flags);
// Z_R14 (== Z_bytecode) : return entry
// First check for Object case, then private interface method,
// then regular interface method.
// Special case of invokeinterface called for virtual method of
// java.lang.Object. See cpCacheOop.cpp for details.
// This code isn't produced by javac, but could be produced by
// another compliant java compiler.
NearLabel notMethod, no_such_interface, no_such_method;
// java.lang.Object. See cpCache.cpp for details.
NearLabel notObjectMethod, no_such_method;
__ testbit(flags, ConstantPoolCacheEntry::is_forced_virtual_shift);
__ z_brz(notMethod);
__ z_brz(notObjectMethod);
invokevirtual_helper(method, receiver, flags);
__ bind(notMethod);
__ bind(notObjectMethod);
// Check for private method invocation - indicated by vfinal
NearLabel notVFinal;
__ testbit(flags, ConstantPoolCacheEntry::is_vfinal_shift);
__ z_brz(notVFinal);
// Get receiver klass into klass - also a null check.
__ load_klass(klass, receiver);
NearLabel subtype, no_such_interface;
__ check_klass_subtype(klass, interface, Z_tmp_2, Z_tmp_3, subtype);
// If we get here the typecheck failed
__ z_bru(no_such_interface);
__ bind(subtype);
// do the call
__ profile_final_call(Z_tmp_2);
__ profile_arguments_type(Z_tmp_2, method, Z_ARG5, true);
__ jump_from_interpreted(method, Z_tmp_2);
__ bind(notVFinal);
// Get receiver klass into klass - also a null check.
__ restore_locals();

@ -43,7 +43,7 @@
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count) {
bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
// With G1, don't generate the call if we statically know that the target is uninitialized.
if (!dest_uninitialized) {
Register tmp = O5;
@ -406,9 +406,9 @@ void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet deco
// No need for post barrier if storing NULL
bool needs_post_barrier = val != G0 && in_heap;
bool on_array = (decorators & IN_HEAP_ARRAY) != 0;
bool is_array = (decorators & IS_ARRAY) != 0;
bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
bool precise = on_array || on_anonymous;
bool precise = is_array || on_anonymous;
Register index = dst.has_index() ? dst.index() : noreg;
int disp = dst.has_disp() ? dst.disp() : 0;

@ -34,7 +34,7 @@ void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators
Register val, Address dst, Register tmp) {
bool in_heap = (decorators & IN_HEAP) != 0;
bool in_native = (decorators & IN_NATIVE) != 0;
bool oop_not_null = (decorators & OOP_NOT_NULL) != 0;
bool is_not_null = (decorators & IS_NOT_NULL) != 0;
switch (type) {
case T_ARRAY:
@ -47,7 +47,7 @@ void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators
}
if (UseCompressedOops) {
assert(dst.base() != val, "not enough registers");
if (oop_not_null) {
if (is_not_null) {
__ encode_heap_oop_not_null(val);
} else {
__ encode_heap_oop(val);
@ -70,7 +70,7 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators,
Address src, Register dst, Register tmp) {
bool in_heap = (decorators & IN_HEAP) != 0;
bool in_native = (decorators & IN_NATIVE) != 0;
bool oop_not_null = (decorators & OOP_NOT_NULL) != 0;
bool is_not_null = (decorators & IS_NOT_NULL) != 0;
switch (type) {
case T_ARRAY:
@ -83,7 +83,7 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators,
}
if (UseCompressedOops) {
__ lduw(src, dst);
if (oop_not_null) {
if (is_not_null) {
__ decode_heap_oop_not_null(dst);
} else {
__ decode_heap_oop(dst);

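Each port selects encode/decode_heap_oop_not_null when IS_NOT_NULL is set. The payoff is that compressed-oop encoding must keep null as null, and that check can be elided when the value is statically known non-null. A simplified model of the arithmetic (the real code also special-cases a zero heap base and a zero shift):

    #include <cstdint>

    uint32_t encode_heap_oop(uintptr_t oop, uintptr_t base, int shift) {
      if (oop == 0) return 0;                         // null must encode to null
      return (uint32_t)((oop - base) >> shift);
    }

    uint32_t encode_heap_oop_not_null(uintptr_t oop, uintptr_t base, int shift) {
      return (uint32_t)((oop - base) >> shift);       // null check statically elided
    }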
@ -1,4 +1,3 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -90,9 +89,9 @@ void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorS
Register val, Address dst, Register tmp) {
bool in_heap = (decorators & IN_HEAP) != 0;
bool on_array = (decorators & IN_HEAP_ARRAY) != 0;
bool is_array = (decorators & IS_ARRAY) != 0;
bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
bool precise = on_array || on_anonymous;
bool precise = is_array || on_anonymous;
// No need for post barrier if storing NULL
bool needs_post_barrier = val != G0 && in_heap;

@ -2269,9 +2269,9 @@ class StubGenerator: public StubCodeGenerator {
BLOCK_COMMENT("Entry:");
}
DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY | ARRAYCOPY_DISJOINT;
DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
decorators |= IS_DEST_UNINITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
@ -2326,9 +2326,9 @@ class StubGenerator: public StubCodeGenerator {
array_overlap_test(nooverlap_target, LogBytesPerHeapOop);
DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY;
DecoratorSet decorators = IN_HEAP | IS_ARRAY;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
decorators |= IS_DEST_UNINITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
@ -2446,9 +2446,9 @@ class StubGenerator: public StubCodeGenerator {
BLOCK_COMMENT("Entry:");
}
DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY | ARRAYCOPY_CHECKCAST;
DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_CHECKCAST;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
decorators |= IS_DEST_UNINITIALIZED;
}
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();

@ -697,7 +697,7 @@ void TemplateTable::aaload() {
arrayOopDesc::base_offset_in_bytes(T_OBJECT),
Otos_i,
G3_scratch,
IN_HEAP_ARRAY);
IS_ARRAY);
__ verify_oop(Otos_i);
}
@ -997,13 +997,13 @@ void TemplateTable::aastore() {
// Store is OK.
__ bind(store_ok);
do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, IN_HEAP_ARRAY);
do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, IS_ARRAY);
__ ba(done);
__ delayed()->inc(Lesp, 3* Interpreter::stackElementSize); // adj sp (pops array, index and value)
__ bind(is_null);
do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, IN_HEAP_ARRAY);
do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, IS_ARRAY);
__ profile_null_seen(G3_scratch);
__ inc(Lesp, 3* Interpreter::stackElementSize); // adj sp (pops array, index and value)
@ -3202,28 +3202,56 @@ void TemplateTable::invokeinterface(int byte_no) {
prepare_invoke(byte_no, Rinterface, Rret, Rmethod, O0_recv, O1_flags);
// get receiver klass
// First check for Object case, then private interface method,
// then regular interface method.
// get receiver klass - this is also a null check
__ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
__ load_klass(O0_recv, O2_Klass);
// Special case of invokeinterface called for virtual method of
// java.lang.Object. See cpCacheOop.cpp for details.
// This code isn't produced by javac, but could be produced by
// another compliant java compiler.
Label notMethod;
// java.lang.Object. See cpCache.cpp for details.
Label notObjectMethod;
__ set((1 << ConstantPoolCacheEntry::is_forced_virtual_shift), Rscratch);
__ btst(O1_flags, Rscratch);
__ br(Assembler::zero, false, Assembler::pt, notMethod);
__ br(Assembler::zero, false, Assembler::pt, notObjectMethod);
__ delayed()->nop();
invokeinterface_object_method(O2_Klass, Rinterface, Rret, O1_flags);
__ bind(notMethod);
Register Rtemp = O1_flags;
__ bind(notObjectMethod);
Label L_no_such_interface;
// Check for private method invocation - indicated by vfinal
Label notVFinal;
{
__ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), Rscratch);
__ btst(O1_flags, Rscratch);
__ br(Assembler::zero, false, Assembler::pt, notVFinal);
__ delayed()->nop();
Label subtype;
Register Rtemp = O1_flags;
__ check_klass_subtype(O2_Klass, Rinterface, Rscratch, Rtemp, subtype);
// If we get here the typecheck failed
__ ba(L_no_such_interface);
__ delayed()->nop();
__ bind(subtype);
// do the call
Register Rcall = Rinterface;
__ mov(Rmethod, G5_method);
assert_different_registers(Rcall, G5_method, Gargs, Rret);
__ profile_arguments_type(G5_method, Rcall, Gargs, true);
__ profile_final_call(Rscratch);
__ call_from_interpreter(Rcall, Gargs, Rret);
}
__ bind(notVFinal);
Register Rtemp = O1_flags;
// Receiver subtype check against REFC.
__ lookup_interface_method(// inputs: rec. class, interface, itable index
O2_Klass, Rinterface, noreg,

@ -1303,6 +1303,16 @@ void Assembler::aesdec(XMMRegister dst, XMMRegister src) {
emit_int8(0xC0 | encode);
}
void Assembler::vaesdec(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
assert(VM_Version::supports_vaes(), "");
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
attributes.set_is_evex_instruction();
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xDE);
emit_int8((unsigned char)(0xC0 | encode));
}
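// (Usage note, hedged: VAESDEC applies one AES decryption round per 128-bit
// lane, so with vector_len == AVX_512bit a single instruction advances four
// blocks. The CBC stub below leans on this: eight ZMM registers times four
// lanes is 32 blocks, i.e. 512 bytes, per unrolled loop iteration.)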
void Assembler::aesdeclast(XMMRegister dst, Address src) {
assert(VM_Version::supports_aes(), "");
InstructionMark im(this);
@ -1320,6 +1330,15 @@ void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) {
emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::vaesdeclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
assert(VM_Version::supports_vaes(), "");
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
attributes.set_is_evex_instruction();
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xDF);
emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::aesenc(XMMRegister dst, Address src) {
assert(VM_Version::supports_aes(), "");
InstructionMark im(this);
@ -4391,6 +4410,15 @@ void Assembler::vpalignr(XMMRegister dst, XMMRegister nds, XMMRegister src, int
emit_int8(imm8);
}
void Assembler::evalignq(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
assert(VM_Version::supports_evex(), "");
InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8(0x3);
emit_int8((unsigned char)(0xC0 | encode));
emit_int8(imm8);
}
void Assembler::pblendw(XMMRegister dst, XMMRegister src, int imm8) {
assert(VM_Version::supports_sse4_1(), "");
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
@ -6708,7 +6736,29 @@ void Assembler::evpbroadcastq(XMMRegister dst, Address src, int vector_len) {
emit_int8(0x59);
emit_operand(dst, src);
}
void Assembler::evbroadcasti64x2(XMMRegister dst, XMMRegister src, int vector_len) {
assert(vector_len != Assembler::AVX_128bit, "");
assert(VM_Version::supports_avx512dq(), "");
InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.set_rex_vex_w_reverted();
int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8(0x5A);
emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::evbroadcasti64x2(XMMRegister dst, Address src, int vector_len) {
assert(vector_len != Assembler::AVX_128bit, "");
assert(VM_Version::supports_avx512dq(), "");
assert(dst != xnoreg, "sanity");
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.set_rex_vex_w_reverted();
attributes.set_address_attributes(/* tuple_type */ EVEX_T2, /* input_size_in_bits */ EVEX_64bit);
// swap src<->dst for encoding
vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8(0x5A);
emit_operand(dst, src);
}
// scalar single/double precision replicate

@ -926,7 +926,8 @@ private:
void aesenc(XMMRegister dst, XMMRegister src);
void aesenclast(XMMRegister dst, Address src);
void aesenclast(XMMRegister dst, XMMRegister src);
void vaesdec(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void vaesdeclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void andl(Address dst, int32_t imm32);
void andl(Register dst, int32_t imm32);
@ -1739,6 +1740,7 @@ private:
void palignr(XMMRegister dst, XMMRegister src, int imm8);
void vpalignr(XMMRegister dst, XMMRegister src1, XMMRegister src2, int imm8, int vector_len);
void evalignq(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
void pblendw(XMMRegister dst, XMMRegister src, int imm8);
@ -2102,6 +2104,9 @@ private:
void evpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len);
void evpbroadcastq(XMMRegister dst, Address src, int vector_len);
void evbroadcasti64x2(XMMRegister dst, XMMRegister src, int vector_len);
void evbroadcasti64x2(XMMRegister dst, Address src, int vector_len);
// scalar single/double precision replicate
void evpbroadcastss(XMMRegister dst, XMMRegister src, int vector_len);
void evpbroadcastss(XMMRegister dst, Address src, int vector_len);

@ -43,7 +43,7 @@
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count) {
bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
if (!dest_uninitialized) {
Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);

@ -35,7 +35,7 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators,
Register dst, Address src, Register tmp1, Register tmp_thread) {
bool in_heap = (decorators & IN_HEAP) != 0;
bool in_native = (decorators & IN_NATIVE) != 0;
bool oop_not_null = (decorators & OOP_NOT_NULL) != 0;
bool is_not_null = (decorators & IS_NOT_NULL) != 0;
bool atomic = (decorators & MO_RELAXED) != 0;
switch (type) {
@ -45,7 +45,7 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators,
#ifdef _LP64
if (UseCompressedOops) {
__ movl(dst, src);
if (oop_not_null) {
if (is_not_null) {
__ decode_heap_oop_not_null(dst);
} else {
__ decode_heap_oop(dst);
@ -100,7 +100,7 @@ void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators
Address dst, Register val, Register tmp1, Register tmp2) {
bool in_heap = (decorators & IN_HEAP) != 0;
bool in_native = (decorators & IN_NATIVE) != 0;
bool oop_not_null = (decorators & OOP_NOT_NULL) != 0;
bool is_not_null = (decorators & IS_NOT_NULL) != 0;
bool atomic = (decorators & MO_RELAXED) != 0;
switch (type) {
@ -108,7 +108,7 @@ void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators
case T_ARRAY: {
if (in_heap) {
if (val == noreg) {
assert(!oop_not_null, "inconsistent access");
assert(!is_not_null, "inconsistent access");
#ifdef _LP64
if (UseCompressedOops) {
__ movl(dst, (int32_t)NULL_WORD);
@ -122,7 +122,7 @@ void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators
#ifdef _LP64
if (UseCompressedOops) {
assert(!dst.uses(val), "not enough registers");
if (oop_not_null) {
if (is_not_null) {
__ encode_heap_oop_not_null(val);
} else {
__ encode_heap_oop(val);

@ -135,9 +135,9 @@ void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorS
Address dst, Register val, Register tmp1, Register tmp2) {
bool in_heap = (decorators & IN_HEAP) != 0;
bool on_array = (decorators & IN_HEAP_ARRAY) != 0;
bool is_array = (decorators & IS_ARRAY) != 0;
bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
bool precise = on_array || on_anonymous;
bool precise = is_array || on_anonymous;
bool needs_post_barrier = val != noreg && in_heap;

@ -6287,7 +6287,7 @@ void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
// Doesn't do verification, generates fixed size code
void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
Register thread_tmp, DecoratorSet decorators) {
access_load_at(T_OBJECT, IN_HEAP | OOP_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
}
void MacroAssembler::store_heap_oop(Address dst, Register src, Register tmp1,

@ -837,9 +837,9 @@ class StubGenerator: public StubCodeGenerator {
__ jcc(Assembler::zero, L_0_count);
}
DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY | ARRAYCOPY_DISJOINT;
DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
decorators |= IS_DEST_UNINITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
@ -1026,9 +1026,9 @@ class StubGenerator: public StubCodeGenerator {
__ jcc(Assembler::zero, L_0_count);
}
DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY;
DecoratorSet decorators = IN_HEAP | IS_ARRAY;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
decorators |= IS_DEST_UNINITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
@ -1383,9 +1383,9 @@ class StubGenerator: public StubCodeGenerator {
Address to_element_addr(end_to, count, Address::times_ptr, 0);
Address elem_klass_addr(elem, oopDesc::klass_offset_in_bytes());
DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY | ARRAYCOPY_CHECKCAST;
DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_CHECKCAST;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
decorators |= IS_DEST_UNINITIALIZED;
}
BasicType type = T_OBJECT;

@ -1832,9 +1832,9 @@ class StubGenerator: public StubCodeGenerator {
setup_arg_regs(); // from => rdi, to => rsi, count => rdx
// r9 and r10 may be used to save non-volatile registers
DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY | ARRAYCOPY_DISJOINT;
DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
decorators |= IS_DEST_UNINITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
@ -1926,9 +1926,9 @@ class StubGenerator: public StubCodeGenerator {
setup_arg_regs(); // from => rdi, to => rsi, count => rdx
// r9 and r10 may be used to save non-volatile registers
DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY;
DecoratorSet decorators = IN_HEAP | IS_ARRAY;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
decorators |= IS_DEST_UNINITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
@ -2030,9 +2030,9 @@ class StubGenerator: public StubCodeGenerator {
// r9 and r10 may be used to save non-volatile registers
// 'from', 'to' and 'qword_count' are now valid
DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY | ARRAYCOPY_DISJOINT;
DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
decorators |= IS_DEST_UNINITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
@ -2123,9 +2123,9 @@ class StubGenerator: public StubCodeGenerator {
// r9 and r10 may be used to save non-volatile registers
// 'from', 'to' and 'qword_count' are now valid
DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY | ARRAYCOPY_DISJOINT;
DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
decorators |= IS_DEST_UNINITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
@ -2306,9 +2306,9 @@ class StubGenerator: public StubCodeGenerator {
Address from_element_addr(end_from, count, TIMES_OOP, 0);
Address to_element_addr(end_to, count, TIMES_OOP, 0);
DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY | ARRAYCOPY_CHECKCAST;
DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_CHECKCAST;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
decorators |= IS_DEST_UNINITIALIZED;
}
BasicType type = T_OBJECT;
@ -4084,6 +4084,312 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
void roundDec(XMMRegister xmm_reg) {
__ vaesdec(xmm1, xmm1, xmm_reg, Assembler::AVX_512bit);
__ vaesdec(xmm2, xmm2, xmm_reg, Assembler::AVX_512bit);
__ vaesdec(xmm3, xmm3, xmm_reg, Assembler::AVX_512bit);
__ vaesdec(xmm4, xmm4, xmm_reg, Assembler::AVX_512bit);
__ vaesdec(xmm5, xmm5, xmm_reg, Assembler::AVX_512bit);
__ vaesdec(xmm6, xmm6, xmm_reg, Assembler::AVX_512bit);
__ vaesdec(xmm7, xmm7, xmm_reg, Assembler::AVX_512bit);
__ vaesdec(xmm8, xmm8, xmm_reg, Assembler::AVX_512bit);
}
void roundDeclast(XMMRegister xmm_reg) {
__ vaesdeclast(xmm1, xmm1, xmm_reg, Assembler::AVX_512bit);
__ vaesdeclast(xmm2, xmm2, xmm_reg, Assembler::AVX_512bit);
__ vaesdeclast(xmm3, xmm3, xmm_reg, Assembler::AVX_512bit);
__ vaesdeclast(xmm4, xmm4, xmm_reg, Assembler::AVX_512bit);
__ vaesdeclast(xmm5, xmm5, xmm_reg, Assembler::AVX_512bit);
__ vaesdeclast(xmm6, xmm6, xmm_reg, Assembler::AVX_512bit);
__ vaesdeclast(xmm7, xmm7, xmm_reg, Assembler::AVX_512bit);
__ vaesdeclast(xmm8, xmm8, xmm_reg, Assembler::AVX_512bit);
}
void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask = NULL) {
__ movdqu(xmmdst, Address(key, offset));
if (xmm_shuf_mask != NULL) {
__ pshufb(xmmdst, xmm_shuf_mask);
} else {
__ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
}
__ evshufi64x2(xmmdst, xmmdst, xmmdst, 0x0, Assembler::AVX_512bit);
}
address generate_cipherBlockChaining_decryptVectorAESCrypt() {
assert(VM_Version::supports_vaes(), "need AES instructions and misaligned SSE support");
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
address start = __ pc();
const Register from = c_rarg0; // source array address
const Register to = c_rarg1; // destination array address
const Register key = c_rarg2; // key array address
const Register rvec = c_rarg3; // r byte array initialized from initvector array address
// and left with the results of the last encryption block
#ifndef _WIN64
const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16)
#else
const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64
const Register len_reg = r11; // pick the volatile windows register
#endif
Label Loop, Loop1, L_128, L_256, L_192, KEY_192, KEY_256, Loop2, Lcbc_dec_rem_loop,
Lcbc_dec_rem_last, Lcbc_dec_ret, Lcbc_dec_rem, Lcbc_exit;
__ enter();
#ifdef _WIN64
// on win64, fill len_reg from stack position
__ movl(len_reg, len_mem);
#else
__ push(len_reg); // Save
#endif
__ push(rbx);
__ vzeroupper();
// Temporary variable declaration for swapping key bytes
const XMMRegister xmm_key_shuf_mask = xmm1;
__ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
// Calculate number of rounds from key size: 44 for 10-rounds, 52 for 12-rounds, 60 for 14-rounds
const Register rounds = rbx;
__ movl(rounds, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
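// Worked arithmetic behind the 44/52/60 comparisons used throughout this
// stub: the Java-side expanded key is an int[] of 4 * (rounds + 1) words, so
// AES-128 (10 rounds) -> 44 ints, AES-192 (12) -> 52, AES-256 (14) -> 60.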
const XMMRegister IV = xmm0;
// Load IV and broadcast value to 512-bits
__ evbroadcasti64x2(IV, Address(rvec, 0), Assembler::AVX_512bit);
// Temporary variables for storing round keys
const XMMRegister RK0 = xmm30;
const XMMRegister RK1 = xmm9;
const XMMRegister RK2 = xmm18;
const XMMRegister RK3 = xmm19;
const XMMRegister RK4 = xmm20;
const XMMRegister RK5 = xmm21;
const XMMRegister RK6 = xmm22;
const XMMRegister RK7 = xmm23;
const XMMRegister RK8 = xmm24;
const XMMRegister RK9 = xmm25;
const XMMRegister RK10 = xmm26;
// Load and shuffle key
// the java expanded key ordering is rotated one position from what we want
// so we start from 1*16 here and hit 0*16 last
ev_load_key(RK1, key, 1 * 16, xmm_key_shuf_mask);
ev_load_key(RK2, key, 2 * 16, xmm_key_shuf_mask);
ev_load_key(RK3, key, 3 * 16, xmm_key_shuf_mask);
ev_load_key(RK4, key, 4 * 16, xmm_key_shuf_mask);
ev_load_key(RK5, key, 5 * 16, xmm_key_shuf_mask);
ev_load_key(RK6, key, 6 * 16, xmm_key_shuf_mask);
ev_load_key(RK7, key, 7 * 16, xmm_key_shuf_mask);
ev_load_key(RK8, key, 8 * 16, xmm_key_shuf_mask);
ev_load_key(RK9, key, 9 * 16, xmm_key_shuf_mask);
ev_load_key(RK10, key, 10 * 16, xmm_key_shuf_mask);
ev_load_key(RK0, key, 0*16, xmm_key_shuf_mask);
// Variables for storing source cipher text
const XMMRegister S0 = xmm10;
const XMMRegister S1 = xmm11;
const XMMRegister S2 = xmm12;
const XMMRegister S3 = xmm13;
const XMMRegister S4 = xmm14;
const XMMRegister S5 = xmm15;
const XMMRegister S6 = xmm16;
const XMMRegister S7 = xmm17;
// Variables for storing decrypted text
const XMMRegister B0 = xmm1;
const XMMRegister B1 = xmm2;
const XMMRegister B2 = xmm3;
const XMMRegister B3 = xmm4;
const XMMRegister B4 = xmm5;
const XMMRegister B5 = xmm6;
const XMMRegister B6 = xmm7;
const XMMRegister B7 = xmm8;
__ cmpl(rounds, 44);
__ jcc(Assembler::greater, KEY_192);
__ jmp(Loop);
__ BIND(KEY_192);
const XMMRegister RK11 = xmm27;
const XMMRegister RK12 = xmm28;
ev_load_key(RK11, key, 11*16, xmm_key_shuf_mask);
ev_load_key(RK12, key, 12*16, xmm_key_shuf_mask);
__ cmpl(rounds, 52);
__ jcc(Assembler::greater, KEY_256);
__ jmp(Loop);
__ BIND(KEY_256);
const XMMRegister RK13 = xmm29;
const XMMRegister RK14 = xmm31;
ev_load_key(RK13, key, 13*16, xmm_key_shuf_mask);
ev_load_key(RK14, key, 14*16, xmm_key_shuf_mask);
__ BIND(Loop);
__ cmpl(len_reg, 512);
__ jcc(Assembler::below, Lcbc_dec_rem);
__ BIND(Loop1);
__ subl(len_reg, 512);
__ evmovdquq(S0, Address(from, 0 * 64), Assembler::AVX_512bit);
__ evmovdquq(S1, Address(from, 1 * 64), Assembler::AVX_512bit);
__ evmovdquq(S2, Address(from, 2 * 64), Assembler::AVX_512bit);
__ evmovdquq(S3, Address(from, 3 * 64), Assembler::AVX_512bit);
__ evmovdquq(S4, Address(from, 4 * 64), Assembler::AVX_512bit);
__ evmovdquq(S5, Address(from, 5 * 64), Assembler::AVX_512bit);
__ evmovdquq(S6, Address(from, 6 * 64), Assembler::AVX_512bit);
__ evmovdquq(S7, Address(from, 7 * 64), Assembler::AVX_512bit);
__ leaq(from, Address(from, 8 * 64));
__ evpxorq(B0, S0, RK1, Assembler::AVX_512bit);
__ evpxorq(B1, S1, RK1, Assembler::AVX_512bit);
__ evpxorq(B2, S2, RK1, Assembler::AVX_512bit);
__ evpxorq(B3, S3, RK1, Assembler::AVX_512bit);
__ evpxorq(B4, S4, RK1, Assembler::AVX_512bit);
__ evpxorq(B5, S5, RK1, Assembler::AVX_512bit);
__ evpxorq(B6, S6, RK1, Assembler::AVX_512bit);
__ evpxorq(B7, S7, RK1, Assembler::AVX_512bit);
__ evalignq(IV, S0, IV, 0x06);
__ evalignq(S0, S1, S0, 0x06);
__ evalignq(S1, S2, S1, 0x06);
__ evalignq(S2, S3, S2, 0x06);
__ evalignq(S3, S4, S3, 0x06);
__ evalignq(S4, S5, S4, 0x06);
__ evalignq(S5, S6, S5, 0x06);
__ evalignq(S6, S7, S6, 0x06);
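// (Hedged reading of the VALIGNQ sequence above, assuming dst receives
// qwords imm8..imm8+7 of the src1:src2 concatenation: each ZMM register
// holds four 16-byte cipher blocks, and shifting by six qwords pulls in the
// trailing block of the previous register. Afterwards IV and S0..S6 hold,
// lane for lane, the ciphertext block *preceding* the one being decrypted
// into B0..B7 -- exactly the XOR operand CBC decryption consumes at Loop2.)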
roundDec(RK2);
roundDec(RK3);
roundDec(RK4);
roundDec(RK5);
roundDec(RK6);
roundDec(RK7);
roundDec(RK8);
roundDec(RK9);
roundDec(RK10);
__ cmpl(rounds, 44);
__ jcc(Assembler::belowEqual, L_128);
roundDec(RK11);
roundDec(RK12);
__ cmpl(rounds, 52);
__ jcc(Assembler::belowEqual, L_192);
roundDec(RK13);
roundDec(RK14);
__ BIND(L_256);
roundDeclast(RK0);
__ jmp(Loop2);
__ BIND(L_128);
roundDeclast(RK0);
__ jmp(Loop2);
__ BIND(L_192);
roundDeclast(RK0);
__ BIND(Loop2);
__ evpxorq(B0, B0, IV, Assembler::AVX_512bit);
__ evpxorq(B1, B1, S0, Assembler::AVX_512bit);
__ evpxorq(B2, B2, S1, Assembler::AVX_512bit);
__ evpxorq(B3, B3, S2, Assembler::AVX_512bit);
__ evpxorq(B4, B4, S3, Assembler::AVX_512bit);
__ evpxorq(B5, B5, S4, Assembler::AVX_512bit);
__ evpxorq(B6, B6, S5, Assembler::AVX_512bit);
__ evpxorq(B7, B7, S6, Assembler::AVX_512bit);
__ evmovdquq(IV, S7, Assembler::AVX_512bit);
__ evmovdquq(Address(to, 0 * 64), B0, Assembler::AVX_512bit);
__ evmovdquq(Address(to, 1 * 64), B1, Assembler::AVX_512bit);
__ evmovdquq(Address(to, 2 * 64), B2, Assembler::AVX_512bit);
__ evmovdquq(Address(to, 3 * 64), B3, Assembler::AVX_512bit);
__ evmovdquq(Address(to, 4 * 64), B4, Assembler::AVX_512bit);
__ evmovdquq(Address(to, 5 * 64), B5, Assembler::AVX_512bit);
__ evmovdquq(Address(to, 6 * 64), B6, Assembler::AVX_512bit);
__ evmovdquq(Address(to, 7 * 64), B7, Assembler::AVX_512bit);
__ leaq(to, Address(to, 8 * 64));
__ jmp(Loop);
__ BIND(Lcbc_dec_rem);
__ evshufi64x2(IV, IV, IV, 0x03, Assembler::AVX_512bit);
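// (Hedged: imm 0x03 copies the top 128-bit lane -- the most recent
// previous-ciphertext block -- into lane 0, where the single-block
// remainder loop below reads it.)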
__ BIND(Lcbc_dec_rem_loop);
__ subl(len_reg, 16);
__ jcc(Assembler::carrySet, Lcbc_dec_ret);
__ movdqu(S0, Address(from, 0));
__ evpxorq(B0, S0, RK1, Assembler::AVX_512bit);
__ vaesdec(B0, B0, RK2, Assembler::AVX_512bit);
__ vaesdec(B0, B0, RK3, Assembler::AVX_512bit);
__ vaesdec(B0, B0, RK4, Assembler::AVX_512bit);
__ vaesdec(B0, B0, RK5, Assembler::AVX_512bit);
__ vaesdec(B0, B0, RK6, Assembler::AVX_512bit);
__ vaesdec(B0, B0, RK7, Assembler::AVX_512bit);
__ vaesdec(B0, B0, RK8, Assembler::AVX_512bit);
__ vaesdec(B0, B0, RK9, Assembler::AVX_512bit);
__ vaesdec(B0, B0, RK10, Assembler::AVX_512bit);
__ cmpl(rounds, 44);
__ jcc(Assembler::belowEqual, Lcbc_dec_rem_last);
__ vaesdec(B0, B0, RK11, Assembler::AVX_512bit);
__ vaesdec(B0, B0, RK12, Assembler::AVX_512bit);
__ cmpl(rounds, 52);
__ jcc(Assembler::belowEqual, Lcbc_dec_rem_last);
__ vaesdec(B0, B0, RK13, Assembler::AVX_512bit);
__ vaesdec(B0, B0, RK14, Assembler::AVX_512bit);
__ BIND(Lcbc_dec_rem_last);
__ vaesdeclast(B0, B0, RK0, Assembler::AVX_512bit);
__ evpxorq(B0, B0, IV, Assembler::AVX_512bit);
__ evmovdquq(IV, S0, Assembler::AVX_512bit);
__ movdqu(Address(to, 0), B0);
__ leaq(from, Address(from, 16));
__ leaq(to, Address(to, 16));
__ jmp(Lcbc_dec_rem_loop);
__ BIND(Lcbc_dec_ret);
__ movdqu(Address(rvec, 0), IV);
// Zero out the round keys
__ evpxorq(RK0, RK0, RK0, Assembler::AVX_512bit);
__ evpxorq(RK1, RK1, RK1, Assembler::AVX_512bit);
__ evpxorq(RK2, RK2, RK2, Assembler::AVX_512bit);
__ evpxorq(RK3, RK3, RK3, Assembler::AVX_512bit);
__ evpxorq(RK4, RK4, RK4, Assembler::AVX_512bit);
__ evpxorq(RK5, RK5, RK5, Assembler::AVX_512bit);
__ evpxorq(RK6, RK6, RK6, Assembler::AVX_512bit);
__ evpxorq(RK7, RK7, RK7, Assembler::AVX_512bit);
__ evpxorq(RK8, RK8, RK8, Assembler::AVX_512bit);
__ evpxorq(RK9, RK9, RK9, Assembler::AVX_512bit);
__ evpxorq(RK10, RK10, RK10, Assembler::AVX_512bit);
__ cmpl(rounds, 44);
__ jcc(Assembler::belowEqual, Lcbc_exit);
__ evpxorq(RK11, RK11, RK11, Assembler::AVX_512bit);
__ evpxorq(RK12, RK12, RK12, Assembler::AVX_512bit);
__ cmpl(rounds, 52);
__ jcc(Assembler::belowEqual, Lcbc_exit);
__ evpxorq(RK13, RK13, RK13, Assembler::AVX_512bit);
__ evpxorq(RK14, RK14, RK14, Assembler::AVX_512bit);
__ BIND(Lcbc_exit);
__ pop(rbx);
#ifdef _WIN64
__ movl(rax, len_mem);
#else
__ pop(rax); // return length
#endif
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
return start;
}
// byte swap x86 long
address generate_ghash_long_swap_mask() {
__ align(CodeEntryAlignment);
@ -5078,7 +5384,11 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
if (VM_Version::supports_vaes() && VM_Version::supports_avx512vl() && VM_Version::supports_avx512dq()) {
StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptVectorAESCrypt();
} else {
StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
}
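// (Why three checks rather than VAES alone, hedged: the wide stub's EVEX
// helpers assert AVX512DQ -- see evbroadcasti64x2 above -- and its 128-bit
// operations on registers xmm16..xmm31 additionally require AVX512VL.)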
}
if (UseAESCTRIntrinsics){
StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask();

@ -770,7 +770,7 @@ void TemplateTable::iaload() {
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
__ access_load_at(T_INT, IN_HEAP | IN_HEAP_ARRAY, rax,
__ access_load_at(T_INT, IN_HEAP | IS_ARRAY, rax,
Address(rdx, rax, Address::times_4,
arrayOopDesc::base_offset_in_bytes(T_INT)),
noreg, noreg);
@ -783,7 +783,7 @@ void TemplateTable::laload() {
index_check(rdx, rax); // kills rbx
NOT_LP64(__ mov(rbx, rax));
// rbx,: index
__ access_load_at(T_LONG, IN_HEAP | IN_HEAP_ARRAY, noreg /* ltos */,
__ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, noreg /* ltos */,
Address(rdx, rbx, Address::times_8,
arrayOopDesc::base_offset_in_bytes(T_LONG)),
noreg, noreg);
@ -796,7 +796,7 @@ void TemplateTable::faload() {
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
__ access_load_at(T_FLOAT, IN_HEAP | IN_HEAP_ARRAY, noreg /* ftos */,
__ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, noreg /* ftos */,
Address(rdx, rax,
Address::times_4,
arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
@ -808,7 +808,7 @@ void TemplateTable::daload() {
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
__ access_load_at(T_DOUBLE, IN_HEAP | IN_HEAP_ARRAY, noreg /* dtos */,
__ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, noreg /* dtos */,
Address(rdx, rax,
Address::times_8,
arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
@ -825,7 +825,7 @@ void TemplateTable::aaload() {
UseCompressedOops ? Address::times_4 : Address::times_ptr,
arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
rax,
IN_HEAP_ARRAY);
IS_ARRAY);
}
void TemplateTable::baload() {
@ -833,7 +833,7 @@ void TemplateTable::baload() {
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
__ access_load_at(T_BYTE, IN_HEAP | IN_HEAP_ARRAY, rax,
__ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, rax,
Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)),
noreg, noreg);
}
@ -843,7 +843,7 @@ void TemplateTable::caload() {
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
__ access_load_at(T_CHAR, IN_HEAP | IN_HEAP_ARRAY, rax,
__ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, rax,
Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
noreg, noreg);
}
@ -858,7 +858,7 @@ void TemplateTable::fast_icaload() {
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
__ access_load_at(T_CHAR, IN_HEAP | IN_HEAP_ARRAY, rax,
__ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, rax,
Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
noreg, noreg);
}
@ -869,7 +869,7 @@ void TemplateTable::saload() {
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
__ access_load_at(T_SHORT, IN_HEAP | IN_HEAP_ARRAY, rax,
__ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, rax,
Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)),
noreg, noreg);
}
@ -1063,7 +1063,7 @@ void TemplateTable::iastore() {
// rbx: index
// rdx: array
index_check(rdx, rbx); // prefer index in rbx
__ access_store_at(T_INT, IN_HEAP | IN_HEAP_ARRAY,
__ access_store_at(T_INT, IN_HEAP | IS_ARRAY,
Address(rdx, rbx, Address::times_4,
arrayOopDesc::base_offset_in_bytes(T_INT)),
rax, noreg, noreg);
@ -1077,7 +1077,7 @@ void TemplateTable::lastore() {
// rdx: high(value)
index_check(rcx, rbx); // prefer index in rbx,
// rbx,: index
__ access_store_at(T_LONG, IN_HEAP | IN_HEAP_ARRAY,
__ access_store_at(T_LONG, IN_HEAP | IS_ARRAY,
Address(rcx, rbx, Address::times_8,
arrayOopDesc::base_offset_in_bytes(T_LONG)),
noreg /* ltos */, noreg, noreg);
@ -1091,7 +1091,7 @@ void TemplateTable::fastore() {
// rbx: index
// rdx: array
index_check(rdx, rbx); // prefer index in rbx
__ access_store_at(T_FLOAT, IN_HEAP | IN_HEAP_ARRAY,
__ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY,
Address(rdx, rbx, Address::times_4,
arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
noreg /* ftos */, noreg, noreg);
@ -1104,7 +1104,7 @@ void TemplateTable::dastore() {
// rbx: index
// rdx: array
index_check(rdx, rbx); // prefer index in rbx
__ access_store_at(T_DOUBLE, IN_HEAP | IN_HEAP_ARRAY,
__ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY,
Address(rdx, rbx, Address::times_8,
arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
noreg /* dtos */, noreg, noreg);
@ -1148,7 +1148,7 @@ void TemplateTable::aastore() {
__ movptr(rax, at_tos());
__ movl(rcx, at_tos_p1()); // index
// Now store using the appropriate barrier
do_oop_store(_masm, element_address, rax, IN_HEAP_ARRAY);
do_oop_store(_masm, element_address, rax, IS_ARRAY);
__ jmp(done);
// Have a NULL in rax, rdx=array, ecx=index. Store NULL at ary[idx]
@ -1156,7 +1156,7 @@ void TemplateTable::aastore() {
__ profile_null_seen(rbx);
// Store a NULL
do_oop_store(_masm, element_address, noreg, IN_HEAP_ARRAY);
do_oop_store(_masm, element_address, noreg, IS_ARRAY);
// Pop stack arguments
__ bind(done);
@ -1180,7 +1180,7 @@ void TemplateTable::bastore() {
__ jccb(Assembler::zero, L_skip);
__ andl(rax, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
__ bind(L_skip);
__ access_store_at(T_BYTE, IN_HEAP | IN_HEAP_ARRAY,
__ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY,
Address(rdx, rbx,Address::times_1,
arrayOopDesc::base_offset_in_bytes(T_BYTE)),
rax, noreg, noreg);
@ -1193,7 +1193,7 @@ void TemplateTable::castore() {
// rbx: index
// rdx: array
index_check(rdx, rbx); // prefer index in rbx
__ access_store_at(T_CHAR, IN_HEAP | IN_HEAP_ARRAY,
__ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY,
Address(rdx, rbx, Address::times_2,
arrayOopDesc::base_offset_in_bytes(T_CHAR)),
rax, noreg, noreg);
@ -3792,30 +3792,61 @@ void TemplateTable::invokeinterface(int byte_no) {
prepare_invoke(byte_no, rax, rbx, // get f1 Klass*, f2 Method*
rcx, rdx); // recv, flags
// rax: reference klass (from f1)
// rax: reference klass (from f1) if interface method
// rbx: method (from f2)
// rcx: receiver
// rdx: flags
// First check for Object case, then private interface method,
// then regular interface method.
// Special case of invokeinterface called for virtual method of
// java.lang.Object. See cpCacheOop.cpp for details.
// This code isn't produced by javac, but could be produced by
// another compliant java compiler.
Label notMethod;
// java.lang.Object. See cpCache.cpp for details.
Label notObjectMethod;
__ movl(rlocals, rdx);
__ andl(rlocals, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
__ jcc(Assembler::zero, notMethod);
__ jcc(Assembler::zero, notObjectMethod);
invokevirtual_helper(rbx, rcx, rdx);
__ bind(notMethod);
// no return from above
__ bind(notObjectMethod);
Label no_such_interface; // for receiver subtype check
Register recvKlass; // used for exception processing
// Check for private method invocation - indicated by vfinal
Label notVFinal;
__ movl(rlocals, rdx);
__ andl(rlocals, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
__ jcc(Assembler::zero, notVFinal);
// Get receiver klass into rlocals - also a null check
__ null_check(rcx, oopDesc::klass_offset_in_bytes());
__ load_klass(rlocals, rcx);
Label subtype;
__ check_klass_subtype(rlocals, rax, rbcp, subtype);
// If we get here the typecheck failed
recvKlass = rdx;
__ mov(recvKlass, rlocals); // shuffle receiver class for exception use
__ jmp(no_such_interface);
__ bind(subtype);
// do the call - rbx is actually the method to call
__ profile_final_call(rdx);
__ profile_arguments_type(rdx, rbx, rbcp, true);
__ jump_from_interpreted(rbx, rdx);
// no return from above
__ bind(notVFinal);
// Get receiver klass into rdx - also a null check
__ restore_locals(); // restore r14
__ null_check(rcx, oopDesc::klass_offset_in_bytes());
__ load_klass(rdx, rcx);
Label no_such_interface, no_such_method;
Label no_such_method;
// Preserve method for throw_AbstractMethodErrorVerbose.
__ mov(rcx, rbx);
@ -3877,12 +3908,12 @@ void TemplateTable::invokeinterface(int byte_no) {
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
// Pass arguments for generating a verbose error message.
#ifdef _LP64
Register recvKlass = c_rarg1;
recvKlass = c_rarg1;
Register method = c_rarg2;
if (recvKlass != rdx) { __ movq(recvKlass, rdx); }
if (method != rcx) { __ movq(method, rcx); }
#else
Register recvKlass = rdx;
recvKlass = rdx;
Register method = rcx;
#endif
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose),

@ -666,6 +666,7 @@ void VM_Version::get_processor_features() {
_features &= ~CPU_AVX512VL;
_features &= ~CPU_AVX512_VPOPCNTDQ;
_features &= ~CPU_VPCLMULQDQ;
_features &= ~CPU_VAES;
}
if (UseAVX < 2)

@ -335,6 +335,7 @@ protected:
#define CPU_VZEROUPPER ((uint64_t)UCONST64(0x1000000000)) // Vzeroupper instruction
#define CPU_AVX512_VPOPCNTDQ ((uint64_t)UCONST64(0x2000000000)) // Vector popcount
#define CPU_VPCLMULQDQ ((uint64_t)UCONST64(0x4000000000)) // Vector carryless multiplication
#define CPU_VAES ((uint64_t)UCONST64(0x8000000000)) // Vector AES instructions
enum Extended_Family {
// AMD
@ -545,6 +546,8 @@ protected:
result |= CPU_AVX512_VPOPCNTDQ;
if (_cpuid_info.sef_cpuid7_ecx.bits.vpclmulqdq != 0)
result |= CPU_VPCLMULQDQ;
if (_cpuid_info.sef_cpuid7_ecx.bits.vaes != 0)
result |= CPU_VAES;
}
}
if(_cpuid_info.sef_cpuid7_ebx.bits.bmi1 != 0)
@ -823,6 +826,7 @@ public:
static bool supports_vzeroupper() { return (_features & CPU_VZEROUPPER) != 0; }
static bool supports_vpopcntdq() { return (_features & CPU_AVX512_VPOPCNTDQ) != 0; }
static bool supports_vpclmulqdq() { return (_features & CPU_VPCLMULQDQ) != 0; }
static bool supports_vaes() { return (_features & CPU_VAES) != 0; }
// Intel features
static bool is_intel_family_core() { return is_intel() &&

@ -26,6 +26,7 @@
#include "precompiled.hpp"
#include "interpreter/cppInterpreterGenerator.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.inline.hpp"
@ -65,6 +66,37 @@ oop MethodHandles::popFromStack(TRAPS) {
}
void MethodHandles::throw_AME(Klass* rcvr, Method* interface_method, TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
// Set up the frame anchor if it isn't already
bool has_last_Java_frame = thread->has_last_Java_frame();
if (!has_last_Java_frame) {
intptr_t *sp = thread->zero_stack()->sp();
ZeroFrame *frame = thread->top_zero_frame();
while (frame) {
if (frame->is_interpreter_frame()) {
interpreterState istate =
frame->as_interpreter_frame()->interpreter_state();
if (istate->self_link() == istate)
break;
}
sp = ((intptr_t *) frame) + 1;
frame = frame->next();
}
assert(frame != NULL, "must be");
thread->set_last_Java_frame(frame, sp);
}
InterpreterRuntime::throw_AbstractMethodErrorVerbose(thread, rcvr, interface_method);
// Reset the frame anchor if necessary
if (!has_last_Java_frame) {
thread->reset_last_Java_frame();
}
}
int MethodHandles::method_handle_entry_invokeBasic(Method* method, intptr_t UNUSED, TRAPS) {
JavaThread *thread = (JavaThread *) THREAD;
@ -124,8 +156,15 @@ int MethodHandles::method_handle_entry_linkToInterface(Method* method, intptr_t
itableMethodEntry* im = ki->first_method_entry(recv->klass());
Method* vmtarget = im[vmindex].method();
invoke_target(vmtarget, THREAD);
// Check that the vmtarget entry is non-null. A null entry means
// that the method no longer exists (got deleted) or is private.
// Private class methods can never be an implementation of an
// interface method. In those cases, throw AME.
if (vmtarget != NULL) {
invoke_target(vmtarget, THREAD);
} else {
throw_AME(recv->klass(), target, THREAD);
}
return 0;
}

@ -32,6 +32,7 @@ enum /* platform_dependent_constants */ {
private:
static oop popFromStack(TRAPS);
static void invoke_target(Method* method, TRAPS);
static void throw_AME(Klass* rcvr, Method* interface_method, TRAPS);
static int method_handle_entry_invokeBasic(Method* method, intptr_t UNUSED, TRAPS);
static int method_handle_entry_linkToStaticOrSpecial(Method* method, intptr_t UNUSED, TRAPS);
static int method_handle_entry_linkToVirtual(Method* method, intptr_t UNUSED, TRAPS);

@ -96,11 +96,11 @@
address os::current_stack_pointer() {
#ifdef SPARC_WORKS
register void *esp;
__asm__("mov %%"SPELL_REG_SP", %0":"=r"(esp));
__asm__("mov %%" SPELL_REG_SP ", %0":"=r"(esp));
return (address) ((char*)esp + sizeof(long)*2);
#elif defined(__clang__)
intptr_t* esp;
__asm__ __volatile__ ("mov %%"SPELL_REG_SP", %0":"=r"(esp):);
__asm__ __volatile__ ("mov %%" SPELL_REG_SP ", %0":"=r"(esp):);
return (address) esp;
#else
register void *esp __asm__ (SPELL_REG_SP);
@ -234,10 +234,10 @@ frame os::get_sender_for_C_frame(frame* fr) {
intptr_t* _get_previous_fp() {
#ifdef SPARC_WORKS
register intptr_t **ebp;
__asm__("mov %%"SPELL_REG_FP", %0":"=r"(ebp));
__asm__("mov %%" SPELL_REG_FP ", %0":"=r"(ebp));
#elif defined(__clang__)
intptr_t **ebp;
__asm__ __volatile__ ("mov %%"SPELL_REG_FP", %0":"=r"(ebp):);
__asm__ __volatile__ ("mov %%" SPELL_REG_FP ", %0":"=r"(ebp):);
#else
register intptr_t **ebp __asm__ (SPELL_REG_FP);
#endif

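The added spaces around SPELL_REG_SP / SPELL_REG_FP are a C++11 conformance fix: a suffix glued directly onto a string literal is parsed as a user-defined-literal suffix, so the old spelling no longer compiles with newer GCC/Clang. A minimal illustration:

    #include <cstdio>

    #define SPELL_REG_SP "rsp"

    // Old spelling: "mov %%"SPELL_REG_SP", %0"
    // -> since C++11, SPELL_REG_SP is lexed as a ud-suffix of the first
    //    literal, making the line ill-formed.
    const char* insn = "mov %%" SPELL_REG_SP ", %0";  // adjacent literals concatenate

    int main() {
      puts(insn);
      return 0;
    }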
@ -1602,7 +1602,7 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
}
DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY;
DecoratorSet decorators = IN_HEAP | IS_ARRAY;
if (x->check_boolean()) {
decorators |= C1_MASK_BOOLEAN;
}
@ -1847,7 +1847,7 @@ void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
}
}
DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY;
DecoratorSet decorators = IN_HEAP | IS_ARRAY;
LIR_Opr result = rlock_result(x, x->elt_type());
access_load_at(decorators, x->elt_type(),

@ -70,22 +70,22 @@
ciField::ciField(ciInstanceKlass* klass, int index) :
_known_to_link_with_put(NULL), _known_to_link_with_get(NULL) {
ASSERT_IN_VM;
CompilerThread *thread = CompilerThread::current();
CompilerThread *THREAD = CompilerThread::current();
assert(ciObjectFactory::is_initialized(), "not a shared field");
assert(klass->get_instanceKlass()->is_linked(), "must be linked before using its constant-pool");
constantPoolHandle cpool(thread, klass->get_instanceKlass()->constants());
constantPoolHandle cpool(THREAD, klass->get_instanceKlass()->constants());
// Get the field's name, signature, and type.
Symbol* name = cpool->name_ref_at(index);
_name = ciEnv::current(thread)->get_symbol(name);
_name = ciEnv::current(THREAD)->get_symbol(name);
int nt_index = cpool->name_and_type_ref_index_at(index);
int sig_index = cpool->signature_ref_index_at(nt_index);
Symbol* signature = cpool->symbol_at(sig_index);
_signature = ciEnv::current(thread)->get_symbol(signature);
_signature = ciEnv::current(THREAD)->get_symbol(signature);
BasicType field_type = FieldType::basic_type(signature);
@ -95,12 +95,12 @@ ciField::ciField(ciInstanceKlass* klass, int index) :
bool ignore;
// This is not really a class reference; the index always refers to the
// field's type signature, as a symbol. Linkage checks do not apply.
_type = ciEnv::current(thread)->get_klass_by_index(cpool, sig_index, ignore, klass);
_type = ciEnv::current(THREAD)->get_klass_by_index(cpool, sig_index, ignore, klass);
} else {
_type = ciType::make(field_type);
}
_name = (ciSymbol*)ciEnv::current(thread)->get_symbol(name);
_name = (ciSymbol*)ciEnv::current(THREAD)->get_symbol(name);
// Get the field's declared holder.
//
@ -109,7 +109,7 @@ ciField::ciField(ciInstanceKlass* klass, int index) :
int holder_index = cpool->klass_ref_index_at(index);
bool holder_is_accessible;
ciKlass* generic_declared_holder = ciEnv::current(thread)->get_klass_by_index(cpool, holder_index,
ciKlass* generic_declared_holder = ciEnv::current(THREAD)->get_klass_by_index(cpool, holder_index,
holder_is_accessible,
klass);
@ -126,7 +126,7 @@ ciField::ciField(ciInstanceKlass* klass, int index) :
// handling in ciField::will_link and will result in a
// java.lang.NoSuchFieldError exception being thrown by the compiled
// code (the expected behavior in this case).
_holder = ciEnv::current(thread)->Object_klass();
_holder = ciEnv::current(THREAD)->Object_klass();
_offset = -1;
_is_constant = false;
return;
@ -164,10 +164,22 @@ ciField::ciField(ciInstanceKlass* klass, int index) :
// to check access because it can erroneously succeed. If this check fails,
// propagate the declared holder to will_link() which in turn will bail out
// compilation for this field access.
if (!Reflection::verify_field_access(klass->get_Klass(), declared_holder->get_Klass(), canonical_holder, field_desc.access_flags(), true)) {
bool can_access = Reflection::verify_member_access(klass->get_Klass(),
declared_holder->get_Klass(),
canonical_holder,
field_desc.access_flags(),
true, false, THREAD);
if (!can_access) {
_holder = declared_holder;
_offset = -1;
_is_constant = false;
// It's possible the access check failed due to a nestmate access check
// encountering an exception. We can't propagate the exception from here
// so we have to clear it. If the access check happens again in a different
// context then the exception will be thrown there.
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
}
return;
}

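The CLEAR_PENDING_EXCEPTION step above matters because the compiler thread cannot propagate a Java exception raised by a nestmate access check. A standalone sketch of the pattern, using a toy Thread and stand-in macros rather than the real HotSpot TRAPS machinery:

#include <cstdio>

// Simplified stand-ins for HotSpot's Thread and exception macros.
struct Thread {
  const char* pending_exception = nullptr;
};

#define HAS_PENDING_EXCEPTION   (THREAD->pending_exception != nullptr)
#define CLEAR_PENDING_EXCEPTION (THREAD->pending_exception = nullptr)

// Models an access check that can fail by raising an exception.
bool verify_member_access(Thread* THREAD, bool nestmate_check_fails) {
  if (nestmate_check_fails) {
    THREAD->pending_exception = "LinkageError";  // check raised an exception
    return false;
  }
  return true;
}

int main() {
  Thread t;
  Thread* THREAD = &t;
  if (!verify_member_access(THREAD, true)) {
    // The caller cannot propagate a Java exception from this context, so
    // the failed check's exception is cleared; it will be thrown again if
    // the access is attempted in a context that can handle it.
    if (HAS_PENDING_EXCEPTION) {
      CLEAR_PENDING_EXCEPTION;
    }
    puts("access denied; pending exception cleared");
  }
  return 0;
}
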
@ -955,9 +955,10 @@ void ClassFileParser::parse_interfaces(const ClassFileStream* const stream,
if (!interf->is_interface()) {
THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(),
err_msg("Class %s can not implement %s, because it is not an interface",
err_msg("class %s can not implement %s, because it is not an interface (%s)",
_class_name->as_klass_external_name(),
interf->class_loader_and_module_name()));
interf->external_name(),
interf->class_in_module_of_loader()));
}
if (InstanceKlass::cast(interf)->has_nonstatic_concrete_methods()) {
@ -3148,7 +3149,6 @@ u2 ClassFileParser::parse_classfile_inner_classes_attribute(const ClassFileStrea
_inner_classes = inner_classes;
int index = 0;
const int cp_size = _cp->length();
cfs->guarantee_more(8 * length, CHECK_0); // 4-tuples of u2
for (int n = 0; n < length; n++) {
// Inner class index
@ -3222,6 +3222,38 @@ u2 ClassFileParser::parse_classfile_inner_classes_attribute(const ClassFileStrea
return length;
}
u2 ClassFileParser::parse_classfile_nest_members_attribute(const ClassFileStream* const cfs,
const u1* const nest_members_attribute_start,
TRAPS) {
const u1* const current_mark = cfs->current();
u2 length = 0;
if (nest_members_attribute_start != NULL) {
cfs->set_current(nest_members_attribute_start);
cfs->guarantee_more(2, CHECK_0); // length
length = cfs->get_u2_fast();
}
const int size = length;
Array<u2>* const nest_members = MetadataFactory::new_array<u2>(_loader_data, size, CHECK_0);
_nest_members = nest_members;
int index = 0;
cfs->guarantee_more(2 * length, CHECK_0);
for (int n = 0; n < length; n++) {
const u2 class_info_index = cfs->get_u2_fast();
check_property(
valid_klass_reference_at(class_info_index),
"Nest member class_info_index %u has bad constant type in class file %s",
class_info_index, CHECK_0);
nest_members->at_put(index++, class_info_index);
}
assert(index == size, "wrong size");
// Restore buffer's current position.
cfs->set_current(current_mark);
return length;
}
void ClassFileParser::parse_classfile_synthetic_attribute(TRAPS) {
set_class_synthetic_flag(true);
}
@ -3329,10 +3361,14 @@ void ClassFileParser::parse_classfile_attributes(const ClassFileStream* const cf
// Set inner classes attribute to default sentinel
_inner_classes = Universe::the_empty_short_array();
// Set nest members attribute to default sentinel
_nest_members = Universe::the_empty_short_array();
cfs->guarantee_more(2, CHECK); // attributes_count
u2 attributes_count = cfs->get_u2_fast();
bool parsed_sourcefile_attribute = false;
bool parsed_innerclasses_attribute = false;
bool parsed_nest_members_attribute = false;
bool parsed_nest_host_attribute = false;
bool parsed_enclosingmethod_attribute = false;
bool parsed_bootstrap_methods_attribute = false;
const u1* runtime_visible_annotations = NULL;
@ -3350,6 +3386,9 @@ void ClassFileParser::parse_classfile_attributes(const ClassFileStream* const cf
u4 inner_classes_attribute_length = 0;
u2 enclosing_method_class_index = 0;
u2 enclosing_method_method_index = 0;
const u1* nest_members_attribute_start = NULL;
u4 nest_members_attribute_length = 0;
// Iterate over attributes
while (attributes_count--) {
cfs->guarantee_more(6, CHECK); // attribute_name_index, attribute_length
@ -3498,6 +3537,40 @@ void ClassFileParser::parse_classfile_attributes(const ClassFileStream* const cf
assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
}
cfs->skip_u1(attribute_length, CHECK);
} else if (_major_version >= JAVA_11_VERSION) {
if (tag == vmSymbols::tag_nest_members()) {
// Check for NestMembers tag
if (parsed_nest_members_attribute) {
classfile_parse_error("Multiple NestMembers attributes in class file %s", CHECK);
} else {
parsed_nest_members_attribute = true;
}
if (parsed_nest_host_attribute) {
classfile_parse_error("Conflicting NestHost and NestMembers attributes in class file %s", CHECK);
}
nest_members_attribute_start = cfs->current();
nest_members_attribute_length = attribute_length;
cfs->skip_u1(nest_members_attribute_length, CHECK);
} else if (tag == vmSymbols::tag_nest_host()) {
if (parsed_nest_host_attribute) {
classfile_parse_error("Multiple NestHost attributes in class file %s", CHECK);
} else {
parsed_nest_host_attribute = true;
}
if (parsed_nest_members_attribute) {
classfile_parse_error("Conflicting NestMembers and NestHost attributes in class file %s", CHECK);
}
if (_need_verify) {
guarantee_property(attribute_length == 2, "Wrong NestHost attribute length in class file %s", CHECK);
}
cfs->guarantee_more(2, CHECK);
u2 class_info_index = cfs->get_u2_fast();
check_property(
valid_klass_reference_at(class_info_index),
"Nest-host class_info_index %u has bad constant type in class file %s",
class_info_index, CHECK);
_nest_host = class_info_index;
}
} else {
// Unknown attribute
cfs->skip_u1(attribute_length, CHECK);
@ -3526,13 +3599,25 @@ void ClassFileParser::parse_classfile_attributes(const ClassFileStream* const cf
enclosing_method_class_index,
enclosing_method_method_index,
CHECK);
if (parsed_innerclasses_attribute &&_need_verify && _major_version >= JAVA_1_5_VERSION) {
if (parsed_innerclasses_attribute && _need_verify && _major_version >= JAVA_1_5_VERSION) {
guarantee_property(
inner_classes_attribute_length == sizeof(num_of_classes) + 4 * sizeof(u2) * num_of_classes,
"Wrong InnerClasses attribute length in class file %s", CHECK);
}
}
if (parsed_nest_members_attribute) {
const u2 num_of_classes = parse_classfile_nest_members_attribute(
cfs,
nest_members_attribute_start,
CHECK);
if (_need_verify) {
guarantee_property(
nest_members_attribute_length == sizeof(num_of_classes) + sizeof(u2) * num_of_classes,
"Wrong NestMembers attribute length in class file %s", CHECK);
}
}
if (_max_bootstrap_specifier_index >= 0) {
guarantee_property(parsed_bootstrap_methods_attribute,
"Missing BootstrapMethods attribute in class file %s", CHECK);
@ -3595,6 +3680,8 @@ void ClassFileParser::apply_parsed_class_metadata(
this_klass->set_fields(_fields, java_fields_count);
this_klass->set_methods(_methods);
this_klass->set_inner_classes(_inner_classes);
this_klass->set_nest_members(_nest_members);
this_klass->set_nest_host_index(_nest_host);
this_klass->set_local_interfaces(_local_interfaces);
this_klass->set_annotations(_combined_annotations);
// Delay the setting of _transitive_interfaces until after initialize_supers() in
@ -4605,24 +4692,26 @@ static void check_final_method_override(const InstanceKlass* this_klass, TRAPS)
}
if (super_m->is_final() && !super_m->is_static() &&
// matching method in super is final, and not static
(Reflection::verify_field_access(this_klass,
super_m->method_holder(),
super_m->method_holder(),
super_m->access_flags(), false))
// this class can access super final method and therefore override
) {
ResourceMark rm(THREAD);
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_VerifyError(),
"class %s overrides final method %s.%s%s",
this_klass->external_name(),
super_m->method_holder()->external_name(),
name->as_C_string(),
signature->as_C_string()
);
return;
!super_m->access_flags().is_private()) {
// matching method in super is final, and not static or private
bool can_access = Reflection::verify_member_access(this_klass,
super_m->method_holder(),
super_m->method_holder(),
super_m->access_flags(),
false, false, CHECK);
if (can_access) {
// this class can access super final method and therefore override
ResourceMark rm(THREAD);
Exceptions::fthrow(THREAD_AND_LOCATION,
vmSymbols::java_lang_VerifyError(),
"class %s overrides final method %s.%s%s",
this_klass->external_name(),
super_m->method_holder()->external_name(),
name->as_C_string(),
signature->as_C_string()
);
return;
}
}
// continue to look from super_m's holder's super.
@ -5470,6 +5559,7 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik, bool changed_by_loa
assert(NULL == _fields, "invariant");
assert(NULL == _methods, "invariant");
assert(NULL == _inner_classes, "invariant");
assert(NULL == _nest_members, "invariant");
assert(NULL == _local_interfaces, "invariant");
assert(NULL == _combined_annotations, "invariant");
@ -5739,6 +5829,8 @@ ClassFileParser::ClassFileParser(ClassFileStream* stream,
_fields(NULL),
_methods(NULL),
_inner_classes(NULL),
_nest_members(NULL),
_nest_host(0),
_local_interfaces(NULL),
_transitive_interfaces(NULL),
_combined_annotations(NULL),
@ -5843,6 +5935,7 @@ void ClassFileParser::clear_class_metadata() {
_fields = NULL;
_methods = NULL;
_inner_classes = NULL;
_nest_members = NULL;
_local_interfaces = NULL;
_combined_annotations = NULL;
_annotations = _type_annotations = NULL;
@ -5868,6 +5961,10 @@ ClassFileParser::~ClassFileParser() {
MetadataFactory::free_array<u2>(_loader_data, _inner_classes);
}
if (_nest_members != NULL && _nest_members != Universe::the_empty_short_array()) {
MetadataFactory::free_array<u2>(_loader_data, _nest_members);
}
// Free interfaces
InstanceKlass::deallocate_interfaces(_loader_data, _super_klass,
_local_interfaces, _transitive_interfaces);

@ -97,6 +97,8 @@ class ClassFileParser {
Array<u2>* _fields;
Array<Method*>* _methods;
Array<u2>* _inner_classes;
Array<u2>* _nest_members;
u2 _nest_host;
Array<Klass*>* _local_interfaces;
Array<Klass*>* _transitive_interfaces;
Annotations* _combined_annotations;
@ -290,6 +292,10 @@ class ClassFileParser {
u2 enclosing_method_method_index,
TRAPS);
u2 parse_classfile_nest_members_attribute(const ClassFileStream* const cfs,
const u1* const nest_members_attribute_start,
TRAPS);
void parse_classfile_attributes(const ClassFileStream* const cfs,
ConstantPool* cp,
ClassAnnotationCollector* parsed_annotations,

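parse_classfile_nest_members_attribute above follows the usual class-file attribute layout: a u2 count followed by that many u2 constant-pool indices, all big-endian. A minimal sketch of reading such a payload outside the VM; get_u2 and the sample bytes are illustrative assumptions, not JDK code:

#include <cstdint>
#include <cstdio>
#include <vector>

// Reads one big-endian u2 and advances the cursor.
static uint16_t get_u2(const uint8_t*& p) {
  uint16_t v = (uint16_t)((p[0] << 8) | p[1]);
  p += 2;
  return v;
}

int main() {
  // NestMembers attribute payload: u2 number_of_classes, then that many
  // u2 constant-pool indices. Here: two members, at cp indices 7 and 9.
  const uint8_t payload[] = {0x00, 0x02, 0x00, 0x07, 0x00, 0x09};
  const uint8_t* p = payload;

  uint16_t length = get_u2(p);
  std::vector<uint16_t> nest_members;
  for (uint16_t n = 0; n < length; n++) {
    uint16_t class_info_index = get_u2(p);
    // A real parser must also verify the index names a CONSTANT_Class entry,
    // as the check_property() call in the hunk above does.
    nest_members.push_back(class_info_index);
  }

  for (uint16_t idx : nest_members) printf("nest member cp index: %u\n", idx);
  return 0;
}
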
@ -109,10 +109,9 @@ void ClassLoaderData::init_null_class_loader_data() {
// it will be available for error messages, logging, JFR, etc. The name
// and klass are available after the class_loader oop is no longer alive,
// during unloading.
void ClassLoaderData::initialize_name_and_klass(Handle class_loader) {
void ClassLoaderData::initialize_name(Handle class_loader) {
Thread* THREAD = Thread::current();
ResourceMark rm(THREAD);
_class_loader_klass = class_loader->klass();
// Obtain the class loader's name. If the class loader's name was not
// explicitly set during construction, the CLD's _name field will be null.
@ -159,6 +158,7 @@ ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous) :
if (!h_class_loader.is_null()) {
_class_loader = _handles.add(h_class_loader());
_class_loader_klass = h_class_loader->klass();
}
if (!is_anonymous) {
@ -951,9 +951,11 @@ const char* ClassLoaderData::loader_name() const {
const char* ClassLoaderData::loader_name_and_id() const {
if (_class_loader_klass == NULL) {
return "'" BOOTSTRAP_LOADER_NAME "'";
} else {
assert(_name_and_id != NULL, "encountered a class loader null name and id");
} else if (_name_and_id != NULL) {
return _name_and_id->as_C_string();
} else {
// May be called in a race before _name_and_id is initialized.
return _class_loader_klass->external_name();
}
}
@ -1069,10 +1071,10 @@ ClassLoaderData* ClassLoaderDataGraph::add_to_graph(Handle loader, bool is_anony
ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous) {
ClassLoaderData* loader_data = add_to_graph(loader, is_anonymous);
// Initialize name and class after the loader data is added to the CLDG
// because adding the Symbol for the name might safepoint.
// Initialize _name and _name_and_id after the loader data is added to the
// CLDG because adding the Symbol for _name and _name_and_id might safepoint.
if (loader.not_null()) {
loader_data->initialize_name_and_klass(loader);
loader_data->initialize_name(loader);
}
return loader_data;
}

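Because _name_and_id is now initialized only after the CLD is published to the graph, loader_name_and_id() must tolerate a reader racing ahead of initialization. A simplified model of that fallback logic; the types here are stand-ins, not the real ClassLoaderData:

#include <cstdio>

struct Klass { const char* external_name; };

struct ClassLoaderData {
  Klass* _class_loader_klass = nullptr;
  const char* _name_and_id = nullptr;

  const char* loader_name_and_id() const {
    if (_class_loader_klass == nullptr) {
      return "'bootstrap'";
    } else if (_name_and_id != nullptr) {
      return _name_and_id;
    } else {
      // May be called in a race before _name_and_id is initialized:
      // fall back to the loader's class name rather than asserting.
      return _class_loader_klass->external_name;
    }
  }
};

int main() {
  Klass app = {"jdk.internal.loader.ClassLoaders$AppClassLoader"};
  ClassLoaderData cld;
  printf("%s\n", cld.loader_name_and_id());  // bootstrap
  cld._class_loader_klass = &app;
  printf("%s\n", cld.loader_name_and_id());  // race window: klass name
  cld._name_and_id = "'app'";
  printf("%s\n", cld.loader_name_and_id());  // fully initialized name
  return 0;
}
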
@ -210,7 +210,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
friend class ClassLoaderDataGraphKlassIteratorAtomic;
friend class ClassLoaderDataGraphKlassIteratorStatic;
friend class ClassLoaderDataGraphMetaspaceIterator;
friend class InstanceKlass;
friend class Klass;
friend class MetaDataFactory;
friend class Method;
@ -305,7 +305,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
Dictionary* create_dictionary();
void initialize_name_and_klass(Handle class_loader);
void initialize_name(Handle class_loader);
public:
// GC interface.
void clear_claimed() { _claimed = 0; }

@ -40,10 +40,12 @@
#include "jfr/support/jfrTraceIdExtension.hpp"
#endif
#define UNNAMED_MODULE "Unnamed Module"
#define UNNAMED_MODULE "unnamed module"
#define UNNAMED_MODULE_LEN 14
#define JAVAPKG "java"
#define JAVAPKG_LEN 4
#define JAVA_BASE_NAME "java.base"
#define JAVA_BASE_NAME_LEN 9
class ModuleClosure;

@ -245,10 +245,6 @@ Klass* SystemDictionary::resolve_or_fail(Symbol* class_name,
// Forwards to resolve_instance_class_or_null
Klass* SystemDictionary::resolve_or_null(Symbol* class_name, Handle class_loader, Handle protection_domain, TRAPS) {
assert(THREAD->can_call_java(),
"can not load classes with compiler thread: class=%s, classloader=%s",
class_name->as_C_string(),
class_loader.is_null() ? "null" : class_loader->klass()->name()->as_C_string());
if (FieldType::is_array(class_name)) {
return resolve_array_class_or_null(class_name, class_loader, protection_domain, THREAD);
} else if (FieldType::is_obj(class_name)) {
@ -692,6 +688,10 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
PlaceholderEntry* placeholder;
Symbol* superclassname = NULL;
assert(THREAD->can_call_java(),
"can not load classes with compiler thread: class=%s, classloader=%s",
name->as_C_string(),
class_loader.is_null() ? "null" : class_loader->klass()->name()->as_C_string());
{
MutexLocker mu(SystemDictionary_lock, THREAD);
InstanceKlass* check = find_class(d_hash, name, dictionary);

@ -142,6 +142,8 @@
/* class file format tags */ \
template(tag_source_file, "SourceFile") \
template(tag_inner_classes, "InnerClasses") \
template(tag_nest_members, "NestMembers") \
template(tag_nest_host, "NestHost") \
template(tag_constant_value, "ConstantValue") \
template(tag_code, "Code") \
template(tag_exceptions, "Exceptions") \

@ -33,6 +33,7 @@
#include "compiler/compileBroker.hpp"
#include "compiler/compileTask.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/flags/flagSetting.hpp"
@ -1228,8 +1229,9 @@ class ClassHierarchyWalker {
} else if (!k->is_instance_klass()) {
return false; // no methods to find in an array type
} else {
// Search class hierarchy first.
Method* m = InstanceKlass::cast(k)->find_instance_method(_name, _signature);
// Search class hierarchy first, skipping private implementations
// as they never override any inherited methods
Method* m = InstanceKlass::cast(k)->find_instance_method(_name, _signature, Klass::skip_private);
if (!Dependencies::is_concrete_method(m, k)) {
// Check for re-abstraction of method
if (!k->is_interface() && m != NULL && m->is_abstract()) {

@ -2599,6 +2599,16 @@ void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin,
st->move_to(column);
if (sd->bci() == SynchronizationEntryBCI) {
st->print(";*synchronization entry");
} else if (sd->bci() == AfterBci) {
st->print(";* method exit (unlocked if synchronized)");
} else if (sd->bci() == UnwindBci) {
st->print(";* unwind (locked if synchronized)");
} else if (sd->bci() == AfterExceptionBci) {
st->print(";* unwind (unlocked if synchronized)");
} else if (sd->bci() == UnknownBci) {
st->print(";* unknown");
} else if (sd->bci() == InvalidFrameStateBci) {
st->print(";* invalid frame state");
} else {
if (sd->method() == NULL) {
st->print("method is NULL");

@ -41,7 +41,13 @@ inline const char* compilertype2name(CompilerType t) { return (uint)t < compiler
// Handy constants for deciding which compiler mode to use.
enum MethodCompilation {
InvocationEntryBci = -1 // i.e., not an on-stack replacement compilation
InvocationEntryBci = -1, // i.e., not an on-stack replacement compilation
BeforeBci = InvocationEntryBci,
AfterBci = -2,
UnwindBci = -3,
AfterExceptionBci = -4,
UnknownBci = -5,
InvalidFrameStateBci = -6
};
// Enumeration to distinguish tiers of compilation

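The new negative sentinels give scope descriptors a way to label positions that have no real bytecode index, which the nmethod printer in the previous hunk maps to human-readable comments. A compilable sketch of that mapping; bci_label is an illustrative helper, not a HotSpot function:

#include <cstdio>

// Values mirror the MethodCompilation enum above.
enum MethodCompilation {
  InvocationEntryBci   = -1,
  BeforeBci            = InvocationEntryBci,
  AfterBci             = -2,
  UnwindBci            = -3,
  AfterExceptionBci    = -4,
  UnknownBci           = -5,
  InvalidFrameStateBci = -6
};

const char* bci_label(int bci) {
  switch (bci) {
    case AfterBci:             return "method exit (unlocked if synchronized)";
    case UnwindBci:            return "unwind (locked if synchronized)";
    case AfterExceptionBci:    return "unwind (unlocked if synchronized)";
    case UnknownBci:           return "unknown";
    case InvalidFrameStateBci: return "invalid frame state";
    default:                   return bci >= 0 ? "bytecode index"
                                               : "method entry";
  }
}

int main() {
  for (int bci = -6; bci <= 0; ++bci)
    printf("%2d -> %s\n", bci, bci_label(bci));
  return 0;
}
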
@ -44,22 +44,17 @@ class ParMarkFromRootsClosure;
void do_oop(oop obj); \
template <class T> inline void do_oop_work(T* p);
// TODO: This duplication of the MetadataAwareOopClosure class is only needed
// TODO: This duplication of the MetadataVisitingOopIterateClosure class is only needed
// because some CMS OopClosures derive from OopsInGenClosure. It would be
// good to get rid of them completely.
class MetadataAwareOopsInGenClosure: public OopsInGenClosure {
class MetadataVisitingOopsInGenClosure: public OopsInGenClosure {
public:
virtual bool do_metadata() { return do_metadata_nv(); }
inline bool do_metadata_nv() { return true; }
virtual bool do_metadata() { return true; }
virtual void do_klass(Klass* k);
void do_klass_nv(Klass* k);
virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
void do_cld_nv(ClassLoaderData* cld);
virtual void do_cld(ClassLoaderData* cld);
};
class MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
class MarkRefsIntoClosure: public MetadataVisitingOopsInGenClosure {
private:
const MemRegion _span;
CMSBitMap* _bitMap;
@ -71,7 +66,7 @@ class MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
virtual void do_oop(narrowOop* p);
};
class ParMarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
class ParMarkRefsIntoClosure: public MetadataVisitingOopsInGenClosure {
private:
const MemRegion _span;
CMSBitMap* _bitMap;
@ -85,7 +80,7 @@ class ParMarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
// A variant of the above used in certain kinds of CMS
// marking verification.
class MarkRefsIntoVerifyClosure: public MetadataAwareOopsInGenClosure {
class MarkRefsIntoVerifyClosure: public MetadataVisitingOopsInGenClosure {
private:
const MemRegion _span;
CMSBitMap* _verification_bm;
@ -100,7 +95,7 @@ class MarkRefsIntoVerifyClosure: public MetadataAwareOopsInGenClosure {
};
// The non-parallel version (the parallel version appears further below).
class PushAndMarkClosure: public MetadataAwareOopClosure {
class PushAndMarkClosure: public MetadataVisitingOopIterateClosure {
private:
CMSCollector* _collector;
MemRegion _span;
@ -120,8 +115,6 @@ class PushAndMarkClosure: public MetadataAwareOopClosure {
bool concurrent_precleaning);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
};
// In the parallel case, the bit map and the
@ -130,7 +123,7 @@ class PushAndMarkClosure: public MetadataAwareOopClosure {
// synchronization (for instance, via CAS). The marking stack
// used in the non-parallel case above is here replaced with
// an OopTaskQueue structure to allow efficient work stealing.
class ParPushAndMarkClosure: public MetadataAwareOopClosure {
class ParPushAndMarkClosure: public MetadataVisitingOopIterateClosure {
private:
CMSCollector* _collector;
MemRegion _span;
@ -146,12 +139,10 @@ class ParPushAndMarkClosure: public MetadataAwareOopClosure {
OopTaskQueue* work_queue);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
};
// The non-parallel version (the parallel version appears further below).
class MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
class MarkRefsIntoAndScanClosure: public MetadataVisitingOopsInGenClosure {
private:
MemRegion _span;
CMSBitMap* _bit_map;
@ -175,8 +166,6 @@ class MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
bool concurrent_precleaning);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
void set_freelistLock(Mutex* m) {
_freelistLock = m;
@ -192,7 +181,7 @@ class MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
// stack and the bitMap are shared, so access needs to be suitably
// synchronized. An OopTaskQueue structure, supporting efficient
// work stealing, replaces a CMSMarkStack for storing grey objects.
class ParMarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
class ParMarkRefsIntoAndScanClosure: public MetadataVisitingOopsInGenClosure {
private:
MemRegion _span;
CMSBitMap* _bit_map;
@ -209,8 +198,6 @@ class ParMarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
OopTaskQueue* work_queue);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
void trim_queue(uint size);
};
@ -218,7 +205,7 @@ class ParMarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure MarkFromRootsClosure.
class PushOrMarkClosure: public MetadataAwareOopClosure {
class PushOrMarkClosure: public MetadataVisitingOopIterateClosure {
private:
CMSCollector* _collector;
MemRegion _span;
@ -238,8 +225,6 @@ class PushOrMarkClosure: public MetadataAwareOopClosure {
MarkFromRootsClosure* parent);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
// Deal with a stack overflow condition
void handle_stack_overflow(HeapWord* lost);
@ -251,7 +236,7 @@ class PushOrMarkClosure: public MetadataAwareOopClosure {
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure ParMarkFromRootsClosure.
class ParPushOrMarkClosure: public MetadataAwareOopClosure {
class ParPushOrMarkClosure: public MetadataVisitingOopIterateClosure {
private:
CMSCollector* _collector;
MemRegion _whole_span;
@ -275,8 +260,6 @@ class ParPushOrMarkClosure: public MetadataAwareOopClosure {
ParMarkFromRootsClosure* parent);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
// Deal with a stack overflow condition
void handle_stack_overflow(HeapWord* lost);
@ -290,7 +273,7 @@ class ParPushOrMarkClosure: public MetadataAwareOopClosure {
// processing phase of the CMS final checkpoint step, as
// well as during the concurrent precleaning of the discovered
// reference lists.
class CMSKeepAliveClosure: public MetadataAwareOopClosure {
class CMSKeepAliveClosure: public MetadataVisitingOopIterateClosure {
private:
CMSCollector* _collector;
const MemRegion _span;
@ -306,11 +289,9 @@ class CMSKeepAliveClosure: public MetadataAwareOopClosure {
bool concurrent_precleaning() const { return _concurrent_precleaning; }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
};
class CMSInnerParMarkAndPushClosure: public MetadataAwareOopClosure {
class CMSInnerParMarkAndPushClosure: public MetadataVisitingOopIterateClosure {
private:
CMSCollector* _collector;
MemRegion _span;
@ -324,14 +305,12 @@ class CMSInnerParMarkAndPushClosure: public MetadataAwareOopClosure {
OopTaskQueue* work_queue);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
};
// A parallel (MT) version of the above, used when
// reference processing is parallel; the only difference
// is in the do_oop method.
class CMSParKeepAliveClosure: public MetadataAwareOopClosure {
class CMSParKeepAliveClosure: public MetadataVisitingOopIterateClosure {
private:
MemRegion _span;
OopTaskQueue* _work_queue;

@ -32,42 +32,38 @@
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
// MetadataAwareOopClosure and MetadataAwareOopsInGenClosure are duplicated,
// MetadataVisitingOopIterateClosure and MetadataVisitingOopsInGenClosure are duplicated,
// until we get rid of OopsInGenClosure.
inline void MetadataAwareOopsInGenClosure::do_klass_nv(Klass* k) {
inline void MetadataVisitingOopsInGenClosure::do_klass(Klass* k) {
ClassLoaderData* cld = k->class_loader_data();
do_cld_nv(cld);
MetadataVisitingOopsInGenClosure::do_cld(cld);
}
inline void MetadataAwareOopsInGenClosure::do_klass(Klass* k) { do_klass_nv(k); }
inline void MetadataAwareOopsInGenClosure::do_cld_nv(ClassLoaderData* cld) {
inline void MetadataVisitingOopsInGenClosure::do_cld(ClassLoaderData* cld) {
bool claim = true; // Must claim the class loader data before processing.
cld->oops_do(this, claim);
}
// Decode the oop and call do_oop on it.
#define DO_OOP_WORK_IMPL(cls) \
template <class T> void cls::do_oop_work(T* p) { \
T heap_oop = RawAccess<>::oop_load(p); \
if (!CompressedOops::is_null(heap_oop)) { \
oop obj = CompressedOops::decode_not_null(heap_oop); \
do_oop(obj); \
} \
}
#define DO_OOP_WORK_NV_IMPL(cls) \
DO_OOP_WORK_IMPL(cls) \
void cls::do_oop_nv(oop* p) { cls::do_oop_work(p); } \
void cls::do_oop_nv(narrowOop* p) { cls::do_oop_work(p); }
#define DO_OOP_WORK_IMPL(cls) \
template <class T> void cls::do_oop_work(T* p) { \
T heap_oop = RawAccess<>::oop_load(p); \
if (!CompressedOops::is_null(heap_oop)) { \
oop obj = CompressedOops::decode_not_null(heap_oop); \
do_oop(obj); \
} \
} \
inline void cls::do_oop(oop* p) { do_oop_work(p); } \
inline void cls::do_oop(narrowOop* p) { do_oop_work(p); }
DO_OOP_WORK_IMPL(MarkRefsIntoClosure)
DO_OOP_WORK_IMPL(ParMarkRefsIntoClosure)
DO_OOP_WORK_IMPL(MarkRefsIntoVerifyClosure)
DO_OOP_WORK_NV_IMPL(PushAndMarkClosure)
DO_OOP_WORK_NV_IMPL(ParPushAndMarkClosure)
DO_OOP_WORK_NV_IMPL(MarkRefsIntoAndScanClosure)
DO_OOP_WORK_NV_IMPL(ParMarkRefsIntoAndScanClosure)
DO_OOP_WORK_IMPL(PushAndMarkClosure)
DO_OOP_WORK_IMPL(ParPushAndMarkClosure)
DO_OOP_WORK_IMPL(MarkRefsIntoAndScanClosure)
DO_OOP_WORK_IMPL(ParMarkRefsIntoAndScanClosure)
// Trim our work_queue so its length is below max at return
inline void ParMarkRefsIntoAndScanClosure::trim_queue(uint max) {
@ -84,10 +80,10 @@ inline void ParMarkRefsIntoAndScanClosure::trim_queue(uint max) {
}
}
DO_OOP_WORK_NV_IMPL(PushOrMarkClosure)
DO_OOP_WORK_NV_IMPL(ParPushOrMarkClosure)
DO_OOP_WORK_NV_IMPL(CMSKeepAliveClosure)
DO_OOP_WORK_NV_IMPL(CMSInnerParMarkAndPushClosure)
DO_OOP_WORK_IMPL(PushOrMarkClosure)
DO_OOP_WORK_IMPL(ParPushOrMarkClosure)
DO_OOP_WORK_IMPL(CMSKeepAliveClosure)
DO_OOP_WORK_IMPL(CMSInnerParMarkAndPushClosure)
DO_OOP_WORK_IMPL(CMSParKeepAliveClosure)
#endif // SHARE_VM_GC_CMS_CMSOOPCLOSURES_INLINE_HPP

@ -1,63 +0,0 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_CMS_CMS_SPECIALIZED_OOP_CLOSURES_HPP
#define SHARE_GC_CMS_CMS_SPECIALIZED_OOP_CLOSURES_HPP
// The following OopClosure types get specialized versions of
// "oop_oop_iterate" that invoke the closures' do_oop methods
// non-virtually, using a mechanism defined in this file. Extend these
// macros in the obvious way to add specializations for new closures.
// Forward declarations.
// ParNew
class ParScanWithBarrierClosure;
class ParScanWithoutBarrierClosure;
// CMS
class MarkRefsIntoAndScanClosure;
class ParMarkRefsIntoAndScanClosure;
class PushAndMarkClosure;
class ParPushAndMarkClosure;
class PushOrMarkClosure;
class ParPushOrMarkClosure;
class CMSKeepAliveClosure;
class CMSInnerParMarkAndPushClosure;
#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_P(f) \
f(ParScanWithBarrierClosure,_nv) \
f(ParScanWithoutBarrierClosure,_nv)
#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(f) \
f(MarkRefsIntoAndScanClosure,_nv) \
f(ParMarkRefsIntoAndScanClosure,_nv) \
f(PushAndMarkClosure,_nv) \
f(ParPushAndMarkClosure,_nv) \
f(PushOrMarkClosure,_nv) \
f(ParPushOrMarkClosure,_nv) \
f(CMSKeepAliveClosure,_nv) \
f(CMSInnerParMarkAndPushClosure,_nv)
#endif // SHARE_GC_CMS_CMS_SPECIALIZED_OOP_CLOSURES_HPP

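The deleted file above was the heart of the old closure-specialization scheme: every closure needed hand-written do_oop_nv entry points wired up through these macros to avoid virtual dispatch. The replacement leans on templates instead: when the concrete closure type is visible at the call site, the compiler resolves do_oop statically and can inline it. A minimal sketch of the idea, assuming a drastically simplified closure hierarchy rather than the real OopIterateClosure/Devirtualizer classes:

#include <cstdio>

struct OopIterateClosure {
  virtual void do_oop(int* p) = 0;  // virtual fallback for generic callers
};

struct CountingClosure : public OopIterateClosure {
  int count = 0;
  void do_oop(int*) override { count++; }
};

// When the concrete closure type is a template parameter, the call below
// is bound statically (and can be inlined) -- no vtable lookup, which is
// what the removed _nv machinery achieved by hand.
template <typename ClosureType>
void iterate(ClosureType* cl, int* begin, int* end) {
  for (int* p = begin; p != end; ++p) {
    cl->do_oop(p);  // static type known => devirtualized by the compiler
  }
}

int main() {
  int heap[4] = {1, 2, 3, 4};
  CountingClosure cl;
  iterate(&cl, heap, heap + 4);
  printf("visited %d slots\n", cl.count);
  return 0;
}
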
@ -30,12 +30,14 @@
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/binaryTreeDictionary.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
@ -843,13 +845,13 @@ protected:
void walk_mem_region_with_cl_nopar(MemRegion mr, \
HeapWord* bottom, HeapWord* top, \
ClosureType* cl)
walk_mem_region_with_cl_DECL(ExtendedOopClosure);
walk_mem_region_with_cl_DECL(OopIterateClosure);
walk_mem_region_with_cl_DECL(FilteringClosure);
public:
FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
CMSCollector* collector,
ExtendedOopClosure* cl,
OopIterateClosure* cl,
CardTable::PrecisionStyle precision,
HeapWord* boundary,
bool parallel) :
@ -929,11 +931,11 @@ void FreeListSpaceDCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,
// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(OopIterateClosure)
FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
DirtyCardToOopClosure*
CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
CompactibleFreeListSpace::new_dcto_cl(OopIterateClosure* cl,
CardTable::PrecisionStyle precision,
HeapWord* boundary,
bool parallel) {
@ -965,7 +967,7 @@ void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
}
// Apply the given closure to each oop in the space.
void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
void CompactibleFreeListSpace::oop_iterate(OopIterateClosure* cl) {
assert_lock_strong(freelistLock());
HeapWord *cur, *limit;
size_t curSize;

@ -433,7 +433,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
Mutex* freelistLock() const { return &_freelistLock; }
// Iteration support
void oop_iterate(ExtendedOopClosure* cl);
void oop_iterate(OopIterateClosure* cl);
void object_iterate(ObjectClosure* blk);
// Apply the closure to each object in the space whose references
@ -463,7 +463,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
ObjectClosureCareful* cl);
// Override: provides a DCTO_CL specific to this kind of space.
DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
DirtyCardToOopClosure* new_dcto_cl(OopIterateClosure* cl,
CardTable::PrecisionStyle precision,
HeapWord* boundary,
bool parallel);

@ -2467,7 +2467,7 @@ bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
}
void
ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
ConcurrentMarkSweepGeneration::oop_iterate(OopIterateClosure* cl) {
if (freelistLock()->owned_by_self()) {
Generation::oop_iterate(cl);
} else {
@ -3305,7 +3305,7 @@ void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
pst->all_tasks_completed();
}
class ParConcMarkingClosure: public MetadataAwareOopClosure {
class ParConcMarkingClosure: public MetadataVisitingOopIterateClosure {
private:
CMSCollector* _collector;
CMSConcMarkingTask* _task;
@ -3318,7 +3318,7 @@ class ParConcMarkingClosure: public MetadataAwareOopClosure {
public:
ParConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
MetadataAwareOopClosure(collector->ref_processor()),
MetadataVisitingOopIterateClosure(collector->ref_processor()),
_collector(collector),
_task(task),
_span(collector->_span),
@ -3382,9 +3382,6 @@ void ParConcMarkingClosure::do_oop(oop obj) {
}
}
void ParConcMarkingClosure::do_oop(oop* p) { ParConcMarkingClosure::do_oop_work(p); }
void ParConcMarkingClosure::do_oop(narrowOop* p) { ParConcMarkingClosure::do_oop_work(p); }
void ParConcMarkingClosure::trim_queue(size_t max) {
while (_work_queue->size() > max) {
oop new_oop;
@ -4065,9 +4062,9 @@ size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
}
class PrecleanCLDClosure : public CLDClosure {
MetadataAwareOopsInGenClosure* _cm_closure;
MetadataVisitingOopsInGenClosure* _cm_closure;
public:
PrecleanCLDClosure(MetadataAwareOopsInGenClosure* oop_closure) : _cm_closure(oop_closure) {}
PrecleanCLDClosure(MetadataVisitingOopsInGenClosure* oop_closure) : _cm_closure(oop_closure) {}
void do_cld(ClassLoaderData* cld) {
if (cld->has_accumulated_modified_oops()) {
cld->clear_accumulated_modified_oops();
@ -4429,7 +4426,7 @@ void CMSParRemarkTask::work(uint worker_id) {
ResourceMark rm;
GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
for (int i = 0; i < array->length(); i++) {
par_mrias_cl.do_cld_nv(array->at(i));
Devirtualizer::do_cld(&par_mrias_cl, array->at(i));
}
// We don't need to keep track of new CLDs anymore.
@ -4970,7 +4967,7 @@ void CMSCollector::do_remark_non_parallel() {
ResourceMark rm;
GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
for (int i = 0; i < array->length(); i++) {
mrias_cl.do_cld_nv(array->at(i));
Devirtualizer::do_cld(&mrias_cl, array->at(i));
}
// We don't need to keep track of new CLDs anymore.
@ -5803,9 +5800,6 @@ void MarkRefsIntoClosure::do_oop(oop obj) {
}
}
void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
ParMarkRefsIntoClosure::ParMarkRefsIntoClosure(
MemRegion span, CMSBitMap* bitMap):
_span(span),
@ -5825,9 +5819,6 @@ void ParMarkRefsIntoClosure::do_oop(oop obj) {
}
}
void ParMarkRefsIntoClosure::do_oop(oop* p) { ParMarkRefsIntoClosure::do_oop_work(p); }
void ParMarkRefsIntoClosure::do_oop(narrowOop* p) { ParMarkRefsIntoClosure::do_oop_work(p); }
// A variant of the above, used for CMS marking verification.
MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
@ -5856,9 +5847,6 @@ void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
}
}
void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
//////////////////////////////////////////////////
// MarkRefsIntoAndScanClosure
//////////////////////////////////////////////////
@ -5933,9 +5921,6 @@ void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
}
}
void MarkRefsIntoAndScanClosure::do_oop(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
void MarkRefsIntoAndScanClosure::do_yield_work() {
assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
"CMS thread should hold CMS token");
@ -6016,9 +6001,6 @@ void ParMarkRefsIntoAndScanClosure::do_oop(oop obj) {
}
}
void ParMarkRefsIntoAndScanClosure::do_oop(oop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
void ParMarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
// This closure is used to rescan the marked objects on the dirty cards
// in the mod union table and the card table proper.
size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
@ -6597,7 +6579,7 @@ PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
CMSCollector* collector, MemRegion span,
CMSBitMap* verification_bm, CMSBitMap* cms_bm,
CMSMarkStack* mark_stack):
MetadataAwareOopClosure(collector->ref_processor()),
MetadataVisitingOopIterateClosure(collector->ref_processor()),
_collector(collector),
_span(span),
_verification_bm(verification_bm),
@ -6654,7 +6636,7 @@ PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
MemRegion span,
CMSBitMap* bitMap, CMSMarkStack* markStack,
HeapWord* finger, MarkFromRootsClosure* parent) :
MetadataAwareOopClosure(collector->ref_processor()),
MetadataVisitingOopIterateClosure(collector->ref_processor()),
_collector(collector),
_span(span),
_bitMap(bitMap),
@ -6671,7 +6653,7 @@ ParPushOrMarkClosure::ParPushOrMarkClosure(CMSCollector* collector,
HeapWord* finger,
HeapWord* volatile* global_finger_addr,
ParMarkFromRootsClosure* parent) :
MetadataAwareOopClosure(collector->ref_processor()),
MetadataVisitingOopIterateClosure(collector->ref_processor()),
_collector(collector),
_whole_span(collector->_span),
_span(span),
@ -6752,9 +6734,6 @@ void PushOrMarkClosure::do_oop(oop obj) {
}
}
void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); }
void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
void ParPushOrMarkClosure::do_oop(oop obj) {
// Ignore mark word because we are running concurrent with mutators.
assert(oopDesc::is_oop_or_null(obj, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
@ -6801,9 +6780,6 @@ void ParPushOrMarkClosure::do_oop(oop obj) {
}
}
void ParPushOrMarkClosure::do_oop(oop* p) { ParPushOrMarkClosure::do_oop_work(p); }
void ParPushOrMarkClosure::do_oop(narrowOop* p) { ParPushOrMarkClosure::do_oop_work(p); }
PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
MemRegion span,
ReferenceDiscoverer* rd,
@ -6811,7 +6787,7 @@ PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
CMSBitMap* mod_union_table,
CMSMarkStack* mark_stack,
bool concurrent_precleaning):
MetadataAwareOopClosure(rd),
MetadataVisitingOopIterateClosure(rd),
_collector(collector),
_span(span),
_bit_map(bit_map),
@ -6883,7 +6859,7 @@ ParPushAndMarkClosure::ParPushAndMarkClosure(CMSCollector* collector,
ReferenceDiscoverer* rd,
CMSBitMap* bit_map,
OopTaskQueue* work_queue):
MetadataAwareOopClosure(rd),
MetadataVisitingOopIterateClosure(rd),
_collector(collector),
_span(span),
_bit_map(bit_map),
@ -6892,9 +6868,6 @@ ParPushAndMarkClosure::ParPushAndMarkClosure(CMSCollector* collector,
assert(ref_discoverer() != NULL, "ref_discoverer shouldn't be NULL");
}
void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
// Grey object rescan during second checkpoint phase --
// the parallel version.
void ParPushAndMarkClosure::do_oop(oop obj) {
@ -6937,9 +6910,6 @@ void ParPushAndMarkClosure::do_oop(oop obj) {
}
}
void ParPushAndMarkClosure::do_oop(oop* p) { ParPushAndMarkClosure::do_oop_work(p); }
void ParPushAndMarkClosure::do_oop(narrowOop* p) { ParPushAndMarkClosure::do_oop_work(p); }
void CMSPrecleanRefsYieldClosure::do_yield_work() {
Mutex* bml = _collector->bitMapLock();
assert_lock_strong(bml);
@ -7606,9 +7576,6 @@ void CMSKeepAliveClosure::do_oop(oop obj) {
}
}
void CMSKeepAliveClosure::do_oop(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
// CMSParKeepAliveClosure: a parallel version of the above.
// The work queues are private to each closure (thread),
// but (may be) available for stealing by other threads.
@ -7629,9 +7596,6 @@ void CMSParKeepAliveClosure::do_oop(oop obj) {
}
}
void CMSParKeepAliveClosure::do_oop(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
void CMSParKeepAliveClosure::trim_queue(uint max) {
while (_work_queue->size() > max) {
oop new_oop;
@ -7677,9 +7641,6 @@ void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
}
}
void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
//////////////////////////////////////////////////////////////////
// CMSExpansionCause /////////////////////////////
//////////////////////////////////////////////////////////////////

@ -1190,7 +1190,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
void save_sweep_limit();
// More iteration support
virtual void oop_iterate(ExtendedOopClosure* cl);
virtual void oop_iterate(OopIterateClosure* cl);
virtual void safe_object_iterate(ObjectClosure* cl);
virtual void object_iterate(ObjectClosure* cl);
@ -1307,7 +1307,7 @@ class ParMarkFromRootsClosure: public BitMapClosure {
// The following closures are used to do certain kinds of verification of
// CMS marking.
class PushAndMarkVerifyClosure: public MetadataAwareOopClosure {
class PushAndMarkVerifyClosure: public MetadataVisitingOopIterateClosure {
CMSCollector* _collector;
MemRegion _span;
CMSBitMap* _verification_bm;

@ -51,6 +51,7 @@
#include "gc/shared/workgroup.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
@ -502,12 +503,6 @@ ParScanClosure::ParScanClosure(ParNewGeneration* g,
_boundary = _g->reserved().end();
}
void ParScanWithBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }
@ -519,9 +514,6 @@ ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
: ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}
void ParScanWeakRefClosure::do_oop(oop* p) { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }
#ifdef WIN32
#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
#endif
@ -684,17 +676,17 @@ template <class T>
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
{
oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
// We never expect to see a null reference being processed
// as a weak reference.
assert(oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
}
#endif // ASSERT
_par_cl->do_oop_nv(p);
Devirtualizer::do_oop_no_verify(_par_cl, p);
if (CMSHeap::heap()->is_in_reserved(p)) {
oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);;
oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);;
_rs->write_ref_field_gc_par(p, obj);
}
}
@ -710,17 +702,17 @@ template <class T>
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
{
oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
// We never expect to see a null reference being processed
// as a weak reference.
assert(oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
}
#endif // ASSERT
_cl->do_oop_nv(p);
Devirtualizer::do_oop_no_verify(_cl, p);
if (CMSHeap::heap()->is_in_reserved(p)) {
oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
_rs->write_ref_field_gc_par(p, obj);
}
}
@ -737,7 +729,7 @@ template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
oop new_obj = obj->is_forwarded()
? obj->forwardee()
: _g->DefNewGeneration::copy_to_survivor_space(obj);
RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
}
if (_gc_barrier) {
// If p points to a younger generation, mark the card.

@ -57,8 +57,6 @@ class ParScanWithBarrierClosure: public ParScanClosure {
ParScanClosure(g, par_scan_state) {}
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
};
class ParScanWithoutBarrierClosure: public ParScanClosure {
@ -68,8 +66,6 @@ class ParScanWithoutBarrierClosure: public ParScanClosure {
ParScanClosure(g, par_scan_state) {}
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
};
class ParRootScanWithBarrierTwoGensClosure: public ParScanClosure {
@ -99,8 +95,6 @@ class ParScanWeakRefClosure: public ScanWeakRefClosure {
ParScanThreadState* par_scan_state);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
};
class ParEvacuateFollowersClosure: public VoidClosure {

@ -37,7 +37,7 @@
#include "oops/oop.inline.hpp"
template <class T> inline void ParScanWeakRefClosure::do_oop_work(T* p) {
oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
// weak references are sometimes scanned twice; must check
// that to-space doesn't already contain this object
if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) {
@ -53,16 +53,16 @@ template <class T> inline void ParScanWeakRefClosure::do_oop_work(T* p) {
new_obj = ((ParNewGeneration*)_g)->copy_to_survivor_space(_par_scan_state,
obj, obj_sz, m);
}
RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
}
}
inline void ParScanWeakRefClosure::do_oop_nv(oop* p) { ParScanWeakRefClosure::do_oop_work(p); }
inline void ParScanWeakRefClosure::do_oop_nv(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }
inline void ParScanWeakRefClosure::do_oop(oop* p) { ParScanWeakRefClosure::do_oop_work(p); }
inline void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }
template <class T> inline void ParScanClosure::par_do_barrier(T* p) {
assert(generation()->is_in_reserved(p), "expected ref in generation");
oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
// If p points to a younger generation, mark the card.
if ((HeapWord*)obj < gen_boundary()) {
rs()->write_ref_field_gc_par(p, obj);
@ -112,14 +112,14 @@ inline void ParScanClosure::do_oop_work(T* p,
oop new_obj;
if (m->is_marked()) { // Contains forwarding pointer.
new_obj = ParNewGeneration::real_forwardee(obj);
RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
log_develop_trace(gc, scavenge)("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
"forwarded ",
new_obj->klass()->internal_name(), p2i(p), p2i((void *)obj), p2i((void *)new_obj), new_obj->size());
} else {
size_t obj_sz = obj->size_given_klass(objK);
new_obj = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m);
RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
if (root_scan) {
// This may have pushed an object. If we have a root
// category with a lot of roots, can't let the queue get too
@ -137,10 +137,10 @@ inline void ParScanClosure::do_oop_work(T* p,
}
}
inline void ParScanWithBarrierClosure::do_oop_nv(oop* p) { ParScanClosure::do_oop_work(p, true, false); }
inline void ParScanWithBarrierClosure::do_oop_nv(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }
inline void ParScanWithBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, false); }
inline void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }
inline void ParScanWithoutBarrierClosure::do_oop_nv(oop* p) { ParScanClosure::do_oop_work(p, false, false); }
inline void ParScanWithoutBarrierClosure::do_oop_nv(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }
inline void ParScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, false); }
inline void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }
#endif // SHARE_VM_GC_CMS_PAROOPCLOSURES_INLINE_HPP

@ -34,7 +34,7 @@
template <DecoratorSet decorators, typename T>
inline void G1BarrierSet::write_ref_field_pre(T* field) {
if (HasDecorator<decorators, AS_DEST_NOT_INITIALIZED>::value ||
if (HasDecorator<decorators, IS_DEST_UNINITIALIZED>::value ||
HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
return;
}

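HasDecorator above is a compile-time test, so skipping the pre-barrier for uninitialized destinations and no-keepalive stores costs nothing at runtime. A self-contained sketch with simplified decorator constants; the real DecoratorSet carries many more bits and the barrier body is only modeled here:

#include <cstdint>
#include <cstdio>

using DecoratorSet = uint64_t;
const DecoratorSet IS_DEST_UNINITIALIZED = 1u << 0;
const DecoratorSet AS_NO_KEEPALIVE       = 1u << 1;

// Compile-time membership test over the decorator bitset.
template <DecoratorSet decorators, DecoratorSet decorator>
struct HasDecorator {
  static const bool value = (decorators & decorator) != 0;
};

template <DecoratorSet decorators>
void write_ref_field_pre() {
  // The condition is a constant expression, so the dead branch is
  // eliminated at compile time for each instantiation.
  if (HasDecorator<decorators, IS_DEST_UNINITIALIZED>::value ||
      HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
    puts("pre-barrier skipped");
    return;
  }
  puts("pre-barrier executed");
}

int main() {
  write_ref_field_pre<IS_DEST_UNINITIALIZED>();  // skipped
  write_ref_field_pre<0>();                      // executed
  return 0;
}
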
@ -2116,7 +2116,7 @@ static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h
G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
G1CMTask* task)
: MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)),
: MetadataVisitingOopIterateClosure(get_cm_oop_closure_ref_processor(g1h)),
_g1h(g1h), _task(task)
{ }

@ -29,6 +29,7 @@
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/g1/g1RemSetTrackingPolicy.hpp"

@ -38,7 +38,7 @@
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
class UpdateRSetDeferred : public ExtendedOopClosure {
class UpdateRSetDeferred : public BasicOopIterateClosure {
private:
G1CollectedHeap* _g1h;
DirtyCardQueue* _dcq;

@ -34,6 +34,7 @@
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
class G1AdjustLiveClosure : public StackObj {
G1AdjustClosure* _adjust_closure;

@ -31,6 +31,7 @@
#include "gc/g1/g1FullGCReferenceProcessorExecutor.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "memory/iterator.inline.hpp"
G1FullGCMarkTask::G1FullGCMarkTask(G1FullCollector* collector) :
G1FullGCTask("G1 Parallel Marking Task", collector),

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc/g1/g1FullGCMarker.inline.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "memory/iterator.inline.hpp"
G1FullGCMarker::G1FullGCMarker(uint worker_id, PreservedMarks* preserved_stack, G1CMBitMap* bitmap) :
_worker_id(worker_id),

@ -28,6 +28,7 @@
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
#include "gc/g1/g1FullGCMarker.hpp"
#include "gc/g1/g1FullGCOopClosures.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1StringDedupQueue.hpp"
#include "gc/shared/preservedMarks.inline.hpp"

@ -26,32 +26,12 @@
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1FullGCMarker.inline.hpp"
#include "gc/g1/g1FullGCOopClosures.inline.hpp"
#include "gc/g1/g1_specialized_oop_closures.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
void G1MarkAndPushClosure::do_oop(oop* p) {
do_oop_nv(p);
}
void G1MarkAndPushClosure::do_oop(narrowOop* p) {
do_oop_nv(p);
}
bool G1MarkAndPushClosure::do_metadata() {
return do_metadata_nv();
}
void G1MarkAndPushClosure::do_klass(Klass* k) {
do_klass_nv(k);
}
void G1MarkAndPushClosure::do_cld(ClassLoaderData* cld) {
do_cld_nv(cld);
}
void G1FollowStackClosure::do_void() { _marker->drain_stack(); }
void G1FullKeepAliveClosure::do_oop(oop* p) { do_oop_work(p); }
@ -75,7 +55,7 @@ void G1VerifyOopClosure::print_object(outputStream* out, oop obj) {
#endif // PRODUCT
}
template <class T> void G1VerifyOopClosure::do_oop_nv(T* p) {
template <class T> void G1VerifyOopClosure::do_oop_work(T* p) {
T heap_oop = RawAccess<>::oop_load(p);
if (!CompressedOops::is_null(heap_oop)) {
_cc++;
@ -121,8 +101,5 @@ template <class T> void G1VerifyOopClosure::do_oop_nv(T* p) {
}
}
template void G1VerifyOopClosure::do_oop_nv(oop*);
template void G1VerifyOopClosure::do_oop_nv(narrowOop*);
// Generate G1 full GC specialized oop_oop_iterate functions.
SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1FULL(ALL_KLASS_OOP_OOP_ITERATE_DEFN)
template void G1VerifyOopClosure::do_oop_work(oop*);
template void G1VerifyOopClosure::do_oop_work(narrowOop*);

@ -55,7 +55,7 @@ public:
virtual void do_oop(narrowOop* p);
};
class G1MarkAndPushClosure : public ExtendedOopClosure {
class G1MarkAndPushClosure : public OopIterateClosure {
G1FullGCMarker* _marker;
uint _worker_id;
@ -63,26 +63,21 @@ public:
G1MarkAndPushClosure(uint worker, G1FullGCMarker* marker, ReferenceDiscoverer* ref) :
_marker(marker),
_worker_id(worker),
ExtendedOopClosure(ref) { }
OopIterateClosure(ref) { }
template <class T> inline void do_oop_nv(T* p);
template <class T> inline void do_oop_work(T* p);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
virtual bool do_metadata();
bool do_metadata_nv();
virtual void do_klass(Klass* k);
void do_klass_nv(Klass* k);
virtual void do_cld(ClassLoaderData* cld);
void do_cld_nv(ClassLoaderData* cld);
};
class G1AdjustClosure : public ExtendedOopClosure {
class G1AdjustClosure : public BasicOopIterateClosure {
template <class T> static inline void adjust_pointer(T* p);
public:
template <class T> void do_oop_nv(T* p) { adjust_pointer(p); }
template <class T> void do_oop_work(T* p) { adjust_pointer(p); }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
@ -107,10 +102,10 @@ public:
bool failures() { return _failures; }
void print_object(outputStream* out, oop obj);
template <class T> void do_oop_nv(T* p);
template <class T> void do_oop_work(T* p);
void do_oop(oop* p) { do_oop_nv(p); }
void do_oop(narrowOop* p) { do_oop_nv(p); }
void do_oop(oop* p) { do_oop_work(p); }
void do_oop(narrowOop* p) { do_oop_work(p); }
};
class G1FollowStackClosure: public VoidClosure {

@ -36,19 +36,27 @@
#include "oops/oop.inline.hpp"
template <typename T>
inline void G1MarkAndPushClosure::do_oop_nv(T* p) {
inline void G1MarkAndPushClosure::do_oop_work(T* p) {
_marker->mark_and_push(p);
}
inline bool G1MarkAndPushClosure::do_metadata_nv() {
inline void G1MarkAndPushClosure::do_oop(oop* p) {
do_oop_work(p);
}
inline void G1MarkAndPushClosure::do_oop(narrowOop* p) {
do_oop_work(p);
}
inline bool G1MarkAndPushClosure::do_metadata() {
return true;
}
inline void G1MarkAndPushClosure::do_klass_nv(Klass* k) {
inline void G1MarkAndPushClosure::do_klass(Klass* k) {
_marker->follow_klass(k);
}
inline void G1MarkAndPushClosure::do_cld_nv(ClassLoaderData* cld) {
inline void G1MarkAndPushClosure::do_cld(ClassLoaderData* cld) {
_marker->follow_cld(cld);
}
@ -78,11 +86,11 @@ template <class T> inline void G1AdjustClosure::adjust_pointer(T* p) {
// Forwarded, just update.
assert(Universe::heap()->is_in_reserved(forwardee), "should be in object space");
RawAccess<OOP_NOT_NULL>::oop_store(p, forwardee);
RawAccess<IS_NOT_NULL>::oop_store(p, forwardee);
}
inline void G1AdjustClosure::do_oop(oop* p) { do_oop_nv(p); }
inline void G1AdjustClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
inline void G1AdjustClosure::do_oop(oop* p) { do_oop_work(p); }
inline void G1AdjustClosure::do_oop(narrowOop* p) { do_oop_work(p); }
inline bool G1IsAliveClosure::do_object_b(oop p) {
return _bitmap->is_marked(p) || G1ArchiveAllocator::is_closed_archive_object(p);

@ -35,6 +35,7 @@
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/ticks.hpp"

@ -30,6 +30,7 @@
#include "gc/g1/g1FullGCReferenceProcessorExecutor.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "memory/iterator.inline.hpp"
G1FullGCReferenceProcessingExecutor::G1FullGCReferenceProcessingExecutor(G1FullCollector* collector) :
_collector(collector),

Some files were not shown because too many files have changed in this diff.