commit c4dd346ef4 by Lana Steuck, 2015-11-12 14:13:49 -08:00
562 changed files with 12368 additions and 6932 deletions

View File

@ -11,3 +11,12 @@
^.hgtip
.DS_Store
\.class$
^\.?mx.jvmci/
^src/jdk.vm.ci/share/classes/\w[\w\.]*/.*\.xml
^src/jdk.vm.ci/share/classes/\w[\w\.]*/.*\.iml
^src/jdk.vm.ci/share/classes/\w[\w\.]*/nbproject
^src/jdk.vm.ci/share/classes/\w[\w\.]*/\..*
^test/compiler/jvmci/\w[\w\.]*/.*\.xml
^test/compiler/jvmci/\w[\w\.]*/.*\.iml
^test/compiler/jvmci/\w[\w\.]*/nbproject
^test/compiler/jvmci/\w[\w\.]*/\..*
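
These new .hgignore entries use Mercurial's regexp syntax to keep generated JVMCI IDE metadata (Eclipse/IntelliJ .xml and .iml files, NetBeans nbproject directories, dot-files) out of version control. As a rough illustration of what one pattern matches, here is a hypothetical Java regex check, assuming Java and Mercurial regexp semantics agree for these simple patterns:

import java.util.regex.Pattern;

public class IgnoreCheck {
    public static void main(String[] args) {
        // Same pattern as the .hgignore line for NetBeans project directories
        Pattern p = Pattern.compile("^src/jdk.vm.ci/share/classes/\\w[\\w.]*/nbproject");
        // Matches a generated NetBeans directory under a JVMCI module...
        System.out.println(p.matcher("src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/nbproject/project.xml").find()); // true
        // ...but not regular Java sources
        System.out.println(p.matcher("src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/Foo.java").find());          // false
    }
}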

View File

@ -545,6 +545,7 @@ uintptr_t search_symbol(struct symtab* symtab, uintptr_t base,
return (uintptr_t)NULL;
item.key = (char*) strdup(sym_name);
item.data = NULL;
hsearch_r(item, FIND, &ret, symtab->hash_table);
if (ret) {
struct elf_symbol * sym = (struct elf_symbol *)(ret->data);

View File

@ -40,8 +40,7 @@ public class OopUtilities implements /* imports */ JVMTIThreadState {
// FIXME: access should be synchronized and cleared when VM is
// resumed
// String fields
private static IntField offsetField;
private static IntField countField;
private static ByteField coderField;
private static OopField valueField;
// ThreadGroup fields
private static OopField threadGroupParentField;
@ -96,20 +95,30 @@ public class OopUtilities implements /* imports */ JVMTIThreadState {
if (charArray == null) {
return null;
}
return charArrayToString(charArray, 0, (int) charArray.getLength());
int length = (int)charArray.getLength();
StringBuffer buf = new StringBuffer(length);
for (int i = 0; i < length; i++) {
buf.append(charArray.getCharAt(i));
}
return buf.toString();
}
public static String charArrayToString(TypeArray charArray, int offset, int length) {
if (charArray == null) {
public static String byteArrayToString(TypeArray byteArray, byte coder) {
if (byteArray == null) {
return null;
}
final int limit = offset + length;
if (Assert.ASSERTS_ENABLED) {
Assert.that(offset >= 0 && limit <= charArray.getLength(), "out of bounds");
}
int length = (int)byteArray.getLength() >> coder;
StringBuffer buf = new StringBuffer(length);
for (int i = offset; i < limit; i++) {
buf.append(charArray.getCharAt(i));
if (coder == 0) {
// Latin1 encoded
for (int i = 0; i < length; i++) {
buf.append((char)(byteArray.getByteAt(i) & 0xff));
}
} else {
// UTF16 encoded
for (int i = 0; i < length; i++) {
buf.append(byteArray.getCharAt(i));
}
}
return buf.toString();
}
@ -141,21 +150,14 @@ public class OopUtilities implements /* imports */ JVMTIThreadState {
}
public static String stringOopToString(Oop stringOop) {
if (offsetField == null) {
InstanceKlass k = (InstanceKlass) stringOop.getKlass();
offsetField = (IntField) k.findField("offset", "I"); // optional
countField = (IntField) k.findField("count", "I"); // optional
valueField = (OopField) k.findField("value", "[C");
if (Assert.ASSERTS_ENABLED) {
Assert.that(valueField != null, "Field \'value\' of java.lang.String not found");
}
InstanceKlass k = (InstanceKlass) stringOop.getKlass();
coderField = (ByteField) k.findField("coder", "B");
valueField = (OopField) k.findField("value", "[B");
if (Assert.ASSERTS_ENABLED) {
Assert.that(coderField != null, "Field \'coder\' of java.lang.String not found");
Assert.that(valueField != null, "Field \'value\' of java.lang.String not found");
}
if (offsetField != null && countField != null) {
return charArrayToString((TypeArray) valueField.getValue(stringOop),
offsetField.getValue(stringOop),
countField.getValue(stringOop));
}
return charArrayToString((TypeArray) valueField.getValue(stringOop));
return byteArrayToString((TypeArray) valueField.getValue(stringOop), coderField.getValue(stringOop));
}
public static String stringOopToEscapedString(Oop stringOop) {
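
For orientation, the OopUtilities hunk above mirrors how JDK 9 Compact Strings store java.lang.String: a byte[] value plus a byte coder, where coder 0 means Latin-1 (one byte per char) and any other coder is treated as UTF-16 (two bytes per char), exactly as in byteArrayToString. A standalone sketch of the same decoding, using plain Java arrays instead of the SA's TypeArray wrapper (the class and constants here are illustrative, not part of the SA API):

public class CompactStringDecode {
    static final byte LATIN1 = 0;
    static final byte UTF16  = 1;  // illustrative; the hunk above only tests coder == 0

    // Decode a String value[] the same way byteArrayToString does above.
    static String decode(byte[] value, byte coder) {
        int length = value.length >> coder;           // UTF-16 uses two bytes per char
        StringBuilder sb = new StringBuilder(length);
        for (int i = 0; i < length; i++) {
            if (coder == LATIN1) {
                sb.append((char) (value[i] & 0xff));  // zero-extend the Latin-1 byte
            } else {
                // HotSpot stores UTF-16 chars in native byte order; little-endian shown here
                int lo = value[2 * i] & 0xff;
                int hi = value[2 * i + 1] & 0xff;
                sb.append((char) ((hi << 8) | lo));
            }
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.println(decode(new byte[] {'H', 'i'}, LATIN1)); // Hi
    }
}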

View File

@ -268,8 +268,8 @@ public class HeapSummary extends Tool {
VM vm = VM.getVM();
SystemDictionary sysDict = vm.getSystemDictionary();
InstanceKlass strKlass = sysDict.getStringKlass();
// String has a field named 'value' of type 'char[]'.
stringValueField = (OopField) strKlass.findField("value", "[C");
// String has a field named 'value' of type 'byte[]'.
stringValueField = (OopField) strKlass.findField("value", "[B");
}
private long stringSize(Instance instance) {
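
One practical consequence for heap accounting: with the value field now a byte[], a Latin-1 string's backing array holds one byte per character instead of two, so stringSize shrinks for ASCII-heavy heaps. A back-of-the-envelope sketch, where the array header and alignment sizes are illustrative assumptions rather than values read from the VM:

public class StringFootprint {
    static long align(long size, long alignment) { return (size + alignment - 1) & -alignment; }

    // Rough size of the value[] for an n-char string: the 16-byte array header and
    // 8-byte alignment are illustrative assumptions, not values read from the VM.
    static long valueArrayBytes(int chars, boolean latin1) {
        return align(16 + (latin1 ? chars : 2L * chars), 8);
    }

    public static void main(String[] args) {
        System.out.println(valueArrayBytes(7, true));   // 24 bytes for a 7-char ASCII string
        System.out.println(valueArrayBytes(7, false));  // 32 bytes if it needs UTF-16
    }
}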

View File

@ -61,9 +61,8 @@ public class Hashtable extends BasicHashtable {
long h = 0;
int s = 0;
int len = buf.length;
// Emulate the unsigned int in java_lang_String::hash_code
while (len-- > 0) {
h = 31*h + (0xFFFFFFFFL & buf[s]);
h = 31*h + (0xFFL & buf[s]);
s++;
}
return h & 0xFFFFFFFFL;
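
The mask change is needed because the buffer is now a byte[]: Java bytes are signed, so each element must be zero-extended with & 0xFF before feeding it into the 31-based hash, whereas the old char[] path only had to clamp the running value to 32 bits. A minimal sketch of the same computation over bytes:

public class Latin1Hash {
    // Same shape as the loop above: h = 31*h + (buf[i] & 0xFF), truncated to 32 bits.
    static long hash(byte[] buf) {
        long h = 0;
        for (byte b : buf) {
            h = 31 * h + (b & 0xFFL);   // zero-extend the signed byte
        }
        return h & 0xFFFFFFFFL;         // emulate the unsigned int used by the VM
    }

    public static void main(String[] args) {
        // 96354, the same value as "abc".hashCode()
        System.out.println(hash("abc".getBytes(java.nio.charset.StandardCharsets.ISO_8859_1)));
    }
}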

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -403,6 +403,8 @@ $(EXPORT_LIB_ARCH_DIR)/%.dSYM: $(C2_BUILD_DIR)/%.dSYM
$(install-dir)
$(EXPORT_SERVER_DIR)/%.dSYM: $(C2_BUILD_DIR)/%.dSYM
$(install-dir)
$(EXPORT_SERVER_DIR)/%.symbols: $(C2_BUILD_DIR)/%.symbols
$(install-file)
endif
# Client (C1)

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -224,6 +224,11 @@ flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
echo "OPENJDK = $(OPENJDK)"; \
echo "$(LP64_SETTING/$(DATA_MODE))"; \
echo; \
echo "STATIC_BUILD = $(STATIC_BUILD)"; \
echo "COMPILER_WARNINGS_FATAL = $(COMPILER_WARNINGS_FATAL)"; \
echo "EXTRA_LDFLAGS = $(EXTRA_LDFLAGS)"; \
echo "LIBRARY_SUFFIX = $(LIBRARY_SUFFIX)"; \
echo; \
echo "# Used for platform dispatching"; \
echo "TARGET_DEFINES = -DTARGET_OS_FAMILY_\$$(Platform_os_family)"; \
echo "TARGET_DEFINES += -DTARGET_ARCH_\$$(Platform_arch)"; \

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -186,13 +186,16 @@ ifeq ($(JDK6_OR_EARLIER),0)
# executed multiple times. We reduce the noise by at least checking that
# BUILD_FLAVOR has been set.
ifneq ($(BUILD_FLAVOR),)
ifeq ($(BUILD_FLAVOR), product)
FULL_DEBUG_SYMBOLS ?= 1
ENABLE_FULL_DEBUG_SYMBOLS = $(FULL_DEBUG_SYMBOLS)
else
# debug variants always get Full Debug Symbols (if available)
ENABLE_FULL_DEBUG_SYMBOLS = 1
endif
# FULL_DEBUG_SYMBOLS not created for individual static libraries
ifeq ($(STATIC_BUILD),false)
ifeq ($(BUILD_FLAVOR), product)
FULL_DEBUG_SYMBOLS ?= 1
ENABLE_FULL_DEBUG_SYMBOLS = $(FULL_DEBUG_SYMBOLS)
else
# debug variants always get Full Debug Symbols (if available)
ENABLE_FULL_DEBUG_SYMBOLS = 1
endif
endif
$(eval $(call print_info, "ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)"))
# since objcopy is optional, we set ZIP_DEBUGINFO_FILES later
@ -256,16 +259,24 @@ endif # JDK_6_OR_EARLIER
JDK_INCLUDE_SUBDIR=bsd
# Library suffix
ifeq ($(OS_VENDOR),Darwin)
LIBRARY_SUFFIX=dylib
ifneq ($(STATIC_BUILD),true)
ifeq ($(OS_VENDOR),Darwin)
LIBRARY_SUFFIX=dylib
else
LIBRARY_SUFFIX=so
endif
else
LIBRARY_SUFFIX=so
LIBRARY_SUFFIX=a
endif
EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html
# jsig library not needed for static builds
ifneq ($(STATIC_BUILD),true)
# client and server subdirectories have symbolic links to ../libjsig.so
EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
endif
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
@ -286,6 +297,9 @@ EXPORT_MINIMAL_DIR = $(EXPORT_LIB_ARCH_DIR)/minimal
ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX)
ifeq ($(STATIC_BUILD),true)
EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.symbols
endif
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
@ -303,6 +317,9 @@ endif
ifeq ($(JVM_VARIANT_CLIENT),true)
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX)
ifeq ($(STATIC_BUILD),true)
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.symbols
endif
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
@ -320,6 +337,9 @@ endif
ifeq ($(JVM_VARIANT_MINIMAL1),true)
EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/Xusage.txt
EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.$(LIBRARY_SUFFIX)
ifeq ($(STATIC_BUILD),true)
EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.symbols
endif
endif
# Serviceability Binaries
@ -388,7 +408,9 @@ ifeq ($(OS_VENDOR), Darwin)
endif
# Binaries to 'universalize' if built
UNIVERSAL_LIPO_LIST += $(EXPORT_LIB_DIR)/libjsig.$(LIBRARY_SUFFIX)
ifneq ($(STATIC_BUILD),true)
UNIVERSAL_LIPO_LIST += $(EXPORT_LIB_DIR)/libjsig.$(LIBRARY_SUFFIX)
endif
UNIVERSAL_LIPO_LIST += $(EXPORT_LIB_DIR)/libsaproc.$(LIBRARY_SUFFIX)
UNIVERSAL_LIPO_LIST += $(EXPORT_LIB_DIR)/server/libjvm.$(LIBRARY_SUFFIX)
UNIVERSAL_LIPO_LIST += $(EXPORT_LIB_DIR)/client/libjvm.$(LIBRARY_SUFFIX)
@ -396,6 +418,13 @@ ifeq ($(OS_VENDOR), Darwin)
# Files to simply copy in place
UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/server/Xusage.txt
UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/client/Xusage.txt
ifeq ($(STATIC_BUILD),true)
UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/server/libjvm.symbols
UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/client/libjvm.symbols
UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/minimal/libjvm.symbols
endif
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/server/libjvm.diz

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -37,15 +37,15 @@ DtraceOutDir = $(GENERATED)/dtracefiles
# Bsd does not build libjvm_db, does not compile on macosx
# disabled in build: rule in vm.make
JVM_DB = libjvm_db
LIBJVM_DB = libjvm_db.dylib
LIBJVM_DB = libjvm_db.$(LIBRARY_SUFFIX)
LIBJVM_DB_DEBUGINFO = libjvm_db.dylib.dSYM
LIBJVM_DB_DEBUGINFO = libjvm_db.$(LIBRARY_SUFFIX).dSYM
LIBJVM_DB_DIZ = libjvm_db.diz
JVM_DTRACE = jvm_dtrace
LIBJVM_DTRACE = libjvm_dtrace.dylib
LIBJVM_DTRACE = libjvm_dtrace.$(LIBRARY_SUFFIX)
LIBJVM_DTRACE_DEBUGINFO = libjvm_dtrace.dylib.dSYM
LIBJVM_DTRACE_DEBUGINFO = libjvm_dtrace.$(LIBRARY_SUFFIX).dSYM
LIBJVM_DTRACE_DIZ = libjvm_dtrace.diz
JVMOFFS = JvmOffsets
@ -167,14 +167,14 @@ endif # ifneq ("${ISA}","${BUILDARCH}")
LFLAGS_GENOFFS += -L.
lib$(GENOFFS).dylib: $(DTRACE_SRCDIR)/$(GENOFFS).cpp $(DTRACE_SRCDIR)/$(GENOFFS).h \
lib$(GENOFFS).$(LIBRARY_SUFFIX): $(DTRACE_SRCDIR)/$(GENOFFS).cpp $(DTRACE_SRCDIR)/$(GENOFFS).h \
$(LIBJVM.o)
$(QUIETLY) $(CXX) $(CXXFLAGS) $(GENOFFS_CFLAGS) $(SHARED_FLAG) $(PICFLAG) \
$(LFLAGS_GENOFFS) -o $@ $(DTRACE_SRCDIR)/$(GENOFFS).cpp -ljvm
$(GENOFFS): $(DTRACE_SRCDIR)/$(GENOFFS)Main.c lib$(GENOFFS).dylib
$(GENOFFS): $(DTRACE_SRCDIR)/$(GENOFFS)Main.c lib$(GENOFFS).$(LIBRARY_SUFFIX)
$(QUIETLY) $(LINK.CXX) -o $@ $(DTRACE_SRCDIR)/$(GENOFFS)Main.c \
./lib$(GENOFFS).dylib
./lib$(GENOFFS).$(LIBRARY_SUFFIX)
# $@.tmp is created first to avoid an empty $(JVMOFFS).h if an error occurs.
$(JVMOFFS).h: $(GENOFFS)

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -187,7 +187,14 @@ ifeq ($(JVM_VARIANT_ZEROSHARK), true)
CFLAGS += $(LIBFFI_CFLAGS)
CFLAGS += $(LLVM_CFLAGS)
endif
ifeq ($(STATIC_BUILD),true)
CXXFLAGS += -DSTATIC_BUILD
CFLAGS += -DSTATIC_BUILD
else
CFLAGS += $(VM_PICFLAG)
endif
CFLAGS += -fno-rtti
CFLAGS += -fno-exceptions
ifeq ($(USE_CLANG),)

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -28,9 +28,9 @@
JSIG = jsig
ifeq ($(OS_VENDOR), Darwin)
LIBJSIG = lib$(JSIG).dylib
LIBJSIG = lib$(JSIG).$(LIBRARY_SUFFIX)
LIBJSIG_DEBUGINFO = lib$(JSIG).dylib.dSYM
LIBJSIG_DEBUGINFO = lib$(JSIG).$(LIBRARY_SUFFIX).dSYM
LIBJSIG_DIZ = lib$(JSIG).diz
else
LIBJSIG = lib$(JSIG).so
@ -61,8 +61,14 @@ endif
$(LIBJSIG): $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE)
@echo $(LOG_INFO) Making signal interposition lib...
ifeq ($(STATIC_BUILD),true)
$(QUIETLY) $(CC) -c $(SYMFLAG) $(EXTRA_CFLAGS) $(ARCHFLAG) $(PICFLAG) \
$(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) -o $(JSIG).o $<
$(QUIETLY) $(AR) $(ARFLAGS) $@ $(JSIG).o
else
$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
$(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) $(EXTRA_CFLAGS) -o $@ $<
$(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) $(EXTRA_CFLAGS) -o $@ $<
endif
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(OS_VENDOR), Darwin)
$(DSYMUTIL) $@

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -39,7 +39,11 @@ AS.S = $(AS) $(ASFLAGS)
COMPILE.CC = $(CC_COMPILE) -c
GENASM.CC = $(CC_COMPILE) -S
LINK.CC = $(CC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
ifeq ($(STATIC_BUILD),true)
LINK_LIB.CC = $(AR) $(ARFLAGS)
else
LINK_LIB.CC = $(CC) $(LFLAGS) $(SHARED_FLAG)
endif
PREPROCESS.CC = $(CC_COMPILE) -E
COMPILE.CXX = $(CXX_COMPILE) -c

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -28,9 +28,9 @@
SAPROC = saproc
ifeq ($(OS_VENDOR), Darwin)
LIBSAPROC = lib$(SAPROC).dylib
LIBSAPROC = lib$(SAPROC).$(LIBRARY_SUFFIX)
LIBSAPROC_DEBUGINFO = lib$(SAPROC).dylib.dSYM
LIBSAPROC_DEBUGINFO = lib$(SAPROC).$(LIBRARY_SUFFIX).dSYM
LIBSAPROC_DIZ = lib$(SAPROC).diz
else
LIBSAPROC = lib$(SAPROC).so

View File

@ -142,10 +142,10 @@ include $(MAKEFILES_DIR)/dtrace.make
JVM = jvm
ifeq ($(OS_VENDOR), Darwin)
LIBJVM = lib$(JVM).dylib
LIBJVM = lib$(JVM).$(LIBRARY_SUFFIX)
CFLAGS += -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE
LIBJVM_DEBUGINFO = lib$(JVM).dylib.dSYM
LIBJVM_DEBUGINFO = lib$(JVM).$(LIBRARY_SUFFIX).dSYM
LIBJVM_DIZ = lib$(JVM).diz
else
LIBJVM = lib$(JVM).so
@ -261,6 +261,16 @@ mapfile : $(MAPFILE) mapfile_extra vm.def
{ print $$0 } \
}' > $@ < $(MAPFILE)
ifeq ($(STATIC_BUILD),true)
EXPORTED_SYMBOLS = libjvm.symbols
libjvm.symbols : mapfile
$(CP) mapfile libjvm.symbols
else
EXPORTED_SYMBOLS =
endif
mapfile_reorder : mapfile $(REORDERFILE)
rm -f $@
cat $^ > $@
@ -288,9 +298,11 @@ else
LFLAGS_VM += $(SONAMEFLAG:SONAME=$(LIBJVM))
ifeq ($(OS_VENDOR), Darwin)
LFLAGS_VM += -Xlinker -rpath -Xlinker @loader_path/.
LFLAGS_VM += -Xlinker -rpath -Xlinker @loader_path/..
LFLAGS_VM += -Xlinker -install_name -Xlinker @rpath/$(@F)
ifneq ($(STATIC_BUILD),true)
LFLAGS_VM += -Xlinker -rpath -Xlinker @loader_path/.
LFLAGS_VM += -Xlinker -rpath -Xlinker @loader_path/..
LFLAGS_VM += -Xlinker -install_name -Xlinker @rpath/$(@F)
endif
else
LFLAGS_VM += -Wl,-z,defs
endif
@ -345,6 +357,10 @@ LD_SCRIPT_FLAG = -Wl,-T,$(LD_SCRIPT)
endif
$(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) $(LD_SCRIPT)
ifeq ($(STATIC_BUILD),true)
echo Linking static vm...;
$(LINK_LIB.CC) $@ $(LIBJVM.o)
else
$(QUIETLY) { \
echo $(LOG_INFO) Linking vm...; \
$(LINK_LIB.CXX/PRE_HOOK) \
@ -354,6 +370,8 @@ $(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) $(LD_SCRIPT)
rm -f $@.1; ln -s $@ $@.1; \
}
endif
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(OS_VENDOR), Darwin)
$(DSYMUTIL) $@
@ -410,10 +428,10 @@ include $(MAKEFILES_DIR)/saproc.make
ifeq ($(OS_VENDOR), Darwin)
# no libjvm_db for macosx
build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(BUILDLIBSAPROC) dtraceCheck
build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(BUILDLIBSAPROC) dtraceCheck $(EXPORTED_SYMBOLS)
echo "Doing vm.make build:"
else
build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(LIBJVM_DB) $(BUILDLIBSAPROC)
build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(LIBJVM_DB) $(BUILDLIBSAPROC) $(EXPORTED_SYMBOLS)
endif
install: install_jvm install_jsig install_saproc

View File

@ -56,10 +56,10 @@ $(eval $(call SetupJavaCompilation, BUILD_JVMCI_SERVICE, \
################################################################################
PROC_SRC_SUBDIRS := \
jdk.vm.ci.compiler \
jdk.vm.ci.hotspot \
jdk.vm.ci.hotspot.amd64 \
jdk.vm.ci.hotspot.sparc \
jdk.vm.ci.runtime \
#
PROC_SRC_DIRS := $(patsubst %, $(SRC_DIR)/%/src, $(PROC_SRC_SUBDIRS))
@ -94,11 +94,7 @@ TARGETS += $(GENSRC_DIR)/_gensrc_proc_done
$(GENSRC_DIR)/META-INF/services/jdk.vm.ci.options.OptionDescriptors: \
$(GENSRC_DIR)/_gensrc_proc_done
$(MKDIR) -p $(@D)
($(CD) $(GENSRC_DIR)/META-INF/jvmci.options && \
$(RM) -f $@; \
for i in $$(ls); do \
echo $${i}_OptionDescriptors >> $@; \
done)
$(FIND) $(GENSRC_DIR) -name '*_OptionDescriptors.java' | $(SED) 's:.*/jdk\.vm\.ci/\(.*\)\.java:\1:' | $(TR) '/' '.' > $@
TARGETS += $(GENSRC_DIR)/META-INF/services/jdk.vm.ci.options.OptionDescriptors
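
The rewritten rule derives each service entry directly from the generated *_OptionDescriptors.java files: find lists the sources, sed strips everything up to the jdk.vm.ci root and the .java suffix, and tr turns path separators into package separators. A hypothetical Java equivalent of that sed/tr pipeline, just to show the intended transformation (the sample file name is made up for illustration):

public class ServiceEntry {
    // Mirror of: sed 's:.*/jdk\.vm\.ci/\(.*\)\.java:\1:' | tr '/' '.'
    static String toServiceLine(String path) {
        String tail = path.replaceFirst(".*/jdk\\.vm\\.ci/(.*)\\.java", "$1");
        return tail.replace('/', '.');
    }

    public static void main(String[] args) {
        // Hypothetical generated file name
        String path = "build/gensrc/jdk.vm.ci/jdk/vm/ci/hotspot/HotSpotVMConfig_OptionDescriptors.java";
        System.out.println(toServiceLine(path)); // jdk.vm.ci.hotspot.HotSpotVMConfig_OptionDescriptors
    }
}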

View File

@ -61,6 +61,11 @@ else
CC_VER_MAJOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f1)
CC_VER_MINOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f2)
CC_VER_MICRO := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f3)
# Workaround Ubuntu bug where -dumpversion doesn't print a micro version
# https://bugs.launchpad.net/ubuntu/+source/gcc-4.8/+bug/1360404
ifeq ($(CC_VER_MICRO),)
CC_VER_MICRO := "0"
endif
endif
ifeq ($(USE_CLANG), true)
@ -224,6 +229,8 @@ ifeq ($(USE_CLANG),)
WARNING_FLAGS += -Wtype-limits
# GCC < 4.8 don't accept this flag for C++.
WARNING_FLAGS += -Wno-format-zero-length
# GCC 4.8 reports less false positives than the older compilers.
WARNING_FLAGS += -Wuninitialized
endif
endif
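
The first hunk guards against GNU toolchains on Ubuntu where gcc -dumpversion prints only "major.minor", leaving CC_VER_MICRO empty; defaulting it to 0 keeps later numeric version comparisons well-formed. A small illustrative sketch of that defaulting logic (the parsing helper is hypothetical, not part of the build system):

public class GccVersion {
    // Split "4.8" or "4.8.2" into {major, minor, micro}, defaulting a missing micro to 0,
    // mirroring the makefile's CC_VER_MICRO fallback above.
    static int[] parse(String dumpversion) {
        String[] parts = dumpversion.replaceFirst("^egcs-", "").split("\\.");
        int major = Integer.parseInt(parts[0]);
        int minor = parts.length > 1 ? Integer.parseInt(parts[1]) : 0;
        int micro = parts.length > 2 ? Integer.parseInt(parts[2]) : 0;  // Ubuntu gcc 4.8 prints no micro
        return new int[] {major, minor, micro};
    }

    public static void main(String[] args) {
        System.out.println(java.util.Arrays.toString(parse("4.8")));   // [4, 8, 0]
        System.out.println(java.util.Arrays.toString(parse("4.8.2"))); // [4, 8, 2]
    }
}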

View File

@ -14150,6 +14150,7 @@ instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp,
instruct string_compare(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
predicate(!CompactStrings);
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
@ -14165,6 +14166,7 @@ instruct string_compare(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cn
instruct string_indexof(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
iRegI_R0 result, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
predicate(!CompactStrings);
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
@ -14184,6 +14186,7 @@ instruct string_indexof_con(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
immI_le_4 int_cnt2, iRegI_R0 result, iRegI tmp1, iRegI tmp2,
iRegI tmp3, iRegI tmp4, rFlagsReg cr)
%{
predicate(!CompactStrings);
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
@ -14203,6 +14206,7 @@ instruct string_indexof_con(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
instruct string_equals(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
iRegI_R0 result, iRegP_R10 tmp, rFlagsReg cr)
%{
predicate(!CompactStrings);
match(Set result (StrEquals (Binary str1 str2) cnt));
effect(KILL tmp, USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);
@ -14218,6 +14222,7 @@ instruct string_equals(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
instruct array_equals(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
iRegP_R10 tmp, rFlagsReg cr)
%{
predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
match(Set result (AryEq ary1 ary2));
effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr);

View File

@ -483,15 +483,6 @@ int LIR_Assembler::emit_deopt_handler() {
return offset;
}
// This is the fast version of java.lang.String.compare; it has not
// OSR-entry and therefore, we generate a slow version for OSR's
void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {
__ mov(r2, (address)__FUNCTION__);
__ call_Unimplemented();
}
void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
_masm->code_section()->relocate(adr, relocInfo::poll_type);
int pc_offset = code_offset();

View File

@ -79,6 +79,9 @@ define_pd_global(uintx, CMSYoungGenPerWorker, 64*M); // default max size of CMS
define_pd_global(uintx, TypeProfileLevel, 111);
// No performance work done here yet.
define_pd_global(bool, CompactStrings, false);
// avoid biased locking while we are bootstrapping the aarch64 build
define_pd_global(bool, UseBiasedLocking, false);

View File

@ -38,11 +38,11 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& constant) {
Unimplemented();
}
void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset) {
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle& constant) {
Unimplemented();
}
void CodeInstaller::pd_relocate_CodeBlob(CodeBlob* cb, NativeInstruction* inst) {
void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset) {
Unimplemented();
}

View File

@ -62,7 +62,7 @@ static int check_nonzero(const char* xname, int x) {
void MethodHandles::verify_klass(MacroAssembler* _masm,
Register obj, SystemDictionary::WKID klass_id,
const char* error_message) {
Klass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
Register temp = rscratch2;
Register temp2 = rscratch1; // used by MacroAssembler::cmpptr

View File

@ -1276,7 +1276,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
// return to caller
//
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
methodHandle method,
const methodHandle& method,
int compile_id,
BasicType* in_sig_bt,
VMRegPair* in_regs,

View File

@ -72,6 +72,9 @@ define_pd_global(size_t, CMSYoungGenPerWorker, 16*M); // Default max size of CM
define_pd_global(uintx, TypeProfileLevel, 111);
// No performance work done here yet.
define_pd_global(bool, CompactStrings, false);
// Platform dependent flag handling: flags only defined on this platform.
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint) \
\

View File

@ -38,11 +38,11 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& constant) {
Unimplemented();
}
void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset) {
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle& constant) {
Unimplemented();
}
void CodeInstaller::pd_relocate_CodeBlob(CodeBlob* cb, NativeInstruction* inst) {
void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset) {
Unimplemented();
}

View File

@ -73,7 +73,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
Register obj_reg, SystemDictionary::WKID klass_id,
Register temp_reg, Register temp2_reg,
const char* error_message) {
Klass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
Label L_ok, L_bad;
BLOCK_COMMENT("verify_klass {");

View File

@ -2054,11 +2054,11 @@ const bool Matcher::match_rule_supported(int opcode) {
return (UsePopCountInstruction && VM_Version::has_popcntw());
case Op_StrComp:
return SpecialStringCompareTo;
return SpecialStringCompareTo && !CompactStrings;
case Op_StrEquals:
return SpecialStringEquals;
return SpecialStringEquals && !CompactStrings;
case Op_StrIndexOf:
return SpecialStringIndexOf;
return SpecialStringIndexOf && !CompactStrings;
}
return true; // Per default match rules are supported.
@ -11077,7 +11077,7 @@ instruct string_indexOf_imm1_char(iRegIdst result, iRegPsrc haystack, iRegIsrc h
immP needleImm, immL offsetImm, immI_1 needlecntImm,
iRegIdst tmp1, iRegIdst tmp2,
flagsRegCR0 cr0, flagsRegCR1 cr1) %{
predicate(SpecialStringIndexOf); // type check implicit by parameter type, See Matcher::match_rule_supported
predicate(SpecialStringIndexOf && !CompactStrings); // type check implicit by parameter type, See Matcher::match_rule_supported
match(Set result (StrIndexOf (Binary haystack haycnt) (Binary (AddP needleImm offsetImm) needlecntImm)));
effect(TEMP_DEF result, TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1);
@ -11120,7 +11120,7 @@ instruct string_indexOf_imm1(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt
effect(USE_KILL needle, /* TDEF needle, */ TEMP_DEF result,
TEMP tmp1, TEMP tmp2);
// Required for EA: check if it is still a type_array.
predicate(SpecialStringIndexOf && n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
predicate(SpecialStringIndexOf && !CompactStrings && n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop()->is_type_array());
ins_cost(180);
@ -11167,7 +11167,7 @@ instruct string_indexOf_imm(iRegIdst result, iRegPsrc haystack, rscratch1RegI ha
effect(USE_KILL haycnt, /* better: TDEF haycnt, */ TEMP_DEF result,
TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, KILL cr0, KILL cr1, KILL cr6);
// Required for EA: check if it is still a type_array.
predicate(SpecialStringIndexOf && n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
predicate(SpecialStringIndexOf && !CompactStrings && n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop()->is_type_array());
ins_cost(250);
@ -11200,7 +11200,7 @@ instruct string_indexOf(iRegIdst result, iRegPsrc haystack, rscratch1RegI haycnt
effect(USE_KILL haycnt, USE_KILL needlecnt, /*better: TDEF haycnt, TDEF needlecnt,*/
TEMP_DEF result,
TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr0, KILL cr1, KILL cr6);
predicate(SpecialStringIndexOf); // See Matcher::match_rule_supported.
predicate(SpecialStringIndexOf && !CompactStrings); // See Matcher::match_rule_supported.
ins_cost(300);
ins_alignment(8); // 'compute_padding()' gets called, up to this number-1 nops will get inserted.
@ -11224,7 +11224,7 @@ instruct string_equals_imm(iRegPsrc str1, iRegPsrc str2, uimmI15 cntImm, iRegIds
match(Set result (StrEquals (Binary str1 str2) cntImm));
effect(TEMP_DEF result, TEMP tmp1, TEMP tmp2,
KILL cr0, KILL cr6, KILL ctr);
predicate(SpecialStringEquals); // See Matcher::match_rule_supported.
predicate(SpecialStringEquals && !CompactStrings); // See Matcher::match_rule_supported.
ins_cost(250);
ins_alignment(8); // 'compute_padding()' gets called, up to this number-1 nops will get inserted.
@ -11247,7 +11247,7 @@ instruct string_equals(iRegPsrc str1, iRegPsrc str2, iRegIsrc cnt, iRegIdst resu
match(Set result (StrEquals (Binary str1 str2) cnt));
effect(TEMP_DEF result, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
KILL cr0, KILL cr1, KILL cr6, KILL ctr);
predicate(SpecialStringEquals); // See Matcher::match_rule_supported.
predicate(SpecialStringEquals && !CompactStrings); // See Matcher::match_rule_supported.
ins_cost(300);
ins_alignment(8); // 'compute_padding()' gets called, up to this number-1 nops will get inserted.
@ -11267,6 +11267,7 @@ instruct string_equals(iRegPsrc str1, iRegPsrc str2, iRegIsrc cnt, iRegIdst resu
// Use dst register classes if register gets killed, as it is the case for TEMP operands!
instruct string_compare(rarg1RegP str1, rarg2RegP str2, rarg3RegI cnt1, rarg4RegI cnt2, iRegIdst result,
iRegPdst tmp, flagsRegCR0 cr0, regCTR ctr) %{
predicate(!CompactStrings);
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
effect(USE_KILL cnt1, USE_KILL cnt2, USE_KILL str1, USE_KILL str2, TEMP_DEF result, TEMP tmp, KILL cr0, KILL ctr);
ins_cost(300);

View File

@ -1701,7 +1701,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
// return to caller
//
nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
methodHandle method,
const methodHandle& method,
int compile_id,
BasicType *in_sig_bt,
VMRegPair *in_regs,

View File

@ -124,6 +124,8 @@ class Assembler : public AbstractAssembler {
impdep1_op3 = 0x36,
aes3_op3 = 0x36,
sha_op3 = 0x36,
bmask_op3 = 0x36,
bshuffle_op3 = 0x36,
alignaddr_op3 = 0x36,
faligndata_op3 = 0x36,
flog3_op3 = 0x36,
@ -194,6 +196,7 @@ class Assembler : public AbstractAssembler {
fnegd_opf = 0x06,
alignaddr_opf = 0x18,
bmask_opf = 0x19,
fadds_opf = 0x41,
faddd_opf = 0x42,
@ -204,6 +207,7 @@ class Assembler : public AbstractAssembler {
fmuls_opf = 0x49,
fmuld_opf = 0x4a,
bshuffle_opf = 0x4c,
fdivs_opf = 0x4d,
fdivd_opf = 0x4e,
@ -1226,6 +1230,9 @@ public:
void edge8n( Register s1, Register s2, Register d ) { vis2_only(); emit_int32( op(arith_op) | rd(d) | op3(edge_op3) | rs1(s1) | opf(edge8n_opf) | rs2(s2)); }
void bmask( Register s1, Register s2, Register d ) { vis2_only(); emit_int32( op(arith_op) | rd(d) | op3(bmask_op3) | rs1(s1) | opf(bmask_opf) | rs2(s2)); }
void bshuffle( FloatRegister s1, FloatRegister s2, FloatRegister d ) { vis2_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(bshuffle_op3) | fs1(s1, FloatRegisterImpl::D) | opf(bshuffle_opf) | fs2(s2, FloatRegisterImpl::D)); }
// VIS3 instructions
void movstosw( FloatRegister s, Register d ) { vis3_only(); emit_int32( op(arith_op) | rd(d) | op3(mftoi_op3) | opf(mstosw_opf) | fs2(s, FloatRegisterImpl::S)); }
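
The new bmask/bshuffle encodings expose SPARC VIS2 byte-permute instructions; string_compress_16 further down uses them with the selector 0x13579bdf to pick the low byte of each big-endian UTF-16 char out of two 8-byte words in one step. Below is a plain-Java simulation of that byte selection, under the assumption that bshuffle takes result byte i from the 16-byte concatenation rs1:rs2 at the index given by the i-th selector nibble:

public class BshuffleSim {
    // Simulate bshuffle: each 4-bit nibble of 'selector' (most significant first)
    // indexes into the 16 bytes of hi:lo and produces one output byte.
    static long bshuffle(long hi, long lo, int selector) {
        byte[] src = new byte[16];
        for (int i = 0; i < 8; i++) {
            src[i]     = (byte) (hi >>> (56 - 8 * i));
            src[8 + i] = (byte) (lo >>> (56 - 8 * i));
        }
        long out = 0;
        for (int i = 0; i < 8; i++) {
            int idx = (selector >>> (28 - 4 * i)) & 0xf;
            out = (out << 8) | (src[idx] & 0xffL);
        }
        return out;
    }

    public static void main(String[] args) {
        // Two big-endian words holding the UTF-16 chars 'a'..'h'; selecting nibbles
        // 1,3,5,7,9,b,d,f keeps the low byte of every char, i.e. the Latin-1 bytes.
        long w1 = 0x0061_0062_0063_0064L; // 'a' 'b' 'c' 'd'
        long w2 = 0x0065_0066_0067_0068L; // 'e' 'f' 'g' 'h'
        System.out.printf("%016x%n", bshuffle(w1, w2, 0x13579bdf)); // 6162636465666768
    }
}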

View File

@ -232,118 +232,6 @@ void LIR_Assembler::osr_entry() {
}
// Optimized Library calls
// This is the fast version of java.lang.String.compare; it has not
// OSR-entry and therefore, we generate a slow version for OSR's
void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst, CodeEmitInfo* info) {
Register str0 = left->as_register();
Register str1 = right->as_register();
Label Ldone;
Register result = dst->as_register();
{
// Get a pointer to the first character of string0 in tmp0
// and get string0.length() in str0
// Get a pointer to the first character of string1 in tmp1
// and get string1.length() in str1
// Also, get string0.length()-string1.length() in
// o7 and get the condition code set
// Note: some instructions have been hoisted for better instruction scheduling
Register tmp0 = L0;
Register tmp1 = L1;
Register tmp2 = L2;
int value_offset = java_lang_String:: value_offset_in_bytes(); // char array
if (java_lang_String::has_offset_field()) {
int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
int count_offset = java_lang_String:: count_offset_in_bytes();
__ load_heap_oop(str0, value_offset, tmp0);
__ ld(str0, offset_offset, tmp2);
__ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
__ ld(str0, count_offset, str0);
__ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
} else {
__ load_heap_oop(str0, value_offset, tmp1);
__ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
__ ld(tmp1, arrayOopDesc::length_offset_in_bytes(), str0);
}
// str1 may be null
add_debug_info_for_null_check_here(info);
if (java_lang_String::has_offset_field()) {
int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
int count_offset = java_lang_String:: count_offset_in_bytes();
__ load_heap_oop(str1, value_offset, tmp1);
__ add(tmp0, tmp2, tmp0);
__ ld(str1, offset_offset, tmp2);
__ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
__ ld(str1, count_offset, str1);
__ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
__ add(tmp1, tmp2, tmp1);
} else {
__ load_heap_oop(str1, value_offset, tmp2);
__ add(tmp2, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
__ ld(tmp2, arrayOopDesc::length_offset_in_bytes(), str1);
}
__ subcc(str0, str1, O7);
}
{
// Compute the minimum of the string lengths, scale it and store it in limit
Register count0 = I0;
Register count1 = I1;
Register limit = L3;
Label Lskip;
__ sll(count0, exact_log2(sizeof(jchar)), limit); // string0 is shorter
__ br(Assembler::greater, true, Assembler::pt, Lskip);
__ delayed()->sll(count1, exact_log2(sizeof(jchar)), limit); // string1 is shorter
__ bind(Lskip);
// If either string is empty (or both of them) the result is the difference in lengths
__ cmp(limit, 0);
__ br(Assembler::equal, true, Assembler::pn, Ldone);
__ delayed()->mov(O7, result); // result is difference in lengths
}
{
// Neither string is empty
Label Lloop;
Register base0 = L0;
Register base1 = L1;
Register chr0 = I0;
Register chr1 = I1;
Register limit = L3;
// Shift base0 and base1 to the end of the arrays, negate limit
__ add(base0, limit, base0);
__ add(base1, limit, base1);
__ neg(limit); // limit = -min{string0.length(), string1.length()}
__ lduh(base0, limit, chr0);
__ bind(Lloop);
__ lduh(base1, limit, chr1);
__ subcc(chr0, chr1, chr0);
__ br(Assembler::notZero, false, Assembler::pn, Ldone);
assert(chr0 == result, "result must be pre-placed");
__ delayed()->inccc(limit, sizeof(jchar));
__ br(Assembler::notZero, true, Assembler::pt, Lloop);
__ delayed()->lduh(base0, limit, chr0);
}
// If strings are equal up to min length, return the length difference.
__ mov(O7, result);
// Otherwise, return the difference between the first mismatched chars.
__ bind(Ldone);
}
// --------------------------------------------------------------------------------------------
void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no) {

View File

@ -86,6 +86,8 @@ define_pd_global(size_t, CMSYoungGenPerWorker, 16*M); // default max size of CM
define_pd_global(uintx, TypeProfileLevel, 111);
define_pd_global(bool, CompactStrings, true);
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint) \
\
product(intx, UseVIS, 99, \

View File

@ -66,6 +66,25 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& constant) {
}
}
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle& constant) {
address pc = _instructions->start() + pc_offset;
if (HotSpotMetaspaceConstantImpl::compressed(constant)) {
#ifdef _LP64
NativeMovConstReg32* move = nativeMovConstReg32_at(pc);
narrowKlass narrowOop = record_narrow_metadata_reference(constant);
move->set_data((intptr_t)narrowOop);
TRACE_jvmci_3("relocating (narrow metaspace constant) at %p/%p", pc, narrowOop);
#else
fatal("compressed Klass* on 32bit");
#endif
} else {
NativeMovConstReg* move = nativeMovConstReg_at(pc);
Metadata* reference = record_metadata_reference(constant);
move->set_data((intptr_t)reference);
TRACE_jvmci_3("relocating (metaspace constant) at %p/%p", pc, reference);
}
}
void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset) {
address pc = _instructions->start() + pc_offset;
NativeInstruction* inst = nativeInstruction_at(pc);
@ -87,10 +106,6 @@ void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset
}
}
void CodeInstaller::pd_relocate_CodeBlob(CodeBlob* cb, NativeInstruction* inst) {
fatal("CodeInstaller::pd_relocate_CodeBlob - sparc unimp");
}
void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination) {
address pc = (address) inst;
if (inst->is_call()) {
@ -168,16 +183,25 @@ void CodeInstaller::pd_relocate_poll(address pc, jint mark) {
// convert JVMCI register indices (as used in oop maps) to HotSpot registers
VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg) {
if (jvmci_reg < RegisterImpl::number_of_registers) {
// JVMCI Registers are numbered as follows:
// 0..31: Thirty-two General Purpose registers (CPU Registers)
// 32..63: Thirty-two single precision float registers
// 64..95: Thirty-two double precision float registers
// 96..111: Sixteen quad precision float registers
if (jvmci_reg < 32) {
return as_Register(jvmci_reg)->as_VMReg();
} else {
jint floatRegisterNumber = jvmci_reg - RegisterImpl::number_of_registers;
floatRegisterNumber += MAX2(0, floatRegisterNumber-32); // Beginning with f32, only every second register is going to be addressed
if (floatRegisterNumber < FloatRegisterImpl::number_of_registers) {
return as_FloatRegister(floatRegisterNumber)->as_VMReg();
jint floatRegisterNumber;
if(jvmci_reg < 64) { // Single precision
floatRegisterNumber = jvmci_reg - 32;
} else if(jvmci_reg < 96) {
floatRegisterNumber = 2 * (jvmci_reg - 64);
} else if(jvmci_reg < 112) {
floatRegisterNumber = 4 * (jvmci_reg - 96);
} else {
fatal("Unknown jvmci register");
}
ShouldNotReachHere();
return NULL;
return as_FloatRegister(floatRegisterNumber)->as_VMReg();
}
}
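
The reworked get_hotspot_reg encodes the JVMCI SPARC register numbering spelled out in the comment: 0..31 are the general-purpose registers, 32..63 single-precision floats, 64..95 doubles (every second float register), and 96..111 quads (every fourth). A small standalone check of that index arithmetic, as a hypothetical Java method:

public class JvmciSparcRegs {
    // Map a JVMCI register index to a HotSpot float register number,
    // following the ranges documented in get_hotspot_reg above.
    static int floatRegisterNumber(int jvmciReg) {
        if (jvmciReg < 32)  throw new IllegalArgumentException("general-purpose register");
        if (jvmciReg < 64)  return jvmciReg - 32;        // single precision: f0..f31
        if (jvmciReg < 96)  return 2 * (jvmciReg - 64);  // double precision: f0, f2, ...
        if (jvmciReg < 112) return 4 * (jvmciReg - 96);  // quad precision:   f0, f4, ...
        throw new IllegalArgumentException("unknown JVMCI register " + jvmciReg);
    }

    public static void main(String[] args) {
        System.out.println(floatRegisterNumber(33));  // 1 -> f1
        System.out.println(floatRegisterNumber(65));  // 2 -> f2 (second double)
        System.out.println(floatRegisterNumber(97));  // 4 -> f4 (second quad)
    }
}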

View File

@ -44,6 +44,9 @@
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
#ifdef COMPILER2
#include "opto/intrinsicnode.hpp"
#endif
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
@ -4253,27 +4256,385 @@ void MacroAssembler::reinit_heapbase() {
}
}
// Compare char[] arrays aligned to 4 bytes.
void MacroAssembler::char_arrays_equals(Register ary1, Register ary2,
Register limit, Register result,
Register chr1, Register chr2, Label& Ldone) {
Label Lvector, Lloop;
assert(chr1 == result, "should be the same");
#ifdef COMPILER2
// Note: limit contains number of bytes (2*char_elements) != 0.
andcc(limit, 0x2, chr1); // trailing character ?
// Compress char[] to byte[] by compressing 16 bytes at once. Return 0 on failure.
void MacroAssembler::string_compress_16(Register src, Register dst, Register cnt, Register result,
Register tmp1, Register tmp2, Register tmp3, Register tmp4,
FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, Label& Ldone) {
Label Lloop, Lslow;
assert(UseVIS >= 3, "VIS3 is required");
assert_different_registers(src, dst, cnt, tmp1, tmp2, tmp3, tmp4, result);
assert_different_registers(ftmp1, ftmp2, ftmp3);
// Check if cnt >= 8 (= 16 bytes)
cmp(cnt, 8);
br(Assembler::less, false, Assembler::pn, Lslow);
delayed()->mov(cnt, result); // copy count
// Check for 8-byte alignment of src and dst
or3(src, dst, tmp1);
andcc(tmp1, 7, G0);
br(Assembler::notZero, false, Assembler::pn, Lslow);
delayed()->nop();
// Set mask for bshuffle instruction
Register mask = tmp4;
set(0x13579bdf, mask);
bmask(mask, G0, G0);
// Set mask to 0xff00 ff00 ff00 ff00 to check for non-latin1 characters
Assembler::sethi(0xff00fc00, mask); // mask = 0x0000 0000 ff00 fc00
add(mask, 0x300, mask); // mask = 0x0000 0000 ff00 ff00
sllx(mask, 32, tmp1); // tmp1 = 0xff00 ff00 0000 0000
or3(mask, tmp1, mask); // mask = 0xff00 ff00 ff00 ff00
// Load first 8 bytes
ldx(src, 0, tmp1);
bind(Lloop);
// Load next 8 bytes
ldx(src, 8, tmp2);
// Check for non-latin1 character by testing if the most significant byte of a char is set.
// Although we have to move the data between integer and floating point registers, this is
// still faster than the corresponding VIS instructions (ford/fand/fcmpd).
or3(tmp1, tmp2, tmp3);
btst(tmp3, mask);
// annul zeroing if branch is not taken to preserve original count
brx(Assembler::notZero, true, Assembler::pn, Ldone);
delayed()->mov(G0, result); // 0 - failed
// Move bytes into float register
movxtod(tmp1, ftmp1);
movxtod(tmp2, ftmp2);
// Compress by copying one byte per char from ftmp1 and ftmp2 to ftmp3
bshuffle(ftmp1, ftmp2, ftmp3);
stf(FloatRegisterImpl::D, ftmp3, dst, 0);
// Increment addresses and decrement count
inc(src, 16);
inc(dst, 8);
dec(cnt, 8);
cmp(cnt, 8);
// annul LDX if branch is not taken to prevent access past end of string
br(Assembler::greaterEqual, true, Assembler::pt, Lloop);
delayed()->ldx(src, 0, tmp1);
// Fallback to slow version
bind(Lslow);
}
// Compress char[] to byte[]. Return 0 on failure.
void MacroAssembler::string_compress(Register src, Register dst, Register cnt, Register result, Register tmp, Label& Ldone) {
Label Lloop;
assert_different_registers(src, dst, cnt, tmp, result);
lduh(src, 0, tmp);
bind(Lloop);
inc(src, sizeof(jchar));
cmp(tmp, 0xff);
// annul zeroing if branch is not taken to preserve original count
br(Assembler::greater, true, Assembler::pn, Ldone); // don't check xcc
delayed()->mov(G0, result); // 0 - failed
deccc(cnt);
stb(tmp, dst, 0);
inc(dst);
// annul LDUH if branch is not taken to prevent access past end of string
br(Assembler::notZero, true, Assembler::pt, Lloop);
delayed()->lduh(src, 0, tmp); // hoisted
}
// Inflate byte[] to char[] by inflating 16 bytes at once.
void MacroAssembler::string_inflate_16(Register src, Register dst, Register cnt, Register tmp,
FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, FloatRegister ftmp4, Label& Ldone) {
Label Lloop, Lslow;
assert(UseVIS >= 3, "VIS3 is required");
assert_different_registers(src, dst, cnt, tmp);
assert_different_registers(ftmp1, ftmp2, ftmp3, ftmp4);
// Check if cnt >= 8 (= 16 bytes)
cmp(cnt, 8);
br(Assembler::less, false, Assembler::pn, Lslow);
delayed()->nop();
// Check for 8-byte alignment of src and dst
or3(src, dst, tmp);
andcc(tmp, 7, G0);
br(Assembler::notZero, false, Assembler::pn, Lslow);
// Initialize float register to zero
FloatRegister zerof = ftmp4;
delayed()->fzero(FloatRegisterImpl::D, zerof);
// Load first 8 bytes
ldf(FloatRegisterImpl::D, src, 0, ftmp1);
bind(Lloop);
inc(src, 8);
dec(cnt, 8);
// Inflate the string by interleaving each byte from the source array
// with a zero byte and storing the result in the destination array.
fpmerge(zerof, ftmp1->successor(), ftmp2);
stf(FloatRegisterImpl::D, ftmp2, dst, 8);
fpmerge(zerof, ftmp1, ftmp3);
stf(FloatRegisterImpl::D, ftmp3, dst, 0);
inc(dst, 16);
cmp(cnt, 8);
// annul LDX if branch is not taken to prevent access past end of string
br(Assembler::greaterEqual, true, Assembler::pt, Lloop);
delayed()->ldf(FloatRegisterImpl::D, src, 0, ftmp1);
// Fallback to slow version
bind(Lslow);
}
// Inflate byte[] to char[].
void MacroAssembler::string_inflate(Register src, Register dst, Register cnt, Register tmp, Label& Ldone) {
Label Loop;
assert_different_registers(src, dst, cnt, tmp);
ldub(src, 0, tmp);
bind(Loop);
inc(src);
deccc(cnt);
sth(tmp, dst, 0);
inc(dst, sizeof(jchar));
// annul LDUB if branch is not taken to prevent access past end of string
br(Assembler::notZero, true, Assembler::pt, Loop);
delayed()->ldub(src, 0, tmp); // hoisted
}
void MacroAssembler::string_compare(Register str1, Register str2,
Register cnt1, Register cnt2,
Register tmp1, Register tmp2,
Register result, int ae) {
Label Ldone, Lloop;
assert_different_registers(str1, str2, cnt1, cnt2, tmp1, result);
int stride1, stride2;
// Note: Making use of the fact that compareTo(a, b) == -compareTo(b, a)
// we interchange str1 and str2 in the UL case and negate the result.
// Like this, str1 is always latin1 encoded, except for the UU case.
if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
srl(cnt2, 1, cnt2);
}
// See if the lengths are different, and calculate min in cnt1.
// Save diff in case we need it for a tie-breaker.
Label Lskip;
Register diff = tmp1;
subcc(cnt1, cnt2, diff);
br(Assembler::greater, true, Assembler::pt, Lskip);
// cnt2 is shorter, so use its count:
delayed()->mov(cnt2, cnt1);
bind(Lskip);
// Rename registers
Register limit1 = cnt1;
Register limit2 = limit1;
Register chr1 = result;
Register chr2 = cnt2;
if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
// We need an additional register to keep track of two limits
assert_different_registers(str1, str2, cnt1, cnt2, tmp1, tmp2, result);
limit2 = tmp2;
}
// Is the minimum length zero?
cmp(limit1, (int)0); // use cast to resolve overloading ambiguity
br(Assembler::equal, true, Assembler::pn, Ldone);
// result is difference in lengths
if (ae == StrIntrinsicNode::UU) {
delayed()->sra(diff, 1, result); // Divide by 2 to get number of chars
} else {
delayed()->mov(diff, result);
}
// Load first characters
if (ae == StrIntrinsicNode::LL) {
stride1 = stride2 = sizeof(jbyte);
ldub(str1, 0, chr1);
ldub(str2, 0, chr2);
} else if (ae == StrIntrinsicNode::UU) {
stride1 = stride2 = sizeof(jchar);
lduh(str1, 0, chr1);
lduh(str2, 0, chr2);
} else {
stride1 = sizeof(jbyte);
stride2 = sizeof(jchar);
ldub(str1, 0, chr1);
lduh(str2, 0, chr2);
}
// Compare first characters
subcc(chr1, chr2, chr1);
br(Assembler::notZero, false, Assembler::pt, Ldone);
assert(chr1 == result, "result must be pre-placed");
delayed()->nop();
// Check if the strings start at same location
cmp(str1, str2);
brx(Assembler::equal, true, Assembler::pn, Ldone);
delayed()->mov(G0, result); // result is zero
// We have no guarantee that on 64 bit the higher half of limit is 0
signx(limit1);
// Get limit
if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
sll(limit1, 1, limit2);
subcc(limit2, stride2, chr2);
}
subcc(limit1, stride1, chr1);
br(Assembler::zero, true, Assembler::pn, Ldone);
// result is difference in lengths
if (ae == StrIntrinsicNode::UU) {
delayed()->sra(diff, 1, result); // Divide by 2 to get number of chars
} else {
delayed()->mov(diff, result);
}
// Shift str1 and str2 to the end of the arrays, negate limit
add(str1, limit1, str1);
add(str2, limit2, str2);
neg(chr1, limit1); // limit1 = -(limit1-stride1)
if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
neg(chr2, limit2); // limit2 = -(limit2-stride2)
}
// Compare the rest of the characters
if (ae == StrIntrinsicNode::UU) {
lduh(str1, limit1, chr1);
} else {
ldub(str1, limit1, chr1);
}
bind(Lloop);
if (ae == StrIntrinsicNode::LL) {
ldub(str2, limit2, chr2);
} else {
lduh(str2, limit2, chr2);
}
subcc(chr1, chr2, chr1);
br(Assembler::notZero, false, Assembler::pt, Ldone);
assert(chr1 == result, "result must be pre-placed");
delayed()->inccc(limit1, stride1);
if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
inccc(limit2, stride2);
}
// annul LDUB if branch is not taken to prevent access past end of string
br(Assembler::notZero, true, Assembler::pt, Lloop);
if (ae == StrIntrinsicNode::UU) {
delayed()->lduh(str1, limit2, chr1);
} else {
delayed()->ldub(str1, limit1, chr1);
}
// If strings are equal up to min length, return the length difference.
if (ae == StrIntrinsicNode::UU) {
// Divide by 2 to get number of chars
sra(diff, 1, result);
} else {
mov(diff, result);
}
// Otherwise, return the difference between the first mismatched chars.
bind(Ldone);
if(ae == StrIntrinsicNode::UL) {
// Negate result (see note above)
neg(result);
}
}
void MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2,
Register limit, Register tmp, Register result, bool is_byte) {
Label Ldone, Lvector, Lloop;
assert_different_registers(ary1, ary2, limit, tmp, result);
int length_offset = arrayOopDesc::length_offset_in_bytes();
int base_offset = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR);
if (is_array_equ) {
// return true if the same array
cmp(ary1, ary2);
brx(Assembler::equal, true, Assembler::pn, Ldone);
delayed()->add(G0, 1, result); // equal
br_null(ary1, true, Assembler::pn, Ldone);
delayed()->mov(G0, result); // not equal
br_null(ary2, true, Assembler::pn, Ldone);
delayed()->mov(G0, result); // not equal
// load the lengths of arrays
ld(Address(ary1, length_offset), limit);
ld(Address(ary2, length_offset), tmp);
// return false if the two arrays are not equal length
cmp(limit, tmp);
br(Assembler::notEqual, true, Assembler::pn, Ldone);
delayed()->mov(G0, result); // not equal
}
cmp_zero_and_br(Assembler::zero, limit, Ldone, true, Assembler::pn);
delayed()->add(G0, 1, result); // zero-length arrays are equal
if (is_array_equ) {
// load array addresses
add(ary1, base_offset, ary1);
add(ary2, base_offset, ary2);
} else {
// We have no guarantee that on 64 bit the higher half of limit is 0
signx(limit);
}
if (is_byte) {
Label Lskip;
// check for trailing byte
andcc(limit, 0x1, tmp);
br(Assembler::zero, false, Assembler::pt, Lskip);
delayed()->nop();
// compare the trailing byte
sub(limit, sizeof(jbyte), limit);
ldub(ary1, limit, result);
ldub(ary2, limit, tmp);
cmp(result, tmp);
br(Assembler::notEqual, true, Assembler::pt, Ldone);
delayed()->mov(G0, result); // not equal
// only one byte?
cmp_zero_and_br(zero, limit, Ldone, true, Assembler::pn);
delayed()->add(G0, 1, result); // zero-length arrays are equal
bind(Lskip);
} else if (is_array_equ) {
// set byte count
sll(limit, exact_log2(sizeof(jchar)), limit);
}
// check for trailing character
andcc(limit, 0x2, tmp);
br(Assembler::zero, false, Assembler::pt, Lvector);
delayed()->nop();
// compare the trailing char
sub(limit, sizeof(jchar), limit);
lduh(ary1, limit, chr1);
lduh(ary2, limit, chr2);
cmp(chr1, chr2);
lduh(ary1, limit, result);
lduh(ary2, limit, tmp);
cmp(result, tmp);
br(Assembler::notEqual, true, Assembler::pt, Ldone);
delayed()->mov(G0, result); // not equal
// only one char ?
// only one char?
cmp_zero_and_br(zero, limit, Ldone, true, Assembler::pn);
delayed()->add(G0, 1, result); // zero-length arrays are equal
@ -4284,21 +4645,23 @@ void MacroAssembler::char_arrays_equals(Register ary1, Register ary2,
add(ary2, limit, ary2);
neg(limit, limit);
lduw(ary1, limit, chr1);
lduw(ary1, limit, result);
bind(Lloop);
lduw(ary2, limit, chr2);
cmp(chr1, chr2);
lduw(ary2, limit, tmp);
cmp(result, tmp);
br(Assembler::notEqual, true, Assembler::pt, Ldone);
delayed()->mov(G0, result); // not equal
inccc(limit, 2*sizeof(jchar));
// annul LDUW if branch is not taken to prevent access past end of array
br(Assembler::notZero, true, Assembler::pt, Lloop);
delayed()->lduw(ary1, limit, chr1); // hoisted
delayed()->lduw(ary1, limit, result); // hoisted
// Caller should set it:
// add(G0, 1, result); // equals
add(G0, 1, result); // equals
bind(Ldone);
}
#endif
// Use BIS for zeroing (count is in bytes).
void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) {
assert(UseBlockZeroing && VM_Version::has_block_zeroing(), "only works with BIS zeroing");
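
At the Java level, the new string_compress/string_inflate paths above correspond to converting between UTF-16 char data and Latin-1 byte data, bailing out as soon as any char needs more than 8 bits; the 0xff00ff00ff00ff00 mask in string_compress_16 is exactly that test applied to four chars at once. A scalar Java sketch of the same compress-with-failure contract, returning 0 on failure like the assembly's result register convention (names here are illustrative):

public class CompressSketch {
    // Copy cnt chars from src into dst as Latin-1 bytes.
    // Returns cnt on success, 0 as soon as a char does not fit in one byte,
    // mirroring the "0 - failed" result convention in string_compress above.
    static int compress(char[] src, byte[] dst, int cnt) {
        for (int i = 0; i < cnt; i++) {
            char c = src[i];
            if ((c & 0xff00) != 0) {   // same test as the 0xff00... mask, one char at a time
                return 0;
            }
            dst[i] = (byte) c;
        }
        return cnt;
    }

    public static void main(String[] args) {
        byte[] out = new byte[3];
        System.out.println(compress("abc".toCharArray(), out, 3));      // 3 (all Latin-1)
        System.out.println(compress("ab\u0100".toCharArray(), out, 3)); // 0 (needs UTF-16)
    }
}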

View File

@ -1433,10 +1433,31 @@ public:
void inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2);
void inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2);
// Compare char[] arrays aligned to 4 bytes.
void char_arrays_equals(Register ary1, Register ary2,
Register limit, Register result,
Register chr1, Register chr2, Label& Ldone);
#ifdef COMPILER2
// Compress char[] to byte[] by compressing 16 bytes at once. Return 0 on failure.
void string_compress_16(Register src, Register dst, Register cnt, Register result,
Register tmp1, Register tmp2, Register tmp3, Register tmp4,
FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, Label& Ldone);
// Compress char[] to byte[]. Return 0 on failure.
void string_compress(Register src, Register dst, Register cnt, Register tmp, Register result, Label& Ldone);
// Inflate byte[] to char[] by inflating 16 bytes at once.
void string_inflate_16(Register src, Register dst, Register cnt, Register tmp,
FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, FloatRegister ftmp4, Label& Ldone);
// Inflate byte[] to char[].
void string_inflate(Register src, Register dst, Register cnt, Register tmp, Label& Ldone);
void string_compare(Register str1, Register str2,
Register cnt1, Register cnt2,
Register tmp1, Register tmp2,
Register result, int ae);
void array_equals(bool is_array_equ, Register ary1, Register ary2,
Register limit, Register tmp, Register result, bool is_byte);
#endif
// Use BIS for zeroing
void bis_zeroing(Register to, Register count, Register temp, Label& Ldone);

View File

@ -69,7 +69,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
Register obj_reg, SystemDictionary::WKID klass_id,
Register temp_reg, Register temp2_reg,
const char* error_message) {
Klass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
bool did_save = false;
if (temp_reg == noreg || temp2_reg == noreg) {

View File

@ -417,6 +417,67 @@ void NativeMovConstReg::test() {
//-------------------------------------------------------------------
void NativeMovConstReg32::verify() {
NativeInstruction::verify();
// make sure code pattern is actually a "set_metadata" synthetic instruction
// see MacroAssembler::set_oop()
int i0 = long_at(sethi_offset);
int i1 = long_at(add_offset);
// verify the pattern "sethi %hi22(imm), reg ; add reg, %lo10(imm), reg"
Register rd = inv_rd(i0);
if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
fatal("not a set_metadata");
}
}
void NativeMovConstReg32::print() {
tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
}
intptr_t NativeMovConstReg32::data() const {
return data32(long_at(sethi_offset), long_at(add_offset));
}
void NativeMovConstReg32::set_data(intptr_t x) {
set_long_at(sethi_offset, set_data32_sethi( long_at(sethi_offset), x));
set_long_at(add_offset, set_data32_simm13( long_at(add_offset), x));
// also store the value into an oop_Relocation cell, if any
CodeBlob* cb = CodeCache::find_blob(instruction_address());
nmethod* nm = cb ? cb->as_nmethod_or_null() : NULL;
if (nm != NULL) {
RelocIterator iter(nm, instruction_address(), next_instruction_address());
oop* oop_addr = NULL;
Metadata** metadata_addr = NULL;
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
oop_Relocation *r = iter.oop_reloc();
if (oop_addr == NULL) {
oop_addr = r->oop_addr();
*oop_addr = cast_to_oop(x);
} else {
assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
}
}
if (iter.type() == relocInfo::metadata_type) {
metadata_Relocation *r = iter.metadata_reloc();
if (metadata_addr == NULL) {
metadata_addr = r->metadata_addr();
*metadata_addr = (Metadata*)x;
} else {
assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
}
}
}
}
}
//-------------------------------------------------------------------
void NativeMovConstRegPatching::verify() {
NativeInstruction::verify();
// Make sure code pattern is sethi/nop/add.

View File

@ -518,6 +518,46 @@ class NativeFarCall: public NativeInstruction {
#endif // _LP64
// An interface for accessing/manipulating 32 bit native set_metadata imm, reg instructions
// (used to manipulate inlined data references, etc.)
// set_metadata imm, reg
// == sethi %hi22(imm), reg ; add reg, %lo10(imm), reg
class NativeMovConstReg32;
inline NativeMovConstReg32* nativeMovConstReg32_at(address address);
class NativeMovConstReg32: public NativeInstruction {
public:
enum Sparc_specific_constants {
sethi_offset = 0,
add_offset = 4,
instruction_size = 8
};
address instruction_address() const { return addr_at(0); }
address next_instruction_address() const { return addr_at(instruction_size); }
// (The [set_]data accessor respects oop_type relocs also.)
intptr_t data() const;
void set_data(intptr_t x);
// report the destination register
Register destination() { return inv_rd(long_at(sethi_offset)); }
void verify();
void print();
// unit test stuff
static void test();
// Creation
friend inline NativeMovConstReg32* nativeMovConstReg32_at(address address) {
NativeMovConstReg32* test = (NativeMovConstReg32*)address;
#ifdef ASSERT
test->verify();
#endif
return test;
}
};
// An interface for accessing/manipulating native set_metadata imm, reg instructions.
// (used to manipulate inlined data references, etc.)
// set_metadata imm, reg
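
The new NativeMovConstReg32 accessor treats a 32-bit "set" as the two-instruction SPARC idiom described in the comment above: sethi %hi22(imm), reg followed by add reg, %lo10(imm), reg, where %hi22 is the upper 22 bits and %lo10 the lower 10. A quick Java illustration of splitting and reassembling an immediate that way (a sketch of the encoding arithmetic only, not of the instruction patching itself):

public class SethiAddSplit {
    static int hi22(int imm) { return imm >>> 10; }   // bits 31..10, as placed by sethi
    static int lo10(int imm) { return imm & 0x3ff; }  // bits 9..0, as added afterwards

    public static void main(String[] args) {
        int imm = 0xCAFEBABE;
        int rebuilt = (hi22(imm) << 10) | lo10(imm);  // what the register holds afterwards
        System.out.printf("%08x -> hi22=%06x lo10=%03x -> %08x%n",
                          imm, hi22(imm), lo10(imm), rebuilt);
    }
}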

View File

@ -1955,7 +1955,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
// return to caller
//
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
methodHandle method,
const methodHandle& method,
int compile_id,
BasicType* in_sig_bt,
VMRegPair* in_regs,

View File

@ -2905,232 +2905,6 @@ enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
__ float_cmp( $primary, -1, Fsrc1, Fsrc2, Rdst);
%}
enc_class enc_String_Compare(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result) %{
Label Ldone, Lloop;
MacroAssembler _masm(&cbuf);
Register str1_reg = reg_to_register_object($str1$$reg);
Register str2_reg = reg_to_register_object($str2$$reg);
Register cnt1_reg = reg_to_register_object($cnt1$$reg);
Register cnt2_reg = reg_to_register_object($cnt2$$reg);
Register result_reg = reg_to_register_object($result$$reg);
assert(result_reg != str1_reg &&
result_reg != str2_reg &&
result_reg != cnt1_reg &&
result_reg != cnt2_reg ,
"need different registers");
// Compute the minimum of the string lengths(str1_reg) and the
// difference of the string lengths (stack)
// See if the lengths are different, and calculate min in str1_reg.
// Stash diff in O7 in case we need it for a tie-breaker.
Label Lskip;
__ subcc(cnt1_reg, cnt2_reg, O7);
__ sll(cnt1_reg, exact_log2(sizeof(jchar)), cnt1_reg); // scale the limit
__ br(Assembler::greater, true, Assembler::pt, Lskip);
// cnt2 is shorter, so use its count:
__ delayed()->sll(cnt2_reg, exact_log2(sizeof(jchar)), cnt1_reg); // scale the limit
__ bind(Lskip);
// reallocate cnt1_reg, cnt2_reg, result_reg
// Note: limit_reg holds the string length pre-scaled by 2
Register limit_reg = cnt1_reg;
Register chr2_reg = cnt2_reg;
Register chr1_reg = result_reg;
// str{12} are the base pointers
// Is the minimum length zero?
__ cmp(limit_reg, (int)(0 * sizeof(jchar))); // use cast to resolve overloading ambiguity
__ br(Assembler::equal, true, Assembler::pn, Ldone);
__ delayed()->mov(O7, result_reg); // result is difference in lengths
// Load first characters
__ lduh(str1_reg, 0, chr1_reg);
__ lduh(str2_reg, 0, chr2_reg);
// Compare first characters
__ subcc(chr1_reg, chr2_reg, chr1_reg);
__ br(Assembler::notZero, false, Assembler::pt, Ldone);
assert(chr1_reg == result_reg, "result must be pre-placed");
__ delayed()->nop();
{
// Check after comparing first character to see if strings are equivalent
Label LSkip2;
// Check if the strings start at same location
__ cmp(str1_reg, str2_reg);
__ brx(Assembler::notEqual, true, Assembler::pt, LSkip2);
__ delayed()->nop();
// Check if the length difference is zero (in O7)
__ cmp(G0, O7);
__ br(Assembler::equal, true, Assembler::pn, Ldone);
__ delayed()->mov(G0, result_reg); // result is zero
// Strings might not be equal
__ bind(LSkip2);
}
// We have no guarantee that on 64 bit the higher half of limit_reg is 0
__ signx(limit_reg);
__ subcc(limit_reg, 1 * sizeof(jchar), chr1_reg);
__ br(Assembler::equal, true, Assembler::pn, Ldone);
__ delayed()->mov(O7, result_reg); // result is difference in lengths
// Shift str1_reg and str2_reg to the end of the arrays, negate limit
__ add(str1_reg, limit_reg, str1_reg);
__ add(str2_reg, limit_reg, str2_reg);
__ neg(chr1_reg, limit_reg); // limit = -(limit-2)
// Compare the rest of the characters
__ lduh(str1_reg, limit_reg, chr1_reg);
__ bind(Lloop);
// __ lduh(str1_reg, limit_reg, chr1_reg); // hoisted
__ lduh(str2_reg, limit_reg, chr2_reg);
__ subcc(chr1_reg, chr2_reg, chr1_reg);
__ br(Assembler::notZero, false, Assembler::pt, Ldone);
assert(chr1_reg == result_reg, "result must be pre-placed");
__ delayed()->inccc(limit_reg, sizeof(jchar));
// annul LDUH if branch is not taken to prevent access past end of string
__ br(Assembler::notZero, true, Assembler::pt, Lloop);
__ delayed()->lduh(str1_reg, limit_reg, chr1_reg); // hoisted
// If strings are equal up to min length, return the length difference.
__ mov(O7, result_reg);
// Otherwise, return the difference between the first mismatched chars.
__ bind(Ldone);
%}
enc_class enc_String_Equals(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result) %{
Label Lchar, Lchar_loop, Ldone;
MacroAssembler _masm(&cbuf);
Register str1_reg = reg_to_register_object($str1$$reg);
Register str2_reg = reg_to_register_object($str2$$reg);
Register cnt_reg = reg_to_register_object($cnt$$reg);
Register tmp1_reg = O7;
Register result_reg = reg_to_register_object($result$$reg);
assert(result_reg != str1_reg &&
result_reg != str2_reg &&
result_reg != cnt_reg &&
result_reg != tmp1_reg ,
"need different registers");
__ cmp(str1_reg, str2_reg); //same char[] ?
__ brx(Assembler::equal, true, Assembler::pn, Ldone);
__ delayed()->add(G0, 1, result_reg);
__ cmp_zero_and_br(Assembler::zero, cnt_reg, Ldone, true, Assembler::pn);
__ delayed()->add(G0, 1, result_reg); // count == 0
//rename registers
Register limit_reg = cnt_reg;
Register chr1_reg = result_reg;
Register chr2_reg = tmp1_reg;
// We have no guarantee that on 64 bit the higher half of limit_reg is 0
__ signx(limit_reg);
//check for alignment and position the pointers to the ends
__ or3(str1_reg, str2_reg, chr1_reg);
__ andcc(chr1_reg, 0x3, chr1_reg);
// notZero means at least one not 4-byte aligned.
// We could optimize the case when both arrays are not aligned
// but it is not a frequent case and it requires additional checks.
__ br(Assembler::notZero, false, Assembler::pn, Lchar); // char by char compare
__ delayed()->sll(limit_reg, exact_log2(sizeof(jchar)), limit_reg); // set byte count
// Compare char[] arrays aligned to 4 bytes.
__ char_arrays_equals(str1_reg, str2_reg, limit_reg, result_reg,
chr1_reg, chr2_reg, Ldone);
__ ba(Ldone);
__ delayed()->add(G0, 1, result_reg);
// char by char compare
__ bind(Lchar);
__ add(str1_reg, limit_reg, str1_reg);
__ add(str2_reg, limit_reg, str2_reg);
__ neg(limit_reg); //negate count
__ lduh(str1_reg, limit_reg, chr1_reg);
// Lchar_loop
__ bind(Lchar_loop);
__ lduh(str2_reg, limit_reg, chr2_reg);
__ cmp(chr1_reg, chr2_reg);
__ br(Assembler::notEqual, true, Assembler::pt, Ldone);
__ delayed()->mov(G0, result_reg); //not equal
__ inccc(limit_reg, sizeof(jchar));
// annul LDUH if branch is not taken to prevent access past end of string
__ br(Assembler::notZero, true, Assembler::pt, Lchar_loop);
__ delayed()->lduh(str1_reg, limit_reg, chr1_reg); // hoisted
__ add(G0, 1, result_reg); //equal
__ bind(Ldone);
%}
enc_class enc_Array_Equals(o0RegP ary1, o1RegP ary2, g3RegP tmp1, notemp_iRegI result) %{
Label Lvector, Ldone, Lloop;
MacroAssembler _masm(&cbuf);
Register ary1_reg = reg_to_register_object($ary1$$reg);
Register ary2_reg = reg_to_register_object($ary2$$reg);
Register tmp1_reg = reg_to_register_object($tmp1$$reg);
Register tmp2_reg = O7;
Register result_reg = reg_to_register_object($result$$reg);
int length_offset = arrayOopDesc::length_offset_in_bytes();
int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR);
// return true if the same array
__ cmp(ary1_reg, ary2_reg);
__ brx(Assembler::equal, true, Assembler::pn, Ldone);
__ delayed()->add(G0, 1, result_reg); // equal
__ br_null(ary1_reg, true, Assembler::pn, Ldone);
__ delayed()->mov(G0, result_reg); // not equal
__ br_null(ary2_reg, true, Assembler::pn, Ldone);
__ delayed()->mov(G0, result_reg); // not equal
//load the lengths of arrays
__ ld(Address(ary1_reg, length_offset), tmp1_reg);
__ ld(Address(ary2_reg, length_offset), tmp2_reg);
// return false if the two arrays are not equal length
__ cmp(tmp1_reg, tmp2_reg);
__ br(Assembler::notEqual, true, Assembler::pn, Ldone);
__ delayed()->mov(G0, result_reg); // not equal
__ cmp_zero_and_br(Assembler::zero, tmp1_reg, Ldone, true, Assembler::pn);
__ delayed()->add(G0, 1, result_reg); // zero-length arrays are equal
// load array addresses
__ add(ary1_reg, base_offset, ary1_reg);
__ add(ary2_reg, base_offset, ary2_reg);
// renaming registers
Register chr1_reg = result_reg; // for characters in ary1
Register chr2_reg = tmp2_reg; // for characters in ary2
Register limit_reg = tmp1_reg; // length
// set byte count
__ sll(limit_reg, exact_log2(sizeof(jchar)), limit_reg);
// Compare char[] arrays aligned to 4 bytes.
__ char_arrays_equals(ary1_reg, ary2_reg, limit_reg, result_reg,
chr1_reg, chr2_reg, Ldone);
__ add(G0, 1, result_reg); // equals
__ bind(Ldone);
%}
enc_class enc_rethrow() %{
cbuf.set_insts_mark();
Register temp_reg = G3;
@ -10275,33 +10049,204 @@ instruct clear_array_bis_2(g1RegX cnt, o0RegP base, iRegX tmp, Universe dummy, f
ins_pipe(long_memory_op);
%}
instruct string_compare(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result,
o7RegI tmp, flagsReg ccr) %{
instruct string_compareL(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result,
o7RegI tmp, flagsReg ccr) %{
predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ccr, KILL tmp);
ins_cost(300);
format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp" %}
ins_encode( enc_String_Compare(str1, str2, cnt1, cnt2, result) );
format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp" %}
ins_encode %{
__ string_compare($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
$tmp$$Register, $tmp$$Register,
$result$$Register, StrIntrinsicNode::LL);
%}
ins_pipe(long_memory_op);
%}
instruct string_equals(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result,
o7RegI tmp, flagsReg ccr) %{
instruct string_compareU(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result,
o7RegI tmp, flagsReg ccr) %{
predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ccr, KILL tmp);
ins_cost(300);
format %{ "String Compare char[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp" %}
ins_encode %{
__ string_compare($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
$tmp$$Register, $tmp$$Register,
$result$$Register, StrIntrinsicNode::UU);
%}
ins_pipe(long_memory_op);
%}
instruct string_compareLU(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result,
o7RegI tmp1, g1RegI tmp2, flagsReg ccr) %{
predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ccr, KILL tmp1, KILL tmp2);
ins_cost(300);
format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1,$tmp2" %}
ins_encode %{
__ string_compare($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
$tmp1$$Register, $tmp2$$Register,
$result$$Register, StrIntrinsicNode::LU);
%}
ins_pipe(long_memory_op);
%}
instruct string_compareUL(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result,
o7RegI tmp1, g1RegI tmp2, flagsReg ccr) %{
predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ccr, KILL tmp1, KILL tmp2);
ins_cost(300);
format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1,$tmp2" %}
ins_encode %{
__ string_compare($str2$$Register, $str1$$Register,
$cnt2$$Register, $cnt1$$Register,
$tmp1$$Register, $tmp2$$Register,
$result$$Register, StrIntrinsicNode::UL);
%}
ins_pipe(long_memory_op);
%}
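The LL/UU/LU/UL suffixes above name the encodings of the two operands: L operands are Latin-1 byte[] data, U operands are UTF-16 char[] data. A scalar sketch of the mixed LU case, assuming the usual compare contract (difference of the first mismatching elements, otherwise the length difference); illustrative only, not the macro assembler's code path:

#include <cstdint>
#include <cstddef>
#include <algorithm>

// Latin-1 (byte) string vs. UTF-16 (char) string, element by element.
static int compare_LU(const uint8_t* s1, size_t n1,
                      const uint16_t* s2, size_t n2) {
  size_t n = std::min(n1, n2);
  for (size_t i = 0; i < n; i++) {
    int diff = (int)s1[i] - (int)s2[i];  // zero-extend the Latin-1 byte
    if (diff != 0) return diff;
  }
  return (int)n1 - (int)n2;
}

Note that string_compareUL above calls string_compare with the operands swapped (str2/cnt2 before str1/cnt1).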
instruct string_equalsL(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result,
o7RegI tmp, flagsReg ccr) %{
predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
match(Set result (StrEquals (Binary str1 str2) cnt));
effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL tmp, KILL ccr);
ins_cost(300);
format %{ "String Equals $str1,$str2,$cnt -> $result // KILL $tmp" %}
ins_encode( enc_String_Equals(str1, str2, cnt, result) );
format %{ "String Equals byte[] $str1,$str2,$cnt -> $result // KILL $tmp" %}
ins_encode %{
__ array_equals(false, $str1$$Register, $str2$$Register,
$cnt$$Register, $tmp$$Register,
$result$$Register, true /* byte */);
%}
ins_pipe(long_memory_op);
%}
instruct array_equals(o0RegP ary1, o1RegP ary2, g3RegI tmp1, notemp_iRegI result,
o7RegI tmp2, flagsReg ccr) %{
instruct string_equalsU(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result,
o7RegI tmp, flagsReg ccr) %{
predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
match(Set result (StrEquals (Binary str1 str2) cnt));
effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL tmp, KILL ccr);
ins_cost(300);
format %{ "String Equals char[] $str1,$str2,$cnt -> $result // KILL $tmp" %}
ins_encode %{
__ array_equals(false, $str1$$Register, $str2$$Register,
$cnt$$Register, $tmp$$Register,
$result$$Register, false /* byte */);
%}
ins_pipe(long_memory_op);
%}
instruct array_equalsB(o0RegP ary1, o1RegP ary2, g3RegI tmp1, notemp_iRegI result,
o7RegI tmp2, flagsReg ccr) %{
predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
match(Set result (AryEq ary1 ary2));
effect(USE_KILL ary1, USE_KILL ary2, KILL tmp1, KILL tmp2, KILL ccr);
ins_cost(300);
format %{ "Array Equals $ary1,$ary2 -> $result // KILL $tmp1,$tmp2" %}
ins_encode( enc_Array_Equals(ary1, ary2, tmp1, result));
ins_encode %{
__ array_equals(true, $ary1$$Register, $ary2$$Register,
$tmp1$$Register, $tmp2$$Register,
$result$$Register, true /* byte */);
%}
ins_pipe(long_memory_op);
%}
instruct array_equalsC(o0RegP ary1, o1RegP ary2, g3RegI tmp1, notemp_iRegI result,
o7RegI tmp2, flagsReg ccr) %{
predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
match(Set result (AryEq ary1 ary2));
effect(USE_KILL ary1, USE_KILL ary2, KILL tmp1, KILL tmp2, KILL ccr);
ins_cost(300);
format %{ "Array Equals $ary1,$ary2 -> $result // KILL $tmp1,$tmp2" %}
ins_encode %{
__ array_equals(true, $ary1$$Register, $ary2$$Register,
$tmp1$$Register, $tmp2$$Register,
$result$$Register, false /* byte */);
%}
ins_pipe(long_memory_op);
%}
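A scalar reference for the equals intrinsics above (illustrative only): two arrays are equal iff they have the same length and identical elements. The byte/char flag passed to array_equals corresponds to the element width, which the template parameter stands in for here:

#include <cstddef>
#include <cstdint>

// T is uint8_t for the byte[] (LL) variants, uint16_t for the char[] (UU) variants.
template <typename T>
static bool arrays_equal(const T* a, size_t len_a, const T* b, size_t len_b) {
  if (len_a != len_b) return false;
  for (size_t i = 0; i < len_a; i++) {
    if (a[i] != b[i]) return false;
  }
  return true;
}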
// char[] to byte[] compression
instruct string_compress(o0RegP src, o1RegP dst, g3RegI len, notemp_iRegI result, iRegL tmp, flagsReg ccr) %{
predicate(UseVIS < 3);
match(Set result (StrCompressedCopy src (Binary dst len)));
effect(TEMP result, TEMP tmp, USE_KILL src, USE_KILL dst, USE_KILL len, KILL ccr);
ins_cost(300);
format %{ "String Compress $src,$dst,$len -> $result // KILL $tmp" %}
ins_encode %{
Label Ldone;
__ signx($len$$Register);
__ cmp_zero_and_br(Assembler::zero, $len$$Register, Ldone, false, Assembler::pn);
__ delayed()->mov($len$$Register, $result$$Register); // copy count
__ string_compress($src$$Register, $dst$$Register, $len$$Register, $result$$Register, $tmp$$Register, Ldone);
__ bind(Ldone);
%}
ins_pipe(long_memory_op);
%}
// fast char[] to byte[] compression using VIS instructions
instruct string_compress_fast(o0RegP src, o1RegP dst, g3RegI len, notemp_iRegI result,
iRegL tmp1, iRegL tmp2, iRegL tmp3, iRegL tmp4,
regD ftmp1, regD ftmp2, regD ftmp3, flagsReg ccr) %{
predicate(UseVIS >= 3);
match(Set result (StrCompressedCopy src (Binary dst len)));
effect(TEMP result, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP ftmp1, TEMP ftmp2, TEMP ftmp3, USE_KILL src, USE_KILL dst, USE_KILL len, KILL ccr);
ins_cost(300);
format %{ "String Compress Fast $src,$dst,$len -> $result // KILL $tmp1,$tmp2,$tmp3,$tmp4,$ftmp1,$ftmp2,$ftmp3" %}
ins_encode %{
Label Ldone;
__ signx($len$$Register);
__ string_compress_16($src$$Register, $dst$$Register, $len$$Register, $result$$Register,
$tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register,
$ftmp1$$FloatRegister, $ftmp2$$FloatRegister, $ftmp3$$FloatRegister, Ldone);
__ cmp_and_brx_short($len$$Register, 0, Assembler::equal, Assembler::pn, Ldone);
__ string_compress($src$$Register, $dst$$Register, $len$$Register, $result$$Register, $tmp1$$Register, Ldone);
__ bind(Ldone);
%}
ins_pipe(long_memory_op);
%}
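A scalar sketch of the compression these instructs drive (illustrative only; the exact value the intrinsic leaves in $result is not modelled here). Compression only succeeds while every char fits in Latin-1:

#include <cstdint>
#include <cstddef>

// Copy UTF-16 chars into bytes until a char that does not fit in Latin-1
// is found; returns the number of chars actually compressed.
static size_t compress_chars(const uint16_t* src, uint8_t* dst, size_t len) {
  for (size_t i = 0; i < len; i++) {
    if (src[i] > 0xFF) return i;   // not representable in Latin-1, stop here
    dst[i] = (uint8_t)src[i];
  }
  return len;                      // whole input compressed
}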
// byte[] to char[] inflation
instruct string_inflate(Universe dummy, o0RegP src, o1RegP dst, g3RegI len,
iRegL tmp, flagsReg ccr) %{
match(Set dummy (StrInflatedCopy src (Binary dst len)));
effect(TEMP tmp, USE_KILL src, USE_KILL dst, USE_KILL len, KILL ccr);
ins_cost(300);
format %{ "String Inflate $src,$dst,$len // KILL $tmp" %}
ins_encode %{
Label Ldone;
__ signx($len$$Register);
__ cmp_and_brx_short($len$$Register, 0, Assembler::equal, Assembler::pn, Ldone);
__ string_inflate($src$$Register, $dst$$Register, $len$$Register, $tmp$$Register, Ldone);
__ bind(Ldone);
%}
ins_pipe(long_memory_op);
%}
// fast byte[] to char[] inflation using VIS instructions
instruct string_inflate_fast(Universe dummy, o0RegP src, o1RegP dst, g3RegI len,
iRegL tmp, regD ftmp1, regD ftmp2, regD ftmp3, regD ftmp4, flagsReg ccr) %{
predicate(UseVIS >= 3);
match(Set dummy (StrInflatedCopy src (Binary dst len)));
effect(TEMP tmp, TEMP ftmp1, TEMP ftmp2, TEMP ftmp3, TEMP ftmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL ccr);
ins_cost(300);
format %{ "String Inflate Fast $src,$dst,$len // KILL $tmp,$ftmp1,$ftmp2,$ftmp3,$ftmp4" %}
ins_encode %{
Label Ldone;
__ signx($len$$Register);
__ string_inflate_16($src$$Register, $dst$$Register, $len$$Register, $tmp$$Register,
$ftmp1$$FloatRegister, $ftmp2$$FloatRegister, $ftmp3$$FloatRegister, $ftmp4$$FloatRegister, Ldone);
__ cmp_and_brx_short($len$$Register, 0, Assembler::equal, Assembler::pn, Ldone);
__ string_inflate($src$$Register, $dst$$Register, $len$$Register, $tmp$$Register, Ldone);
__ bind(Ldone);
%}
ins_pipe(long_memory_op);
%}
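The matching scalar sketch for inflation (illustrative only): each Latin-1 byte is widened to a UTF-16 char by zero-extension, and the VIS paths above do the same work a vector at a time:

#include <cstdint>
#include <cstddef>

// Widen Latin-1 bytes to UTF-16 chars; zero-extension, never sign-extension.
static void inflate_bytes(const uint8_t* src, uint16_t* dst, size_t len) {
  for (size_t i = 0; i < len; i++) {
    dst[i] = (uint16_t)src[i];
  }
}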

View File

@ -83,7 +83,26 @@
declare_constant(VM_Version::vis1_instructions_m) \
declare_constant(VM_Version::vis2_instructions_m) \
declare_constant(VM_Version::vis3_instructions_m) \
declare_constant(VM_Version::cbcond_instructions_m)
declare_constant(VM_Version::cbcond_instructions_m) \
declare_constant(VM_Version::v8_instructions_m) \
declare_constant(VM_Version::hardware_mul32_m) \
declare_constant(VM_Version::hardware_div32_m) \
declare_constant(VM_Version::hardware_fsmuld_m) \
declare_constant(VM_Version::hardware_popc_m) \
declare_constant(VM_Version::v9_instructions_m) \
declare_constant(VM_Version::sun4v_m) \
declare_constant(VM_Version::blk_init_instructions_m) \
declare_constant(VM_Version::fmaf_instructions_m) \
declare_constant(VM_Version::fmau_instructions_m) \
declare_constant(VM_Version::sparc64_family_m) \
declare_constant(VM_Version::M_family_m) \
declare_constant(VM_Version::T_family_m) \
declare_constant(VM_Version::T1_model_m) \
declare_constant(VM_Version::sparc5_instructions_m) \
declare_constant(VM_Version::aes_instructions_m) \
declare_constant(VM_Version::sha1_instruction_m) \
declare_constant(VM_Version::sha256_instruction_m) \
declare_constant(VM_Version::sha512_instruction_m)
#define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)

View File

@ -3036,6 +3036,35 @@ void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
emit_int8(imm8);
}
void Assembler::pcmpeqw(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
emit_simd_arith(0x75, dst, src, VEX_SIMD_66,
false, (VM_Version::supports_avx512dq() == false));
}
void Assembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
assert(UseAVX > 0, "some form of AVX must be enabled");
emit_vex_arith(0x75, dst, nds, src, VEX_SIMD_66, vector_len,
false, (VM_Version::supports_avx512dq() == false));
}
void Assembler::pmovmskb(Register dst, XMMRegister src) {
assert(VM_Version::supports_sse2(), "");
int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, true, VEX_OPCODE_0F,
false, AVX_128bit, (VM_Version::supports_avx512dq() == false));
emit_int8((unsigned char)0xD7);
emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::vpmovmskb(Register dst, XMMRegister src) {
assert(VM_Version::supports_avx2(), "");
int vector_len = AVX_256bit;
int encode = vex_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66,
vector_len, VEX_OPCODE_0F, true, false);
emit_int8((unsigned char)0xD7);
emit_int8((unsigned char)(0xC0 | encode));
}
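For readers not fluent in the raw encodings, the C intrinsics below are a rough equivalent of the pcmpeqw + pmovmskb pair added above; this is the usual way vectorized string code turns an element-wise comparison into a bitmask it can scan (a sketch, not HotSpot code):

#include <immintrin.h>
#include <cstdint>

// Compare 8 UTF-16 chars at once: each equal lane becomes 0xFFFF, and
// pmovmskb collapses the top bit of every result byte into a 16-bit mask.
static int equal_lanes_mask(const uint16_t* a, const uint16_t* b) {
  __m128i va = _mm_loadu_si128((const __m128i*)a);
  __m128i vb = _mm_loadu_si128((const __m128i*)b);
  __m128i eq = _mm_cmpeq_epi16(va, vb);   // pcmpeqw
  return _mm_movemask_epi8(eq);           // pmovmskb
}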
void Assembler::pextrd(Register dst, XMMRegister src, int imm8) {
assert(VM_Version::supports_sse4_1(), "");
int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, /* no_mask_reg */ true,
@ -3108,6 +3137,17 @@ void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::vpmovzxbw(XMMRegister dst, Address src) {
assert(VM_Version::supports_avx(), "");
InstructionMark im(this);
bool vector256 = true;
assert(dst != xnoreg, "sanity");
int dst_enc = dst->encoding();
vex_prefix(src, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector256);
emit_int8(0x30);
emit_operand(dst, src);
}
// generic
void Assembler::pop(Register dst) {
int encode = prefix_and_encode(dst->encoding());
@ -5370,6 +5410,16 @@ void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src) {
emit_int8((unsigned char)(0xC0 | encode));
}
// duplicate 2-byte integer data from src into 16 locations in dest
void Assembler::vpbroadcastw(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_avx2(), "");
int vector_len = AVX_256bit;
int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66,
vector_len, VEX_OPCODE_0F_38, false);
emit_int8(0x79);
emit_int8((unsigned char)(0xC0 | encode));
}
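An intrinsics-level illustration of what vpbroadcastw is typically used for: replicating one 16-bit value, such as a search character, across every lane of a YMM register so it can be compared against a whole vector at once (a sketch under AVX2, not HotSpot code):

#include <immintrin.h>
#include <cstdint>

// Broadcast a single UTF-16 char into all 16 lanes of a 256-bit register.
static __m256i broadcast_char(uint16_t ch) {
  return _mm256_broadcastw_epi16(_mm_cvtsi32_si128(ch));  // vpbroadcastw
}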
// duplicate 1-byte integer data from src into 16|32|64 locations in dest : requires AVX512BW and AVX512VL
void Assembler::evpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len) {
_instruction_uses_vl = true;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1682,6 +1682,12 @@ private:
void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
void pcmpestri(XMMRegister xmm1, Address src, int imm8);
void pcmpeqw(XMMRegister dst, XMMRegister src);
void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void pmovmskb(Register dst, XMMRegister src);
void vpmovmskb(Register dst, XMMRegister src);
// SSE 4.1 extract
void pextrd(Register dst, XMMRegister src, int imm8);
void pextrq(Register dst, XMMRegister src, int imm8);
@ -1698,6 +1704,8 @@ private:
void pmovzxbw(XMMRegister dst, XMMRegister src);
void pmovzxbw(XMMRegister dst, Address src);
void vpmovzxbw(XMMRegister dst, Address src);
#ifndef _LP64 // no 32bit push/pop on amd64
void popl(Address dst);
#endif
@ -2116,6 +2124,9 @@ private:
// duplicate 4-bytes integer data from src into 8 locations in dest
void vpbroadcastd(XMMRegister dst, XMMRegister src);
// duplicate 2-byte integer data from src into 16 locations in dest
void vpbroadcastw(XMMRegister dst, XMMRegister src);
// duplicate n-bytes integer data from src into vector_len locations in dest
void evpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len);
void evpbroadcastb(XMMRegister dst, Address src, int vector_len);

View File

@ -509,86 +509,6 @@ int LIR_Assembler::emit_deopt_handler() {
}
// This is the fast version of java.lang.String.compare; it has no
// OSR entry and therefore we generate a slow version for OSRs
void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {
__ movptr (rbx, rcx); // receiver is in rcx
__ movptr (rax, arg1->as_register());
// Get addresses of first characters from both Strings
__ load_heap_oop(rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
if (java_lang_String::has_offset_field()) {
__ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));
__ movl (rax, Address(rax, java_lang_String::count_offset_in_bytes()));
__ lea (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
} else {
__ movl (rax, Address(rsi, arrayOopDesc::length_offset_in_bytes()));
__ lea (rsi, Address(rsi, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}
// rbx, may be NULL
add_debug_info_for_null_check_here(info);
__ load_heap_oop(rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));
if (java_lang_String::has_offset_field()) {
__ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));
__ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes()));
__ lea (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
} else {
__ movl (rbx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
__ lea (rdi, Address(rdi, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}
// compute minimum length (in rax) and difference of lengths (on top of stack)
__ mov (rcx, rbx);
__ subptr(rbx, rax); // subtract lengths
__ push (rbx); // result
__ cmov (Assembler::lessEqual, rax, rcx);
// is minimum length 0?
Label noLoop, haveResult;
__ testptr (rax, rax);
__ jcc (Assembler::zero, noLoop);
// compare first characters
__ load_unsigned_short(rcx, Address(rdi, 0));
__ load_unsigned_short(rbx, Address(rsi, 0));
__ subl(rcx, rbx);
__ jcc(Assembler::notZero, haveResult);
// starting loop
__ decrement(rax); // we already tested index: skip one
__ jcc(Assembler::zero, noLoop);
// set rsi/rdi to the end of the arrays (arrays have same length)
// negate the index
__ lea(rsi, Address(rsi, rax, Address::times_2, type2aelembytes(T_CHAR)));
__ lea(rdi, Address(rdi, rax, Address::times_2, type2aelembytes(T_CHAR)));
__ negptr(rax);
// compare the strings in a loop
Label loop;
__ align(wordSize);
__ bind(loop);
__ load_unsigned_short(rcx, Address(rdi, rax, Address::times_2, 0));
__ load_unsigned_short(rbx, Address(rsi, rax, Address::times_2, 0));
__ subl(rcx, rbx);
__ jcc(Assembler::notZero, haveResult);
__ increment(rax);
__ jcc(Assembler::notZero, loop);
// strings are equal up to min length
__ bind(noLoop);
__ pop(rax);
return_op(LIR_OprFact::illegalOpr);
__ bind(haveResult);
// leave instruction is going to discard the TOS value
__ mov (rax, rcx); // result of call is in rax,
}
void LIR_Assembler::return_op(LIR_Opr result) {
assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
@ -1667,8 +1587,8 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
Register Rtmp1 = noreg;
// check if it needs to be profiled
ciMethodData* md;
ciProfileData* data;
ciMethodData* md = NULL;
ciProfileData* data = NULL;
if (op->should_profile()) {
ciMethod* method = op->profiled_method();
@ -1827,8 +1747,8 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
CodeStub* stub = op->stub();
// check if it needs to be profiled
ciMethodData* md;
ciProfileData* data;
ciMethodData* md = NULL;
ciProfileData* data = NULL;
if (op->should_profile()) {
ciMethod* method = op->profiled_method();
@ -2005,7 +1925,8 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
case lir_cond_greater: acond = Assembler::greater; ncond = Assembler::lessEqual; break;
case lir_cond_belowEqual: acond = Assembler::belowEqual; ncond = Assembler::above; break;
case lir_cond_aboveEqual: acond = Assembler::aboveEqual; ncond = Assembler::below; break;
default: ShouldNotReachHere();
default: acond = Assembler::equal; ncond = Assembler::notEqual;
ShouldNotReachHere();
}
if (opr1->is_cpu_register()) {
@ -3181,27 +3102,23 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
int elem_size = type2aelembytes(basic_type);
int shift_amount;
Address::ScaleFactor scale;
switch (elem_size) {
case 1 :
shift_amount = 0;
scale = Address::times_1;
break;
case 2 :
shift_amount = 1;
scale = Address::times_2;
break;
case 4 :
shift_amount = 2;
scale = Address::times_4;
break;
case 8 :
shift_amount = 3;
scale = Address::times_8;
break;
default:
scale = Address::no_scale;
ShouldNotReachHere();
}

View File

@ -195,7 +195,7 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
LIR_Opr r;
LIR_Opr r = NULL;
if (type == T_LONG) {
r = LIR_OprFact::longConst(x);
} else if (type == T_INT) {
@ -484,7 +484,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
__ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
__ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));
address entry;
address entry = NULL;
switch (x->op()) {
case Bytecodes::_lrem:
entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
@ -1072,7 +1072,7 @@ LIR_Opr fixed_register_for(BasicType type) {
void LIRGenerator::do_Convert(Convert* x) {
// flags that vary for the different operations and different SSE-settings
bool fixed_input, fixed_result, round_result, needs_stub;
bool fixed_input = false, fixed_result = false, round_result = false, needs_stub = false;
switch (x->op()) {
case Bytecodes::_i2l: // fall through

View File

@ -91,6 +91,8 @@ define_pd_global(size_t, CMSYoungGenPerWorker, 64*M); // default max size of CM
define_pd_global(uintx, TypeProfileLevel, 111);
define_pd_global(bool, CompactStrings, true);
define_pd_global(bool, PreserveFramePointer, false);
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint) \

View File

@ -48,7 +48,7 @@ GetDoubleField_t JNI_FastGetField::jni_fast_GetDoubleField_fp;
// between loads, which is much more efficient than lfence.
address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
const char *name;
const char *name = NULL;
switch (type) {
case T_BOOLEAN: name = "jni_fast_GetBooleanField"; break;
case T_BYTE: name = "jni_fast_GetByteField"; break;
@ -122,7 +122,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
slowcase_entry_pclist[count++] = __ pc();
__ bind (slow);
address slow_case_addr;
address slow_case_addr = NULL;
switch (type) {
case T_BOOLEAN: slow_case_addr = jni_GetBooleanField_addr(); break;
case T_BYTE: slow_case_addr = jni_GetByteField_addr(); break;
@ -256,7 +256,7 @@ address JNI_FastGetField::generate_fast_get_long_field() {
}
address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
const char *name;
const char *name = NULL;
switch (type) {
case T_FLOAT: name = "jni_fast_GetFloatField"; break;
case T_DOUBLE: name = "jni_fast_GetDoubleField"; break;
@ -337,7 +337,7 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
slowcase_entry_pclist[count++] = __ pc();
__ bind (slow);
address slow_case_addr;
address slow_case_addr = NULL;
switch (type) {
case T_FLOAT: slow_case_addr = jni_GetFloatField_addr(); break;
case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break;

View File

@ -51,7 +51,7 @@ static const Register rcounter_addr = r11;
// since that may scratch r10!
address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
const char *name;
const char *name = NULL;
switch (type) {
case T_BOOLEAN: name = "jni_fast_GetBooleanField"; break;
case T_BYTE: name = "jni_fast_GetByteField"; break;
@ -111,7 +111,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
slowcase_entry_pclist[count++] = __ pc();
__ bind (slow);
address slow_case_addr;
address slow_case_addr = NULL;
switch (type) {
case T_BOOLEAN: slow_case_addr = jni_GetBooleanField_addr(); break;
case T_BYTE: slow_case_addr = jni_GetByteField_addr(); break;
@ -153,7 +153,7 @@ address JNI_FastGetField::generate_fast_get_long_field() {
}
address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
const char *name;
const char *name = NULL;
switch (type) {
case T_FLOAT: name = "jni_fast_GetFloatField"; break;
case T_DOUBLE: name = "jni_fast_GetDoubleField"; break;
@ -206,7 +206,7 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
slowcase_entry_pclist[count++] = __ pc();
__ bind (slow);
address slow_case_addr;
address slow_case_addr = NULL;
switch (type) {
case T_FLOAT: slow_case_addr = jni_GetFloatField_addr(); break;
case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr();

View File

@ -85,6 +85,23 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& constant) {
}
}
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle& constant) {
address pc = _instructions->start() + pc_offset;
if (HotSpotMetaspaceConstantImpl::compressed(constant)) {
#ifdef _LP64
address operand = Assembler::locate_operand(pc, Assembler::narrow_oop_operand);
*((narrowKlass*) operand) = record_narrow_metadata_reference(constant);
TRACE_jvmci_3("relocating (narrow metaspace constant) at " PTR_FORMAT "/" PTR_FORMAT, p2i(pc), p2i(operand));
#else
fatal("compressed Klass* on 32bit");
#endif
} else {
address operand = Assembler::locate_operand(pc, Assembler::imm_operand);
*((Metadata**) operand) = record_metadata_reference(constant);
TRACE_jvmci_3("relocating (metaspace constant) at " PTR_FORMAT "/" PTR_FORMAT, p2i(pc), p2i(operand));
}
}
void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset) {
address pc = _instructions->start() + pc_offset;
@ -100,16 +117,6 @@ void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset
TRACE_jvmci_3("relocating at " PTR_FORMAT "/" PTR_FORMAT " with destination at " PTR_FORMAT " (%d)", p2i(pc), p2i(operand), p2i(dest), data_offset);
}
void CodeInstaller::pd_relocate_CodeBlob(CodeBlob* cb, NativeInstruction* inst) {
if (cb->is_nmethod()) {
nmethod* nm = (nmethod*) cb;
nativeJump_at((address)inst)->set_jump_destination(nm->verified_entry_point());
} else {
nativeJump_at((address)inst)->set_jump_destination(cb->code_begin());
}
_instructions->relocate((address)inst, runtime_call_Relocation::spec(), Assembler::call32_operand);
}
void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination) {
address pc = (address) inst;
if (inst->is_call()) {

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,7 +29,6 @@
#include "utilities/macros.hpp"
#include "runtime/rtmLocking.hpp"
// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
@ -1212,32 +1211,50 @@ public:
// clear memory of size 'cnt' qwords, starting at 'base'.
void clear_mem(Register base, Register cnt, Register rtmp);
#ifdef COMPILER2
void string_indexof_char(Register str1, Register cnt1, Register ch, Register result,
XMMRegister vec1, XMMRegister vec2, XMMRegister vec3, Register tmp);
// IndexOf strings.
// Small strings are loaded through stack if they cross page boundary.
void string_indexof(Register str1, Register str2,
Register cnt1, Register cnt2,
int int_cnt2, Register result,
XMMRegister vec, Register tmp);
XMMRegister vec, Register tmp,
int ae);
// IndexOf for constant substrings with size >= 8 elements
// which don't need to be loaded through stack.
void string_indexofC8(Register str1, Register str2,
Register cnt1, Register cnt2,
int int_cnt2, Register result,
XMMRegister vec, Register tmp);
XMMRegister vec, Register tmp,
int ae);
// Smallest code: we don't need to load through stack,
// check string tail.
// helper function for string_compare
void load_next_elements(Register elem1, Register elem2, Register str1, Register str2,
Address::ScaleFactor scale, Address::ScaleFactor scale1,
Address::ScaleFactor scale2, Register index, int ae);
// Compare strings.
void string_compare(Register str1, Register str2,
Register cnt1, Register cnt2, Register result,
XMMRegister vec1);
XMMRegister vec1, int ae);
// Compare char[] arrays.
void char_arrays_equals(bool is_array_equ, Register ary1, Register ary2,
Register limit, Register result, Register chr,
XMMRegister vec1, XMMRegister vec2);
// Search for Non-ASCII character (Negative byte value) in a byte array,
// return true if it has any and false otherwise.
void has_negatives(Register ary1, Register len,
Register result, Register tmp1,
XMMRegister vec1, XMMRegister vec2);
// Compare char[] or byte[] arrays.
void arrays_equals(bool is_array_equ, Register ary1, Register ary2,
Register limit, Register result, Register chr,
XMMRegister vec1, XMMRegister vec2, bool is_char);
#endif
// Fill primitive arrays
void generate_fill(BasicType t, bool aligned,
@ -1332,6 +1349,15 @@ public:
void fold_8bit_crc32(Register crc, Register table, Register tmp);
void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
// Compress char[] array to byte[].
void char_array_compress(Register src, Register dst, Register len,
XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
XMMRegister tmp4, Register tmp5, Register result);
// Inflate byte[] array to char[].
void byte_array_inflate(Register src, Register dst, Register len,
XMMRegister tmp1, Register tmp2);
#undef VIRTUAL
};

View File

@ -63,7 +63,7 @@ static int check_nonzero(const char* xname, int x) {
void MethodHandles::verify_klass(MacroAssembler* _masm,
Register obj, SystemDictionary::WKID klass_id,
const char* error_message) {
Klass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
Register temp = rdi;
Register temp2 = noreg;

View File

@ -1502,7 +1502,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
// return to caller
//
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
methodHandle method,
const methodHandle& method,
int compile_id,
BasicType* in_sig_bt,
VMRegPair* in_regs,

View File

@ -1694,7 +1694,7 @@ class ComputeMoveOrder: public StackObj {
};
static void verify_oop_args(MacroAssembler* masm,
methodHandle method,
const methodHandle& method,
const BasicType* sig_bt,
const VMRegPair* regs) {
Register temp_reg = rbx; // not part of any compiled calling seq
@ -1804,7 +1804,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
// return to caller
//
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
methodHandle method,
const methodHandle& method,
int compile_id,
BasicType* in_sig_bt,
VMRegPair* in_regs,

View File

@ -82,6 +82,7 @@
declare_constant(VM_Version::CPU_AVX512CD) \
declare_constant(VM_Version::CPU_AVX512BW)
#define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) \
declare_preprocessor_constant("VM_Version::CPU_AVX512VL", CPU_AVX512VL)
#endif // CPU_X86_VM_VMSTRUCTS_X86_HPP

View File

@ -11435,16 +11435,62 @@ instruct rep_fast_stosb(eCXRegI cnt, eDIRegP base, eAXRegI zero, Universe dummy,
ins_pipe( pipe_slow );
%}
instruct string_compare(eDIRegP str1, eCXRegI cnt1, eSIRegP str2, eDXRegI cnt2,
eAXRegI result, regD tmp1, eFlagsReg cr) %{
instruct string_compareL(eDIRegP str1, eCXRegI cnt1, eSIRegP str2, eDXRegI cnt2,
eAXRegI result, regD tmp1, eFlagsReg cr) %{
predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
effect(TEMP tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1" %}
format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1" %}
ins_encode %{
__ string_compare($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register, $result$$Register,
$tmp1$$XMMRegister);
$tmp1$$XMMRegister, StrIntrinsicNode::LL);
%}
ins_pipe( pipe_slow );
%}
instruct string_compareU(eDIRegP str1, eCXRegI cnt1, eSIRegP str2, eDXRegI cnt2,
eAXRegI result, regD tmp1, eFlagsReg cr) %{
predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
effect(TEMP tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
format %{ "String Compare char[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1" %}
ins_encode %{
__ string_compare($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register, $result$$Register,
$tmp1$$XMMRegister, StrIntrinsicNode::UU);
%}
ins_pipe( pipe_slow );
%}
instruct string_compareLU(eDIRegP str1, eCXRegI cnt1, eSIRegP str2, eDXRegI cnt2,
eAXRegI result, regD tmp1, eFlagsReg cr) %{
predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
effect(TEMP tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1" %}
ins_encode %{
__ string_compare($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register, $result$$Register,
$tmp1$$XMMRegister, StrIntrinsicNode::LU);
%}
ins_pipe( pipe_slow );
%}
instruct string_compareUL(eSIRegP str1, eDXRegI cnt1, eDIRegP str2, eCXRegI cnt2,
eAXRegI result, regD tmp1, eFlagsReg cr) %{
predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
effect(TEMP tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1" %}
ins_encode %{
__ string_compare($str2$$Register, $str1$$Register,
$cnt2$$Register, $cnt1$$Register, $result$$Register,
$tmp1$$XMMRegister, StrIntrinsicNode::UL);
%}
ins_pipe( pipe_slow );
%}
@ -11457,21 +11503,50 @@ instruct string_equals(eDIRegP str1, eSIRegP str2, eCXRegI cnt, eAXRegI result,
format %{ "String Equals $str1,$str2,$cnt -> $result // KILL $tmp1, $tmp2, $tmp3" %}
ins_encode %{
__ char_arrays_equals(false, $str1$$Register, $str2$$Register,
$cnt$$Register, $result$$Register, $tmp3$$Register,
$tmp1$$XMMRegister, $tmp2$$XMMRegister);
__ arrays_equals(false, $str1$$Register, $str2$$Register,
$cnt$$Register, $result$$Register, $tmp3$$Register,
$tmp1$$XMMRegister, $tmp2$$XMMRegister, false /* char */);
%}
ins_pipe( pipe_slow );
%}
// fast search of substring with known size.
instruct string_indexof_conL(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, immI int_cnt2,
eBXRegI result, regD vec, eAXRegI cnt2, eCXRegI tmp, eFlagsReg cr) %{
predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL));
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr);
format %{ "String IndexOf byte[] $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $vec, $cnt1, $cnt2, $tmp" %}
ins_encode %{
int icnt2 = (int)$int_cnt2$$constant;
if (icnt2 >= 16) {
// IndexOf for constant substrings with size >= 16 elements
// which don't need to be loaded through stack.
__ string_indexofC8($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
icnt2, $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::LL);
} else {
// Small strings are loaded through stack if they cross page boundary.
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
icnt2, $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::LL);
}
%}
ins_pipe( pipe_slow );
%}
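A scalar reference for the StrIndexOf intrinsics in this file (illustrative only): find the first occurrence of the needle in the haystack, or return -1. The vectorized paths above special-case short constant needles (string_indexofC8) versus needles that may have to be loaded through the stack:

#include <cstdint>

// Naive reference; the intrinsic returns the same result, just faster.
static int index_of(const uint16_t* hay, int hay_len,
                    const uint16_t* needle, int needle_len) {
  if (needle_len == 0) return 0;
  for (int i = 0; i + needle_len <= hay_len; i++) {
    int j = 0;
    while (j < needle_len && hay[i + j] == needle[j]) j++;
    if (j == needle_len) return i;
  }
  return -1;
}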
// fast search of substring with known size.
instruct string_indexof_con(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, immI int_cnt2,
eBXRegI result, regD vec, eAXRegI cnt2, eCXRegI tmp, eFlagsReg cr) %{
predicate(UseSSE42Intrinsics);
instruct string_indexof_conU(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, immI int_cnt2,
eBXRegI result, regD vec, eAXRegI cnt2, eCXRegI tmp, eFlagsReg cr) %{
predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU));
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr);
format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $vec, $cnt1, $cnt2, $tmp" %}
format %{ "String IndexOf char[] $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $vec, $cnt1, $cnt2, $tmp" %}
ins_encode %{
int icnt2 = (int)$int_cnt2$$constant;
if (icnt2 >= 8) {
@ -11480,47 +11555,182 @@ instruct string_indexof_con(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, immI int_c
__ string_indexofC8($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
icnt2, $result$$Register,
$vec$$XMMRegister, $tmp$$Register);
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UU);
} else {
// Small strings are loaded through stack if they cross page boundary.
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
icnt2, $result$$Register,
$vec$$XMMRegister, $tmp$$Register);
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UU);
}
%}
ins_pipe( pipe_slow );
%}
instruct string_indexof(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, eAXRegI cnt2,
eBXRegI result, regD vec, eCXRegI tmp, eFlagsReg cr) %{
predicate(UseSSE42Intrinsics);
// fast search of substring with known size.
instruct string_indexof_conUL(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, immI int_cnt2,
eBXRegI result, regD vec, eAXRegI cnt2, eCXRegI tmp, eFlagsReg cr) %{
predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL));
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr);
format %{ "String IndexOf char[] $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $vec, $cnt1, $cnt2, $tmp" %}
ins_encode %{
int icnt2 = (int)$int_cnt2$$constant;
if (icnt2 >= 8) {
// IndexOf for constant substrings with size >= 8 elements
// which don't need to be loaded through stack.
__ string_indexofC8($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
icnt2, $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UL);
} else {
// Small strings are loaded through stack if they cross page boundary.
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
icnt2, $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UL);
}
%}
ins_pipe( pipe_slow );
%}
instruct string_indexofL(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, eAXRegI cnt2,
eBXRegI result, regD vec, eCXRegI tmp, eFlagsReg cr) %{
predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL));
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr);
format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result // KILL all" %}
format %{ "String IndexOf byte[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL all" %}
ins_encode %{
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
(-1), $result$$Register,
$vec$$XMMRegister, $tmp$$Register);
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::LL);
%}
ins_pipe( pipe_slow );
%}
instruct string_indexofU(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, eAXRegI cnt2,
eBXRegI result, regD vec, eCXRegI tmp, eFlagsReg cr) %{
predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU));
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr);
format %{ "String IndexOf char[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL all" %}
ins_encode %{
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
(-1), $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UU);
%}
ins_pipe( pipe_slow );
%}
instruct string_indexofUL(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, eAXRegI cnt2,
eBXRegI result, regD vec, eCXRegI tmp, eFlagsReg cr) %{
predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL));
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr);
format %{ "String IndexOf char[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL all" %}
ins_encode %{
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
(-1), $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UL);
%}
ins_pipe( pipe_slow );
%}
instruct string_indexofU_char(eDIRegP str1, eDXRegI cnt1, eAXRegI ch,
eBXRegI result, regD vec1, regD vec2, regD vec3, eCXRegI tmp, eFlagsReg cr) %{
predicate(UseSSE42Intrinsics);
match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
effect(TEMP vec1, TEMP vec2, TEMP vec3, USE_KILL str1, USE_KILL cnt1, USE_KILL ch, TEMP tmp, KILL cr);
format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result // KILL all" %}
ins_encode %{
__ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register, $result$$Register,
$vec1$$XMMRegister, $vec2$$XMMRegister, $vec3$$XMMRegister, $tmp$$Register);
%}
ins_pipe( pipe_slow );
%}
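The indexOf-char instruct above has an even simpler scalar equivalent (illustrative only); the SSE4.2 path scans a vector of chars per iteration instead of one:

#include <cstdint>

// Index of the first occurrence of ch, or -1 if absent.
static int index_of_char(const uint16_t* s, int len, uint16_t ch) {
  for (int i = 0; i < len; i++) {
    if (s[i] == ch) return i;
  }
  return -1;
}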
// fast array equals
instruct array_equals(eDIRegP ary1, eSIRegP ary2, eAXRegI result,
regD tmp1, regD tmp2, eCXRegI tmp3, eBXRegI tmp4, eFlagsReg cr)
instruct array_equalsB(eDIRegP ary1, eSIRegP ary2, eAXRegI result,
regD tmp1, regD tmp2, eCXRegI tmp3, eBXRegI tmp4, eFlagsReg cr)
%{
predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
match(Set result (AryEq ary1 ary2));
effect(TEMP tmp1, TEMP tmp2, USE_KILL ary1, USE_KILL ary2, KILL tmp3, KILL tmp4, KILL cr);
//ins_cost(300);
format %{ "Array Equals $ary1,$ary2 -> $result // KILL $tmp1, $tmp2, $tmp3, $tmp4" %}
format %{ "Array Equals byte[] $ary1,$ary2 -> $result // KILL $tmp1, $tmp2, $tmp3, $tmp4" %}
ins_encode %{
__ char_arrays_equals(true, $ary1$$Register, $ary2$$Register,
$tmp3$$Register, $result$$Register, $tmp4$$Register,
$tmp1$$XMMRegister, $tmp2$$XMMRegister);
__ arrays_equals(true, $ary1$$Register, $ary2$$Register,
$tmp3$$Register, $result$$Register, $tmp4$$Register,
$tmp1$$XMMRegister, $tmp2$$XMMRegister, false /* char */);
%}
ins_pipe( pipe_slow );
%}
instruct array_equalsC(eDIRegP ary1, eSIRegP ary2, eAXRegI result,
regD tmp1, regD tmp2, eCXRegI tmp3, eBXRegI tmp4, eFlagsReg cr)
%{
predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
match(Set result (AryEq ary1 ary2));
effect(TEMP tmp1, TEMP tmp2, USE_KILL ary1, USE_KILL ary2, KILL tmp3, KILL tmp4, KILL cr);
//ins_cost(300);
format %{ "Array Equals char[] $ary1,$ary2 -> $result // KILL $tmp1, $tmp2, $tmp3, $tmp4" %}
ins_encode %{
__ arrays_equals(true, $ary1$$Register, $ary2$$Register,
$tmp3$$Register, $result$$Register, $tmp4$$Register,
$tmp1$$XMMRegister, $tmp2$$XMMRegister, true /* char */);
%}
ins_pipe( pipe_slow );
%}
instruct has_negatives(eSIRegP ary1, eCXRegI len, eAXRegI result,
regD tmp1, regD tmp2, eBXRegI tmp3, eFlagsReg cr)
%{
match(Set result (HasNegatives ary1 len));
effect(TEMP tmp1, TEMP tmp2, USE_KILL ary1, USE_KILL len, KILL tmp3, KILL cr);
format %{ "has negatives byte[] $ary1,$len -> $result // KILL $tmp1, $tmp2, $tmp3" %}
ins_encode %{
__ has_negatives($ary1$$Register, $len$$Register,
$result$$Register, $tmp3$$Register,
$tmp1$$XMMRegister, $tmp2$$XMMRegister);
%}
ins_pipe( pipe_slow );
%}
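A scalar reference for has_negatives (illustrative only): the answer is true iff some byte has its sign bit set, i.e. the array contains a non-ASCII byte, which callers typically use to decide whether a byte[] is pure ASCII:

#include <cstdint>
#include <cstddef>

// True iff any byte is negative (top bit set), i.e. non-ASCII.
static bool has_negatives_scalar(const int8_t* a, size_t len) {
  for (size_t i = 0; i < len; i++) {
    if (a[i] < 0) return true;
  }
  return false;
}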
// fast char[] to byte[] compression
instruct string_compress(eSIRegP src, eDIRegP dst, eDXRegI len, regD tmp1, regD tmp2, regD tmp3, regD tmp4,
eCXRegI tmp5, eAXRegI result, eFlagsReg cr) %{
match(Set result (StrCompressedCopy src (Binary dst len)));
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL tmp5, KILL cr);
format %{ "String Compress $src,$dst -> $result // KILL RAX, RCX, RDX" %}
ins_encode %{
__ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
$tmp1$$XMMRegister, $tmp2$$XMMRegister, $tmp3$$XMMRegister,
$tmp4$$XMMRegister, $tmp5$$Register, $result$$Register);
%}
ins_pipe( pipe_slow );
%}
// fast byte[] to char[] inflation
instruct string_inflate(Universe dummy, eSIRegP src, eDIRegP dst, eDXRegI len,
regD tmp1, eCXRegI tmp2, eFlagsReg cr) %{
match(Set dummy (StrInflatedCopy src (Binary dst len)));
effect(TEMP tmp1, TEMP tmp2, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);
format %{ "String Inflate $src,$dst // KILL $tmp1, $tmp2" %}
ins_encode %{
__ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
$tmp1$$XMMRegister, $tmp2$$Register);
%}
ins_pipe( pipe_slow );
%}

View File

@ -10447,30 +10447,108 @@ instruct rep_fast_stosb(rcx_RegL cnt, rdi_RegP base, rax_RegI zero, Universe dum
ins_pipe( pipe_slow );
%}
instruct string_compare(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI cnt2,
rax_RegI result, regD tmp1, rFlagsReg cr)
instruct string_compareL(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI cnt2,
rax_RegI result, regD tmp1, rFlagsReg cr)
%{
predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
effect(TEMP tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1" %}
format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1" %}
ins_encode %{
__ string_compare($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register, $result$$Register,
$tmp1$$XMMRegister);
$tmp1$$XMMRegister, StrIntrinsicNode::LL);
%}
ins_pipe( pipe_slow );
%}
instruct string_compareU(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI cnt2,
rax_RegI result, regD tmp1, rFlagsReg cr)
%{
predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
effect(TEMP tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
format %{ "String Compare char[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1" %}
ins_encode %{
__ string_compare($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register, $result$$Register,
$tmp1$$XMMRegister, StrIntrinsicNode::UU);
%}
ins_pipe( pipe_slow );
%}
instruct string_compareLU(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI cnt2,
rax_RegI result, regD tmp1, rFlagsReg cr)
%{
predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
effect(TEMP tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1" %}
ins_encode %{
__ string_compare($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register, $result$$Register,
$tmp1$$XMMRegister, StrIntrinsicNode::LU);
%}
ins_pipe( pipe_slow );
%}
instruct string_compareUL(rsi_RegP str1, rdx_RegI cnt1, rdi_RegP str2, rcx_RegI cnt2,
rax_RegI result, regD tmp1, rFlagsReg cr)
%{
predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
effect(TEMP tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1" %}
ins_encode %{
__ string_compare($str2$$Register, $str1$$Register,
$cnt2$$Register, $cnt1$$Register, $result$$Register,
$tmp1$$XMMRegister, StrIntrinsicNode::UL);
%}
ins_pipe( pipe_slow );
%}
// fast search of substring with known size.
instruct string_indexof_con(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI int_cnt2,
rbx_RegI result, regD vec, rax_RegI cnt2, rcx_RegI tmp, rFlagsReg cr)
instruct string_indexof_conL(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI int_cnt2,
rbx_RegI result, regD vec, rax_RegI cnt2, rcx_RegI tmp, rFlagsReg cr)
%{
predicate(UseSSE42Intrinsics);
predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL));
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr);
format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $vec, $cnt1, $cnt2, $tmp" %}
format %{ "String IndexOf byte[] $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $vec, $cnt1, $cnt2, $tmp" %}
ins_encode %{
int icnt2 = (int)$int_cnt2$$constant;
if (icnt2 >= 16) {
// IndexOf for constant substrings with size >= 16 elements
// which don't need to be loaded through stack.
__ string_indexofC8($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
icnt2, $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::LL);
} else {
// Small strings are loaded through stack if they cross page boundary.
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
icnt2, $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::LL);
}
%}
ins_pipe( pipe_slow );
%}
// fast search of substring with known size.
instruct string_indexof_conU(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI int_cnt2,
rbx_RegI result, regD vec, rax_RegI cnt2, rcx_RegI tmp, rFlagsReg cr)
%{
predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU));
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr);
format %{ "String IndexOf char[] $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $vec, $cnt1, $cnt2, $tmp" %}
ins_encode %{
int icnt2 = (int)$int_cnt2$$constant;
if (icnt2 >= 8) {
@ -10479,31 +10557,108 @@ instruct string_indexof_con(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI in
__ string_indexofC8($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
icnt2, $result$$Register,
$vec$$XMMRegister, $tmp$$Register);
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UU);
} else {
// Small strings are loaded through stack if they cross page boundary.
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
icnt2, $result$$Register,
$vec$$XMMRegister, $tmp$$Register);
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UU);
}
%}
ins_pipe( pipe_slow );
%}
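A note on the two constant-size cutoffs above, as a hedged reading of the LL and UU instructs only (it deliberately says nothing about the UL case): icnt2 >= 16 for LL and icnt2 >= 8 for UU both correspond to one 16-byte XMM register of constant pattern data, since Latin-1 elements are one byte and UTF-16 elements are two. The sketch below only illustrates that arithmetic and is not code from the patch.

// Illustrative arithmetic only: the "no stack load" fast path wants the constant
// pattern to fit in a single 16-byte XMM register, so the element cutoff depends
// on the element width of the encoding.
class IndexOfCutoffSketch {
    public static void main(String[] args) {
        int xmmBytes = 16;
        System.out.println("LL cutoff = " + (xmmBytes / 1)); // 1-byte Latin-1 elements -> 16
        System.out.println("UU cutoff = " + (xmmBytes / 2)); // 2-byte UTF-16 elements  -> 8
    }
}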
instruct string_indexof(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, rax_RegI cnt2,
rbx_RegI result, regD vec, rcx_RegI tmp, rFlagsReg cr)
// fast search of substring with known size.
instruct string_indexof_conUL(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI int_cnt2,
rbx_RegI result, regD vec, rax_RegI cnt2, rcx_RegI tmp, rFlagsReg cr)
%{
predicate(UseSSE42Intrinsics);
predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL));
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr);
format %{ "String IndexOf char[] $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $vec, $cnt1, $cnt2, $tmp" %}
ins_encode %{
int icnt2 = (int)$int_cnt2$$constant;
if (icnt2 >= 8) {
// IndexOf for constant substrings with size >= 8 elements
// which don't need to be loaded through stack.
__ string_indexofC8($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
icnt2, $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UL);
} else {
// Small strings are loaded through stack if they cross page boundary.
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
icnt2, $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UL);
}
%}
ins_pipe( pipe_slow );
%}
instruct string_indexofL(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, rax_RegI cnt2,
rbx_RegI result, regD vec, rcx_RegI tmp, rFlagsReg cr)
%{
predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL));
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr);
format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result // KILL all" %}
format %{ "String IndexOf byte[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL all" %}
ins_encode %{
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
(-1), $result$$Register,
$vec$$XMMRegister, $tmp$$Register);
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::LL);
%}
ins_pipe( pipe_slow );
%}
instruct string_indexofU(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, rax_RegI cnt2,
rbx_RegI result, regD vec, rcx_RegI tmp, rFlagsReg cr)
%{
predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU));
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr);
format %{ "String IndexOf char[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL all" %}
ins_encode %{
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
(-1), $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UU);
%}
ins_pipe( pipe_slow );
%}
instruct string_indexofUL(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, rax_RegI cnt2,
rbx_RegI result, regD vec, rcx_RegI tmp, rFlagsReg cr)
%{
predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL));
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr);
format %{ "String IndexOf char[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL all" %}
ins_encode %{
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
(-1), $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UL);
%}
ins_pipe( pipe_slow );
%}
instruct string_indexofU_char(rdi_RegP str1, rdx_RegI cnt1, rax_RegI ch,
rbx_RegI result, regD vec1, regD vec2, regD vec3, rcx_RegI tmp, rFlagsReg cr)
%{
predicate(UseSSE42Intrinsics);
match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
effect(TEMP vec1, TEMP vec2, TEMP vec3, USE_KILL str1, USE_KILL cnt1, USE_KILL ch, TEMP tmp, KILL cr);
format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result // KILL all" %}
ins_encode %{
__ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register, $result$$Register,
$vec1$$XMMRegister, $vec2$$XMMRegister, $vec3$$XMMRegister, $tmp$$Register);
%}
ins_pipe( pipe_slow );
%}
@ -10517,26 +10672,86 @@ instruct string_equals(rdi_RegP str1, rsi_RegP str2, rcx_RegI cnt, rax_RegI resu
format %{ "String Equals $str1,$str2,$cnt -> $result // KILL $tmp1, $tmp2, $tmp3" %}
ins_encode %{
__ char_arrays_equals(false, $str1$$Register, $str2$$Register,
$cnt$$Register, $result$$Register, $tmp3$$Register,
$tmp1$$XMMRegister, $tmp2$$XMMRegister);
__ arrays_equals(false, $str1$$Register, $str2$$Register,
$cnt$$Register, $result$$Register, $tmp3$$Register,
$tmp1$$XMMRegister, $tmp2$$XMMRegister, false /* char */);
%}
ins_pipe( pipe_slow );
%}
// fast array equals
instruct array_equals(rdi_RegP ary1, rsi_RegP ary2, rax_RegI result,
regD tmp1, regD tmp2, rcx_RegI tmp3, rbx_RegI tmp4, rFlagsReg cr)
instruct array_equalsB(rdi_RegP ary1, rsi_RegP ary2, rax_RegI result,
regD tmp1, regD tmp2, rcx_RegI tmp3, rbx_RegI tmp4, rFlagsReg cr)
%{
predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
match(Set result (AryEq ary1 ary2));
effect(TEMP tmp1, TEMP tmp2, USE_KILL ary1, USE_KILL ary2, KILL tmp3, KILL tmp4, KILL cr);
//ins_cost(300);
format %{ "Array Equals $ary1,$ary2 -> $result // KILL $tmp1, $tmp2, $tmp3, $tmp4" %}
format %{ "Array Equals byte[] $ary1,$ary2 -> $result // KILL $tmp1, $tmp2, $tmp3, $tmp4" %}
ins_encode %{
__ char_arrays_equals(true, $ary1$$Register, $ary2$$Register,
$tmp3$$Register, $result$$Register, $tmp4$$Register,
$tmp1$$XMMRegister, $tmp2$$XMMRegister);
__ arrays_equals(true, $ary1$$Register, $ary2$$Register,
$tmp3$$Register, $result$$Register, $tmp4$$Register,
$tmp1$$XMMRegister, $tmp2$$XMMRegister, false /* char */);
%}
ins_pipe( pipe_slow );
%}
instruct array_equalsC(rdi_RegP ary1, rsi_RegP ary2, rax_RegI result,
regD tmp1, regD tmp2, rcx_RegI tmp3, rbx_RegI tmp4, rFlagsReg cr)
%{
predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
match(Set result (AryEq ary1 ary2));
effect(TEMP tmp1, TEMP tmp2, USE_KILL ary1, USE_KILL ary2, KILL tmp3, KILL tmp4, KILL cr);
format %{ "Array Equals char[] $ary1,$ary2 -> $result // KILL $tmp1, $tmp2, $tmp3, $tmp4" %}
ins_encode %{
__ arrays_equals(true, $ary1$$Register, $ary2$$Register,
$tmp3$$Register, $result$$Register, $tmp4$$Register,
$tmp1$$XMMRegister, $tmp2$$XMMRegister, true /* char */);
%}
ins_pipe( pipe_slow );
%}
instruct has_negatives(rsi_RegP ary1, rcx_RegI len, rax_RegI result,
regD tmp1, regD tmp2, rbx_RegI tmp3, rFlagsReg cr)
%{
match(Set result (HasNegatives ary1 len));
effect(TEMP tmp1, TEMP tmp2, USE_KILL ary1, USE_KILL len, KILL tmp3, KILL cr);
format %{ "has negatives byte[] $ary1,$len -> $result // KILL $tmp1, $tmp2, $tmp3" %}
ins_encode %{
__ has_negatives($ary1$$Register, $len$$Register,
$result$$Register, $tmp3$$Register,
$tmp1$$XMMRegister, $tmp2$$XMMRegister);
%}
ins_pipe( pipe_slow );
%}
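As a hedged scalar picture of what the has_negatives instruct above computes (the method name below is illustrative, not a JDK API): it reports whether any byte in the range has its sign bit set, i.e. falls outside the 0..127 ASCII range.

// Hedged scalar equivalent of the vectorized has_negatives path; name is illustrative.
class HasNegativesSketch {
    static boolean hasNegatives(byte[] a, int off, int len) {
        for (int i = off; i < off + len; i++) {
            if (a[i] < 0) {          // sign bit set -> value outside 0..127
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        byte[] ascii = "ascii".getBytes(java.nio.charset.StandardCharsets.US_ASCII);
        System.out.println(hasNegatives(new byte[] {1, 2, (byte) 0x80}, 0, 3)); // true
        System.out.println(hasNegatives(ascii, 0, ascii.length));               // false
    }
}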
// fast char[] to byte[] compression
instruct string_compress(rsi_RegP src, rdi_RegP dst, rdx_RegI len, regD tmp1, regD tmp2, regD tmp3, regD tmp4,
rcx_RegI tmp5, rax_RegI result, rFlagsReg cr) %{
match(Set result (StrCompressedCopy src (Binary dst len)));
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL tmp5, KILL cr);
format %{ "String Compress $src,$dst -> $result // KILL RAX, RCX, RDX" %}
ins_encode %{
__ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
$tmp1$$XMMRegister, $tmp2$$XMMRegister, $tmp3$$XMMRegister,
$tmp4$$XMMRegister, $tmp5$$Register, $result$$Register);
%}
ins_pipe( pipe_slow );
%}
// fast byte[] to char[] inflation
instruct string_inflate(Universe dummy, rsi_RegP src, rdi_RegP dst, rdx_RegI len,
regD tmp1, rcx_RegI tmp2, rFlagsReg cr) %{
match(Set dummy (StrInflatedCopy src (Binary dst len)));
effect(TEMP tmp1, TEMP tmp2, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);
format %{ "String Inflate $src,$dst // KILL $tmp1, $tmp2" %}
ins_encode %{
__ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
$tmp1$$XMMRegister, $tmp2$$Register);
%}
ins_pipe( pipe_slow );
%}
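A hedged scalar sketch of the copy loops that the string_compress and string_inflate instructs above accelerate (method names are illustrative, not the JDK's): compression narrows chars to bytes and must stop at the first char that does not fit in Latin-1, while inflation zero-extends bytes back to chars.

// Illustration only; the real intrinsics operate on XMM registers, this is the scalar idea.
class CompressInflateSketch {
    // Returns the number of chars copied; stops at the first char that needs more than 8 bits.
    static int compress(char[] src, byte[] dst, int len) {
        for (int i = 0; i < len; i++) {
            char c = src[i];
            if (c > 0xFF) {
                return i;            // caller falls back to the UTF-16 representation
            }
            dst[i] = (byte) c;
        }
        return len;
    }

    // Widens Latin-1 bytes back into chars, zero-extending each byte.
    static void inflate(byte[] src, char[] dst, int len) {
        for (int i = 0; i < len; i++) {
            dst[i] = (char) (src[i] & 0xFF);
        }
    }
}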

View File

@ -69,6 +69,9 @@ define_pd_global(uintx, TypeProfileLevel, 0);
define_pd_global(bool, PreserveFramePointer, false);
// No performance work done here yet.
define_pd_global(bool, CompactStrings, false);
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint) \
\
product(bool, UseFastEmptyMethods, true, \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -76,7 +76,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(
}
nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
methodHandle method,
const methodHandle& method,
int compile_id,
BasicType *sig_bt,
VMRegPair *regs,

View File

@ -22,15 +22,18 @@
*/
package jdk.vm.ci.amd64;
import static jdk.vm.ci.code.MemoryBarriers.*;
import static jdk.vm.ci.code.Register.*;
import static jdk.vm.ci.code.MemoryBarriers.LOAD_STORE;
import static jdk.vm.ci.code.MemoryBarriers.STORE_STORE;
import static jdk.vm.ci.code.Register.SPECIAL;
import java.nio.*;
import java.util.*;
import java.nio.ByteOrder;
import java.util.EnumSet;
import jdk.vm.ci.code.*;
import jdk.vm.ci.code.Architecture;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.Register.RegisterCategory;
import jdk.vm.ci.meta.*;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.PlatformKind;
/**
* Represents the AMD64 architecture.
@ -65,9 +68,7 @@ public class AMD64 extends Architecture {
r8, r9, r10, r11, r12, r13, r14, r15
};
private static final int XMM_REFERENCE_MAP_SHIFT = 2;
public static final RegisterCategory XMM = new RegisterCategory("XMM", cpuRegisters.length, XMM_REFERENCE_MAP_SHIFT);
public static final RegisterCategory XMM = new RegisterCategory("XMM");
// XMM registers
public static final Register xmm0 = new Register(16, 0, "xmm0", XMM);
@ -79,8 +80,8 @@ public class AMD64 extends Architecture {
public static final Register xmm6 = new Register(22, 6, "xmm6", XMM);
public static final Register xmm7 = new Register(23, 7, "xmm7", XMM);
public static final Register xmm8 = new Register(24, 8, "xmm8", XMM);
public static final Register xmm9 = new Register(25, 9, "xmm9", XMM);
public static final Register xmm8 = new Register(24, 8, "xmm8", XMM);
public static final Register xmm9 = new Register(25, 9, "xmm9", XMM);
public static final Register xmm10 = new Register(26, 10, "xmm10", XMM);
public static final Register xmm11 = new Register(27, 11, "xmm11", XMM);
public static final Register xmm12 = new Register(28, 12, "xmm12", XMM);
@ -88,28 +89,77 @@ public class AMD64 extends Architecture {
public static final Register xmm14 = new Register(30, 14, "xmm14", XMM);
public static final Register xmm15 = new Register(31, 15, "xmm15", XMM);
public static final Register[] xmmRegisters = {
public static final Register xmm16 = new Register(32, 16, "xmm16", XMM);
public static final Register xmm17 = new Register(33, 17, "xmm17", XMM);
public static final Register xmm18 = new Register(34, 18, "xmm18", XMM);
public static final Register xmm19 = new Register(35, 19, "xmm19", XMM);
public static final Register xmm20 = new Register(36, 20, "xmm20", XMM);
public static final Register xmm21 = new Register(37, 21, "xmm21", XMM);
public static final Register xmm22 = new Register(38, 22, "xmm22", XMM);
public static final Register xmm23 = new Register(39, 23, "xmm23", XMM);
public static final Register xmm24 = new Register(40, 24, "xmm24", XMM);
public static final Register xmm25 = new Register(41, 25, "xmm25", XMM);
public static final Register xmm26 = new Register(42, 26, "xmm26", XMM);
public static final Register xmm27 = new Register(43, 27, "xmm27", XMM);
public static final Register xmm28 = new Register(44, 28, "xmm28", XMM);
public static final Register xmm29 = new Register(45, 29, "xmm29", XMM);
public static final Register xmm30 = new Register(46, 30, "xmm30", XMM);
public static final Register xmm31 = new Register(47, 31, "xmm31", XMM);
public static final Register[] xmmRegistersSSE = {
xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,
xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15
};
public static final Register[] cpuxmmRegisters = {
public static final Register[] xmmRegistersAVX512 = {
xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,
xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,
xmm16, xmm17, xmm18, xmm19, xmm20, xmm21, xmm22, xmm23,
xmm24, xmm25, xmm26, xmm27, xmm28, xmm29, xmm30, xmm31
};
public static final RegisterCategory MASK = new RegisterCategory("MASK", false);
public static final Register k0 = new Register(48, 0, "k0", MASK);
public static final Register k1 = new Register(49, 1, "k1", MASK);
public static final Register k2 = new Register(50, 2, "k2", MASK);
public static final Register k3 = new Register(51, 3, "k3", MASK);
public static final Register k4 = new Register(52, 4, "k4", MASK);
public static final Register k5 = new Register(53, 5, "k5", MASK);
public static final Register k6 = new Register(54, 6, "k6", MASK);
public static final Register k7 = new Register(55, 7, "k7", MASK);
public static final Register[] valueRegistersSSE = {
rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi,
r8, r9, r10, r11, r12, r13, r14, r15,
xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,
xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15
};
public static final Register[] valueRegistersAVX512 = {
rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi,
r8, r9, r10, r11, r12, r13, r14, r15,
xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,
xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,
xmm16, xmm17, xmm18, xmm19, xmm20, xmm21, xmm22, xmm23,
xmm24, xmm25, xmm26, xmm27, xmm28, xmm29, xmm30, xmm31,
k0, k1, k2, k3, k4, k5, k6, k7
};
/**
* Register used to construct an instruction-relative address.
*/
public static final Register rip = new Register(32, -1, "rip", SPECIAL);
public static final Register rip = new Register(56, -1, "rip", SPECIAL);
public static final Register[] allRegisters = {
rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi,
r8, r9, r10, r11, r12, r13, r14, r15,
xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,
xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,
xmm16, xmm17, xmm18, xmm19, xmm20, xmm21, xmm22, xmm23,
xmm24, xmm25, xmm26, xmm27, xmm28, xmm29, xmm30, xmm31,
k0, k1, k2, k3, k4, k5, k6, k7,
rip
};
@ -151,7 +201,8 @@ public class AMD64 extends Architecture {
AVX512PF,
AVX512ER,
AVX512CD,
AVX512BW
AVX512BW,
AVX512VL
}
private final EnumSet<CPUFeature> features;
@ -166,11 +217,21 @@ public class AMD64 extends Architecture {
private final EnumSet<Flag> flags;
private final AMD64Kind largestKind;
public AMD64(EnumSet<CPUFeature> features, EnumSet<Flag> flags) {
super("AMD64", JavaKind.Long, ByteOrder.LITTLE_ENDIAN, true, allRegisters, LOAD_STORE | STORE_STORE, 1, cpuRegisters.length + (xmmRegisters.length << XMM_REFERENCE_MAP_SHIFT), 8);
super("AMD64", AMD64Kind.QWORD, ByteOrder.LITTLE_ENDIAN, true, allRegisters, LOAD_STORE | STORE_STORE, 1, 8);
this.features = features;
this.flags = flags;
assert features.contains(CPUFeature.SSE2) : "minimum config for x64";
if (features.contains(CPUFeature.AVX512F)) {
largestKind = AMD64Kind.V512_QWORD;
} else if (features.contains(CPUFeature.AVX)) {
largestKind = AMD64Kind.V256_QWORD;
} else {
largestKind = AMD64Kind.V128_QWORD;
}
}
public EnumSet<CPUFeature> getFeatures() {
@ -182,50 +243,60 @@ public class AMD64 extends Architecture {
}
@Override
public PlatformKind getPlatformKind(JavaKind javaKind) {
if (javaKind.isObject()) {
return getWordKind();
public Register[] getAvailableValueRegisters() {
if (features.contains(CPUFeature.AVX512F)) {
return valueRegistersAVX512;
} else {
return javaKind;
return valueRegistersSSE;
}
}
@Override
public PlatformKind getPlatformKind(JavaKind javaKind) {
switch (javaKind) {
case Boolean:
case Byte:
return AMD64Kind.BYTE;
case Short:
case Char:
return AMD64Kind.WORD;
case Int:
return AMD64Kind.DWORD;
case Long:
case Object:
return AMD64Kind.QWORD;
case Float:
return AMD64Kind.SINGLE;
case Double:
return AMD64Kind.DOUBLE;
default:
return null;
}
}
@Override
public boolean canStoreValue(RegisterCategory category, PlatformKind platformKind) {
if (!(platformKind instanceof JavaKind)) {
return false;
AMD64Kind kind = (AMD64Kind) platformKind;
if (kind.isInteger()) {
return category.equals(CPU);
} else if (kind.isXMM()) {
return category.equals(XMM);
} else {
assert kind.isMask();
return category.equals(MASK);
}
JavaKind kind = (JavaKind) platformKind;
if (category.equals(CPU)) {
switch (kind) {
case Boolean:
case Byte:
case Char:
case Short:
case Int:
case Long:
return true;
}
} else if (category.equals(XMM)) {
switch (kind) {
case Float:
case Double:
return true;
}
}
return false;
}
@Override
public PlatformKind getLargestStorableKind(RegisterCategory category) {
public AMD64Kind getLargestStorableKind(RegisterCategory category) {
if (category.equals(CPU)) {
return JavaKind.Long;
return AMD64Kind.QWORD;
} else if (category.equals(XMM)) {
return JavaKind.Double;
return largestKind;
} else if (category.equals(MASK)) {
return AMD64Kind.MASK64;
} else {
return JavaKind.Illegal;
return null;
}
}
}

View File

@ -0,0 +1,214 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.vm.ci.amd64;
import jdk.vm.ci.meta.PlatformKind;
public enum AMD64Kind implements PlatformKind {
// scalar
BYTE(1),
WORD(2),
DWORD(4),
QWORD(8),
SINGLE(4),
DOUBLE(8),
// SSE2
V32_BYTE(4, BYTE),
V32_WORD(4, WORD),
V64_BYTE(8, BYTE),
V64_WORD(8, WORD),
V64_DWORD(8, DWORD),
V128_BYTE(16, BYTE),
V128_WORD(16, WORD),
V128_DWORD(16, DWORD),
V128_QWORD(16, QWORD),
V128_SINGLE(16, SINGLE),
V128_DOUBLE(16, DOUBLE),
// AVX
V256_BYTE(32, BYTE),
V256_WORD(32, WORD),
V256_DWORD(32, DWORD),
V256_QWORD(32, QWORD),
V256_SINGLE(32, SINGLE),
V256_DOUBLE(32, DOUBLE),
// AVX512
V512_BYTE(64, BYTE),
V512_WORD(64, WORD),
V512_DWORD(64, DWORD),
V512_QWORD(64, QWORD),
V512_SINGLE(64, SINGLE),
V512_DOUBLE(64, DOUBLE),
MASK8(1),
MASK16(2),
MASK32(4),
MASK64(8);
private final int size;
private final int vectorLength;
private final AMD64Kind scalar;
private final EnumKey<AMD64Kind> key = new EnumKey<>(this);
private AMD64Kind(int size) {
this.size = size;
this.scalar = this;
this.vectorLength = 1;
}
private AMD64Kind(int size, AMD64Kind scalar) {
this.size = size;
this.scalar = scalar;
assert size % scalar.size == 0;
this.vectorLength = size / scalar.size;
}
public AMD64Kind getScalar() {
return scalar;
}
public int getSizeInBytes() {
return size;
}
public int getVectorLength() {
return vectorLength;
}
public Key getKey() {
return key;
}
public boolean isInteger() {
switch (this) {
case BYTE:
case WORD:
case DWORD:
case QWORD:
return true;
default:
return false;
}
}
public boolean isXMM() {
switch (this) {
case SINGLE:
case DOUBLE:
case V32_BYTE:
case V32_WORD:
case V64_BYTE:
case V64_WORD:
case V64_DWORD:
case V128_BYTE:
case V128_WORD:
case V128_DWORD:
case V128_QWORD:
case V128_SINGLE:
case V128_DOUBLE:
case V256_BYTE:
case V256_WORD:
case V256_DWORD:
case V256_QWORD:
case V256_SINGLE:
case V256_DOUBLE:
case V512_BYTE:
case V512_WORD:
case V512_DWORD:
case V512_QWORD:
case V512_SINGLE:
case V512_DOUBLE:
return true;
default:
return false;
}
}
public boolean isMask() {
switch (this) {
case MASK8:
case MASK16:
case MASK32:
case MASK64:
return true;
default:
return false;
}
}
public char getTypeChar() {
switch (this) {
case BYTE:
return 'b';
case WORD:
return 'w';
case DWORD:
return 'd';
case QWORD:
return 'q';
case SINGLE:
return 'S';
case DOUBLE:
return 'D';
case V32_BYTE:
case V32_WORD:
case V64_BYTE:
case V64_WORD:
case V64_DWORD:
return 'v';
case V128_BYTE:
case V128_WORD:
case V128_DWORD:
case V128_QWORD:
case V128_SINGLE:
case V128_DOUBLE:
return 'x';
case V256_BYTE:
case V256_WORD:
case V256_DWORD:
case V256_QWORD:
case V256_SINGLE:
case V256_DOUBLE:
return 'y';
case V512_BYTE:
case V512_WORD:
case V512_DWORD:
case V512_QWORD:
case V512_SINGLE:
case V512_DOUBLE:
return 'z';
case MASK8:
case MASK16:
case MASK32:
case MASK64:
return 'k';
default:
return '-';
}
}
}
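A small usage sketch for the new AMD64Kind enum; the printed values follow directly from the constructor arguments and switch tables shown above.

import jdk.vm.ci.amd64.AMD64Kind;

class AMD64KindDemo {
    public static void main(String[] args) {
        AMD64Kind k = AMD64Kind.V256_SINGLE;           // 32 bytes of 4-byte SINGLE elements
        System.out.println(k.getSizeInBytes());        // 32
        System.out.println(k.getVectorLength());       // 8
        System.out.println(k.getScalar());             // SINGLE
        System.out.println(k.getTypeChar());           // 'y' (a 256-bit AVX register)
        System.out.println(AMD64Kind.MASK64.isMask()); // true
    }
}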

View File

@ -22,11 +22,12 @@
*/
package jdk.vm.ci.code;
import java.nio.*;
import java.util.*;
import java.nio.ByteOrder;
import java.util.Arrays;
import jdk.vm.ci.code.Register.RegisterCategory;
import jdk.vm.ci.meta.*;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.PlatformKind;
/**
* Represents a CPU architecture, including information such as its endianness, CPU registers, word
@ -34,13 +35,6 @@ import jdk.vm.ci.meta.*;
*/
public abstract class Architecture {
/**
* The number of entries required in a {@link ReferenceMap} covering all the registers that may
* store references. The index of a register in the reference map is given by
* {@link Register#getReferenceMapIndex()}.
*/
private final int registerReferenceMapSize;
/**
* The architecture specific type of a native word.
*/
@ -85,7 +79,7 @@ public abstract class Architecture {
private final int returnAddressSize;
protected Architecture(String name, PlatformKind wordKind, ByteOrder byteOrder, boolean unalignedMemoryAccess, Register[] registers, int implicitMemoryBarriers, int nativeCallDisplacementOffset,
int registerReferenceMapSize, int returnAddressSize) {
int returnAddressSize) {
this.name = name;
this.registers = registers;
this.wordKind = wordKind;
@ -93,7 +87,6 @@ public abstract class Architecture {
this.unalignedMemoryAccess = unalignedMemoryAccess;
this.implicitMemoryBarriers = implicitMemoryBarriers;
this.machineCodeCallDisplacementOffset = nativeCallDisplacementOffset;
this.registerReferenceMapSize = registerReferenceMapSize;
this.returnAddressSize = returnAddressSize;
}
@ -107,10 +100,6 @@ public abstract class Architecture {
return getName().toLowerCase();
}
public int getRegisterReferenceMapSize() {
return registerReferenceMapSize;
}
/**
* Gets the natural size of words (typically registers and pointers) of this architecture, in
* bytes.
@ -131,13 +120,23 @@ public abstract class Architecture {
}
/**
* Gets an array of all available registers on this architecture. The index of each register in
* this array is equal to its {@linkplain Register#number number}.
* Gets an array of all registers that exist on this architecture. This contains all registers
* that exist in the specification of this architecture. Not all of them may be available on
* this particular architecture instance. The index of each register in this array is equal to
* its {@linkplain Register#number number}.
*/
public Register[] getRegisters() {
return registers.clone();
}
/**
* Gets an array of all registers available for storing values on this architecture. This may be
* a subset of {@link #getRegisters()}, depending on the capabilities of this particular CPU.
*/
public Register[] getAvailableValueRegisters() {
return getRegisters();
}
public ByteOrder getByteOrder() {
return byteOrder;
}
@ -207,7 +206,6 @@ public abstract class Architecture {
assert this.byteOrder.equals(that.byteOrder);
assert this.implicitMemoryBarriers == that.implicitMemoryBarriers;
assert this.machineCodeCallDisplacementOffset == that.machineCodeCallDisplacementOffset;
assert this.registerReferenceMapSize == that.registerReferenceMapSize;
assert Arrays.equals(this.registers, that.registers);
assert this.returnAddressSize == that.returnAddressSize;
assert this.unalignedMemoryAccess == that.unalignedMemoryAccess;

View File

@ -22,7 +22,7 @@
*/
package jdk.vm.ci.code;
import java.util.*;
import java.util.Locale;
/**
* Exception thrown when the compiler refuses to compile a method because of problems with the

View File

@ -22,9 +22,12 @@
*/
package jdk.vm.ci.code;
import java.util.*;
import java.util.Arrays;
import jdk.vm.ci.meta.*;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.JavaValue;
import jdk.vm.ci.meta.ResolvedJavaMethod;
import jdk.vm.ci.meta.Value;
/**
* Represents the Java bytecode frame state(s) at a given position including {@link Value locations}

View File

@ -22,9 +22,9 @@
*/
package jdk.vm.ci.code;
import java.util.*;
import java.util.Objects;
import jdk.vm.ci.meta.*;
import jdk.vm.ci.meta.ResolvedJavaMethod;
/**
* Represents a code position, that is, a chain of inlined methods with bytecode locations, that is

View File

@ -22,9 +22,10 @@
*/
package jdk.vm.ci.code;
import static jdk.vm.ci.code.ValueUtil.*;
import jdk.vm.ci.meta.*;
import static jdk.vm.ci.code.ValueUtil.isAllocatableValue;
import static jdk.vm.ci.code.ValueUtil.isStackSlot;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.Value;
/**
* A calling convention describes the locations in which the arguments for a call are placed and the

View File

@ -22,9 +22,14 @@
*/
package jdk.vm.ci.code;
import jdk.vm.ci.code.CompilationResult.*;
import jdk.vm.ci.code.DataSection.*;
import jdk.vm.ci.meta.*;
import jdk.vm.ci.code.CompilationResult.Call;
import jdk.vm.ci.code.CompilationResult.DataPatch;
import jdk.vm.ci.code.CompilationResult.Mark;
import jdk.vm.ci.code.DataSection.Data;
import jdk.vm.ci.meta.Constant;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.ResolvedJavaMethod;
import jdk.vm.ci.meta.SpeculationLog;
/**
* Access to code cache related details and requirements.
@ -32,26 +37,62 @@ import jdk.vm.ci.meta.*;
public interface CodeCacheProvider {
/**
* Adds the given compilation result as an implementation of the given method without making it
* the default implementation.
* Installs code for a given method based on a given compilation result without making it the
* default implementation of the method.
*
* @param method a method to which the executable code is begin added
* @param method a method implemented by the installed code
* @param compResult the compilation result to be added
* @param speculationLog the speculation log to be used
* @return a reference to the compiled and ready-to-run code or throws a
* {@link BailoutException} if the code installation failed
* @param log the speculation log to be used
* @param installedCode a predefined {@link InstalledCode} object to use as a reference to the
* installed code. If {@code null}, a new {@link InstalledCode} object will be
* created.
* @return a reference to the ready-to-run code
* @throws BailoutException if the code installation failed
*/
InstalledCode addMethod(ResolvedJavaMethod method, CompilationResult compResult, SpeculationLog speculationLog, InstalledCode predefinedInstalledCode);
default InstalledCode addCode(ResolvedJavaMethod method, CompilationResult compResult, SpeculationLog log, InstalledCode installedCode) {
return installCode(new CompilationRequest(method), compResult, installedCode, log, false);
}
/**
* Sets the given compilation result as the default implementation of the given method.
* Installs code for a given method based on a given compilation result and makes it the default
* implementation of the method.
*
* @param method a method to which the executable code is begin added
* @param method a method implemented by the installed code and for which the installed code
* becomes the default implementation
* @param compResult the compilation result to be added
* @return a reference to the compiled and ready-to-run code or null if the code installation
* failed
* @return a reference to the ready-to-run code
* @throws BailoutException if the code installation failed
*/
InstalledCode setDefaultMethod(ResolvedJavaMethod method, CompilationResult compResult);
default InstalledCode setDefaultCode(ResolvedJavaMethod method, CompilationResult compResult) {
return installCode(new CompilationRequest(method), compResult, null, null, true);
}
/**
* Installs code based on a given compilation result.
*
* @param compRequest details of the method compiled to produce {@code compResult} or
* {@code null} if the input to {@code compResult} was not a
* {@link ResolvedJavaMethod}
* @param compResult the compilation result to be added
* @param installedCode a pre-allocated {@link InstalledCode} object to use as a reference to
* the installed code. If {@code null}, a new {@link InstalledCode} object will be
* created.
* @param log the speculation log to be used
* @param isDefault specifies if the installed code should be made the default implementation of
* {@code compRequest.getMethod()}. The default implementation for a method is the
* code executed for standard calls to the method. This argument is ignored if
* {@code compRequest == null}.
* @return a reference to the compiled and ready-to-run installed code
* @throws BailoutException if the code installation failed
*/
InstalledCode installCode(CompilationRequest compRequest, CompilationResult compResult, InstalledCode installedCode, SpeculationLog log, boolean isDefault);
/**
* Invalidates {@code installedCode} such that {@link InvalidInstalledCodeException} will be
* raised the next time {@code installedCode} is
* {@linkplain InstalledCode#executeVarargs(Object...) executed}.
*/
void invalidateInstalledCode(InstalledCode installedCode);
/**
* Gets a name for a {@link Mark} mark.
@ -102,4 +143,16 @@ public interface CodeCacheProvider {
* Create a new speculation log for the target runtime.
*/
SpeculationLog createSpeculationLog();
/**
* Returns the maximum absolute offset of a PC relative call to a given address from any
* position in the code cache or -1 when not applicable. Intended for determining the required
* size of address/offset fields.
*/
long getMaxCallTargetOffset(long address);
/**
* Determines if debug info should also be emitted at non-safepoint locations.
*/
boolean shouldDebugNonSafepoints();
}
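A hedged caller-side sketch of the reworked installation API above; codeCache, method, compResult and log are assumed to be supplied by the runtime, and only methods declared in this interface are used.

// Sketch under the assumption that the surrounding objects already exist; both default
// methods shown above funnel into installCode(...).
static InstalledCode installBoth(CodeCacheProvider codeCache, ResolvedJavaMethod method,
                                 CompilationResult compResult, SpeculationLog log) {
    InstalledCode nonDefault = codeCache.addCode(method, compResult, log, null);
    InstalledCode asDefault = codeCache.setDefaultCode(method, compResult);
    // Later executions through nonDefault now raise InvalidInstalledCodeException.
    codeCache.invalidateInstalledCode(nonDefault);
    return asDefault;
}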

View File

@ -22,9 +22,15 @@
*/
package jdk.vm.ci.code;
import java.util.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import jdk.vm.ci.meta.*;
import jdk.vm.ci.meta.JavaType;
import jdk.vm.ci.meta.MetaUtil;
import jdk.vm.ci.meta.ResolvedJavaMethod;
import jdk.vm.ci.meta.Signature;
/**
* Miscellaneous collection of utility methods used by {@code jdk.vm.ci.code} and its clients.
@ -323,49 +329,12 @@ public class CodeUtil {
public interface RefMapFormatter {
String formatStackSlot(int frameRefMapIndex);
String formatRegister(int regRefMapIndex);
}
/**
* Formats a location in a register reference map.
* Formats a location present in a reference map.
*/
public static class DefaultRegFormatter implements RefMapFormatter {
private final Register[] registers;
public DefaultRegFormatter(Architecture arch) {
registers = new Register[arch.getRegisterReferenceMapSize()];
for (Register r : arch.getRegisters()) {
if (r.getReferenceMapIndex() >= 0) {
registers[r.getReferenceMapIndex()] = r;
}
}
}
public String formatStackSlot(int frameRefMapIndex) {
return null;
}
public String formatRegister(int regRefMapIndex) {
int i = regRefMapIndex;
int idx = 0;
while (registers[i] == null) {
i--;
idx++;
}
if (idx == 0) {
return registers[i].toString();
} else {
return String.format("%s+%d", registers[i].toString(), idx);
}
}
}
/**
* Formats a location present in a register or frame reference map.
*/
public static class DefaultRefMapFormatter extends DefaultRegFormatter {
public static class DefaultRefMapFormatter implements RefMapFormatter {
/**
* The size of a stack slot.
@ -383,8 +352,7 @@ public class CodeUtil {
*/
public final int refMapToFPOffset;
public DefaultRefMapFormatter(Architecture arch, int slotSize, Register fp, int refMapToFPOffset) {
super(arch);
public DefaultRefMapFormatter(int slotSize, Register fp, int refMapToFPOffset) {
this.slotSize = slotSize;
this.fp = fp;
this.refMapToFPOffset = refMapToFPOffset;

View File

@ -0,0 +1,78 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.vm.ci.code;
import jdk.vm.ci.meta.ResolvedJavaMethod;
/**
* Represents a request to compile a method.
*/
public class CompilationRequest {
private final ResolvedJavaMethod method;
private final int entryBCI;
/**
* Creates a request to compile a method starting at its entry point.
*
* @param method the method to be compiled
*/
public CompilationRequest(ResolvedJavaMethod method) {
this(method, -1);
}
/**
* Creates a request to compile a method starting at a given BCI.
*
* @param method the method to be compiled
* @param entryBCI the bytecode index (BCI) at which to start compiling where -1 denotes the
* method's entry point
*/
public CompilationRequest(ResolvedJavaMethod method, int entryBCI) {
assert method != null;
this.method = method;
this.entryBCI = entryBCI;
}
/**
* Gets the method to be compiled.
*/
public ResolvedJavaMethod getMethod() {
return method;
}
/**
* Gets the bytecode index (BCI) at which to start compiling where -1 denotes a non-OSR
* compilation request and all other values denote an on stack replacement (OSR) compilation
* request.
*/
public int getEntryBCI() {
return entryBCI;
}
@Override
public String toString() {
return method.format("%H.%n(%p)@" + entryBCI);
}
}
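A minimal usage sketch for CompilationRequest; method is an assumed ResolvedJavaMethod obtained elsewhere, and the BCI value is an arbitrary example.

// Assumes: import jdk.vm.ci.code.CompilationRequest; import jdk.vm.ci.meta.ResolvedJavaMethod;
static void requestExamples(ResolvedJavaMethod method) {
    CompilationRequest standard = new CompilationRequest(method);   // getEntryBCI() == -1, i.e. non-OSR
    CompilationRequest osr = new CompilationRequest(method, 42);    // OSR compilation starting at BCI 42
    System.out.println(standard + " / " + osr);                     // uses the toString() format above
}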

View File

@ -22,13 +22,24 @@
*/
package jdk.vm.ci.code;
import static java.util.Collections.*;
import static jdk.vm.ci.meta.MetaUtil.*;
import static java.util.Collections.emptyList;
import static java.util.Collections.unmodifiableList;
import static jdk.vm.ci.meta.MetaUtil.identityHashCodeString;
import java.util.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import jdk.vm.ci.meta.*;
import jdk.vm.ci.meta.Assumptions.*;
import jdk.vm.ci.meta.Assumptions.Assumption;
import jdk.vm.ci.meta.InvokeTarget;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.MetaUtil;
import jdk.vm.ci.meta.ResolvedJavaMethod;
import jdk.vm.ci.meta.VMConstant;
/**
* Represents the output from compiling a method, including the compiled machine code, associated
@ -115,8 +126,8 @@ public class CompilationResult {
public enum MetaSpaceAccessType {
Move,
Store, // store only works for compressed oops (memory <- 32bit value). Compressed oops is
// not supported using AOT. TODO: Look at HotSpotStoreConstantOp
Store, // store only works for compressed oops (memory <- 32bit value). Compressed oops is
// not supported using AOT. TODO: Look at HotSpotStoreConstantOp
Compare; // HotSpotCompareMemoryConstantOp, HotSpotCompareConstantOp
private MetaSpaceAccessType() {
@ -128,13 +139,11 @@ public class CompilationResult {
*/
public static final class MetaSpaceAccess extends Infopoint {
private static final long serialVersionUID = 1701958512608684706L;
/**
* Metaspace reference.
*/
public final Object reference; // Object here is a HotSpotResolvedObjectType or a
// HotSpotMetaSpaceConstant
// HotSpotMetaSpaceConstant
public final MetaSpaceAccessType type;
@ -296,6 +305,15 @@ public class CompilationResult {
}
return false;
}
@Override
public String toString() {
if (initialized) {
return String.format("DataSection[0x%x]", offset);
} else {
return "DataSection[?]";
}
}
}
/**
@ -528,8 +546,6 @@ public class CompilationResult {
}
}
private int id = -1;
/**
* Specifies whether this compilation is a {@code +ImmutableCode} {@code +GeneratePIC}
* compilation.
@ -612,7 +628,6 @@ public class CompilationResult {
CompilationResult that = (CompilationResult) obj;
// @formatter:off
if (this.entryBCI == that.entryBCI &&
this.id == that.id &&
this.customStackAreaOffset == that.customStackAreaOffset &&
this.totalFrameSize == that.totalFrameSize &&
this.targetCodeSize == that.targetCodeSize &&
@ -632,20 +647,6 @@ public class CompilationResult {
return false;
}
/**
* @return the compile id
*/
public int getId() {
return id;
}
/**
* @param id the compile id to set
*/
public void setId(int id) {
this.id = id;
}
/**
* @return true is this is a {@code +ImmutableCode} {@code +GeneratePIC} compilation, false
* otherwise.

View File

@ -22,15 +22,18 @@
*/
package jdk.vm.ci.code;
import static jdk.vm.ci.meta.MetaUtil.*;
import static jdk.vm.ci.meta.MetaUtil.identityHashCodeString;
import java.nio.*;
import java.util.*;
import java.util.function.*;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.Objects;
import java.util.function.Consumer;
import jdk.vm.ci.code.CompilationResult.*;
import jdk.vm.ci.code.DataSection.*;
import jdk.vm.ci.meta.*;
import jdk.vm.ci.code.CompilationResult.DataPatch;
import jdk.vm.ci.code.CompilationResult.DataSectionReference;
import jdk.vm.ci.code.DataSection.Data;
import jdk.vm.ci.meta.SerializableConstant;
public final class DataSection implements Iterable<Data> {
@ -176,11 +179,27 @@ public final class DataSection implements Iterable<Data> {
*/
public DataSectionReference insertData(Data data) {
assert !finalLayout;
if (data.ref == null) {
data.ref = new DataSectionReference();
synchronized (data) {
if (data.ref == null) {
data.ref = new DataSectionReference();
dataItems.add(data);
}
return data.ref;
}
}
/**
* Transfers all {@link Data} from the provided other {@link DataSection} to this
* {@link DataSection}, and empties the other section.
*/
public void addAll(DataSection other) {
assert !finalLayout && !other.finalLayout;
for (Data data : other.dataItems) {
assert data.ref != null;
dataItems.add(data);
}
return data.ref;
other.dataItems.clear();
}
/**
@ -195,14 +214,16 @@ public final class DataSection implements Iterable<Data> {
dataItems.sort((a, b) -> a.alignment - b.alignment);
int position = 0;
int alignment = 1;
for (Data d : dataItems) {
sectionAlignment = lcm(sectionAlignment, d.alignment);
alignment = lcm(alignment, d.alignment);
position = align(position, d.alignment);
d.ref.setOffset(position);
position += d.size;
}
sectionAlignment = alignment;
sectionSize = position;
}

View File

@ -22,7 +22,7 @@
*/
package jdk.vm.ci.code;
import java.util.*;
import java.util.Objects;
/**
* Represents the debugging information for a particular point of execution. This information

View File

@ -29,14 +29,19 @@ package jdk.vm.ci.code;
public class InstalledCode {
/**
* Raw address of this code blob.
* Raw address of the entity representing this installed code.
*/
private long address;
protected long address;
/**
* Raw address of entryPoint of this installed code.
*/
protected long entryPoint;
/**
* Counts how often the address field was reassigned.
*/
private long version;
protected long version;
protected final String name;
@ -44,27 +49,29 @@ public class InstalledCode {
this.name = name;
}
public final void setAddress(long address) {
this.address = address;
version++;
}
/**
* @return the address of this code blob
* @return the address of the entity representing this installed code.
*/
public final long getAddress() {
return address;
}
/**
* @return the address of this code blob
* @return the address of the normal entry point of the installed code.
*/
public final long getEntryPoint() {
return entryPoint;
}
/**
* @return the version number of this installed code
*/
public final long getVersion() {
return version;
}
/**
* Returns the name of this code blob.
* Returns the name of this installed code.
*/
public String getName() {
return name;
@ -79,10 +86,19 @@ public class InstalledCode {
}
/**
* Returns the number of instruction bytes for this code.
* @return true if the code represented by this object is still valid for invocation, false
* otherwise (may happen due to deopt, etc.)
*/
public long getCodeSize() {
return 0;
public boolean isValid() {
return entryPoint != 0;
}
/**
* @return true if the code represented by this object still exists and might have live
* activations, false otherwise (may happen due to deopt, etc.)
*/
public boolean isAlive() {
return address != 0;
}
/**
@ -92,18 +108,10 @@ public class InstalledCode {
return null;
}
/**
* @return true if the code represented by this object is still valid, false otherwise (may
* happen due to deopt, etc.)
*/
public boolean isValid() {
return address != 0;
}
/**
* Invalidates this installed code such that any subsequent
* {@linkplain #executeVarargs(Object...) invocation} will throw an
* {@link InvalidInstalledCodeException}.
* {@link InvalidInstalledCodeException} and all existing invocations will be deoptimized.
*/
public void invalidate() {
throw new UnsupportedOperationException();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -22,7 +22,8 @@
*/
package jdk.vm.ci.code;
import jdk.vm.ci.meta.*;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.LIRKind;
/**
* Represents a target machine register.
@ -80,22 +81,15 @@ public final class Register implements Comparable<Register> {
public static class RegisterCategory {
private final String name;
private final int referenceMapOffset;
private final int referenceMapShift;
private final boolean mayContainReference;
public RegisterCategory(String name) {
this(name, 0, 0);
this(name, true);
}
public RegisterCategory(String name, int referenceMapOffset) {
this(name, referenceMapOffset, 0);
}
public RegisterCategory(String name, int referenceMapOffset, int referenceMapShift) {
public RegisterCategory(String name, boolean mayContainReference) {
this.name = name;
this.referenceMapOffset = referenceMapOffset;
this.referenceMapShift = referenceMapShift;
this.mayContainReference = mayContainReference;
}
@Override
@ -112,7 +106,7 @@ public final class Register implements Comparable<Register> {
public boolean equals(Object obj) {
if (obj instanceof RegisterCategory) {
RegisterCategory that = (RegisterCategory) obj;
return this.referenceMapOffset == that.referenceMapOffset && this.referenceMapShift == that.referenceMapShift && this.name.equals(that.name);
return this.name.equals(that.name);
}
return false;
}
@ -138,10 +132,10 @@ public final class Register implements Comparable<Register> {
}
/**
* Get the start index of this register in the {@link ReferenceMap}.
* Determine whether this register needs to be part of the reference map.
*/
public int getReferenceMapIndex() {
return (encoding << registerCategory.referenceMapShift) + registerCategory.referenceMapOffset;
public boolean mayContainReference() {
return registerCategory.mayContainReference;
}
/**

View File

@ -22,7 +22,7 @@
*/
package jdk.vm.ci.code;
import java.util.*;
import java.util.Arrays;
/**
* A collection of register attributes. The specific attribute values for a register may be local to

View File

@ -22,8 +22,10 @@
*/
package jdk.vm.ci.code;
import jdk.vm.ci.code.CallingConvention.*;
import jdk.vm.ci.meta.*;
import jdk.vm.ci.code.CallingConvention.Type;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.JavaType;
import jdk.vm.ci.meta.PlatformKind;
/**
* A register configuration binds roles and {@linkplain RegisterAttributes attributes} to physical

View File

@ -22,7 +22,11 @@
*/
package jdk.vm.ci.code;
import java.util.*;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.TreeMap;
/**
* A map from registers to frame slots. This can be used to describe where callee saved registers

View File

@ -22,7 +22,9 @@
*/
package jdk.vm.ci.code;
import jdk.vm.ci.meta.*;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.LIRKind;
/**
* Denotes a register that stores a value of a fixed kind. There is exactly one (canonical) instance

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -22,9 +22,9 @@
*/
package jdk.vm.ci.code;
import static jdk.vm.ci.code.ValueUtil.*;
import jdk.vm.ci.meta.*;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.JavaValue;
import jdk.vm.ci.meta.Value;
/**
* Represents lock information in the debug information.
@ -32,10 +32,10 @@ import jdk.vm.ci.meta.*;
public final class StackLockValue implements JavaValue {
private JavaValue owner;
private StackSlotValue slot;
private AllocatableValue slot;
private final boolean eliminated;
public StackLockValue(JavaValue object, StackSlotValue slot, boolean eliminated) {
public StackLockValue(JavaValue object, AllocatableValue slot, boolean eliminated) {
this.owner = object;
this.slot = slot;
this.eliminated = eliminated;
@ -81,8 +81,7 @@ public final class StackLockValue implements JavaValue {
return false;
}
public void setSlot(StackSlotValue stackSlot) {
assert slot == null || (isVirtualStackSlot(slot) && (slot.equals(stackSlot) || isStackSlot(stackSlot))) : String.format("Can not set slot for %s to %s", this, stackSlot);
public void setSlot(AllocatableValue stackSlot) {
slot = stackSlot;
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,13 +22,14 @@
*/
package jdk.vm.ci.code;
import jdk.vm.ci.meta.*;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.LIRKind;
/**
* Represents a compiler spill slot or an outgoing stack-based argument in a method's frame or an
* incoming stack-based argument in a method's {@linkplain #isInCallerFrame() caller's frame}.
*/
public final class StackSlot extends StackSlotValue {
public final class StackSlot extends AllocatableValue {
private final int offset;
private final boolean addFrameSize;

View File

@ -1,37 +0,0 @@
/*
* Copyright (c) 2014, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.vm.ci.code;
import jdk.vm.ci.meta.*;
/**
* Common base class for {@linkplain StackSlot real} and {@linkplain VirtualStackSlot virtual} stack
* slots.
*/
public abstract class StackSlotValue extends AllocatableValue {
public StackSlotValue(LIRKind lirKind) {
super(lirKind);
}
}

View File

@ -22,9 +22,10 @@
*/
package jdk.vm.ci.code;
import static jdk.vm.ci.meta.MetaUtil.*;
import jdk.vm.ci.meta.*;
import static jdk.vm.ci.meta.MetaUtil.identityHashCodeString;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.LIRKind;
import jdk.vm.ci.meta.PlatformKind;
/**
* Represents the target machine for a compiler, including the CPU architecture, the size of
@ -50,9 +51,9 @@ public class TargetDescription {
public final int wordSize;
/**
* The kind to be used for representing raw pointers and CPU registers.
* The {@link JavaKind} to be used for representing raw pointers and CPU registers in Java code.
*/
public final JavaKind wordKind;
public final JavaKind wordJavaKind;
/**
* The stack alignment requirement of the platform. For example, from Appendix D of <a
@ -78,10 +79,12 @@ public class TargetDescription {
this.arch = arch;
this.isMP = isMP;
this.wordSize = arch.getWordSize();
this.wordKind = JavaKind.fromWordSize(wordSize);
this.wordJavaKind = JavaKind.fromWordSize(wordSize);
this.stackAlignment = stackAlignment;
this.implicitNullCheckLimit = implicitNullCheckLimit;
this.inlineObjects = inlineObjects;
assert arch.getPlatformKind(wordJavaKind).equals(arch.getWordKind());
}
@Override
@ -101,7 +104,7 @@ public class TargetDescription {
this.inlineObjects == that.inlineObjects &&
this.isMP == that.isMP &&
this.stackAlignment == that.stackAlignment &&
this.wordKind.equals(that.wordKind) &&
this.wordJavaKind.equals(that.wordJavaKind) &&
this.wordSize == that.wordSize &&
this.arch.equals(that.arch)) {
return true;
@ -116,10 +119,6 @@ public class TargetDescription {
return identityHashCodeString(this);
}
public int getSizeInBytes(PlatformKind kind) {
return kind.getSizeInBytes();
}
public LIRKind getLIRKind(JavaKind javaKind) {
PlatformKind platformKind = arch.getPlatformKind(javaKind);
if (javaKind.isObject()) {

View File

@ -1,124 +0,0 @@
/*
* Copyright (c) 2011, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.vm.ci.code;
import java.math.*;
//JaCoCo Exclude
/**
* Utilities for unsigned comparisons. All methods have correct, but slow, standard Java
* implementations so that they can be used with compilers not supporting the intrinsics.
*/
public class UnsignedMath {
private static final long MASK = 0xffffffffL;
/**
* Unsigned comparison aboveThan for two numbers.
*/
public static boolean aboveThan(int a, int b) {
return (a & MASK) > (b & MASK);
}
/**
* Unsigned comparison aboveOrEqual for two numbers.
*/
public static boolean aboveOrEqual(int a, int b) {
return (a & MASK) >= (b & MASK);
}
/**
* Unsigned comparison belowThan for two numbers.
*/
public static boolean belowThan(int a, int b) {
return (a & MASK) < (b & MASK);
}
/**
* Unsigned comparison belowOrEqual for two numbers.
*/
public static boolean belowOrEqual(int a, int b) {
return (a & MASK) <= (b & MASK);
}
/**
* Unsigned comparison aboveThan for two numbers.
*/
public static boolean aboveThan(long a, long b) {
return (a > b) ^ ((a < 0) != (b < 0));
}
/**
* Unsigned comparison aboveOrEqual for two numbers.
*/
public static boolean aboveOrEqual(long a, long b) {
return (a >= b) ^ ((a < 0) != (b < 0));
}
/**
* Unsigned comparison belowThan for two numbers.
*/
public static boolean belowThan(long a, long b) {
return (a < b) ^ ((a < 0) != (b < 0));
}
/**
* Unsigned comparison belowOrEqual for two numbers.
*/
public static boolean belowOrEqual(long a, long b) {
return (a <= b) ^ ((a < 0) != (b < 0));
}
/**
* Unsigned division for two numbers.
*/
public static int divide(int a, int b) {
return (int) ((a & MASK) / (b & MASK));
}
/**
* Unsigned remainder for two numbers.
*/
public static int remainder(int a, int b) {
return (int) ((a & MASK) % (b & MASK));
}
/**
* Unsigned division for two numbers.
*/
public static long divide(long a, long b) {
return bi(a).divide(bi(b)).longValue();
}
/**
* Unsigned remainder for two numbers.
*/
public static long remainder(long a, long b) {
return bi(a).remainder(bi(b)).longValue();
}
private static BigInteger bi(long unsigned) {
return unsigned >= 0 ? BigInteger.valueOf(unsigned) : BigInteger.valueOf(unsigned & 0x7fffffffffffffffL).setBit(63);
}
}
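The long overloads of the removed class above rely on an XOR trick: flip the signed comparison exactly when the operands' signs differ, because the negative operand is then the larger one when treated as unsigned. A small self-check against Long.compareUnsigned (available since Java 8):

// Sanity check of the XOR trick; this class is an illustration, not part of the removed file.
class UnsignedCompareCheck {
    static boolean aboveThan(long a, long b) {
        return (a > b) ^ ((a < 0) != (b < 0));
    }

    public static void main(String[] args) {
        long[] samples = {0L, 1L, -1L, Long.MAX_VALUE, Long.MIN_VALUE};
        for (long a : samples) {
            for (long b : samples) {
                boolean viaTrick = aboveThan(a, b);
                boolean viaJdk = Long.compareUnsigned(a, b) > 0;
                if (viaTrick != viaJdk) {
                    throw new AssertionError(a + " vs " + b);
                }
            }
        }
        System.out.println("XOR trick agrees with Long.compareUnsigned on the samples");
    }
}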

View File

@ -22,9 +22,14 @@
*/
package jdk.vm.ci.code;
import java.util.*;
import java.util.ArrayList;
import java.util.List;
import jdk.vm.ci.meta.*;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.JavaValue;
import jdk.vm.ci.meta.PlatformKind;
import jdk.vm.ci.meta.Value;
/**
* Utility class for working with the {@link Value} class and its subclasses.
@ -60,6 +65,11 @@ public final class ValueUtil {
return value instanceof JavaConstant;
}
public static JavaConstant asConstantJavaValue(JavaValue value) {
assert value != null;
return (JavaConstant) value;
}
public static boolean isAllocatableValue(Value value) {
assert value != null;
return value instanceof AllocatableValue;
@ -80,26 +90,6 @@ public final class ValueUtil {
return (StackSlot) value;
}
public static boolean isStackSlotValue(Value value) {
assert value != null;
return value instanceof StackSlotValue;
}
public static StackSlotValue asStackSlotValue(Value value) {
assert value != null;
return (StackSlotValue) value;
}
public static boolean isVirtualStackSlot(Value value) {
assert value != null;
return value instanceof VirtualStackSlot;
}
public static VirtualStackSlot asVirtualStackSlot(Value value) {
assert value != null;
return (VirtualStackSlot) value;
}
public static boolean isRegister(Value value) {
assert value != null;
return value instanceof RegisterValue;

View File

@ -22,9 +22,15 @@
*/
package jdk.vm.ci.code;
import java.util.*;
import java.util.Arrays;
import java.util.Collections;
import java.util.IdentityHashMap;
import java.util.Set;
import jdk.vm.ci.meta.*;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.JavaValue;
import jdk.vm.ci.meta.ResolvedJavaField;
import jdk.vm.ci.meta.ResolvedJavaType;
/**
* An instance of this class represents an object whose allocation was removed by escape analysis.
@ -134,45 +140,6 @@ public final class VirtualObject implements JavaValue {
return id;
}
private boolean checkValues() {
assert (values == null) == (slotKinds == null);
if (values != null) {
assert values.length == slotKinds.length;
if (!type.isArray()) {
ResolvedJavaField[] fields = type.getInstanceFields(true);
int fieldIndex = 0;
for (int i = 0; i < values.length; i++) {
ResolvedJavaField field = fields[fieldIndex++];
JavaKind valKind = slotKinds[i].getStackKind();
if (field.getJavaKind() == JavaKind.Object) {
assert valKind.isObject() : field + ": " + valKind + " != " + field.getJavaKind();
} else {
if ((valKind == JavaKind.Double || valKind == JavaKind.Long) && field.getJavaKind() == JavaKind.Int) {
assert fields[fieldIndex].getJavaKind() == JavaKind.Int;
fieldIndex++;
} else {
assert valKind == field.getJavaKind().getStackKind() : field + ": " + valKind + " != " + field.getJavaKind();
}
}
}
assert fields.length == fieldIndex : type + ": fields=" + Arrays.toString(fields) + ", field values=" + Arrays.toString(values);
} else {
JavaKind componentKind = type.getComponentType().getJavaKind().getStackKind();
if (componentKind == JavaKind.Object) {
for (int i = 0; i < values.length; i++) {
assert slotKinds[i].isObject() : slotKinds[i] + " != " + componentKind;
}
} else {
for (int i = 0; i < values.length; i++) {
assert slotKinds[i] == componentKind || componentKind.getBitCount() >= slotKinds[i].getBitCount() ||
(componentKind == JavaKind.Int && slotKinds[i].getBitCount() >= JavaKind.Int.getBitCount()) : slotKinds[i] + " != " + componentKind;
}
}
}
}
return true;
}
/**
* Overwrites the current set of values with a new one.
*
@ -183,7 +150,6 @@ public final class VirtualObject implements JavaValue {
public void setValues(JavaValue[] values, JavaKind[] slotKinds) {
this.values = values;
this.slotKinds = slotKinds;
assert checkValues();
}
@Override

View File

@ -1,75 +0,0 @@
/*
* Copyright (c) 2014, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.vm.ci.code;
import jdk.vm.ci.meta.*;
/**
* {@link VirtualStackSlot}s are stack slots that are not yet fixed to a specific frame offset. They
* are replaced by real {@link StackSlot}s with a fixed position in the frame before code emission.
*/
public abstract class VirtualStackSlot extends StackSlotValue {
private final int id;
public VirtualStackSlot(int id, LIRKind lirKind) {
super(lirKind);
this.id = id;
}
public int getId() {
return id;
}
@Override
public String toString() {
return "vstack:" + id + getKindSuffix();
}
@Override
public int hashCode() {
final int prime = 31;
int result = super.hashCode();
result = prime * result + id;
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!super.equals(obj)) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
VirtualStackSlot other = (VirtualStackSlot) obj;
if (id != other.id) {
return false;
}
return true;
}
}

View File

@ -18,10 +18,10 @@
* if you need additional information or have any questions.
*/
/**
* Package that defines the interface between a Java application that wants to install code and the
* runtime. The runtime provides in implementation of the {@link jdk.vm.ci.code.CodeCacheProvider}
* interface. The method
* {@link jdk.vm.ci.code.CodeCacheProvider#addMethod(jdk.vm.ci.meta.ResolvedJavaMethod, CompilationResult, jdk.vm.ci.meta.SpeculationLog, InstalledCode)}
* can be used to install code for a given method.
* Package that defines the interface between a Java application that wants to install code and the runtime.
* The runtime provides an implementation of the {@link jdk.vm.ci.code.CodeCacheProvider} interface.
* The method {@link jdk.vm.ci.code.CodeCacheProvider#addCode(jdk.vm.ci.meta.ResolvedJavaMethod, CompilationResult, jdk.vm.ci.meta.SpeculationLog, InstalledCode)}
* can be used to install code.
*/
package jdk.vm.ci.code;
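
The sketch below illustrates the installation entry point this package description names. The lookup path through JVMCIBackend#getCodeCache(), the class name and the null arguments are assumptions for illustration, not part of the diff:

import jdk.vm.ci.code.CodeCacheProvider;
import jdk.vm.ci.code.CompilationResult;
import jdk.vm.ci.code.InstalledCode;
import jdk.vm.ci.meta.ResolvedJavaMethod;
import jdk.vm.ci.runtime.JVMCIBackend;

// Illustrative helper; backend, method and compResult must be supplied by a real JVMCI client.
final class InstallExample {
    static InstalledCode install(JVMCIBackend backend, ResolvedJavaMethod method, CompilationResult compResult) {
        CodeCacheProvider codeCache = backend.getCodeCache();
        // Signature as described above: a null speculation log, and a null InstalledCode
        // lets the runtime allocate the handle for the freshly installed code.
        return codeCache.addCode(method, compResult, null, null);
    }
}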

View File

@ -22,7 +22,7 @@
*/
package jdk.vm.ci.code.stack;
import jdk.vm.ci.meta.*;
import jdk.vm.ci.meta.ResolvedJavaMethod;
public interface InspectedFrame {

View File

@ -22,7 +22,7 @@
*/
package jdk.vm.ci.code.stack;
import jdk.vm.ci.meta.*;
import jdk.vm.ci.meta.ResolvedJavaMethod;
public interface StackIntrospection {

View File

@ -22,7 +22,8 @@
*/
package jdk.vm.ci.common;
import java.util.*;
import java.util.ArrayList;
import java.util.Locale;
/**
* Indicates a condition in JVMCI related code that should never occur during normal operation.

View File

@ -1,50 +0,0 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.vm.ci.compiler;
import jdk.vm.ci.meta.*;
import jdk.vm.ci.options.*;
public interface Compiler {
int INVOCATION_ENTRY_BCI = -1;
@Option(help = "", type = OptionType.Debug) OptionValue<String> PrintFilter = new OptionValue<>(null);
@Option(help = "", type = OptionType.Debug) OptionValue<Boolean> PrintCompilation = new OptionValue<>(false);
@Option(help = "", type = OptionType.Debug) OptionValue<Boolean> PrintAfterCompilation = new OptionValue<>(false);
@Option(help = "", type = OptionType.Debug) OptionValue<Boolean> PrintBailout = new OptionValue<>(false);
@Option(help = "", type = OptionType.Debug) OptionValue<Boolean> ExitVMOnBailout = new OptionValue<>(false);
@Option(help = "", type = OptionType.Debug) OptionValue<Boolean> ExitVMOnException = new OptionValue<>(true);
@Option(help = "", type = OptionType.Debug) OptionValue<Boolean> PrintStackTraceOnException = new OptionValue<>(false);
/**
* Request the compilation of a method by this JVMCI compiler. The compiler should compile the
* method to machine code and install it in the code cache if the compilation is successful.
*
* @param method the method that should be compiled
* @param entryBCI the BCI at which to start compiling where -1 denotes a non-OSR compilation
* request and all other values denote an OSR compilation request
* @param jvmciEnv pointer to native {@code JVMCIEnv} object
* @param id a unique identifier for this compilation
*/
void compileMethod(ResolvedJavaMethod method, int entryBCI, long jvmciEnv, int id);
}

View File

@ -22,18 +22,26 @@
*/
package jdk.vm.ci.hotspot.amd64;
import static jdk.vm.ci.inittimer.InitTimer.*;
import static jdk.vm.ci.inittimer.InitTimer.timer;
import java.util.*;
import java.util.EnumSet;
import jdk.vm.ci.amd64.*;
import jdk.vm.ci.code.*;
import jdk.vm.ci.compiler.*;
import jdk.vm.ci.hotspot.*;
import jdk.vm.ci.inittimer.*;
import jdk.vm.ci.meta.*;
import jdk.vm.ci.runtime.*;
import jdk.vm.ci.service.*;
import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.code.Architecture;
import jdk.vm.ci.code.RegisterConfig;
import jdk.vm.ci.code.TargetDescription;
import jdk.vm.ci.code.stack.StackIntrospection;
import jdk.vm.ci.hotspot.HotSpotCodeCacheProvider;
import jdk.vm.ci.hotspot.HotSpotConstantReflectionProvider;
import jdk.vm.ci.hotspot.HotSpotJVMCIBackendFactory;
import jdk.vm.ci.hotspot.HotSpotJVMCIRuntimeProvider;
import jdk.vm.ci.hotspot.HotSpotMetaAccessProvider;
import jdk.vm.ci.hotspot.HotSpotStackIntrospection;
import jdk.vm.ci.hotspot.HotSpotVMConfig;
import jdk.vm.ci.inittimer.InitTimer;
import jdk.vm.ci.meta.ConstantReflectionProvider;
import jdk.vm.ci.runtime.JVMCIBackend;
import jdk.vm.ci.service.ServiceProvider;
@ServiceProvider(HotSpotJVMCIBackendFactory.class)
public class AMD64HotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFactory {
@ -68,6 +76,9 @@ public class AMD64HotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFacto
if ((config.x86CPUFeatures & config.cpuLZCNT) != 0) {
features.add(AMD64.CPUFeature.LZCNT);
}
if ((config.x86CPUFeatures & config.cpuERMS) != 0) {
features.add(AMD64.CPUFeature.ERMS);
}
if ((config.x86CPUFeatures & config.cpuAVX) != 0) {
features.add(AMD64.CPUFeature.AVX);
}
@ -77,12 +88,42 @@ public class AMD64HotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFacto
if ((config.x86CPUFeatures & config.cpuAES) != 0) {
features.add(AMD64.CPUFeature.AES);
}
if ((config.x86CPUFeatures & config.cpuERMS) != 0) {
features.add(AMD64.CPUFeature.ERMS);
if ((config.x86CPUFeatures & config.cpu3DNOWPREFETCH) != 0) {
features.add(AMD64.CPUFeature.AMD_3DNOW_PREFETCH);
}
if ((config.x86CPUFeatures & config.cpuBMI1) != 0) {
features.add(AMD64.CPUFeature.BMI1);
}
if ((config.x86CPUFeatures & config.cpuBMI2) != 0) {
features.add(AMD64.CPUFeature.BMI2);
}
if ((config.x86CPUFeatures & config.cpuRTM) != 0) {
features.add(AMD64.CPUFeature.RTM);
}
if ((config.x86CPUFeatures & config.cpuADX) != 0) {
features.add(AMD64.CPUFeature.ADX);
}
if ((config.x86CPUFeatures & config.cpuAVX512F) != 0) {
features.add(AMD64.CPUFeature.AVX512F);
}
if ((config.x86CPUFeatures & config.cpuAVX512DQ) != 0) {
features.add(AMD64.CPUFeature.AVX512DQ);
}
if ((config.x86CPUFeatures & config.cpuAVX512PF) != 0) {
features.add(AMD64.CPUFeature.AVX512PF);
}
if ((config.x86CPUFeatures & config.cpuAVX512ER) != 0) {
features.add(AMD64.CPUFeature.AVX512ER);
}
if ((config.x86CPUFeatures & config.cpuAVX512CD) != 0) {
features.add(AMD64.CPUFeature.AVX512CD);
}
if ((config.x86CPUFeatures & config.cpuAVX512BW) != 0) {
features.add(AMD64.CPUFeature.AVX512BW);
}
if ((config.x86CPUFeatures & config.cpuAVX512VL) != 0) {
features.add(AMD64.CPUFeature.AVX512VL);
}
return features;
}
@ -97,12 +138,12 @@ public class AMD64HotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFacto
return flags;
}
protected TargetDescription createTarget(HotSpotVMConfig config, CompilerFactory compilerFactory) {
protected TargetDescription createTarget(HotSpotVMConfig config) {
final int stackFrameAlignment = 16;
final int implicitNullCheckLimit = 4096;
final boolean inlineObjects = true;
Architecture arch = new AMD64(computeFeatures(config), computeFlags(config));
return new TargetDescription(compilerFactory.initializeArchitecture(arch), true, stackFrameAlignment, implicitNullCheckLimit, inlineObjects);
return new TargetDescription(arch, true, stackFrameAlignment, implicitNullCheckLimit, inlineObjects);
}
protected HotSpotConstantReflectionProvider createConstantReflection(HotSpotJVMCIRuntimeProvider runtime) {
@ -132,15 +173,16 @@ public class AMD64HotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFacto
}
@SuppressWarnings("try")
public JVMCIBackend createJVMCIBackend(HotSpotJVMCIRuntimeProvider runtime, CompilerFactory compilerFactory, JVMCIBackend host) {
public JVMCIBackend createJVMCIBackend(HotSpotJVMCIRuntimeProvider runtime, JVMCIBackend host) {
assert host == null;
TargetDescription target = createTarget(runtime.getConfig(), compilerFactory);
TargetDescription target = createTarget(runtime.getConfig());
RegisterConfig regConfig;
HotSpotCodeCacheProvider codeCache;
ConstantReflectionProvider constantReflection;
HotSpotMetaAccessProvider metaAccess;
StackIntrospection stackIntrospection;
try (InitTimer t = timer("create providers")) {
try (InitTimer rt = timer("create MetaAccess provider")) {
metaAccess = createMetaAccess(runtime);
@ -154,13 +196,16 @@ public class AMD64HotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFacto
try (InitTimer rt = timer("create ConstantReflection provider")) {
constantReflection = createConstantReflection(runtime);
}
try (InitTimer rt = timer("create StackIntrospection provider")) {
stackIntrospection = new HotSpotStackIntrospection(runtime);
}
}
try (InitTimer rt = timer("instantiate backend")) {
return createBackend(metaAccess, codeCache, constantReflection);
return createBackend(metaAccess, codeCache, constantReflection, stackIntrospection);
}
}
protected JVMCIBackend createBackend(HotSpotMetaAccessProvider metaAccess, HotSpotCodeCacheProvider codeCache, ConstantReflectionProvider constantReflection) {
return new JVMCIBackend(metaAccess, codeCache, constantReflection);
protected JVMCIBackend createBackend(HotSpotMetaAccessProvider metaAccess, HotSpotCodeCacheProvider codeCache, ConstantReflectionProvider constantReflection, StackIntrospection stackIntrospection) {
return new JVMCIBackend(metaAccess, codeCache, constantReflection, stackIntrospection);
}
}
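
Every block above masks config.x86CPUFeatures with one HotSpotVMConfig flag and, when the bit is set, records the matching AMD64.CPUFeature. A self-contained sketch of that pattern with invented flag values (nothing below comes from HotSpotVMConfig):

import java.util.EnumSet;

// Stand-alone illustration of the flag-to-feature mapping; the bit values are placeholders.
final class FeatureMapDemo {
    enum CpuFeature { SSE, AVX, AVX2 }

    static EnumSet<CpuFeature> computeFeatures(long cpuFeatureWord) {
        final long sseBit = 1L, avxBit = 2L, avx2Bit = 4L;      // hypothetical flag constants
        EnumSet<CpuFeature> features = EnumSet.noneOf(CpuFeature.class);
        if ((cpuFeatureWord & sseBit) != 0) {
            features.add(CpuFeature.SSE);
        }
        if ((cpuFeatureWord & avxBit) != 0) {
            features.add(CpuFeature.AVX);
        }
        if ((cpuFeatureWord & avx2Bit) != 0) {
            features.add(CpuFeature.AVX2);
        }
        return features;
    }

    public static void main(String[] args) {
        System.out.println(computeFeatures(5L));                // prints [SSE, AVX2]
    }
}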

View File

@ -22,16 +22,47 @@
*/
package jdk.vm.ci.hotspot.amd64;
import static jdk.vm.ci.amd64.AMD64.*;
import static jdk.vm.ci.amd64.AMD64.r12;
import static jdk.vm.ci.amd64.AMD64.r15;
import static jdk.vm.ci.amd64.AMD64.r8;
import static jdk.vm.ci.amd64.AMD64.r9;
import static jdk.vm.ci.amd64.AMD64.rax;
import static jdk.vm.ci.amd64.AMD64.rcx;
import static jdk.vm.ci.amd64.AMD64.rdi;
import static jdk.vm.ci.amd64.AMD64.rdx;
import static jdk.vm.ci.amd64.AMD64.rsi;
import static jdk.vm.ci.amd64.AMD64.rsp;
import static jdk.vm.ci.amd64.AMD64.xmm0;
import static jdk.vm.ci.amd64.AMD64.xmm1;
import static jdk.vm.ci.amd64.AMD64.xmm2;
import static jdk.vm.ci.amd64.AMD64.xmm3;
import static jdk.vm.ci.amd64.AMD64.xmm4;
import static jdk.vm.ci.amd64.AMD64.xmm5;
import static jdk.vm.ci.amd64.AMD64.xmm6;
import static jdk.vm.ci.amd64.AMD64.xmm7;
import java.util.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import jdk.vm.ci.amd64.*;
import jdk.vm.ci.code.*;
import jdk.vm.ci.code.CallingConvention.*;
import jdk.vm.ci.common.*;
import jdk.vm.ci.hotspot.*;
import jdk.vm.ci.meta.*;
import jdk.vm.ci.code.Architecture;
import jdk.vm.ci.code.CallingConvention;
import jdk.vm.ci.code.CallingConvention.Type;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.RegisterAttributes;
import jdk.vm.ci.code.RegisterConfig;
import jdk.vm.ci.code.StackSlot;
import jdk.vm.ci.code.TargetDescription;
import jdk.vm.ci.common.JVMCIError;
import jdk.vm.ci.hotspot.HotSpotVMConfig;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.JavaType;
import jdk.vm.ci.meta.LIRKind;
import jdk.vm.ci.meta.PlatformKind;
import jdk.vm.ci.meta.Value;
public class AMD64HotSpotRegisterConfig implements RegisterConfig {
@ -86,28 +117,30 @@ public class AMD64HotSpotRegisterConfig implements RegisterConfig {
*/
private final boolean needsNativeStackHomeSpace;
private static Register[] initAllocatable(boolean reserveForHeapBase) {
Register[] registers = null;
// @formatter:off
if (reserveForHeapBase) {
registers = new Register[] {
rax, rbx, rcx, rdx, /*rsp,*/ rbp, rsi, rdi, r8, r9, r10, r11, /*r12,*/ r13, r14, /*r15, */
xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,
xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15
};
} else {
registers = new Register[] {
rax, rbx, rcx, rdx, /*rsp,*/ rbp, rsi, rdi, r8, r9, r10, r11, r12, r13, r14, /*r15, */
xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,
xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15
};
private static Register[] initAllocatable(Architecture arch, boolean reserveForHeapBase) {
Register[] allRegisters = arch.getAvailableValueRegisters();
Register[] registers = new Register[allRegisters.length - (reserveForHeapBase ? 3 : 2)];
int idx = 0;
for (Register reg : allRegisters) {
if (reg.equals(rsp) || reg.equals(r15)) {
// skip stack pointer and thread register
continue;
}
if (reserveForHeapBase && reg.equals(r12)) {
// skip heap base register
continue;
}
registers[idx++] = reg;
}
// @formatter:on
assert idx == registers.length;
return registers;
}
public AMD64HotSpotRegisterConfig(Architecture architecture, HotSpotVMConfig config) {
this(architecture, config, initAllocatable(config.useCompressedOops));
this(architecture, config, initAllocatable(architecture, config.useCompressedOops));
assert callerSaved.length >= allocatable.length;
}
@ -125,7 +158,7 @@ public class AMD64HotSpotRegisterConfig implements RegisterConfig {
this.needsNativeStackHomeSpace = false;
}
this.allocatable = allocatable.clone();
this.allocatable = allocatable;
Set<Register> callerSaveSet = new HashSet<>();
Collections.addAll(callerSaveSet, allocatable);
Collections.addAll(callerSaveSet, xmmParameterRegisters);
@ -134,7 +167,7 @@ public class AMD64HotSpotRegisterConfig implements RegisterConfig {
callerSaved = callerSaveSet.toArray(new Register[callerSaveSet.size()]);
allAllocatableAreCallerSaved = true;
attributesMap = RegisterAttributes.createMap(this, AMD64.allRegisters);
attributesMap = RegisterAttributes.createMap(this, architecture.getRegisters());
}
@Override
@ -221,7 +254,7 @@ public class AMD64HotSpotRegisterConfig implements RegisterConfig {
if (locations[i] == null) {
LIRKind lirKind = target.getLIRKind(kind);
locations[i] = StackSlot.get(lirKind, currentStackOffset, !type.out);
currentStackOffset += Math.max(target.getSizeInBytes(lirKind.getPlatformKind()), target.wordSize);
currentStackOffset += Math.max(lirKind.getPlatformKind().getSizeInBytes(), target.wordSize);
}
}
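
A hedged usage sketch of the public constructor and getAllocatableRegisters() shown in this file; the arch and config arguments must come from a running HotSpot JVMCI runtime and cannot be fabricated here:

import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.RegisterConfig;
import jdk.vm.ci.hotspot.HotSpotVMConfig;
import jdk.vm.ci.hotspot.amd64.AMD64HotSpotRegisterConfig;

// Sketch only; arch and config are assumed to be handed in by the runtime.
final class RegisterConfigDemo {
    static void printAllocatable(AMD64 arch, HotSpotVMConfig config) {
        RegisterConfig regConfig = new AMD64HotSpotRegisterConfig(arch, config);
        for (Register r : regConfig.getAllocatableRegisters()) {
            // rsp, r15 (thread) and, with compressed oops, r12 (heap base) are filtered out above.
            System.out.println(r);
        }
    }
}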

View File

@ -22,28 +22,36 @@
*/
package jdk.vm.ci.hotspot.sparc;
import static jdk.vm.ci.inittimer.InitTimer.*;
import static jdk.vm.ci.inittimer.InitTimer.timer;
import java.util.*;
import java.util.EnumSet;
import jdk.vm.ci.code.*;
import jdk.vm.ci.compiler.*;
import jdk.vm.ci.hotspot.*;
import jdk.vm.ci.inittimer.*;
import jdk.vm.ci.runtime.*;
import jdk.vm.ci.service.*;
import jdk.vm.ci.sparc.*;
import jdk.vm.ci.code.Architecture;
import jdk.vm.ci.code.RegisterConfig;
import jdk.vm.ci.code.TargetDescription;
import jdk.vm.ci.code.stack.StackIntrospection;
import jdk.vm.ci.hotspot.HotSpotCodeCacheProvider;
import jdk.vm.ci.hotspot.HotSpotConstantReflectionProvider;
import jdk.vm.ci.hotspot.HotSpotJVMCIBackendFactory;
import jdk.vm.ci.hotspot.HotSpotJVMCIRuntimeProvider;
import jdk.vm.ci.hotspot.HotSpotMetaAccessProvider;
import jdk.vm.ci.hotspot.HotSpotStackIntrospection;
import jdk.vm.ci.hotspot.HotSpotVMConfig;
import jdk.vm.ci.inittimer.InitTimer;
import jdk.vm.ci.runtime.JVMCIBackend;
import jdk.vm.ci.service.ServiceProvider;
import jdk.vm.ci.sparc.SPARC;
import jdk.vm.ci.sparc.SPARC.CPUFeature;
@ServiceProvider(HotSpotJVMCIBackendFactory.class)
public class SPARCHotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFactory {
protected TargetDescription createTarget(HotSpotVMConfig config, CompilerFactory compilerFactory) {
protected TargetDescription createTarget(HotSpotVMConfig config) {
final int stackFrameAlignment = 16;
final int implicitNullCheckLimit = 4096;
final boolean inlineObjects = false;
Architecture arch = new SPARC(computeFeatures(config));
return new TargetDescription(compilerFactory.initializeArchitecture(arch), true, stackFrameAlignment, implicitNullCheckLimit, inlineObjects);
return new TargetDescription(arch, true, stackFrameAlignment, implicitNullCheckLimit, inlineObjects);
}
protected HotSpotCodeCacheProvider createCodeCache(HotSpotJVMCIRuntimeProvider runtime, TargetDescription target, RegisterConfig regConfig) {
@ -64,8 +72,62 @@ public class SPARCHotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFacto
if ((config.sparcFeatures & config.cbcondInstructions) != 0) {
features.add(CPUFeature.CBCOND);
}
if (config.useBlockZeroing) {
features.add(CPUFeature.BLOCK_ZEROING);
if ((config.sparcFeatures & config.v8Instructions) != 0) {
features.add(CPUFeature.V8);
}
if ((config.sparcFeatures & config.hardwareMul32) != 0) {
features.add(CPUFeature.HARDWARE_MUL32);
}
if ((config.sparcFeatures & config.hardwareDiv32) != 0) {
features.add(CPUFeature.HARDWARE_DIV32);
}
if ((config.sparcFeatures & config.hardwareFsmuld) != 0) {
features.add(CPUFeature.HARDWARE_FSMULD);
}
if ((config.sparcFeatures & config.hardwarePopc) != 0) {
features.add(CPUFeature.HARDWARE_POPC);
}
if ((config.sparcFeatures & config.v9Instructions) != 0) {
features.add(CPUFeature.V9);
}
if ((config.sparcFeatures & config.sun4v) != 0) {
features.add(CPUFeature.SUN4V);
}
if ((config.sparcFeatures & config.blkInitInstructions) != 0) {
features.add(CPUFeature.BLK_INIT_INSTRUCTIONS);
}
if ((config.sparcFeatures & config.fmafInstructions) != 0) {
features.add(CPUFeature.FMAF);
}
if ((config.sparcFeatures & config.fmauInstructions) != 0) {
features.add(CPUFeature.FMAU);
}
if ((config.sparcFeatures & config.sparc64Family) != 0) {
features.add(CPUFeature.SPARC64_FAMILY);
}
if ((config.sparcFeatures & config.mFamily) != 0) {
features.add(CPUFeature.M_FAMILY);
}
if ((config.sparcFeatures & config.tFamily) != 0) {
features.add(CPUFeature.T_FAMILY);
}
if ((config.sparcFeatures & config.t1Model) != 0) {
features.add(CPUFeature.T1_MODEL);
}
if ((config.sparcFeatures & config.sparc5Instructions) != 0) {
features.add(CPUFeature.SPARC5);
}
if ((config.sparcFeatures & config.aesInstructions) != 0) {
features.add(CPUFeature.AES);
}
if ((config.sparcFeatures & config.sha1Instruction) != 0) {
features.add(CPUFeature.SHA1);
}
if ((config.sparcFeatures & config.sha256Instruction) != 0) {
features.add(CPUFeature.SHA256);
}
if ((config.sparcFeatures & config.sha512Instruction) != 0) {
features.add(CPUFeature.SHA512);
}
return features;
}
@ -81,20 +143,22 @@ public class SPARCHotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFacto
}
@SuppressWarnings("try")
public JVMCIBackend createJVMCIBackend(HotSpotJVMCIRuntimeProvider runtime, CompilerFactory compilerFactory, JVMCIBackend host) {
public JVMCIBackend createJVMCIBackend(HotSpotJVMCIRuntimeProvider runtime, JVMCIBackend host) {
assert host == null;
TargetDescription target = createTarget(runtime.getConfig(), compilerFactory);
TargetDescription target = createTarget(runtime.getConfig());
HotSpotMetaAccessProvider metaAccess = new HotSpotMetaAccessProvider(runtime);
RegisterConfig regConfig = new SPARCHotSpotRegisterConfig(target, runtime.getConfig());
RegisterConfig regConfig = new SPARCHotSpotRegisterConfig(target.arch, runtime.getConfig());
HotSpotCodeCacheProvider codeCache = createCodeCache(runtime, target, regConfig);
HotSpotConstantReflectionProvider constantReflection = new HotSpotConstantReflectionProvider(runtime);
StackIntrospection stackIntrospection = new HotSpotStackIntrospection(runtime);
try (InitTimer rt = timer("instantiate backend")) {
return createBackend(metaAccess, codeCache, constantReflection);
return createBackend(metaAccess, codeCache, constantReflection, stackIntrospection);
}
}
protected JVMCIBackend createBackend(HotSpotMetaAccessProvider metaAccess, HotSpotCodeCacheProvider codeCache, HotSpotConstantReflectionProvider constantReflection) {
return new JVMCIBackend(metaAccess, codeCache, constantReflection);
protected JVMCIBackend createBackend(HotSpotMetaAccessProvider metaAccess, HotSpotCodeCacheProvider codeCache, HotSpotConstantReflectionProvider constantReflection,
StackIntrospection stackIntrospection) {
return new JVMCIBackend(metaAccess, codeCache, constantReflection, stackIntrospection);
}
}

View File

@ -22,16 +22,72 @@
*/
package jdk.vm.ci.hotspot.sparc;
import static jdk.vm.ci.sparc.SPARC.*;
import static jdk.vm.ci.code.CallingConvention.Type.JavaCall;
import static jdk.vm.ci.code.CallingConvention.Type.JavaCallee;
import static jdk.vm.ci.code.CallingConvention.Type.NativeCall;
import static jdk.vm.ci.meta.JavaKind.Void;
import static jdk.vm.ci.meta.Value.ILLEGAL;
import static jdk.vm.ci.sparc.SPARC.REGISTER_SAFE_AREA_SIZE;
import static jdk.vm.ci.sparc.SPARC.d0;
import static jdk.vm.ci.sparc.SPARC.d2;
import static jdk.vm.ci.sparc.SPARC.d4;
import static jdk.vm.ci.sparc.SPARC.d6;
import static jdk.vm.ci.sparc.SPARC.f0;
import static jdk.vm.ci.sparc.SPARC.f1;
import static jdk.vm.ci.sparc.SPARC.f2;
import static jdk.vm.ci.sparc.SPARC.f3;
import static jdk.vm.ci.sparc.SPARC.f4;
import static jdk.vm.ci.sparc.SPARC.f5;
import static jdk.vm.ci.sparc.SPARC.f6;
import static jdk.vm.ci.sparc.SPARC.f7;
import static jdk.vm.ci.sparc.SPARC.g0;
import static jdk.vm.ci.sparc.SPARC.g2;
import static jdk.vm.ci.sparc.SPARC.g6;
import static jdk.vm.ci.sparc.SPARC.i0;
import static jdk.vm.ci.sparc.SPARC.i1;
import static jdk.vm.ci.sparc.SPARC.i2;
import static jdk.vm.ci.sparc.SPARC.i3;
import static jdk.vm.ci.sparc.SPARC.i4;
import static jdk.vm.ci.sparc.SPARC.i5;
import static jdk.vm.ci.sparc.SPARC.i6;
import static jdk.vm.ci.sparc.SPARC.i7;
import static jdk.vm.ci.sparc.SPARC.l0;
import static jdk.vm.ci.sparc.SPARC.l1;
import static jdk.vm.ci.sparc.SPARC.l2;
import static jdk.vm.ci.sparc.SPARC.l3;
import static jdk.vm.ci.sparc.SPARC.l4;
import static jdk.vm.ci.sparc.SPARC.l5;
import static jdk.vm.ci.sparc.SPARC.l6;
import static jdk.vm.ci.sparc.SPARC.l7;
import static jdk.vm.ci.sparc.SPARC.o0;
import static jdk.vm.ci.sparc.SPARC.o1;
import static jdk.vm.ci.sparc.SPARC.o2;
import static jdk.vm.ci.sparc.SPARC.o3;
import static jdk.vm.ci.sparc.SPARC.o4;
import static jdk.vm.ci.sparc.SPARC.o5;
import static jdk.vm.ci.sparc.SPARC.sp;
import java.util.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import jdk.vm.ci.code.*;
import jdk.vm.ci.code.CallingConvention.*;
import jdk.vm.ci.common.*;
import jdk.vm.ci.hotspot.*;
import jdk.vm.ci.meta.*;
import jdk.vm.ci.sparc.*;
import jdk.vm.ci.code.Architecture;
import jdk.vm.ci.code.CallingConvention;
import jdk.vm.ci.code.CallingConvention.Type;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.RegisterAttributes;
import jdk.vm.ci.code.RegisterConfig;
import jdk.vm.ci.code.StackSlot;
import jdk.vm.ci.code.TargetDescription;
import jdk.vm.ci.common.JVMCIError;
import jdk.vm.ci.hotspot.HotSpotVMConfig;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.JavaType;
import jdk.vm.ci.meta.LIRKind;
import jdk.vm.ci.meta.PlatformKind;
import jdk.vm.ci.sparc.SPARC;
public class SPARCHotSpotRegisterConfig implements RegisterConfig {
@ -41,6 +97,11 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
private final RegisterAttributes[] attributesMap;
/**
* Does native code (C++ code) spill arguments in registers to the parent frame?
*/
private final boolean addNativeRegisterArgumentSlots;
@Override
public Register[] getAllocatableRegisters() {
return allocatable.clone();
@ -50,22 +111,9 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
ArrayList<Register> list = new ArrayList<>();
for (Register reg : registers) {
if (architecture.canStoreValue(reg.getRegisterCategory(), kind)) {
// Special treatment for double precision
// TODO: This is wasteful it uses only half of the registers as float.
if (kind == JavaKind.Double) {
if (reg.getRegisterCategory().equals(FPUd)) {
list.add(reg);
}
} else if (kind == JavaKind.Float) {
if (reg.getRegisterCategory().equals(FPUs)) {
list.add(reg);
}
} else {
list.add(reg);
}
list.add(reg);
}
}
Register[] ret = list.toArray(new Register[list.size()]);
return ret;
}
@ -78,76 +126,57 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
private final Register[] cpuCallerParameterRegisters = {o0, o1, o2, o3, o4, o5};
private final Register[] cpuCalleeParameterRegisters = {i0, i1, i2, i3, i4, i5};
private final Register[] fpuParameterRegisters = {f0, f1, f2, f3, f4, f5, f6, f7};
private final Register[] fpuFloatParameterRegisters = {f0, f1, f2, f3, f4, f5, f6, f7};
private final Register[] fpuDoubleParameterRegisters = {d0, null, d2, null, d4, null, d6, null};
// @formatter:off
private final Register[] callerSaveRegisters =
{g1, g2, g3, g4, g5, g6, g7,
o0, o1, o2, o3, o4, o5, o7,
f0, f1, f2, f3, f4, f5, f6, f7,
f8, f9, f10, f11, f12, f13, f14, f15,
f16, f17, f18, f19, f20, f21, f22, f23,
f24, f25, f26, f27, f28, f29, f30, f31,
d32, d34, d36, d38, d40, d42, d44, d46,
d48, d50, d52, d54, d56, d58, d60, d62};
// @formatter:on
private final Register[] callerSaveRegisters;
/**
* Registers saved by the callee. This lists all L and I registers which are saved in the
* register window.
*/
private final Register[] calleeSaveRegisters = {l0, l1, l2, l3, l4, l5, l6, l7, i0, i1, i2, i3, i4, i5, i6, i7};
private final Register[] calleeSaveRegisters = {
l0, l1, l2, l3, l4, l5, l6, l7,
i0, i1, i2, i3, i4, i5, i6, i7};
// @formatter:on
private static Register[] initAllocatable(boolean reserveForHeapBase) {
Register[] registers = null;
if (reserveForHeapBase) {
// @formatter:off
registers = new Register[]{
// TODO this is not complete
// o7 cannot be used as register because it is always overwritten on call
// and the current register handler would ignore this fact if the called
// method still does not modify registers, in fact o7 is modified by the Call instruction
// There would be some extra handling necessary to be able to handle o7 properly for local usage
g1, g4, g5,
o0, o1, o2, o3, o4, o5, /*o6,o7,*/
l0, l1, l2, l3, l4, l5, l6, l7,
i0, i1, i2, i3, i4, i5, /*i6,*/ /*i7,*/
//f0, f1, f2, f3, f4, f5, f6, f7,
f8, f9, f10, f11, f12, f13, f14, f15,
f16, f17, f18, f19, f20, f21, f22, f23,
f24, f25, f26, f27, f28, f29, f30, f31,
d32, d34, d36, d38, d40, d42, d44, d46,
d48, d50, d52, d54, d56, d58, d60, d62
};
// @formatter:on
} else {
// @formatter:off
registers = new Register[]{
// TODO this is not complete
g1, g4, g5,
o0, o1, o2, o3, o4, o5, /*o6, o7,*/
l0, l1, l2, l3, l4, l5, l6, l7,
i0, i1, i2, i3, i4, i5, /*i6,*/ /*i7,*/
// f0, f1, f2, f3, f4, f5, f6, f7
f8, f9, f10, f11, f12, f13, f14, f15,
f16, f17, f18, f19, f20, f21, f22, f23,
f24, f25, f26, f27, f28, f29, f30, f31,
d32, d34, d36, d38, d40, d42, d44, d46,
d48, d50, d52, d54, d56, d58, d60, d62
};
// @formatter:on
private static Register[] initAllocatable(Architecture arch, boolean reserveForHeapBase) {
Register[] allRegisters = arch.getAvailableValueRegisters();
Register[] registers = new Register[allRegisters.length - (reserveForHeapBase ? 4 : 3)];
int idx = 0;
for (Register reg : allRegisters) {
if (reg.equals(sp) || reg.equals(g2) || reg.equals(g0)) {
// skip g0, stack pointer and thread register
continue;
}
if (reserveForHeapBase && reg.equals(g6)) {
// skip heap base register
continue;
}
registers[idx++] = reg;
}
assert idx == registers.length;
return registers;
}
public SPARCHotSpotRegisterConfig(TargetDescription target, HotSpotVMConfig config) {
this(target, initAllocatable(config.useCompressedOops));
public SPARCHotSpotRegisterConfig(Architecture arch, HotSpotVMConfig config) {
this(arch, initAllocatable(arch, config.useCompressedOops), config);
}
public SPARCHotSpotRegisterConfig(TargetDescription target, Register[] allocatable) {
this.architecture = target.arch;
public SPARCHotSpotRegisterConfig(Architecture arch, Register[] allocatable, HotSpotVMConfig config) {
this.architecture = arch;
this.allocatable = allocatable.clone();
this.addNativeRegisterArgumentSlots = config.linuxOs;
HashSet<Register> callerSaveSet = new HashSet<>();
Collections.addAll(callerSaveSet, arch.getAvailableValueRegisters());
for (Register cs : calleeSaveRegisters) {
callerSaveSet.remove(cs);
}
this.callerSaveRegisters = callerSaveSet.toArray(new Register[callerSaveSet.size()]);
attributesMap = RegisterAttributes.createMap(this, SPARC.allRegisters);
}
@ -172,21 +201,31 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
@Override
public CallingConvention getCallingConvention(Type type, JavaType returnType, JavaType[] parameterTypes, TargetDescription target, boolean stackOnly) {
if (type == Type.JavaCall || type == Type.NativeCall) {
if (type == JavaCall || type == NativeCall) {
return callingConvention(cpuCallerParameterRegisters, returnType, parameterTypes, type, target, stackOnly);
}
if (type == Type.JavaCallee) {
if (type == JavaCallee) {
return callingConvention(cpuCalleeParameterRegisters, returnType, parameterTypes, type, target, stackOnly);
}
throw JVMCIError.shouldNotReachHere();
}
public Register[] getCallingConventionRegisters(Type type, JavaKind kind) {
if (architecture.canStoreValue(FPUs, kind) || architecture.canStoreValue(FPUd, kind)) {
return fpuParameterRegisters;
switch (kind) {
case Boolean:
case Byte:
case Short:
case Char:
case Int:
case Long:
case Object:
return type == Type.JavaCallee ? cpuCalleeParameterRegisters : cpuCallerParameterRegisters;
case Double:
case Float:
return fpuFloatParameterRegisters;
default:
throw JVMCIError.shouldNotReachHere("Unknown JavaKind " + kind);
}
assert architecture.canStoreValue(CPU, kind);
return type == Type.JavaCallee ? cpuCalleeParameterRegisters : cpuCallerParameterRegisters;
}
private CallingConvention callingConvention(Register[] generalParameterRegisters, JavaType returnType, JavaType[] parameterTypes, Type type, TargetDescription target, boolean stackOnly) {
@ -213,7 +252,7 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
}
break;
case Double:
if (!stackOnly && currentFloating < fpuParameterRegisters.length) {
if (!stackOnly && currentFloating < fpuFloatParameterRegisters.length) {
if (currentFloating % 2 != 0) {
// Make register number even to be a double reg
currentFloating++;
@ -224,8 +263,8 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
}
break;
case Float:
if (!stackOnly && currentFloating < fpuParameterRegisters.length) {
Register register = fpuParameterRegisters[currentFloating++];
if (!stackOnly && currentFloating < fpuFloatParameterRegisters.length) {
Register register = fpuFloatParameterRegisters[currentFloating++];
locations[i] = register.asValue(target.getLIRKind(kind));
}
break;
@ -234,20 +273,27 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
}
if (locations[i] == null) {
LIRKind lirKind = target.getLIRKind(kind);
// Stack slot is always aligned to its size in bytes but minimum wordsize
int typeSize = SPARC.spillSlotSize(target, kind);
int typeSize = lirKind.getPlatformKind().getSizeInBytes();
currentStackOffset = roundUp(currentStackOffset, typeSize);
int slotOffset = currentStackOffset + SPARC.REGISTER_SAFE_AREA_SIZE;
locations[i] = StackSlot.get(target.getLIRKind(kind.getStackKind()), slotOffset, !type.out);
int slotOffset = currentStackOffset + REGISTER_SAFE_AREA_SIZE;
locations[i] = StackSlot.get(lirKind, slotOffset, !type.out);
currentStackOffset += typeSize;
}
}
JavaKind returnKind = returnType == null ? JavaKind.Void : returnType.getJavaKind();
AllocatableValue returnLocation = returnKind == JavaKind.Void ? Value.ILLEGAL : getReturnRegister(returnKind, type).asValue(target.getLIRKind(returnKind.getStackKind()));
// Space where callee may spill outgoing parameters o0...o5
int lowerOutgoingSpace = Math.min(locations.length, 6) * target.wordSize;
return new CallingConvention(currentStackOffset + lowerOutgoingSpace, returnLocation, locations);
JavaKind returnKind = returnType == null ? Void : returnType.getJavaKind();
AllocatableValue returnLocation = returnKind == Void ? ILLEGAL : getReturnRegister(returnKind, type).asValue(target.getLIRKind(returnKind.getStackKind()));
int outArgSpillArea;
if (type == NativeCall && addNativeRegisterArgumentSlots) {
// Space for native callee which may spill our outgoing arguments
outArgSpillArea = Math.min(locations.length, generalParameterRegisters.length) * target.wordSize;
} else {
outArgSpillArea = 0;
}
return new CallingConvention(currentStackOffset + outArgSpillArea, returnLocation, locations);
}
private static int roundUp(int number, int mod) {
@ -256,7 +302,7 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
@Override
public Register getReturnRegister(JavaKind kind) {
return getReturnRegister(kind, Type.JavaCallee);
return getReturnRegister(kind, JavaCallee);
}
private static Register getReturnRegister(JavaKind kind, Type type) {
@ -268,7 +314,7 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
case Int:
case Long:
case Object:
return type == Type.JavaCallee ? i0 : o0;
return type == JavaCallee ? i0 : o0;
case Float:
return f0;
case Double:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,6 +23,7 @@
package jdk.vm.ci.hotspot;
import static jdk.vm.ci.hotspot.HotSpotJVMCIRuntime.runtime;
import static jdk.vm.ci.inittimer.InitTimer.timer;
import java.lang.reflect.Constructor;
@ -36,7 +37,6 @@ import jdk.vm.ci.inittimer.InitTimer;
import jdk.vm.ci.meta.JavaType;
import jdk.vm.ci.meta.ResolvedJavaMethod;
import jdk.vm.ci.meta.ResolvedJavaType;
import jdk.vm.ci.meta.SpeculationLog;
import sun.misc.Unsafe;
/**
@ -44,7 +44,7 @@ import sun.misc.Unsafe;
* pointer as an argument (e.g., {@link #getSymbol(long)}) is undefined if the argument does not
* denote a valid native object.
*/
public final class CompilerToVM {
final class CompilerToVM {
/**
* Initializes the native part of the JVMCI runtime.
*/
@ -61,6 +61,14 @@ public final class CompilerToVM {
}
}
/**
* Gets the {@link CompilerToVM} instance associated with the singleton
* {@link HotSpotJVMCIRuntime} instance.
*/
public static CompilerToVM compilerToVM() {
return runtime().getCompilerToVM();
}
/**
* Copies the original bytecode of {@code method} into a new byte array and returns it.
*
@ -301,7 +309,7 @@ public final class CompilerToVM {
* {@link HotSpotVMConfig#codeInstallResultDependenciesFailed} or
* {@link HotSpotVMConfig#codeInstallResultDependenciesInvalid}.
*/
public native int installCode(TargetDescription target, HotSpotCompiledCode compiledCode, InstalledCode code, SpeculationLog speculationLog);
native int installCode(TargetDescription target, HotSpotCompiledCode compiledCode, InstalledCode code, HotSpotSpeculationLog speculationLog);
public native int getMetadata(TargetDescription target, HotSpotCompiledCode compiledCode, HotSpotMetaData metaData);
@ -317,18 +325,18 @@ public final class CompilerToVM {
* @param timeUnitsPerSecond the granularity of the units for the {@code time} value
* @param installedCode the nmethod installed as a result of the compilation
*/
public synchronized native void notifyCompilationStatistics(int id, HotSpotResolvedJavaMethodImpl method, boolean osr, int processedBytecodes, long time, long timeUnitsPerSecond,
synchronized native void notifyCompilationStatistics(int id, HotSpotResolvedJavaMethodImpl method, boolean osr, int processedBytecodes, long time, long timeUnitsPerSecond,
InstalledCode installedCode);
/**
* Resets all compilation statistics.
*/
public native void resetCompilationStatistics();
native void resetCompilationStatistics();
/**
* Initializes the fields of {@code config}.
*/
native long initializeConfiguration();
native long initializeConfiguration(HotSpotVMConfig config);
/**
* Resolves the implementation of {@code method} for virtual dispatches on objects of dynamic
@ -367,7 +375,7 @@ public final class CompilerToVM {
* @param address an address that may be called from any code in the code cache
* @return -1 if {@code address == 0}
*/
public native long getMaxCallTargetOffset(long address);
native long getMaxCallTargetOffset(long address);
/**
* Gets a textual disassembly of {@code codeBlob}.
@ -376,7 +384,7 @@ public final class CompilerToVM {
* {@code codeBlob} could not be disassembled for some reason
*/
// The HotSpot disassembler seems not to be thread safe so it's better to synchronize its usage
public synchronized native String disassembleCodeBlob(long codeBlob);
synchronized native String disassembleCodeBlob(InstalledCode installedCode);
/**
* Gets a stack trace element for {@code method} at bytecode index {@code bci}.
@ -454,12 +462,12 @@ public final class CompilerToVM {
* Invalidates {@code installedCode} such that {@link InvalidInstalledCodeException} will be
* raised the next time {@code installedCode} is executed.
*/
public native void invalidateInstalledCode(InstalledCode installedCode);
native void invalidateInstalledCode(InstalledCode installedCode);
/**
* Collects the current values of all JVMCI benchmark counters, summed up over all threads.
*/
public native long[] collectCounters();
native long[] collectCounters();
/**
* Determines if {@code metaspaceMethodData} is mature.
@ -489,7 +497,7 @@ public final class CompilerToVM {
* @param methods the methods to look for, where {@code null} means that any frame is returned
* @return the frame, or {@code null} if the end of the stack was reached during the search
*/
public native HotSpotStackFrameReference getNextStackFrame(HotSpotStackFrameReference frame, HotSpotResolvedJavaMethodImpl[] methods, int initialSkip);
native HotSpotStackFrameReference getNextStackFrame(HotSpotStackFrameReference frame, ResolvedJavaMethod[] methods, int initialSkip);
/**
* Materializes all virtual objects within {@code stackFrame} updates its locals.
@ -512,30 +520,34 @@ public final class CompilerToVM {
/**
* Determines if debug info should also be emitted at non-safepoint locations.
*/
public native boolean shouldDebugNonSafepoints();
native boolean shouldDebugNonSafepoints();
/**
* Writes {@code length} bytes from {@code bytes} starting at offset {@code offset} to the
* HotSpot's log stream.
*
* @exception NullPointerException if <code>bytes</code> is <code>null</code>.
* @exception NullPointerException if {@code bytes == null}
* @exception IndexOutOfBoundsException if copying would cause access of data outside array
* bounds.
* bounds
*/
public native void writeDebugOutput(byte[] bytes, int offset, int length);
native void writeDebugOutput(byte[] bytes, int offset, int length);
/**
* Flush HotSpot's log stream.
*/
public native void flushDebugOutput();
native void flushDebugOutput();
/**
* Read a value representing a metaspace Method* and return the
* {@link HotSpotResolvedJavaMethodImpl} wrapping it. This method does no checking that the
* location actually contains a valid Method*. If the {@code base} object is a
* Read a HotSpot Method* value from the memory location described by {@code base} plus
* {@code displacement} and return the {@link HotSpotResolvedJavaMethodImpl} wrapping it. This
* method does no checking that the memory location actually contains a valid pointer and may
* crash the VM if an invalid location is provided. If the {@code base} is null then
* {@code displacement} is used by itself. If {@code base} is a
* {@link HotSpotResolvedJavaMethodImpl}, {@link HotSpotConstantPool} or
* {@link HotSpotResolvedObjectTypeImpl} then the metaspace pointer is fetched from that object
* and used as the base. Otherwise the object itself is used as the base.
* and added to {@code displacement}. Any other non-null object type causes an
* {@link IllegalArgumentException} to be thrown.
*
* @param base an object to read from or null
* @param displacement
@ -544,12 +556,14 @@ public final class CompilerToVM {
native HotSpotResolvedJavaMethodImpl getResolvedJavaMethod(Object base, long displacement);
/**
* Read a value representing a metaspace ConstantPool* and return the
* {@link HotSpotConstantPool} wrapping it. This method does no checking that the location
* actually contains a valid ConstantPool*. If the {@code base} object is a
* {@link HotSpotResolvedJavaMethodImpl}, {@link HotSpotConstantPool} or
* {@link HotSpotResolvedObjectTypeImpl} then the metaspace pointer is fetched from that object
* and used as the base. Otherwise the object itself is used as the base.
* Read a HotSpot ConstantPool* value from the memory location described by {@code base} plus
* {@code displacement} and return the {@link HotSpotConstantPool} wrapping it. This method does
* no checking that the memory location actually contains a valid pointer and may crash the VM
* if an invalid location is provided. If the {@code base} is null then {@code displacement} is
* used by itself. If {@code base} is a {@link HotSpotResolvedJavaMethodImpl},
* {@link HotSpotConstantPool} or {@link HotSpotResolvedObjectTypeImpl} then the metaspace
* pointer is fetched from that object and added to {@code displacement}. Any other non-null
* object type causes an {@link IllegalArgumentException} to be thrown.
*
* @param base an object to read from or null
* @param displacement
@ -558,12 +572,15 @@ public final class CompilerToVM {
native HotSpotConstantPool getConstantPool(Object base, long displacement);
/**
* Read a value representing a metaspace Klass* and return the
* {@link HotSpotResolvedObjectTypeImpl} wrapping it. The method does no checking that the
* location actually contains a valid Klass*. If the {@code base} object is a
* Read a HotSpot Klass* value from the memory location described by {@code base} plus
* {@code displacement} and return the {@link HotSpotResolvedObjectTypeImpl} wrapping it. This
* method does no checking that the memory location actually contains a valid pointer and may
* crash the VM if an invalid location is provided. If the {@code base} is null then
* {@code displacement} is used by itself. If {@code base} is a
* {@link HotSpotResolvedJavaMethodImpl}, {@link HotSpotConstantPool} or
* {@link HotSpotResolvedObjectTypeImpl} then the metaspace pointer is fetched from that object
* and used as the base. Otherwise the object itself is used as the base.
* and added to {@code displacement}. Any other non-null object type causes an
* {@link IllegalArgumentException} to be thrown.
*
* @param base an object to read from or null
* @param displacement
@ -571,4 +588,17 @@ public final class CompilerToVM {
* @return null or the resolved method for this location
*/
native HotSpotResolvedObjectTypeImpl getResolvedJavaType(Object base, long displacement, boolean compressed);
/**
* Return the size of the HotSpot ProfileData* pointed at by {@code position}. If
* {@code position} is outside the space of the MethodData then an
* {@link IllegalArgumentException} is thrown. A {@code position} inside the MethodData that does
* not point at a valid ProfileData will crash the VM.
*
* @param metaspaceMethodData
* @param position
* @return the size of the ProfileData item pointed at by {@code position}
* @throws IllegalArgumentException if an out of range position is given
*/
native int methodDataProfileDataSize(long metaspaceMethodData, int position);
}
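
Since CompilerToVM and most of its natives are now package-private, code inside jdk.vm.ci.hotspot reaches them through the compilerToVM() accessor added above. A hedged sketch using the writeDebugOutput/flushDebugOutput pair (the helper name and class are illustrative only, not part of the API):

package jdk.vm.ci.hotspot;   // same package is required, the native methods are not public

// Illustrative helper only.
final class DebugLogExample {
    static void logToHotSpot(String message) {
        byte[] bytes = (message + System.lineSeparator()).getBytes();
        CompilerToVM.compilerToVM().writeDebugOutput(bytes, 0, bytes.length);
        CompilerToVM.compilerToVM().flushDebugOutput();
    }
}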

View File

@ -22,15 +22,30 @@
*/
package jdk.vm.ci.hotspot;
import static jdk.vm.ci.hotspot.HotSpotCompressedNullConstant.*;
import static jdk.vm.ci.hotspot.HotSpotCompressedNullConstant.COMPRESSED_NULL;
import java.lang.reflect.*;
import java.lang.reflect.Field;
import jdk.vm.ci.code.*;
import jdk.vm.ci.code.CompilationResult.*;
import jdk.vm.ci.code.DataSection.*;
import jdk.vm.ci.common.*;
import jdk.vm.ci.meta.*;
import jdk.vm.ci.code.BailoutException;
import jdk.vm.ci.code.CodeCacheProvider;
import jdk.vm.ci.code.CompilationRequest;
import jdk.vm.ci.code.CompilationResult;
import jdk.vm.ci.code.CompilationResult.Call;
import jdk.vm.ci.code.CompilationResult.ConstantReference;
import jdk.vm.ci.code.CompilationResult.DataPatch;
import jdk.vm.ci.code.CompilationResult.Mark;
import jdk.vm.ci.code.DataSection;
import jdk.vm.ci.code.DataSection.Data;
import jdk.vm.ci.code.DataSection.DataBuilder;
import jdk.vm.ci.code.InstalledCode;
import jdk.vm.ci.code.RegisterConfig;
import jdk.vm.ci.code.TargetDescription;
import jdk.vm.ci.common.JVMCIError;
import jdk.vm.ci.meta.Constant;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.SerializableConstant;
import jdk.vm.ci.meta.SpeculationLog;
import jdk.vm.ci.meta.VMConstant;
/**
* HotSpot implementation of {@link CodeCacheProvider}.
@ -98,72 +113,64 @@ public class HotSpotCodeCacheProvider implements CodeCacheProvider {
return runtime.getConfig().runtimeCallStackSize;
}
public InstalledCode logOrDump(InstalledCode installedCode, CompilationResult compResult) {
HotSpotJVMCIRuntime.runtime().notifyInstall(this, installedCode, compResult);
private InstalledCode logOrDump(InstalledCode installedCode, CompilationResult compResult) {
((HotSpotJVMCIRuntime) runtime).notifyInstall(this, installedCode, compResult);
return installedCode;
}
private InstalledCode installCode(CompilationResult compResult, HotSpotCompiledNmethod compiledCode, InstalledCode installedCode, SpeculationLog log) {
int result = runtime.getCompilerToVM().installCode(target, compiledCode, installedCode, log);
if (result != config.codeInstallResultOk) {
String msg = compiledCode.getInstallationFailureMessage();
String resultDesc = config.getCodeInstallResultDescription(result);
if (msg != null) {
msg = String.format("Code installation failed: %s%n%s", resultDesc, msg);
} else {
msg = String.format("Code installation failed: %s", resultDesc);
}
if (result == config.codeInstallResultDependenciesInvalid) {
throw new AssertionError(resultDesc + " " + msg);
}
throw new BailoutException(result != config.codeInstallResultDependenciesFailed, msg);
}
return logOrDump(installedCode, compResult);
}
public InstalledCode installMethod(HotSpotResolvedJavaMethod method, CompilationResult compResult, long jvmciEnv, boolean isDefault) {
if (compResult.getId() == -1) {
compResult.setId(method.allocateCompileId(compResult.getEntryBCI()));
}
HotSpotInstalledCode installedCode = new HotSpotNmethod(method, compResult.getName(), isDefault);
HotSpotCompiledNmethod compiledCode = new HotSpotCompiledNmethod(method, compResult, jvmciEnv);
return installCode(compResult, compiledCode, installedCode, method.getSpeculationLog());
}
@Override
public InstalledCode addMethod(ResolvedJavaMethod method, CompilationResult compResult, SpeculationLog log, InstalledCode predefinedInstalledCode) {
HotSpotResolvedJavaMethod hotspotMethod = (HotSpotResolvedJavaMethod) method;
if (compResult.getId() == -1) {
compResult.setId(hotspotMethod.allocateCompileId(compResult.getEntryBCI()));
}
InstalledCode installedCode = predefinedInstalledCode;
public InstalledCode installCode(CompilationRequest compRequest, CompilationResult compResult, InstalledCode installedCode, SpeculationLog log, boolean isDefault) {
HotSpotResolvedJavaMethod method = compRequest != null ? (HotSpotResolvedJavaMethod) compRequest.getMethod() : null;
InstalledCode resultInstalledCode;
if (installedCode == null) {
HotSpotInstalledCode code = new HotSpotNmethod(hotspotMethod, compResult.getName(), false);
installedCode = code;
if (method == null) {
// Must be a stub
resultInstalledCode = new HotSpotRuntimeStub(compResult.getName());
} else {
resultInstalledCode = new HotSpotNmethod(method, compResult.getName(), isDefault);
}
} else {
resultInstalledCode = installedCode;
}
HotSpotCompiledNmethod compiledCode = new HotSpotCompiledNmethod(hotspotMethod, compResult);
return installCode(compResult, compiledCode, installedCode, log);
HotSpotCompiledCode compiledCode;
if (method != null) {
final int id;
final long jvmciEnv;
if (compRequest instanceof HotSpotCompilationRequest) {
HotSpotCompilationRequest hsCompRequest = (HotSpotCompilationRequest) compRequest;
id = hsCompRequest.getId();
jvmciEnv = hsCompRequest.getJvmciEnv();
} else {
id = method.allocateCompileId(compRequest.getEntryBCI());
jvmciEnv = 0L;
}
compiledCode = new HotSpotCompiledNmethod(method, compResult, id, jvmciEnv);
} else {
compiledCode = new HotSpotCompiledCode(compResult);
}
int result = runtime.getCompilerToVM().installCode(target, compiledCode, resultInstalledCode, (HotSpotSpeculationLog) log);
if (result != config.codeInstallResultOk) {
String resultDesc = config.getCodeInstallResultDescription(result);
if (compiledCode instanceof HotSpotCompiledNmethod) {
HotSpotCompiledNmethod compiledNmethod = (HotSpotCompiledNmethod) compiledCode;
String msg = compiledNmethod.getInstallationFailureMessage();
if (msg != null) {
msg = String.format("Code installation failed: %s%n%s", resultDesc, msg);
} else {
msg = String.format("Code installation failed: %s", resultDesc);
}
if (result == config.codeInstallResultDependenciesInvalid) {
throw new AssertionError(resultDesc + " " + msg);
}
throw new BailoutException(result != config.codeInstallResultDependenciesFailed, msg);
} else {
throw new BailoutException("Error installing %s: %s", compResult.getName(), resultDesc);
}
}
return logOrDump(resultInstalledCode, compResult);
}
@Override
public InstalledCode setDefaultMethod(ResolvedJavaMethod method, CompilationResult compResult) {
HotSpotResolvedJavaMethod hotspotMethod = (HotSpotResolvedJavaMethod) method;
return installMethod(hotspotMethod, compResult, 0L, true);
}
public HotSpotNmethod addExternalMethod(ResolvedJavaMethod method, CompilationResult compResult) {
HotSpotResolvedJavaMethod javaMethod = (HotSpotResolvedJavaMethod) method;
if (compResult.getId() == -1) {
compResult.setId(javaMethod.allocateCompileId(compResult.getEntryBCI()));
}
HotSpotNmethod code = new HotSpotNmethod(javaMethod, compResult.getName(), false, true);
HotSpotCompiledNmethod compiled = new HotSpotCompiledNmethod(javaMethod, compResult);
CompilerToVM vm = runtime.getCompilerToVM();
int result = vm.installCode(target, compiled, code, null);
if (result != runtime.getConfig().codeInstallResultOk) {
return null;
}
return code;
public void invalidateInstalledCode(InstalledCode installedCode) {
runtime.getCompilerToVM().invalidateInstalledCode(installedCode);
}
public boolean needsDataPatch(JavaConstant constant) {
@ -176,35 +183,29 @@ public class HotSpotCodeCacheProvider implements CodeCacheProvider {
if (constant instanceof VMConstant) {
VMConstant vmConstant = (VMConstant) constant;
boolean compressed;
long raw;
if (constant instanceof HotSpotObjectConstant) {
HotSpotObjectConstant c = (HotSpotObjectConstant) vmConstant;
if (constant instanceof HotSpotConstant) {
HotSpotConstant c = (HotSpotConstant) vmConstant;
compressed = c.isCompressed();
raw = 0xDEADDEADDEADDEADL;
} else if (constant instanceof HotSpotMetaspaceConstant) {
HotSpotMetaspaceConstant meta = (HotSpotMetaspaceConstant) constant;
compressed = meta.isCompressed();
raw = meta.rawValue();
} else {
throw new JVMCIError(String.valueOf(constant));
}
size = target.getSizeInBytes(compressed ? JavaKind.Int : target.wordKind);
size = compressed ? 4 : target.wordSize;
if (size == 4) {
builder = (buffer, patch) -> {
patch.accept(new DataPatch(buffer.position(), new ConstantReference(vmConstant)));
buffer.putInt((int) raw);
buffer.putInt(0xDEADDEAD);
};
} else {
assert size == 8;
builder = (buffer, patch) -> {
patch.accept(new DataPatch(buffer.position(), new ConstantReference(vmConstant)));
buffer.putLong(raw);
buffer.putLong(0xDEADDEADDEADDEADL);
};
}
} else if (JavaConstant.isNull(constant)) {
boolean compressed = COMPRESSED_NULL.equals(constant);
size = target.getSizeInBytes(compressed ? JavaKind.Int : target.wordKind);
size = compressed ? 4 : target.wordSize;
builder = DataBuilder.zero(size);
} else if (constant instanceof SerializableConstant) {
SerializableConstant s = (SerializableConstant) constant;
@ -250,8 +251,7 @@ public class HotSpotCodeCacheProvider implements CodeCacheProvider {
public String disassemble(InstalledCode code) {
if (code.isValid()) {
long codeBlob = code.getAddress();
return runtime.getCompilerToVM().disassembleCodeBlob(codeBlob);
return runtime.getCompilerToVM().disassembleCodeBlob(code);
}
return null;
}
@ -259,4 +259,35 @@ public class HotSpotCodeCacheProvider implements CodeCacheProvider {
public SpeculationLog createSpeculationLog() {
return new HotSpotSpeculationLog();
}
public long getMaxCallTargetOffset(long address) {
return runtime.getCompilerToVM().getMaxCallTargetOffset(address);
}
public boolean shouldDebugNonSafepoints() {
return runtime.getCompilerToVM().shouldDebugNonSafepoints();
}
/**
* Notifies the VM of statistics for a completed compilation.
*
* @param id the identifier of the compilation
* @param method the method compiled
* @param osr specifies if the compilation was for on-stack-replacement
* @param processedBytecodes the number of bytecodes processed during the compilation, including
* the bytecodes of all inlined methods
* @param time the amount of time spent compiling {@code method}
* @param timeUnitsPerSecond the granularity of the units for the {@code time} value
* @param installedCode the nmethod installed as a result of the compilation
*/
public void notifyCompilationStatistics(int id, HotSpotResolvedJavaMethod method, boolean osr, int processedBytecodes, long time, long timeUnitsPerSecond, InstalledCode installedCode) {
runtime.getCompilerToVM().notifyCompilationStatistics(id, (HotSpotResolvedJavaMethodImpl) method, osr, processedBytecodes, time, timeUnitsPerSecond, installedCode);
}
/**
* Resets all compilation statistics.
*/
public void resetCompilationStatistics() {
runtime.getCompilerToVM().resetCompilationStatistics();
}
}
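For readers unfamiliar with the statistics hook above, the following is a minimal sketch, not part of this commit, of how a JVMCI compiler driver might call notifyCompilationStatistics once a compilation finishes. The HotSpotCompilationRequest class added later in this diff supplies the id and method; the StatisticsSketch class name is hypothetical, and the jdk.vm.ci.hotspot package location of HotSpotCodeCacheProvider is assumed.

import jdk.vm.ci.code.InstalledCode;
import jdk.vm.ci.hotspot.HotSpotCodeCacheProvider;
import jdk.vm.ci.hotspot.HotSpotCompilationRequest;

class StatisticsSketch {
    // Hypothetical helper: reports a completed non-OSR compilation whose duration
    // was measured in nanoseconds, following the contract documented on
    // notifyCompilationStatistics above.
    static void report(HotSpotCodeCacheProvider codeCache, HotSpotCompilationRequest request,
                    int processedBytecodes, long elapsedNanos, InstalledCode installedCode) {
        codeCache.notifyCompilationStatistics(request.getId(), request.getMethod(),
                        false /* osr */, processedBytecodes, elapsedNanos,
                        1_000_000_000L /* time reported in nanoseconds */, installedCode);
    }
}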

View File

@ -0,0 +1,82 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.vm.ci.hotspot;
import jdk.vm.ci.code.CompilationRequest;
/**
* A compilation request with extra HotSpot specific context such as a compilation identifier and
* the address of a {@code JVMCIEnv} object that provides native context for a compilation.
*/
public class HotSpotCompilationRequest extends CompilationRequest {
private final long jvmciEnv;
private final int id;
/**
* Creates a request to compile a method starting at a given BCI and allocates an identifier to
* the request.
*
* @param method the method to be compiled
* @param entryBCI the bytecode index (BCI) at which to start compiling where -1 denotes the
* method's entry point
* @param jvmciEnv address of a native {@code JVMCIEnv} object or 0L
*/
public HotSpotCompilationRequest(HotSpotResolvedJavaMethod method, int entryBCI, long jvmciEnv) {
this(method, entryBCI, jvmciEnv, method.allocateCompileId(entryBCI));
}
/**
* Creates a request to compile a method starting at a given BCI.
*
* @param method the method to be compiled
* @param entryBCI the bytecode index (BCI) at which to start compiling where -1 denotes the
* method's entry point
* @param jvmciEnv address of a native {@code JVMCIEnv} object or 0L
* @param id an identifier for the request
*/
public HotSpotCompilationRequest(HotSpotResolvedJavaMethod method, int entryBCI, long jvmciEnv, int id) {
super(method, entryBCI);
this.jvmciEnv = jvmciEnv;
this.id = id;
}
@Override
public HotSpotResolvedJavaMethod getMethod() {
return (HotSpotResolvedJavaMethod) super.getMethod();
}
/**
* Gets the address of the native {@code JVMCIEnv} object or 0L if no such object exists.
*/
public long getJvmciEnv() {
return jvmciEnv;
}
/**
* Gets the VM allocated identifier for this compilation.
*/
public int getId() {
return id;
}
}
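As a hedged usage sketch, not in this changeset, a compiler might build a request for a normal, non-OSR compilation as follows; the RequestSketch class name is invented for illustration.

import jdk.vm.ci.hotspot.HotSpotCompilationRequest;
import jdk.vm.ci.hotspot.HotSpotResolvedJavaMethod;

class RequestSketch {
    // Hypothetical helper: an entryBCI of -1 requests compilation from the
    // method's normal entry point, and 0L means no native JVMCIEnv object is
    // associated with the request; the three-argument constructor then
    // allocates a fresh compile id for it.
    static HotSpotCompilationRequest topLevel(HotSpotResolvedJavaMethod method) {
        return new HotSpotCompilationRequest(method, -1, 0L);
    }
}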

View File

@ -22,12 +22,16 @@
*/
package jdk.vm.ci.hotspot;
import java.nio.*;
import java.util.*;
import java.util.stream.*;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Stream;
import java.util.stream.Stream.Builder;
import jdk.vm.ci.code.*;
import jdk.vm.ci.code.BytecodeFrame;
import jdk.vm.ci.code.CompilationResult;
import jdk.vm.ci.code.CompilationResult.CodeAnnotation;
import jdk.vm.ci.code.CompilationResult.CodeComment;
import jdk.vm.ci.code.CompilationResult.DataPatch;
@ -36,14 +40,15 @@ import jdk.vm.ci.code.CompilationResult.Infopoint;
import jdk.vm.ci.code.CompilationResult.JumpTable;
import jdk.vm.ci.code.CompilationResult.Mark;
import jdk.vm.ci.code.CompilationResult.Site;
import jdk.vm.ci.meta.*;
import jdk.vm.ci.code.DataSection;
import jdk.vm.ci.meta.Assumptions.Assumption;
import jdk.vm.ci.meta.ResolvedJavaMethod;
/**
* A {@link CompilationResult} with additional HotSpot-specific information required for installing
* the code in HotSpot's code cache.
*/
public abstract class HotSpotCompiledCode {
public class HotSpotCompiledCode {
public final String name;
public final Site[] sites;
@ -113,9 +118,7 @@ public abstract class HotSpotCompiledCode {
targetCodeSize = compResult.getTargetCodeSize();
DataSection data = compResult.getDataSection();
if (!data.isFinalized()) {
data.finalizeLayout();
}
data.finalizeLayout();
dataSection = new byte[data.getSectionSize()];
ByteBuffer buffer = ByteBuffer.wrap(dataSection).order(ByteOrder.nativeOrder());
@ -176,4 +179,9 @@ public abstract class HotSpotCompiledCode {
Arrays.sort(result, new SiteComparator());
return result;
}
@Override
public String toString() {
return name;
}
}
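The data-section handling above serializes constants through a ByteBuffer in the host's native byte order. The fragment below is an illustrative sketch of that pattern only; the SectionSketch name and the single 8-byte entry are assumptions, not code from this commit.

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

class SectionSketch {
    // Sketch: back the data section with a byte[] and write through a
    // native-order ByteBuffer so the VM reads the constants with the same
    // layout the compiler emitted. Assumes sectionSize is at least 8.
    static byte[] serialize(int sectionSize, long wordSizedConstant) {
        byte[] dataSection = new byte[sectionSize];
        ByteBuffer buffer = ByteBuffer.wrap(dataSection).order(ByteOrder.nativeOrder());
        buffer.putLong(wordSizedConstant);
        return dataSection;
    }
}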

View File

@ -22,8 +22,8 @@
*/
package jdk.vm.ci.hotspot;
import jdk.vm.ci.code.*;
import jdk.vm.ci.inittimer.*;
import jdk.vm.ci.code.CompilationResult;
import jdk.vm.ci.inittimer.SuppressFBWarnings;
/**
* {@link HotSpotCompiledCode} destined for installation as an nmethod.
@ -32,8 +32,17 @@ public final class HotSpotCompiledNmethod extends HotSpotCompiledCode {
public final HotSpotResolvedJavaMethod method;
public final int entryBCI;
/**
* Compilation identifier.
*/
public final int id;
/**
* Address of a native {@code JVMCIEnv} object or 0L if no such object exists.
*/
public final long jvmciEnv;
public final boolean hasUnsafeAccess;
/**
@ -42,15 +51,11 @@ public final class HotSpotCompiledNmethod extends HotSpotCompiledCode {
*/
@SuppressFBWarnings(value = "UWF_UNWRITTEN_FIELD", justification = "set by the VM") private String installationFailureMessage;
public HotSpotCompiledNmethod(HotSpotResolvedJavaMethod method, CompilationResult compResult) {
this(method, compResult, 0L);
}
public HotSpotCompiledNmethod(HotSpotResolvedJavaMethod method, CompilationResult compResult, long jvmciEnv) {
public HotSpotCompiledNmethod(HotSpotResolvedJavaMethod method, CompilationResult compResult, int id, long jvmciEnv) {
super(compResult);
this.method = method;
this.entryBCI = compResult.getEntryBCI();
this.id = compResult.getId();
this.id = id;
this.jvmciEnv = jvmciEnv;
this.hasUnsafeAccess = compResult.hasUnsafeAccess();
}
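To show how the widened constructor fits together with HotSpotCompilationRequest, here is a minimal sketch under the assumption that compResult comes from a finished compilation; NmethodSketch is a hypothetical name, not code from this commit.

import jdk.vm.ci.code.CompilationResult;
import jdk.vm.ci.hotspot.HotSpotCompilationRequest;
import jdk.vm.ci.hotspot.HotSpotCompiledNmethod;

class NmethodSketch {
    // Hypothetical helper: wraps a finished CompilationResult for installation,
    // forwarding the request's VM-allocated id and native JVMCIEnv address to
    // the new four-argument constructor.
    static HotSpotCompiledNmethod wrap(HotSpotCompilationRequest request, CompilationResult compResult) {
        return new HotSpotCompiledNmethod(request.getMethod(), compResult,
                        request.getId(), request.getJvmciEnv());
    }
}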

Some files were not shown because too many files have changed in this diff.