commit de8051e063 by Phil Race, 2018-05-02 09:16:10 -07:00
1329 changed files with 84730 additions and 17534 deletions

View File

@ -482,3 +482,4 @@ f7363de371c9a1f668bd0a01b7df3d1ddb9cc58b jdk-11+7
0c3e252cea44f06aef570ef464950ab97c669970 jdk-11+9
6fa770f9f8ab296e1ce255ec17ccf6d4e1051886 jdk-10+46
69d7398038c54774d9395b6810e0cca335edc02c jdk-11+10
e1e60f75cd39312a7f59d2a4f91d624e5aecc95e jdk-11+11

View File

@ -439,7 +439,7 @@ jdk.internal.vm.compiler_ADD_JAVAC_FLAGS += -parameters -XDstringConcat=inline \
#
jdk.internal.vm.compiler_EXCLUDES += \
org.graalvm.collections.test \
jdk.internal.vm.compiler.collections.test \
org.graalvm.compiler.core.match.processor \
org.graalvm.compiler.nodeinfo.processor \
org.graalvm.compiler.options.processor \

View File

@ -47,8 +47,8 @@ ifeq ($(INCLUDE_GRAAL), true)
$(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_MATCH_PROCESSOR, \
SETUP := GENERATE_OLDBYTECODE, \
SRC := \
$(SRC_DIR)/org.graalvm.word/src \
$(SRC_DIR)/org.graalvm.collections/src \
$(SRC_DIR)/jdk.internal.vm.compiler.word/src \
$(SRC_DIR)/jdk.internal.vm.compiler.collections/src \
$(SRC_DIR)/org.graalvm.compiler.core/src \
$(SRC_DIR)/org.graalvm.compiler.core.common/src \
$(SRC_DIR)/org.graalvm.compiler.core.match.processor/src \
@ -102,7 +102,7 @@ ifeq ($(INCLUDE_GRAAL), true)
$(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_OPTIONS_PROCESSOR, \
SETUP := GENERATE_OLDBYTECODE, \
SRC := \
$(SRC_DIR)/org.graalvm.collections/src \
$(SRC_DIR)/jdk.internal.vm.compiler.collections/src \
$(SRC_DIR)/org.graalvm.compiler.options/src \
$(SRC_DIR)/org.graalvm.compiler.options.processor/src \
$(SRC_DIR)/org.graalvm.util/src \
@ -118,8 +118,8 @@ ifeq ($(INCLUDE_GRAAL), true)
$(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_REPLACEMENTS_VERIFIER, \
SETUP := GENERATE_OLDBYTECODE, \
SRC := \
$(SRC_DIR)/org.graalvm.word/src \
$(SRC_DIR)/org.graalvm.collections/src \
$(SRC_DIR)/jdk.internal.vm.compiler.word/src \
$(SRC_DIR)/jdk.internal.vm.compiler.collections/src \
$(SRC_DIR)/org.graalvm.compiler.bytecode/src \
$(SRC_DIR)/org.graalvm.compiler.replacements.verifier/src \
$(SRC_DIR)/org.graalvm.compiler.api.replacements/src \

View File

@ -429,7 +429,7 @@ ifneq ($(findstring javafx., $(IMPORTED_MODULES)), )
endif
# All modules to have docs generated by docs-jdk-api target
JDK_MODULES := $(sort $(DOCS_MODULES))
JDK_MODULES := $(sort $(filter-out $(MODULES_FILTER), $(DOCS_MODULES)))
$(eval $(call SetupApiDocsGeneration, JDK_API, \
MODULES := $(JDK_MODULES), \
@ -561,7 +561,7 @@ $(eval $(call SetupCopyFiles, COPY_JDWP_PROTOCOL, \
JDK_SPECS_TARGETS += $(COPY_JDWP_PROTOCOL)
# Get jvmti.html from the main jvm variant (all variants' jvmti.html are identical).
JVMTI_HTML := $(HOTSPOT_OUTPUTDIR)/variant-$(JVM_VARIANT_MAIN)/gensrc/jvmtifiles/jvmti.html
JVMTI_HTML ?= $(HOTSPOT_OUTPUTDIR)/variant-$(JVM_VARIANT_MAIN)/gensrc/jvmtifiles/jvmti.html
$(eval $(call SetupCopyFiles, COPY_JVMTI_HTML, \
FILES := $(JVMTI_HTML), \
DEST := $(DOCS_OUTPUTDIR)/specs, \

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -61,11 +61,12 @@ $(CLASSLIST_FILE): $(INTERIM_IMAGE_DIR)/bin/java$(EXE_SUFFIX) $(CLASSLIST_JAR)
$(call MakeDir, $(LINK_OPT_DIR))
$(call LogInfo, Generating $(patsubst $(OUTPUTDIR)/%, %, $@))
$(call LogInfo, Generating $(patsubst $(OUTPUTDIR)/%, %, $(JLI_TRACE_FILE)))
$(FIXPATH) $(INTERIM_IMAGE_DIR)/bin/java -XX:DumpLoadedClassList=$@ \
$(FIXPATH) $(INTERIM_IMAGE_DIR)/bin/java -XX:DumpLoadedClassList=$@.raw \
-Djava.lang.invoke.MethodHandle.TRACE_RESOLVE=true \
-cp $(SUPPORT_OUTPUTDIR)/classlist.jar \
build.tools.classlist.HelloClasslist \
$(LOG_DEBUG) 2>&1 > $(JLI_TRACE_FILE)
$(GREP) -v HelloClasslist $@.raw > $@
# The jli trace is created by the same recipe as classlist. By declaring these
# dependencies, make will correctly rebuild both jli trace and classlist

View File

@ -117,7 +117,7 @@ JLINK_ORDER_RESOURCES += \
JLINK_TOOL := $(JLINK) -J-Djlink.debug=true \
--module-path $(IMAGES_OUTPUTDIR)/jmods \
--endian $(OPENJDK_BUILD_CPU_ENDIAN) \
--endian $(OPENJDK_TARGET_CPU_ENDIAN) \
--release-info $(BASE_RELEASE_FILE) \
--order-resources=$(call CommaList, $(JLINK_ORDER_RESOURCES)) \
--dedup-legal-notices=error-if-not-same-content \

View File

@ -128,16 +128,22 @@ AC_DEFUN([FLAGS_SETUP_WARNINGS],
AC_ARG_ENABLE([warnings-as-errors], [AS_HELP_STRING([--disable-warnings-as-errors],
[do not consider native warnings to be an error @<:@enabled@:>@])])
# Set default value.
if test "x$TOOLCHAIN_TYPE" = xxlc; then
WARNINGS_AS_ERRORS=false
else
WARNINGS_AS_ERRORS=true
fi
AC_MSG_CHECKING([if native warnings are errors])
if test "x$enable_warnings_as_errors" = "xyes"; then
AC_MSG_RESULT([yes (explicitly set)])
WARNINGS_AS_ERRORS=true
elif test "x$enable_warnings_as_errors" = "xno"; then
AC_MSG_RESULT([no])
AC_MSG_RESULT([no (explicitly set)])
WARNINGS_AS_ERRORS=false
elif test "x$enable_warnings_as_errors" = "x"; then
AC_MSG_RESULT([yes (default)])
WARNINGS_AS_ERRORS=true
AC_MSG_RESULT([${WARNINGS_AS_ERRORS} (default)])
else
AC_MSG_ERROR([--enable-warnings-as-errors accepts no argument])
fi

View File

@ -233,15 +233,17 @@ AC_DEFUN_ONCE([FLAGS_PRE_TOOLCHAIN],
# The sysroot flags are needed for configure to be able to run the compilers
FLAGS_SETUP_SYSROOT_FLAGS
# For solstudio and xlc, the word size flag is required for correct behavior.
# For clang/gcc, the flag is only strictly required for reduced builds, but
# set it always where possible (x86, sparc and ppc).
if test "x$TOOLCHAIN_TYPE" = xxlc; then
MACHINE_FLAG="-q${OPENJDK_TARGET_CPU_BITS}"
elif test "x$TOOLCHAIN_TYPE" != xmicrosoft; then
if test "x$OPENJDK_TARGET_CPU" != xaarch64 &&
test "x$OPENJDK_TARGET_CPU" != xarm &&
test "x$OPENJDK_TARGET_CPU" != xmips &&
test "x$OPENJDK_TARGET_CPU" != xmipsel &&
test "x$OPENJDK_TARGET_CPU" != xmips64 &&
test "x$OPENJDK_TARGET_CPU" != xmips64el; then
elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
MACHINE_FLAG="-m${OPENJDK_TARGET_CPU_BITS}"
elif test "x$TOOLCHAIN_TYPE" = xgcc || test "x$TOOLCHAIN_TYPE" = xclang; then
if test "x$OPENJDK_TARGET_CPU_ARCH" = xx86 ||
test "x$OPENJDK_TARGET_CPU_ARCH" = xsparc ||
test "x$OPENJDK_TARGET_CPU_ARCH" = xppc; then
MACHINE_FLAG="-m${OPENJDK_TARGET_CPU_BITS}"
fi
fi

View File

@ -238,6 +238,9 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_JDK_OPTIONS],
if test "x$OPENJDK_TARGET_OS" = xaix ; then
INCLUDE_SA=false
fi
if test "x$OPENJDK_TARGET_CPU" = xs390x ; then
INCLUDE_SA=false
fi
AC_SUBST(INCLUDE_SA)
# Compress jars

View File

@ -32,7 +32,7 @@ $(eval $(call IncludeCustomExtension, hotspot/lib/JvmFeatures.gmk))
ifeq ($(call check-jvm-feature, compiler1), true)
JVM_CFLAGS_FEATURES += -DCOMPILER1
else
JVM_EXCLUDE_PATTERNS += c1_
JVM_EXCLUDE_PATTERNS += c1_ c1/
endif
ifeq ($(call check-jvm-feature, compiler2), true)

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -322,12 +322,13 @@ class Bundle {
|| key.startsWith(CLDRConverter.METAZONE_ID_PREFIX)) {
@SuppressWarnings("unchecked")
Map<String, String> nameMap = (Map<String, String>) myMap.get(key);
// Convert key/value pairs to an array.
String[] names = new String[ZONE_NAME_KEYS.length];
int ix = 0;
for (String nameKey : ZONE_NAME_KEYS) {
String name = nameMap.get(nameKey);
if (name == null) {
if (name == null && parentsMap != null) {
@SuppressWarnings("unchecked")
Map<String, String> parentNames = (Map<String, String>) parentsMap.get(key);
if (parentNames != null) {
@ -357,29 +358,6 @@ class Bundle {
}
}
}
// If there are still any nulls, try filling in them from en data.
if (hasNulls(names) && !id.equals("en")) {
@SuppressWarnings("unchecked")
String[] enNames = (String[]) Bundle.getBundle("en").getTargetMap().get(key);
if (enNames == null) {
if (metaKey != null) {
@SuppressWarnings("unchecked")
String[] metaNames = (String[]) Bundle.getBundle("en").getTargetMap().get(metaKey);
enNames = metaNames;
}
}
if (enNames != null) {
for (int i = 0; i < names.length; i++) {
if (names[i] == null) {
names[i] = enNames[i];
}
}
}
// If there are still nulls, give up names.
if (hasNulls(names)) {
names = null;
}
}
}
// replace the Map with the array
if (names != null) {

View File

@ -31,6 +31,7 @@ import java.io.File;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.*;
import java.text.MessageFormat;
import java.time.*;
import java.util.*;
import java.util.ResourceBundle.Control;
@ -82,13 +83,15 @@ public class CLDRConverter {
static final String CALENDAR_FIRSTDAY_PREFIX = "firstDay.";
static final String CALENDAR_MINDAYS_PREFIX = "minDays.";
static final String TIMEZONE_ID_PREFIX = "timezone.id.";
static final String EXEMPLAR_CITY_PREFIX = "timezone.excity.";
static final String ZONE_NAME_PREFIX = "timezone.displayname.";
static final String METAZONE_ID_PREFIX = "metazone.id.";
static final String PARENT_LOCALE_PREFIX = "parentLocale.";
static final String[] EMPTY_ZONE = {"", "", "", "", "", ""};
private static SupplementDataParseHandler handlerSuppl;
private static SupplementalMetadataParseHandler handlerSupplMeta;
private static LikelySubtagsParseHandler handlerLikelySubtags;
static SupplementalMetadataParseHandler handlerSupplMeta;
static NumberingSystemsParseHandler handlerNumbering;
static MetaZonesParseHandler handlerMetaZones;
static TimeZoneParseHandler handlerTimeZone;
@ -425,7 +428,7 @@ public class CLDRConverter {
parseLDMLFile(new File(LIKELYSUBTAGS_SOURCE_FILE), handlerLikelySubtags);
// Parse supplementalMetadata
// Currently only interested in deprecated time zone ids.
// Currently interested in deprecated time zone ids and language aliases.
handlerSupplMeta = new SupplementalMetadataParseHandler();
parseLDMLFile(new File(SPPL_META_SOURCE_FILE), handlerSupplMeta);
}
@ -662,23 +665,18 @@ public class CLDRConverter {
Arrays.deepEquals(data,
(String[])map.get(METAZONE_ID_PREFIX + me.getValue())))
.findAny();
if (cldrMeta.isPresent()) {
names.put(tzid, cldrMeta.get().getValue());
} else {
cldrMeta.ifPresentOrElse(meta -> names.put(tzid, meta.getValue()), () -> {
// check the JRE meta key; add one if there is none.
Optional<Map.Entry<String[], String>> jreMeta =
jreMetaMap.entrySet().stream()
.filter(jm -> Arrays.deepEquals(data, jm.getKey()))
.findAny();
if (jreMeta.isPresent()) {
names.put(tzid, jreMeta.get().getValue());
} else {
jreMeta.ifPresentOrElse(meta -> names.put(tzid, meta.getValue()), () -> {
String metaName = "JRE_" + tzid.replaceAll("[/-]", "_");
names.put(METAZONE_ID_PREFIX + metaName, data);
names.put(tzid, metaName);
jreMetaMap.put(data, metaName);
}
}
});
});
}
});
}
@ -705,6 +703,26 @@ public class CLDRConverter {
}
});
// exemplar cities.
Map<String, Object> exCities = map.entrySet().stream()
.filter(e -> e.getKey().startsWith(CLDRConverter.EXEMPLAR_CITY_PREFIX))
.collect(Collectors
.toMap(Map.Entry::getKey, Map.Entry::getValue));
names.putAll(exCities);
if (!id.equals("en") &&
!names.isEmpty()) {
// CLDR does not have UTC entry, so add it here.
names.put("UTC", EMPTY_ZONE);
// zones with no metazone
Arrays.asList(handlerMetaZones.get(MetaZonesParseHandler.NO_METAZONE_KEY)
.split("\\s")).stream()
.forEach(tz -> {
names.put(tz, EMPTY_ZONE);
});
}
return names;
}
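Editor's note: the hunk above extends the time zone names map in two ways: exemplar-city entries (keys with the new "timezone.excity." prefix) are copied into the bundle, and for non-English bundles the zones CLDR leaves uncovered (UTC plus every zone listed under the no-metazone key) receive the six-element EMPTY_ZONE placeholder. A minimal, self-contained sketch of the resulting map shape; the keys and display values below are hypothetical examples, not real CLDR data:

    import java.util.HashMap;
    import java.util.Map;

    public class ZoneNamesSketch {
        // Same shape as CLDRConverter.EMPTY_ZONE: six empty display-name slots.
        static final String[] EMPTY_ZONE = {"", "", "", "", "", ""};

        public static void main(String[] args) {
            Map<String, Object> names = new HashMap<>();
            // Exemplar city entry (EXEMPLAR_CITY_PREFIX); the value is a hypothetical localized city name.
            names.put("timezone.excity.America/Los_Angeles", "Los Angeles");
            // CLDR has no UTC entry, so the converter adds a placeholder for non-en bundles.
            names.put("UTC", EMPTY_ZONE);
            // A zone with no metazone (hypothetical id here) also gets the placeholder.
            names.put("Etc/GMT+5", EMPTY_ZONE);
            names.forEach((k, v) -> System.out.println(k));
        }
    }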
@ -769,6 +787,10 @@ public class CLDRConverter {
"field.hour",
"timezone.hourFormat",
"timezone.gmtFormat",
"timezone.gmtZeroFormat",
"timezone.regionFormat",
"timezone.regionFormat.daylight",
"timezone.regionFormat.standard",
"field.minute",
"field.second",
"field.zone",

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -103,19 +103,30 @@ class LDMLParseHandler extends AbstractLDMLHandler<Object> {
case "key":
// for LocaleNames
// copy string
{
String key = convertOldKeyName(attributes.getValue("type"));
if (key.length() == 2) {
pushStringEntry(qName, attributes,
CLDRConverter.LOCALE_KEY_PREFIX +
convertOldKeyName(attributes.getValue("type")));
CLDRConverter.LOCALE_KEY_PREFIX + key);
} else {
pushIgnoredContainer(qName);
}
}
break;
case "type":
// for LocaleNames/CalendarNames
// copy string
{
String key = convertOldKeyName(attributes.getValue("key"));
if (key.length() == 2) {
pushStringEntry(qName, attributes,
CLDRConverter.LOCALE_TYPE_PREFIX +
convertOldKeyName(attributes.getValue("key")) + "." +
CLDRConverter.LOCALE_TYPE_PREFIX + key + "." +
attributes.getValue("type"));
} else {
pushIgnoredContainer(qName);
}
}
break;
//
@ -445,6 +456,16 @@ class LDMLParseHandler extends AbstractLDMLHandler<Object> {
case "gmtFormat":
pushStringEntry(qName, attributes, "timezone.gmtFormat");
break;
case "gmtZeroFormat":
pushStringEntry(qName, attributes, "timezone.gmtZeroFormat");
break;
case "regionFormat":
{
String type = attributes.getValue("type");
pushStringEntry(qName, attributes, "timezone.regionFormat" +
(type == null ? "" : "." + type));
}
break;
case "zone":
{
String tzid = attributes.getValue("type"); // Olson tz id
@ -474,8 +495,8 @@ class LDMLParseHandler extends AbstractLDMLHandler<Object> {
case "daylight": // daylight saving (summer) time name
pushStringEntry(qName, attributes, CLDRConverter.ZONE_NAME_PREFIX + qName + "." + zoneNameStyle);
break;
case "exemplarCity": // not used in JDK
pushIgnoredContainer(qName);
case "exemplarCity":
pushStringEntry(qName, attributes, CLDRConverter.EXEMPLAR_CITY_PREFIX);
break;
//
@ -877,12 +898,17 @@ class LDMLParseHandler extends AbstractLDMLHandler<Object> {
case "generic":
case "standard":
case "daylight":
case "exemplarCity":
if (zonePrefix != null && (currentContainer instanceof Entry)) {
@SuppressWarnings("unchecked")
Map<String, String> valmap = (Map<String, String>) get(zonePrefix + getContainerKey());
Entry<?> entry = (Entry<?>) currentContainer;
if (qName.equals("exemplarCity")) {
put(CLDRConverter.EXEMPLAR_CITY_PREFIX + getContainerKey(), (String) entry.getValue());
} else {
valmap.put(entry.getKey(), (String) entry.getValue());
}
}
break;
case "monthWidth":

View File

@ -35,6 +35,8 @@ import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
class MetaZonesParseHandler extends AbstractLDMLHandler<String> {
final static String NO_METAZONE_KEY = "no.metazone.defined";
private String tzid, metazone;
// for java.time.format.ZoneNames.java
@ -101,10 +103,17 @@ class MetaZonesParseHandler extends AbstractLDMLHandler<String> {
assert qName.equals(currentContainer.getqName()) : "current=" + currentContainer.getqName() + ", param=" + qName;
switch (qName) {
case "timezone":
if (tzid == null || metazone == null) {
if (tzid == null) {
throw new InternalError();
}
} else if (metazone == null) {
String no_meta = get(NO_METAZONE_KEY);
put(NO_METAZONE_KEY, no_meta == null ? tzid : no_meta + " " + tzid);
CLDRConverter.info("No metazone defined for %s%n", tzid);
} else {
put(tzid, metazone);
}
tzid = null;
metazone = null;
break;
}
currentContainer = currentContainer.getParent();
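Editor's note: with this change a timezone element that has a tzid but no metazone is no longer an internal error; the id is appended to a single space-separated value stored under NO_METAZONE_KEY, which CLDRConverter later splits back into individual ids (see the exemplar-city hunk earlier). A minimal sketch of that accumulate-then-split round trip, using hypothetical zone ids:

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.Map;

    public class NoMetazoneSketch {
        static final String NO_METAZONE_KEY = "no.metazone.defined";

        public static void main(String[] args) {
            Map<String, String> handlerData = new HashMap<>();
            // Producer side (MetaZonesParseHandler): append each uncovered tzid.
            for (String tzid : new String[] {"Etc/GMT+5", "America/Hypothetical"}) {
                String no_meta = handlerData.get(NO_METAZONE_KEY);
                handlerData.put(NO_METAZONE_KEY, no_meta == null ? tzid : no_meta + " " + tzid);
            }
            // Consumer side (CLDRConverter): split the list back into ids.
            Arrays.asList(handlerData.get(NO_METAZONE_KEY).split("\\s"))
                  .forEach(System.out::println);
        }
    }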

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -211,11 +211,13 @@ class ResourceBundleGenerator implements BundleGenerator {
if (value == null) {
CLDRConverter.warning("null value for " + key);
} else if (value instanceof String) {
if (type == BundleType.TIMEZONE ||
((String)value).startsWith(META_VALUE_PREFIX)) {
out.printf(" { \"%s\", %s },\n", key, CLDRConverter.saveConvert((String) value, useJava));
String valStr = (String)value;
if (type == BundleType.TIMEZONE &&
!key.startsWith(CLDRConverter.EXEMPLAR_CITY_PREFIX) ||
valStr.startsWith(META_VALUE_PREFIX)) {
out.printf(" { \"%s\", %s },\n", key, CLDRConverter.saveConvert(valStr, useJava));
} else {
out.printf(" { \"%s\", \"%s\" },\n", key, CLDRConverter.saveConvert((String) value, useJava));
out.printf(" { \"%s\", \"%s\" },\n", key, CLDRConverter.saveConvert(valStr, useJava));
}
} else if (value instanceof String[]) {
String[] values = (String[]) value;
@ -268,7 +270,8 @@ class ResourceBundleGenerator implements BundleGenerator {
out.printf("public class %s implements LocaleDataMetaInfo {\n", className);
out.printf(" private static final Map<String, String> resourceNameToLocales = new HashMap<>();\n" +
(CLDRConverter.isBaseModule ?
" private static final Map<Locale, String[]> parentLocalesMap = new HashMap<>();\n\n" :
" private static final Map<Locale, String[]> parentLocalesMap = new HashMap<>();\n" +
" private static final Map<String, String> languageAliasMap = new HashMap<>();\n\n" :
"\n") +
" static {\n");
@ -299,24 +302,35 @@ class ResourceBundleGenerator implements BundleGenerator {
} else {
if ("AvailableLocales".equals(key)) {
out.printf(" resourceNameToLocales.put(\"%s\",\n", key);
out.printf(" \"%s\");\n", toLocaleList(metaInfo.get(key), false));
out.printf(" \"%s\");\n", toLocaleList(applyLanguageAliases(metaInfo.get(key)), false));
}
}
}
// for languageAliasMap
if (CLDRConverter.isBaseModule) {
CLDRConverter.handlerSupplMeta.getLanguageAliasData().forEach((key, value) -> {
out.printf(" languageAliasMap.put(\"%s\", \"%s\");\n", key, value);
});
}
out.printf(" }\n\n");
// end of static initializer block.
// Short TZ names for delayed initialization
// Canonical TZ names for delayed initialization
if (CLDRConverter.isBaseModule) {
out.printf(" private static class TZShortIDMapHolder {\n");
out.printf(" static final Map<String, String> tzShortIDMap = new HashMap<>();\n");
out.printf(" private static class TZCanonicalIDMapHolder {\n");
out.printf(" static final Map<String, String> tzCanonicalIDMap = new HashMap<>(600);\n");
out.printf(" static {\n");
CLDRConverter.handlerTimeZone.getData().entrySet().stream()
.forEach(e -> {
out.printf(" tzShortIDMap.put(\"%s\", \"%s\");\n", e.getKey(),
((String)e.getValue()));
String[] ids = ((String)e.getValue()).split("\\s");
out.printf(" tzCanonicalIDMap.put(\"%s\", \"%s\");\n", e.getKey(),
ids[0]);
for (int i = 1; i < ids.length; i++) {
out.printf(" tzCanonicalIDMap.put(\"%s\", \"%s\");\n", ids[i],
ids[0]);
}
});
out.printf(" }\n }\n\n");
}
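Editor's note: the generated TZCanonicalIDMapHolder now treats each value from the time zone handler as a whitespace-separated list whose first element is the canonical Olson id; the original key and every remaining alias all map to that first element. A minimal sketch of the fan-out with a hypothetical key and alias list (the real entries come from handlerTimeZone):

    import java.util.HashMap;
    import java.util.Map;

    public class CanonicalIdSketch {
        public static void main(String[] args) {
            // Hypothetical handler entry: key -> "canonical alias1 alias2 ...".
            String key = "PST";
            String value = "America/Los_Angeles US/Pacific";

            Map<String, String> tzCanonicalIDMap = new HashMap<>();
            String[] ids = value.split("\\s");
            tzCanonicalIDMap.put(key, ids[0]);          // original key -> canonical id
            for (int i = 1; i < ids.length; i++) {
                tzCanonicalIDMap.put(ids[i], ids[0]);   // every alias -> canonical id
            }
            tzCanonicalIDMap.forEach((k, v) -> System.out.println(k + " -> " + v));
        }
    }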
@ -333,8 +347,12 @@ class ResourceBundleGenerator implements BundleGenerator {
if (CLDRConverter.isBaseModule) {
out.printf(" @Override\n" +
" public Map<String, String> tzShortIDs() {\n" +
" return TZShortIDMapHolder.tzShortIDMap;\n" +
" public Map<String, String> getLanguageAliasMap() {\n" +
" return languageAliasMap;\n" +
" }\n\n");
out.printf(" @Override\n" +
" public Map<String, String> tzCanonicalIDs() {\n" +
" return TZCanonicalIDMapHolder.tzCanonicalIDMap;\n" +
" }\n\n");
out.printf(" public Map<Locale, String[]> parentLocales() {\n" +
" return parentLocalesMap;\n" +
@ -370,4 +388,13 @@ class ResourceBundleGenerator implements BundleGenerator {
}
return sb.toString();
}
private static SortedSet<String> applyLanguageAliases(SortedSet<String> tags) {
CLDRConverter.handlerSupplMeta.getLanguageAliasData().forEach((key, value) -> {
if (tags.remove(key)) {
tags.add(value);
}
});
return tags;
}
}
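Editor's note: applyLanguageAliases rewrites the AvailableLocales list in place; any tag that appears as a key in the alias data collected from supplementalMetadata is replaced by its modern equivalent before the list is written out. A minimal sketch with a hypothetical alias pair:

    import java.util.Map;
    import java.util.SortedSet;
    import java.util.TreeSet;

    public class AliasApplySketch {
        public static void main(String[] args) {
            // Hypothetical alias data: legacy tag -> replacement tag.
            Map<String, String> languageAliasData = Map.of("tl", "fil");

            SortedSet<String> tags = new TreeSet<>(java.util.List.of("en", "tl", "ja"));
            languageAliasData.forEach((key, value) -> {
                if (tags.remove(key)) {   // drop the legacy tag ...
                    tags.add(value);      // ... and add its replacement
                }
            });
            System.out.println(tags);     // [en, fil, ja]
        }
    }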

View File

@ -27,6 +27,8 @@ package build.tools.cldrconverter;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Stream;
import org.xml.sax.Attributes;
import org.xml.sax.InputSource;
@ -38,6 +40,12 @@ import org.xml.sax.SAXException;
*/
class SupplementalMetadataParseHandler extends AbstractLDMLHandler<Object> {
private final Map<String, String> languageAliasMap;
SupplementalMetadataParseHandler() {
languageAliasMap = new HashMap<>();
}
@Override
public InputSource resolveEntity(String publicID, String systemID) throws IOException, SAXException {
// avoid HTTP traffic to unicode.org
@ -57,6 +65,17 @@ class SupplementalMetadataParseHandler extends AbstractLDMLHandler<Object> {
}
pushIgnoredContainer(qName);
break;
case "languageAlias":
String aliasReason = attributes.getValue("reason");
if ("deprecated".equals(aliasReason) || "legacy".equals(aliasReason)) {
String tag = attributes.getValue("type");
if (!checkLegacyLocales(tag)) {
languageAliasMap.put(tag.replaceAll("_", "-"),
attributes.getValue("replacement").replaceAll("_", "-"));
}
}
pushIgnoredContainer(qName);
break;
default:
// treat anything else as a container
pushContainer(qName, attributes);
@ -69,4 +88,13 @@ class SupplementalMetadataParseHandler extends AbstractLDMLHandler<Object> {
.map(k -> String.format(" \"%s\", \"%s\",", k, get(k)))
.sorted();
}
Map<String, String> getLanguageAliasData() {
return languageAliasMap;
}
// skip language aliases for JDK legacy locales for ISO compatibility
private boolean checkLegacyLocales(String tag) {
return (tag.startsWith("no") || tag.startsWith("in")
|| tag.startsWith("iw") || tag.startsWith("ji"));
}
}
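Editor's note: the languageAlias branch above records deprecated/legacy tags in BCP 47 form (underscores become hyphens) and skips the JDK legacy locales (no, in, iw, ji) so the JDK's existing handling of those tags is left untouched. A minimal sketch of that filter; the alias pairs are hypothetical examples, not actual supplementalMetadata content:

    import java.util.HashMap;
    import java.util.Map;

    public class LanguageAliasSketch {
        // Mirrors checkLegacyLocales(): these prefixes are handled by the JDK itself.
        static boolean isLegacy(String tag) {
            return tag.startsWith("no") || tag.startsWith("in")
                || tag.startsWith("iw") || tag.startsWith("ji");
        }

        public static void main(String[] args) {
            Map<String, String> aliases = new HashMap<>();
            // Hypothetical (type, replacement) pairs as they might appear in the XML.
            String[][] input = { {"tl", "fil"}, {"iw", "he"}, {"zh_CN", "zh-Hans-CN"} };
            for (String[] alias : input) {
                if (!isLegacy(alias[0])) {
                    aliases.put(alias[0].replaceAll("_", "-"),
                                alias[1].replaceAll("_", "-"));
                }
            }
            System.out.println(aliases);  // contains tl=fil and zh-CN=zh-Hans-CN; iw is skipped
        }
    }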

View File

@ -88,7 +88,6 @@ $(eval $(call SetupJdkExecutable, BUILD_UNPACKEXE, \
CFLAGS_solaris := -KPIC, \
CFLAGS_macosx := -fPIC, \
DISABLED_WARNINGS_gcc := unused-result implicit-fallthrough, \
DISABLED_WARNINGS_microsoft := 4005, \
LDFLAGS := $(UNPACKEXE_ZIPOBJS) \
$(LDFLAGS_JDKEXE) $(LDFLAGS_CXX_JDK) \
$(call SET_SHARED_LIBRARY_ORIGIN), \

View File

@ -40,7 +40,6 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBUNPACK, \
$(LIBJAVA_HEADER_FLAGS), \
CFLAGS_release := -DPRODUCT, \
DISABLED_WARNINGS_gcc := implicit-fallthrough, \
DISABLED_WARNINGS_microsoft := 4005, \
LDFLAGS := $(LDFLAGS_JDKLIB) $(LDFLAGS_CXX_JDK) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
LDFLAGS_windows := -map:$(SUPPORT_OUTPUTDIR)/native/$(MODULE)/unpack.map -debug, \

View File

@ -48,6 +48,21 @@ BUILD_HOTSPOT_JTREG_OUTPUT_DIR := $(OUTPUTDIR)/support/test/hotspot/jtreg/native
BUILD_HOTSPOT_JTREG_IMAGE_DIR := $(TEST_IMAGE_DIR)/hotspot/jtreg
################################################################################
# Former VM TestBase tests.
################################################################################
VM_TESTBASE_DIR := $(TOPDIR)/test/hotspot/jtreg/vmTestbase
VM_SHARE_INCLUDES := \
-I$(VM_TESTBASE_DIR)/vm/share \
-I$(VM_TESTBASE_DIR)/nsk/share/native \
-I$(VM_TESTBASE_DIR)/nsk/share/jni
BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libProcessUtils := $(VM_SHARE_INCLUDES)
################################################################################
# Platform specific setup
ifneq ($(OPENJDK_TARGET_OS)-$(OPENJDK_TARGET_CPU_ARCH), solaris-sparc)
BUILD_HOTSPOT_JTREG_EXCLUDE += liboverflow.c exeThreadSignalMask.c

View File

@ -32,9 +32,6 @@
#include "nativeInst_aarch64.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#endif
#define __ ce->masm()->
@ -350,42 +347,4 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
__ b(_continuation);
}
/////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
// At this point we know that marking is in progress.
// If do_load() is true then we have to emit the
// load of the previous value; otherwise it has already
// been loaded into _pre_val.
__ bind(_entry);
assert(pre_val()->is_register(), "Precondition.");
Register pre_val_reg = pre_val()->as_register();
if (do_load()) {
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
}
__ cbz(pre_val_reg, _continuation);
ce->store_parameter(pre_val()->as_register(), 0);
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
__ b(_continuation);
}
void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
assert(addr()->is_register(), "Precondition.");
assert(new_val()->is_register(), "Precondition.");
Register new_val_reg = new_val()->as_register();
__ cbz(new_val_reg, _continuation);
ce->store_parameter(addr()->as_pointer_register(), 0);
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
__ b(_continuation);
}
#endif // INCLUDE_ALL_GCS
/////////////////////////////////////////////////////////////////////////////
#undef __

View File

@ -1558,7 +1558,16 @@ void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
assert(VM_Version::supports_cx8(), "wrong machine");
Register addr = as_reg(op->addr());
Register addr;
if (op->addr()->is_register()) {
addr = as_reg(op->addr());
} else {
assert(op->addr()->is_address(), "what else?");
LIR_Address* addr_ptr = op->addr()->as_address_ptr();
assert(addr_ptr->disp() == 0, "need 0 disp");
assert(addr_ptr->index() == LIR_OprDesc::illegalOpr(), "need 0 index");
addr = as_reg(addr_ptr->base());
}
Register newval = as_reg(op->new_value());
Register cmpval = as_reg(op->cmp_value());
Label succeed, fail, around;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -144,8 +144,22 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
// accumulate fixed displacements
if (index->is_constant()) {
large_disp += (intx)(index->as_constant_ptr()->as_jint()) << shift;
LIR_Const *constant = index->as_constant_ptr();
if (constant->type() == T_INT) {
large_disp += index->as_jint() << shift;
} else {
assert(constant->type() == T_LONG, "should be");
jlong c = index->as_jlong() << shift;
if ((jlong)((jint)c) == c) {
large_disp += c;
index = LIR_OprFact::illegalOpr;
} else {
LIR_Opr tmp = new_register(T_LONG);
__ move(index, tmp);
index = tmp;
// apply shift and displacement below
}
}
}
if (index->is_register()) {
@ -183,9 +197,8 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
}
}
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
BasicType type, bool needs_card_mark) {
BasicType type) {
int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
int elem_size = type2aelembytes(type);
int shift = exact_log2(elem_size);
@ -206,16 +219,7 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
LIR_Address::scale(type),
offset_in_bytes, type);
}
if (needs_card_mark) {
// This store will need a precise card mark, so go ahead and
// compute the full address instead of computing once for the
// store and again for the card mark.
LIR_Opr tmp = new_pointer_register();
__ leal(LIR_OprFact::address(addr), tmp);
return new LIR_Address(tmp, type);
} else {
return addr;
}
}
LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
@ -305,87 +309,17 @@ void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp)
__ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
}
//----------------------------------------------------------------------
// visitor functions
//----------------------------------------------------------------------
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
assert(x->is_pinned(),"");
bool needs_range_check = x->compute_needs_range_check();
bool use_length = x->length() != NULL;
bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
!get_jobject_constant(x->value())->is_null_object() ||
x->should_profile());
LIRItem array(x->array(), this);
LIRItem index(x->index(), this);
LIRItem value(x->value(), this);
LIRItem length(this);
array.load_item();
index.load_nonconstant();
if (use_length && needs_range_check) {
length.set_instruction(x->length());
length.load_item();
}
if (needs_store_check || x->check_boolean()) {
value.load_item();
} else {
value.load_for_store(x->elt_type());
}
set_no_result(x);
// the CodeEmitInfo must be duplicated for each different
// LIR-instruction because spilling can occur anywhere between two
// instructions and so the debug information must be different
CodeEmitInfo* range_check_info = state_for(x);
CodeEmitInfo* null_check_info = NULL;
if (x->needs_null_check()) {
null_check_info = new CodeEmitInfo(range_check_info);
}
// emit array address setup early so it schedules better
// FIXME? No harm in this on aarch64, and it might help
LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
if (GenerateRangeChecks && needs_range_check) {
if (use_length) {
__ cmp(lir_cond_belowEqual, length.result(), index.result());
__ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
} else {
array_range_check(array.result(), index.result(), null_check_info, range_check_info);
// range_check also does the null check
null_check_info = NULL;
}
}
if (GenerateArrayStoreCheck && needs_store_check) {
void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
LIR_Opr tmp1 = new_register(objectType);
LIR_Opr tmp2 = new_register(objectType);
LIR_Opr tmp3 = new_register(objectType);
CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
__ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
}
if (obj_store) {
// Needs GC write barriers.
pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
__ move(value.result(), array_addr, null_check_info);
// Seems to be a precise
post_barrier(LIR_OprFact::address(array_addr), value.result());
} else {
LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
__ move(result, array_addr, null_check_info);
}
__ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}
//----------------------------------------------------------------------
// visitor functions
//----------------------------------------------------------------------
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
assert(x->is_pinned(),"");
LIRItem obj(x->obj(), this);
@ -771,76 +705,42 @@ void LIRGenerator::do_CompareOp(CompareOp* x) {
}
}
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
assert(x->number_of_arguments() == 4, "wrong type");
LIRItem obj (x->argument_at(0), this); // object
LIRItem offset(x->argument_at(1), this); // offset of field
LIRItem cmp (x->argument_at(2), this); // value to compare with field
LIRItem val (x->argument_at(3), this); // replace field with val if matches cmp
assert(obj.type()->tag() == objectTag, "invalid type");
// In 64bit the type can be long, sparc doesn't have this assert
// assert(offset.type()->tag() == intTag, "invalid type");
assert(cmp.type()->tag() == type->tag(), "invalid type");
assert(val.type()->tag() == type->tag(), "invalid type");
// get address of field
obj.load_item();
offset.load_nonconstant();
val.load_item();
cmp.load_item();
LIR_Address* a;
if(offset.result()->is_constant()) {
jlong c = offset.result()->as_jlong();
if ((jlong)((jint)c) == c) {
a = new LIR_Address(obj.result(),
(jint)c,
as_BasicType(type));
} else {
LIR_Opr tmp = new_register(T_LONG);
__ move(offset.result(), tmp);
a = new LIR_Address(obj.result(),
tmp,
as_BasicType(type));
}
} else {
a = new LIR_Address(obj.result(),
offset.result(),
0,
as_BasicType(type));
}
LIR_Opr addr = new_pointer_register();
__ leal(LIR_OprFact::address(a), addr);
if (type == objectType) { // Write-barrier needed for Object fields.
// Do the pre-write barrier, if any.
pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
}
LIR_Opr result = rlock_result(x);
LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience
if (type == objectType)
__ cas_obj(addr, cmp.result(), val.result(), new_register(T_INT), new_register(T_INT),
result);
else if (type == intType)
__ cas_int(addr, cmp.result(), val.result(), ill, ill);
else if (type == longType)
__ cas_long(addr, cmp.result(), val.result(), ill, ill);
else {
new_value.load_item();
cmp_value.load_item();
LIR_Opr result = new_register(T_INT);
if (type == T_OBJECT || type == T_ARRAY) {
__ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
} else if (type == T_INT) {
__ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
} else if (type == T_LONG) {
__ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
} else {
ShouldNotReachHere();
Unimplemented();
}
__ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result);
return result;
}
if (type == objectType) { // Write-barrier needed for Object fields.
// Seems to be precise
post_barrier(addr, val.result());
}
LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
bool is_oop = type == T_OBJECT || type == T_ARRAY;
LIR_Opr result = new_register(type);
value.load_item();
assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
LIR_Opr tmp = new_register(T_INT);
__ xchg(addr, value.result(), result, tmp);
return result;
}
LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
LIR_Opr result = new_register(type);
value.load_item();
assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
LIR_Opr tmp = new_register(T_INT);
__ xadd(addr, value.result(), result, tmp);
return result;
}
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
@ -1287,7 +1187,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
LIRItem obj(x->obj(), this);
CodeEmitInfo* patching_info = NULL;
if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
// must do this before locking the destination register as an oop register,
// and before the obj is loaded (the latter is for deoptimization)
patching_info = state_for(x, x->state_before());
@ -1433,84 +1333,3 @@ void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
__ volatile_load_mem_reg(address, result, info);
}
void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
BasicType type, bool is_volatile) {
LIR_Address* addr = new LIR_Address(src, offset, type);
__ load(addr, dst);
}
void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
BasicType type, bool is_volatile) {
LIR_Address* addr = new LIR_Address(src, offset, type);
bool is_obj = (type == T_ARRAY || type == T_OBJECT);
if (is_obj) {
// Do the pre-write barrier, if any.
pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
__ move(data, addr);
assert(src->is_register(), "must be register");
// Seems to be a precise address
post_barrier(LIR_OprFact::address(addr), data);
} else {
__ move(data, addr);
}
}
void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
BasicType type = x->basic_type();
LIRItem src(x->object(), this);
LIRItem off(x->offset(), this);
LIRItem value(x->value(), this);
src.load_item();
off.load_nonconstant();
// We can cope with a constant increment in an xadd
if (! (x->is_add()
&& value.is_constant()
&& can_inline_as_constant(x->value()))) {
value.load_item();
}
LIR_Opr dst = rlock_result(x, type);
LIR_Opr data = value.result();
bool is_obj = (type == T_ARRAY || type == T_OBJECT);
LIR_Opr offset = off.result();
if (data == dst) {
LIR_Opr tmp = new_register(data->type());
__ move(data, tmp);
data = tmp;
}
LIR_Address* addr;
if (offset->is_constant()) {
jlong l = offset->as_jlong();
assert((jlong)((jint)l) == l, "offset too large for constant");
jint c = (jint)l;
addr = new LIR_Address(src.result(), c, type);
} else {
addr = new LIR_Address(src.result(), offset, type);
}
LIR_Opr tmp = new_register(T_INT);
LIR_Opr ptr = LIR_OprFact::illegalOpr;
if (x->is_add()) {
__ xadd(LIR_OprFact::address(addr), data, dst, tmp);
} else {
if (is_obj) {
// Do the pre-write barrier, if any.
ptr = new_pointer_register();
__ add(src.result(), off.result(), ptr);
pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
}
__ xchg(LIR_OprFact::address(addr), data, dst, tmp);
if (is_obj) {
post_barrier(ptr, data);
}
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -358,6 +358,16 @@ void C1_MacroAssembler::remove_frame(int framesize) {
void C1_MacroAssembler::verified_entry() {
}
void C1_MacroAssembler::load_parameter(int offset_in_words, Register reg) {
// rbp, + 0: link
// + 1: return address
// + 2: argument with offset 0
// + 3: argument with offset 1
// + 4: ...
ldr(reg, Address(rfp, (offset_in_words + 2) * BytesPerWord));
}
#ifndef PRODUCT
void C1_MacroAssembler::verify_stack_oop(int stack_offset) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -109,4 +109,6 @@ using MacroAssembler::null_check;
// This platform only uses signal-based null checks. The Label is not needed.
void null_check(Register r, Label *Lnull = NULL) { MacroAssembler::null_check(r); }
void load_parameter(int offset_in_words, Register reg);
#endif // CPU_AARCH64_VM_C1_MACROASSEMBLER_AARCH64_HPP

View File

@ -43,11 +43,6 @@
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_aarch64.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#endif
// Implementation of StubAssembler
@ -173,31 +168,32 @@ class StubFrame: public StackObj {
~StubFrame();
};;
void StubAssembler::prologue(const char* name, bool must_gc_arguments) {
set_info(name, must_gc_arguments);
enter();
}
void StubAssembler::epilogue() {
leave();
ret(lr);
}
#define __ _sasm->
StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
_sasm = sasm;
__ set_info(name, must_gc_arguments);
__ enter();
__ prologue(name, must_gc_arguments);
}
// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
// rbp, + 0: link
// + 1: return address
// + 2: argument with offset 0
// + 3: argument with offset 1
// + 4: ...
__ ldr(reg, Address(rfp, (offset_in_words + 2) * BytesPerWord));
__ load_parameter(offset_in_words, reg);
}
StubFrame::~StubFrame() {
__ leave();
__ ret(lr);
__ epilogue();
}
#undef __
@ -1100,136 +1096,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
}
break;
#if INCLUDE_ALL_GCS
case g1_pre_barrier_slow_id:
{
StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
// arg0 : previous value of memory
BarrierSet* bs = BarrierSet::barrier_set();
if (bs->kind() != BarrierSet::G1BarrierSet) {
__ mov(r0, (int)id);
__ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
__ should_not_reach_here();
break;
}
const Register pre_val = r0;
const Register thread = rthread;
const Register tmp = rscratch1;
Address in_progress(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
Address queue_index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));
Label done;
Label runtime;
// Is marking still active?
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
__ ldrw(tmp, in_progress);
} else {
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
__ ldrb(tmp, in_progress);
}
__ cbzw(tmp, done);
// Can we store original value in the thread's buffer?
__ ldr(tmp, queue_index);
__ cbz(tmp, runtime);
__ sub(tmp, tmp, wordSize);
__ str(tmp, queue_index);
__ ldr(rscratch2, buffer);
__ add(tmp, tmp, rscratch2);
f.load_argument(0, rscratch2);
__ str(rscratch2, Address(tmp, 0));
__ b(done);
__ bind(runtime);
__ push_call_clobbered_registers();
f.load_argument(0, pre_val);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
__ pop_call_clobbered_registers();
__ bind(done);
}
break;
case g1_post_barrier_slow_id:
{
StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);
BarrierSet* bs = BarrierSet::barrier_set();
if (bs->kind() != BarrierSet::G1BarrierSet) {
__ mov(r0, (int)id);
__ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
__ should_not_reach_here();
break;
}
// arg0: store_address
Address store_addr(rfp, 2*BytesPerWord);
Label done;
Label runtime;
// At this point we know new_value is non-NULL and the new_value crosses regions.
// Must check to see if card is already dirty
const Register thread = rthread;
Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));
const Register card_offset = rscratch2;
// LR is free here, so we can use it to hold the byte_map_base.
const Register byte_map_base = lr;
assert_different_registers(card_offset, byte_map_base, rscratch1);
f.load_argument(0, card_offset);
__ lsr(card_offset, card_offset, CardTable::card_shift);
__ load_byte_map_base(byte_map_base);
__ ldrb(rscratch1, Address(byte_map_base, card_offset));
__ cmpw(rscratch1, (int)G1CardTable::g1_young_card_val());
__ br(Assembler::EQ, done);
assert((int)CardTable::dirty_card_val() == 0, "must be 0");
__ membar(Assembler::StoreLoad);
__ ldrb(rscratch1, Address(byte_map_base, card_offset));
__ cbzw(rscratch1, done);
// storing region crossing non-NULL, card is clean.
// dirty card and log.
__ strb(zr, Address(byte_map_base, card_offset));
// Convert card offset into an address in card_addr
Register card_addr = card_offset;
__ add(card_addr, byte_map_base, card_addr);
__ ldr(rscratch1, queue_index);
__ cbz(rscratch1, runtime);
__ sub(rscratch1, rscratch1, wordSize);
__ str(rscratch1, queue_index);
// Reuse LR to hold buffer_addr
const Register buffer_addr = lr;
__ ldr(buffer_addr, buffer);
__ str(card_addr, Address(buffer_addr, rscratch1));
__ b(done);
__ bind(runtime);
__ push_call_clobbered_registers();
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
__ pop_call_clobbered_registers();
__ bind(done);
}
break;
#endif
case predicate_failed_trap_id:
{
StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);

View File

@ -24,6 +24,9 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1CardTable.hpp"
@ -307,4 +310,167 @@ void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet deco
}
#ifdef COMPILER1
#undef __
#define __ ce->masm()->
void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
// At this point we know that marking is in progress.
// If do_load() is true then we have to emit the
// load of the previous value; otherwise it has already
// been loaded into _pre_val.
__ bind(*stub->entry());
assert(stub->pre_val()->is_register(), "Precondition.");
Register pre_val_reg = stub->pre_val()->as_register();
if (stub->do_load()) {
ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
}
__ cbz(pre_val_reg, *stub->continuation());
ce->store_parameter(stub->pre_val()->as_register(), 0);
__ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
__ b(*stub->continuation());
}
void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
__ bind(*stub->entry());
assert(stub->addr()->is_register(), "Precondition.");
assert(stub->new_val()->is_register(), "Precondition.");
Register new_val_reg = stub->new_val()->as_register();
__ cbz(new_val_reg, *stub->continuation());
ce->store_parameter(stub->addr()->as_pointer_register(), 0);
__ far_call(RuntimeAddress(bs->post_barrier_c1_runtime_code_blob()->code_begin()));
__ b(*stub->continuation());
}
#undef __
#define __ sasm->
void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
__ prologue("g1_pre_barrier", false);
// arg0 : previous value of memory
BarrierSet* bs = BarrierSet::barrier_set();
const Register pre_val = r0;
const Register thread = rthread;
const Register tmp = rscratch1;
Address in_progress(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
Address queue_index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));
Label done;
Label runtime;
// Is marking still active?
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
__ ldrw(tmp, in_progress);
} else {
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
__ ldrb(tmp, in_progress);
}
__ cbzw(tmp, done);
// Can we store original value in the thread's buffer?
__ ldr(tmp, queue_index);
__ cbz(tmp, runtime);
__ sub(tmp, tmp, wordSize);
__ str(tmp, queue_index);
__ ldr(rscratch2, buffer);
__ add(tmp, tmp, rscratch2);
__ load_parameter(0, rscratch2);
__ str(rscratch2, Address(tmp, 0));
__ b(done);
__ bind(runtime);
__ push_call_clobbered_registers();
__ load_parameter(0, pre_val);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
__ pop_call_clobbered_registers();
__ bind(done);
__ epilogue();
}
void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
__ prologue("g1_post_barrier", false);
// arg0: store_address
Address store_addr(rfp, 2*BytesPerWord);
BarrierSet* bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
Label done;
Label runtime;
// At this point we know new_value is non-NULL and the new_value crosses regions.
// Must check to see if card is already dirty
const Register thread = rthread;
Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));
const Register card_offset = rscratch2;
// LR is free here, so we can use it to hold the byte_map_base.
const Register byte_map_base = lr;
assert_different_registers(card_offset, byte_map_base, rscratch1);
__ load_parameter(0, card_offset);
__ lsr(card_offset, card_offset, CardTable::card_shift);
__ load_byte_map_base(byte_map_base);
__ ldrb(rscratch1, Address(byte_map_base, card_offset));
__ cmpw(rscratch1, (int)G1CardTable::g1_young_card_val());
__ br(Assembler::EQ, done);
assert((int)CardTable::dirty_card_val() == 0, "must be 0");
__ membar(Assembler::StoreLoad);
__ ldrb(rscratch1, Address(byte_map_base, card_offset));
__ cbzw(rscratch1, done);
// storing region crossing non-NULL, card is clean.
// dirty card and log.
__ strb(zr, Address(byte_map_base, card_offset));
// Convert card offset into an address in card_addr
Register card_addr = card_offset;
__ add(card_addr, byte_map_base, card_addr);
__ ldr(rscratch1, queue_index);
__ cbz(rscratch1, runtime);
__ sub(rscratch1, rscratch1, wordSize);
__ str(rscratch1, queue_index);
// Reuse LR to hold buffer_addr
const Register buffer_addr = lr;
__ ldr(buffer_addr, buffer);
__ str(card_addr, Address(buffer_addr, rscratch1));
__ b(done);
__ bind(runtime);
__ push_call_clobbered_registers();
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
__ pop_call_clobbered_registers();
__ bind(done);
__ epilogue();
}
#undef __
#endif // COMPILER1

View File

@ -27,6 +27,12 @@
#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
#include "utilities/macros.hpp"
class LIR_Assembler;
class StubAssembler;
class G1PreBarrierStub;
class G1PostBarrierStub;
class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
protected:
@ -54,6 +60,14 @@ protected:
Address dst, Register val, Register tmp1, Register tmp2);
public:
#ifdef COMPILER1
void gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub);
void gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub);
void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
void generate_c1_post_barrier_runtime_stub(StubAssembler* sasm);
#endif
void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register dst, Address src, Register tmp1, Register tmp_thread);
};

View File

@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "runtime/jniHandles.hpp"
#define __ masm->
@ -64,3 +65,11 @@ void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators
default: Unimplemented();
}
}
void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
Register obj, Register tmp, Label& slowpath) {
// If mask changes we need to ensure that the inverse is still encodable as an immediate
STATIC_ASSERT(JNIHandles::weak_tag_mask == 1);
__ andr(obj, obj, ~JNIHandles::weak_tag_mask);
__ ldr(obj, Address(obj, 0)); // *obj
}

View File

@ -40,6 +40,9 @@ public:
virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2);
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
Register obj, Register tmp, Label& slowpath);
virtual void barrier_stubs_init() {}
};

View File

@ -56,7 +56,7 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob
__ strb(zr, Address(obj, rscratch1));
__ bind(L_already_dirty);
} else {
if (UseConcMarkSweepGC && CMSPrecleaningEnabled) {
if (ct->scanned_concurrently()) {
__ membar(Assembler::StoreStore);
}
__ strb(zr, Address(obj, rscratch1));
@ -79,7 +79,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
const Register count = end; // 'end' register contains bytes count now
__ load_byte_map_base(scratch);
__ add(start, start, scratch);
if (UseConcMarkSweepGC) {
if (ct->scanned_concurrently()) {
__ membar(__ StoreStore);
}
__ bind(L_loop);

View File

@ -25,6 +25,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
@ -82,11 +84,9 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
// robj ^ rcounter ^ rcounter == robj
// robj is address dependent on rcounter.
// If mask changes we need to ensure that the inverse is still encodable as an immediate
STATIC_ASSERT(JNIHandles::weak_tag_mask == 1);
__ andr(robj, robj, ~JNIHandles::weak_tag_mask);
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->try_resolve_jobject_in_native(masm, c_rarg0, robj, rscratch1, slow);
__ ldr(robj, Address(robj, 0)); // *obj
__ lsr(roffset, c_rarg2, 2); // offset
assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
@ -177,4 +177,3 @@ address JNI_FastGetField::generate_fast_get_float_field() {
address JNI_FastGetField::generate_fast_get_double_field() {
return generate_fast_get_int_field0(T_DOUBLE);
}

View File

@ -30,6 +30,7 @@
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/frame.inline.hpp"
#define __ _masm->

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -133,9 +133,29 @@ void NativeMovConstReg::set_data(intptr_t x) {
address addr = MacroAssembler::target_addr_for_insn(instruction_address());
*(intptr_t*)addr = x;
} else {
// Store x into the instruction stream.
MacroAssembler::pd_patch_instruction(instruction_address(), (address)x);
ICache::invalidate_range(instruction_address(), instruction_size);
}
// Find and replace the oop/metadata corresponding to this
// instruction in the oops section.
CodeBlob* cb = CodeCache::find_blob(instruction_address());
nmethod* nm = cb->as_nmethod_or_null();
if (nm != NULL) {
RelocIterator iter(nm, instruction_address(), next_instruction_address());
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
oop* oop_addr = iter.oop_reloc()->oop_addr();
*oop_addr = cast_to_oop(x);
break;
} else if (iter.type() == relocInfo::metadata_type) {
Metadata** metadata_addr = iter.metadata_reloc()->metadata_addr();
*metadata_addr = (Metadata*)x;
break;
}
}
}
}
void NativeMovConstReg::print() {
@ -348,7 +368,7 @@ void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
CodeBuffer cb(code_pos, instruction_size);
MacroAssembler a(&cb);
a.mov(rscratch1, entry);
a.movptr(rscratch1, (uintptr_t)entry);
a.br(rscratch1);
ICache::invalidate_range(code_pos, instruction_size);

View File

@ -33,9 +33,6 @@
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_arm.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#endif // INCLUDE_ALL_GCS
#define __ ce->masm()->
@ -466,45 +463,4 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
__ b(_continuation);
}
/////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
// At this point we know that marking is in progress.
// If do_load() is true then we have to emit the
// load of the previous value; otherwise it has already
// been loaded into _pre_val.
__ bind(_entry);
assert(pre_val()->is_register(), "Precondition.");
Register pre_val_reg = pre_val()->as_register();
if (do_load()) {
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
}
__ cbz(pre_val_reg, _continuation);
ce->verify_reserved_argument_area_size(1);
__ str(pre_val_reg, Address(SP));
__ call(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id), relocInfo::runtime_call_type);
__ b(_continuation);
}
void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
assert(addr()->is_register(), "Precondition.");
assert(new_val()->is_register(), "Precondition.");
Register new_val_reg = new_val()->as_register();
__ cbz(new_val_reg, _continuation);
ce->verify_reserved_argument_area_size(1);
__ str(addr()->as_pointer_register(), Address(SP));
__ call(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id), relocInfo::runtime_call_type);
__ b(_continuation);
}
#endif // INCLUDE_ALL_GCS
/////////////////////////////////////////////////////////////////////////////
#undef __

View File

@ -34,6 +34,7 @@
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "runtime/sharedRuntime.hpp"
@ -374,33 +375,18 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
}
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
BasicType type, bool needs_card_mark) {
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr, BasicType type) {
int base_offset = arrayOopDesc::base_offset_in_bytes(type);
int elem_size = type2aelembytes(type);
if (index_opr->is_constant()) {
int offset = base_offset + index_opr->as_constant_ptr()->as_jint() * elem_size;
if (needs_card_mark) {
LIR_Opr base_opr = new_pointer_register();
add_large_constant(array_opr, offset, base_opr);
return new LIR_Address(base_opr, (intx)0, type);
} else {
return generate_address(array_opr, offset, type);
}
} else {
assert(index_opr->is_register(), "must be");
int scale = exact_log2(elem_size);
if (needs_card_mark) {
LIR_Opr base_opr = new_pointer_register();
LIR_Address* addr = make_address(base_opr, index_opr, (LIR_Address::Scale)scale, type);
__ add(array_opr, LIR_OprFact::intptrConst(base_offset), base_opr);
__ add(base_opr, LIR_OprFact::address(addr), base_opr); // add with shifted/extended register
return new LIR_Address(base_opr, type);
} else {
return generate_address(array_opr, index_opr, scale, base_offset, type);
}
}
}
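emit_array_address loses its needs_card_mark parameter and the card-marking address setup that went with it; with the barrier work handled by the shared access code, the function only has to form the plain element address. A minimal sketch of that arithmetic, with made-up offsets standing in for arrayOopDesc::base_offset_in_bytes() and type2aelembytes():

#include <cstddef>
#include <cstdint>

// element_address = array_base + header_offset + index * element_size
std::uintptr_t element_address(std::uintptr_t array_base,
                               std::size_t header_offset,  // e.g. 16 for the array header
                               std::size_t elem_size,      // e.g. 4 for T_INT
                               std::size_t index) {
  return array_base + header_offset + index * elem_size;
}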
@ -542,88 +528,17 @@ void LIRGenerator::CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LI
}
}
void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
LIR_Opr tmp1 = FrameMap::R0_oop_opr;
LIR_Opr tmp2 = FrameMap::R1_oop_opr;
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
__ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}
//----------------------------------------------------------------------
// visitor functions
//----------------------------------------------------------------------
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
assert(x->is_pinned(),"");
bool needs_range_check = x->compute_needs_range_check();
bool use_length = x->length() != NULL;
bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
!get_jobject_constant(x->value())->is_null_object() ||
x->should_profile());
LIRItem array(x->array(), this);
LIRItem index(x->index(), this);
LIRItem value(x->value(), this);
LIRItem length(this);
array.load_item();
index.load_nonconstant();
if (use_length && needs_range_check) {
length.set_instruction(x->length());
length.load_item();
}
if (needs_store_check || x->check_boolean()) {
value.load_item();
} else {
value.load_for_store(x->elt_type());
}
set_no_result(x);
// the CodeEmitInfo must be duplicated for each different
// LIR-instruction because spilling can occur anywhere between two
// instructions and so the debug information must be different
CodeEmitInfo* range_check_info = state_for(x);
CodeEmitInfo* null_check_info = NULL;
if (x->needs_null_check()) {
null_check_info = new CodeEmitInfo(range_check_info);
}
// emit array address setup early so it schedules better
LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
if (GenerateRangeChecks && needs_range_check) {
if (use_length) {
__ cmp(lir_cond_belowEqual, length.result(), index.result());
__ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
} else {
array_range_check(array.result(), index.result(), null_check_info, range_check_info);
// range_check also does the null check
null_check_info = NULL;
}
}
if (GenerateArrayStoreCheck && needs_store_check) {
LIR_Opr tmp1 = FrameMap::R0_oop_opr;
LIR_Opr tmp2 = FrameMap::R1_oop_opr;
CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
__ store_check(value.result(), array.result(), tmp1, tmp2,
LIR_OprFact::illegalOpr, store_check_info,
x->profiled_method(), x->profiled_bci());
}
#if INCLUDE_ALL_GCS
if (obj_store) {
// Needs GC write barriers.
pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
}
#endif // INCLUDE_ALL_GCS
LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
__ move(result, array_addr, null_check_info);
if (obj_store) {
post_barrier(LIR_OprFact::address(array_addr), value.result());
}
}
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
assert(x->is_pinned(),"");
LIRItem obj(x->obj(), this);
@ -1060,56 +975,52 @@ void LIRGenerator::do_CompareOp(CompareOp* x) {
#endif // __SOFTFP__
}
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
assert(x->number_of_arguments() == 4, "wrong type");
LIRItem obj (x->argument_at(0), this); // object
LIRItem offset(x->argument_at(1), this); // offset of field
LIRItem cmp (x->argument_at(2), this); // value to compare with field
LIRItem val (x->argument_at(3), this); // replace field with val if matches cmp
LIR_Opr addr = new_pointer_register();
LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience
LIR_Opr tmp1 = LIR_OprFact::illegalOpr;
LIR_Opr tmp2 = LIR_OprFact::illegalOpr;
// get address of field
obj.load_item();
offset.load_item();
cmp.load_item();
val.load_item();
__ add(obj.result(), offset.result(), addr);
LIR_Opr result = rlock_result(x);
if (type == objectType) {
#if INCLUDE_ALL_GCS
// Do the pre-write barrier, if any.
pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
#endif // INCLUDE_ALL_GCS
new_value.load_item();
cmp_value.load_item();
LIR_Opr result = new_register(T_INT);
if (type == T_OBJECT || type == T_ARRAY) {
#ifdef AARCH64
if (UseCompressedOops) {
tmp1 = new_pointer_register();
tmp2 = new_pointer_register();
}
#endif // AARCH64
__ cas_obj(addr, cmp.result(), val.result(), tmp1, tmp2, result);
post_barrier(addr, val.result());
}
else if (type == intType) {
__ cas_int(addr, cmp.result(), val.result(), tmp1, tmp1, result);
}
else if (type == longType) {
#endif
__ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
} else if (type == T_INT) {
__ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), tmp1, tmp1, result);
} else if (type == T_LONG) {
#ifndef AARCH64
tmp1 = new_register(T_LONG);
#endif // !AARCH64
__ cas_long(addr, cmp.result(), val.result(), tmp1, tmp2, result);
}
else {
__ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), tmp1, tmp2, result);
} else {
ShouldNotReachHere();
}
return result;
}
LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
bool is_oop = type == T_OBJECT || type == T_ARRAY;
LIR_Opr result = new_register(type);
value.load_item();
assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
LIR_Opr tmp = (UseCompressedOops && is_oop) ? new_pointer_register() : LIR_OprFact::illegalOpr;
__ xchg(addr, value.result(), result, tmp);
return result;
}
LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
LIR_Opr result = new_register(type);
value.load_item();
assert(type == T_INT LP64_ONLY( || type == T_LONG), "unexpected type");
LIR_Opr tmp = new_register(type);
__ xadd(addr, value.result(), result, tmp);
return result;
}
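do_CompareAndSwap and the Unsafe get-and-set/add paths are replaced by the three hooks above (atomic_cmpxchg, atomic_xchg, atomic_add), which the shared C1 access code is expected to call after it has resolved the address and applied any GC barriers. The sketch below models that division of labour with std::atomic; the class and function names are invented for illustration and are not the HotSpot API.

#include <atomic>
#include <cstdint>

struct PlatformAtomics {   // hypothetical per-CPU backend
  static bool cmpxchg(std::atomic<intptr_t>& slot, intptr_t expect, intptr_t next) {
    return slot.compare_exchange_strong(expect, next);
  }
  static intptr_t xchg(std::atomic<intptr_t>& slot, intptr_t next) {
    return slot.exchange(next);
  }
  static intptr_t add(std::atomic<intptr_t>& slot, intptr_t delta) {
    return slot.fetch_add(delta);      // returns the previous value (getAndAdd semantics)
  }
};

// Shared "access" layer: barrier hooks bracket the raw platform operation.
template <typename PreBarrier, typename PostBarrier>
bool access_cmpxchg(std::atomic<intptr_t>& slot, intptr_t expect, intptr_t next,
                    PreBarrier pre, PostBarrier post) {
  pre(slot);                                    // e.g. G1 SATB pre-barrier
  bool ok = PlatformAtomics::cmpxchg(slot, expect, next);
  if (ok) post(slot, next);                     // e.g. card mark / G1 post-barrier
  return ok;
}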
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
address runtime_func;
@ -1409,7 +1320,7 @@ void LIRGenerator::do_BlockBegin(BlockBegin* x) {
void LIRGenerator::do_CheckCast(CheckCast* x) {
LIRItem obj(x->obj(), this);
CodeEmitInfo* patching_info = NULL;
if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
patching_info = state_for(x, x->state_before());
}
@ -1669,110 +1580,3 @@ void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
// TODO-AARCH64 implement with ldar instruction
__ load(address, result, info, lir_patch_none);
}
void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
BasicType type, bool is_volatile) {
#ifdef AARCH64
__ load(new LIR_Address(src, offset, type), dst);
#else
assert(offset->is_single_cpu(), "must be");
if (is_volatile && dst->is_double_cpu()) {
LIR_Opr tmp = new_pointer_register();
__ add(src, offset, tmp);
__ volatile_load_mem_reg(new LIR_Address(tmp, (intx)0, type), dst, NULL);
} else if (type == T_FLOAT || type == T_DOUBLE) {
// fld doesn't have indexed addressing mode
LIR_Opr tmp = new_register(T_INT);
__ add(src, offset, tmp);
__ load(new LIR_Address(tmp, (intx)0, type), dst);
} else {
__ load(new LIR_Address(src, offset, type), dst);
}
#endif // AARCH64
}
void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
BasicType type, bool is_volatile) {
#ifdef AARCH64
LIR_Address* addr = new LIR_Address(src, offset, type);
if (type == T_ARRAY || type == T_OBJECT) {
pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
__ move(data, addr);
assert(src->is_register(), "must be register");
post_barrier(LIR_OprFact::address(addr), data);
} else {
__ move(data, addr);
}
#else
assert(offset->is_single_cpu(), "must be");
if (is_volatile && data->is_double_cpu()) {
LIR_Opr tmp = new_register(T_INT);
__ add(src, offset, tmp);
__ volatile_store_mem_reg(data, new LIR_Address(tmp, (intx)0, type), NULL);
} else if (type == T_FLOAT || type == T_DOUBLE) {
// fst doesn't have indexed addressing mode
LIR_Opr tmp = new_register(T_INT);
__ add(src, offset, tmp);
__ move(data, new LIR_Address(tmp, (intx)0, type));
} else {
LIR_Address* addr = new LIR_Address(src, offset, type);
bool is_obj = (type == T_ARRAY || type == T_OBJECT);
#if INCLUDE_ALL_GCS
if (is_obj) {
// Do the pre-write barrier, if any.
pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
}
#endif // INCLUDE_ALL_GCS
__ move(data, addr);
if (is_obj) {
assert(src->is_register(), "must be register");
post_barrier(LIR_OprFact::address(addr), data);
}
}
#endif // AARCH64
}
void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
BasicType type = x->basic_type();
LIRItem src(x->object(), this);
LIRItem off(x->offset(), this);
LIRItem value(x->value(), this);
src.load_item();
if (x->is_add()) {
value.load_nonconstant();
} else {
value.load_item();
}
off.load_nonconstant();
LIR_Opr dst = rlock_result(x, type);
LIR_Opr data = value.result();
bool is_obj = (type == T_ARRAY || type == T_OBJECT);
assert (type == T_INT || type == T_LONG || (!x->is_add() && is_obj), "unexpected type");
LIR_Opr addr_ptr = new_pointer_register();
__ add(src.result(), off.result(), addr_ptr);
LIR_Address* addr = new LIR_Address(addr_ptr, (intx)0, type);
if (x->is_add()) {
LIR_Opr tmp = new_register(type);
__ xadd(addr_ptr, data, dst, tmp);
} else {
LIR_Opr tmp = (UseCompressedOops && is_obj) ? new_pointer_register() : LIR_OprFact::illegalOpr;
if (is_obj) {
// Do the pre-write barrier, if any.
pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
}
__ xchg(addr_ptr, data, dst, tmp);
if (is_obj) {
// Seems to be a precise address
post_barrier(LIR_OprFact::address(addr), data);
}
}
}

View File

@ -42,11 +42,6 @@
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "vmreg_arm.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#endif
// Note: Rtemp usage is this file should not impact C2 and should be
// correct as long as it is not implicitly used in lower layers (the
@ -356,6 +351,13 @@ static void restore_live_registers_without_return(StubAssembler* sasm, bool rest
restore_live_registers(sasm, true, true, false, restore_fpu_registers);
}
void StubAssembler::save_live_registers() {
::save_live_registers(this);
}
void StubAssembler::restore_live_registers_without_return() {
::restore_live_registers_without_return(this);
}
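The two small StubAssembler methods added above simply re-export the file-local save/restore helpers, so the barrier-set assemblers that now generate the G1 runtime stubs can spill the same register set from another translation unit. A sketch of the pattern, with illustrative names:

namespace {                          // internal linkage, like the static helpers here
  void save_live_registers_impl()    { /* push the C1-live register set */ }
  void restore_without_return_impl() { /* pop it, but do not emit a return */ }
}

class StubAssemblerModel {
 public:
  void save_live_registers()                   { save_live_registers_impl(); }
  void restore_live_registers_without_return() { restore_without_return_impl(); }
};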
void Runtime1::initialize_pd() {
}
@ -533,201 +535,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
}
break;
#if INCLUDE_ALL_GCS
case g1_pre_barrier_slow_id:
{
// Input:
// - pre_val pushed on the stack
__ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);
BarrierSet* bs = BarrierSet::barrier_set();
if (bs->kind() != BarrierSet::G1BarrierSet) {
__ mov(R0, (int)id);
__ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), R0);
__ should_not_reach_here();
break;
}
// save at least the registers that need saving if the runtime is called
#ifdef AARCH64
__ raw_push(R0, R1);
__ raw_push(R2, R3);
const int nb_saved_regs = 4;
#else // AARCH64
const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
const int nb_saved_regs = 6;
assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
__ push(saved_regs);
#endif // AARCH64
const Register r_pre_val_0 = R0; // must be R0, to be ready for the runtime call
const Register r_index_1 = R1;
const Register r_buffer_2 = R2;
Address queue_active(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
Address queue_index(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
Address buffer(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));
Label done;
Label runtime;
// Is marking still active?
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
__ ldrb(R1, queue_active);
__ cbz(R1, done);
__ ldr(r_index_1, queue_index);
__ ldr(r_pre_val_0, Address(SP, nb_saved_regs*wordSize));
__ ldr(r_buffer_2, buffer);
__ subs(r_index_1, r_index_1, wordSize);
__ b(runtime, lt);
__ str(r_index_1, queue_index);
__ str(r_pre_val_0, Address(r_buffer_2, r_index_1));
__ bind(done);
#ifdef AARCH64
__ raw_pop(R2, R3);
__ raw_pop(R0, R1);
#else // AARCH64
__ pop(saved_regs);
#endif // AARCH64
__ ret();
__ bind(runtime);
save_live_registers(sasm);
assert(r_pre_val_0 == c_rarg0, "pre_val should be in R0");
__ mov(c_rarg1, Rthread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), c_rarg0, c_rarg1);
restore_live_registers_without_return(sasm);
__ b(done);
}
break;
case g1_post_barrier_slow_id:
{
// Input:
// - store_addr, pushed on the stack
__ set_info("g1_post_barrier_slow_id", dont_gc_arguments);
BarrierSet* bs = BarrierSet::barrier_set();
if (bs->kind() != BarrierSet::G1BarrierSet) {
__ mov(R0, (int)id);
__ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), R0);
__ should_not_reach_here();
break;
}
Label done;
Label recheck;
Label runtime;
Address queue_index(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
Address buffer(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));
AddressLiteral cardtable(ci_card_table_address_as<address>(), relocInfo::none);
// save at least the registers that need saving if the runtime is called
#ifdef AARCH64
__ raw_push(R0, R1);
__ raw_push(R2, R3);
const int nb_saved_regs = 4;
#else // AARCH64
const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
const int nb_saved_regs = 6;
assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
__ push(saved_regs);
#endif // AARCH64
const Register r_card_addr_0 = R0; // must be R0 for the slow case
const Register r_obj_0 = R0;
const Register r_card_base_1 = R1;
const Register r_tmp2 = R2;
const Register r_index_2 = R2;
const Register r_buffer_3 = R3;
const Register tmp1 = Rtemp;
__ ldr(r_obj_0, Address(SP, nb_saved_regs*wordSize));
// Note: there is a comment in x86 code about not using
// ExternalAddress / lea, due to relocation not working
// properly for that address. Should be OK for arm, where we
// explicitly specify that 'cardtable' has a relocInfo::none
// type.
__ lea(r_card_base_1, cardtable);
__ add(r_card_addr_0, r_card_base_1, AsmOperand(r_obj_0, lsr, CardTable::card_shift));
// first quick check without barrier
__ ldrb(r_tmp2, Address(r_card_addr_0));
__ cmp(r_tmp2, (int)G1CardTable::g1_young_card_val());
__ b(recheck, ne);
__ bind(done);
#ifdef AARCH64
__ raw_pop(R2, R3);
__ raw_pop(R0, R1);
#else // AARCH64
__ pop(saved_regs);
#endif // AARCH64
__ ret();
__ bind(recheck);
__ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), tmp1);
// reload card state after the barrier that ensures the stored oop was visible
__ ldrb(r_tmp2, Address(r_card_addr_0));
assert(CardTable::dirty_card_val() == 0, "adjust this code");
__ cbz(r_tmp2, done);
// storing region crossing non-NULL, card is clean.
// dirty card and log.
assert(0 == (int)CardTable::dirty_card_val(), "adjust this code");
if ((ci_card_table_address_as<intptr_t>() & 0xff) == 0) {
// Card table is aligned so the lowest byte of the table address base is zero.
__ strb(r_card_base_1, Address(r_card_addr_0));
} else {
__ strb(__ zero_register(r_tmp2), Address(r_card_addr_0));
}
__ ldr(r_index_2, queue_index);
__ ldr(r_buffer_3, buffer);
__ subs(r_index_2, r_index_2, wordSize);
__ b(runtime, lt); // go to runtime if now negative
__ str(r_index_2, queue_index);
__ str(r_card_addr_0, Address(r_buffer_3, r_index_2));
__ b(done);
__ bind(runtime);
save_live_registers(sasm);
assert(r_card_addr_0 == c_rarg0, "card_addr should be in R0");
__ mov(c_rarg1, Rthread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), c_rarg0, c_rarg1);
restore_live_registers_without_return(sasm);
__ b(done);
}
break;
#endif // INCLUDE_ALL_GCS
case new_instance_id:
case fast_new_instance_id:
case fast_new_instance_init_check_id:

View File

@ -26,12 +26,18 @@
#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/heapRegion.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#endif
#define __ masm->
@ -120,3 +126,227 @@ void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* mas
#endif // !R9_IS_SCRATCHED
#endif // !AARCH64
}
#ifdef COMPILER1
#undef __
#define __ ce->masm()->
void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
// At this point we know that marking is in progress.
// If do_load() is true then we have to emit the
// load of the previous value; otherwise it has already
// been loaded into _pre_val.
__ bind(*stub->entry());
assert(stub->pre_val()->is_register(), "Precondition.");
Register pre_val_reg = stub->pre_val()->as_register();
if (stub->do_load()) {
ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
}
__ cbz(pre_val_reg, *stub->continuation());
ce->verify_reserved_argument_area_size(1);
__ str(pre_val_reg, Address(SP));
__ call(bs->pre_barrier_c1_runtime_code_blob()->code_begin(), relocInfo::runtime_call_type);
__ b(*stub->continuation());
}
void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
__ bind(*stub->entry());
assert(stub->addr()->is_register(), "Precondition.");
assert(stub->new_val()->is_register(), "Precondition.");
Register new_val_reg = stub->new_val()->as_register();
__ cbz(new_val_reg, *stub->continuation());
ce->verify_reserved_argument_area_size(1);
__ str(stub->addr()->as_pointer_register(), Address(SP));
__ call(bs->post_barrier_c1_runtime_code_blob()->code_begin(), relocInfo::runtime_call_type);
__ b(*stub->continuation());
}
#undef __
#define __ sasm->
void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
// Input:
// - pre_val pushed on the stack
__ set_info("g1_pre_barrier_slow_id", false);
// save at least the registers that need saving if the runtime is called
#ifdef AARCH64
__ raw_push(R0, R1);
__ raw_push(R2, R3);
const int nb_saved_regs = 4;
#else // AARCH64
const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
const int nb_saved_regs = 6;
assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
__ push(saved_regs);
#endif // AARCH64
const Register r_pre_val_0 = R0; // must be R0, to be ready for the runtime call
const Register r_index_1 = R1;
const Register r_buffer_2 = R2;
Address queue_active(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
Address queue_index(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
Address buffer(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));
Label done;
Label runtime;
// Is marking still active?
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
__ ldrb(R1, queue_active);
__ cbz(R1, done);
__ ldr(r_index_1, queue_index);
__ ldr(r_pre_val_0, Address(SP, nb_saved_regs*wordSize));
__ ldr(r_buffer_2, buffer);
__ subs(r_index_1, r_index_1, wordSize);
__ b(runtime, lt);
__ str(r_index_1, queue_index);
__ str(r_pre_val_0, Address(r_buffer_2, r_index_1));
__ bind(done);
#ifdef AARCH64
__ raw_pop(R2, R3);
__ raw_pop(R0, R1);
#else // AARCH64
__ pop(saved_regs);
#endif // AARCH64
__ ret();
__ bind(runtime);
__ save_live_registers();
assert(r_pre_val_0 == c_rarg0, "pre_val should be in R0");
__ mov(c_rarg1, Rthread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), c_rarg0, c_rarg1);
__ restore_live_registers_without_return();
__ b(done);
}
void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
// Input:
// - store_addr, pushed on the stack
__ set_info("g1_post_barrier_slow_id", false);
Label done;
Label recheck;
Label runtime;
Address queue_index(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
Address buffer(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));
AddressLiteral cardtable(ci_card_table_address_as<address>(), relocInfo::none);
// save at least the registers that need saving if the runtime is called
#ifdef AARCH64
__ raw_push(R0, R1);
__ raw_push(R2, R3);
const int nb_saved_regs = 4;
#else // AARCH64
const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
const int nb_saved_regs = 6;
assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
__ push(saved_regs);
#endif // AARCH64
const Register r_card_addr_0 = R0; // must be R0 for the slow case
const Register r_obj_0 = R0;
const Register r_card_base_1 = R1;
const Register r_tmp2 = R2;
const Register r_index_2 = R2;
const Register r_buffer_3 = R3;
const Register tmp1 = Rtemp;
__ ldr(r_obj_0, Address(SP, nb_saved_regs*wordSize));
// Note: there is a comment in x86 code about not using
// ExternalAddress / lea, due to relocation not working
// properly for that address. Should be OK for arm, where we
// explicitly specify that 'cardtable' has a relocInfo::none
// type.
__ lea(r_card_base_1, cardtable);
__ add(r_card_addr_0, r_card_base_1, AsmOperand(r_obj_0, lsr, CardTable::card_shift));
// first quick check without barrier
__ ldrb(r_tmp2, Address(r_card_addr_0));
__ cmp(r_tmp2, (int)G1CardTable::g1_young_card_val());
__ b(recheck, ne);
__ bind(done);
#ifdef AARCH64
__ raw_pop(R2, R3);
__ raw_pop(R0, R1);
#else // AARCH64
__ pop(saved_regs);
#endif // AARCH64
__ ret();
__ bind(recheck);
__ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), tmp1);
// reload card state after the barrier that ensures the stored oop was visible
__ ldrb(r_tmp2, Address(r_card_addr_0));
assert(CardTable::dirty_card_val() == 0, "adjust this code");
__ cbz(r_tmp2, done);
// storing region crossing non-NULL, card is clean.
// dirty card and log.
assert(0 == (int)CardTable::dirty_card_val(), "adjust this code");
if ((ci_card_table_address_as<intptr_t>() & 0xff) == 0) {
// Card table is aligned so the lowest byte of the table address base is zero.
__ strb(r_card_base_1, Address(r_card_addr_0));
} else {
__ strb(__ zero_register(r_tmp2), Address(r_card_addr_0));
}
__ ldr(r_index_2, queue_index);
__ ldr(r_buffer_3, buffer);
__ subs(r_index_2, r_index_2, wordSize);
__ b(runtime, lt); // go to runtime if now negative
__ str(r_index_2, queue_index);
__ str(r_card_addr_0, Address(r_buffer_3, r_index_2));
__ b(done);
__ bind(runtime);
__ save_live_registers();
assert(r_card_addr_0 == c_rarg0, "card_addr should be in R0");
__ mov(c_rarg1, Rthread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), c_rarg0, c_rarg1);
__ restore_live_registers_without_return();
__ b(done);
}
#undef __
#endif // COMPILER1
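The new COMPILER1 block gives the ARM G1BarrierSetAssembler its own C1 stubs and runtime stubs, replacing the g1_pre/post_barrier_slow_id cases removed from Runtime1 earlier in this diff. The pre-barrier runtime stub's fast path is: return if marking is inactive, otherwise enqueue the previous value in the thread-local SATB buffer and fall back to the VM only when the buffer is full. A simplified, self-contained C++ model of that logic (field and function names are illustrative):

#include <cstddef>
#include <cstdint>

struct SatbQueue {
  std::uint8_t active;    // satb_mark_queue_active
  std::size_t index;      // satb_mark_queue_index, bytes remaining in the buffer
  void** buffer;          // satb_mark_queue_buffer
};

// Stand-in for the VM call (SharedRuntime::g1_wb_pre in the stub above).
void g1_wb_pre_runtime(void* pre_val, SatbQueue* q) { (void)pre_val; (void)q; }

// Combined model of the call-site stub (which filters null pre_val with cbz)
// and the runtime stub's fast path shown above.
void g1_pre_barrier(void* pre_val, SatbQueue* q) {
  if (pre_val == nullptr) return;             // cbz pre_val in gen_pre_barrier_stub
  if (q->active == 0) return;                 // marking not active: nothing to do
  if (q->index >= sizeof(void*)) {            // room left in the SATB buffer?
    q->index -= sizeof(void*);
    q->buffer[q->index / sizeof(void*)] = pre_val;   // enqueue previous value
  } else {
    g1_wb_pre_runtime(pre_val, q);            // buffer full: hand off to the VM
  }
}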

View File

@ -27,6 +27,12 @@
#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
#include "utilities/macros.hpp"
class LIR_Assembler;
class StubAssembler;
class G1PreBarrierStub;
class G1PostBarrierStub;
class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
protected:
@ -34,6 +40,15 @@ protected:
Register addr, Register count, int callee_saved_regs);
void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp);
#ifdef COMPILER1
public:
void gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub);
void gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub);
void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
void generate_c1_post_barrier_runtime_stub(StubAssembler* sasm);
#endif
};
#endif // CPU_ARM_GC_G1_G1BARRIERSETASSEMBLER_ARM_HPP

View File

@ -35,6 +35,7 @@
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "utilities/preserveException.hpp"
#define __ _masm->

View File

@ -29,6 +29,7 @@
#include "nativeInst_arm.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/safepoint.hpp"
void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {

View File

@ -33,9 +33,6 @@
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#endif // INCLUDE_ALL_GCS
#define __ ce->masm()->
@ -470,58 +467,4 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
__ b(_continuation);
}
///////////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
// At this point we know that marking is in progress.
// If do_load() is true then we have to emit the
// load of the previous value; otherwise it has already
// been loaded into _pre_val.
__ bind(_entry);
assert(pre_val()->is_register(), "Precondition.");
Register pre_val_reg = pre_val()->as_register();
if (do_load()) {
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
}
__ cmpdi(CCR0, pre_val_reg, 0);
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), _continuation);
address stub = Runtime1::entry_for(Runtime1::Runtime1::g1_pre_barrier_slow_id);
//__ load_const_optimized(R0, stub);
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
__ std(pre_val_reg, -8, R1_SP); // Pass pre_val on stack.
__ mtctr(R0);
__ bctrl();
__ b(_continuation);
}
void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
assert(addr()->is_register(), "Precondition.");
assert(new_val()->is_register(), "Precondition.");
Register addr_reg = addr()->as_pointer_register();
Register new_val_reg = new_val()->as_register();
__ cmpdi(CCR0, new_val_reg, 0);
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), _continuation);
address stub = Runtime1::entry_for(Runtime1::Runtime1::g1_post_barrier_slow_id);
//__ load_const_optimized(R0, stub);
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
__ mtctr(R0);
__ mr(R0, addr_reg); // Pass addr in R0.
__ bctrl();
__ b(_continuation);
}
#endif // INCLUDE_ALL_GCS
///////////////////////////////////////////////////////////////////////////////////
#undef __

View File

@ -2978,7 +2978,9 @@ void LIR_Assembler::peephole(LIR_List* lir) {
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
const Register Rptr = src->as_pointer_register(),
const LIR_Address *addr = src->as_address_ptr();
assert(addr->disp() == 0 && addr->index()->is_illegal(), "use leal!");
const Register Rptr = addr->base()->as_pointer_register(),
Rtmp = tmp->as_register();
Register Rco = noreg;
if (UseCompressedOops && data->is_oop()) {

View File

@ -149,7 +149,12 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
// Accumulate fixed displacements.
if (index->is_constant()) {
large_disp += (intx)(index->as_constant_ptr()->as_jint()) << shift;
LIR_Const *constant = index->as_constant_ptr();
if (constant->type() == T_LONG) {
large_disp += constant->as_jlong() << shift;
} else {
large_disp += (intx)(constant->as_jint()) << shift;
}
index = LIR_OprFact::illegalOpr;
}
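The constant-index case previously truncated the index to a jint; it now checks the constant's type and accumulates a T_LONG value at full width, presumably because the shared access code can hand the backend a 64-bit constant offset. A minimal sketch of the corrected accumulation:

#include <cstdint>

intptr_t accumulate_disp(intptr_t disp, bool index_is_long,
                         int64_t as_jlong, int32_t as_jint, int shift) {
  if (index_is_long) {
    return disp + (as_jlong << shift);           // keep the full 64-bit constant
  }
  return disp + ((intptr_t)as_jint << shift);    // original 32-bit path
}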
@ -190,7 +195,7 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
BasicType type, bool needs_card_mark) {
BasicType type) {
int elem_size = type2aelembytes(type);
int shift = exact_log2(elem_size);
@ -230,13 +235,7 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
__ add(index_opr, array_opr, base_opr);
}
}
if (needs_card_mark) {
LIR_Opr ptr = new_pointer_register();
__ add(base_opr, LIR_OprFact::intptrConst(offset), ptr);
return new LIR_Address(ptr, type);
} else {
return new LIR_Address(base_opr, offset, type);
}
}
@ -320,80 +319,12 @@ void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp)
// visitor functions
//----------------------------------------------------------------------
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
assert(x->is_pinned(),"");
bool needs_range_check = x->compute_needs_range_check();
bool use_length = x->length() != NULL;
bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
!get_jobject_constant(x->value())->is_null_object() ||
x->should_profile());
LIRItem array(x->array(), this);
LIRItem index(x->index(), this);
LIRItem value(x->value(), this);
LIRItem length(this);
array.load_item();
index.load_nonconstant();
if (use_length && needs_range_check) {
length.set_instruction(x->length());
length.load_item();
}
if (needs_store_check || x->check_boolean()) {
value.load_item();
} else {
value.load_for_store(x->elt_type());
}
set_no_result(x);
// The CodeEmitInfo must be duplicated for each different
// LIR-instruction because spilling can occur anywhere between two
// instructions and so the debug information must be different.
CodeEmitInfo* range_check_info = state_for(x);
CodeEmitInfo* null_check_info = NULL;
if (x->needs_null_check()) {
null_check_info = new CodeEmitInfo(range_check_info);
}
// Emit array address setup early so it schedules better.
LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
if (GenerateRangeChecks && needs_range_check) {
if (use_length) {
__ cmp(lir_cond_belowEqual, length.result(), index.result());
__ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
} else {
array_range_check(array.result(), index.result(), null_check_info, range_check_info);
// Range_check also does the null check.
null_check_info = NULL;
}
}
if (GenerateArrayStoreCheck && needs_store_check) {
void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
// Following registers are used by slow_subtype_check:
LIR_Opr tmp1 = FrameMap::R4_opr; // super_klass
LIR_Opr tmp2 = FrameMap::R5_opr; // sub_klass
LIR_Opr tmp3 = FrameMap::R6_opr; // temp
CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
__ store_check(value.result(), array.result(), tmp1, tmp2, tmp3,
store_check_info, x->profiled_method(), x->profiled_bci());
}
if (obj_store) {
// Needs GC write barriers.
pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
}
LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
__ move(result, array_addr, null_check_info);
if (obj_store) {
// Precise card mark.
post_barrier(LIR_OprFact::address(array_addr), value.result());
}
__ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}
@ -702,24 +633,68 @@ void LIRGenerator::do_CompareOp(CompareOp* x) {
}
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
assert(x->number_of_arguments() == 4, "wrong type");
LIRItem obj (x->argument_at(0), this); // object
LIRItem offset(x->argument_at(1), this); // offset of field
LIRItem cmp (x->argument_at(2), this); // Value to compare with field.
LIRItem val (x->argument_at(3), this); // Replace field with val if matches cmp.
LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
LIR_Opr result = new_register(T_INT);
LIR_Opr t1 = LIR_OprFact::illegalOpr;
LIR_Opr t2 = LIR_OprFact::illegalOpr;
LIR_Opr addr = new_pointer_register();
cmp_value.load_item();
new_value.load_item();
// Get address of field.
obj.load_item();
offset.load_item();
cmp.load_item();
val.load_item();
// Volatile load may be followed by Unsafe CAS.
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ membar();
} else {
__ membar_release();
}
__ add(obj.result(), offset.result(), addr);
if (type == T_OBJECT || type == T_ARRAY) {
if (UseCompressedOops) {
t1 = new_register(T_OBJECT);
t2 = new_register(T_OBJECT);
}
__ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
} else if (type == T_INT) {
__ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
} else if (type == T_LONG) {
__ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
} else {
Unimplemented();
}
__ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
result, type);
return result;
}
LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
LIR_Opr result = new_register(type);
LIR_Opr tmp = FrameMap::R0_opr;
value.load_item();
// Volatile load may be followed by Unsafe CAS.
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ membar();
} else {
__ membar_release();
}
__ xchg(addr, value.result(), result, tmp);
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ membar_acquire();
} else {
__ membar();
}
return result;
}
LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
LIR_Opr result = new_register(type);
LIR_Opr tmp = FrameMap::R0_opr;
value.load_item();
// Volatile load may be followed by Unsafe CAS.
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
@ -728,33 +703,14 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
__ membar_release();
}
if (type == objectType) { // Write-barrier needed for Object fields.
// Only cmp value can get overwritten, no do_load required.
pre_barrier(LIR_OprFact::illegalOpr /* addr */, cmp.result() /* pre_val */,
false /* do_load */, false /* patch */, NULL);
}
__ xadd(addr, value.result(), result, tmp);
if (type == objectType) {
if (UseCompressedOops) {
t1 = new_register(T_OBJECT);
t2 = new_register(T_OBJECT);
}
__ cas_obj(addr, cmp.result(), val.result(), t1, t2);
} else if (type == intType) {
__ cas_int(addr, cmp.result(), val.result(), t1, t2);
} else if (type == longType) {
__ cas_long(addr, cmp.result(), val.result(), t1, t2);
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ membar_acquire();
} else {
ShouldNotReachHere();
}
// Generate conditional move of boolean result.
LIR_Opr result = rlock_result(x);
__ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
result, as_BasicType(type));
if (type == objectType) { // Write-barrier needed for Object fields.
// Precise card mark since could either be object or array.
post_barrier(addr, val.result());
__ membar();
}
return result;
}
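As on ARM, the PPC Unsafe paths collapse into atomic_cmpxchg/atomic_xchg/atomic_add. Note the fence discipline shown in atomic_xchg and atomic_add above: membar (or membar_release) before the operation and membar_acquire (or a full membar) after, keyed off support_IRIW_for_not_multiple_copy_atomic_cpu. The following is only a C++ analogy of that ordering, not the emitted code:

#include <atomic>
#include <cstdint>

intptr_t xchg_with_fences(std::atomic<intptr_t>& slot, intptr_t next,
                          bool support_iriw) {
  if (support_iriw) {
    std::atomic_thread_fence(std::memory_order_seq_cst);   // __ membar()
  } else {
    std::atomic_thread_fence(std::memory_order_release);   // __ membar_release()
  }
  intptr_t prev = slot.exchange(next, std::memory_order_relaxed);
  if (support_iriw) {
    std::atomic_thread_fence(std::memory_order_acquire);   // __ membar_acquire()
  } else {
    std::atomic_thread_fence(std::memory_order_seq_cst);   // __ membar()
  }
  return prev;
}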
@ -1112,7 +1068,7 @@ void LIRGenerator::do_BlockBegin(BlockBegin* x) {
void LIRGenerator::do_CheckCast(CheckCast* x) {
LIRItem obj(x->obj(), this);
CodeEmitInfo* patching_info = NULL;
if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
// Must do this before locking the destination register as
// an oop register, and before the obj is loaded (so x->obj()->item()
// is valid for creating a debug info location).
@ -1255,110 +1211,6 @@ void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
}
void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
BasicType type, bool is_volatile) {
LIR_Opr base_op = src;
LIR_Opr index_op = offset;
bool is_obj = (type == T_ARRAY || type == T_OBJECT);
#ifndef _LP64
if (is_volatile && type == T_LONG) {
__ volatile_store_unsafe_reg(data, src, offset, type, NULL, lir_patch_none);
} else
#endif
{
if (type == T_BOOLEAN) {
type = T_BYTE;
}
LIR_Address* addr;
if (type == T_ARRAY || type == T_OBJECT) {
LIR_Opr tmp = new_pointer_register();
__ add(base_op, index_op, tmp);
addr = new LIR_Address(tmp, type);
} else {
addr = new LIR_Address(base_op, index_op, type);
}
if (is_obj) {
pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
// _bs->c1_write_barrier_pre(this, LIR_OprFact::address(addr));
}
__ move(data, addr);
if (is_obj) {
// This address is precise.
post_barrier(LIR_OprFact::address(addr), data);
}
}
}
void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
BasicType type, bool is_volatile) {
#ifndef _LP64
if (is_volatile && type == T_LONG) {
__ volatile_load_unsafe_reg(src, offset, dst, type, NULL, lir_patch_none);
} else
#endif
{
LIR_Address* addr = new LIR_Address(src, offset, type);
__ load(addr, dst);
}
}
void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
BasicType type = x->basic_type();
LIRItem src(x->object(), this);
LIRItem off(x->offset(), this);
LIRItem value(x->value(), this);
src.load_item();
value.load_item();
off.load_nonconstant();
LIR_Opr dst = rlock_result(x, type);
LIR_Opr data = value.result();
bool is_obj = (type == T_ARRAY || type == T_OBJECT);
LIR_Opr tmp = FrameMap::R0_opr;
LIR_Opr ptr = new_pointer_register();
__ add(src.result(), off.result(), ptr);
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ membar();
} else {
__ membar_release();
}
if (x->is_add()) {
__ xadd(ptr, data, dst, tmp);
} else {
const bool can_move_barrier = true; // TODO: port GraphKit::can_move_pre_barrier() from C2
if (!can_move_barrier && is_obj) {
// Do the pre-write barrier, if any.
pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
}
__ xchg(ptr, data, dst, tmp);
if (is_obj) {
// Seems to be a precise address.
post_barrier(ptr, data);
if (can_move_barrier) {
pre_barrier(LIR_OprFact::illegalOpr, dst /* pre_val */,
false /* do_load */, false /* patch */, NULL);
}
}
}
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ membar_acquire();
} else {
__ membar();
}
}
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
assert(UseCRC32Intrinsics, "or should not be here");
LIR_Opr result = rlock_result(x);

View File

@ -42,11 +42,6 @@
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#endif
// Implementation of StubAssembler
@ -708,164 +703,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
}
break;
#if INCLUDE_ALL_GCS
case g1_pre_barrier_slow_id:
{
BarrierSet* bs = BarrierSet::barrier_set();
if (bs->kind() != BarrierSet::G1BarrierSet) {
goto unimplemented_entry;
}
__ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);
// Using stack slots: pre_val (pre-pushed), spill tmp, spill tmp2.
const int stack_slots = 3;
Register pre_val = R0; // previous value of memory
Register tmp = R14;
Register tmp2 = R15;
Label refill, restart, marking_not_active;
int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
// Spill
__ std(tmp, -16, R1_SP);
__ std(tmp2, -24, R1_SP);
// Is marking still active?
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
__ lwz(tmp, satb_q_active_byte_offset, R16_thread);
} else {
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
__ lbz(tmp, satb_q_active_byte_offset, R16_thread);
}
__ cmpdi(CCR0, tmp, 0);
__ beq(CCR0, marking_not_active);
__ bind(restart);
// Load the index into the SATB buffer. SATBMarkQueue::_index is a
// size_t so ld_ptr is appropriate.
__ ld(tmp, satb_q_index_byte_offset, R16_thread);
// index == 0?
__ cmpdi(CCR0, tmp, 0);
__ beq(CCR0, refill);
__ ld(tmp2, satb_q_buf_byte_offset, R16_thread);
__ ld(pre_val, -8, R1_SP); // Load from stack.
__ addi(tmp, tmp, -oopSize);
__ std(tmp, satb_q_index_byte_offset, R16_thread);
__ stdx(pre_val, tmp2, tmp); // [_buf + index] := <address_of_card>
__ bind(marking_not_active);
// Restore temp registers and return-from-leaf.
__ ld(tmp2, -24, R1_SP);
__ ld(tmp, -16, R1_SP);
__ blr();
__ bind(refill);
const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
__ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
__ mflr(R0);
__ std(R0, _abi(lr), R1_SP);
__ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread), R16_thread);
__ pop_frame();
__ ld(R0, _abi(lr), R1_SP);
__ mtlr(R0);
__ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
__ b(restart);
}
break;
case g1_post_barrier_slow_id:
{
BarrierSet* bs = BarrierSet::barrier_set();
if (bs->kind() != BarrierSet::G1BarrierSet) {
goto unimplemented_entry;
}
__ set_info("g1_post_barrier_slow_id", dont_gc_arguments);
// Using stack slots: spill addr, spill tmp2
const int stack_slots = 2;
Register tmp = R0;
Register addr = R14;
Register tmp2 = R15;
jbyte* byte_map_base = ci_card_table_address();
Label restart, refill, ret;
// Spill
__ std(addr, -8, R1_SP);
__ std(tmp2, -16, R1_SP);
__ srdi(addr, R0, CardTable::card_shift); // Addr is passed in R0.
__ load_const_optimized(/*cardtable*/ tmp2, byte_map_base, tmp);
__ add(addr, tmp2, addr);
__ lbz(tmp, 0, addr); // tmp := [addr + cardtable]
// Return if young card.
__ cmpwi(CCR0, tmp, G1CardTable::g1_young_card_val());
__ beq(CCR0, ret);
// Return if sequential consistent value is already dirty.
__ membar(Assembler::StoreLoad);
__ lbz(tmp, 0, addr); // tmp := [addr + cardtable]
__ cmpwi(CCR0, tmp, G1CardTable::dirty_card_val());
__ beq(CCR0, ret);
// Not dirty.
// First, dirty it.
__ li(tmp, G1CardTable::dirty_card_val());
__ stb(tmp, 0, addr);
int dirty_card_q_index_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
int dirty_card_q_buf_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());
__ bind(restart);
// Get the index into the update buffer. DirtyCardQueue::_index is
// a size_t so ld_ptr is appropriate here.
__ ld(tmp2, dirty_card_q_index_byte_offset, R16_thread);
// index == 0?
__ cmpdi(CCR0, tmp2, 0);
__ beq(CCR0, refill);
__ ld(tmp, dirty_card_q_buf_byte_offset, R16_thread);
__ addi(tmp2, tmp2, -oopSize);
__ std(tmp2, dirty_card_q_index_byte_offset, R16_thread);
__ add(tmp2, tmp, tmp2);
__ std(addr, 0, tmp2); // [_buf + index] := <address_of_card>
// Restore temp registers and return-from-leaf.
__ bind(ret);
__ ld(tmp2, -16, R1_SP);
__ ld(addr, -8, R1_SP);
__ blr();
__ bind(refill);
const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
__ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
__ mflr(R0);
__ std(R0, _abi(lr), R1_SP);
__ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
__ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread), R16_thread);
__ pop_frame();
__ ld(R0, _abi(lr), R1_SP);
__ mtlr(R0);
__ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
__ b(restart);
}
break;
#endif // INCLUDE_ALL_GCS
case predicate_failed_trap_id:
{
__ set_info("predicate_failed_trap", dont_gc_arguments);
@ -889,7 +726,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
break;
default:
unimplemented_entry:
{
__ set_info("unimplemented entry", dont_gc_arguments);
__ mflr(R0);

View File

@ -26,12 +26,17 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#endif
#define __ masm->
@ -339,4 +344,209 @@ void G1BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value
__ bind(done);
}
#ifdef COMPILER1
#undef __
#define __ ce->masm()->
void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
// At this point we know that marking is in progress.
// If do_load() is true then we have to emit the
// load of the previous value; otherwise it has already
// been loaded into _pre_val.
__ bind(*stub->entry());
assert(stub->pre_val()->is_register(), "Precondition.");
Register pre_val_reg = stub->pre_val()->as_register();
if (stub->do_load()) {
ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
}
__ cmpdi(CCR0, pre_val_reg, 0);
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());
address c_code = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
//__ load_const_optimized(R0, c_code);
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(c_code));
__ std(pre_val_reg, -8, R1_SP); // Pass pre_val on stack.
__ mtctr(R0);
__ bctrl();
__ b(*stub->continuation());
}
void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
__ bind(*stub->entry());
assert(stub->addr()->is_register(), "Precondition.");
assert(stub->new_val()->is_register(), "Precondition.");
Register addr_reg = stub->addr()->as_pointer_register();
Register new_val_reg = stub->new_val()->as_register();
__ cmpdi(CCR0, new_val_reg, 0);
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());
address c_code = bs->post_barrier_c1_runtime_code_blob()->code_begin();
//__ load_const_optimized(R0, c_code);
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(c_code));
__ mtctr(R0);
__ mr(R0, addr_reg); // Pass addr in R0.
__ bctrl();
__ b(*stub->continuation());
}
#undef __
#define __ sasm->
void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
BarrierSet* bs = BarrierSet::barrier_set();
__ set_info("g1_pre_barrier_slow_id", false);
// Using stack slots: pre_val (pre-pushed), spill tmp, spill tmp2.
const int stack_slots = 3;
Register pre_val = R0; // previous value of memory
Register tmp = R14;
Register tmp2 = R15;
Label refill, restart, marking_not_active;
int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
// Spill
__ std(tmp, -16, R1_SP);
__ std(tmp2, -24, R1_SP);
// Is marking still active?
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
__ lwz(tmp, satb_q_active_byte_offset, R16_thread);
} else {
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
__ lbz(tmp, satb_q_active_byte_offset, R16_thread);
}
__ cmpdi(CCR0, tmp, 0);
__ beq(CCR0, marking_not_active);
__ bind(restart);
// Load the index into the SATB buffer. SATBMarkQueue::_index is a
// size_t so ld_ptr is appropriate.
__ ld(tmp, satb_q_index_byte_offset, R16_thread);
// index == 0?
__ cmpdi(CCR0, tmp, 0);
__ beq(CCR0, refill);
__ ld(tmp2, satb_q_buf_byte_offset, R16_thread);
__ ld(pre_val, -8, R1_SP); // Load from stack.
__ addi(tmp, tmp, -oopSize);
__ std(tmp, satb_q_index_byte_offset, R16_thread);
__ stdx(pre_val, tmp2, tmp); // [_buf + index] := <address_of_card>
__ bind(marking_not_active);
// Restore temp registers and return-from-leaf.
__ ld(tmp2, -24, R1_SP);
__ ld(tmp, -16, R1_SP);
__ blr();
__ bind(refill);
const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
__ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
__ mflr(R0);
__ std(R0, _abi(lr), R1_SP);
__ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread), R16_thread);
__ pop_frame();
__ ld(R0, _abi(lr), R1_SP);
__ mtlr(R0);
__ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
__ b(restart);
}
void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
G1BarrierSet* bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
__ set_info("g1_post_barrier_slow_id", false);
// Using stack slots: spill addr, spill tmp2
const int stack_slots = 2;
Register tmp = R0;
Register addr = R14;
Register tmp2 = R15;
jbyte* byte_map_base = bs->card_table()->byte_map_base();
Label restart, refill, ret;
// Spill
__ std(addr, -8, R1_SP);
__ std(tmp2, -16, R1_SP);
__ srdi(addr, R0, CardTable::card_shift); // Addr is passed in R0.
__ load_const_optimized(/*cardtable*/ tmp2, byte_map_base, tmp);
__ add(addr, tmp2, addr);
__ lbz(tmp, 0, addr); // tmp := [addr + cardtable]
// Return if young card.
__ cmpwi(CCR0, tmp, G1CardTable::g1_young_card_val());
__ beq(CCR0, ret);
// Return if sequential consistent value is already dirty.
__ membar(Assembler::StoreLoad);
__ lbz(tmp, 0, addr); // tmp := [addr + cardtable]
__ cmpwi(CCR0, tmp, G1CardTable::dirty_card_val());
__ beq(CCR0, ret);
// Not dirty.
// First, dirty it.
__ li(tmp, G1CardTable::dirty_card_val());
__ stb(tmp, 0, addr);
int dirty_card_q_index_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
int dirty_card_q_buf_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());
__ bind(restart);
// Get the index into the update buffer. DirtyCardQueue::_index is
// a size_t so ld_ptr is appropriate here.
__ ld(tmp2, dirty_card_q_index_byte_offset, R16_thread);
// index == 0?
__ cmpdi(CCR0, tmp2, 0);
__ beq(CCR0, refill);
__ ld(tmp, dirty_card_q_buf_byte_offset, R16_thread);
__ addi(tmp2, tmp2, -oopSize);
__ std(tmp2, dirty_card_q_index_byte_offset, R16_thread);
__ add(tmp2, tmp, tmp2);
__ std(addr, 0, tmp2); // [_buf + index] := <address_of_card>
// Restore temp registers and return-from-leaf.
__ bind(ret);
__ ld(tmp2, -16, R1_SP);
__ ld(addr, -8, R1_SP);
__ blr();
__ bind(refill);
const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
__ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
__ mflr(R0);
__ std(R0, _abi(lr), R1_SP);
__ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
__ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread), R16_thread);
__ pop_frame();
__ ld(R0, _abi(lr), R1_SP);
__ mtlr(R0);
__ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
__ b(restart);
}
#undef __
#endif // COMPILER1
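The PPC post-barrier runtime stub above mirrors the ARM one: skip young cards, re-read the card after a StoreLoad barrier, dirty it, and enqueue the card address in the thread-local dirty-card queue, calling the VM only when the queue is full. A simplified stand-alone model (constants are illustrative; dirty_card_val() == 0 is the one the stub itself asserts):

#include <atomic>
#include <cstddef>
#include <cstdint>

constexpr std::uint8_t kYoungCard = 2;   // stand-in for G1CardTable::g1_young_card_val()
constexpr std::uint8_t kDirtyCard = 0;   // CardTable::dirty_card_val() == 0, as asserted above
constexpr int          kCardShift = 9;   // CardTable::card_shift (512-byte cards)

struct DirtyCardQueue {
  std::size_t index;       // bytes remaining in the buffer
  std::uint8_t** buffer;   // queue of card addresses
};

// Stand-in for the VM call (DirtyCardQueueSet::handle_zero_index_for_thread path).
void g1_wb_post_runtime(std::uint8_t* card, DirtyCardQueue* q) { (void)card; (void)q; }

void g1_post_barrier(std::uintptr_t store_addr, std::uint8_t* card_table_base,
                     DirtyCardQueue* q) {
  std::uint8_t* card = card_table_base + (store_addr >> kCardShift);
  if (*card == kYoungCard) return;                       // young card: nothing to do
  std::atomic_thread_fence(std::memory_order_seq_cst);   // StoreLoad, then re-read the card
  if (*card == kDirtyCard) return;                       // already dirty: nothing to log
  *card = kDirtyCard;                                    // dirty the card and log it
  if (q->index >= sizeof(void*)) {
    q->index -= sizeof(void*);
    q->buffer[q->index / sizeof(void*)] = card;          // enqueue the card address
  } else {
    g1_wb_post_runtime(card, q);                         // queue full: call the VM
  }
}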

View File

@ -28,6 +28,12 @@
#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
#include "utilities/macros.hpp"
class LIR_Assembler;
class StubAssembler;
class G1PreBarrierStub;
class G1PostBarrierStub;
class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
protected:
@ -45,6 +51,14 @@ protected:
Register tmp1, Register tmp2, Register tmp3, bool needs_frame);
public:
#ifdef COMPILER1
void gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub);
void gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub);
void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
void generate_c1_post_barrier_runtime_stub(StubAssembler* sasm);
#endif
virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register base, RegisterOrConstant ind_or_offs, Register dst,
Register tmp1, Register tmp2, bool needs_frame, Label *is_null = NULL);

View File

@ -50,7 +50,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
Label Lskip_loop, Lstore_loop;
if (UseConcMarkSweepGC) { __ membar(Assembler::StoreStore); }
if (ct->scanned_concurrently()) { __ membar(Assembler::StoreStore); }
__ sldi_(count, count, LogBytesPerHeapOop);
__ beq(CCR0, Lskip_loop); // zero length
@ -75,11 +75,13 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
void CardTableBarrierSetAssembler::card_table_write(MacroAssembler* masm,
jbyte* byte_map_base,
Register tmp, Register obj) {
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
CardTable* ct = ctbs->card_table();
assert_different_registers(obj, tmp, R0);
__ load_const_optimized(tmp, (address)byte_map_base, R0);
__ srdi(obj, obj, CardTable::card_shift);
__ li(R0, CardTable::dirty_card_val());
if (UseConcMarkSweepGC) { __ membar(Assembler::StoreStore); }
if (ct->scanned_concurrently()) { __ membar(Assembler::StoreStore); }
__ stbx(R0, tmp, obj);
}
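The StoreStore barrier in the card-table write path is now keyed off ct->scanned_concurrently() instead of UseConcMarkSweepGC, so the assembler asks the card table whether a concurrent scanner may observe the write rather than testing for one specific collector. A one-line sketch of that inversion (names are illustrative):

struct CardTableModel {
  bool scanned_concurrently;   // set by the collector that owns the table
};

inline bool needs_storestore_before_card_write(const CardTableModel& ct) {
  return ct.scanned_concurrently;   // was: UseConcMarkSweepGC
}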

View File

@ -34,9 +34,6 @@
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_s390.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#endif // INCLUDE_ALL_GCS
#define __ ce->masm()->
#undef CHECK_BAILOUT
@ -453,46 +450,4 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
__ branch_optimized(Assembler::bcondAlways, _continuation);
}
///////////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
// At this point we know that marking is in progress.
// If do_load() is true then we have to emit the
// load of the previous value; otherwise it has already
// been loaded into _pre_val.
__ bind(_entry);
ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
assert(pre_val()->is_register(), "Precondition.");
Register pre_val_reg = pre_val()->as_register();
if (do_load()) {
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
}
__ z_ltgr(Z_R1_scratch, pre_val_reg); // Pass oop in Z_R1_scratch to Runtime1::g1_pre_barrier_slow_id.
__ branch_optimized(Assembler::bcondZero, _continuation);
ce->emit_call_c(Runtime1::entry_for (Runtime1::g1_pre_barrier_slow_id));
CHECK_BAILOUT();
__ branch_optimized(Assembler::bcondAlways, _continuation);
}
void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
assert(addr()->is_register(), "Precondition.");
assert(new_val()->is_register(), "Precondition.");
Register new_val_reg = new_val()->as_register();
__ z_ltgr(new_val_reg, new_val_reg);
__ branch_optimized(Assembler::bcondZero, _continuation);
__ z_lgr(Z_R1_scratch, addr()->as_pointer_register());
ce->emit_call_c(Runtime1::entry_for (Runtime1::g1_post_barrier_slow_id));
CHECK_BAILOUT();
__ branch_optimized(Assembler::bcondAlways, _continuation);
}
#endif // INCLUDE_ALL_GCS
#undef __

View File

@ -572,16 +572,90 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
assert(src->is_constant(), "should not call otherwise");
assert(dest->is_address(), "should not call otherwise");
// See special case in LIRGenerator::do_StoreIndexed.
// T_BYTE: Special case for card mark store.
assert(type == T_BYTE || !dest->as_address_ptr()->index()->is_valid(), "not supported");
LIR_Const* c = src->as_constant_ptr();
Address addr = as_Address(dest->as_address_ptr());
int store_offset = -1;
if (dest->as_address_ptr()->index()->is_valid()) {
switch (type) {
case T_INT: // fall through
case T_FLOAT:
__ load_const_optimized(Z_R0_scratch, c->as_jint_bits());
store_offset = __ offset();
if (Immediate::is_uimm12(addr.disp())) {
__ z_st(Z_R0_scratch, addr);
} else {
__ z_sty(Z_R0_scratch, addr);
}
break;
case T_ADDRESS:
__ load_const_optimized(Z_R1_scratch, c->as_jint_bits());
store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
break;
case T_OBJECT: // fall through
case T_ARRAY:
if (c->as_jobject() == NULL) {
if (UseCompressedOops && !wide) {
__ clear_reg(Z_R1_scratch, false);
store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false);
} else {
__ clear_reg(Z_R1_scratch, true);
store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
}
} else {
jobject2reg(c->as_jobject(), Z_R1_scratch);
if (UseCompressedOops && !wide) {
__ encode_heap_oop(Z_R1_scratch);
store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false);
} else {
store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
}
}
assert(store_offset >= 0, "check");
break;
case T_LONG: // fall through
case T_DOUBLE:
__ load_const_optimized(Z_R1_scratch, (int64_t)(c->as_jlong_bits()));
store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
break;
case T_BOOLEAN: // fall through
case T_BYTE:
__ load_const_optimized(Z_R0_scratch, (int8_t)(c->as_jint()));
store_offset = __ offset();
if (Immediate::is_uimm12(addr.disp())) {
__ z_stc(Z_R0_scratch, addr);
} else {
__ z_stcy(Z_R0_scratch, addr);
}
break;
case T_CHAR: // fall through
case T_SHORT:
__ load_const_optimized(Z_R0_scratch, (int16_t)(c->as_jint()));
store_offset = __ offset();
if (Immediate::is_uimm12(addr.disp())) {
__ z_sth(Z_R0_scratch, addr);
} else {
__ z_sthy(Z_R0_scratch, addr);
}
break;
default:
ShouldNotReachHere();
}
} else { // no index
unsigned int lmem = 0;
unsigned int lcon = 0;
int64_t cbits = 0;
switch (type) {
case T_INT: // fall through
case T_FLOAT:
@@ -629,24 +703,13 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
default:
ShouldNotReachHere();
};
// Index register is normally not supported, but for
// LIRGenerator::CardTableBarrierSet_post_barrier we make an exception.
if (type == T_BYTE && dest->as_address_ptr()->index()->is_valid()) {
__ load_const_optimized(Z_R0_scratch, (int8_t)(c->as_jint()));
store_offset = __ offset();
if (Immediate::is_uimm12(addr.disp())) {
__ z_stc(Z_R0_scratch, addr);
} else {
__ z_stcy(Z_R0_scratch, addr);
}
}
if (store_offset == -1) {
store_offset = __ store_const(addr, cbits, lmem, lcon);
assert(store_offset >= 0, "check");
}
}
if (info != NULL) {
add_debug_info_for_null_check(store_offset, info);


@@ -140,7 +140,13 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
int shift, int disp, BasicType type) {
assert(base->is_register(), "must be");
if (index->is_constant()) {
intptr_t large_disp = ((intx)(index->as_constant_ptr()->as_jint()) << shift) + disp;
intx large_disp = disp;
LIR_Const *constant = index->as_constant_ptr();
if (constant->type() == T_LONG) {
large_disp += constant->as_jlong() << shift;
} else {
large_disp += (intx)(constant->as_jint()) << shift;
}
if (Displacement::is_validDisp(large_disp)) {
return new LIR_Address(base, large_disp, type);
}
@@ -159,7 +165,7 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
}
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
BasicType type, bool needs_card_mark) {
BasicType type) {
int elem_size = type2aelembytes(type);
int shift = exact_log2(elem_size);
int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
@@ -181,16 +187,7 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
index_opr,
offset_in_bytes, type);
}
if (needs_card_mark) {
// This store will need a precise card mark, so go ahead and
// compute the full adddres instead of computing once for the
// store and again for the card mark.
LIR_Opr tmp = new_pointer_register();
__ leal(LIR_OprFact::address(addr), tmp);
return new LIR_Address(tmp, type);
} else {
return addr;
}
}
LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
@@ -252,86 +249,11 @@ void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp)
// visitor functions
//----------------------------------------------------------------------
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
assert(x->is_pinned(),"");
bool needs_range_check = x->compute_needs_range_check();
bool use_length = x->length() != NULL;
bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
!get_jobject_constant(x->value())->is_null_object() ||
x->should_profile());
LIRItem array(x->array(), this);
LIRItem index(x->index(), this);
LIRItem value(x->value(), this);
LIRItem length(this);
array.load_item();
index.load_nonconstant(20);
if (use_length && needs_range_check) {
length.set_instruction(x->length());
length.load_item();
}
if (needs_store_check || x->check_boolean()) {
value.load_item();
} else {
value.load_for_store(x->elt_type());
}
set_no_result(x);
// The CodeEmitInfo must be duplicated for each different
// LIR-instruction because spilling can occur anywhere between two
// instructions and so the debug information must be different.
CodeEmitInfo* range_check_info = state_for (x);
CodeEmitInfo* null_check_info = NULL;
if (x->needs_null_check()) {
null_check_info = new CodeEmitInfo(range_check_info);
}
// Emit array address setup early so it schedules better.
LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
if (value.result()->is_constant() && array_addr->index()->is_valid()) {
// Constants cannot be stored with index register on ZARCH_64 (see LIR_Assembler::const2mem()).
LIR_Opr tmp = new_pointer_register();
__ leal(LIR_OprFact::address(array_addr), tmp);
array_addr = new LIR_Address(tmp, x->elt_type());
}
if (GenerateRangeChecks && needs_range_check) {
if (use_length) {
__ cmp(lir_cond_belowEqual, length.result(), index.result());
__ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
} else {
array_range_check(array.result(), index.result(), null_check_info, range_check_info);
// Range_check also does the null check.
null_check_info = NULL;
}
}
if (GenerateArrayStoreCheck && needs_store_check) {
void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
LIR_Opr tmp1 = new_register(objectType);
LIR_Opr tmp2 = new_register(objectType);
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
__ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
}
if (obj_store) {
// Needs GC write barriers.
pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
}
LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
__ move(result, array_addr, null_check_info);
if (obj_store) {
// Precise card mark
post_barrier(LIR_OprFact::address(array_addr), value.result());
}
__ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
@@ -665,59 +587,42 @@ void LIRGenerator::do_CompareOp(CompareOp* x) {
}
}
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
assert(x->number_of_arguments() == 4, "wrong type");
LIRItem obj (x->argument_at(0), this); // object
LIRItem offset(x->argument_at(1), this); // offset of field
LIRItem cmp (x->argument_at(2), this); // Value to compare with field.
LIRItem val (x->argument_at(3), this); // Replace field with val if matches cmp.
// Get address of field.
obj.load_item();
offset.load_nonconstant(20);
cmp.load_item();
val.load_item();
LIR_Opr addr = new_pointer_register();
LIR_Address* a;
if (offset.result()->is_constant()) {
assert(Immediate::is_simm20(offset.result()->as_jlong()), "should have been loaded into register");
a = new LIR_Address(obj.result(),
offset.result()->as_jlong(),
as_BasicType(type));
} else {
a = new LIR_Address(obj.result(),
offset.result(),
0,
as_BasicType(type));
LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
LIR_Opr t1 = LIR_OprFact::illegalOpr;
LIR_Opr t2 = LIR_OprFact::illegalOpr;
cmp_value.load_item();
new_value.load_item();
if (type == T_OBJECT) {
if (UseCompressedOops) {
t1 = new_register(T_OBJECT);
t2 = new_register(T_OBJECT);
}
__ leal(LIR_OprFact::address(a), addr);
if (type == objectType) { // Write-barrier needed for Object fields.
pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
}
LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience
if (type == objectType) {
__ cas_obj(addr, cmp.result(), val.result(), new_register(T_OBJECT), new_register(T_OBJECT));
} else if (type == intType) {
__ cas_int(addr, cmp.result(), val.result(), ill, ill);
} else if (type == longType) {
__ cas_long(addr, cmp.result(), val.result(), ill, ill);
__ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
} else if (type == T_INT) {
__ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
} else if (type == T_LONG) {
__ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
} else {
ShouldNotReachHere();
}
// Generate conditional move of boolean result.
LIR_Opr result = rlock_result(x);
LIR_Opr result = new_register(T_INT);
__ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
result, as_BasicType(type));
if (type == objectType) { // Write-barrier needed for Object fields.
// Precise card mark since could either be object or array
post_barrier(addr, val.result());
}
result, type);
return result;
}
LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
Unimplemented(); // Currently not supported on this platform.
return LIR_OprFact::illegalOpr;
}
LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
LIR_Opr result = new_register(type);
value.load_item();
__ xadd(addr, value.result(), result, LIR_OprFact::illegalOpr);
return result;
}
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
switch (x->id()) {
@@ -970,7 +875,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
LIRItem obj(x->obj(), this);
CodeEmitInfo* patching_info = NULL;
if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
// Must do this before locking the destination register as an oop register,
// and before the obj is loaded (the latter is for deoptimization).
patching_info = state_for (x, x->state_before());
@@ -1104,57 +1009,6 @@ void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
__ load(address, result, info);
}
void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
BasicType type, bool is_volatile) {
LIR_Address* addr = new LIR_Address(src, offset, type);
bool is_obj = (type == T_ARRAY || type == T_OBJECT);
if (is_obj) {
// Do the pre-write barrier, if any.
pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
__ move(data, addr);
assert(src->is_register(), "must be register");
// Seems to be a precise address.
post_barrier(LIR_OprFact::address(addr), data);
} else {
__ move(data, addr);
}
}
void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
BasicType type, bool is_volatile) {
LIR_Address* addr = new LIR_Address(src, offset, type);
__ load(addr, dst);
}
void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
BasicType type = x->basic_type();
assert (x->is_add() && type != T_ARRAY && type != T_OBJECT, "not supported");
LIRItem src(x->object(), this);
LIRItem off(x->offset(), this);
LIRItem value(x->value(), this);
src.load_item();
value.load_item();
off.load_nonconstant(20);
LIR_Opr dst = rlock_result(x, type);
LIR_Opr data = value.result();
LIR_Opr offset = off.result();
LIR_Address* addr;
if (offset->is_constant()) {
assert(Immediate::is_simm20(offset->as_jlong()), "should have been loaded into register");
addr = new LIR_Address(src.result(), offset->as_jlong(), type);
} else {
addr = new LIR_Address(src.result(), offset, type);
}
__ xadd(LIR_OprFact::address(addr), data, dst, LIR_OprFact::illegalOpr);
}
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
assert(UseCRC32Intrinsics, "or should not be here");
LIR_Opr result = rlock_result(x);


@@ -42,11 +42,6 @@
#include "utilities/macros.hpp"
#include "vmreg_s390.inline.hpp"
#include "registerSaver_s390.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#endif
// Implementation of StubAssembler
@@ -190,15 +185,6 @@ static OopMap* save_live_registers_except_r2(StubAssembler* sasm, bool save_fpu_
return RegisterSaver::save_live_registers(sasm, reg_set);
}
static OopMap* save_volatile_registers(StubAssembler* sasm, Register return_pc = Z_R14) {
__ block_comment("save_volatile_registers");
RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
int frame_size_in_slots =
RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
return RegisterSaver::save_live_registers(sasm, reg_set, return_pc);
}
static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
__ block_comment("restore_live_registers");
RegisterSaver::RegisterSet reg_set =
@@ -214,12 +200,6 @@ static void restore_live_registers_except_r2(StubAssembler* sasm, bool restore_f
RegisterSaver::restore_live_registers(sasm, RegisterSaver::all_registers_except_r2);
}
static void restore_volatile_registers(StubAssembler* sasm) {
__ block_comment("restore_volatile_registers");
RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
RegisterSaver::restore_live_registers(sasm, reg_set);
}
void Runtime1::initialize_pd() {
// Nothing to do.
}
@@ -764,160 +744,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
break;
#endif // TODO
#if INCLUDE_ALL_GCS
case g1_pre_barrier_slow_id:
{ // Z_R1_scratch: previous value of memory
BarrierSet* bs = BarrierSet::barrier_set();
if (bs->kind() != BarrierSet::G1BarrierSet) {
__ should_not_reach_here(FILE_AND_LINE);
break;
}
__ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);
Register pre_val = Z_R1_scratch;
Register tmp = Z_R6; // Must be non-volatile because it is used to save pre_val.
Register tmp2 = Z_R7;
Label refill, restart, marking_not_active;
int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
// Save tmp registers (see assertion in G1PreBarrierStub::emit_code()).
__ z_stg(tmp, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
__ z_stg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
// Is marking still active?
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
__ load_and_test_int(tmp, Address(Z_thread, satb_q_active_byte_offset));
} else {
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
__ load_and_test_byte(tmp, Address(Z_thread, satb_q_active_byte_offset));
}
__ z_bre(marking_not_active); // Activity indicator is zero, so there is no marking going on currently.
__ bind(restart);
// Load the index into the SATB buffer. SATBMarkQueue::_index is a
// size_t so ld_ptr is appropriate.
__ z_ltg(tmp, satb_q_index_byte_offset, Z_R0, Z_thread);
// index == 0?
__ z_brz(refill);
__ z_lg(tmp2, satb_q_buf_byte_offset, Z_thread);
__ add2reg(tmp, -oopSize);
__ z_stg(pre_val, 0, tmp, tmp2); // [_buf + index] := <address_of_card>
__ z_stg(tmp, satb_q_index_byte_offset, Z_thread);
__ bind(marking_not_active);
// Restore tmp registers (see assertion in G1PreBarrierStub::emit_code()).
__ z_lg(tmp, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
__ z_lg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
__ z_br(Z_R14);
__ bind(refill);
save_volatile_registers(sasm);
__ z_lgr(tmp, pre_val); // save pre_val
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread),
Z_thread);
__ z_lgr(pre_val, tmp); // restore pre_val
restore_volatile_registers(sasm);
__ z_bru(restart);
}
break;
case g1_post_barrier_slow_id:
{ // Z_R1_scratch: oop address, address of updated memory slot
BarrierSet* bs = BarrierSet::barrier_set();
if (bs->kind() != BarrierSet::G1BarrierSet) {
__ should_not_reach_here(FILE_AND_LINE);
break;
}
__ set_info("g1_post_barrier_slow_id", dont_gc_arguments);
Register addr_oop = Z_R1_scratch;
Register addr_card = Z_R1_scratch;
Register r1 = Z_R6; // Must be saved/restored.
Register r2 = Z_R7; // Must be saved/restored.
Register cardtable = r1; // Must be non-volatile, because it is used to save addr_card.
jbyte* byte_map_base = ci_card_table_address();
// Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
__ z_stg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
Label not_already_dirty, restart, refill, young_card;
// Calculate address of card corresponding to the updated oop slot.
AddressLiteral rs(byte_map_base);
__ z_srlg(addr_card, addr_oop, CardTable::card_shift);
addr_oop = noreg; // dead now
__ load_const_optimized(cardtable, rs); // cardtable := <card table base>
__ z_agr(addr_card, cardtable); // addr_card := addr_oop>>card_shift + cardtable
__ z_cli(0, addr_card, (int)G1CardTable::g1_young_card_val());
__ z_bre(young_card);
__ z_sync(); // Required to support concurrent cleaning.
__ z_cli(0, addr_card, (int)CardTable::dirty_card_val());
__ z_brne(not_already_dirty);
__ bind(young_card);
// We didn't take the branch, so we're already dirty: restore
// used registers and return.
__ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
__ z_br(Z_R14);
// Not dirty.
__ bind(not_already_dirty);
// First, dirty it: [addr_card] := 0
__ z_mvi(0, addr_card, CardTable::dirty_card_val());
Register idx = cardtable; // Must be non-volatile, because it is used to save addr_card.
Register buf = r2;
cardtable = noreg; // now dead
// Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
__ z_stg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
ByteSize dirty_card_q_index_byte_offset = G1ThreadLocalData::dirty_card_queue_index_offset();
ByteSize dirty_card_q_buf_byte_offset = G1ThreadLocalData::dirty_card_queue_buffer_offset();
__ bind(restart);
// Get the index into the update buffer. DirtyCardQueue::_index is
// a size_t so z_ltg is appropriate here.
__ z_ltg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));
// index == 0?
__ z_brz(refill);
__ z_lg(buf, Address(Z_thread, dirty_card_q_buf_byte_offset));
__ add2reg(idx, -oopSize);
__ z_stg(addr_card, 0, idx, buf); // [_buf + index] := <address_of_card>
__ z_stg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));
// Restore killed registers and return.
__ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
__ z_lg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
__ z_br(Z_R14);
__ bind(refill);
save_volatile_registers(sasm);
__ z_lgr(idx, addr_card); // Save addr_card, tmp3 must be non-volatile.
__ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread),
Z_thread);
__ z_lgr(addr_card, idx);
restore_volatile_registers(sasm); // Restore addr_card.
__ z_bru(restart);
}
break;
#endif // INCLUDE_ALL_GCS
case predicate_failed_trap_id:
{
__ set_info("predicate_failed_trap", dont_gc_arguments);


@@ -33,6 +33,11 @@
#include "gc/g1/heapRegion.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#endif
#define __ masm->
@@ -406,4 +411,209 @@ void G1BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value
__ bind(Ldone);
}
#ifdef COMPILER1
#undef __
#define __ ce->masm()->
void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
// At this point we know that marking is in progress.
// If do_load() is true then we have to emit the
// load of the previous value; otherwise it has already
// been loaded into _pre_val.
__ bind(*stub->entry());
ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
assert(stub->pre_val()->is_register(), "Precondition.");
Register pre_val_reg = stub->pre_val()->as_register();
if (stub->do_load()) {
ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
}
__ z_ltgr(Z_R1_scratch, pre_val_reg); // Pass oop in Z_R1_scratch to Runtime1::g1_pre_barrier_slow_id.
__ branch_optimized(Assembler::bcondZero, *stub->continuation());
ce->emit_call_c(bs->pre_barrier_c1_runtime_code_blob()->code_begin());
__ branch_optimized(Assembler::bcondAlways, *stub->continuation());
}
void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
__ bind(*stub->entry());
ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
assert(stub->addr()->is_register(), "Precondition.");
assert(stub->new_val()->is_register(), "Precondition.");
Register new_val_reg = stub->new_val()->as_register();
__ z_ltgr(new_val_reg, new_val_reg);
__ branch_optimized(Assembler::bcondZero, *stub->continuation());
__ z_lgr(Z_R1_scratch, stub->addr()->as_pointer_register());
ce->emit_call_c(bs->post_barrier_c1_runtime_code_blob()->code_begin());
__ branch_optimized(Assembler::bcondAlways, *stub->continuation());
}
#undef __
#define __ sasm->
static OopMap* save_volatile_registers(StubAssembler* sasm, Register return_pc = Z_R14) {
__ block_comment("save_volatile_registers");
RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
int frame_size_in_slots = RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
return RegisterSaver::save_live_registers(sasm, reg_set, return_pc);
}
static void restore_volatile_registers(StubAssembler* sasm) {
__ block_comment("restore_volatile_registers");
RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
RegisterSaver::restore_live_registers(sasm, reg_set);
}
void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
// Z_R1_scratch: previous value of memory
BarrierSet* bs = BarrierSet::barrier_set();
__ set_info("g1_pre_barrier_slow_id", false);
Register pre_val = Z_R1_scratch;
Register tmp = Z_R6; // Must be non-volatile because it is used to save pre_val.
Register tmp2 = Z_R7;
Label refill, restart, marking_not_active;
int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
// Save tmp registers (see assertion in G1PreBarrierStub::emit_code()).
__ z_stg(tmp, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
__ z_stg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
// Is marking still active?
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
__ load_and_test_int(tmp, Address(Z_thread, satb_q_active_byte_offset));
} else {
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
__ load_and_test_byte(tmp, Address(Z_thread, satb_q_active_byte_offset));
}
__ z_bre(marking_not_active); // Activity indicator is zero, so there is no marking going on currently.
__ bind(restart);
// Load the index into the SATB buffer. SATBMarkQueue::_index is a
// size_t so ld_ptr is appropriate.
__ z_ltg(tmp, satb_q_index_byte_offset, Z_R0, Z_thread);
// index == 0?
__ z_brz(refill);
__ z_lg(tmp2, satb_q_buf_byte_offset, Z_thread);
__ add2reg(tmp, -oopSize);
__ z_stg(pre_val, 0, tmp, tmp2); // [_buf + index] := <address_of_card>
__ z_stg(tmp, satb_q_index_byte_offset, Z_thread);
__ bind(marking_not_active);
// Restore tmp registers (see assertion in G1PreBarrierStub::emit_code()).
__ z_lg(tmp, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
__ z_lg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
__ z_br(Z_R14);
__ bind(refill);
save_volatile_registers(sasm);
__ z_lgr(tmp, pre_val); // save pre_val
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread),
Z_thread);
__ z_lgr(pre_val, tmp); // restore pre_val
restore_volatile_registers(sasm);
__ z_bru(restart);
}
void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
// Z_R1_scratch: oop address, address of updated memory slot
BarrierSet* bs = BarrierSet::barrier_set();
__ set_info("g1_post_barrier_slow_id", false);
Register addr_oop = Z_R1_scratch;
Register addr_card = Z_R1_scratch;
Register r1 = Z_R6; // Must be saved/restored.
Register r2 = Z_R7; // Must be saved/restored.
Register cardtable = r1; // Must be non-volatile, because it is used to save addr_card.
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
jbyte* byte_map_base = ct->byte_map_base();
// Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
__ z_stg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
Label not_already_dirty, restart, refill, young_card;
// Calculate address of card corresponding to the updated oop slot.
AddressLiteral rs(byte_map_base);
__ z_srlg(addr_card, addr_oop, CardTable::card_shift);
addr_oop = noreg; // dead now
__ load_const_optimized(cardtable, rs); // cardtable := <card table base>
__ z_agr(addr_card, cardtable); // addr_card := addr_oop>>card_shift + cardtable
__ z_cli(0, addr_card, (int)G1CardTable::g1_young_card_val());
__ z_bre(young_card);
__ z_sync(); // Required to support concurrent cleaning.
__ z_cli(0, addr_card, (int)CardTable::dirty_card_val());
__ z_brne(not_already_dirty);
__ bind(young_card);
// We didn't take the branch, so we're already dirty: restore
// used registers and return.
__ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
__ z_br(Z_R14);
// Not dirty.
__ bind(not_already_dirty);
// First, dirty it: [addr_card] := 0
__ z_mvi(0, addr_card, CardTable::dirty_card_val());
Register idx = cardtable; // Must be non-volatile, because it is used to save addr_card.
Register buf = r2;
cardtable = noreg; // now dead
// Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
__ z_stg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
ByteSize dirty_card_q_index_byte_offset = G1ThreadLocalData::dirty_card_queue_index_offset();
ByteSize dirty_card_q_buf_byte_offset = G1ThreadLocalData::dirty_card_queue_buffer_offset();
__ bind(restart);
// Get the index into the update buffer. DirtyCardQueue::_index is
// a size_t so z_ltg is appropriate here.
__ z_ltg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));
// index == 0?
__ z_brz(refill);
__ z_lg(buf, Address(Z_thread, dirty_card_q_buf_byte_offset));
__ add2reg(idx, -oopSize);
__ z_stg(addr_card, 0, idx, buf); // [_buf + index] := <address_of_card>
__ z_stg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));
// Restore killed registers and return.
__ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
__ z_lg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
__ z_br(Z_R14);
__ bind(refill);
save_volatile_registers(sasm);
__ z_lgr(idx, addr_card); // Save addr_card, tmp3 must be non-volatile.
__ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread),
Z_thread);
__ z_lgr(addr_card, idx);
restore_volatile_registers(sasm); // Restore addr_card.
__ z_bru(restart);
}
#undef __
#endif // COMPILER1
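The pre- and post-barrier runtime stubs above are straight-line assembly encodings of the usual G1 slow paths. As a reading aid only, here is a self-contained C++ toy model of the post-barrier slow path (the SATB pre-barrier enqueue is analogous); every type and constant in it is a hypothetical stand-in, not HotSpot code, and the real stub works on the raw thread-local queue offsets shown above.

#include <cstddef>
#include <cstdint>
#include <atomic>

// Toy stand-ins for the card table and the thread-local dirty-card queue.
struct ToyCardTable {
  static constexpr uint8_t young_card = 2;   // placeholder card values (assumption)
  static constexpr uint8_t dirty_card = 0;
  static constexpr int     card_shift = 9;   // 512-byte cards (assumption)
  volatile uint8_t*        byte_map_base;
};

struct ToyDirtyCardQueue {
  void** buf;              // thread-local buffer of card addresses
  size_t index;            // byte index into buf; 0 means the buffer is full
  size_t capacity_bytes;
  // The real code calls DirtyCardQueueSet::handle_zero_index_for_thread();
  // here we just pretend the buffer was handed off and reset the index.
  void refill() { index = capacity_bytes; }
};

// Same control flow as generate_c1_post_barrier_runtime_stub above.
void toy_g1_post_barrier_slow(ToyCardTable& ct, ToyDirtyCardQueue& q, void* slot) {
  volatile uint8_t* card = ct.byte_map_base + ((uintptr_t)slot >> ToyCardTable::card_shift);
  if (*card == ToyCardTable::young_card) return;          // young regions need no remembering
  std::atomic_thread_fence(std::memory_order_seq_cst);    // the z_sync(): support concurrent cleaning
  if (*card == ToyCardTable::dirty_card) return;          // already dirty, nothing to log
  *card = ToyCardTable::dirty_card;                       // first, dirty it
  if (q.index == 0) q.refill();                           // buffer exhausted: hand off, then restart
  q.index -= sizeof(void*);
  q.buf[q.index / sizeof(void*)] = (void*)card;           // [_buf + index] := <address_of_card>
}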


@@ -28,6 +28,12 @@
#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
#include "utilities/macros.hpp"
class LIR_Assembler;
class StubAssembler;
class G1PreBarrierStub;
class G1PostBarrierStub;
class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
protected:
@@ -50,6 +56,14 @@ class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
const Address& dst, Register val, Register tmp1, Register tmp2, Register tmp3);
public:
#ifdef COMPILER1
void gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub);
void gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub);
void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
void generate_c1_post_barrier_runtime_stub(StubAssembler* sasm);
#endif
virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
const Address& src, Register dst, Register tmp1, Register tmp2, Label *is_null = NULL);


@@ -8401,16 +8401,6 @@ instruct compU_reg_uimm(flagsReg cr, iRegI op1, uimmI op2) %{
ins_pipe(pipe_class_dummy);
%}
instruct compU_reg_imm0(flagsReg cr, iRegI op1, immI_0 zero) %{
match(Set cr (CmpU op1 zero));
ins_cost(DEFAULT_COST_LOW);
size(2);
format %{ "LTR $op1,$op1\t # unsigned" %}
opcode(LTR_ZOPC);
ins_encode(z_rrform(op1, op1));
ins_pipe(pipe_class_dummy);
%}
instruct compU_reg_mem(flagsReg cr, iRegI op1, memory op2)%{
match(Set cr (CmpU op1 (LoadI op2)));
ins_cost(MEMORY_REF_COST);


@@ -32,9 +32,6 @@
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_sparc.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#endif // INCLUDE_ALL_GCS
#define __ ce->masm()->
@@ -454,63 +451,4 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
__ delayed()->nop();
}
///////////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
// At this point we know that marking is in progress.
// If do_load() is true then we have to emit the
// load of the previous value; otherwise it has already
// been loaded into _pre_val.
__ bind(_entry);
assert(pre_val()->is_register(), "Precondition.");
Register pre_val_reg = pre_val()->as_register();
if (do_load()) {
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
}
if (__ is_in_wdisp16_range(_continuation)) {
__ br_null(pre_val_reg, /*annul*/false, Assembler::pt, _continuation);
} else {
__ cmp(pre_val_reg, G0);
__ brx(Assembler::equal, false, Assembler::pn, _continuation);
}
__ delayed()->nop();
__ call(Runtime1::entry_for(Runtime1::Runtime1::g1_pre_barrier_slow_id));
__ delayed()->mov(pre_val_reg, G4);
__ br(Assembler::always, false, Assembler::pt, _continuation);
__ delayed()->nop();
}
void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
assert(addr()->is_register(), "Precondition.");
assert(new_val()->is_register(), "Precondition.");
Register addr_reg = addr()->as_pointer_register();
Register new_val_reg = new_val()->as_register();
if (__ is_in_wdisp16_range(_continuation)) {
__ br_null(new_val_reg, /*annul*/false, Assembler::pt, _continuation);
} else {
__ cmp(new_val_reg, G0);
__ brx(Assembler::equal, false, Assembler::pn, _continuation);
}
__ delayed()->nop();
__ call(Runtime1::entry_for(Runtime1::Runtime1::g1_post_barrier_slow_id));
__ delayed()->mov(addr_reg, G4);
__ br(Assembler::always, false, Assembler::pt, _continuation);
__ delayed()->nop();
}
#endif // INCLUDE_ALL_GCS
///////////////////////////////////////////////////////////////////////////////////
#undef __


@@ -193,7 +193,7 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
BasicType type, bool needs_card_mark) {
BasicType type) {
int elem_size = type2aelembytes(type);
int shift = exact_log2(elem_size);
@@ -231,13 +231,8 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
__ add(index_opr, array_opr, base_opr);
}
}
if (needs_card_mark) {
LIR_Opr ptr = new_pointer_register();
__ add(base_opr, LIR_OprFact::intptrConst(offset), ptr);
return new LIR_Address(ptr, type);
} else {
return new LIR_Address(base_opr, offset, type);
}
}
LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
@@ -311,85 +306,16 @@ void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp)
}
}
//----------------------------------------------------------------------
// visitor functions
//----------------------------------------------------------------------
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
assert(x->is_pinned(),"");
bool needs_range_check = x->compute_needs_range_check();
bool use_length = x->length() != NULL;
bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
!get_jobject_constant(x->value())->is_null_object() ||
x->should_profile());
LIRItem array(x->array(), this);
LIRItem index(x->index(), this);
LIRItem value(x->value(), this);
LIRItem length(this);
array.load_item();
index.load_nonconstant();
if (use_length && needs_range_check) {
length.set_instruction(x->length());
length.load_item();
}
if (needs_store_check || x->check_boolean()) {
value.load_item();
} else {
value.load_for_store(x->elt_type());
}
set_no_result(x);
// the CodeEmitInfo must be duplicated for each different
// LIR-instruction because spilling can occur anywhere between two
// instructions and so the debug information must be different
CodeEmitInfo* range_check_info = state_for(x);
CodeEmitInfo* null_check_info = NULL;
if (x->needs_null_check()) {
null_check_info = new CodeEmitInfo(range_check_info);
}
// emit array address setup early so it schedules better
LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
if (GenerateRangeChecks && needs_range_check) {
if (use_length) {
__ cmp(lir_cond_belowEqual, length.result(), index.result());
__ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
} else {
array_range_check(array.result(), index.result(), null_check_info, range_check_info);
// range_check also does the null check
null_check_info = NULL;
}
}
if (GenerateArrayStoreCheck && needs_store_check) {
void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
LIR_Opr tmp1 = FrameMap::G1_opr;
LIR_Opr tmp2 = FrameMap::G3_opr;
LIR_Opr tmp3 = FrameMap::G5_opr;
CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
__ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
}
if (obj_store) {
// Needs GC write barriers.
pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
}
LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
__ move(result, array_addr, null_check_info);
if (obj_store) {
// Precise card mark
post_barrier(LIR_OprFact::address(array_addr), value.result());
}
__ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}
//----------------------------------------------------------------------
// visitor functions
//----------------------------------------------------------------------
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
assert(x->is_pinned(),"");
@@ -635,51 +561,47 @@ void LIRGenerator::do_CompareOp(CompareOp* x) {
}
}
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
assert(x->number_of_arguments() == 4, "wrong type");
LIRItem obj (x->argument_at(0), this); // object
LIRItem offset(x->argument_at(1), this); // offset of field
LIRItem cmp (x->argument_at(2), this); // value to compare with field
LIRItem val (x->argument_at(3), this); // replace field with val if matches cmp
// Use temps to avoid kills
LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
LIR_Opr result = new_register(T_INT);
LIR_Opr t1 = FrameMap::G1_opr;
LIR_Opr t2 = FrameMap::G3_opr;
LIR_Opr addr = new_pointer_register();
// get address of field
obj.load_item();
offset.load_item();
cmp.load_item();
val.load_item();
__ add(obj.result(), offset.result(), addr);
if (type == objectType) { // Write-barrier needed for Object fields.
pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
cmp_value.load_item();
new_value.load_item();
if (type == T_OBJECT || type == T_ARRAY) {
__ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
} else if (type == T_INT) {
__ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
} else if (type == T_LONG) {
__ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
} else {
Unimplemented();
}
if (type == objectType)
__ cas_obj(addr, cmp.result(), val.result(), t1, t2);
else if (type == intType)
__ cas_int(addr, cmp.result(), val.result(), t1, t2);
else if (type == longType)
__ cas_long(addr, cmp.result(), val.result(), t1, t2);
else {
ShouldNotReachHere();
}
// generate conditional move of boolean result
LIR_Opr result = rlock_result(x);
__ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
result, as_BasicType(type));
if (type == objectType) { // Write-barrier needed for Object fields.
// Precise card mark since could either be object or array
post_barrier(addr, val.result());
}
result, type);
return result;
}
LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
bool is_obj = type == T_OBJECT || type == T_ARRAY;
LIR_Opr result = new_register(type);
LIR_Opr tmp = LIR_OprFact::illegalOpr;
value.load_item();
if (is_obj) {
tmp = FrameMap::G3_opr;
}
// Because we want a 2-arg form of xchg
__ move(value.result(), result);
__ xchg(addr, result, result, tmp);
return result;
}
LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
Unimplemented();
return LIR_OprFact::illegalOpr;
}
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
switch (x->id()) {
@@ -1202,7 +1124,7 @@ void LIRGenerator::do_BlockBegin(BlockBegin* x) {
void LIRGenerator::do_CheckCast(CheckCast* x) {
LIRItem obj(x->obj(), this);
CodeEmitInfo* patching_info = NULL;
if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
// must do this before locking the destination register as an oop register,
// and before the obj is loaded (so x->obj()->item() is valid for creating a debug info location)
patching_info = state_for(x, x->state_before());
@@ -1338,94 +1260,3 @@ void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
CodeEmitInfo* info) {
__ load(address, result, info);
}
void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
BasicType type, bool is_volatile) {
LIR_Opr base_op = src;
LIR_Opr index_op = offset;
bool is_obj = (type == T_ARRAY || type == T_OBJECT);
{
if (type == T_BOOLEAN) {
type = T_BYTE;
}
LIR_Address* addr;
if (type == T_ARRAY || type == T_OBJECT) {
LIR_Opr tmp = new_pointer_register();
__ add(base_op, index_op, tmp);
addr = new LIR_Address(tmp, type);
} else {
addr = new LIR_Address(base_op, index_op, type);
}
if (is_obj) {
pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
// _bs->c1_write_barrier_pre(this, LIR_OprFact::address(addr));
}
__ move(data, addr);
if (is_obj) {
// This address is precise
post_barrier(LIR_OprFact::address(addr), data);
}
}
}
void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
BasicType type, bool is_volatile) {
{
LIR_Address* addr = new LIR_Address(src, offset, type);
__ load(addr, dst);
}
}
void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
BasicType type = x->basic_type();
LIRItem src(x->object(), this);
LIRItem off(x->offset(), this);
LIRItem value(x->value(), this);
src.load_item();
value.load_item();
off.load_nonconstant();
LIR_Opr dst = rlock_result(x, type);
LIR_Opr data = value.result();
bool is_obj = (type == T_ARRAY || type == T_OBJECT);
LIR_Opr offset = off.result();
// Because we want a 2-arg form of xchg
__ move(data, dst);
assert (!x->is_add() && (type == T_INT || (is_obj && UseCompressedOops)), "unexpected type");
LIR_Address* addr;
if (offset->is_constant()) {
jlong l = offset->as_jlong();
assert((jlong)((jint)l) == l, "offset too large for constant");
jint c = (jint)l;
addr = new LIR_Address(src.result(), c, type);
} else {
addr = new LIR_Address(src.result(), offset, type);
}
LIR_Opr tmp = LIR_OprFact::illegalOpr;
LIR_Opr ptr = LIR_OprFact::illegalOpr;
if (is_obj) {
// Do the pre-write barrier, if any.
// barriers on sparc don't work with a base + index address
tmp = FrameMap::G3_opr;
ptr = new_pointer_register();
__ add(src.result(), off.result(), ptr);
pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
}
__ xchg(LIR_OprFact::address(addr), dst, dst, tmp);
if (is_obj) {
// Seems to be a precise address
post_barrier(ptr, data);
}
}


@@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -92,4 +92,7 @@
// This platform only uses signal-based null checks. The Label is not needed.
void null_check(Register r, Label *Lnull = NULL) { MacroAssembler::null_check(r); }
void save_live_registers_no_oop_map(bool save_fpu_registers);
void restore_live_registers(bool restore_fpu_registers);
#endif // CPU_SPARC_VM_C1_MACROASSEMBLER_SPARC_HPP


@@ -40,11 +40,6 @@
#include "utilities/macros.hpp"
#include "utilities/align.hpp"
#include "vmreg_sparc.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#endif
// Implementation of StubAssembler
@@ -145,10 +140,16 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
return call_RT(oop_result1, metadata_result, entry, 3);
}
void StubAssembler::prologue(const char* name, bool must_gc_arguments) {
set_info(name, must_gc_arguments);
}
void StubAssembler::epilogue() {
delayed()->restore();
}
// Implementation of Runtime1
#define __ sasm->
static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
@@ -156,7 +157,7 @@ static int reg_save_size_in_words;
static int frame_size_in_bytes = -1;
static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
assert(frame_size_in_bytes == sasm->total_frame_size_in_bytes(reg_save_size_in_words),
"mismatch in calculation");
sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
@@ -183,7 +184,9 @@ static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
return oop_map;
}
static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true) {
#define __ this->
void C1_MacroAssembler::save_live_registers_no_oop_map(bool save_fpu_registers) {
assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
"mismatch in calculation");
__ save_frame_c1(frame_size_in_bytes);
@@ -211,11 +214,9 @@ static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers
__ stf(FloatRegisterImpl::S, r, SP, (sp_offset * BytesPerWord) + STACK_BIAS);
}
}
return generate_oop_map(sasm, save_fpu_registers);
}
static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
void C1_MacroAssembler::restore_live_registers(bool restore_fpu_registers) {
for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
Register r = as_Register(i);
if (r == G1 || r == G3 || r == G4 || r == G5) {
@@ -231,6 +232,18 @@ static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registe
}
}
#undef __
#define __ sasm->
static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true) {
sasm->save_live_registers_no_oop_map(save_fpu_registers);
return generate_oop_map(sasm, save_fpu_registers);
}
static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
sasm->restore_live_registers(restore_fpu_registers);
}
void Runtime1::initialize_pd() {
// compute word offsets from SP at which live (non-windowed) registers are captured by stub routines
@@ -759,165 +772,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
}
break;
#if INCLUDE_ALL_GCS
case g1_pre_barrier_slow_id:
{ // G4: previous value of memory
BarrierSet* bs = BarrierSet::barrier_set();
if (bs->kind() != BarrierSet::G1BarrierSet) {
__ save_frame(0);
__ set((int)id, O1);
__ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
__ should_not_reach_here();
break;
}
__ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);
Register pre_val = G4;
Register tmp = G1_scratch;
Register tmp2 = G3_scratch;
Label refill, restart;
int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
// Is marking still active?
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
__ ld(G2_thread, satb_q_active_byte_offset, tmp);
} else {
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
__ ldsb(G2_thread, satb_q_active_byte_offset, tmp);
}
__ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, restart);
__ retl();
__ delayed()->nop();
__ bind(restart);
// Load the index into the SATB buffer. SATBMarkQueue::_index is a
// size_t so ld_ptr is appropriate
__ ld_ptr(G2_thread, satb_q_index_byte_offset, tmp);
// index == 0?
__ cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pn, refill);
__ ld_ptr(G2_thread, satb_q_buf_byte_offset, tmp2);
__ sub(tmp, oopSize, tmp);
__ st_ptr(pre_val, tmp2, tmp); // [_buf + index] := <address_of_card>
// Use return-from-leaf
__ retl();
__ delayed()->st_ptr(tmp, G2_thread, satb_q_index_byte_offset);
__ bind(refill);
save_live_registers(sasm);
__ call_VM_leaf(L7_thread_cache,
CAST_FROM_FN_PTR(address,
SATBMarkQueueSet::handle_zero_index_for_thread),
G2_thread);
restore_live_registers(sasm);
__ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
__ delayed()->restore();
}
break;
case g1_post_barrier_slow_id:
{
BarrierSet* bs = BarrierSet::barrier_set();
if (bs->kind() != BarrierSet::G1BarrierSet) {
__ save_frame(0);
__ set((int)id, O1);
__ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
__ should_not_reach_here();
break;
}
__ set_info("g1_post_barrier_slow_id", dont_gc_arguments);
Register addr = G4;
Register cardtable = G5;
Register tmp = G1_scratch;
Register tmp2 = G3_scratch;
jbyte* byte_map_base = ci_card_table_address();
Label not_already_dirty, restart, refill, young_card;
__ srlx(addr, CardTable::card_shift, addr);
AddressLiteral rs(byte_map_base);
__ set(rs, cardtable); // cardtable := <card table base>
__ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
__ cmp_and_br_short(tmp, G1CardTable::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
__ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
__ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
assert(CardTable::dirty_card_val() == 0, "otherwise check this code");
__ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
__ bind(young_card);
// We didn't take the branch, so we're already dirty: return.
// Use return-from-leaf
__ retl();
__ delayed()->nop();
// Not dirty.
__ bind(not_already_dirty);
// Get cardtable + tmp into a reg by itself
__ add(addr, cardtable, tmp2);
// First, dirty it.
__ stb(G0, tmp2, 0); // [cardPtr] := 0 (i.e., dirty).
Register tmp3 = cardtable;
Register tmp4 = tmp;
// these registers are now dead
addr = cardtable = tmp = noreg;
int dirty_card_q_index_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
int dirty_card_q_buf_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());
__ bind(restart);
// Get the index into the update buffer. DirtyCardQueue::_index is
// a size_t so ld_ptr is appropriate here.
__ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, tmp3);
// index == 0?
__ cmp_and_brx_short(tmp3, G0, Assembler::equal, Assembler::pn, refill);
__ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, tmp4);
__ sub(tmp3, oopSize, tmp3);
__ st_ptr(tmp2, tmp4, tmp3); // [_buf + index] := <address_of_card>
// Use return-from-leaf
__ retl();
__ delayed()->st_ptr(tmp3, G2_thread, dirty_card_q_index_byte_offset);
__ bind(refill);
save_live_registers(sasm);
__ call_VM_leaf(L7_thread_cache,
CAST_FROM_FN_PTR(address,
DirtyCardQueueSet::handle_zero_index_for_thread),
G2_thread);
restore_live_registers(sasm);
__ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
__ delayed()->restore();
}
break;
#endif // INCLUDE_ALL_GCS
case predicate_failed_trap_id:
{
__ set_info("predicate_failed_trap", dont_gc_arguments);


@@ -25,13 +25,18 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#endif
#define __ masm->
@@ -476,8 +481,6 @@ void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorator
}
}
#undef __
void G1BarrierSetAssembler::barrier_stubs_init() {
if (dirty_card_log_enqueue == 0) {
G1BarrierSet* bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
@@ -494,3 +497,211 @@ void G1BarrierSetAssembler::barrier_stubs_init() {
assert(satb_log_enqueue_frameless != 0, "postcondition.");
}
}
#ifdef COMPILER1
#undef __
#define __ ce->masm()->
void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
// At this point we know that marking is in progress.
// If do_load() is true then we have to emit the
// load of the previous value; otherwise it has already
// been loaded into _pre_val.
__ bind(*stub->entry());
assert(stub->pre_val()->is_register(), "Precondition.");
Register pre_val_reg = stub->pre_val()->as_register();
if (stub->do_load()) {
ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
}
if (__ is_in_wdisp16_range(*stub->continuation())) {
__ br_null(pre_val_reg, /*annul*/false, Assembler::pt, *stub->continuation());
} else {
__ cmp(pre_val_reg, G0);
__ brx(Assembler::equal, false, Assembler::pn, *stub->continuation());
}
__ delayed()->nop();
__ call(bs->pre_barrier_c1_runtime_code_blob()->code_begin());
__ delayed()->mov(pre_val_reg, G4);
__ br(Assembler::always, false, Assembler::pt, *stub->continuation());
__ delayed()->nop();
}
void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
__ bind(*stub->entry());
assert(stub->addr()->is_register(), "Precondition.");
assert(stub->new_val()->is_register(), "Precondition.");
Register addr_reg = stub->addr()->as_pointer_register();
Register new_val_reg = stub->new_val()->as_register();
if (__ is_in_wdisp16_range(*stub->continuation())) {
__ br_null(new_val_reg, /*annul*/false, Assembler::pt, *stub->continuation());
} else {
__ cmp(new_val_reg, G0);
__ brx(Assembler::equal, false, Assembler::pn, *stub->continuation());
}
__ delayed()->nop();
__ call(bs->post_barrier_c1_runtime_code_blob()->code_begin());
__ delayed()->mov(addr_reg, G4);
__ br(Assembler::always, false, Assembler::pt, *stub->continuation());
__ delayed()->nop();
}
#undef __
#define __ sasm->
void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
__ prologue("g1_pre_barrier", false);
// G4: previous value of memory
Register pre_val = G4;
Register tmp = G1_scratch;
Register tmp2 = G3_scratch;
Label refill, restart;
int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
// Is marking still active?
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
__ ld(G2_thread, satb_q_active_byte_offset, tmp);
} else {
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
__ ldsb(G2_thread, satb_q_active_byte_offset, tmp);
}
__ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, restart);
__ retl();
__ delayed()->nop();
__ bind(restart);
// Load the index into the SATB buffer. SATBMarkQueue::_index is a
// size_t so ld_ptr is appropriate
__ ld_ptr(G2_thread, satb_q_index_byte_offset, tmp);
// index == 0?
__ cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pn, refill);
__ ld_ptr(G2_thread, satb_q_buf_byte_offset, tmp2);
__ sub(tmp, oopSize, tmp);
__ st_ptr(pre_val, tmp2, tmp); // [_buf + index] := <address_of_card>
// Use return-from-leaf
__ retl();
__ delayed()->st_ptr(tmp, G2_thread, satb_q_index_byte_offset);
__ bind(refill);
__ save_live_registers_no_oop_map(true);
__ call_VM_leaf(L7_thread_cache,
CAST_FROM_FN_PTR(address,
SATBMarkQueueSet::handle_zero_index_for_thread),
G2_thread);
__ restore_live_registers(true);
__ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
__ epilogue();
}
void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
__ prologue("g1_post_barrier", false);
G1BarrierSet* bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
Register addr = G4;
Register cardtable = G5;
Register tmp = G1_scratch;
Register tmp2 = G3_scratch;
jbyte* byte_map_base = bs->card_table()->byte_map_base();
Label not_already_dirty, restart, refill, young_card;
#ifdef _LP64
__ srlx(addr, CardTable::card_shift, addr);
#else
__ srl(addr, CardTable::card_shift, addr);
#endif
AddressLiteral rs((address)byte_map_base);
__ set(rs, cardtable); // cardtable := <card table base>
__ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
__ cmp_and_br_short(tmp, G1CardTable::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
__ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
__ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
assert(G1CardTable::dirty_card_val() == 0, "otherwise check this code");
__ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
__ bind(young_card);
// We didn't take the branch, so we're already dirty: return.
// Use return-from-leaf
__ retl();
__ delayed()->nop();
// Not dirty.
__ bind(not_already_dirty);
// Get the card address (addr + cardtable) into a register by itself
__ add(addr, cardtable, tmp2);
// First, dirty it.
__ stb(G0, tmp2, 0); // [cardPtr] := 0 (i.e., dirty).
Register tmp3 = cardtable;
Register tmp4 = tmp;
// these registers are now dead
addr = cardtable = tmp = noreg;
int dirty_card_q_index_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
int dirty_card_q_buf_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());
__ bind(restart);
// Get the index into the update buffer. DirtyCardQueue::_index is
// a size_t so ld_ptr is appropriate here.
__ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, tmp3);
// index == 0?
__ cmp_and_brx_short(tmp3, G0, Assembler::equal, Assembler::pn, refill);
__ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, tmp4);
__ sub(tmp3, oopSize, tmp3);
__ st_ptr(tmp2, tmp4, tmp3); // [_buf + index] := <address_of_card>
// Use return-from-leaf
__ retl();
__ delayed()->st_ptr(tmp3, G2_thread, dirty_card_q_index_byte_offset);
__ bind(refill);
__ save_live_registers_no_oop_map(true);
__ call_VM_leaf(L7_thread_cache,
CAST_FROM_FN_PTR(address,
DirtyCardQueueSet::handle_zero_index_for_thread),
G2_thread);
__ restore_live_registers(true);
__ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
__ epilogue();
}
#undef __
#endif // COMPILER1

View File

@ -27,6 +27,12 @@
#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
#include "utilities/macros.hpp"
class LIR_Assembler;
class StubAssembler;
class G1PreBarrierStub;
class G1PostBarrierStub;
class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
protected:
@ -40,6 +46,14 @@ protected:
Register val, Address dst, Register tmp);
public:
#ifdef COMPILER1
void gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub);
void gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub);
void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
void generate_c1_post_barrier_runtime_stub(StubAssembler* sasm);
#endif
virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address src, Register dst, Register tmp);
virtual void barrier_stubs_init();

View File

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/jniHandles.hpp"
#define __ masm->
@ -98,3 +99,9 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators,
default: Unimplemented();
}
}
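// A jobject handed to native code may be a weak global reference, tagged in
// its low bits with JNIHandles::weak_tag_mask. The default implementation
// simply strips the tag and loads the oop through the handle; a collector
// that needs additional checks can override this hook.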
void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
Register obj, Register tmp, Label& slowpath) {
__ andn(obj, JNIHandles::weak_tag_mask, obj);
__ ld_ptr(obj, 0, obj);
}

View File

@ -44,6 +44,10 @@ public:
virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address src, Register dst, Register tmp);
// Support for jniFastGetField to try resolving a jobject/jweak in native
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
Register obj, Register tmp, Label& slowpath);
virtual void barrier_stubs_init() {}
};

View File

@ -24,6 +24,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
@ -68,8 +70,11 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
__ andcc (G4, 1, G0);
__ br (Assembler::notZero, false, Assembler::pn, label1);
__ delayed()->srl (O2, 2, O4);
__ andn (O1, JNIHandles::weak_tag_mask, O1);
__ ld_ptr (O1, 0, O5);
__ mov(O1, O5);
// Both O5 and G3 are clobbered by try_resolve_jobject_in_native.
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->try_resolve_jobject_in_native(masm, /* jni_env */ O0, /* obj */ O5, /* tmp */ G3, label1);
assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
speculative_load_pclist[count] = __ pc();

View File

@ -35,6 +35,7 @@
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/objectMonitor.hpp"

View File

@ -31,6 +31,7 @@
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/frame.inline.hpp"
#include "utilities/preserveException.hpp"

View File

@ -33,9 +33,6 @@
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#endif // INCLUDE_ALL_GCS
#define __ ce->masm()->
@ -521,45 +518,4 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
__ jmp(_continuation);
}
/////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
// At this point we know that marking is in progress.
// If do_load() is true then we have to emit the
// load of the previous value; otherwise it has already
// been loaded into _pre_val.
__ bind(_entry);
assert(pre_val()->is_register(), "Precondition.");
Register pre_val_reg = pre_val()->as_register();
if (do_load()) {
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
}
__ cmpptr(pre_val_reg, (int32_t) NULL_WORD);
__ jcc(Assembler::equal, _continuation);
ce->store_parameter(pre_val()->as_register(), 0);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
__ jmp(_continuation);
}
void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
assert(addr()->is_register(), "Precondition.");
assert(new_val()->is_register(), "Precondition.");
Register new_val_reg = new_val()->as_register();
__ cmpptr(new_val_reg, (int32_t) NULL_WORD);
__ jcc(Assembler::equal, _continuation);
ce->store_parameter(addr()->as_pointer_register(), 0);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
__ jmp(_continuation);
}
#endif // INCLUDE_ALL_GCS
/////////////////////////////////////////////////////////////////////////////
#undef __

View File

@ -33,6 +33,7 @@
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_x86.inline.hpp"
@ -152,9 +153,27 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
int shift, int disp, BasicType type) {
assert(base->is_register(), "must be");
if (index->is_constant()) {
LIR_Const *constant = index->as_constant_ptr();
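// On x86-64 a LIR_Address displacement must fit in a signed 32-bit immediate,
// so a constant index whose scaled value overflows an int is materialized in a
// temporary register instead. 32-bit x86 can always fold the constant into the
// displacement.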
#ifdef _LP64
jlong c;
if (constant->type() == T_INT) {
c = (jlong(index->as_jint()) << shift) + disp;
} else {
assert(constant->type() == T_LONG, "should be");
c = (index->as_jlong() << shift) + disp;
}
if ((jlong)((jint)c) == c) {
return new LIR_Address(base, (jint)c, type);
} else {
LIR_Opr tmp = new_register(T_LONG);
__ move(index, tmp);
return new LIR_Address(base, tmp, type);
}
#else
return new LIR_Address(base,
((intx)(index->as_constant_ptr()->as_jint()) << shift) + disp,
((intx)(constant->as_jint()) << shift) + disp,
type);
#endif
} else {
return new LIR_Address(base, index, (LIR_Address::Scale)shift, disp, type);
}
@ -162,7 +181,7 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
BasicType type, bool needs_card_mark) {
BasicType type) {
int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
LIR_Address* addr;
@ -183,16 +202,7 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
LIR_Address::scale(type),
offset_in_bytes, type);
}
if (needs_card_mark) {
// This store will need a precise card mark, so go ahead and
// compute the full address instead of computing it once for the
// store and again for the card mark.
LIR_Opr tmp = new_pointer_register();
__ leal(LIR_OprFact::address(addr), tmp);
return new LIR_Address(tmp, type);
} else {
return addr;
}
}
@ -253,86 +263,16 @@ void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp)
__ store(item, new LIR_Address(FrameMap::rsp_opr, in_bytes(offset_from_sp), type));
}
//----------------------------------------------------------------------
// visitor functions
//----------------------------------------------------------------------
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
assert(x->is_pinned(),"");
bool needs_range_check = x->compute_needs_range_check();
bool use_length = x->length() != NULL;
bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
!get_jobject_constant(x->value())->is_null_object() ||
x->should_profile());
LIRItem array(x->array(), this);
LIRItem index(x->index(), this);
LIRItem value(x->value(), this);
LIRItem length(this);
array.load_item();
index.load_nonconstant();
if (use_length && needs_range_check) {
length.set_instruction(x->length());
length.load_item();
}
if (needs_store_check || x->check_boolean()) {
value.load_item();
} else {
value.load_for_store(x->elt_type());
}
set_no_result(x);
// the CodeEmitInfo must be duplicated for each different
// LIR-instruction because spilling can occur anywhere between two
// instructions and so the debug information must be different
CodeEmitInfo* range_check_info = state_for(x);
CodeEmitInfo* null_check_info = NULL;
if (x->needs_null_check()) {
null_check_info = new CodeEmitInfo(range_check_info);
}
// emit array address setup early so it schedules better
LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
if (GenerateRangeChecks && needs_range_check) {
if (use_length) {
__ cmp(lir_cond_belowEqual, length.result(), index.result());
__ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
} else {
array_range_check(array.result(), index.result(), null_check_info, range_check_info);
// range_check also does the null check
null_check_info = NULL;
}
}
if (GenerateArrayStoreCheck && needs_store_check) {
void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
LIR_Opr tmp1 = new_register(objectType);
LIR_Opr tmp2 = new_register(objectType);
LIR_Opr tmp3 = new_register(objectType);
CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
__ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
}
if (obj_store) {
// Needs GC write barriers.
pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
__ move(value.result(), array_addr, null_check_info);
// Seems to be a precise address
post_barrier(LIR_OprFact::address(array_addr), value.result());
} else {
LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
__ move(result, array_addr, null_check_info);
}
__ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}
//----------------------------------------------------------------------
// visitor functions
//----------------------------------------------------------------------
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
assert(x->is_pinned(),"");
@ -715,93 +655,48 @@ void LIRGenerator::do_CompareOp(CompareOp* x) {
}
}
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
assert(x->number_of_arguments() == 4, "wrong type");
LIRItem obj (x->argument_at(0), this); // object
LIRItem offset(x->argument_at(1), this); // offset of field
LIRItem cmp (x->argument_at(2), this); // value to compare with field
LIRItem val (x->argument_at(3), this); // replace field with val if matches cmp
assert(obj.type()->tag() == objectTag, "invalid type");
// In 64bit the type can be long, sparc doesn't have this assert
// assert(offset.type()->tag() == intTag, "invalid type");
assert(cmp.type()->tag() == type->tag(), "invalid type");
assert(val.type()->tag() == type->tag(), "invalid type");
// get address of field
obj.load_item();
offset.load_nonconstant();
LIR_Opr addr = new_pointer_register();
LIR_Address* a;
if(offset.result()->is_constant()) {
#ifdef _LP64
jlong c = offset.result()->as_jlong();
if ((jlong)((jint)c) == c) {
a = new LIR_Address(obj.result(),
(jint)c,
as_BasicType(type));
} else {
LIR_Opr tmp = new_register(T_LONG);
__ move(offset.result(), tmp);
a = new LIR_Address(obj.result(),
tmp,
as_BasicType(type));
}
#else
a = new LIR_Address(obj.result(),
offset.result()->as_jint(),
as_BasicType(type));
#endif
} else {
a = new LIR_Address(obj.result(),
offset.result(),
0,
as_BasicType(type));
}
__ leal(LIR_OprFact::address(a), addr);
if (type == objectType) { // Write-barrier needed for Object fields.
// Do the pre-write barrier, if any.
pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
}
if (type == objectType) {
cmp.load_item_force(FrameMap::rax_oop_opr);
val.load_item();
} else if (type == intType) {
cmp.load_item_force(FrameMap::rax_opr);
val.load_item();
} else if (type == longType) {
cmp.load_item_force(FrameMap::long0_opr);
val.load_item_force(FrameMap::long1_opr);
} else {
ShouldNotReachHere();
}
LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience
if (type == objectType)
__ cas_obj(addr, cmp.result(), val.result(), ill, ill);
else if (type == intType)
__ cas_int(addr, cmp.result(), val.result(), ill, ill);
else if (type == longType)
__ cas_long(addr, cmp.result(), val.result(), ill, ill);
else {
ShouldNotReachHere();
if (type == T_OBJECT || type == T_ARRAY) {
cmp_value.load_item_force(FrameMap::rax_oop_opr);
new_value.load_item();
__ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
} else if (type == T_INT) {
cmp_value.load_item_force(FrameMap::rax_opr);
new_value.load_item();
__ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
} else if (type == T_LONG) {
cmp_value.load_item_force(FrameMap::long0_opr);
new_value.load_item_force(FrameMap::long1_opr);
__ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
} else {
Unimplemented();
}
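// CMPXCHG implicitly compares against rax/eax (edx:eax for the 8-byte form on
// 32-bit) and reports the outcome in ZF, which is why cmp_value is forced into
// rax above and the boolean result is materialized with the conditional move below.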
// generate conditional move of boolean result
LIR_Opr result = rlock_result(x);
LIR_Opr result = new_register(T_INT);
__ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
result, as_BasicType(type));
if (type == objectType) { // Write-barrier needed for Object fields.
// Seems to be precise
post_barrier(addr, val.result());
}
result, type);
return result;
}
LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
bool is_oop = type == T_OBJECT || type == T_ARRAY;
LIR_Opr result = new_register(type);
value.load_item();
// Because we want a 2-arg form of xchg and xadd
__ move(value.result(), result);
assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
__ xchg(addr, result, result, LIR_OprFact::illegalOpr);
return result;
}
LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
LIR_Opr result = new_register(type);
value.load_item();
// Because we want a 2-arg form of xchg and xadd
__ move(value.result(), result);
assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
__ xadd(addr, result, result, LIR_OprFact::illegalOpr);
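// LOCK XADD leaves the previous memory value in the register operand, so
// 'result' now holds the old value, as required for get-and-add.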
return result;
}
void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
@ -1419,7 +1314,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
LIRItem obj(x->obj(), this);
CodeEmitInfo* patching_info = NULL;
if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
// must do this before locking the destination register as an oop register,
// and before the obj is loaded (the latter is for deoptimization)
patching_info = state_for(x, x->state_before());
@ -1570,8 +1465,6 @@ void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
}
}
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
CodeEmitInfo* info) {
if (address->type() == T_LONG) {
@ -1593,100 +1486,3 @@ void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
__ load(address, result, info);
}
}
void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
BasicType type, bool is_volatile) {
if (is_volatile && type == T_LONG) {
LIR_Address* addr = new LIR_Address(src, offset, T_DOUBLE);
LIR_Opr tmp = new_register(T_DOUBLE);
__ load(addr, tmp);
LIR_Opr spill = new_register(T_LONG);
set_vreg_flag(spill, must_start_in_memory);
__ move(tmp, spill);
__ move(spill, dst);
} else {
LIR_Address* addr = new LIR_Address(src, offset, type);
__ load(addr, dst);
}
}
void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
BasicType type, bool is_volatile) {
if (is_volatile && type == T_LONG) {
LIR_Address* addr = new LIR_Address(src, offset, T_DOUBLE);
LIR_Opr tmp = new_register(T_DOUBLE);
LIR_Opr spill = new_register(T_DOUBLE);
set_vreg_flag(spill, must_start_in_memory);
__ move(data, spill);
__ move(spill, tmp);
__ move(tmp, addr);
} else {
LIR_Address* addr = new LIR_Address(src, offset, type);
bool is_obj = (type == T_ARRAY || type == T_OBJECT);
if (is_obj) {
// Do the pre-write barrier, if any.
pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
__ move(data, addr);
assert(src->is_register(), "must be register");
// Seems to be a precise address
post_barrier(LIR_OprFact::address(addr), data);
} else {
__ move(data, addr);
}
}
}
void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
BasicType type = x->basic_type();
LIRItem src(x->object(), this);
LIRItem off(x->offset(), this);
LIRItem value(x->value(), this);
src.load_item();
value.load_item();
off.load_nonconstant();
LIR_Opr dst = rlock_result(x, type);
LIR_Opr data = value.result();
bool is_obj = (type == T_ARRAY || type == T_OBJECT);
LIR_Opr offset = off.result();
assert (type == T_INT || (!x->is_add() && is_obj) LP64_ONLY( || type == T_LONG ), "unexpected type");
LIR_Address* addr;
if (offset->is_constant()) {
#ifdef _LP64
jlong c = offset->as_jlong();
if ((jlong)((jint)c) == c) {
addr = new LIR_Address(src.result(), (jint)c, type);
} else {
LIR_Opr tmp = new_register(T_LONG);
__ move(offset, tmp);
addr = new LIR_Address(src.result(), tmp, type);
}
#else
addr = new LIR_Address(src.result(), offset->as_jint(), type);
#endif
} else {
addr = new LIR_Address(src.result(), offset, type);
}
// Because we want a 2-arg form of xchg and xadd
__ move(data, dst);
if (x->is_add()) {
__ xadd(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr);
} else {
if (is_obj) {
// Do the pre-write barrier, if any.
pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
}
__ xchg(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr);
if (is_obj) {
// Seems to be a precise address
post_barrier(LIR_OprFact::address(addr), data);
}
}
}

View File

@ -356,6 +356,15 @@ void C1_MacroAssembler::verified_entry() {
verify_FPU(0, "method_entry");
}
void C1_MacroAssembler::load_parameter(int offset_in_words, Register reg) {
// rbp, + 0: link
// + 1: return address
// + 2: argument with offset 0
// + 3: argument with offset 1
// + 4: ...
movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
}
#ifndef PRODUCT

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -121,4 +121,9 @@
// This platform only uses signal-based null checks. The Label is not needed.
void null_check(Register r, Label *Lnull = NULL) { MacroAssembler::null_check(r); }
void load_parameter(int offset_in_words, Register reg);
void save_live_registers_no_oop_map(int num_rt_args, bool save_fpu_registers);
void restore_live_registers(bool restore_fpu_registers);
#endif // CPU_X86_VM_C1_MACROASSEMBLER_X86_HPP

View File

@ -41,12 +41,6 @@
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#endif
// Implementation of StubAssembler
@ -212,31 +206,32 @@ class StubFrame: public StackObj {
~StubFrame();
};
void StubAssembler::prologue(const char* name, bool must_gc_arguments) {
set_info(name, must_gc_arguments);
enter();
}
void StubAssembler::epilogue() {
leave();
ret(0);
}
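// Exposing prologue()/epilogue() on StubAssembler lets GC-provided runtime
// stubs (such as the G1 barrier stubs) set up the standard C1 stub frame
// without going through StubFrame.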
#define __ _sasm->
StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
_sasm = sasm;
__ set_info(name, must_gc_arguments);
__ enter();
__ prologue(name, must_gc_arguments);
}
// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
// rbp, + 0: link
// + 1: return address
// + 2: argument with offset 0
// + 3: argument with offset 1
// + 4: ...
__ movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
__ load_parameter(offset_in_words, reg);
}
StubFrame::~StubFrame() {
__ leave();
__ ret(0);
__ epilogue();
}
#undef __
@ -244,8 +239,6 @@ StubFrame::~StubFrame() {
// Implementation of Runtime1
#define __ sasm->
const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;
const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2;
@ -310,8 +303,6 @@ enum reg_save_layout {
reg_save_frame_size // As noted: neglects any parameters to runtime // 504
};
// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FP registers. In particular we separate
// saving and describing the FPU registers for deoptimization since we
@ -418,8 +409,9 @@ static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
return map;
}
static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
bool save_fpu_registers = true) {
#define __ this->
void C1_MacroAssembler::save_live_registers_no_oop_map(int num_rt_args, bool save_fpu_registers) {
__ block_comment("save_live_registers");
__ pusha(); // integer registers
@ -493,12 +485,12 @@ static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
// FPU stack must be empty now
__ verify_FPU(0, "save_live_registers");
return generate_oop_map(sasm, num_rt_args, save_fpu_registers);
}
#undef __
#define __ sasm->
static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) {
static void restore_fpu(C1_MacroAssembler* sasm, bool restore_fpu_registers) {
if (restore_fpu_registers) {
if (UseSSE >= 2) {
// restore XMM registers
@ -549,14 +541,28 @@ static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true)
__ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
}
#undef __
#define __ this->
static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
void C1_MacroAssembler::restore_live_registers(bool restore_fpu_registers) {
__ block_comment("restore_live_registers");
restore_fpu(sasm, restore_fpu_registers);
restore_fpu(this, restore_fpu_registers);
__ popa();
}
#undef __
#define __ sasm->
static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
bool save_fpu_registers = true) {
sasm->save_live_registers_no_oop_map(num_rt_args, save_fpu_registers);
return generate_oop_map(sasm, num_rt_args, save_fpu_registers);
}
static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
sasm->restore_live_registers(restore_fpu_registers);
}
static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) {
__ block_comment("restore_live_registers_except_rax");
@ -1557,159 +1563,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
}
break;
#if INCLUDE_ALL_GCS
case g1_pre_barrier_slow_id:
{
StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
// arg0 : previous value of memory
BarrierSet* bs = BarrierSet::barrier_set();
if (bs->kind() != BarrierSet::G1BarrierSet) {
__ movptr(rax, (int)id);
__ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
__ should_not_reach_here();
break;
}
__ push(rax);
__ push(rdx);
const Register pre_val = rax;
const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
const Register tmp = rdx;
NOT_LP64(__ get_thread(thread);)
Address queue_active(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
Address queue_index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));
Label done;
Label runtime;
// Is marking still active?
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
__ cmpl(queue_active, 0);
} else {
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
__ cmpb(queue_active, 0);
}
__ jcc(Assembler::equal, done);
// Can we store original value in the thread's buffer?
__ movptr(tmp, queue_index);
__ testptr(tmp, tmp);
__ jcc(Assembler::zero, runtime);
__ subptr(tmp, wordSize);
__ movptr(queue_index, tmp);
__ addptr(tmp, buffer);
// prev_val (rax)
f.load_argument(0, pre_val);
__ movptr(Address(tmp, 0), pre_val);
__ jmp(done);
__ bind(runtime);
save_live_registers(sasm, 3);
// load the pre-value
f.load_argument(0, rcx);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
restore_live_registers(sasm);
__ bind(done);
__ pop(rdx);
__ pop(rax);
}
break;
case g1_post_barrier_slow_id:
{
StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);
BarrierSet* bs = BarrierSet::barrier_set();
if (bs->kind() != BarrierSet::G1BarrierSet) {
__ movptr(rax, (int)id);
__ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
__ should_not_reach_here();
break;
}
// arg0: store_address
Address store_addr(rbp, 2*BytesPerWord);
Label done;
Label enqueued;
Label runtime;
// At this point we know new_value is non-NULL and the new_value crosses regions.
// Must check to see if card is already dirty
const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));
__ push(rax);
__ push(rcx);
const Register cardtable = rax;
const Register card_addr = rcx;
f.load_argument(0, card_addr);
__ shrptr(card_addr, CardTable::card_shift);
// Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
// a valid address and therefore is not properly handled by the relocation code.
__ movptr(cardtable, ci_card_table_address_as<intptr_t>());
__ addptr(card_addr, cardtable);
NOT_LP64(__ get_thread(thread);)
__ cmpb(Address(card_addr, 0), (int)G1CardTable::g1_young_card_val());
__ jcc(Assembler::equal, done);
__ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
__ cmpb(Address(card_addr, 0), (int)CardTable::dirty_card_val());
__ jcc(Assembler::equal, done);
// storing region crossing non-NULL, card is clean.
// dirty card and log.
__ movb(Address(card_addr, 0), (int)CardTable::dirty_card_val());
const Register tmp = rdx;
__ push(rdx);
__ movptr(tmp, queue_index);
__ testptr(tmp, tmp);
__ jcc(Assembler::zero, runtime);
__ subptr(tmp, wordSize);
__ movptr(queue_index, tmp);
__ addptr(tmp, buffer);
__ movptr(Address(tmp, 0), card_addr);
__ jmp(enqueued);
__ bind(runtime);
save_live_registers(sasm, 3);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
restore_live_registers(sasm);
__ bind(enqueued);
__ pop(rdx);
__ bind(done);
__ pop(rcx);
__ pop(rax);
}
break;
#endif // INCLUDE_ALL_GCS
case predicate_failed_trap_id:
{
StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);

View File

@ -32,6 +32,11 @@
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#endif
#define __ masm->
@ -399,3 +404,193 @@ void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet deco
}
NOT_LP64(imasm->restore_bcp());
}
#ifdef COMPILER1
#undef __
#define __ ce->masm()->
void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
// At this point we know that marking is in progress.
// If do_load() is true then we have to emit the
// load of the previous value; otherwise it has already
// been loaded into _pre_val.
__ bind(*stub->entry());
assert(stub->pre_val()->is_register(), "Precondition.");
Register pre_val_reg = stub->pre_val()->as_register();
if (stub->do_load()) {
ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
}
__ cmpptr(pre_val_reg, (int32_t)NULL_WORD);
__ jcc(Assembler::equal, *stub->continuation());
ce->store_parameter(stub->pre_val()->as_register(), 0);
__ call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
__ jmp(*stub->continuation());
}
void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
__ bind(*stub->entry());
assert(stub->addr()->is_register(), "Precondition.");
assert(stub->new_val()->is_register(), "Precondition.");
Register new_val_reg = stub->new_val()->as_register();
__ cmpptr(new_val_reg, (int32_t) NULL_WORD);
__ jcc(Assembler::equal, *stub->continuation());
ce->store_parameter(stub->addr()->as_pointer_register(), 0);
__ call(RuntimeAddress(bs->post_barrier_c1_runtime_code_blob()->code_begin()));
__ jmp(*stub->continuation());
}
#undef __
#define __ sasm->
void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
__ prologue("g1_pre_barrier", false);
// arg0 : previous value of memory
__ push(rax);
__ push(rdx);
const Register pre_val = rax;
const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
const Register tmp = rdx;
NOT_LP64(__ get_thread(thread);)
Address queue_active(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
Address queue_index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));
Label done;
Label runtime;
// Is marking still active?
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
__ cmpl(queue_active, 0);
} else {
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
__ cmpb(queue_active, 0);
}
__ jcc(Assembler::equal, done);
// Can we store original value in the thread's buffer?
__ movptr(tmp, queue_index);
__ testptr(tmp, tmp);
__ jcc(Assembler::zero, runtime);
__ subptr(tmp, wordSize);
__ movptr(queue_index, tmp);
__ addptr(tmp, buffer);
// prev_val (rax)
__ load_parameter(0, pre_val);
__ movptr(Address(tmp, 0), pre_val);
__ jmp(done);
__ bind(runtime);
__ save_live_registers_no_oop_map(3, true);
// load the pre-value
__ load_parameter(0, rcx);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
__ restore_live_registers(true);
__ bind(done);
__ pop(rdx);
__ pop(rax);
__ epilogue();
}
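// x86 flavour of the G1 post-barrier runtime stub. Stores into young regions
// are filtered out by the g1_young_card_val check, and the StoreLoad membar
// keeps the re-read of the card from being ordered ahead of the earlier
// reference store, so a card concurrently cleaned by refinement is re-dirtied.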
void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
__ prologue("g1_post_barrier", false);
// arg0: store_address
Address store_addr(rbp, 2*BytesPerWord);
CardTableBarrierSet* ct =
barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
assert(sizeof(*ct->card_table()->byte_map_base()) == sizeof(jbyte), "adjust this code");
Label done;
Label enqueued;
Label runtime;
// At this point we know new_value is non-NULL and the new_value crosses regions.
// Must check to see if card is already dirty
const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));
__ push(rax);
__ push(rcx);
const Register cardtable = rax;
const Register card_addr = rcx;
__ load_parameter(0, card_addr);
__ shrptr(card_addr, CardTable::card_shift);
// Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
// a valid address and therefore is not properly handled by the relocation code.
__ movptr(cardtable, (intptr_t)ct->card_table()->byte_map_base());
__ addptr(card_addr, cardtable);
NOT_LP64(__ get_thread(thread);)
__ cmpb(Address(card_addr, 0), (int)G1CardTable::g1_young_card_val());
__ jcc(Assembler::equal, done);
__ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
__ cmpb(Address(card_addr, 0), (int)CardTable::dirty_card_val());
__ jcc(Assembler::equal, done);
// storing region crossing non-NULL, card is clean.
// dirty card and log.
__ movb(Address(card_addr, 0), (int)CardTable::dirty_card_val());
const Register tmp = rdx;
__ push(rdx);
__ movptr(tmp, queue_index);
__ testptr(tmp, tmp);
__ jcc(Assembler::zero, runtime);
__ subptr(tmp, wordSize);
__ movptr(queue_index, tmp);
__ addptr(tmp, buffer);
__ movptr(Address(tmp, 0), card_addr);
__ jmp(enqueued);
__ bind(runtime);
__ save_live_registers_no_oop_map(3, true);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
__ restore_live_registers(true);
__ bind(enqueued);
__ pop(rdx);
__ bind(done);
__ pop(rcx);
__ pop(rax);
__ epilogue();
}
#undef __
#endif // COMPILER1

View File

@ -28,6 +28,11 @@
#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
class LIR_Assembler;
class StubAssembler;
class G1PreBarrierStub;
class G1PostBarrierStub;
class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
protected:
virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count);
@ -52,6 +57,12 @@ class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
Address dst, Register val, Register tmp1, Register tmp2);
public:
void gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub);
void gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub);
void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
void generate_c1_post_barrier_runtime_stub(StubAssembler* sasm);
virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register dst, Address src, Register tmp1, Register tmp_thread);
};

View File

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/jniHandles.hpp"
#define __ masm->
@ -108,3 +109,9 @@ void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators
default: Unimplemented();
}
}
void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
Register obj, Register tmp, Label& slowpath) {
__ clear_jweak_tag(obj);
__ movptr(obj, Address(obj, 0));
}

View File

@ -44,6 +44,10 @@ public:
virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2);
// Support for jniFastGetField to try resolving a jobject/jweak in native
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
Register obj, Register tmp, Label& slowpath);
virtual void barrier_stubs_init() {}
};

View File

@ -90,8 +90,9 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob
// register obj is destroyed afterwards.
BarrierSet* bs = BarrierSet::barrier_set();
CardTableBarrierSet* ct = barrier_set_cast<CardTableBarrierSet>(bs);
assert(sizeof(*ct->card_table()->byte_map_base()) == sizeof(jbyte), "adjust this code");
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
__ shrptr(obj, CardTable::card_shift);
@ -102,15 +103,15 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob
// So this essentially converts an address to a displacement and it will
// never need to be relocated. On 64bit however the value may be too
// large for a 32bit displacement.
intptr_t disp = (intptr_t) ct->card_table()->byte_map_base();
if (__ is_simm32(disp)) {
card_addr = Address(noreg, obj, Address::times_1, disp);
intptr_t byte_map_base = (intptr_t)ct->byte_map_base();
if (__ is_simm32(byte_map_base)) {
card_addr = Address(noreg, obj, Address::times_1, byte_map_base);
} else {
// By doing it as an ExternalAddress 'disp' could be converted to a rip-relative
// By doing it as an ExternalAddress 'byte_map_base' could be converted to a rip-relative
// displacement and done in a single instruction given favorable mapping and a
// smarter version of as_Address. However, 'ExternalAddress' generates a relocation
// entry and that entry is not properly handled by the relocation code.
AddressLiteral cardtable((address)ct->card_table()->byte_map_base(), relocInfo::none);
AddressLiteral cardtable((address)byte_map_base, relocInfo::none);
Address index(noreg, obj, Address::times_1);
card_addr = __ as_Address(ArrayAddress(cardtable, index));
}
@ -118,7 +119,7 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob
int dirty = CardTable::dirty_card_val();
if (UseCondCardMark) {
Label L_already_dirty;
if (UseConcMarkSweepGC) {
if (ct->scanned_concurrently()) {
__ membar(Assembler::StoreLoad);
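// With conditional card marking the card is only re-dirtied when it is not
// already dirty; when the card table is scanned concurrently, this StoreLoad
// fence keeps that card load from being ordered ahead of the preceding
// reference store.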
}
__ cmpb(card_addr, dirty);

View File

@ -24,6 +24,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
@ -42,6 +44,7 @@
// c_rarg1: obj
// c_rarg2: jfield id
static const Register rtmp = r8;
static const Register robj = r9;
static const Register rcounter = r10;
static const Register roffset = r11;
@ -81,12 +84,14 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
// robj is data dependent on rcounter.
}
__ clear_jweak_tag(robj);
__ movptr(robj, Address(robj, 0)); // *obj
__ mov (roffset, c_rarg2);
__ shrptr(roffset, 2); // offset
// Both robj and rtmp are clobbered by try_resolve_jobject_in_native.
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->try_resolve_jobject_in_native(masm, /* jni_env */ c_rarg0, robj, rtmp, slow);
DEBUG_ONLY(__ movl(rtmp, 0xDEADC0DE);)
assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
speculative_load_pclist[count] = __ pc();
switch (type) {

View File

@ -37,6 +37,7 @@
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"

View File

@ -31,6 +31,7 @@
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/frame.inline.hpp"
#include "utilities/preserveException.hpp"

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2010 Red Hat, Inc.
* Copyright 2007, 2008, 2010, 2018, Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -50,10 +50,10 @@
: "Q"(*(volatile long*)src));
#elif defined(S390) && !defined(_LP64)
double tmp;
asm volatile ("ld %0, 0(%1)\n"
"std %0, 0(%2)\n"
: "=r"(tmp)
: "a"(src), "a"(dst));
asm volatile ("ld %0, %2\n"
"std %0, %1\n"
: "=&f"(tmp), "=Q"(*(volatile double*)dst)
: "Q"(*(volatile double*)src));
#else
*(jlong *) dst = *(const jlong *) src;
#endif
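// (In the S390 path above, "=&f" asks for an early-clobbered floating-point
// register to hold the temporary, while the "Q" constraints hand src and dst
// to the compiler as memory operands rather than raw address registers.)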

View File

@ -1171,6 +1171,9 @@ const char *InstructForm::mach_base_class(FormDict &globals) const {
else if (is_ideal_nop()) {
return "MachNopNode";
}
else if (is_ideal_jump()) {
return "MachJumpNode";
}
else if (is_mach_constant()) {
return "MachConstantNode";
}

View File

@ -3936,6 +3936,9 @@ void ArchDesc::buildMachNode(FILE *fp_cpp, InstructForm *inst, const char *inden
fprintf(fp_cpp, "%s node->_prob = _leaf->as_If()->_prob;\n", indent);
fprintf(fp_cpp, "%s node->_fcnt = _leaf->as_If()->_fcnt;\n", indent);
}
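// Ideal Jump nodes (the multi-way branches built for table/lookup switches)
// carry a per-target probability array, which is copied onto the generated
// MachJumpNode below so the matched node keeps that profile information.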
if (inst->is_ideal_jump()) {
fprintf(fp_cpp, "%s node->_probs = _leaf->as_Jump()->_probs;\n", indent);
}
if( inst->is_ideal_fastlock() ) {
fprintf(fp_cpp, "%s node->_counters = _leaf->as_FastLock()->counters();\n", indent);
fprintf(fp_cpp, "%s node->_rtm_counters = _leaf->as_FastLock()->rtm_counters();\n", indent);

View File

@ -285,7 +285,7 @@ private:
protected:
virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred);
virtual bool do_unloading_jvmci(BoolObjectClosure* is_alive, bool unloading_occurred) { return false; }
virtual bool do_unloading_jvmci(bool unloading_occurred) { return false; }
};

View File

@ -183,28 +183,21 @@ void AOTLoader::universe_init() {
// Shifts are static values which are initialized to 0 until Java heap initialization.
// AOT libs are loaded before the heap is initialized, so shift values are not set.
// It is okay since the ObjectAlignmentInBytes flag, which defines the shift value, is set before AOT libs are loaded.
// Set shift values based on the first AOT library config.
// AOT sets shift values during heap and metaspace initialization.
// Check shift values to make sure they did not change.
if (UseCompressedOops && AOTLib::narrow_oop_shift_initialized()) {
int oop_shift = Universe::narrow_oop_shift();
if (oop_shift == 0) {
Universe::set_narrow_oop_shift(AOTLib::narrow_oop_shift());
} else {
FOR_ALL_AOT_LIBRARIES(lib) {
(*lib)->verify_flag(AOTLib::narrow_oop_shift(), oop_shift, "Universe::narrow_oop_shift");
}
(*lib)->verify_flag((*lib)->config()->_narrowOopShift, oop_shift, "Universe::narrow_oop_shift");
}
if (UseCompressedClassPointers) { // It is set only if UseCompressedOops is set
int klass_shift = Universe::narrow_klass_shift();
if (klass_shift == 0) {
Universe::set_narrow_klass_shift(AOTLib::narrow_klass_shift());
} else {
FOR_ALL_AOT_LIBRARIES(lib) {
(*lib)->verify_flag(AOTLib::narrow_klass_shift(), klass_shift, "Universe::narrow_klass_shift");
(*lib)->verify_flag((*lib)->config()->_narrowKlassShift, klass_shift, "Universe::narrow_klass_shift");
}
}
}
}
// Create heaps for all the libraries
// Create heaps for all valid libraries
FOR_ALL_AOT_LIBRARIES(lib) {
if ((*lib)->is_valid()) {
AOTCodeHeap* heap = new AOTCodeHeap(*lib);
@ -213,6 +206,9 @@ void AOTLoader::universe_init() {
add_heap(heap);
CodeCache::add_heap(heap);
}
} else {
// Unload invalid libraries
os::dll_unload((*lib)->dl_handle());
}
}
}
@ -223,20 +219,29 @@ void AOTLoader::universe_init() {
}
}
// Set shift value for compressed oops and classes based on first AOT library config.
// AOTLoader::universe_init(), which is called later, will check the shift value again to make sure nobody changed it.
// This code is not executed during CDS dump because it runs in Interpreter mode and AOT is disabled in this mode.
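// The compressed-oop shift is baked into AOT-compiled code when the library is
// built, so it must be fixed here, before any oop is encoded, and every library
// loaded later has to agree with the value actually in use.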
void AOTLoader::set_narrow_oop_shift() {
// This method is called from Universe::initialize_heap().
if (UseAOT && libraries_count() > 0 &&
UseCompressedOops && AOTLib::narrow_oop_shift_initialized()) {
if (Universe::narrow_oop_shift() == 0) {
// 0 is a valid shift value for a small heap, but we can safely increase it
// at this point, since nobody has used it yet.
Universe::set_narrow_oop_shift(AOTLib::narrow_oop_shift());
}
}
}
void AOTLoader::set_narrow_klass_shift() {
// This method could be called from Metaspace::set_narrow_klass_base_and_shift().
// In case it is not called (during dump CDS, for example) the corresponding code in
// AOTLoader::universe_init(), which is called later, will set the shift value.
// This method is called from Metaspace::set_narrow_klass_base_and_shift().
if (UseAOT && libraries_count() > 0 &&
UseCompressedOops && AOTLib::narrow_oop_shift_initialized() &&
UseCompressedClassPointers) {
int klass_shift = Universe::narrow_klass_shift();
if (klass_shift == 0) {
if (Universe::narrow_klass_shift() == 0) {
Universe::set_narrow_klass_shift(AOTLib::narrow_klass_shift());
} else {
FOR_ALL_AOT_LIBRARIES(lib) {
(*lib)->verify_flag(AOTLib::narrow_klass_shift(), klass_shift, "Universe::narrow_klass_shift");
}
}
}
}

View File

@ -57,6 +57,7 @@ public:
static void initialize() NOT_AOT({ FLAG_SET_ERGO(bool, UseAOT, false); });
static void universe_init() NOT_AOT_RETURN;
static void set_narrow_oop_shift() NOT_AOT_RETURN;
static void set_narrow_klass_shift() NOT_AOT_RETURN;
static bool contains(address p) NOT_AOT({ return false; });
static void load_for_klass(InstanceKlass* ik, Thread* thread) NOT_AOT_RETURN;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -648,7 +648,7 @@ void Canonicalizer::do_NewTypeArray (NewTypeArray* x) {}
void Canonicalizer::do_NewObjectArray (NewObjectArray* x) {}
void Canonicalizer::do_NewMultiArray (NewMultiArray* x) {}
void Canonicalizer::do_CheckCast (CheckCast* x) {
if (x->klass()->is_loaded()) {
if (x->klass()->is_loaded() && !x->is_invokespecial_receiver_check()) {
Value obj = x->obj();
ciType* klass = obj->exact_type();
if (klass == NULL) klass = obj->declared_type();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -533,92 +533,4 @@ class ArrayCopyStub: public CodeStub {
#endif // PRODUCT
};
//////////////////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS
// Code stubs for Garbage-First barriers.
class G1PreBarrierStub: public CodeStub {
private:
bool _do_load;
LIR_Opr _addr;
LIR_Opr _pre_val;
LIR_PatchCode _patch_code;
CodeEmitInfo* _info;
public:
// Version that _does_ generate a load of the previous value from addr.
// addr (the address of the field to be read) must be a LIR_Address
// pre_val (a temporary register) must be a register;
G1PreBarrierStub(LIR_Opr addr, LIR_Opr pre_val, LIR_PatchCode patch_code, CodeEmitInfo* info) :
_addr(addr), _pre_val(pre_val), _do_load(true),
_patch_code(patch_code), _info(info)
{
assert(_pre_val->is_register(), "should be temporary register");
assert(_addr->is_address(), "should be the address of the field");
}
// Version that _does not_ generate load of the previous value; the
// previous value is assumed to have already been loaded into pre_val.
G1PreBarrierStub(LIR_Opr pre_val) :
_addr(LIR_OprFact::illegalOpr), _pre_val(pre_val), _do_load(false),
_patch_code(lir_patch_none), _info(NULL)
{
assert(_pre_val->is_register(), "should be a register");
}
LIR_Opr addr() const { return _addr; }
LIR_Opr pre_val() const { return _pre_val; }
LIR_PatchCode patch_code() const { return _patch_code; }
CodeEmitInfo* info() const { return _info; }
bool do_load() const { return _do_load; }
virtual void emit_code(LIR_Assembler* e);
virtual void visit(LIR_OpVisitState* visitor) {
if (_do_load) {
// don't pass in the code emit info since it's processed in the fast
// path
if (_info != NULL)
visitor->do_slow_case(_info);
else
visitor->do_slow_case();
visitor->do_input(_addr);
visitor->do_temp(_pre_val);
} else {
visitor->do_slow_case();
visitor->do_input(_pre_val);
}
}
#ifndef PRODUCT
virtual void print_name(outputStream* out) const { out->print("G1PreBarrierStub"); }
#endif // PRODUCT
};
class G1PostBarrierStub: public CodeStub {
private:
LIR_Opr _addr;
LIR_Opr _new_val;
public:
// addr (the address of the object head) and new_val must be registers.
G1PostBarrierStub(LIR_Opr addr, LIR_Opr new_val): _addr(addr), _new_val(new_val) { }
LIR_Opr addr() const { return _addr; }
LIR_Opr new_val() const { return _new_val; }
virtual void emit_code(LIR_Assembler* e);
virtual void visit(LIR_OpVisitState* visitor) {
// don't pass in the code emit info since it's processed in the fast path
visitor->do_slow_case();
visitor->do_input(_addr);
visitor->do_input(_new_val);
}
#ifndef PRODUCT
virtual void print_name(outputStream* out) const { out->print("G1PostBarrierStub"); }
#endif // PRODUCT
};
#endif // INCLUDE_ALL_GCS
//////////////////////////////////////////////////////////////////////////////////////////
#endif // SHARE_VM_C1_C1_CODESTUBS_HPP

View File

@ -0,0 +1,42 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_C1_C1_DECORATORS_HPP
#define SHARE_VM_C1_C1_DECORATORS_HPP
#include "oops/accessDecorators.hpp"
#include "utilities/globalDefinitions.hpp"
// Use the C1_NEEDS_PATCHING decorator for situations when the access is using
// an offset that is not yet known and will require patching
const DecoratorSet C1_NEEDS_PATCHING = DECORATOR_LAST << 1;
// Use the C1_MASK_BOOLEAN decorator for boolean accesses where the value
// needs to be masked.
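// (HotSpot stores booleans as bytes in which only the low bit is significant,
// so a masked read normalizes the loaded value to 0 or 1.)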
const DecoratorSet C1_MASK_BOOLEAN = DECORATOR_LAST << 2;
// The C1_WRITE_ACCESS decorator is used to mark writing accesses.
const DecoratorSet C1_WRITE_ACCESS = DECORATOR_LAST << 3;
// The C1_READ_ACCESS decorator is used to mark reading accesses.
const DecoratorSet C1_READ_ACCESS = DECORATOR_LAST << 4;
#endif // SHARE_VM_C1_C1_DECORATORS_HPP

View File

@ -1324,7 +1324,7 @@ void GraphBuilder::ret(int local_index) {
void GraphBuilder::table_switch() {
Bytecode_tableswitch sw(stream());
const int l = sw.length();
if (CanonicalizeNodes && l == 1) {
if (CanonicalizeNodes && l == 1 && compilation()->env()->comp_level() != CompLevel_full_profile) {
// total of 2 successors => use If instead of switch
// Note: This code should go into the canonicalizer as soon as it
// can handle canonicalized forms that contain more than one node.
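// (The full-profile tier is excluded above, presumably so the switch keeps
// its switch-shaped profile instead of being canonicalized into an If.)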
@ -1368,7 +1368,7 @@ void GraphBuilder::table_switch() {
void GraphBuilder::lookup_switch() {
Bytecode_lookupswitch sw(stream());
const int l = sw.number_of_pairs();
if (CanonicalizeNodes && l == 1) {
if (CanonicalizeNodes && l == 1 && compilation()->env()->comp_level() != CompLevel_full_profile) {
// total of 2 successors => use If instead of switch
// Note: This code should go into the canonicalizer as soon as it
// can handle canonicalized forms that contain more than one node.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -260,6 +260,8 @@ class LIR_Assembler: public CompilationResourceObj {
#include CPU_HEADER(c1_LIRAssembler)
public:
static int call_stub_size() {
if (UseAOT) {
return _call_stub_size + _call_aot_stub_size;

View File

@ -34,19 +34,14 @@
#include "ci/ciInstance.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "runtime/arguments.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
#ifdef TRACE_HAVE_INTRINSICS
#include "trace/traceMacros.hpp"
#endif
@ -313,11 +308,6 @@ jlong LIRItem::get_jlong_constant() const {
//--------------------------------------------------------------
void LIRGenerator::init() {
_bs = BarrierSet::barrier_set();
}
void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
if (PrintIRWithLIR) {
@ -1245,19 +1235,9 @@ void LIRGenerator::do_Reference_get(Intrinsic* x) {
info = state_for(x);
}
LIR_Address* referent_field_adr =
new LIR_Address(reference.result(), referent_offset, T_OBJECT);
LIR_Opr result = rlock_result(x);
__ load(referent_field_adr, result, info);
// Register the value in the referent field with the pre-barrier
pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
result /* pre_val */,
false /* do_load */,
false /* patch */,
NULL /* info */);
LIR_Opr result = rlock_result(x, T_OBJECT);
access_load_at(IN_HEAP | ON_WEAK_OOP_REF, T_OBJECT,
reference, LIR_OprFact::intConst(referent_offset), result);
}
// Example: clazz.isInstance(object)
@ -1454,222 +1434,27 @@ LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
return result;
}
// Various barriers
void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
bool do_load, bool patch, CodeEmitInfo* info) {
// Do the pre-write barrier, if any.
switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
case BarrierSet::G1BarrierSet:
G1BarrierSet_pre_barrier(addr_opr, pre_val, do_load, patch, info);
break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableBarrierSet:
// No pre barriers
break;
default :
ShouldNotReachHere();
}
}
void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
case BarrierSet::G1BarrierSet:
G1BarrierSet_post_barrier(addr, new_val);
break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableBarrierSet:
CardTableBarrierSet_post_barrier(addr, new_val);
break;
default :
ShouldNotReachHere();
}
}
////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS
void LIRGenerator::G1BarrierSet_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
bool do_load, bool patch, CodeEmitInfo* info) {
// First we test whether marking is in progress.
BasicType flag_type;
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
flag_type = T_INT;
} else {
guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
"Assumption");
// Use unsigned type T_BOOLEAN here rather than signed T_BYTE since some platforms, eg. ARM,
// need to use unsigned instructions to use the large offset to load the satb_mark_queue.
flag_type = T_BOOLEAN;
}
LIR_Opr thrd = getThreadPointer();
LIR_Address* mark_active_flag_addr =
new LIR_Address(thrd, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()), flag_type);
// Read the marking-in-progress flag.
LIR_Opr flag_val = new_register(T_INT);
__ load(mark_active_flag_addr, flag_val);
__ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
LIR_PatchCode pre_val_patch_code = lir_patch_none;
CodeStub* slow;
if (do_load) {
assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");
if (patch)
pre_val_patch_code = lir_patch_normal;
pre_val = new_register(T_OBJECT);
if (!addr_opr->is_address()) {
assert(addr_opr->is_register(), "must be");
addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
}
slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
} else {
assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
assert(pre_val->is_register(), "must be");
assert(pre_val->type() == T_OBJECT, "must be an object");
assert(info == NULL, "sanity");
slow = new G1PreBarrierStub(pre_val);
}
__ branch(lir_cond_notEqual, T_INT, slow);
__ branch_destination(slow->continuation());
}
void LIRGenerator::G1BarrierSet_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
// If the "new_val" is a constant NULL, no barrier is necessary.
if (new_val->is_constant() &&
new_val->as_constant_ptr()->as_jobject() == NULL) return;
if (!new_val->is_register()) {
LIR_Opr new_val_reg = new_register(T_OBJECT);
if (new_val->is_constant()) {
__ move(new_val, new_val_reg);
} else {
__ leal(new_val, new_val_reg);
}
new_val = new_val_reg;
}
assert(new_val->is_register(), "must be a register at this point");
if (addr->is_address()) {
LIR_Address* address = addr->as_address_ptr();
LIR_Opr ptr = new_pointer_register();
if (!address->index()->is_valid() && address->disp() == 0) {
__ move(address->base(), ptr);
} else {
assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
__ leal(addr, ptr);
}
addr = ptr;
}
assert(addr->is_register(), "must be a register at this point");
LIR_Opr xor_res = new_pointer_register();
LIR_Opr xor_shift_res = new_pointer_register();
if (TwoOperandLIRForm ) {
__ move(addr, xor_res);
__ logical_xor(xor_res, new_val, xor_res);
__ move(xor_res, xor_shift_res);
__ unsigned_shift_right(xor_shift_res,
LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
xor_shift_res,
LIR_OprDesc::illegalOpr());
} else {
__ logical_xor(addr, new_val, xor_res);
__ unsigned_shift_right(xor_res,
LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
xor_shift_res,
LIR_OprDesc::illegalOpr());
}
if (!new_val->is_register()) {
LIR_Opr new_val_reg = new_register(T_OBJECT);
__ leal(new_val, new_val_reg);
new_val = new_val_reg;
}
assert(new_val->is_register(), "must be a register at this point");
__ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));
CodeStub* slow = new G1PostBarrierStub(addr, new_val);
__ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
__ branch_destination(slow->continuation());
}
#endif // INCLUDE_ALL_GCS
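
For illustration (not part of this change): the cross-region filter in the removed G1 post-barrier reduces to the XOR-and-shift below. Standalone sketch, not HotSpot code; the region size is an assumption standing in for HeapRegion::LogOfHRGrainBytes.

#include <cstdint>
#include <cstdio>

const unsigned kLogRegionBytes = 20;   // assume 1 MiB regions for illustration

// Two addresses lie in different regions exactly when their XOR has a bit set
// at or above the region-size shift; only then is the slow stub taken.
bool crosses_region(uintptr_t field_addr, uintptr_t new_val_addr) {
  return ((field_addr ^ new_val_addr) >> kLogRegionBytes) != 0;
}

int main() {
  uintptr_t base = (uintptr_t)1 << 30;
  printf("same region:      %d\n", (int)crosses_region(base + 0x40, base + 0x80));
  printf("different region: %d\n", (int)crosses_region(base + 0x40, base + ((uintptr_t)1 << kLogRegionBytes)));
  return 0;
}
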
////////////////////////////////////////////////////////////////////////
void LIRGenerator::CardTableBarrierSet_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
LIR_Const* card_table_base = new LIR_Const(ci_card_table_address());
if (addr->is_address()) {
LIR_Address* address = addr->as_address_ptr();
// ptr cannot be an object because we use this barrier for array card marks
// and addr can point in the middle of an array.
LIR_Opr ptr = new_pointer_register();
if (!address->index()->is_valid() && address->disp() == 0) {
__ move(address->base(), ptr);
} else {
assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
__ leal(addr, ptr);
}
addr = ptr;
}
assert(addr->is_register(), "must be a register at this point");
#ifdef CARDTABLEBARRIERSET_POST_BARRIER_HELPER
CardTableBarrierSet_post_barrier_helper(addr, card_table_base);
#else
LIR_Opr tmp = new_pointer_register();
if (TwoOperandLIRForm) {
__ move(addr, tmp);
__ unsigned_shift_right(tmp, CardTable::card_shift, tmp);
} else {
__ unsigned_shift_right(addr, CardTable::card_shift, tmp);
}
LIR_Address* card_addr;
if (can_inline_as_constant(card_table_base)) {
card_addr = new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE);
} else {
card_addr = new LIR_Address(tmp, load_constant(card_table_base), T_BYTE);
}
LIR_Opr dirty = LIR_OprFact::intConst(CardTable::dirty_card_val());
if (UseCondCardMark) {
LIR_Opr cur_value = new_register(T_INT);
if (UseConcMarkSweepGC) {
__ membar_storeload();
}
__ move(card_addr, cur_value);
LabelObj* L_already_dirty = new LabelObj();
__ cmp(lir_cond_equal, cur_value, dirty);
__ branch(lir_cond_equal, T_BYTE, L_already_dirty->label());
__ move(dirty, card_addr);
__ branch_destination(L_already_dirty->label());
} else {
#if INCLUDE_ALL_GCS
if (UseConcMarkSweepGC && CMSPrecleaningEnabled) {
__ membar_storestore();
}
#endif
__ move(dirty, card_addr);
}
#endif
}
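
For illustration (not part of this change): the fast path of the card-table post-barrier above is just the address arithmetic sketched below. Standalone, not HotSpot code; the real table base is biased so the raw address indexes it directly, and the card size and dirty value are assumptions.

#include <cstdint>
#include <cstdio>

const unsigned kCardShift    = 9;   // assume 512-byte cards
const uint8_t  kDirtyCardVal = 0;   // any distinguished byte works

// Shift the field address down to a card index and store the dirty byte.
void card_table_post_barrier(uint8_t* cards, uintptr_t heap_base, uintptr_t field_addr) {
  cards[(field_addr - heap_base) >> kCardShift] = kDirtyCardVal;
}

int main() {
  uint8_t cards[8];
  for (int i = 0; i < 8; i++) cards[i] = 0xff;                  // all clean
  uintptr_t heap_base = (uintptr_t)1 << 28;
  card_table_post_barrier(cards, heap_base, heap_base + 3 * 512 + 17);
  for (int i = 0; i < 8; i++) printf("card %d: %s\n", i, cards[i] == kDirtyCardVal ? "dirty" : "clean");
  return 0;
}
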
//------------------------field access--------------------------------------
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
assert(x->number_of_arguments() == 4, "wrong type");
LIRItem obj (x->argument_at(0), this); // object
LIRItem offset(x->argument_at(1), this); // offset of field
LIRItem cmp (x->argument_at(2), this); // value to compare with field
LIRItem val (x->argument_at(3), this); // replace field with val if matches cmp
assert(obj.type()->tag() == objectTag, "invalid type");
// In 64bit the type can be long, sparc doesn't have this assert
// assert(offset.type()->tag() == intTag, "invalid type");
assert(cmp.type()->tag() == type->tag(), "invalid type");
assert(val.type()->tag() == type->tag(), "invalid type");
LIR_Opr result = access_atomic_cmpxchg_at(IN_HEAP, as_BasicType(type),
obj, offset, cmp, val);
set_result(x, result);
}
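
For illustration (not part of this change): however access_atomic_cmpxchg_at is lowered per platform, the semantics the intrinsic must provide are those of a sequentially consistent compare-and-set. A standalone sketch with std::atomic, not the HotSpot implementation:

#include <atomic>
#include <cstdio>

// Atomically replace the field with 'val' only if it still equals 'cmp';
// report whether the swap happened.
bool compare_and_set(std::atomic<int>& field, int cmp, int val) {
  return field.compare_exchange_strong(cmp, val, std::memory_order_seq_cst);
}

int main() {
  std::atomic<int> field(42);
  printf("cas 42->7: %d, field=%d\n", (int)compare_and_set(field, 42, 7), field.load());
  printf("cas 42->9: %d, field=%d\n", (int)compare_and_set(field, 42, 9), field.load());
  return 0;
}
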
// Comment copied from templateTable_i486.cpp
// Comment copied from templateTable_i486.cpp
// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPU's in
@ -1702,7 +1487,6 @@ void LIRGenerator::do_StoreField(StoreField* x) {
bool needs_patching = x->needs_patching();
bool is_volatile = x->field()->is_volatile();
BasicType field_type = x->field_type();
bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);
CodeEmitInfo* info = NULL;
if (needs_patching) {
@ -1717,7 +1501,6 @@ void LIRGenerator::do_StoreField(StoreField* x) {
}
}
LIRItem object(x->obj(), this);
LIRItem value(x->value(), this);
@ -1755,48 +1538,147 @@ void LIRGenerator::do_StoreField(StoreField* x) {
__ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
}
LIR_Address* address;
DecoratorSet decorators = IN_HEAP;
if (is_volatile) {
decorators |= MO_SEQ_CST;
}
if (needs_patching) {
// we need to patch the offset in the instruction so don't allow
// generate_address to try to be smart about emitting the -1.
// Otherwise the patching code won't know how to find the
// instruction to patch.
address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
decorators |= C1_NEEDS_PATCHING;
}
access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
value.result(), info != NULL ? new CodeEmitInfo(info) : NULL, info);
}
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
assert(x->is_pinned(),"");
bool needs_range_check = x->compute_needs_range_check();
bool use_length = x->length() != NULL;
bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
!get_jobject_constant(x->value())->is_null_object() ||
x->should_profile());
LIRItem array(x->array(), this);
LIRItem index(x->index(), this);
LIRItem value(x->value(), this);
LIRItem length(this);
array.load_item();
index.load_nonconstant();
if (use_length && needs_range_check) {
length.set_instruction(x->length());
length.load_item();
}
if (needs_store_check || x->check_boolean()) {
value.load_item();
} else {
address = generate_address(object.result(), x->offset(), field_type);
value.load_for_store(x->elt_type());
}
if (is_volatile && os::is_MP()) {
__ membar_release();
set_no_result(x);
// the CodeEmitInfo must be duplicated for each different
// LIR-instruction because spilling can occur anywhere between two
// instructions and so the debug information must be different
CodeEmitInfo* range_check_info = state_for(x);
CodeEmitInfo* null_check_info = NULL;
if (x->needs_null_check()) {
null_check_info = new CodeEmitInfo(range_check_info);
}
if (is_oop) {
// Do the pre-write barrier, if any.
pre_barrier(LIR_OprFact::address(address),
LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load*/,
needs_patching,
(info ? new CodeEmitInfo(info) : NULL));
}
bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
if (needs_atomic_access && !needs_patching) {
volatile_field_store(value.result(), address, info);
if (GenerateRangeChecks && needs_range_check) {
if (use_length) {
__ cmp(lir_cond_belowEqual, length.result(), index.result());
__ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
} else {
LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
__ store(value.result(), address, info, patch_code);
array_range_check(array.result(), index.result(), null_check_info, range_check_info);
// range_check also does the null check
null_check_info = NULL;
}
}
if (is_oop) {
// Store to object so mark the card of the header
post_barrier(object.result(), value.result());
if (GenerateArrayStoreCheck && needs_store_check) {
CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
}
if (!support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile && os::is_MP()) {
__ membar();
DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY;
if (x->check_boolean()) {
decorators |= C1_MASK_BOOLEAN;
}
access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
NULL, null_check_info);
}
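
For illustration (not part of this change): the range check emitted above uses an unsigned ("belowEqual") compare, so one test covers both out-of-bounds and negative indices. A standalone sketch of just that predicate, not HotSpot code:

#include <cstdint>
#include <cstdio>

// Branch to the RangeCheckStub when length <= index, compared as unsigned:
// a negative index wraps to a huge unsigned value and fails the same test.
bool range_check_fails(int32_t index, int32_t length) {
  return (uint32_t)length <= (uint32_t)index;
}

int main() {
  printf("index 3,  length 10: %d\n", (int)range_check_fails(3, 10));
  printf("index 10, length 10: %d\n", (int)range_check_fails(10, 10));
  printf("index -1, length 10: %d\n", (int)range_check_fails(-1, 10));
  return 0;
}
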
void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
LIRItem& base, LIR_Opr offset, LIR_Opr result,
CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
decorators |= C1_READ_ACCESS;
LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
if (access.is_raw()) {
_barrier_set->BarrierSetC1::load_at(access, result);
} else {
_barrier_set->load_at(access, result);
}
}
void LIRGenerator::access_store_at(DecoratorSet decorators, BasicType type,
LIRItem& base, LIR_Opr offset, LIR_Opr value,
CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
decorators |= C1_WRITE_ACCESS;
LIRAccess access(this, decorators, base, offset, type, patch_info, store_emit_info);
if (access.is_raw()) {
_barrier_set->BarrierSetC1::store_at(access, value);
} else {
_barrier_set->store_at(access, value);
}
}
LIR_Opr LIRGenerator::access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
// Atomic operations are SEQ_CST by default
decorators |= C1_READ_ACCESS;
decorators |= C1_WRITE_ACCESS;
decorators |= ((decorators & MO_DECORATOR_MASK) != 0) ? MO_SEQ_CST : 0;
LIRAccess access(this, decorators, base, offset, type);
if (access.is_raw()) {
return _barrier_set->BarrierSetC1::atomic_cmpxchg_at(access, cmp_value, new_value);
} else {
return _barrier_set->atomic_cmpxchg_at(access, cmp_value, new_value);
}
}
LIR_Opr LIRGenerator::access_atomic_xchg_at(DecoratorSet decorators, BasicType type,
LIRItem& base, LIRItem& offset, LIRItem& value) {
// Atomic operations are SEQ_CST by default
decorators |= C1_READ_ACCESS;
decorators |= C1_WRITE_ACCESS;
decorators |= ((decorators & MO_DECORATOR_MASK) != 0) ? MO_SEQ_CST : 0;
LIRAccess access(this, decorators, base, offset, type);
if (access.is_raw()) {
return _barrier_set->BarrierSetC1::atomic_xchg_at(access, value);
} else {
return _barrier_set->atomic_xchg_at(access, value);
}
}
LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType type,
LIRItem& base, LIRItem& offset, LIRItem& value) {
// Atomic operations are SEQ_CST by default
decorators |= C1_READ_ACCESS;
decorators |= C1_WRITE_ACCESS;
decorators |= ((decorators & MO_DECORATOR_MASK) != 0) ? MO_SEQ_CST : 0;
LIRAccess access(this, decorators, base, offset, type);
if (access.is_raw()) {
return _barrier_set->BarrierSetC1::atomic_add_at(access, value);
} else {
return _barrier_set->atomic_add_at(access, value);
}
}
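
For illustration (not part of this change): the is_raw() split above relies on a qualified call (_barrier_set->BarrierSetC1::load_at(...)) to bypass virtual dispatch, while decorated accesses go through the vtable so a GC-specific subclass can wrap them with barriers. A standalone sketch of that idiom with invented class names:

#include <cstdio>

struct BarrierSetC1Model {                          // invented stand-in
  virtual ~BarrierSetC1Model() {}
  virtual void load_at(const char* what) { printf("plain load of %s\n", what); }
};

struct G1LikeBarrierSetC1 : BarrierSetC1Model {
  virtual void load_at(const char* what) {
    printf("pre-barrier for %s\n", what);           // GC-specific wrapping
    BarrierSetC1Model::load_at(what);               // then the plain access
  }
};

void access_load(BarrierSetC1Model* bs, bool is_raw, const char* what) {
  if (is_raw) {
    bs->BarrierSetC1Model::load_at(what);           // statically bound: never any barrier
  } else {
    bs->load_at(what);                              // virtually bound: barrier may apply
  }
}

int main() {
  G1LikeBarrierSetC1 bs;
  access_load(&bs, true,  "raw field");
  access_load(&bs, false, "decorated field");
  return 0;
}
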
void LIRGenerator::do_LoadField(LoadField* x) {
bool needs_patching = x->needs_patching();
@ -1843,33 +1725,18 @@ void LIRGenerator::do_LoadField(LoadField* x) {
__ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
}
LIR_Opr reg = rlock_result(x, field_type);
LIR_Address* address;
DecoratorSet decorators = IN_HEAP;
if (is_volatile) {
decorators |= MO_SEQ_CST;
}
if (needs_patching) {
// we need to patch the offset in the instruction so don't allow
// generate_address to try to be smart about emitting the -1.
// Otherwise the patching code won't know how to find the
// instruction to patch.
address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
} else {
address = generate_address(object.result(), x->offset(), field_type);
decorators |= C1_NEEDS_PATCHING;
}
if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile && os::is_MP()) {
__ membar();
}
bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
if (needs_atomic_access && !needs_patching) {
volatile_field_load(address, reg, info);
} else {
LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
__ load(address, reg, info, patch_code);
}
if (is_volatile && os::is_MP()) {
__ membar_acquire();
}
LIR_Opr result = rlock_result(x, field_type);
access_load_at(decorators, field_type,
object, LIR_OprFact::intConst(x->offset()), result,
info ? new CodeEmitInfo(info) : NULL, info);
}
@ -1968,9 +1835,6 @@ void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
}
}
// emit array address setup early so it schedules better
LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
if (GenerateRangeChecks && needs_range_check) {
if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
__ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
@ -1986,7 +1850,12 @@ void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
}
}
__ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY;
LIR_Opr result = rlock_result(x, x->elt_type());
access_load_at(decorators, x->elt_type(),
array, index.result(), result,
NULL, null_check_info);
}
@ -2272,157 +2141,21 @@ void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
off.load_item();
src.load_item();
LIR_Opr value = rlock_result(x, x->basic_type());
DecoratorSet decorators = IN_HEAP;
if (support_IRIW_for_not_multiple_copy_atomic_cpu && x->is_volatile() && os::is_MP()) {
__ membar();
if (x->is_volatile()) {
decorators |= MO_SEQ_CST;
}
get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
#if INCLUDE_ALL_GCS
// We might be reading the value of the referent field of a
// Reference object in order to attach it back to the live
// object graph. If G1 is enabled then we need to record
// the value that is being returned in an SATB log buffer.
//
// We need to generate code similar to the following...
//
// if (offset == java_lang_ref_Reference::referent_offset) {
// if (src != NULL) {
// if (klass(src)->reference_type() != REF_NONE) {
// pre_barrier(..., value, ...);
// }
// }
// }
if (UseG1GC && type == T_OBJECT) {
bool gen_pre_barrier = true; // Assume we need to generate pre_barrier.
bool gen_offset_check = true; // Assume we need to generate the offset guard.
bool gen_source_check = true; // Assume we need to check the src object for null.
bool gen_type_check = true; // Assume we need to check the reference_type.
if (off.is_constant()) {
jlong off_con = (off.type()->is_int() ?
(jlong) off.get_jint_constant() :
off.get_jlong_constant());
if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
// The constant offset is something other than referent_offset.
// We can skip generating/checking the remaining guards and
// skip generation of the code stub.
gen_pre_barrier = false;
} else {
// The constant offset is the same as referent_offset -
// we do not need to generate a runtime offset check.
gen_offset_check = false;
}
}
// We don't need to generate stub if the source object is an array
if (gen_pre_barrier && src.type()->is_array()) {
gen_pre_barrier = false;
}
if (gen_pre_barrier) {
// We still need to continue with the checks.
if (src.is_constant()) {
ciObject* src_con = src.get_jobject_constant();
guarantee(src_con != NULL, "no source constant");
if (src_con->is_null_object()) {
// The constant src object is null - We can skip
// generating the code stub.
gen_pre_barrier = false;
} else {
// Non-null constant source object. We still have to generate
// the slow stub - but we don't need to generate the runtime
// null object check.
gen_source_check = false;
}
}
}
if (gen_pre_barrier && !PatchALot) {
// Can the klass of object be statically determined to be
// a sub-class of Reference?
ciType* type = src.value()->declared_type();
if ((type != NULL) && type->is_loaded()) {
if (type->is_subtype_of(compilation()->env()->Reference_klass())) {
gen_type_check = false;
} else if (type->is_klass() &&
!compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
// Not Reference and not Object klass.
gen_pre_barrier = false;
}
}
}
if (gen_pre_barrier) {
LabelObj* Lcont = new LabelObj();
// We can generate one runtime check here. Let's start with
// the offset check.
if (gen_offset_check) {
// if (offset != referent_offset) -> continue
// If offset is an int then we can do the comparison with the
// referent_offset constant; otherwise we need to move
// referent_offset into a temporary register and generate
// a reg-reg compare.
LIR_Opr referent_off;
if (off.type()->is_int()) {
referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
} else {
assert(off.type()->is_long(), "what else?");
referent_off = new_register(T_LONG);
__ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
}
__ cmp(lir_cond_notEqual, off.result(), referent_off);
__ branch(lir_cond_notEqual, as_BasicType(off.type()), Lcont->label());
}
if (gen_source_check) {
// offset is a const and equals referent offset
// if (source == null) -> continue
__ cmp(lir_cond_equal, src.result(), LIR_OprFact::oopConst(NULL));
__ branch(lir_cond_equal, T_OBJECT, Lcont->label());
}
LIR_Opr src_klass = new_register(T_OBJECT);
if (gen_type_check) {
// We have determined that offset == referent_offset && src != null.
// if (src->_klass->_reference_type == REF_NONE) -> continue
__ move(new LIR_Address(src.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), src_klass);
LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE);
LIR_Opr reference_type = new_register(T_INT);
__ move(reference_type_addr, reference_type);
__ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
__ branch(lir_cond_equal, T_INT, Lcont->label());
}
{
// We have determined that src->_klass->_reference_type != REF_NONE
// so register the value in the referent field with the pre-barrier.
pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
value /* pre_val */,
false /* do_load */,
false /* patch */,
NULL /* info */);
}
__ branch_destination(Lcont->label());
}
}
#endif // INCLUDE_ALL_GCS
if (x->is_volatile() && os::is_MP()) __ membar_acquire();
/* Normalize boolean value returned by unsafe operation, i.e., value != 0 ? value = true : value = false. */
if (type == T_BOOLEAN) {
LabelObj* equalZeroLabel = new LabelObj();
__ cmp(lir_cond_equal, value, 0);
__ branch(lir_cond_equal, T_BOOLEAN, equalZeroLabel->label());
__ move(LIR_OprFact::intConst(1), value);
__ branch_destination(equalZeroLabel->label());
decorators |= C1_MASK_BOOLEAN;
}
if (type == T_ARRAY || type == T_OBJECT) {
decorators |= ON_UNKNOWN_OOP_REF;
}
LIR_Opr result = rlock_result(x, type);
access_load_at(decorators, type,
src, off.result(), result);
}
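
For illustration (not part of this change): ON_UNKNOWN_OOP_REF tells the backend that this access might be Reference.referent, so a SATB-style backend re-creates, at run time, the guards the removed block above spelled out by hand. A standalone sketch of that decision, with invented field names:

#include <cstdio>

struct UnsafeOopRead {
  bool offset_is_referent_offset;    // may only be decidable at run time
  bool source_is_null;
  bool source_is_reference_subtype;
};

bool pre_barrier_needed_at_runtime(const UnsafeOopRead& r) {
  return r.offset_is_referent_offset && !r.source_is_null && r.source_is_reference_subtype;
}

int main() {
  UnsafeOopRead ordinary = { false, false, false };
  UnsafeOopRead referent = { true,  false, true  };
  printf("ordinary read: %d\n", (int)pre_barrier_needed_at_runtime(ordinary));
  printf("referent read: %d\n", (int)pre_barrier_needed_at_runtime(referent));
  return 0;
}
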
@ -2442,11 +2175,36 @@ void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
set_no_result(x);
if (x->is_volatile() && os::is_MP()) __ membar_release();
put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
if (!support_IRIW_for_not_multiple_copy_atomic_cpu && x->is_volatile() && os::is_MP()) __ membar();
DecoratorSet decorators = IN_HEAP;
if (type == T_ARRAY || type == T_OBJECT) {
decorators |= ON_UNKNOWN_OOP_REF;
}
if (x->is_volatile()) {
decorators |= MO_SEQ_CST;
}
access_store_at(decorators, type, src, off.result(), data.result());
}
void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
BasicType type = x->basic_type();
LIRItem src(x->object(), this);
LIRItem off(x->offset(), this);
LIRItem value(x->value(), this);
DecoratorSet decorators = IN_HEAP | MO_SEQ_CST;
if (type == T_ARRAY || type == T_OBJECT) {
decorators |= ON_UNKNOWN_OOP_REF;
}
LIR_Opr result;
if (x->is_add()) {
result = access_atomic_add_at(decorators, type, src, off, value);
} else {
result = access_atomic_xchg_at(decorators, type, src, off, value);
}
set_result(x, result);
}
void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
int lng = x->length();
@ -2552,6 +2310,36 @@ void LIRGenerator::do_TableSwitch(TableSwitch* x) {
int hi_key = x->hi_key();
int len = x->length();
LIR_Opr value = tag.result();
if (compilation()->env()->comp_level() == CompLevel_full_profile && UseSwitchProfiling) {
ciMethod* method = x->state()->scope()->method();
ciMethodData* md = method->method_data_or_null();
ciProfileData* data = md->bci_to_data(x->state()->bci());
assert(data->is_MultiBranchData(), "bad profile data?");
int default_count_offset = md->byte_offset_of_slot(data, MultiBranchData::default_count_offset());
LIR_Opr md_reg = new_register(T_METADATA);
__ metadata2reg(md->constant_encoding(), md_reg);
LIR_Opr data_offset_reg = new_pointer_register();
LIR_Opr tmp_reg = new_pointer_register();
__ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg);
for (int i = 0; i < len; i++) {
int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i));
__ cmp(lir_cond_equal, value, i + lo_key);
__ move(data_offset_reg, tmp_reg);
__ cmove(lir_cond_equal,
LIR_OprFact::intptrConst(count_offset),
tmp_reg,
data_offset_reg, T_INT);
}
LIR_Opr data_reg = new_pointer_register();
LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
__ move(data_addr, data_reg);
__ add(data_reg, LIR_OprFact::intptrConst(1), data_reg);
__ move(data_reg, data_addr);
}
if (UseTableRanges) {
do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
} else {
@ -2577,6 +2365,37 @@ void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
move_to_phi(x->state());
LIR_Opr value = tag.result();
int len = x->length();
if (compilation()->env()->comp_level() == CompLevel_full_profile && UseSwitchProfiling) {
ciMethod* method = x->state()->scope()->method();
ciMethodData* md = method->method_data_or_null();
ciProfileData* data = md->bci_to_data(x->state()->bci());
assert(data->is_MultiBranchData(), "bad profile data?");
int default_count_offset = md->byte_offset_of_slot(data, MultiBranchData::default_count_offset());
LIR_Opr md_reg = new_register(T_METADATA);
__ metadata2reg(md->constant_encoding(), md_reg);
LIR_Opr data_offset_reg = new_pointer_register();
LIR_Opr tmp_reg = new_pointer_register();
__ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg);
for (int i = 0; i < len; i++) {
int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i));
__ cmp(lir_cond_equal, value, x->key_at(i));
__ move(data_offset_reg, tmp_reg);
__ cmove(lir_cond_equal,
LIR_OprFact::intptrConst(count_offset),
tmp_reg,
data_offset_reg, T_INT);
}
LIR_Opr data_reg = new_pointer_register();
LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
__ move(data_addr, data_reg);
__ add(data_reg, LIR_OprFact::intptrConst(1), data_reg);
__ move(data_reg, data_addr);
}
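
For illustration (not part of this change): both profiling blocks added to do_TableSwitch and do_LookupSwitch select the matching case counter's offset with conditional moves, defaulting to the default-count slot, and then bump that single counter. A standalone sketch of the selection logic, not HotSpot code, using TableSwitch-style consecutive keys:

#include <cstdio>

int main() {
  const int lo_key = 10;
  const int len    = 4;
  long counters[1 + len];
  for (int i = 0; i < 1 + len; i++) counters[i] = 0;
  const int default_slot = 0;                          // slot 0 models the default count, 1..len the case counts

  int value = 12;                                      // the switch operand being profiled
  int slot = default_slot;
  for (int i = 0; i < len; i++) {
    slot = (value == lo_key + i) ? (1 + i) : slot;     // branchless select, like the emitted cmove
  }
  counters[slot]++;                                    // single load/add/store of the chosen counter

  for (int i = 0; i < 1 + len; i++) printf("slot %d: %ld\n", i, counters[i]);
  return 0;
}
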
if (UseTableRanges) {
do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
} else {
@ -3765,8 +3584,7 @@ void LIRGenerator::do_MemBar(MemBar* x) {
}
}
LIR_Opr LIRGenerator::maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
if (x->check_boolean()) {
LIR_Opr LIRGenerator::mask_boolean(LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
LIR_Opr value_fixed = rlock_byte(T_BYTE);
if (TwoOperandLIRForm) {
__ move(value, value_fixed);
@ -3784,6 +3602,12 @@ LIR_Opr LIRGenerator::maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr
__ cmp(lir_cond_notEqual, layout, LIR_OprFact::intConst(0));
__ cmove(lir_cond_notEqual, value_fixed, value, value_fixed, T_BYTE);
value = value_fixed;
return value;
}
LIR_Opr LIRGenerator::maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
if (x->check_boolean()) {
value = mask_boolean(array, value, null_check_info);
}
return value;
}
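
For illustration (not part of this change): C1_MASK_BOOLEAN and the mask_boolean helper above amount to forcing the stored byte to 0 or 1 when the destination turns out, at run time, to be a boolean[]. Standalone sketch; the low-bit mask is an assumption about the exact masking, and the boolean flag stands in for the layout-helper test:

#include <cstdint>
#include <cstdio>

// For an Object-typed array store the element type is only known at run time;
// a boolean[] destination gets the value clamped to 0/1, a byte[] does not.
uint8_t maybe_mask_boolean(bool dest_is_boolean_array, uint8_t value) {
  uint8_t masked = value & 1;                      // plays the role of value_fixed
  return dest_is_boolean_array ? masked : value;   // the cmove on the layout-helper test
}

int main() {
  printf("boolean[] store of 0x82 -> %u\n", (unsigned)maybe_mask_boolean(true,  0x82));
  printf("byte[]    store of 0x82 -> %u\n", (unsigned)maybe_mask_boolean(false, 0x82));
  return 0;
}
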

View File

@ -25,12 +25,16 @@
#ifndef SHARE_VM_C1_C1_LIRGENERATOR_HPP
#define SHARE_VM_C1_C1_LIRGENERATOR_HPP
#include "c1/c1_Decorators.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIR.hpp"
#include "ci/ciMethodData.hpp"
#include "gc/shared/barrierSet.hpp"
#include "utilities/macros.hpp"
#include "utilities/sizes.hpp"
class BarrierSetC1;
// The classes responsible for code emission and register allocation
@ -165,7 +169,6 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
Values _instruction_for_operand;
BitMap2D _vreg_flags; // flags which can be set on a per-vreg basis
LIR_List* _lir;
BarrierSet* _bs;
LIRGenerator* gen() {
return this;
@ -173,6 +176,7 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
void print_if_not_loaded(const NewInstance* new_instance) PRODUCT_RETURN;
public:
#ifdef ASSERT
LIR_List* lir(const char * file, int line) const {
_lir->set_file_and_line(file, line);
@ -183,6 +187,7 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
return _lir;
}
private:
// a simple cache of constants used within a block
GrowableArray<LIR_Const*> _constants;
LIR_OprList _reg_for_constants;
@ -190,6 +195,7 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
friend class PhiResolver;
public:
// unified bailout support
void bailout(const char* msg) const { compilation()->bailout(msg); }
bool bailed_out() const { return compilation()->bailed_out(); }
@ -233,14 +239,15 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
void move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val);
void move_to_phi(ValueStack* cur_state);
// code emission
void do_ArithmeticOp_Long (ArithmeticOp* x);
void do_ArithmeticOp_Int (ArithmeticOp* x);
void do_ArithmeticOp_FPU (ArithmeticOp* x);
// platform dependent
LIR_Opr getThreadPointer();
private:
// code emission
void do_ArithmeticOp_Long(ArithmeticOp* x);
void do_ArithmeticOp_Int (ArithmeticOp* x);
void do_ArithmeticOp_FPU (ArithmeticOp* x);
void do_RegisterFinalizer(Intrinsic* x);
void do_isInstance(Intrinsic* x);
void do_isPrimitive(Intrinsic* x);
@ -258,6 +265,7 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
void do_update_CRC32C(Intrinsic* x);
void do_vectorizedMismatch(Intrinsic* x);
public:
LIR_Opr call_runtime(BasicTypeArray* signature, LIRItemList* args, address entry, ValueType* result_type, CodeEmitInfo* info);
LIR_Opr call_runtime(BasicTypeArray* signature, LIR_OprList* args, address entry, ValueType* result_type, CodeEmitInfo* info);
@ -265,27 +273,41 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
LIR_Opr call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info);
LIR_Opr call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info);
// GC Barriers
// Access API
// generic interface
private:
BarrierSetC1 *_barrier_set;
void pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val, bool do_load, bool patch, CodeEmitInfo* info);
void post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
public:
void access_store_at(DecoratorSet decorators, BasicType type,
LIRItem& base, LIR_Opr offset, LIR_Opr value,
CodeEmitInfo* patch_info = NULL, CodeEmitInfo* store_emit_info = NULL);
// specific implementations
// pre barriers
void access_load_at(DecoratorSet decorators, BasicType type,
LIRItem& base, LIR_Opr offset, LIR_Opr result,
CodeEmitInfo* patch_info = NULL, CodeEmitInfo* load_emit_info = NULL);
void G1BarrierSet_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
bool do_load, bool patch, CodeEmitInfo* info);
LIR_Opr access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value);
// post barriers
LIR_Opr access_atomic_xchg_at(DecoratorSet decorators, BasicType type,
LIRItem& base, LIRItem& offset, LIRItem& value);
LIR_Opr access_atomic_add_at(DecoratorSet decorators, BasicType type,
LIRItem& base, LIRItem& offset, LIRItem& value);
// These need to guarantee JMM volatile semantics are preserved on each platform
// and requires one implementation per architecture.
LIR_Opr atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value);
LIR_Opr atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& new_value);
LIR_Opr atomic_add(BasicType type, LIR_Opr addr, LIRItem& new_value);
void G1BarrierSet_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
void CardTableBarrierSet_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
#ifdef CARDTABLEBARRIERSET_POST_BARRIER_HELPER
void CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base);
virtual void CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base);
#endif
// specific implementations
void array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci);
static LIR_Opr result_register_for(ValueType* type, bool callee = false);
@ -354,7 +376,7 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
LIR_Address* generate_address(LIR_Opr base, int disp, BasicType type) {
return generate_address(base, LIR_OprFact::illegalOpr, 0, disp, type);
}
LIR_Address* emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr, BasicType type, bool needs_card_mark);
LIR_Address* emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr, BasicType type);
// the helper for generate_address
void add_large_constant(LIR_Opr src, int c, LIR_Opr dest);
@ -433,8 +455,6 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
void do_soft_float_compare(If *x);
#endif // __SOFTFP__
void init();
SwitchRangeArray* create_lookup_ranges(TableSwitch* x);
SwitchRangeArray* create_lookup_ranges(LookupSwitch* x);
void do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux);
@ -452,6 +472,7 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
void profile_arguments(ProfileCall* x);
void profile_parameters(Base* x);
void profile_parameters_at_call(ProfileCall* x);
LIR_Opr mask_boolean(LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info);
LIR_Opr maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info);
public:
@ -478,8 +499,8 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
: _compilation(compilation)
, _method(method)
, _virtual_register_number(LIR_OprDesc::vreg_base)
, _vreg_flags(num_vreg_flags) {
init();
, _vreg_flags(num_vreg_flags)
, _barrier_set(BarrierSet::barrier_set()->barrier_set_c1()) {
}
// for virtual registers, maps them back to Phi's or Local's

View File

@ -74,6 +74,9 @@ class StubAssembler: public C1_MacroAssembler {
void set_frame_size(int size);
void set_num_rt_args(int args);
void save_live_registers();
void restore_live_registers_without_return();
// accessors
const char* name() const { return _name; }
bool must_gc_arguments() const { return _must_gc_arguments; }
@ -86,6 +89,9 @@ class StubAssembler: public C1_MacroAssembler {
int call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1);
int call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2);
int call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3);
void prologue(const char* name, bool must_gc_arguments);
void epilogue();
};
#endif // SHARE_VM_C1_C1_MACROASSEMBLER_HPP

View File

@ -39,6 +39,7 @@
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
@ -178,9 +179,17 @@ static void deopt_caller() {
}
}
class StubIDStubAssemblerCodeGenClosure: public StubAssemblerCodeGenClosure {
private:
Runtime1::StubID _id;
public:
StubIDStubAssemblerCodeGenClosure(Runtime1::StubID id) : _id(id) {}
virtual OopMapSet* generate_code(StubAssembler* sasm) {
return Runtime1::generate_code_for(_id, sasm);
}
};
void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
assert(0 <= id && id < number_of_ids, "illegal stub id");
CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, int stub_id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure* cl) {
ResourceMark rm;
// create code buffer for code storage
CodeBuffer code(buffer_blob);
@ -192,33 +201,12 @@ void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
Compilation::setup_code_buffer(&code, 0);
// create assembler for code generation
StubAssembler* sasm = new StubAssembler(&code, name_for(id), id);
StubAssembler* sasm = new StubAssembler(&code, name, stub_id);
// generate code for runtime stub
oop_maps = generate_code_for(id, sasm);
oop_maps = cl->generate_code(sasm);
assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
"if stub has an oop map it must have a valid frame size");
#ifdef ASSERT
// Make sure that stubs that need oopmaps have them
switch (id) {
// These stubs don't need to have an oopmap
case dtrace_object_alloc_id:
case g1_pre_barrier_slow_id:
case g1_post_barrier_slow_id:
case slow_subtype_check_id:
case fpu2long_stub_id:
case unwind_exception_id:
case counter_overflow_id:
#if defined(SPARC) || defined(PPC32)
case handle_exception_nofpu_id: // Unused on sparc
#endif
break;
// All other stubs should have oopmaps
default:
assert(oop_maps != NULL, "must have an oopmap");
}
#endif
assert(!expect_oop_map || oop_maps != NULL, "must have an oopmap");
// align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
sasm->align(BytesPerWord);
@ -228,17 +216,42 @@ void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
frame_size = sasm->frame_size();
must_gc_arguments = sasm->must_gc_arguments();
// create blob - distinguish a few special cases
CodeBlob* blob = RuntimeStub::new_runtime_stub(name_for(id),
CodeBlob* blob = RuntimeStub::new_runtime_stub(name,
&code,
CodeOffsets::frame_never_safe,
frame_size,
oop_maps,
must_gc_arguments);
// install blob
assert(blob != NULL, "blob must exist");
_blobs[id] = blob;
return blob;
}
void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
assert(0 <= id && id < number_of_ids, "illegal stub id");
bool expect_oop_map = true;
#ifdef ASSERT
// Make sure that stubs that need oopmaps have them
switch (id) {
// These stubs don't need to have an oopmap
case dtrace_object_alloc_id:
case slow_subtype_check_id:
case fpu2long_stub_id:
case unwind_exception_id:
case counter_overflow_id:
#if defined(SPARC) || defined(PPC32)
case handle_exception_nofpu_id: // Unused on sparc
#endif
expect_oop_map = false;
break;
default:
break;
}
#endif
StubIDStubAssemblerCodeGenClosure cl(id);
CodeBlob* blob = generate_blob(buffer_blob, id, name_for(id), expect_oop_map, &cl);
// install blob
_blobs[id] = blob;
}
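
For illustration (not part of this change): the refactoring above separates how a stub blob is built from which code goes into it, using a small closure object. A standalone sketch of the pattern, not HotSpot code, with invented names:

#include <cstdio>

struct CodeGenClosureModel {                       // models StubAssemblerCodeGenClosure
  virtual ~CodeGenClosureModel() {}
  virtual void generate_code() = 0;
};

struct StubIDClosureModel : CodeGenClosureModel {  // models StubIDStubAssemblerCodeGenClosure
  int _id;
  explicit StubIDClosureModel(int id) : _id(id) {}
  virtual void generate_code() { printf("emit code for stub %d\n", _id); }
};

// Shared bookkeeping lives here; the stub-specific part is delegated to the
// closure, so a GC component can generate its own stubs through the same path.
void generate_blob(const char* name, CodeGenClosureModel* cl) {
  printf("set up code buffer for %s\n", name);
  cl->generate_code();
  printf("install blob %s\n", name);
}

int main() {
  StubIDClosureModel cl(7);
  generate_blob("counter_overflow", &cl);
  return 0;
}
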
void Runtime1::initialize(BufferBlob* blob) {
// platform-dependent initialization
@ -257,9 +270,10 @@ void Runtime1::initialize(BufferBlob* blob) {
}
}
#endif
BarrierSetC1* bs = BarrierSet::barrier_set()->barrier_set_c1();
bs->generate_c1_runtime_stubs(blob);
}
CodeBlob* Runtime1::blob_for(StubID id) {
assert(0 <= id && id < number_of_ids, "illegal stub id");
return _blobs[id];

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -68,8 +68,6 @@ class StubAssembler;
stub(load_klass_patching) \
stub(load_mirror_patching) \
stub(load_appendix_patching) \
stub(g1_pre_barrier_slow) \
stub(g1_post_barrier_slow) \
stub(fpu2long_stub) \
stub(counter_overflow) \
stub(predicate_failed_trap) \
@ -80,6 +78,11 @@ class StubAssembler;
#define STUB_NAME(x) #x " Runtime1 stub",
#define LAST_STUB_NAME(x) #x " Runtime1 stub"
class StubAssemblerCodeGenClosure: public Closure {
public:
virtual OopMapSet* generate_code(StubAssembler* sasm) = 0;
};
class Runtime1: public AllStatic {
friend class VMStructs;
friend class ArrayCopyStub;
@ -121,8 +124,11 @@ class Runtime1: public AllStatic {
static const char* _blob_names[];
// stub generation
public:
static CodeBlob* generate_blob(BufferBlob* buffer_blob, int stub_id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure *cl);
static void generate_blob_for(BufferBlob* blob, StubID id);
static OopMapSet* generate_code_for(StubID id, StubAssembler* sasm);
private:
static OopMapSet* generate_exception_throw(StubAssembler* sasm, address target, bool has_argument);
static OopMapSet* generate_handle_exception(StubID id, StubAssembler* sasm);
static void generate_unwind_exception(StubAssembler *sasm);

View File

@ -283,7 +283,6 @@ InstanceKlass* ClassListParser::load_class_from_source(Symbol* class_name, TRAPS
error("AppCDS custom class loaders not supported on this platform");
#endif
assert(UseAppCDS, "must be");
if (!is_super_specified()) {
error("If source location is specified, super class must be also specified");
}
@ -383,10 +382,8 @@ Klass* ClassListParser::load_current_class(TRAPS) {
} else {
// If "source:" tag is specified, all super class and super interfaces must be specified in the
// class list file.
if (UseAppCDS) {
klass = load_class_from_source(class_name_symbol, CHECK_NULL);
}
}
if (klass != NULL && klass->is_instance_klass() && is_id_specified()) {
InstanceKlass* ik = InstanceKlass::cast(klass);

View File

@ -270,14 +270,6 @@ ClassFileStream* ClassPathDirEntry::open_stream(const char* name, TRAPS) {
// check if file exists
struct stat st;
if (os::stat(path, &st) == 0) {
#if INCLUDE_CDS
if (DumpSharedSpaces) {
// We have already checked in ClassLoader::check_shared_classpath() that the directory is empty, so
// we should never find a file underneath it -- unless user has added a new file while we are running
// the dump, in which case let's quit!
ShouldNotReachHere();
}
#endif
// found file, open it
int file_handle = os::open(path, 0, 0);
if (file_handle != -1) {
@ -644,24 +636,6 @@ void ClassLoader::trace_class_path(const char* msg, const char* name) {
}
}
#if INCLUDE_CDS
void ClassLoader::check_shared_classpath(const char *path) {
if (strcmp(path, "") == 0) {
exit_with_path_failure("Cannot have empty path in archived classpaths", NULL);
}
struct stat st;
if (os::stat(path, &st) == 0) {
if ((st.st_mode & S_IFMT) != S_IFREG) { // is not a regular file
if (!os::dir_is_empty(path)) {
tty->print_cr("Error: non-empty directory '%s'", path);
exit_with_path_failure("CDS allows only empty directories in archived classpaths", NULL);
}
}
}
}
#endif
void ClassLoader::setup_bootstrap_search_path() {
const char* sys_class_path = Arguments::get_sysclasspath();
if (PrintSharedArchiveAndExit) {
@ -713,8 +687,6 @@ void ClassLoader::setup_app_search_path(const char *class_path) {
strncpy(path, &class_path[start], end - start);
path[end - start] = '\0';
check_shared_classpath(path);
update_class_path_entry_list(path, false, false);
while (class_path[end] == os::path_separator()[0]) {
@ -757,7 +729,6 @@ void ClassLoader::update_module_path_entry_list(const char *path, TRAPS) {
}
void ClassLoader::setup_module_search_path(const char* path, TRAPS) {
check_shared_classpath(path);
update_module_path_entry_list(path, THREAD);
}
#endif // INCLUDE_CDS
@ -886,11 +857,6 @@ void ClassLoader::setup_boot_search_path(const char *class_path) {
update_class_path_entry_list(path, false, true);
}
#if INCLUDE_CDS
if (DumpSharedSpaces) {
check_shared_classpath(path);
}
#endif
while (class_path[end] == os::path_separator()[0]) {
end++;
}
@ -1082,11 +1048,6 @@ void ClassLoader::add_to_app_classpath_entries(const char* path,
if (entry->is_jar_file()) {
ClassLoaderExt::process_jar_manifest(entry, check_for_duplicates);
} else {
if (!os::dir_is_empty(path)) {
tty->print_cr("Error: non-empty directory '%s'", path);
exit_with_path_failure("Cannot have non-empty directory in app classpaths", NULL);
}
}
#endif
}

View File

@ -422,7 +422,6 @@ class ClassLoader: AllStatic {
}
return num_entries;
}
static void check_shared_classpath(const char *path);
static void finalize_shared_paths_misc_info();
static int get_shared_paths_misc_info_size();
static void* get_shared_paths_misc_info();

View File

@ -54,8 +54,17 @@ jshort ClassLoaderExt::_app_module_paths_start_index = ClassLoaderExt::max_class
bool ClassLoaderExt::_has_app_classes = false;
bool ClassLoaderExt::_has_platform_classes = false;
void ClassLoaderExt::append_boot_classpath(ClassPathEntry* new_entry) {
#if INCLUDE_CDS
warning("Sharing is only supported for boot loader classes because bootstrap classpath has been appended");
FileMapHeaderExt* header = (FileMapHeaderExt*)FileMapInfo::current_info()->header();
header->set_has_platform_or_app_classes(false);
#endif
ClassLoader::add_to_boot_append_entries(new_entry);
}
void ClassLoaderExt::setup_app_search_path() {
assert(DumpSharedSpaces, "this function is only used with -Xshare:dump and -XX:+UseAppCDS");
assert(DumpSharedSpaces, "this function is only used with -Xshare:dump");
_app_class_paths_start_index = ClassLoader::num_boot_classpath_entries();
char* app_class_path = os::strdup(Arguments::get_appclasspath());
@ -85,8 +94,8 @@ void ClassLoaderExt::process_module_table(ModuleEntryTable* met, TRAPS) {
}
}
}
void ClassLoaderExt::setup_module_search_path(TRAPS) {
assert(DumpSharedSpaces, "this function is only used with -Xshare:dump and -XX:+UseAppCDS");
void ClassLoaderExt::setup_module_paths(TRAPS) {
assert(DumpSharedSpaces, "this function is only used with -Xshare:dump");
_app_module_paths_start_index = ClassLoader::num_boot_classpath_entries() +
ClassLoader::num_app_classpath_entries();
Handle system_class_loader (THREAD, SystemDictionary::java_system_loader());
@ -215,16 +224,8 @@ void ClassLoaderExt::process_jar_manifest(ClassPathEntry* entry,
}
void ClassLoaderExt::setup_search_paths() {
if (UseAppCDS) {
shared_paths_misc_info()->record_app_offset();
ClassLoaderExt::setup_app_search_path();
}
}
void ClassLoaderExt::setup_module_paths(TRAPS) {
if (UseAppCDS) {
ClassLoaderExt::setup_module_search_path(THREAD);
}
}
Thread* ClassLoaderExt::Context::_dump_thread = NULL;
@ -251,11 +252,9 @@ void ClassLoaderExt::record_result(ClassLoaderExt::Context *context,
}
void ClassLoaderExt::finalize_shared_paths_misc_info() {
if (UseAppCDS) {
if (!_has_app_classes) {
shared_paths_misc_info()->pop_app();
}
}
}
// Load the class of the given name from the location given by path. The path is specified by
@ -264,7 +263,7 @@ void ClassLoaderExt::finalize_shared_paths_misc_info() {
InstanceKlass* ClassLoaderExt::load_class(Symbol* name, const char* path, TRAPS) {
assert(name != NULL, "invariant");
assert(DumpSharedSpaces && UseAppCDS, "this function is only used with -Xshare:dump and -XX:+UseAppCDS");
assert(DumpSharedSpaces, "this function is only used with -Xshare:dump");
ResourceMark rm(THREAD);
const char* class_name = name->as_C_string();
@ -322,7 +321,7 @@ static GrowableArray<CachedClassPathEntry>* cached_path_entries = NULL;
ClassPathEntry* ClassLoaderExt::find_classpath_entry_from_cache(const char* path, TRAPS) {
// This is called from dump time so it's single threaded and there's no need for a lock.
assert(DumpSharedSpaces && UseAppCDS, "this function is only used with -Xshare:dump and -XX:+UseAppCDS");
assert(DumpSharedSpaces, "this function is only used with -Xshare:dump");
if (cached_path_entries == NULL) {
cached_path_entries = new (ResourceObj::C_HEAP, mtClass) GrowableArray<CachedClassPathEntry>(20, /*c heap*/ true);
}

View File

@ -95,7 +95,6 @@ private:
static char* get_class_path_attr(const char* jar_path, char* manifest, jint manifest_size);
static void setup_app_search_path(); // Only when -Xshare:dump
static void process_module_table(ModuleEntryTable* met, TRAPS);
static void setup_module_search_path(TRAPS);
static SharedPathsMiscInfoExt* shared_paths_misc_info() {
return (SharedPathsMiscInfoExt*)_shared_paths_misc_info;
}
@ -112,15 +111,7 @@ public:
CDS_ONLY(static void process_jar_manifest(ClassPathEntry* entry, bool check_for_duplicates);)
// Called by JVMTI code to add boot classpath
static void append_boot_classpath(ClassPathEntry* new_entry) {
#if INCLUDE_CDS
if (UseAppCDS) {
warning("UseAppCDS is disabled because bootstrap classpath has been appended");
UseAppCDS = false;
}
#endif
ClassLoader::add_to_boot_append_entries(new_entry);
}
static void append_boot_classpath(ClassPathEntry* new_entry);
static void setup_search_paths() NOT_CDS_RETURN;
static void setup_module_paths(TRAPS) NOT_CDS_RETURN;

View File

@ -173,13 +173,11 @@ void SharedClassUtil::initialize(TRAPS) {
int size = FileMapInfo::get_number_of_shared_paths();
if (size > 0) {
SystemDictionaryShared::allocate_shared_data_arrays(size, THREAD);
if (!DumpSharedSpaces) {
FileMapHeaderExt* header = (FileMapHeaderExt*)FileMapInfo::current_info()->header();
ClassLoaderExt::init_paths_start_index(header->_app_class_paths_start_index);
ClassLoaderExt::init_app_module_paths_start_index(header->_app_module_paths_start_index);
}
}
}
if (DumpSharedSpaces) {
if (SharedArchiveConfigFile) {
@ -229,19 +227,20 @@ void FileMapHeaderExt::populate(FileMapInfo* mapinfo, size_t alignment) {
}
bool FileMapHeaderExt::validate() {
if (UseAppCDS) {
const char* prop = Arguments::get_property("java.system.class.loader");
if (prop != NULL) {
warning("UseAppCDS is disabled because the java.system.class.loader property is specified (value = \"%s\"). "
"To enable UseAppCDS, this property must be not be set", prop);
UseAppCDS = false;
}
}
if (!FileMapInfo::FileMapHeader::validate()) {
return false;
}
// This must be done after header validation because it might change the
// header data
const char* prop = Arguments::get_property("java.system.class.loader");
if (prop != NULL) {
warning("Archived non-system classes are disabled because the "
"java.system.class.loader property is specified (value = \"%s\"). "
"To use archived non-system classes, this property must be not be set", prop);
_has_platform_or_app_classes = false;
}
// For backwards compatibility, we don't check the verification setting
// if the archive only contains system classes.
if (_has_platform_or_app_classes &&

Some files were not shown because too many files have changed in this diff