Phil Race 2019-12-06 12:06:09 -08:00
commit 1a73baeb10
1215 changed files with 79114 additions and 10008 deletions

@ -598,3 +598,4 @@ c16ac7a2eba4e73cb4f7ee9294dd647860eebff0 jdk-14+21
15936b142f86731afa4b1a2c0fe4a01e806c4944 jdk-14+23
438337c846fb071900ddb6922bddf8b3e895a514 jdk-14+24
17d242844fc9e7d18b3eac97426490a9c246119e jdk-14+25
288777cf0702914e5266bc1e5d380eed9032ca41 jdk-14+26

@ -168,6 +168,8 @@ TEST FAILURE</code></pre>
<p>Additional VM options to JTReg (<code>-vmoption</code>).</p>
<h4 id="aot_modules-1">AOT_MODULES</h4>
<p>Generate AOT modules before testing for the specified module, or set of modules. If multiple modules are specified, they should be separated by space (or, to help avoid quoting issues, the special value <code>%20</code>).</p>
<h4 id="retry_count">RETRY_COUNT</h4>
<p>Retry failed tests up to a set number of times. Defaults to 0.</p>
<h3 id="gtest-keywords">Gtest keywords</h3>
<h4 id="repeat">REPEAT</h4>
<p>The number of times to repeat the tests (<code>--gtest_repeat</code>).</p>

@ -332,6 +332,10 @@ Generate AOT modules before testing for the specified module, or set of
modules. If multiple modules are specified, they should be separated by space
(or, to help avoid quoting issues, the special value `%20`).
#### RETRY_COUNT
Retry failed tests up to a set number of times. Defaults to 0.
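For example, an illustrative invocation (assuming the usual syntax for passing JTReg keywords) is `make test TEST=tier1 JTREG="RETRY_COUNT=2"`, which reruns any failed tests up to two additional times.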
### Gtest keywords
#### REPEAT

@ -1,5 +1,5 @@
#
# Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -80,6 +80,7 @@ define SetupInterimModule
ADD_JAVAC_FLAGS := --module-path $(BUILDTOOLS_OUTPUTDIR)/interim_langtools_modules \
$$(INTERIM_LANGTOOLS_ADD_EXPORTS) \
--patch-module java.base=$(BUILDTOOLS_OUTPUTDIR)/gensrc/java.base.interim \
--add-exports java.base/jdk.internal=java.compiler.interim \
--add-exports java.base/jdk.internal=jdk.compiler.interim \
-Xlint:-module, \
))

@ -380,6 +380,13 @@ endif
################################################################################
jdk.incubator.jpackage_COPY += .gif .png .txt .spec .script .prerm .preinst .postrm .postinst .list .sh \
.desktop .copyright .control .plist .template .icns .scpt .entitlements .wxs .wxl .wxi .ico .bmp
jdk.incubator.jpackage_CLEAN += .properties
################################################################################
jdk.jconsole_COPY += .gif .png
jdk.jconsole_CLEAN_FILES += $(wildcard \

@ -300,7 +300,8 @@ $(eval $(call SetTestOpt,FAILURE_HANDLER_TIMEOUT,JTREG))
$(eval $(call ParseKeywordVariable, JTREG, \
SINGLE_KEYWORDS := JOBS TIMEOUT_FACTOR FAILURE_HANDLER_TIMEOUT \
TEST_MODE ASSERT VERBOSE RETAIN MAX_MEM RUN_PROBLEM_LISTS, \
TEST_MODE ASSERT VERBOSE RETAIN MAX_MEM RUN_PROBLEM_LISTS \
RETRY_COUNT, \
STRING_KEYWORDS := OPTIONS JAVA_OPTIONS VM_OPTIONS KEYWORDS \
EXTRA_PROBLEM_LISTS AOT_MODULES, \
))
@ -851,6 +852,7 @@ define SetupRunJtregTestBody
JTREG_VERBOSE ?= fail,error,summary
JTREG_RETAIN ?= fail,error
JTREG_RUN_PROBLEM_LISTS ?= false
JTREG_RETRY_COUNT ?= 0
ifneq ($$($1_JTREG_MAX_MEM), 0)
$1_JTREG_BASIC_OPTIONS += -vmoption:-Xmx$$($1_JTREG_MAX_MEM)
@ -942,25 +944,43 @@ define SetupRunJtregTestBody
clean-workdir-$1:
$$(RM) -r $$($1_TEST_SUPPORT_DIR)
$1_COMMAND_LINE := \
$$(JAVA) $$($1_JTREG_LAUNCHER_OPTIONS) \
-Dprogram=jtreg -jar $$(JT_HOME)/lib/jtreg.jar \
$$($1_JTREG_BASIC_OPTIONS) \
-testjdk:$$(JDK_UNDER_TEST) \
-dir:$$(JTREG_TOPDIR) \
-reportDir:$$($1_TEST_RESULTS_DIR) \
-workDir:$$($1_TEST_SUPPORT_DIR) \
-status:$$$${JTREG_STATUS} \
$$(JTREG_OPTIONS) \
$$(JTREG_FAILURE_HANDLER_OPTIONS) \
$$(JTREG_COV_OPTIONS) \
$$($1_TEST_NAME) \
&& $$(ECHO) $$$$? > $$($1_EXITCODE) \
|| $$(ECHO) $$$$? > $$($1_EXITCODE)
ifneq ($$(JTREG_RETRY_COUNT), 0)
$1_COMMAND_LINE := \
for i in {0..$$(JTREG_RETRY_COUNT)}; do \
if [ "$$$$i" != 0 ]; then \
$$(PRINTF) "\nRetrying Jtreg run. Attempt: $$$$i\n"; \
fi; \
$$($1_COMMAND_LINE); \
if [ "`$$(CAT) $$($1_EXITCODE)`" = "0" ]; then \
break; \
fi; \
export JTREG_STATUS="-status:error,fail"; \
done
endif
run-test-$1: pre-run-test clean-workdir-$1 $$($1_AOT_TARGETS)
$$(call LogWarn)
$$(call LogWarn, Running test '$$($1_TEST)')
$$(call MakeDir, $$($1_TEST_RESULTS_DIR) $$($1_TEST_SUPPORT_DIR))
$$(call ExecuteWithLog, $$($1_TEST_SUPPORT_DIR)/jtreg, ( \
$$(COV_ENVIRONMENT) \
$$(JAVA) $$($1_JTREG_LAUNCHER_OPTIONS) \
-Dprogram=jtreg -jar $$(JT_HOME)/lib/jtreg.jar \
$$($1_JTREG_BASIC_OPTIONS) \
-testjdk:$$(JDK_UNDER_TEST) \
-dir:$$(JTREG_TOPDIR) \
-reportDir:$$($1_TEST_RESULTS_DIR) \
-workDir:$$($1_TEST_SUPPORT_DIR) \
$$(JTREG_OPTIONS) \
$$(JTREG_FAILURE_HANDLER_OPTIONS) \
$$(JTREG_COV_OPTIONS) \
$$($1_TEST_NAME) \
&& $$(ECHO) $$$$? > $$($1_EXITCODE) \
|| $$(ECHO) $$$$? > $$($1_EXITCODE) \
$$(COV_ENVIRONMENT) $$($1_COMMAND_LINE) \
))
$1_RESULT_FILE := $$($1_TEST_RESULTS_DIR)/text/stats.txt

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -345,7 +345,7 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK],
# When compiling code to be executed by the Boot JDK, force compatibility with the
# oldest supported bootjdk.
BOOT_JDK_SOURCETARGET="-source 9 -target 9"
BOOT_JDK_SOURCETARGET="-source 13 -target 13"
AC_SUBST(BOOT_JDK_SOURCETARGET)
AC_SUBST(JAVAC_FLAGS)

@ -347,7 +347,8 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
# Only enable ZGC on supported platforms
AC_MSG_CHECKING([if zgc can be built])
if (test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xx86_64") || \
(test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xaarch64") ||
(test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xaarch64") || \
(test "x$OPENJDK_TARGET_OS" = "xwindows" && test "x$OPENJDK_TARGET_CPU" = "xx86_64") || \
(test "x$OPENJDK_TARGET_OS" = "xmacosx" && test "x$OPENJDK_TARGET_CPU" = "xx86_64"); then
AC_MSG_RESULT([yes])
else

@ -128,6 +128,7 @@ endif
JRE_TOOL_MODULES += \
jdk.jdwp.agent \
jdk.incubator.jpackage \
jdk.pack \
jdk.scripting.nashorn.shell \
#
@ -149,6 +150,7 @@ DOCS_MODULES += \
jdk.editpad \
jdk.hotspot.agent \
jdk.httpserver \
jdk.incubator.jpackage \
jdk.jartool \
jdk.javadoc \
jdk.jcmd \
@ -242,6 +244,13 @@ ifeq ($(ENABLE_AOT), false)
MODULES_FILTER += jdk.aot
endif
################################################################################
# jpackage is only on windows, macosx, and linux
ifeq ($(call isTargetOs, windows macosx linux), false)
MODULES_FILTER += jdk.incubator.jpackage
endif
################################################################################
# Module list macros

@ -397,6 +397,7 @@ endef
# ARFLAGS the archiver flags to be used
# OBJECT_DIR the directory where we store the object files
# OUTPUT_DIR the directory where the resulting binary is put
# SYMBOLS_DIR the directory where the debug symbols are put, defaults to OUTPUT_DIR
# INCLUDES only pick source from these directories
# EXCLUDES do not pick source from these directories
# INCLUDE_FILES only compile exactly these files!
@ -533,8 +534,6 @@ define SetupNativeCompilationBody
$$(call SetIfEmpty, $1_SYSROOT_CFLAGS, $$($$($1_TOOLCHAIN)_SYSROOT_CFLAGS))
$$(call SetIfEmpty, $1_SYSROOT_LDFLAGS, $$($$($1_TOOLCHAIN)_SYSROOT_LDFLAGS))
# Make sure the dirs exist.
$$(call MakeDir, $$($1_OBJECT_DIR) $$($1_OUTPUT_DIR))
$$(foreach d, $$($1_SRC), $$(if $$(wildcard $$d), , \
$$(error SRC specified to SetupNativeCompilation $1 contains missing directory $$d)))
@ -911,30 +910,31 @@ define SetupNativeCompilationBody
ifeq ($$($1_COPY_DEBUG_SYMBOLS), true)
ifneq ($$($1_DEBUG_SYMBOLS), false)
$$(call SetIfEmpty, $1_SYMBOLS_DIR, $$($1_OUTPUT_DIR))
# Only copy debug symbols for dynamic libraries and programs.
ifneq ($$($1_TYPE), STATIC_LIBRARY)
# Generate debuginfo files.
ifeq ($(call isTargetOs, windows), true)
$1_EXTRA_LDFLAGS += -debug "-pdb:$$($1_OUTPUT_DIR)/$$($1_NOSUFFIX).pdb" \
"-map:$$($1_OUTPUT_DIR)/$$($1_NOSUFFIX).map"
$1_DEBUGINFO_FILES := $$($1_OUTPUT_DIR)/$$($1_NOSUFFIX).pdb \
$$($1_OUTPUT_DIR)/$$($1_NOSUFFIX).map
$1_EXTRA_LDFLAGS += -debug "-pdb:$$($1_SYMBOLS_DIR)/$$($1_NOSUFFIX).pdb" \
"-map:$$($1_SYMBOLS_DIR)/$$($1_NOSUFFIX).map"
$1_DEBUGINFO_FILES := $$($1_SYMBOLS_DIR)/$$($1_NOSUFFIX).pdb \
$$($1_SYMBOLS_DIR)/$$($1_NOSUFFIX).map
else ifeq ($(call isTargetOs, linux solaris), true)
$1_DEBUGINFO_FILES := $$($1_OUTPUT_DIR)/$$($1_NOSUFFIX).debuginfo
$1_DEBUGINFO_FILES := $$($1_SYMBOLS_DIR)/$$($1_NOSUFFIX).debuginfo
# Setup the command line creating debuginfo files, to be run after linking.
# It cannot be run separately since it updates the original target file
$1_CREATE_DEBUGINFO_CMDS := \
$$($1_OBJCOPY) --only-keep-debug $$($1_TARGET) $$($1_DEBUGINFO_FILES) $$(NEWLINE) \
$(CD) $$($1_OUTPUT_DIR) && \
$(CD) $$($1_SYMBOLS_DIR) && \
$$($1_OBJCOPY) --add-gnu-debuglink=$$($1_DEBUGINFO_FILES) $$($1_TARGET)
else ifeq ($(call isTargetOs, macosx), true)
$1_DEBUGINFO_FILES := \
$$($1_OUTPUT_DIR)/$$($1_BASENAME).dSYM/Contents/Info.plist \
$$($1_OUTPUT_DIR)/$$($1_BASENAME).dSYM/Contents/Resources/DWARF/$$($1_BASENAME)
$$($1_SYMBOLS_DIR)/$$($1_BASENAME).dSYM/Contents/Info.plist \
$$($1_SYMBOLS_DIR)/$$($1_BASENAME).dSYM/Contents/Resources/DWARF/$$($1_BASENAME)
$1_CREATE_DEBUGINFO_CMDS := \
$(DSYMUTIL) --out $$($1_OUTPUT_DIR)/$$($1_BASENAME).dSYM $$($1_TARGET)
$(DSYMUTIL) --out $$($1_SYMBOLS_DIR)/$$($1_BASENAME).dSYM $$($1_TARGET)
endif
# Since the link rule creates more than one file that we want to track,
@ -956,14 +956,14 @@ define SetupNativeCompilationBody
$1 += $$($1_DEBUGINFO_FILES)
ifeq ($$($1_ZIP_EXTERNAL_DEBUG_SYMBOLS), true)
$1_DEBUGINFO_ZIP := $$($1_OUTPUT_DIR)/$$($1_NOSUFFIX).diz
$1_DEBUGINFO_ZIP := $$($1_SYMBOLS_DIR)/$$($1_NOSUFFIX).diz
$1 += $$($1_DEBUGINFO_ZIP)
# The dependency on TARGET is needed for debuginfo files
# to be rebuilt properly.
$$($1_DEBUGINFO_ZIP): $$($1_DEBUGINFO_FILES) $$($1_TARGET)
$(CD) $$($1_OUTPUT_DIR) && \
$(ZIPEXE) -q -r $$@ $$(subst $$($1_OUTPUT_DIR)/,, $$($1_DEBUGINFO_FILES))
$(CD) $$($1_SYMBOLS_DIR) && \
$(ZIPEXE) -q -r $$@ $$(subst $$($1_SYMBOLS_DIR)/,, $$($1_DEBUGINFO_FILES))
endif
endif # !STATIC_LIBRARY
@ -999,6 +999,7 @@ define SetupNativeCompilationBody
$$($1_TARGET): $$($1_TARGET_DEPS)
$$(call LogInfo, Building static library $$($1_BASENAME))
$$(call MakeDir, $$($1_OUTPUT_DIR) $$($1_SYMBOLS_DIR))
$$(call ExecuteWithLog, $$($1_OBJECT_DIR)/$$($1_SAFE_NAME)_link, \
$$($1_AR) $$($1_ARFLAGS) $(AR_OUT_OPTION)$$($1_TARGET) $$($1_ALL_OBJS) \
$$($1_RES))
@ -1100,7 +1101,9 @@ define SetupNativeCompilationBody
# Keep as much as possible on one execution line for best performance
# on Windows
$$(call LogInfo, Linking $$($1_BASENAME))
$$(call MakeDir, $$($1_OUTPUT_DIR) $$($1_SYMBOLS_DIR))
ifeq ($(call isTargetOs, windows), true)
$$(call ExecuteWithLog, $$($1_OBJECT_DIR)/$$($1_SAFE_NAME)_link, \
$$($1_LD) $$($1_LDFLAGS) $$($1_EXTRA_LDFLAGS) $$($1_SYSROOT_LDFLAGS) \
$(LD_OUT_OPTION)$$($1_TARGET) $$($1_LD_OBJ_ARG) $$($1_RES) $$(GLOBAL_LIBS) \

@ -1043,7 +1043,7 @@ var getJibProfilesDependencies = function (input, common) {
jtreg: {
server: "javare",
revision: "4.2",
build_number: "b14",
build_number: "b16",
checksum_file: "MD5_VALUES",
file: "jtreg_bin-4.2.zip",
environment_name: "JT_HOME",

@ -468,7 +468,7 @@ JDWP "Java(tm) Debug Wire Protocol"
"<li>deleting a method</li>"
"<li>changing class modifiers</li>"
"<li>changing method modifiers</li>"
"<li>changing the <code>NestHost</code> or <code>NestMembers</code> class attributes</li>"
"<li>changing the <code>NestHost</code>, <code>NestMembers</code>, or <code>Record</code> class attributes</li>"
"</ul>"
"<p>"
"Requires canRedefineClasses capability - see "
@ -3167,8 +3167,8 @@ JDWP "Java(tm) Debug Wire Protocol"
"than its counterpart in the old class version and "
"canUnrestrictedlyRedefineClasses is false.")
(Constant CLASS_ATTRIBUTE_CHANGE_NOT_IMPLEMENTED
=72 "The new class version has different NestHost or "
"NestMembers class attribute and "
=72 "The new class version has a different NestHost, "
"NestMembers, or Record class attribute and "
"canUnrestrictedlyRedefineClasses is false.")
(Constant NOT_IMPLEMENTED =99 "The functionality is not implemented in "
"this virtual machine.")

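For context, the Record class attribute referenced above is emitted for record classes, a preview feature at this stage; a minimal, hedged sketch of a class that would carry it:

// Illustrative only: a record whose class file carries the Record attribute.
// Per the spec text above, a redefinition that changes that attribute (for
// example, adding or removing a component) fails with
// CLASS_ATTRIBUTE_CHANGE_NOT_IMPLEMENTED (=72) unless the VM reports
// canUnrestrictedlyRedefineClasses.
public record Point(int x, int y) { }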
@ -122,6 +122,7 @@ JVM_GetNestMembers
JVM_GetPrimitiveArrayElement
JVM_GetProperties
JVM_GetProtectionDomain
JVM_GetRecordComponents
JVM_GetSimpleBinaryName
JVM_GetStackAccessControlContext
JVM_GetSystemPackage
@ -143,6 +144,7 @@ JVM_IsArrayClass
JVM_IsConstructorIx
JVM_IsInterface
JVM_IsPrimitiveClass
JVM_IsRecord
JVM_IsSameClassPackage
JVM_IsSupportedJNIVersion
JVM_IsThreadAlive
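The two record-related symbols added above back the corresponding reflection methods; a hedged Java sketch of how they surface (assuming the JDK 14 preview API, where Class.isRecord() bottoms out in JVM_IsRecord and Class.getRecordComponents() in JVM_GetRecordComponents):

import java.lang.reflect.RecordComponent;

public class RecordIntrospection {
    record Point(int x, int y) { }

    public static void main(String[] args) {
        System.out.println(Point.class.isRecord());            // true
        for (RecordComponent rc : Point.class.getRecordComponents()) {
            System.out.println(rc.getName() + " : " + rc.getType());  // x : int, y : int
        }
    }
}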

@ -157,9 +157,9 @@ abstract class AbstractLDMLHandler<V> extends DefaultHandler {
}
}
void pushStringListElement(String qName, Attributes attributes, int index) {
void pushStringListElement(String qName, Attributes attributes, int index, String count) {
if (!pushIfIgnored(qName, attributes)) {
currentContainer = new StringListElement(qName, currentContainer, index);
currentContainer = new StringListElement(qName, currentContainer, index, count);
}
}

@ -242,14 +242,14 @@ class Bundle {
if (i < size) {
pattern = patterns.get(i);
if (!pattern.isEmpty()) {
return pattern;
return "{" + pattern + "}";
}
}
// if not found, try parent
if (i < psize) {
pattern = pList.get(i);
if (!pattern.isEmpty()) {
return pattern;
return "{" + pattern + "}";
}
}
// bail out with empty string

@ -70,6 +70,7 @@ public class CLDRConverter {
private static String LIKELYSUBTAGS_SOURCE_FILE;
private static String TIMEZONE_SOURCE_FILE;
private static String WINZONES_SOURCE_FILE;
private static String PLURALS_SOURCE_FILE;
static String DESTINATION_DIR = "build/gensrc";
static final String LOCALE_NAME_PREFIX = "locale.displayname.";
@ -93,6 +94,7 @@ public class CLDRConverter {
private static SupplementDataParseHandler handlerSuppl;
private static LikelySubtagsParseHandler handlerLikelySubtags;
private static WinZonesParseHandler handlerWinZones;
static PluralsParseHandler handlerPlurals;
static SupplementalMetadataParseHandler handlerSupplMeta;
static NumberingSystemsParseHandler handlerNumbering;
static MetaZonesParseHandler handlerMetaZones;
@ -244,6 +246,7 @@ public class CLDRConverter {
TIMEZONE_SOURCE_FILE = CLDR_BASE + "/bcp47/timezone.xml";
SPPL_META_SOURCE_FILE = CLDR_BASE + "/supplemental/supplementalMetadata.xml";
WINZONES_SOURCE_FILE = CLDR_BASE + "/supplemental/windowsZones.xml";
PLURALS_SOURCE_FILE = CLDR_BASE + "/supplemental/plurals.xml";
if (BASE_LOCALES.isEmpty()) {
setupBaseLocales("en-US");
@ -264,6 +267,9 @@ public class CLDRConverter {
// Generate Windows tzmappings
generateWindowsTZMappings();
// Generate Plural rules
generatePluralRules();
}
}
@ -451,6 +457,10 @@ public class CLDRConverter {
// Parse windowsZones
handlerWinZones = new WinZonesParseHandler();
parseLDMLFile(new File(WINZONES_SOURCE_FILE), handlerWinZones);
// Parse plurals
handlerPlurals = new PluralsParseHandler();
parseLDMLFile(new File(PLURALS_SOURCE_FILE), handlerPlurals);
}
// Parsers for data in "bcp47" directory
@ -1161,6 +1171,52 @@ public class CLDRConverter {
StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);
}
/**
* Generate ResourceBundle source file for plural rules. The generated
* class is {@code sun.text.resources.PluralRules} which has one public
* two dimensional array {@code rulesArray}. Each array element consists
* of two elements that designate the locale and the locale's plural rules
* string. The latter has the syntax from Unicode Consortium's
* <a href="http://unicode.org/reports/tr35/tr35-numbers.html#Plural_rules_syntax">
* Plural rules syntax</a>. {@code samples} and {@code "other"} are omitted.
*
* @throws Exception
*/
private static void generatePluralRules() throws Exception {
Files.createDirectories(Paths.get(DESTINATION_DIR, "sun", "text", "resources"));
Files.write(Paths.get(DESTINATION_DIR, "sun", "text", "resources", "PluralRules.java"),
Stream.concat(
Stream.concat(
Stream.of(
"package sun.text.resources;",
"public final class PluralRules {",
" public static final String[][] rulesArray = {"
),
pluralRulesStream().sorted()
),
Stream.of(
" };",
"}"
)
)
.collect(Collectors.toList()),
StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);
}
private static Stream<String> pluralRulesStream() {
return handlerPlurals.getData().entrySet().stream()
.filter(e -> !((Map<String, String>)e.getValue()).isEmpty())
.map(e -> {
String loc = e.getKey();
Map<String, String> rules = (Map<String, String>)e.getValue();
return " {\"" + loc + "\", \"" +
rules.entrySet().stream()
.map(rule -> rule.getKey() + ":" + rule.getValue().replaceFirst("@.*", ""))
.map(String::trim)
.collect(Collectors.joining(";")) + "\"},";
});
}
// for debug
static void dumpMap(Map<String, Object> map) {
map.entrySet().stream()
@ -1179,3 +1235,4 @@ public class CLDRConverter {
.forEach(System.out::println);
}
}
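To make the output of generatePluralRules() concrete, here is a hedged sketch of the shape of the generated sun.text.resources.PluralRules source; the locale entries below are illustrative examples, not actual CLDR data:

package sun.text.resources;

public final class PluralRules {
    public static final String[][] rulesArray = {
        // Each row pairs a locale tag with "count:condition" entries joined by ';'.
        // Sample clauses (@integer ..., @decimal ...) are stripped by
        // pluralRulesStream(), and the "other" count is never emitted.
        {"en", "one:i = 1 and v = 0"},
        {"xx", "one:n = 1;few:n = 2..4"},   // hypothetical locale showing ';' joining
    };
}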

@ -54,7 +54,6 @@ class LDMLParseHandler extends AbstractLDMLHandler<Object> {
private String currentContext = ""; // "format"/"stand-alone"
private String currentWidth = ""; // "wide"/"narrow"/"abbreviated"
private String currentStyle = ""; // short, long for decimalFormat
private String compactCount = ""; // one or other for decimalFormat
LDMLParseHandler(String id) {
this.id = id;
@ -577,32 +576,12 @@ class LDMLParseHandler extends AbstractLDMLHandler<Object> {
if (currentStyle == null) {
pushContainer(qName, attributes);
} else {
// The compact number patterns parsing assumes that the order
// of patterns are always in the increasing order of their
// type attribute i.e. type = 1000...
// Between the inflectional forms for a type (e.g.
// count = "one" and count = "other" for type = 1000), it is
// assumed that the count = "one" always appears before
// count = "other"
switch (currentStyle) {
case "short":
case "long":
String count = attributes.getValue("count");
// first pattern of count = "one" or count = "other"
if ((count.equals("one") || count.equals("other"))
&& compactCount.equals("")) {
compactCount = count;
pushStringListElement(qName, attributes,
(int) Math.log10(Double.parseDouble(attributes.getValue("type"))));
} else if ((count.equals("one") || count.equals("other"))
&& compactCount.equals(count)) {
// extract patterns with similar "count"
// attribute value
pushStringListElement(qName, attributes,
(int) Math.log10(Double.parseDouble(attributes.getValue("type"))));
} else {
pushIgnoredContainer(qName);
}
pushStringListElement(qName, attributes,
(int) Math.log10(Double.parseDouble(attributes.getValue("type"))),
attributes.getValue("count"));
break;
default:
pushIgnoredContainer(qName);
@ -1051,7 +1030,6 @@ class LDMLParseHandler extends AbstractLDMLHandler<Object> {
break;
case "decimalFormatLength":
currentStyle = "";
compactCount = "";
putIfEntry();
break;
case "currencyFormats":

@ -0,0 +1,105 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package build.tools.cldrconverter;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Stream;
import org.xml.sax.Attributes;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
/**
* Handles parsing of files in Locale Data Markup Language for
* plurals.xml
*/
class PluralsParseHandler extends AbstractLDMLHandler<Object> {
@Override
public InputSource resolveEntity(String publicID, String systemID) throws IOException, SAXException {
// avoid HTTP traffic to unicode.org
if (systemID.startsWith(CLDRConverter.SPPL_LDML_DTD_SYSTEM_ID)) {
return new InputSource((new File(CLDRConverter.LOCAL_SPPL_LDML_DTD)).toURI().toString());
}
return null;
}
@Override
public void startElement(String uri, String localName, String qName, Attributes attributes) throws SAXException {
switch (qName) {
case "plurals":
// Only deal with "cardinal" type for now.
if (attributes.getValue("type").equals("cardinal")) {
pushContainer(qName, attributes);
} else {
// ignore
pushIgnoredContainer(qName);
}
break;
case "pluralRules":
// key: locales
pushKeyContainer(qName, attributes, attributes.getValue("locales"));
break;
case "pluralRule":
pushStringEntry(qName, attributes, attributes.getValue("count"));
break;
default:
// treat anything else as a container
pushContainer(qName, attributes);
break;
}
}
@Override
public void endElement(String uri, String localName, String qName) throws SAXException {
assert qName.equals(currentContainer.getqName()) : "current=" + currentContainer.getqName() + ", param=" + qName;
switch (qName) {
case "pluralRule":
assert !(currentContainer instanceof Entry);
Entry entry = (Entry)currentContainer;
final String count = entry.getKey();
final String rule = (String)entry.getValue();
String locales = ((KeyContainer)(currentContainer.getParent())).getKey();
Arrays.stream(locales.split("\\s"))
.forEach(loc -> {
Map<String, String> rules = (Map<String, String>)get(loc);
if (rules == null) {
rules = new HashMap<>();
put(loc, rules);
}
if (!count.equals("other")) {
rules.put(count, rule);
}
});
break;
}
currentContainer = currentContainer.getParent();
}
}

@ -309,7 +309,7 @@ class ResourceBundleGenerator implements BundleGenerator {
// for languageAliasMap
if (CLDRConverter.isBaseModule) {
CLDRConverter.handlerSupplMeta.getLanguageAliasData().forEach((key, value) -> {
out.printf(" languageAliasMap.put(\"%s\", \"%s\");\n", key, value);
out.printf(" languageAliasMap.put(\"%s\", \"%s\");\n", key, value);
});
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,20 +28,22 @@ package build.tools.cldrconverter;
class StringListElement extends Container {
StringListEntry list;
String count;
int index;
StringListElement(String qName, Container parent, int index) {
StringListElement(String qName, Container parent, int index, String count) {
super(qName, parent);
while (!(parent instanceof StringListEntry)) {
parent = parent.getParent();
}
list = (StringListEntry) parent;
this.index = index;
this.count = count;
}
@Override
void addCharacters(char[] characters, int start, int length) {
list.addCharacters(index, characters, start, length);
list.addCharacters(index, count, characters, start, length);
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,13 +38,22 @@ class StringListEntry extends Entry<List<String>> {
value = new ArrayList<>();
}
void addCharacters(int index, char[] characters, int start, int length) {
// fill with empty strings when the patterns start from index > 0
if (value.size() < index) {
IntStream.range(0, index).forEach(i -> value.add(i, ""));
value.add(index, new String(characters, start, length));
void addCharacters(int index, String count, char[] characters, int start, int length) {
int size = value.size();
String elem = count + ":" + new String(characters, start, length);
// quote embedded spaces, if any
elem = elem.replaceAll(" ", "' '");
if (size < index) {
// fill with empty strings when the patterns start from index > size
IntStream.range(size, index).forEach(i -> value.add(i, ""));
value.add(index, elem);
} else if (size == index) {
value.add(index, elem);
} else {
value.add(index, new String(characters, start, length));
// concatenate the pattern with the delimiter ' '
value.set(index, value.get(index) + " " + elem);
}
}
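A standalone re-implementation of the element construction above, to show how compact-number patterns with different count values end up concatenated into a single indexed entry (class and method names here are for illustration only):

import java.util.ArrayList;
import java.util.List;
import java.util.stream.IntStream;

// Illustrative only: mirrors the addCharacters() logic shown in the diff.
class CompactPatternListDemo {
    static final List<String> value = new ArrayList<>();

    static void add(int index, String count, String pattern) {
        int size = value.size();
        String elem = (count + ":" + pattern).replaceAll(" ", "' '"); // quote embedded spaces
        if (size < index) {
            // fill with empty strings when the patterns start from index > size
            IntStream.range(size, index).forEach(i -> value.add(i, ""));
            value.add(index, elem);
        } else if (size == index) {
            value.add(index, elem);
        } else {
            value.set(index, value.get(index) + " " + elem); // join with ' '
        }
    }

    public static void main(String[] args) {
        add(3, "one", "0K");       // type=1000, count="one"
        add(3, "other", "0K");     // type=1000, count="other"
        System.out.println(value); // [, , , one:0K other:0K]
    }
}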

@ -46,6 +46,7 @@ import java.util.Objects;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static java.nio.charset.StandardCharsets.UTF_8;
/**
* Fixup HTML generated by pandoc.
@ -98,6 +99,10 @@ public class Main {
* If no output file is specified, the program will write to standard output.
* Any error messages will be written to the standard error stream.
*
* Consistent with the
* <a href="https://pandoc.org/MANUAL.html#character-encoding">pandoc tool</a>,
* input and output text is encoded as UTF-8.
*
* @param args the command-line arguments
*/
public static void main(String... args) {
@ -184,7 +189,7 @@ public class Main {
if (inFile != null) {
read(inFile);
} else {
read(new BufferedReader(new InputStreamReader(System.in)));
read(new BufferedReader(new InputStreamReader(System.in, UTF_8)));
}
}
}
@ -198,9 +203,9 @@ public class Main {
*/
private Writer openWriter(Path file) throws IOException {
if (file != null) {
return Files.newBufferedWriter(file);
return Files.newBufferedWriter(file, UTF_8);
} else {
return new BufferedWriter(new OutputStreamWriter(System.out) {
return new BufferedWriter(new OutputStreamWriter(System.out, UTF_8) {
@Override
public void close() throws IOException {
flush();
@ -615,7 +620,7 @@ public class Main {
* @param file the file
*/
void read(Path file) {
try (Reader r = Files.newBufferedReader(file)) {
try (Reader r = Files.newBufferedReader(file, UTF_8)) {
this.file = file;
read(r);
} catch (IOException e) {

@ -0,0 +1,30 @@
#
# Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
include LauncherCommon.gmk
$(eval $(call SetupBuildLauncher, jpackage, \
MAIN_CLASS := jdk.incubator.jpackage.main.Main, \
))

@ -29,6 +29,7 @@ $(eval $(call SetupBuildLauncher, jconsole, \
MAIN_CLASS := sun.tools.jconsole.JConsole, \
JAVA_ARGS := --add-opens java.base/java.io=jdk.jconsole \
--add-modules ALL-DEFAULT \
-Djconsole.showOutputViewer, \
-Djconsole.showOutputViewer \
-Djdk.attach.allowAttachSelf=true, \
CFLAGS_windows := -DJAVAW, \
))

@ -0,0 +1,140 @@
#
# Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
include LibCommon.gmk
################################################################################
# Output app launcher library in resources dir, and symbols in the object dir
$(eval $(call SetupJdkLibrary, BUILD_LIB_APPLAUNCHER, \
NAME := applauncher, \
OUTPUT_DIR := $(JDK_OUTPUTDIR)/modules/$(MODULE)/jdk/incubator/jpackage/internal/resources, \
SYMBOLS_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libapplauncher, \
TOOLCHAIN := TOOLCHAIN_LINK_CXX, \
OPTIMIZATION := LOW, \
CFLAGS := $(CXXFLAGS_JDKLIB), \
CFLAGS_windows := -EHsc -DUNICODE -D_UNICODE, \
LDFLAGS := $(LDFLAGS_JDKLIB) $(LDFLAGS_CXX_JDK) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
LIBS := $(LIBCXX), \
LIBS_windows := user32.lib shell32.lib advapi32.lib ole32.lib, \
LIBS_linux := -ldl -lpthread, \
LIBS_macosx := -ldl -framework Cocoa, \
))
$(BUILD_LIB_APPLAUNCHER): $(call FindLib, java.base, java)
TARGETS += $(BUILD_LIB_APPLAUNCHER)
JPACKAGE_APPLAUNCHER_SRC := \
$(TOPDIR)/src/jdk.incubator.jpackage/$(OPENJDK_TARGET_OS)/native/jpackageapplauncher
# Output app launcher executable in resources dir, and symbols in the object dir
$(eval $(call SetupJdkExecutable, BUILD_JPACKAGE_APPLAUNCHEREXE, \
NAME := jpackageapplauncher, \
OUTPUT_DIR := $(JDK_OUTPUTDIR)/modules/$(MODULE)/jdk/incubator/jpackage/internal/resources, \
SYMBOLS_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/jpackageapplauncher, \
SRC := $(JPACKAGE_APPLAUNCHER_SRC), \
TOOLCHAIN := TOOLCHAIN_LINK_CXX, \
OPTIMIZATION := LOW, \
CFLAGS := $(CXXFLAGS_JDKEXE), \
CFLAGS_windows := -EHsc -DLAUNCHERC -DUNICODE -D_UNICODE, \
LDFLAGS := $(LDFLAGS_JDKEXE), \
LIBS_macosx := -framework Cocoa, \
LIBS := $(LIBCXX), \
LIBS_linux := -ldl, \
LIBS_windows := user32.lib shell32.lib advapi32.lib, \
))
TARGETS += $(BUILD_JPACKAGE_APPLAUNCHEREXE)
################################################################################
ifeq ($(call isTargetOs, windows), true)
$(eval $(call SetupJdkLibrary, BUILD_LIB_JPACKAGE, \
NAME := jpackage, \
OPTIMIZATION := LOW, \
CFLAGS := $(CXXFLAGS_JDKLIB), \
CFLAGS_windows := -EHsc -DUNICODE -D_UNICODE, \
LDFLAGS := $(LDFLAGS_JDKLIB) $(LDFLAGS_CXX_JDK) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
LIBS := $(LIBCXX), \
LIBS_windows := user32.lib shell32.lib advapi32.lib ole32.lib, \
))
TARGETS += $(BUILD_LIB_JPACKAGE)
# Build Wix custom action helper
# Output library in resources dir, and symbols in the object dir
$(eval $(call SetupJdkLibrary, BUILD_LIB_WIXHELPER, \
NAME := wixhelper, \
OUTPUT_DIR := $(JDK_OUTPUTDIR)/modules/$(MODULE)/jdk/incubator/jpackage/internal/resources, \
SYMBOLS_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libwixhelper, \
OPTIMIZATION := LOW, \
CFLAGS := $(CXXFLAGS_JDKLIB), \
CFLAGS_windows := -EHsc -DUNICODE -D_UNICODE -MT, \
LDFLAGS := $(LDFLAGS_JDKLIB) $(LDFLAGS_CXX_JDK), \
LIBS := $(LIBCXX), \
LIBS_windows := msi.lib Shlwapi.lib User32.lib, \
))
TARGETS += $(BUILD_LIB_WIXHELPER)
# Build exe installer wrapper for msi installer
$(eval $(call SetupJdkExecutable, BUILD_JPACKAGE_MSIWRAPPER, \
NAME := msiwrapper, \
OUTPUT_DIR := $(JDK_OUTPUTDIR)/modules/$(MODULE)/jdk/incubator/jpackage/internal/resources, \
SYMBOLS_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/msiwrapper, \
SRC := $(TOPDIR)/src/jdk.incubator.jpackage/$(OPENJDK_TARGET_OS)/native/msiwrapper, \
EXTRA_FILES := $(addprefix $(TOPDIR)/src/jdk.incubator.jpackage/$(OPENJDK_TARGET_OS)/native/libjpackage/, \
FileUtils.cpp Log.cpp WinSysInfo.cpp tstrings.cpp WinErrorHandling.cpp ErrorHandling.cpp), \
CFLAGS := $(CXXFLAGS_JDKEXE) -MT \
$(addprefix -I$(TOPDIR)/src/jdk.incubator.jpackage/$(OPENJDK_TARGET_OS)/native/, msiwrapper libjpackage), \
CFLAGS_windows := -EHsc -DUNICODE -D_UNICODE, \
LDFLAGS := $(LDFLAGS_JDKEXE), \
LIBS := $(LIBCXX), \
))
TARGETS += $(BUILD_JPACKAGE_MSIWRAPPER)
# Build non-console version of launcher
$(eval $(call SetupJdkExecutable, BUILD_JPACKAGE_APPLAUNCHERWEXE, \
NAME := jpackageapplauncherw, \
OUTPUT_DIR := $(JDK_OUTPUTDIR)/modules/$(MODULE)/jdk/incubator/jpackage/internal/resources, \
SYMBOLS_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/jpackageapplauncherw, \
SRC := $(JPACKAGE_APPLAUNCHER_SRC), \
TOOLCHAIN := TOOLCHAIN_LINK_CXX, \
OPTIMIZATION := LOW, \
CFLAGS := $(CXXFLAGS_JDKEXE), \
CFLAGS_windows := -EHsc -DUNICODE -D_UNICODE, \
LDFLAGS := $(LDFLAGS_JDKEXE), \
LIBS := $(LIBCXX), \
LIBS_windows := user32.lib shell32.lib advapi32.lib, \
))
TARGETS += $(BUILD_JPACKAGE_APPLAUNCHERWEXE)
endif

@ -1857,13 +1857,14 @@ static enum RC rc_class(OptoReg::Name reg) {
// we have 30 int registers * 2 halves
// (rscratch1 and rscratch2 are omitted)
int slots_of_int_registers = RegisterImpl::max_slots_per_register * (RegisterImpl::number_of_registers - 2);
if (reg < 60) {
if (reg < slots_of_int_registers) {
return rc_int;
}
// we have 32 float register * 2 halves
if (reg < 60 + 128) {
// we have 32 float register * 4 halves
if (reg < slots_of_int_registers + FloatRegisterImpl::max_slots_per_register * FloatRegisterImpl::number_of_registers) {
return rc_float;
}
@ -2293,6 +2294,24 @@ const bool Matcher::require_postalloc_expand = false;
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;
// No support for generic vector operands.
const bool Matcher::supports_generic_vector_operands = false;
MachOper* Matcher::specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg) {
ShouldNotReachHere(); // generic vector operands not supported
return NULL;
}
bool Matcher::is_generic_reg2reg_move(MachNode* m) {
ShouldNotReachHere(); // generic vector operands not supported
return false;
}
bool Matcher::is_generic_vector(MachOper* opnd) {
ShouldNotReachHere(); // generic vector operands not supported
return false;
}
// This affects two different things:
// - how Decode nodes are matched
// - how ImplicitNullCheck opportunities are recognized

@ -604,7 +604,9 @@ class InternalAddress: public Address {
InternalAddress(address target) : Address(target, relocInfo::internal_word_type) {}
};
const int FPUStateSizeInWords = 32 * 2;
const int FPUStateSizeInWords = FloatRegisterImpl::number_of_registers *
FloatRegisterImpl::save_slots_per_register;
typedef enum {
PLDL1KEEP = 0b00000, PLDL1STRM, PLDL2KEEP, PLDL2STRM, PLDL3KEEP, PLDL3STRM,
PSTL1KEEP = 0b10000, PSTL1STRM, PSTL2KEEP, PSTL2STRM, PSTL3KEEP, PSTL3STRM,

@ -23,9 +23,9 @@
#include "precompiled.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
//
// The heap can have three different layouts, depending on the max heap size.
@ -142,7 +142,7 @@ uintptr_t ZPlatformAddressBase() {
size_t ZPlatformAddressOffsetBits() {
const size_t min_address_offset_bits = 42; // 4TB
const size_t max_address_offset_bits = 44; // 16TB
const size_t address_offset = ZUtils::round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
const size_t address_offset_bits = log2_intptr(address_offset);
return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
}

@ -26,10 +26,12 @@
#include "precompiled.hpp"
#include "register_aarch64.hpp"
const int ConcreteRegisterImpl::max_gpr = RegisterImpl::number_of_registers << 1;
const int ConcreteRegisterImpl::max_gpr = RegisterImpl::number_of_registers *
RegisterImpl::max_slots_per_register;
const int ConcreteRegisterImpl::max_fpr
= ConcreteRegisterImpl::max_gpr + (FloatRegisterImpl::number_of_registers << 1);
= ConcreteRegisterImpl::max_gpr +
FloatRegisterImpl::number_of_registers * FloatRegisterImpl::max_slots_per_register;
const char* RegisterImpl::name() const {
const char* names[number_of_registers] = {

@ -44,7 +44,8 @@ class RegisterImpl: public AbstractRegisterImpl {
enum {
number_of_registers = 32,
number_of_byte_registers = 32,
number_of_registers_for_jvmci = 34 // Including SP and ZR.
number_of_registers_for_jvmci = 34, // Including SP and ZR.
max_slots_per_register = 2
};
// derived registers, offsets, and addresses
@ -127,7 +128,10 @@ inline FloatRegister as_FloatRegister(int encoding) {
class FloatRegisterImpl: public AbstractRegisterImpl {
public:
enum {
number_of_registers = 32
number_of_registers = 32,
max_slots_per_register = 4,
save_slots_per_register = 2,
extra_save_slots_per_register = max_slots_per_register - save_slots_per_register
};
// construction
@ -193,8 +197,8 @@ class ConcreteRegisterImpl : public AbstractRegisterImpl {
// There is no requirement that any ordering here matches any ordering c2 gives
// it's optoregs.
number_of_registers = (2 * RegisterImpl::number_of_registers +
4 * FloatRegisterImpl::number_of_registers +
number_of_registers = (RegisterImpl::max_slots_per_register * RegisterImpl::number_of_registers +
FloatRegisterImpl::max_slots_per_register * FloatRegisterImpl::number_of_registers +
1) // flags
};

@ -33,6 +33,7 @@
#include "interpreter/interp_masm.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/safepointMechanism.hpp"
@ -99,15 +100,15 @@ class RegisterSaver {
// Capture info about frame layout
enum layout {
fpu_state_off = 0,
fpu_state_end = fpu_state_off+FPUStateSizeInWords-1,
fpu_state_end = fpu_state_off + FPUStateSizeInWords - 1,
// The frame sender code expects that rfp will be in
// the "natural" place and will override any oopMap
// setting for it. We must therefore force the layout
// so that it agrees with the frame sender code.
r0_off = fpu_state_off+FPUStateSizeInWords,
rfp_off = r0_off + 30 * 2,
return_off = rfp_off + 2, // slot for return address
reg_save_size = return_off + 2};
r0_off = fpu_state_off + FPUStateSizeInWords,
rfp_off = r0_off + (RegisterImpl::number_of_registers - 2) * RegisterImpl::max_slots_per_register,
return_off = rfp_off + RegisterImpl::max_slots_per_register, // slot for return address
reg_save_size = return_off + RegisterImpl::max_slots_per_register};
};
@ -115,19 +116,20 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
#if COMPILER2_OR_JVMCI
if (save_vectors) {
// Save upper half of vector registers
int vect_words = 32 * 8 / wordSize;
int vect_words = FloatRegisterImpl::number_of_registers * FloatRegisterImpl::extra_save_slots_per_register /
VMRegImpl::slots_per_word;
additional_frame_words += vect_words;
}
#else
assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
#endif
int frame_size_in_bytes = align_up(additional_frame_words*wordSize +
reg_save_size*BytesPerInt, 16);
int frame_size_in_bytes = align_up(additional_frame_words * wordSize +
reg_save_size * BytesPerInt, 16);
// OopMap frame size is in compiler stack slots (jint's) not bytes or words
int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
// The caller will allocate additional_frame_words
int additional_frame_slots = additional_frame_words*wordSize / BytesPerInt;
int additional_frame_slots = additional_frame_words * wordSize / BytesPerInt;
// CodeBlob frame size is in words.
int frame_size_in_words = frame_size_in_bytes / wordSize;
*total_frame_words = frame_size_in_words;
@ -147,10 +149,10 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
for (int i = 0; i < RegisterImpl::number_of_registers; i++) {
Register r = as_Register(i);
if (r < rheapbase && r != rscratch1 && r != rscratch2) {
int sp_offset = 2 * (i + 32); // SP offsets are in 4-byte words,
// register slots are 8 bytes
// wide, 32 floating-point
// registers
// SP offsets are in 4-byte words.
// Register slots are 8 bytes wide, 32 floating-point registers.
int sp_offset = RegisterImpl::max_slots_per_register * i +
FloatRegisterImpl::save_slots_per_register * FloatRegisterImpl::number_of_registers;
oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset + additional_frame_slots),
r->as_VMReg());
}
@ -158,7 +160,8 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
for (int i = 0; i < FloatRegisterImpl::number_of_registers; i++) {
FloatRegister r = as_FloatRegister(i);
int sp_offset = save_vectors ? (4 * i) : (2 * i);
int sp_offset = save_vectors ? (FloatRegisterImpl::max_slots_per_register * i) :
(FloatRegisterImpl::save_slots_per_register * i);
oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
r->as_VMReg());
}

@ -33,15 +33,17 @@ void VMRegImpl::set_regName() {
Register reg = ::as_Register(0);
int i;
for (i = 0; i < ConcreteRegisterImpl::max_gpr ; ) {
regName[i++] = reg->name();
regName[i++] = reg->name();
for (int j = 0 ; j < RegisterImpl::max_slots_per_register ; j++) {
regName[i++] = reg->name();
}
reg = reg->successor();
}
FloatRegister freg = ::as_FloatRegister(0);
for ( ; i < ConcreteRegisterImpl::max_fpr ; ) {
regName[i++] = freg->name();
regName[i++] = freg->name();
for (int j = 0 ; j < FloatRegisterImpl::max_slots_per_register ; j++) {
regName[i++] = freg->name();
}
freg = freg->successor();
}

@ -38,13 +38,14 @@ inline Register as_Register() {
assert( is_Register(), "must be");
// Yuk
return ::as_Register(value() >> 1);
return ::as_Register(value() / RegisterImpl::max_slots_per_register);
}
inline FloatRegister as_FloatRegister() {
assert( is_FloatRegister() && is_even(value()), "must be" );
// Yuk
return ::as_FloatRegister((value() - ConcreteRegisterImpl::max_gpr) >> 1);
return ::as_FloatRegister((value() - ConcreteRegisterImpl::max_gpr) /
FloatRegisterImpl::max_slots_per_register);
}
inline bool is_concrete() {

@ -28,11 +28,12 @@
inline VMReg RegisterImpl::as_VMReg() {
if( this==noreg ) return VMRegImpl::Bad();
return VMRegImpl::as_VMReg(encoding() << 1 );
return VMRegImpl::as_VMReg(encoding() * RegisterImpl::max_slots_per_register);
}
inline VMReg FloatRegisterImpl::as_VMReg() {
return VMRegImpl::as_VMReg((encoding() << 1) + ConcreteRegisterImpl::max_gpr);
return VMRegImpl::as_VMReg((encoding() * FloatRegisterImpl::max_slots_per_register) +
ConcreteRegisterImpl::max_gpr);
}
#endif // CPU_AARCH64_VMREG_AARCH64_INLINE_HPP

@ -1077,6 +1077,24 @@ const bool Matcher::need_masked_shift_count = true;
const bool Matcher::convi2l_type_required = true;
// No support for generic vector operands.
const bool Matcher::supports_generic_vector_operands = false;
MachOper* Matcher::specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg) {
ShouldNotReachHere(); // generic vector operands not supported
return NULL;
}
bool Matcher::is_generic_reg2reg_move(MachNode* m) {
ShouldNotReachHere(); // generic vector operands not supported
return false;
}
bool Matcher::is_generic_vector(MachOper* opnd) {
ShouldNotReachHere(); // generic vector operands not supported
return false;
}
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?

@ -2426,6 +2426,24 @@ const bool Matcher::require_postalloc_expand = true;
// PowerPC requires masked shift counts.
const bool Matcher::need_masked_shift_count = true;
// No support for generic vector operands.
const bool Matcher::supports_generic_vector_operands = false;
MachOper* Matcher::specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg) {
ShouldNotReachHere(); // generic vector operands not supported
return NULL;
}
bool Matcher::is_generic_reg2reg_move(MachNode* m) {
ShouldNotReachHere(); // generic vector operands not supported
return false;
}
bool Matcher::is_generic_vector(MachOper* opnd) {
ShouldNotReachHere(); // generic vector operands not supported
return false;
}
// This affects two different things:
// - how Decode nodes are matched
// - how ImplicitNullCheck opportunities are recognized

@ -1658,6 +1658,24 @@ const bool Matcher::require_postalloc_expand = false;
// Constant shift counts are handled in Ideal phase.
const bool Matcher::need_masked_shift_count = false;
// No support for generic vector operands.
const bool Matcher::supports_generic_vector_operands = false;
MachOper* Matcher::specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg) {
ShouldNotReachHere(); // generic vector operands not supported
return NULL;
}
bool Matcher::is_generic_reg2reg_move(MachNode* m) {
ShouldNotReachHere(); // generic vector operands not supported
return false;
}
bool Matcher::is_generic_vector(MachOper* opnd) {
ShouldNotReachHere(); // generic vector operands not supported
return false;
}
// Set this as clone_shift_expressions.
bool Matcher::narrow_oop_use_complex_address() {
if (CompressedOops::base() == NULL && CompressedOops::shift() == 0) return true;

@ -1815,6 +1815,24 @@ const bool Matcher::require_postalloc_expand = false;
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;
// No support for generic vector operands.
const bool Matcher::supports_generic_vector_operands = false;
MachOper* Matcher::specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg) {
ShouldNotReachHere(); // generic vector operands not supported
return NULL;
}
bool Matcher::is_generic_reg2reg_move(MachNode* m) {
ShouldNotReachHere(); // generic vector operands not supported
return false;
}
bool Matcher::is_generic_vector(MachOper* opnd) {
ShouldNotReachHere(); // generic vector operands not supported
return false;
}
bool Matcher::narrow_oop_use_complex_address() {
assert(UseCompressedOops, "only for compressed oops code");
return false;

@ -517,8 +517,11 @@ private:
// Sort by size, largest first
_xmm_registers.sort(xmm_compare_register_size);
// On Windows, the caller reserves stack space for spilling register arguments
const int arg_spill_size = frame::arg_reg_save_area_bytes;
// Stack pointer must be 16 bytes aligned for the call
_spill_offset = _spill_size = align_up(xmm_spill_size + gp_spill_size, 16);
_spill_offset = _spill_size = align_up(xmm_spill_size + gp_spill_size + arg_spill_size, 16);
}
public:

@ -23,9 +23,9 @@
#include "precompiled.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
//
// The heap can have three different layouts, depending on the max heap size.
@ -142,7 +142,7 @@ uintptr_t ZPlatformAddressBase() {
size_t ZPlatformAddressOffsetBits() {
const size_t min_address_offset_bits = 42; // 4TB
const size_t max_address_offset_bits = 44; // 16TB
const size_t address_offset = ZUtils::round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
const size_t address_offset_bits = log2_intptr(address_offset);
return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
}

@ -1356,7 +1356,7 @@ public:
void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); }
void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
if (UseAVX > 2) {
if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
Assembler::vinserti32x4(dst, dst, src, imm8);
} else if (UseAVX > 1) {
// vinserti128 is available only in AVX2
@ -1367,7 +1367,7 @@ public:
}
void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
if (UseAVX > 2) {
if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
Assembler::vinserti32x4(dst, dst, src, imm8);
} else if (UseAVX > 1) {
// vinserti128 is available only in AVX2
@ -1378,7 +1378,7 @@ public:
}
void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
if (UseAVX > 2) {
if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
Assembler::vextracti32x4(dst, src, imm8);
} else if (UseAVX > 1) {
// vextracti128 is available only in AVX2
@ -1389,7 +1389,7 @@ public:
}
void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
if (UseAVX > 2) {
if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
Assembler::vextracti32x4(dst, src, imm8);
} else if (UseAVX > 1) {
// vextracti128 is available only in AVX2
@ -1414,7 +1414,7 @@ public:
}
void vinsertf128_high(XMMRegister dst, XMMRegister src) {
if (UseAVX > 2) {
if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
Assembler::vinsertf32x4(dst, dst, src, 1);
} else {
Assembler::vinsertf128(dst, dst, src, 1);
@ -1422,7 +1422,7 @@ public:
}
void vinsertf128_high(XMMRegister dst, Address src) {
if (UseAVX > 2) {
if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
Assembler::vinsertf32x4(dst, dst, src, 1);
} else {
Assembler::vinsertf128(dst, dst, src, 1);
@ -1430,7 +1430,7 @@ public:
}
void vextractf128_high(XMMRegister dst, XMMRegister src) {
if (UseAVX > 2) {
if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
Assembler::vextractf32x4(dst, src, 1);
} else {
Assembler::vextractf128(dst, src, 1);
@ -1438,7 +1438,7 @@ public:
}
void vextractf128_high(Address dst, XMMRegister src) {
if (UseAVX > 2) {
if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
Assembler::vextractf32x4(dst, src, 1);
} else {
Assembler::vextractf128(dst, src, 1);
@ -1480,7 +1480,7 @@ public:
}
void vinsertf128_low(XMMRegister dst, XMMRegister src) {
if (UseAVX > 2) {
if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
Assembler::vinsertf32x4(dst, dst, src, 0);
} else {
Assembler::vinsertf128(dst, dst, src, 0);
@ -1488,7 +1488,7 @@ public:
}
void vinsertf128_low(XMMRegister dst, Address src) {
if (UseAVX > 2) {
if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
Assembler::vinsertf32x4(dst, dst, src, 0);
} else {
Assembler::vinsertf128(dst, dst, src, 0);
@ -1496,7 +1496,7 @@ public:
}
void vextractf128_low(XMMRegister dst, XMMRegister src) {
if (UseAVX > 2) {
if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
Assembler::vextractf32x4(dst, src, 0);
} else {
Assembler::vextractf128(dst, src, 0);
@ -1504,7 +1504,7 @@ public:
}
void vextractf128_low(Address dst, XMMRegister src) {
if (UseAVX > 2) {
if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
Assembler::vextractf32x4(dst, src, 0);
} else {
Assembler::vextractf128(dst, src, 0);

(File diff suppressed because it is too large)

@ -4130,72 +4130,6 @@ operand vlRegD() %{
interface(REG_INTER);
%}
// Vectors : note, we use legacy registers to avoid extra (unneeded in 32-bit VM)
// runtime code generation via reg_class_dynamic.
operand vecS() %{
constraint(ALLOC_IN_RC(vectors_reg_legacy));
match(VecS);
format %{ %}
interface(REG_INTER);
%}
operand legVecS() %{
constraint(ALLOC_IN_RC(vectors_reg_legacy));
match(VecS);
format %{ %}
interface(REG_INTER);
%}
operand vecD() %{
constraint(ALLOC_IN_RC(vectord_reg_legacy));
match(VecD);
format %{ %}
interface(REG_INTER);
%}
operand legVecD() %{
constraint(ALLOC_IN_RC(vectord_reg_legacy));
match(VecD);
format %{ %}
interface(REG_INTER);
%}
operand vecX() %{
constraint(ALLOC_IN_RC(vectorx_reg_legacy));
match(VecX);
format %{ %}
interface(REG_INTER);
%}
operand legVecX() %{
constraint(ALLOC_IN_RC(vectorx_reg_legacy));
match(VecX);
format %{ %}
interface(REG_INTER);
%}
operand vecY() %{
constraint(ALLOC_IN_RC(vectory_reg_legacy));
match(VecY);
format %{ %}
interface(REG_INTER);
%}
operand legVecY() %{
constraint(ALLOC_IN_RC(vectory_reg_legacy));
match(VecY);
format %{ %}
interface(REG_INTER);
%}
//----------Memory Operands----------------------------------------------------
// Direct Memory Operand
operand direct(immP addr) %{
@ -11795,12 +11729,12 @@ instruct string_equals(eDIRegP str1, eSIRegP str2, eCXRegI cnt, eAXRegI result,
// fast search of substring with known size.
instruct string_indexof_conL(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, immI int_cnt2,
eBXRegI result, regD vec, eAXRegI cnt2, eCXRegI tmp, eFlagsReg cr) %{
eBXRegI result, regD vec1, eAXRegI cnt2, eCXRegI tmp, eFlagsReg cr) %{
predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL));
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr);
effect(TEMP vec1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr);
format %{ "String IndexOf byte[] $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $vec, $cnt1, $cnt2, $tmp" %}
format %{ "String IndexOf byte[] $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $vec1, $cnt1, $cnt2, $tmp" %}
ins_encode %{
int icnt2 = (int)$int_cnt2$$constant;
if (icnt2 >= 16) {
@ -11809,13 +11743,13 @@ instruct string_indexof_conL(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, immI int_
__ string_indexofC8($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
icnt2, $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::LL);
$vec1$$XMMRegister, $tmp$$Register, StrIntrinsicNode::LL);
} else {
// Small strings are loaded through stack if they cross page boundary.
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
icnt2, $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::LL);
$vec1$$XMMRegister, $tmp$$Register, StrIntrinsicNode::LL);
}
%}
ins_pipe( pipe_slow );
@ -11823,12 +11757,12 @@ instruct string_indexof_conL(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, immI int_
// fast search of substring with known size.
instruct string_indexof_conU(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, immI int_cnt2,
eBXRegI result, regD vec, eAXRegI cnt2, eCXRegI tmp, eFlagsReg cr) %{
eBXRegI result, regD vec1, eAXRegI cnt2, eCXRegI tmp, eFlagsReg cr) %{
predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU));
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr);
effect(TEMP vec1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr);
format %{ "String IndexOf char[] $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $vec, $cnt1, $cnt2, $tmp" %}
format %{ "String IndexOf char[] $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $vec1, $cnt1, $cnt2, $tmp" %}
ins_encode %{
int icnt2 = (int)$int_cnt2$$constant;
if (icnt2 >= 8) {
@ -11837,13 +11771,13 @@ instruct string_indexof_conU(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, immI int_
__ string_indexofC8($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
icnt2, $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UU);
$vec1$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UU);
} else {
// Small strings are loaded through stack if they cross page boundary.
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
icnt2, $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UU);
$vec1$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UU);
}
%}
ins_pipe( pipe_slow );
@ -11851,12 +11785,12 @@ instruct string_indexof_conU(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, immI int_
// fast search of substring with known size.
instruct string_indexof_conUL(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, immI int_cnt2,
eBXRegI result, regD vec, eAXRegI cnt2, eCXRegI tmp, eFlagsReg cr) %{
eBXRegI result, regD vec1, eAXRegI cnt2, eCXRegI tmp, eFlagsReg cr) %{
predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL));
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr);
effect(TEMP vec1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr);
format %{ "String IndexOf char[] $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $vec, $cnt1, $cnt2, $tmp" %}
format %{ "String IndexOf char[] $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $vec1, $cnt1, $cnt2, $tmp" %}
ins_encode %{
int icnt2 = (int)$int_cnt2$$constant;
if (icnt2 >= 8) {
@ -11865,62 +11799,62 @@ instruct string_indexof_conUL(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, immI int
__ string_indexofC8($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
icnt2, $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UL);
$vec1$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UL);
} else {
// Small strings are loaded through stack if they cross page boundary.
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
icnt2, $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UL);
$vec1$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UL);
}
%}
ins_pipe( pipe_slow );
%}
instruct string_indexofL(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, eAXRegI cnt2,
eBXRegI result, regD vec, eCXRegI tmp, eFlagsReg cr) %{
eBXRegI result, regD vec1, eCXRegI tmp, eFlagsReg cr) %{
predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL));
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr);
effect(TEMP vec1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr);
format %{ "String IndexOf byte[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL all" %}
ins_encode %{
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
(-1), $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::LL);
$vec1$$XMMRegister, $tmp$$Register, StrIntrinsicNode::LL);
%}
ins_pipe( pipe_slow );
%}
instruct string_indexofU(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, eAXRegI cnt2,
eBXRegI result, regD vec, eCXRegI tmp, eFlagsReg cr) %{
eBXRegI result, regD vec1, eCXRegI tmp, eFlagsReg cr) %{
predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU));
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr);
effect(TEMP vec1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr);
format %{ "String IndexOf char[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL all" %}
ins_encode %{
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
(-1), $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UU);
$vec1$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UU);
%}
ins_pipe( pipe_slow );
%}
instruct string_indexofUL(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, eAXRegI cnt2,
eBXRegI result, regD vec, eCXRegI tmp, eFlagsReg cr) %{
eBXRegI result, regD vec1, eCXRegI tmp, eFlagsReg cr) %{
predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL));
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr);
effect(TEMP vec1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr);
format %{ "String IndexOf char[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL all" %}
ins_encode %{
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
(-1), $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UL);
$vec1$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UL);
%}
ins_pipe( pipe_slow );
%}

@ -3720,72 +3720,6 @@ operand vlRegD() %{
interface(REG_INTER);
%}
// Vectors
operand vecS() %{
constraint(ALLOC_IN_RC(vectors_reg_vlbwdq));
match(VecS);
format %{ %}
interface(REG_INTER);
%}
// Vectors
operand legVecS() %{
constraint(ALLOC_IN_RC(vectors_reg_legacy));
match(VecS);
format %{ %}
interface(REG_INTER);
%}
operand vecD() %{
constraint(ALLOC_IN_RC(vectord_reg_vlbwdq));
match(VecD);
format %{ %}
interface(REG_INTER);
%}
operand legVecD() %{
constraint(ALLOC_IN_RC(vectord_reg_legacy));
match(VecD);
format %{ %}
interface(REG_INTER);
%}
operand vecX() %{
constraint(ALLOC_IN_RC(vectorx_reg_vlbwdq));
match(VecX);
format %{ %}
interface(REG_INTER);
%}
operand legVecX() %{
constraint(ALLOC_IN_RC(vectorx_reg_legacy));
match(VecX);
format %{ %}
interface(REG_INTER);
%}
operand vecY() %{
constraint(ALLOC_IN_RC(vectory_reg_vlbwdq));
match(VecY);
format %{ %}
interface(REG_INTER);
%}
operand legVecY() %{
constraint(ALLOC_IN_RC(vectory_reg_legacy));
match(VecY);
format %{ %}
interface(REG_INTER);
%}
//----------Memory Operands----------------------------------------------------
// Direct Memory Operand
// operand direct(immP addr)
@ -11224,7 +11158,7 @@ instruct rep_stos_large(rcx_RegL cnt, rdi_RegP base, regD tmp, rax_RegI zero,
%}
instruct string_compareL(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI cnt2,
rax_RegI result, legVecS tmp1, rFlagsReg cr)
rax_RegI result, legRegD tmp1, rFlagsReg cr)
%{
predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
@ -11240,7 +11174,7 @@ instruct string_compareL(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI c
%}
instruct string_compareU(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI cnt2,
rax_RegI result, legVecS tmp1, rFlagsReg cr)
rax_RegI result, legRegD tmp1, rFlagsReg cr)
%{
predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
@ -11256,7 +11190,7 @@ instruct string_compareU(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI c
%}
instruct string_compareLU(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI cnt2,
rax_RegI result, legVecS tmp1, rFlagsReg cr)
rax_RegI result, legRegD tmp1, rFlagsReg cr)
%{
predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
@ -11272,7 +11206,7 @@ instruct string_compareLU(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI
%}
instruct string_compareUL(rsi_RegP str1, rdx_RegI cnt1, rdi_RegP str2, rcx_RegI cnt2,
rax_RegI result, legVecS tmp1, rFlagsReg cr)
rax_RegI result, legRegD tmp1, rFlagsReg cr)
%{
predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
@ -11289,13 +11223,13 @@ instruct string_compareUL(rsi_RegP str1, rdx_RegI cnt1, rdi_RegP str2, rcx_RegI
// fast search of substring with known size.
instruct string_indexof_conL(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI int_cnt2,
rbx_RegI result, legVecS vec, rax_RegI cnt2, rcx_RegI tmp, rFlagsReg cr)
rbx_RegI result, legRegD tmp_vec, rax_RegI cnt2, rcx_RegI tmp, rFlagsReg cr)
%{
predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL));
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr);
effect(TEMP tmp_vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr);
format %{ "String IndexOf byte[] $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $vec, $cnt1, $cnt2, $tmp" %}
format %{ "String IndexOf byte[] $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $tmp_vec, $cnt1, $cnt2, $tmp" %}
ins_encode %{
int icnt2 = (int)$int_cnt2$$constant;
if (icnt2 >= 16) {
@ -11304,13 +11238,13 @@ instruct string_indexof_conL(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI i
__ string_indexofC8($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
icnt2, $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::LL);
$tmp_vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::LL);
} else {
// Small strings are loaded through stack if they cross page boundary.
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
icnt2, $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::LL);
$tmp_vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::LL);
}
%}
ins_pipe( pipe_slow );
@ -11318,13 +11252,13 @@ instruct string_indexof_conL(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI i
// fast search of substring with known size.
instruct string_indexof_conU(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI int_cnt2,
rbx_RegI result, legVecS vec, rax_RegI cnt2, rcx_RegI tmp, rFlagsReg cr)
rbx_RegI result, legRegD tmp_vec, rax_RegI cnt2, rcx_RegI tmp, rFlagsReg cr)
%{
predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU));
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr);
effect(TEMP tmp_vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr);
format %{ "String IndexOf char[] $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $vec, $cnt1, $cnt2, $tmp" %}
format %{ "String IndexOf char[] $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $tmp_vec, $cnt1, $cnt2, $tmp" %}
ins_encode %{
int icnt2 = (int)$int_cnt2$$constant;
if (icnt2 >= 8) {
@ -11333,13 +11267,13 @@ instruct string_indexof_conU(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI i
__ string_indexofC8($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
icnt2, $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UU);
$tmp_vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UU);
} else {
// Small strings are loaded through stack if they cross page boundary.
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
icnt2, $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UU);
$tmp_vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UU);
}
%}
ins_pipe( pipe_slow );
@ -11347,13 +11281,13 @@ instruct string_indexof_conU(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI i
// fast search of substring with known size.
instruct string_indexof_conUL(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI int_cnt2,
rbx_RegI result, legVecS vec, rax_RegI cnt2, rcx_RegI tmp, rFlagsReg cr)
rbx_RegI result, legRegD tmp_vec, rax_RegI cnt2, rcx_RegI tmp, rFlagsReg cr)
%{
predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL));
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr);
effect(TEMP tmp_vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr);
format %{ "String IndexOf char[] $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $vec, $cnt1, $cnt2, $tmp" %}
format %{ "String IndexOf char[] $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $tmp_vec, $cnt1, $cnt2, $tmp" %}
ins_encode %{
int icnt2 = (int)$int_cnt2$$constant;
if (icnt2 >= 8) {
@ -11362,86 +11296,86 @@ instruct string_indexof_conUL(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI
__ string_indexofC8($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
icnt2, $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UL);
$tmp_vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UL);
} else {
// Small strings are loaded through stack if they cross page boundary.
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
icnt2, $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UL);
$tmp_vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UL);
}
%}
ins_pipe( pipe_slow );
%}
instruct string_indexofL(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, rax_RegI cnt2,
rbx_RegI result, legVecS vec, rcx_RegI tmp, rFlagsReg cr)
rbx_RegI result, legRegD tmp_vec, rcx_RegI tmp, rFlagsReg cr)
%{
predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL));
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr);
effect(TEMP tmp_vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr);
format %{ "String IndexOf byte[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL all" %}
ins_encode %{
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
(-1), $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::LL);
$tmp_vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::LL);
%}
ins_pipe( pipe_slow );
%}
instruct string_indexofU(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, rax_RegI cnt2,
rbx_RegI result, legVecS vec, rcx_RegI tmp, rFlagsReg cr)
rbx_RegI result, legRegD tmp_vec, rcx_RegI tmp, rFlagsReg cr)
%{
predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU));
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr);
effect(TEMP tmp_vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr);
format %{ "String IndexOf char[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL all" %}
ins_encode %{
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
(-1), $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UU);
$tmp_vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UU);
%}
ins_pipe( pipe_slow );
%}
instruct string_indexofUL(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, rax_RegI cnt2,
rbx_RegI result, legVecS vec, rcx_RegI tmp, rFlagsReg cr)
rbx_RegI result, legRegD tmp_vec, rcx_RegI tmp, rFlagsReg cr)
%{
predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL));
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr);
effect(TEMP tmp_vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr);
format %{ "String IndexOf char[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL all" %}
ins_encode %{
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
(-1), $result$$Register,
$vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UL);
$tmp_vec$$XMMRegister, $tmp$$Register, StrIntrinsicNode::UL);
%}
ins_pipe( pipe_slow );
%}
instruct string_indexofU_char(rdi_RegP str1, rdx_RegI cnt1, rax_RegI ch,
rbx_RegI result, legVecS vec1, legVecS vec2, legVecS vec3, rcx_RegI tmp, rFlagsReg cr)
rbx_RegI result, legRegD tmp_vec1, legRegD tmp_vec2, legRegD tmp_vec3, rcx_RegI tmp, rFlagsReg cr)
%{
predicate(UseSSE42Intrinsics);
match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
effect(TEMP vec1, TEMP vec2, TEMP vec3, USE_KILL str1, USE_KILL cnt1, USE_KILL ch, TEMP tmp, KILL cr);
effect(TEMP tmp_vec1, TEMP tmp_vec2, TEMP tmp_vec3, USE_KILL str1, USE_KILL cnt1, USE_KILL ch, TEMP tmp, KILL cr);
format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result // KILL all" %}
ins_encode %{
__ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register, $result$$Register,
$vec1$$XMMRegister, $vec2$$XMMRegister, $vec3$$XMMRegister, $tmp$$Register);
$tmp_vec1$$XMMRegister, $tmp_vec2$$XMMRegister, $tmp_vec3$$XMMRegister, $tmp$$Register);
%}
ins_pipe( pipe_slow );
%}
// fast string equals
instruct string_equals(rdi_RegP str1, rsi_RegP str2, rcx_RegI cnt, rax_RegI result,
legVecS tmp1, legVecS tmp2, rbx_RegI tmp3, rFlagsReg cr)
legRegD tmp1, legRegD tmp2, rbx_RegI tmp3, rFlagsReg cr)
%{
match(Set result (StrEquals (Binary str1 str2) cnt));
effect(TEMP tmp1, TEMP tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL tmp3, KILL cr);
@ -11457,7 +11391,7 @@ instruct string_equals(rdi_RegP str1, rsi_RegP str2, rcx_RegI cnt, rax_RegI resu
// fast array equals
instruct array_equalsB(rdi_RegP ary1, rsi_RegP ary2, rax_RegI result,
legVecS tmp1, legVecS tmp2, rcx_RegI tmp3, rbx_RegI tmp4, rFlagsReg cr)
legRegD tmp1, legRegD tmp2, rcx_RegI tmp3, rbx_RegI tmp4, rFlagsReg cr)
%{
predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
match(Set result (AryEq ary1 ary2));
@ -11473,7 +11407,7 @@ instruct array_equalsB(rdi_RegP ary1, rsi_RegP ary2, rax_RegI result,
%}
instruct array_equalsC(rdi_RegP ary1, rsi_RegP ary2, rax_RegI result,
legVecS tmp1, legVecS tmp2, rcx_RegI tmp3, rbx_RegI tmp4, rFlagsReg cr)
legRegD tmp1, legRegD tmp2, rcx_RegI tmp3, rbx_RegI tmp4, rFlagsReg cr)
%{
predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
match(Set result (AryEq ary1 ary2));
@ -11489,7 +11423,7 @@ instruct array_equalsC(rdi_RegP ary1, rsi_RegP ary2, rax_RegI result,
%}
instruct has_negatives(rsi_RegP ary1, rcx_RegI len, rax_RegI result,
legVecS tmp1, legVecS tmp2, rbx_RegI tmp3, rFlagsReg cr)
legRegD tmp1, legRegD tmp2, rbx_RegI tmp3, rFlagsReg cr)
%{
match(Set result (HasNegatives ary1 len));
effect(TEMP tmp1, TEMP tmp2, USE_KILL ary1, USE_KILL len, KILL tmp3, KILL cr);
@ -11504,7 +11438,7 @@ instruct has_negatives(rsi_RegP ary1, rcx_RegI len, rax_RegI result,
%}
// fast char[] to byte[] compression
instruct string_compress(rsi_RegP src, rdi_RegP dst, rdx_RegI len, legVecS tmp1, legVecS tmp2, legVecS tmp3, legVecS tmp4,
instruct string_compress(rsi_RegP src, rdi_RegP dst, rdx_RegI len, legRegD tmp1, legRegD tmp2, legRegD tmp3, legRegD tmp4,
rcx_RegI tmp5, rax_RegI result, rFlagsReg cr) %{
match(Set result (StrCompressedCopy src (Binary dst len)));
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL tmp5, KILL cr);
@ -11520,7 +11454,7 @@ instruct string_compress(rsi_RegP src, rdi_RegP dst, rdx_RegI len, legVecS tmp1,
// fast byte[] to char[] inflation
instruct string_inflate(Universe dummy, rsi_RegP src, rdi_RegP dst, rdx_RegI len,
legVecS tmp1, rcx_RegI tmp2, rFlagsReg cr) %{
legRegD tmp1, rcx_RegI tmp2, rFlagsReg cr) %{
match(Set dummy (StrInflatedCopy src (Binary dst len)));
effect(TEMP tmp1, TEMP tmp2, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);
@ -11534,7 +11468,7 @@ instruct string_inflate(Universe dummy, rsi_RegP src, rdi_RegP dst, rdx_RegI len
// encode char[] to byte[] in ISO_8859_1
instruct encode_iso_array(rsi_RegP src, rdi_RegP dst, rdx_RegI len,
legVecS tmp1, legVecS tmp2, legVecS tmp3, legVecS tmp4,
legRegD tmp1, legRegD tmp2, legRegD tmp3, legRegD tmp4,
rcx_RegI tmp5, rax_RegI result, rFlagsReg cr) %{
match(Set result (EncodeISOArray src (Binary dst len)));
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL tmp5, KILL cr);

@ -1384,6 +1384,8 @@ void os::print_os_info(outputStream* st) {
st->print_cr("AIX kernel version %u.%u.%u.%u",
(ver >> 24) & 0xFF, (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
os::Posix::print_uptime_info(st);
os::Posix::print_rlimit_info(st);
os::Posix::print_load_average(st);

@ -28,6 +28,7 @@
#include "os_aix.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/os_perf.hpp"
#include "utilities/globalDefinitions.hpp"
#include CPU_HEADER(vm_version_ext)
@ -884,8 +885,7 @@ class NetworkPerformanceInterface::NetworkPerformance : public CHeapObj<mtIntern
friend class NetworkPerformanceInterface;
private:
NetworkPerformance();
NetworkPerformance(const NetworkPerformance& rhs); // no impl
NetworkPerformance& operator=(const NetworkPerformance& rhs); // no impl
NONCOPYABLE(NetworkPerformance);
bool initialize();
~NetworkPerformance();
int network_utilization(NetworkInterface** network_interfaces) const;

@ -148,7 +148,7 @@ void ZPhysicalMemoryBacking::pretouch_view(uintptr_t addr, size_t size) const {
os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
}
void ZPhysicalMemoryBacking::map_view(const ZPhysicalMemory& pmem, uintptr_t addr, bool pretouch) const {
void ZPhysicalMemoryBacking::map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
const size_t nsegments = pmem.nsegments();
size_t size = 0;
@ -159,11 +159,6 @@ void ZPhysicalMemoryBacking::map_view(const ZPhysicalMemory& pmem, uintptr_t add
_file.map(segment_addr, segment.size(), segment.start());
size += segment.size();
}
// Pre-touch memory
if (pretouch) {
pretouch_view(addr, size);
}
}
void ZPhysicalMemoryBacking::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
@ -175,15 +170,27 @@ uintptr_t ZPhysicalMemoryBacking::nmt_address(uintptr_t offset) const {
return ZAddress::marked0(offset);
}
void ZPhysicalMemoryBacking::pretouch(uintptr_t offset, size_t size) const {
if (ZVerifyViews) {
// Pre-touch good view
pretouch_view(ZAddress::good(offset), size);
} else {
// Pre-touch all views
pretouch_view(ZAddress::marked0(offset), size);
pretouch_view(ZAddress::marked1(offset), size);
pretouch_view(ZAddress::remapped(offset), size);
}
}
void ZPhysicalMemoryBacking::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
if (ZVerifyViews) {
// Map good view
map_view(pmem, ZAddress::good(offset), AlwaysPreTouch);
map_view(pmem, ZAddress::good(offset));
} else {
// Map all views
map_view(pmem, ZAddress::marked0(offset), AlwaysPreTouch);
map_view(pmem, ZAddress::marked1(offset), AlwaysPreTouch);
map_view(pmem, ZAddress::remapped(offset), AlwaysPreTouch);
map_view(pmem, ZAddress::marked0(offset));
map_view(pmem, ZAddress::marked1(offset));
map_view(pmem, ZAddress::remapped(offset));
}
}
@ -202,7 +209,7 @@ void ZPhysicalMemoryBacking::unmap(const ZPhysicalMemory& pmem, uintptr_t offset
void ZPhysicalMemoryBacking::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
// Map good view
assert(ZVerifyViews, "Should be enabled");
map_view(pmem, ZAddress::good(offset), false /* pretouch */);
map_view(pmem, ZAddress::good(offset));
}
void ZPhysicalMemoryBacking::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {

@ -36,7 +36,7 @@ private:
ZMemoryManager _uncommitted;
void pretouch_view(uintptr_t addr, size_t size) const;
void map_view(const ZPhysicalMemory& pmem, uintptr_t addr, bool pretouch) const;
void map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
void unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
public:
@ -53,6 +53,8 @@ public:
uintptr_t nmt_address(uintptr_t offset) const;
void pretouch(uintptr_t offset, size_t size) const;
void map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
void unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;

@ -168,6 +168,22 @@ julong os::Bsd::available_memory() {
return available;
}
// For more info, see:
// https://man.openbsd.org/sysctl.2
void os::Bsd::print_uptime_info(outputStream* st) {
struct timeval boottime;
size_t len = sizeof(boottime);
int mib[2];
mib[0] = CTL_KERN;
mib[1] = KERN_BOOTTIME;
if (sysctl(mib, 2, &boottime, &len, NULL, 0) >= 0) {
time_t bootsec = boottime.tv_sec;
time_t currsec = time(NULL);
os::print_dhm(st, "OS uptime:", (long) difftime(currsec, bootsec));
}
}
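// Illustrative only, not part of this change: the same KERN_BOOTTIME query as a
// self-contained program, useful for checking the reported boot time outside the VM.
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <cstdio>
#include <ctime>

int main() {
  struct timeval boottime;
  size_t len = sizeof(boottime);
  int mib[2] = { CTL_KERN, KERN_BOOTTIME };
  if (sysctl(mib, 2, &boottime, &len, NULL, 0) == 0) {
    // Uptime is "now" minus the boot timestamp reported by the kernel.
    std::printf("uptime: %.0f seconds\n", difftime(time(NULL), boottime.tv_sec));
  }
  return 0;
}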
julong os::physical_memory() {
return Bsd::physical_memory();
}
@ -1569,6 +1585,8 @@ void os::print_os_info(outputStream* st) {
os::Posix::print_uname_info(st);
os::Bsd::print_uptime_info(st);
os::Posix::print_rlimit_info(st);
os::Posix::print_load_average(st);
@ -3763,11 +3781,30 @@ int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
}
}
// Get the default path to the core file
// Get the kern.corefile setting, or otherwise the default path to the core file
// Returns the length of the string
int os::get_core_path(char* buffer, size_t bufferSize) {
int n = jio_snprintf(buffer, bufferSize, "/cores/core.%d", current_process_id());
int n = 0;
#ifdef __APPLE__
char coreinfo[MAX_PATH];
size_t sz = sizeof(coreinfo);
int ret = sysctlbyname("kern.corefile", coreinfo, &sz, NULL, 0);
if (ret == 0) {
char *pid_pos = strstr(coreinfo, "%P");
// skip over the "%P" to preserve any optional custom user pattern
const char* tail = (pid_pos != NULL) ? (pid_pos + 2) : "";
if (pid_pos != NULL) {
*pid_pos = '\0';
n = jio_snprintf(buffer, bufferSize, "%s%d%s", coreinfo, os::current_process_id(), tail);
} else {
n = jio_snprintf(buffer, bufferSize, "%s", coreinfo);
}
} else
#endif
{
n = jio_snprintf(buffer, bufferSize, "/cores/core.%d", os::current_process_id());
}
// Truncate if theoretical string was longer than bufferSize
n = MIN2(n, (int)bufferSize);
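// Illustrative only, not part of this change: a standalone sketch of the "%P" expansion
// performed above. The helper name and the example pattern are assumptions; kern.corefile
// typically reports something like "/cores/core.%P".
#include <cstddef>
#include <cstdio>
#include <cstring>

static int expand_core_pattern(char* buffer, std::size_t buffer_size, const char* pattern, int pid) {
  char tmp[4096];
  std::snprintf(tmp, sizeof(tmp), "%s", pattern);
  char* pid_pos = std::strstr(tmp, "%P");
  if (pid_pos != NULL) {
    const char* tail = pid_pos + 2;     // keep any custom suffix that follows "%P"
    *pid_pos = '\0';                    // cut the pattern at "%P"
    return std::snprintf(buffer, buffer_size, "%s%d%s", tmp, pid, tail);
  }
  return std::snprintf(buffer, buffer_size, "%s", tmp);
}

// expand_core_pattern(buf, sizeof(buf), "/cores/core.%P", 1234) yields "/cores/core.1234".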

@ -155,6 +155,8 @@ class Bsd {
}
}
static int get_node_by_cpu(int cpu_id);
static void print_uptime_info(outputStream* st);
};
#endif // OS_BSD_OS_BSD_HPP

@ -26,6 +26,7 @@
#include "memory/resourceArea.hpp"
#include "runtime/os.hpp"
#include "runtime/os_perf.hpp"
#include "utilities/globalDefinitions.hpp"
#include CPU_HEADER(vm_version_ext)
#ifdef __APPLE__
@ -72,8 +73,8 @@ class CPUPerformanceInterface::CPUPerformance : public CHeapObj<mtInternal> {
int cpu_load_total_process(double* cpu_load);
int cpu_loads_process(double* pjvmUserLoad, double* pjvmKernelLoad, double* psystemTotalLoad);
CPUPerformance(const CPUPerformance& rhs); // no impl
CPUPerformance& operator=(const CPUPerformance& rhs); // no impl
NONCOPYABLE(CPUPerformance);
public:
CPUPerformance();
bool initialize();
@ -264,8 +265,7 @@ class SystemProcessInterface::SystemProcesses : public CHeapObj<mtInternal> {
private:
SystemProcesses();
bool initialize();
SystemProcesses(const SystemProcesses& rhs); // no impl
SystemProcesses& operator=(const SystemProcesses& rhs); // no impl
NONCOPYABLE(SystemProcesses);
~SystemProcesses();
//information about system processes
@ -407,8 +407,7 @@ class NetworkPerformanceInterface::NetworkPerformance : public CHeapObj<mtIntern
friend class NetworkPerformanceInterface;
private:
NetworkPerformance();
NetworkPerformance(const NetworkPerformance& rhs); // no impl
NetworkPerformance& operator=(const NetworkPerformance& rhs); // no impl
NONCOPYABLE(NetworkPerformance);
bool initialize();
~NetworkPerformance();
int network_utilization(NetworkInterface** network_interfaces) const;

@ -25,6 +25,8 @@
#ifndef OS_BSD_SEMAPHORE_BSD_HPP
#define OS_BSD_SEMAPHORE_BSD_HPP
#include "utilities/globalDefinitions.hpp"
#ifndef __APPLE__
// Use POSIX semaphores.
# include "semaphore_posix.hpp"
@ -37,9 +39,7 @@
class OSXSemaphore : public CHeapObj<mtInternal>{
semaphore_t _semaphore;
// Prevent copying and assignment.
OSXSemaphore(const OSXSemaphore&);
OSXSemaphore& operator=(const OSXSemaphore&);
NONCOPYABLE(OSXSemaphore);
public:
OSXSemaphore(uint value = 0);

@ -249,7 +249,7 @@ void ZPhysicalMemoryBacking::pretouch_view(uintptr_t addr, size_t size) const {
os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
}
void ZPhysicalMemoryBacking::map_view(const ZPhysicalMemory& pmem, uintptr_t addr, bool pretouch) const {
void ZPhysicalMemoryBacking::map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
const size_t nsegments = pmem.nsegments();
size_t size = 0;
@ -273,11 +273,6 @@ void ZPhysicalMemoryBacking::map_view(const ZPhysicalMemory& pmem, uintptr_t add
// NUMA interleave memory before touching it
ZNUMA::memory_interleave(addr, size);
// Pre-touch memory
if (pretouch) {
pretouch_view(addr, size);
}
}
void ZPhysicalMemoryBacking::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
@ -296,15 +291,27 @@ uintptr_t ZPhysicalMemoryBacking::nmt_address(uintptr_t offset) const {
return ZAddress::marked0(offset);
}
void ZPhysicalMemoryBacking::pretouch(uintptr_t offset, size_t size) const {
if (ZVerifyViews) {
// Pre-touch good view
pretouch_view(ZAddress::good(offset), size);
} else {
// Pre-touch all views
pretouch_view(ZAddress::marked0(offset), size);
pretouch_view(ZAddress::marked1(offset), size);
pretouch_view(ZAddress::remapped(offset), size);
}
}
void ZPhysicalMemoryBacking::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
if (ZVerifyViews) {
// Map good view
map_view(pmem, ZAddress::good(offset), AlwaysPreTouch);
map_view(pmem, ZAddress::good(offset));
} else {
// Map all views
map_view(pmem, ZAddress::marked0(offset), AlwaysPreTouch);
map_view(pmem, ZAddress::marked1(offset), AlwaysPreTouch);
map_view(pmem, ZAddress::remapped(offset), AlwaysPreTouch);
map_view(pmem, ZAddress::marked0(offset));
map_view(pmem, ZAddress::marked1(offset));
map_view(pmem, ZAddress::remapped(offset));
}
}
@ -323,7 +330,7 @@ void ZPhysicalMemoryBacking::unmap(const ZPhysicalMemory& pmem, uintptr_t offset
void ZPhysicalMemoryBacking::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
// Map good view
assert(ZVerifyViews, "Should be enabled");
map_view(pmem, ZAddress::good(offset), false /* pretouch */);
map_view(pmem, ZAddress::good(offset));
}
void ZPhysicalMemoryBacking::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {

@ -43,7 +43,7 @@ private:
void advise_view(uintptr_t addr, size_t size, int advice) const;
void pretouch_view(uintptr_t addr, size_t size) const;
void map_view(const ZPhysicalMemory& pmem, uintptr_t addr, bool pretouch) const;
void map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
void unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
public:
@ -60,6 +60,8 @@ public:
uintptr_t nmt_address(uintptr_t offset) const;
void pretouch(uintptr_t offset, size_t size) const;
void map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
void unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;

@ -2119,6 +2119,8 @@ void os::print_os_info(outputStream* st) {
os::Posix::print_uname_info(st);
os::Linux::print_uptime_info(st);
// Print warning if unsafe chroot environment detected
if (unsafe_chroot_detected) {
st->print("WARNING!! ");
@ -2304,6 +2306,15 @@ void os::Linux::print_ld_preload_file(outputStream* st) {
st->cr();
}
void os::Linux::print_uptime_info(outputStream* st) {
struct sysinfo sinfo;
int ret = sysinfo(&sinfo);
if (ret == 0) {
os::print_dhm(st, "OS uptime:", (long) sinfo.uptime);
}
}
void os::Linux::print_container_info(outputStream* st) {
if (!OSContainer::is_containerized()) {
return;

@ -103,6 +103,7 @@ class Linux {
static void print_libversion_info(outputStream* st);
static void print_proc_sys_info(outputStream* st);
static void print_ld_preload_file(outputStream* st);
static void print_uptime_info(outputStream* st);
public:
struct CPUPerfTicks {

@ -28,6 +28,7 @@
#include "os_linux.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/os_perf.hpp"
#include "utilities/globalDefinitions.hpp"
#include CPU_HEADER(vm_version_ext)
@ -948,8 +949,7 @@ class NetworkPerformanceInterface::NetworkPerformance : public CHeapObj<mtIntern
friend class NetworkPerformanceInterface;
private:
NetworkPerformance();
NetworkPerformance(const NetworkPerformance& rhs); // no impl
NetworkPerformance& operator=(const NetworkPerformance& rhs); // no impl
NONCOPYABLE(NetworkPerformance);
bool initialize();
~NetworkPerformance();
int64_t read_counter(const char* iface, const char* counter) const;

@ -26,13 +26,12 @@
#define OS_LINUX_WAITBARRIER_LINUX_HPP
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
class LinuxWaitBarrier : public CHeapObj<mtInternal> {
volatile int _futex_barrier;
// Prevent copying and assignment of LinuxWaitBarrier instances.
LinuxWaitBarrier(const LinuxWaitBarrier&);
LinuxWaitBarrier& operator=(const LinuxWaitBarrier&);
NONCOPYABLE(LinuxWaitBarrier);
public:
LinuxWaitBarrier() : _futex_barrier(0) {};

@ -49,6 +49,7 @@
#include <sys/utsname.h>
#include <time.h>
#include <unistd.h>
#include <utmpx.h>
// Todo: provide a os::get_max_process_id() or similar. Number of processes
// may have been configured, can be read more accurately from proc fs etc.
@ -379,6 +380,27 @@ void os::Posix::print_load_average(outputStream* st) {
st->cr();
}
// Boot/uptime information.
// Unfortunately this does not work on macOS and Linux because the utx chain has no
// entry for reboot (at least on the machines tested).
void os::Posix::print_uptime_info(outputStream* st) {
int bootsec = -1;
int currsec = time(NULL);
struct utmpx* ent;
setutxent();
while ((ent = getutxent())) {
if (!strcmp("system boot", ent->ut_line)) {
bootsec = ent->ut_tv.tv_sec;
break;
}
}
if (bootsec != -1) {
os::print_dhm(st, "OS uptime:", (long) (currsec-bootsec));
}
}
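// os::print_dhm() is added elsewhere in this change; the sketch below is only a guess at
// the day/hour/minute arithmetic such a printout performs (name and format are assumptions).
#include <cstdio>

static void print_dhm_sketch(const char* prefix, long sec) {
  const long days    = sec / 86400;           // whole days
  const long hours   = (sec % 86400) / 3600;  // remaining whole hours
  const long minutes = (sec % 3600) / 60;     // remaining whole minutes
  std::printf("%s %ld days %ld:%02ld hours\n", prefix, days, hours, minutes);
}

// print_dhm_sketch("OS uptime:", 93784) prints "OS uptime: 1 days 2:03 hours".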
void os::Posix::print_rlimit_info(outputStream* st) {
st->print("rlimit:");
struct rlimit rlim;

@ -41,6 +41,7 @@ protected:
static void print_uname_info(outputStream* st);
static void print_libversion_info(outputStream* st);
static void print_load_average(outputStream* st);
static void print_uptime_info(outputStream* st);
// Minimum stack size a thread can be created with (allowing
// the VM to completely create the thread and enter user code).
@ -285,10 +286,8 @@ class PlatformMutex : public CHeapObj<mtSynchronizer> {
#endif // PLATFORM_MONITOR_IMPL_INDIRECT
private:
// Disable copying
PlatformMutex(const PlatformMutex&);
PlatformMutex& operator=(const PlatformMutex&);
private:
NONCOPYABLE(PlatformMutex);
public:
void lock();
@ -329,9 +328,7 @@ class PlatformMonitor : public PlatformMutex {
#endif // PLATFORM_MONITOR_IMPL_INDIRECT
private:
// Disable copying
PlatformMonitor(const PlatformMonitor&);
PlatformMonitor& operator=(const PlatformMonitor&);
NONCOPYABLE(PlatformMonitor);
public:
int wait(jlong millis);

@ -26,15 +26,14 @@
#define OS_POSIX_SEMAPHORE_POSIX_HPP
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
#include <semaphore.h>
class PosixSemaphore : public CHeapObj<mtInternal> {
sem_t _semaphore;
// Prevent copying and assignment.
PosixSemaphore(const PosixSemaphore&);
PosixSemaphore& operator=(const PosixSemaphore&);
NONCOPYABLE(PosixSemaphore);
public:
PosixSemaphore(uint value = 0);

@ -28,6 +28,7 @@
#include "runtime/os.hpp"
#include "runtime/os_perf.hpp"
#include "os_solaris.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include CPU_HEADER(vm_version_ext)
@ -737,8 +738,7 @@ class NetworkPerformanceInterface::NetworkPerformance : public CHeapObj<mtIntern
friend class NetworkPerformanceInterface;
private:
NetworkPerformance();
NetworkPerformance(const NetworkPerformance& rhs); // no impl
NetworkPerformance& operator=(const NetworkPerformance& rhs); // no impl
NONCOPYABLE(NetworkPerformance);
bool initialize();
~NetworkPerformance();
int network_utilization(NetworkInterface** network_interfaces) const;

@ -1584,6 +1584,8 @@ void os::print_os_info(outputStream* st) {
os::Posix::print_uname_info(st);
os::Posix::print_uptime_info(st);
os::Solaris::print_libversion_info(st);
os::Posix::print_rlimit_info(st);

@ -334,9 +334,7 @@ class PlatformParker : public CHeapObj<mtSynchronizer> {
// Platform specific implementations that underpin VM Mutex/Monitor classes
class PlatformMutex : public CHeapObj<mtSynchronizer> {
// Disable copying
PlatformMutex(const PlatformMutex&);
PlatformMutex& operator=(const PlatformMutex&);
NONCOPYABLE(PlatformMutex);
protected:
mutex_t _mutex; // Native mutex for locking
@ -352,9 +350,8 @@ class PlatformMutex : public CHeapObj<mtSynchronizer> {
class PlatformMonitor : public PlatformMutex {
private:
cond_t _cond; // Native condition variable for blocking
// Disable copying
PlatformMonitor(const PlatformMonitor&);
PlatformMonitor& operator=(const PlatformMonitor&);
NONCOPYABLE(PlatformMonitor);
public:
PlatformMonitor();

@ -0,0 +1,126 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zBackingFile_windows.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zGranuleMap.inline.hpp"
#include "gc/z/zMapper_windows.hpp"
#include "logging/log.hpp"
#include "runtime/globals.hpp"
#include "utilities/debug.hpp"
// The backing file commits and uncommits physical memory that can be
// multi-mapped into the virtual address space. To support fine-grained
// committing and uncommitting, each ZGranuleSize'd chunk is mapped to
// a separate paging file mapping.
ZBackingFile::ZBackingFile() :
_handles(MaxHeapSize),
_size(0) {}
size_t ZBackingFile::size() const {
return _size;
}
HANDLE ZBackingFile::get_handle(uintptr_t offset) const {
HANDLE const handle = _handles.get(offset);
assert(handle != 0, "Should be set");
return handle;
}
void ZBackingFile::put_handle(uintptr_t offset, HANDLE handle) {
assert(handle != INVALID_HANDLE_VALUE, "Invalid handle");
assert(_handles.get(offset) == 0, "Should be cleared");
_handles.put(offset, handle);
}
void ZBackingFile::clear_handle(uintptr_t offset) {
assert(_handles.get(offset) != 0, "Should be set");
_handles.put(offset, 0);
}
size_t ZBackingFile::commit_from_paging_file(size_t offset, size_t size) {
for (size_t i = 0; i < size; i += ZGranuleSize) {
HANDLE const handle = ZMapper::create_and_commit_paging_file_mapping(ZGranuleSize);
if (handle == 0) {
return i;
}
put_handle(offset + i, handle);
}
return size;
}
size_t ZBackingFile::uncommit_from_paging_file(size_t offset, size_t size) {
for (size_t i = 0; i < size; i += ZGranuleSize) {
HANDLE const handle = get_handle(offset + i);
clear_handle(offset + i);
ZMapper::close_paging_file_mapping(handle);
}
return size;
}
size_t ZBackingFile::commit(size_t offset, size_t length) {
log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
offset / M, (offset + length) / M, length / M);
const size_t committed = commit_from_paging_file(offset, length);
const size_t end = offset + committed;
if (end > _size) {
// Update size
_size = end;
}
return committed;
}
size_t ZBackingFile::uncommit(size_t offset, size_t length) {
log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
offset / M, (offset + length) / M, length / M);
return uncommit_from_paging_file(offset, length);
}
void ZBackingFile::map(uintptr_t addr, size_t size, size_t offset) const {
assert(is_aligned(offset, ZGranuleSize), "Misaligned");
assert(is_aligned(addr, ZGranuleSize), "Misaligned");
assert(is_aligned(size, ZGranuleSize), "Misaligned");
for (size_t i = 0; i < size; i += ZGranuleSize) {
HANDLE const handle = get_handle(offset + i);
ZMapper::map_view_replace_placeholder(handle, 0 /* offset */, addr + i, ZGranuleSize);
}
}
void ZBackingFile::unmap(uintptr_t addr, size_t size) const {
assert(is_aligned(addr, ZGranuleSize), "Misaligned");
assert(is_aligned(size, ZGranuleSize), "Misaligned");
for (size_t i = 0; i < size; i += ZGranuleSize) {
ZMapper::unmap_view_preserve_placeholder(addr + i, ZGranuleSize);
}
}

@ -0,0 +1,56 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_WINDOWS_GC_Z_ZBACKINGFILE_WINDOWS_HPP
#define OS_WINDOWS_GC_Z_ZBACKINGFILE_WINDOWS_HPP
#include "gc/z/zGranuleMap.hpp"
#include "memory/allocation.hpp"
#include <Windows.h>
class ZBackingFile {
private:
ZGranuleMap<HANDLE> _handles;
size_t _size;
HANDLE get_handle(uintptr_t offset) const;
void put_handle(uintptr_t offset, HANDLE handle);
void clear_handle(uintptr_t offset);
size_t commit_from_paging_file(size_t offset, size_t size);
size_t uncommit_from_paging_file(size_t offset, size_t size);
public:
ZBackingFile();
size_t size() const;
size_t commit(size_t offset, size_t length);
size_t uncommit(size_t offset, size_t length);
void map(uintptr_t addr, size_t size, size_t offset) const;
void unmap(uintptr_t addr, size_t size) const;
};
#endif // OS_WINDOWS_GC_Z_ZBACKINGFILE_WINDOWS_HPP
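To make the granule bookkeeping concrete, a hedged usage sketch (illustrative, not code from this change) of how ZBackingFile composes with ZMapper for a multi-granule range, writing G for ZGranuleSize:

// ZBackingFile file;
// file.commit(0, 3 * G);     // creates three paging file mappings; their HANDLEs are stored
//                            // in the ZGranuleMap at offsets 0, G and 2*G
// file.map(addr, 3 * G, 0);  // maps one granule-sized view per handle, replacing the
//                            // granule-sized placeholders at addr, addr + G, addr + 2*G
// file.unmap(addr, 3 * G);   // unmaps the views while preserving the placeholders
// file.uncommit(0, 3 * G);   // closes the three paging file mapping handles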

@ -0,0 +1,30 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zInitialize.hpp"
#include "gc/z/zSyscall_windows.hpp"
void ZInitialize::initialize_os() {
ZSyscall::initialize();
}

@ -0,0 +1,29 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zLargePages.hpp"
void ZLargePages::initialize_platform() {
_state = Disabled;
}

@ -0,0 +1,254 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zMapper_windows.hpp"
#include "gc/z/zSyscall_windows.hpp"
#include "logging/log.hpp"
#include "utilities/debug.hpp"
#include <Windows.h>
// Memory reservation, commit, views, and placeholders.
//
// To be able to up-front reserve address space for the heap views, and later
// multi-map the heap views to the same physical memory, without ever losing the
// reservation of the reserved address space, we use "placeholders".
//
// These placeholders block out the address space from being used by other parts
// of the process. To commit memory in this address space, the placeholder must
// be replaced by anonymous memory, or replaced by mapping a view against a
// paging file mapping. We use the latter to support multi-mapping.
//
// We want to be able to dynamically commit and uncommit the physical memory of
// the heap (and also unmap ZPages), in granules of ZGranuleSize bytes. There is
// no way to grow and shrink the committed memory of a paging file mapping.
// Therefore, we create multiple granule-sized paging file mappings. The memory is
// committed by creating a paging file mapping, mapping a view against it, committing
// the memory, and then unmapping the view. The memory stays committed until all views
// are unmapped and the paging file mapping handle is closed.
//
// When replacing a placeholder address space reservation with a mapped view
// against a paging file mapping, the virtual address space must exactly match
// an existing placeholder's address and size. Therefore we only deal with
// granule-sized placeholders at this layer. Higher layers that keep track of
// reserved available address space can (and will) coalesce placeholders, but
// they will be split before being used.
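// Illustrative walkthrough (not part of this change): the placeholder lifecycle for one
// granule, expressed with the ZMapper primitives defined in this file:
//
//   uintptr_t addr = ZMapper::reserve(0, 2 * ZGranuleSize);                // placeholder covering two granules
//   ZMapper::split_placeholder(addr, ZGranuleSize);                        // carve out a granule-sized placeholder
//   HANDLE handle = ZMapper::create_and_commit_paging_file_mapping(ZGranuleSize);
//   ZMapper::map_view_replace_placeholder(handle, 0, addr, ZGranuleSize);  // placeholder -> mapped view
//   ...
//   ZMapper::unmap_view_preserve_placeholder(addr, ZGranuleSize);          // mapped view -> placeholder again
//   ZMapper::close_paging_file_mapping(handle);                            // memory released once all views are gone
//   ZMapper::coalesce_placeholders(addr, 2 * ZGranuleSize);                // merge back into a single placeholder
//   ZMapper::unreserve(addr, 2 * ZGranuleSize);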
#define fatal_error(msg, addr, size) \
fatal(msg ": " PTR_FORMAT " " SIZE_FORMAT "M (%d)", \
(addr), (size) / M, GetLastError())
uintptr_t ZMapper::reserve(uintptr_t addr, size_t size) {
void* const res = ZSyscall::VirtualAlloc2(
GetCurrentProcess(), // Process
(void*)addr, // BaseAddress
size, // Size
MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, // AllocationType
PAGE_NOACCESS, // PageProtection
NULL, // ExtendedParameters
0 // ParameterCount
);
// Caller responsible for error handling
return (uintptr_t)res;
}
void ZMapper::unreserve(uintptr_t addr, size_t size) {
const bool res = ZSyscall::VirtualFreeEx(
GetCurrentProcess(), // hProcess
(void*)addr, // lpAddress
size, // dwSize
MEM_RELEASE // dwFreeType
);
if (!res) {
fatal_error("Failed to unreserve memory", addr, size);
}
}
HANDLE ZMapper::create_paging_file_mapping(size_t size) {
// Create mapping with SEC_RESERVE instead of SEC_COMMIT.
//
// We use MapViewOfFile3 for two different reasons:
// 1) When committing memory for the created paging file
// 2) When mapping a view of the memory created in (1)
//
// The non-platform code is only set up to deal with out-of-memory
// errors in (1). By using SEC_RESERVE, we prevent MapViewOfFile3
// from failing because of "commit limit" checks. To actually commit
// memory in (1), a call to VirtualAlloc2 is done.
HANDLE const res = ZSyscall::CreateFileMappingW(
INVALID_HANDLE_VALUE, // hFile
NULL, // lpFileMappingAttribute
PAGE_READWRITE | SEC_RESERVE, // flProtect
size >> 32, // dwMaximumSizeHigh
size & 0xFFFFFFFF, // dwMaximumSizeLow
NULL // lpName
);
// Caller responsible for error handling
return res;
}
bool ZMapper::commit_paging_file_mapping(HANDLE file_handle, uintptr_t file_offset, size_t size) {
const uintptr_t addr = map_view_no_placeholder(file_handle, file_offset, size);
if (addr == 0) {
log_error(gc)("Failed to map view of paging file mapping (%d)", GetLastError());
return false;
}
const uintptr_t res = commit(addr, size);
if (res != addr) {
log_error(gc)("Failed to commit memory (%d)", GetLastError());
}
unmap_view_no_placeholder(addr, size);
return res == addr;
}
uintptr_t ZMapper::map_view_no_placeholder(HANDLE file_handle, uintptr_t file_offset, size_t size) {
void* const res = ZSyscall::MapViewOfFile3(
file_handle, // FileMapping
GetCurrentProcess(), // ProcessHandle
NULL, // BaseAddress
file_offset, // Offset
size, // ViewSize
0, // AllocationType
PAGE_NOACCESS, // PageProtection
NULL, // ExtendedParameters
0 // ParameterCount
);
// Caller responsible for error handling
return (uintptr_t)res;
}
void ZMapper::unmap_view_no_placeholder(uintptr_t addr, size_t size) {
const bool res = ZSyscall::UnmapViewOfFile2(
GetCurrentProcess(), // ProcessHandle
(void*)addr, // BaseAddress
0 // UnmapFlags
);
if (!res) {
fatal_error("Failed to unmap memory", addr, size);
}
}
uintptr_t ZMapper::commit(uintptr_t addr, size_t size) {
void* const res = ZSyscall::VirtualAlloc2(
GetCurrentProcess(), // Process
(void*)addr, // BaseAddress
size, // Size
MEM_COMMIT, // AllocationType
PAGE_NOACCESS, // PageProtection
NULL, // ExtendedParameters
0 // ParameterCount
);
// Caller responsible for error handling
return (uintptr_t)res;
}
HANDLE ZMapper::create_and_commit_paging_file_mapping(size_t size) {
HANDLE const file_handle = create_paging_file_mapping(size);
if (file_handle == 0) {
log_error(gc)("Failed to create paging file mapping (%d)", GetLastError());
return 0;
}
const bool res = commit_paging_file_mapping(file_handle, 0 /* file_offset */, size);
if (!res) {
close_paging_file_mapping(file_handle);
return 0;
}
return file_handle;
}
void ZMapper::close_paging_file_mapping(HANDLE file_handle) {
const bool res = CloseHandle(
file_handle // hObject
);
if (!res) {
fatal("Failed to close paging file handle (%d)", GetLastError());
}
}
void ZMapper::split_placeholder(uintptr_t addr, size_t size) {
const bool res = VirtualFree(
(void*)addr, // lpAddress
size, // dwSize
MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER // dwFreeType
);
if (!res) {
fatal_error("Failed to split placeholder", addr, size);
}
}
void ZMapper::coalesce_placeholders(uintptr_t addr, size_t size) {
const bool res = VirtualFree(
(void*)addr, // lpAddress
size, // dwSize
MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS // dwFreeType
);
if (!res) {
fatal_error("Failed to coalesce placeholders", addr, size);
}
}
void ZMapper::map_view_replace_placeholder(HANDLE file_handle, uintptr_t file_offset, uintptr_t addr, size_t size) {
void* const res = ZSyscall::MapViewOfFile3(
file_handle, // FileMapping
GetCurrentProcess(), // ProcessHandle
(void*)addr, // BaseAddress
file_offset, // Offset
size, // ViewSize
MEM_REPLACE_PLACEHOLDER, // AllocationType
PAGE_READWRITE, // PageProtection
NULL, // ExtendedParameters
0 // ParameterCount
);
if (res == NULL) {
fatal_error("Failed to map memory", addr, size);
}
}
void ZMapper::unmap_view_preserve_placeholder(uintptr_t addr, size_t size) {
const bool res = ZSyscall::UnmapViewOfFile2(
GetCurrentProcess(), // ProcessHandle
(void*)addr, // BaseAddress
MEM_PRESERVE_PLACEHOLDER // UnmapFlags
);
if (!res) {
fatal_error("Failed to unmap memory", addr, size);
}
}

@ -0,0 +1,85 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_WINDOWS_GC_Z_ZMAPPER_WINDOWS_HPP
#define OS_WINDOWS_GC_Z_ZMAPPER_WINDOWS_HPP
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
#include <Windows.h>
class ZMapper : public AllStatic {
private:
// Create paging file mapping
static HANDLE create_paging_file_mapping(size_t size);
// Commit paging file mapping
static bool commit_paging_file_mapping(HANDLE file_handle, uintptr_t file_offset, size_t size);
// Map a view anywhere without a placeholder
static uintptr_t map_view_no_placeholder(HANDLE file_handle, uintptr_t file_offset, size_t size);
// Unmap a view without preserving a placeholder
static void unmap_view_no_placeholder(uintptr_t addr, size_t size);
// Commit memory covering the given virtual address range
static uintptr_t commit(uintptr_t addr, size_t size);
public:
// Reserve memory with a placeholder
static uintptr_t reserve(uintptr_t addr, size_t size);
// Unreserve memory
static void unreserve(uintptr_t addr, size_t size);
// Create and commit paging file mapping
static HANDLE create_and_commit_paging_file_mapping(size_t size);
// Close paging file mapping
static void close_paging_file_mapping(HANDLE file_handle);
// Split a placeholder
//
// A view can only replace an entire placeholder, so placeholders need to be
// split and coalesced to be the exact size of the new views.
// [addr, addr + size) needs to be a proper sub-placeholder of an existing
// placeholder.
static void split_placeholder(uintptr_t addr, size_t size);
// Coalesce a placeholder
//
// [addr, addr + size) is the new placeholder. A sub-placeholder needs to
// exist within that range.
static void coalesce_placeholders(uintptr_t addr, size_t size);
// Map a view of the file handle and replace the placeholder covering the
// given virtual address range
static void map_view_replace_placeholder(HANDLE file_handle, uintptr_t file_offset, uintptr_t addr, size_t size);
// Unmap the view and reinstate a placeholder covering the given virtual
// address range
static void unmap_view_preserve_placeholder(uintptr_t addr, size_t size);
};
#endif // OS_WINDOWS_GC_Z_ZMAPPER_WINDOWS_HPP

@ -0,0 +1,42 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zNUMA.hpp"
void ZNUMA::initialize_platform() {
_enabled = false;
}
uint32_t ZNUMA::count() {
return 1;
}
uint32_t ZNUMA::id() {
return 0;
}
uint32_t ZNUMA::memory_id(uintptr_t addr) {
// NUMA support not enabled, assume everything belongs to node zero
return 0;
}

@ -0,0 +1,219 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLargePages.inline.hpp"
#include "gc/z/zMapper_windows.hpp"
#include "gc/z/zPhysicalMemory.inline.hpp"
#include "gc/z/zPhysicalMemoryBacking_windows.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
bool ZPhysicalMemoryBacking::is_initialized() const {
return true;
}
void ZPhysicalMemoryBacking::warn_commit_limits(size_t max) const {
// Does nothing
}
bool ZPhysicalMemoryBacking::supports_uncommit() {
assert(!is_init_completed(), "Invalid state");
assert(_file.size() >= ZGranuleSize, "Invalid size");
// Test if uncommit is supported by uncommitting and then re-committing a granule
return commit(uncommit(ZGranuleSize)) == ZGranuleSize;
}
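// Note on the probe above (illustrative): uncommit() returns how many bytes were actually
// punched out of the file and commit() returns how many were re-committed, so the round
// trip only evaluates to ZGranuleSize when both directions fully succeed on a single
// granule; any partial result makes the probe report false.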
size_t ZPhysicalMemoryBacking::commit(size_t size) {
size_t committed = 0;
// Fill holes in the backing file
while (committed < size) {
size_t allocated = 0;
const size_t remaining = size - committed;
const uintptr_t start = _uncommitted.alloc_from_front_at_most(remaining, &allocated);
if (start == UINTPTR_MAX) {
// No holes to commit
break;
}
// Try commit hole
const size_t filled = _file.commit(start, allocated);
if (filled > 0) {
// Successful or partially successful

_committed.free(start, filled);
committed += filled;
}
if (filled < allocated) {
// Failed or partially failed
_uncommitted.free(start + filled, allocated - filled);
return committed;
}
}
// Expand backing file
if (committed < size) {
const size_t remaining = size - committed;
const uintptr_t start = _file.size();
const size_t expanded = _file.commit(start, remaining);
if (expanded > 0) {
// Successful or partially successful
_committed.free(start, expanded);
committed += expanded;
}
}
return committed;
}
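// Worked example (illustrative, sizes assumed): if the backing file is four granules long
// and granule #1 was previously uncommitted, commit(2 * ZGranuleSize) first re-commits the
// hole at granule #1 taken from _uncommitted, then expands the file by one more granule at
// offset _file.size(), returning 2 * ZGranuleSize when both steps succeed and a smaller
// value if either one is cut short.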
size_t ZPhysicalMemoryBacking::uncommit(size_t size) {
size_t uncommitted = 0;
// Punch holes in backing file
while (uncommitted < size) {
size_t allocated = 0;
const size_t remaining = size - uncommitted;
const uintptr_t start = _committed.alloc_from_back_at_most(remaining, &allocated);
assert(start != UINTPTR_MAX, "Allocation should never fail");
// Try punch hole
const size_t punched = _file.uncommit(start, allocated);
if (punched > 0) {
// Successful or partially successful
_uncommitted.free(start, punched);
uncommitted += punched;
}
if (punched < allocated) {
// Failed or partially failed
_committed.free(start + punched, allocated - punched);
return uncommitted;
}
}
return uncommitted;
}
ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {
assert(is_aligned(size, ZGranuleSize), "Invalid size");
ZPhysicalMemory pmem;
// Allocate segments
for (size_t allocated = 0; allocated < size; allocated += ZGranuleSize) {
const uintptr_t start = _committed.alloc_from_front(ZGranuleSize);
assert(start != UINTPTR_MAX, "Allocation should never fail");
pmem.add_segment(ZPhysicalMemorySegment(start, ZGranuleSize));
}
return pmem;
}
void ZPhysicalMemoryBacking::free(const ZPhysicalMemory& pmem) {
const size_t nsegments = pmem.nsegments();
// Free segments
for (size_t i = 0; i < nsegments; i++) {
const ZPhysicalMemorySegment& segment = pmem.segment(i);
_committed.free(segment.start(), segment.size());
}
}
void ZPhysicalMemoryBacking::pretouch_view(uintptr_t addr, size_t size) const {
const size_t page_size = ZLargePages::is_explicit() ? os::large_page_size() : os::vm_page_size();
os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
}
void ZPhysicalMemoryBacking::map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
const size_t nsegments = pmem.nsegments();
size_t size = 0;
// Map segments
for (size_t i = 0; i < nsegments; i++) {
const ZPhysicalMemorySegment& segment = pmem.segment(i);
_file.map(addr + size, segment.size(), segment.start());
size += segment.size();
}
}
void ZPhysicalMemoryBacking::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
_file.unmap(addr, pmem.size());
}
uintptr_t ZPhysicalMemoryBacking::nmt_address(uintptr_t offset) const {
// From an NMT point of view we treat the first heap view (marked0) as committed
return ZAddress::marked0(offset);
}
void ZPhysicalMemoryBacking::pretouch(uintptr_t offset, size_t size) const {
if (ZVerifyViews) {
// Pre-touch good view
pretouch_view(ZAddress::good(offset), size);
} else {
// Pre-touch all views
pretouch_view(ZAddress::marked0(offset), size);
pretouch_view(ZAddress::marked1(offset), size);
pretouch_view(ZAddress::remapped(offset), size);
}
}
void ZPhysicalMemoryBacking::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
if (ZVerifyViews) {
// Map good view
map_view(pmem, ZAddress::good(offset));
} else {
// Map all views
map_view(pmem, ZAddress::marked0(offset));
map_view(pmem, ZAddress::marked1(offset));
map_view(pmem, ZAddress::remapped(offset));
}
}
void ZPhysicalMemoryBacking::unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
if (ZVerifyViews) {
// Unmap good view
unmap_view(pmem, ZAddress::good(offset));
} else {
// Unmap all views
unmap_view(pmem, ZAddress::marked0(offset));
unmap_view(pmem, ZAddress::marked1(offset));
unmap_view(pmem, ZAddress::remapped(offset));
}
}
void ZPhysicalMemoryBacking::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
// Map good view
assert(ZVerifyViews, "Should be enabled");
map_view(pmem, ZAddress::good(offset));
}
void ZPhysicalMemoryBacking::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
// Unmap good view
assert(ZVerifyViews, "Should be enabled");
unmap_view(pmem, ZAddress::good(offset));
}

@ -0,0 +1,65 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_WINDOWS_GC_Z_ZPHYSICALMEMORYBACKING_WINDOWS_HPP
#define OS_WINDOWS_GC_Z_ZPHYSICALMEMORYBACKING_WINDOWS_HPP
#include "gc/z/zBackingFile_windows.hpp"
#include "gc/z/zMemory.hpp"
class ZPhysicalMemory;
class ZPhysicalMemoryBacking {
private:
ZBackingFile _file;
ZMemoryManager _committed;
ZMemoryManager _uncommitted;
void pretouch_view(uintptr_t addr, size_t size) const;
void map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
void unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
public:
bool is_initialized() const;
void warn_commit_limits(size_t max) const;
bool supports_uncommit();
size_t commit(size_t size);
size_t uncommit(size_t size);
ZPhysicalMemory alloc(size_t size);
void free(const ZPhysicalMemory& pmem);
uintptr_t nmt_address(uintptr_t offset) const;
void pretouch(uintptr_t offset, size_t size) const;
void map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
void unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
void debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
void debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
};
#endif // OS_WINDOWS_GC_Z_ZPHYSICALMEMORYBACKING_WINDOWS_HPP

@ -0,0 +1,58 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zSyscall_windows.hpp"
#include "logging/log.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
ZSyscall::CreateFileMappingWFn ZSyscall::CreateFileMappingW;
ZSyscall::VirtualAlloc2Fn ZSyscall::VirtualAlloc2;
ZSyscall::VirtualFreeExFn ZSyscall::VirtualFreeEx;
ZSyscall::MapViewOfFile3Fn ZSyscall::MapViewOfFile3;
ZSyscall::UnmapViewOfFile2Fn ZSyscall::UnmapViewOfFile2;
template <typename Fn>
static void lookup_symbol(Fn*& fn, const char* library, const char* symbol) {
char ebuf[1024];
void* const handle = os::dll_load(library, ebuf, sizeof(ebuf));
if (handle == NULL) {
log_error(gc)("Failed to load library: %s", library);
vm_exit_during_initialization("ZGC requires Windows version 1803 or later");
}
fn = reinterpret_cast<Fn*>(os::dll_lookup(handle, symbol));
if (fn == NULL) {
log_error(gc)("Failed to lookup symbol: %s", symbol);
vm_exit_during_initialization("ZGC requires Windows version 1803 or later");
}
}
void ZSyscall::initialize() {
lookup_symbol(CreateFileMappingW, "KernelBase", "CreateFileMappingW");
lookup_symbol(VirtualAlloc2, "KernelBase", "VirtualAlloc2");
lookup_symbol(VirtualFreeEx, "KernelBase", "VirtualFreeEx");
lookup_symbol(MapViewOfFile3, "KernelBase", "MapViewOfFile3");
lookup_symbol(UnmapViewOfFile2, "KernelBase", "UnmapViewOfFile2");
}
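// Minimal sketch (assumed helper name, not part of ZSyscall): the same fail-fast probe
// expressed with the raw Win32 calls that os::dll_load()/os::dll_lookup() wrap on Windows.
// A real caller would cast the returned FARPROC to a typed function pointer, as the
// ZSyscall *Fn typedefs do.
#include <Windows.h>
#include <stdio.h>

static FARPROC require_symbol(const char* library, const char* symbol) {
  HMODULE handle = LoadLibraryA(library);            // e.g. "KernelBase"
  FARPROC fn = (handle != NULL) ? GetProcAddress(handle, symbol) : NULL;
  if (fn == NULL) {
    // The placeholder APIs are absent on Windows versions older than 1803
    fprintf(stderr, "Missing %s!%s\n", library, symbol);
    ExitProcess(1);
  }
  return fn;
}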

@ -0,0 +1,50 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef OS_WINDOWS_GC_Z_ZSYSCALL_WINDOWS_HPP
#define OS_WINDOWS_GC_Z_ZSYSCALL_WINDOWS_HPP
#include "utilities/globalDefinitions.hpp"
#include <Windows.h>
#include <Memoryapi.h>
class ZSyscall {
private:
typedef HANDLE (*CreateFileMappingWFn)(HANDLE, LPSECURITY_ATTRIBUTES, DWORD, DWORD, DWORD, LPCWSTR);
typedef PVOID (*VirtualAlloc2Fn)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, MEM_EXTENDED_PARAMETER*, ULONG);
typedef BOOL (*VirtualFreeExFn)(HANDLE, LPVOID, SIZE_T, DWORD);
typedef PVOID (*MapViewOfFile3Fn)(HANDLE, HANDLE, PVOID, ULONG64, SIZE_T, ULONG, ULONG, MEM_EXTENDED_PARAMETER*, ULONG);
typedef BOOL (*UnmapViewOfFile2Fn)(HANDLE, PVOID, ULONG);
public:
static CreateFileMappingWFn CreateFileMappingW;
static VirtualAlloc2Fn VirtualAlloc2;
static VirtualFreeExFn VirtualFreeEx;
static MapViewOfFile3Fn MapViewOfFile3;
static UnmapViewOfFile2Fn UnmapViewOfFile2;
static void initialize();
};
#endif // OS_WINDOWS_GC_Z_ZSYSCALL_WINDOWS_HPP

@ -0,0 +1,40 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zUtils.hpp"
#include "utilities/debug.hpp"
#include <malloc.h>
uintptr_t ZUtils::alloc_aligned(size_t alignment, size_t size) {
void* const res = _aligned_malloc(size, alignment);
if (res == NULL) {
fatal("_aligned_malloc failed");
}
memset(res, 0, size);
return (uintptr_t)res;
}
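// Note (illustrative): memory returned by _aligned_malloc() must be released with
// _aligned_free(), never plain free(). A minimal usage sketch, with the size and
// alignment values chosen purely as examples:
//   void* p = _aligned_malloc(4096, 2 * 1024 * 1024);
//   if (p != NULL) {
//     _aligned_free(p);
//   }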

@ -0,0 +1,149 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zMapper_windows.hpp"
#include "gc/z/zVirtualMemory.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
static void split_placeholder(uintptr_t start, size_t size) {
ZMapper::split_placeholder(ZAddress::marked0(start), size);
ZMapper::split_placeholder(ZAddress::marked1(start), size);
ZMapper::split_placeholder(ZAddress::remapped(start), size);
}
static void coalesce_placeholders(uintptr_t start, size_t size) {
ZMapper::coalesce_placeholders(ZAddress::marked0(start), size);
ZMapper::coalesce_placeholders(ZAddress::marked1(start), size);
ZMapper::coalesce_placeholders(ZAddress::remapped(start), size);
}
static void split_into_placeholder_granules(uintptr_t start, size_t size) {
for (uintptr_t addr = start; addr < start + size; addr += ZGranuleSize) {
split_placeholder(addr, ZGranuleSize);
}
}
static void coalesce_into_one_placeholder(uintptr_t start, size_t size) {
assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
if (size > ZGranuleSize) {
coalesce_placeholders(start, size);
}
}
static void create_callback(const ZMemory* area) {
assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
coalesce_into_one_placeholder(area->start(), area->size());
}
static void destroy_callback(const ZMemory* area) {
assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
// Don't try to split the last granule - VirtualFree will fail
split_into_placeholder_granules(area->start(), area->size() - ZGranuleSize);
}
static void shrink_from_front_callback(const ZMemory* area, size_t size) {
assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
split_into_placeholder_granules(area->start(), size);
}
static void shrink_from_back_callback(const ZMemory* area, size_t size) {
assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
// Don't try to split the last granule - VirtualFree will fail
split_into_placeholder_granules(area->end() - size, size - ZGranuleSize);
}
static void grow_from_front_callback(const ZMemory* area, size_t size) {
assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
coalesce_into_one_placeholder(area->start() - size, area->size() + size);
}
static void grow_from_back_callback(const ZMemory* area, size_t size) {
assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
coalesce_into_one_placeholder(area->start(), area->size() + size);
}
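// Worked example (illustrative): split_into_placeholder_granules() issues one split per
// granule it walks over, and each split carves a single granule off the front of the
// remaining placeholder. An n-granule area therefore needs only n - 1 splits to end up as
// n granule-sized placeholders, which is why destroy_callback() and
// shrink_from_back_callback() stop ZGranuleSize short of the end instead of asking
// VirtualFree to split off the entire last placeholder.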
void ZVirtualMemoryManager::initialize_os() {
// Each reserved virtual memory address area registered in _manager is
// exactly covered by a single placeholder. Callbacks are installed so
// that whenever a memory area changes, the corresponding placeholder
// is adjusted.
//
// The create and grow callbacks are called when virtual memory is
// returned to the memory manager. The new memory area is then covered
// by a new single placeholder.
//
// The destroy and shrink callbacks are called when virtual memory is
// allocated from the memory manager. The memory area is then split
// into granule-sized placeholders.
//
// See comment in zMapper_windows.cpp explaining why placeholders are
// split into ZGranuleSize sized placeholders.
ZMemoryManager::Callbacks callbacks;
callbacks._create = &create_callback;
callbacks._destroy = &destroy_callback;
callbacks._shrink_from_front = &shrink_from_front_callback;
callbacks._shrink_from_back = &shrink_from_back_callback;
callbacks._grow_from_front = &grow_from_front_callback;
callbacks._grow_from_back = &grow_from_back_callback;
_manager.register_callbacks(callbacks);
}
bool ZVirtualMemoryManager::reserve_contiguous_platform(uintptr_t start, size_t size) {
assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
// Reserve address views
const uintptr_t marked0 = ZAddress::marked0(start);
const uintptr_t marked1 = ZAddress::marked1(start);
const uintptr_t remapped = ZAddress::remapped(start);
// Reserve address space
if (ZMapper::reserve(marked0, size) != marked0) {
return false;
}
if (ZMapper::reserve(marked1, size) != marked1) {
ZMapper::unreserve(marked0, size);
return false;
}
if (ZMapper::reserve(remapped, size) != remapped) {
ZMapper::unreserve(marked0, size);
ZMapper::unreserve(marked1, size);
return false;
}
// Register address views with native memory tracker
nmt_reserve(marked0, size);
nmt_reserve(marked1, size);
nmt_reserve(remapped, size);
return true;
}

@ -30,6 +30,7 @@
#include "pdh_interface.hpp"
#include "runtime/os_perf.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include CPU_HEADER(vm_version_ext)
#include <math.h>
@ -1355,8 +1356,7 @@ class NetworkPerformanceInterface::NetworkPerformance : public CHeapObj<mtIntern
bool _iphlp_attached;
NetworkPerformance();
NetworkPerformance(const NetworkPerformance& rhs); // no impl
NetworkPerformance& operator=(const NetworkPerformance& rhs); // no impl
NONCOPYABLE(NetworkPerformance);
bool initialize();
~NetworkPerformance();
int network_utilization(NetworkInterface** network_interfaces) const;

@ -1591,6 +1591,11 @@ void os::print_os_info_brief(outputStream* st) {
os::print_os_info(st);
}
void os::win32::print_uptime_info(outputStream* st) {
unsigned long long ticks = GetTickCount64();
os::print_dhm(st, "OS uptime:", ticks/1000);
}
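// Note (illustrative): GetTickCount64() reports milliseconds since boot, so dividing by
// 1000 hands os::print_dhm() a value in seconds; for example 90,061,000 ms of uptime is
// 90,061 s, i.e. 1 day, 1 hour, 1 minute (plus a 1 second remainder).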
void os::print_os_info(outputStream* st) {
#ifdef ASSERT
char buffer[1024];
@ -1604,6 +1609,8 @@ void os::print_os_info(outputStream* st) {
st->print("OS:");
os::win32::print_windows_version(st);
os::win32::print_uptime_info(st);
#ifdef _LP64
VM_Version::print_platform_virtualization_info(st);
#endif

@ -55,6 +55,7 @@ class win32 {
static bool _has_exit_bug;
static void print_windows_version(outputStream* st);
static void print_uptime_info(outputStream* st);
public:
// Windows-specific interface:
@ -190,9 +191,7 @@ class PlatformParker : public CHeapObj<mtSynchronizer> {
// Platform specific implementations that underpin VM Mutex/Monitor classes
class PlatformMutex : public CHeapObj<mtSynchronizer> {
// Disable copying
PlatformMutex(const PlatformMutex&);
PlatformMutex& operator=(const PlatformMutex&);
NONCOPYABLE(PlatformMutex);
protected:
CRITICAL_SECTION _mutex; // Native mutex for locking
@ -208,9 +207,7 @@ class PlatformMutex : public CHeapObj<mtSynchronizer> {
class PlatformMonitor : public PlatformMutex {
private:
CONDITION_VARIABLE _cond; // Native condition variable for blocking
// Disable copying
PlatformMonitor(const PlatformMonitor&);
PlatformMonitor& operator=(const PlatformMonitor&);
NONCOPYABLE(PlatformMonitor);
public:
PlatformMonitor();

@ -26,15 +26,14 @@
#define OS_WINDOWS_SEMAPHORE_WINDOWS_HPP
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
#include <windows.h>
class WindowsSemaphore : public CHeapObj<mtInternal> {
HANDLE _semaphore;
// Prevent copying and assignment.
WindowsSemaphore(const WindowsSemaphore&);
WindowsSemaphore& operator=(const WindowsSemaphore&);
NONCOPYABLE(WindowsSemaphore);
public:
WindowsSemaphore(uint value = 0);

@ -1568,6 +1568,8 @@ void ArchDesc::declareClasses(FILE *fp) {
fprintf(fp," assert(operand_index < _num_opnds, \"invalid _opnd_array index\");\n");
fprintf(fp," _opnd_array[operand_index] = operand;\n");
fprintf(fp," }\n");
fprintf(fp," virtual uint rule() const { return %s_rule; }\n",
instr->_ident);
fprintf(fp,"private:\n");
if ( instr->is_ideal_jump() ) {
fprintf(fp," virtual void add_case_label(int index_num, Label* blockLabel) {\n");
@ -1579,8 +1581,6 @@ void ArchDesc::declareClasses(FILE *fp) {
}
out_RegMask(fp); // output register mask
fprintf(fp," virtual uint rule() const { return %s_rule; }\n",
instr->_ident);
// If this instruction contains a labelOper
// Declare Node::methods that set operand Label's contents

@ -1707,7 +1707,7 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
// For CallSite objects add a dependency for invalidation of the optimization.
if (field->is_call_site_target()) {
ciCallSite* call_site = const_oop->as_call_site();
if (!call_site->is_constant_call_site()) {
if (!call_site->is_fully_initialized_constant_call_site()) {
ciMethodHandle* target = field_value.as_object()->as_method_handle();
dependency_recorder()->assert_call_site_target_value(call_site, target);
}
@ -1725,6 +1725,23 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
Value replacement = !needs_patching ? _memory->load(load) : load;
if (replacement != load) {
assert(replacement->is_linked() || !replacement->can_be_linked(), "should already be linked");
// Writing an (integer) value to a boolean, byte, char or short field includes an implicit narrowing
// conversion. Emit an explicit conversion here to get the correct field value after the write.
BasicType bt = field->type()->basic_type();
switch (bt) {
case T_BOOLEAN:
case T_BYTE:
replacement = append(new Convert(Bytecodes::_i2b, replacement, as_ValueType(bt)));
break;
case T_CHAR:
replacement = append(new Convert(Bytecodes::_i2c, replacement, as_ValueType(bt)));
break;
case T_SHORT:
replacement = append(new Convert(Bytecodes::_i2s, replacement, as_ValueType(bt)));
break;
default:
break;
}
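// Worked example (illustrative): for a byte field b, the sequence "b = 257; int x = b;"
// stores 257 but must reload 1, because the field write implies an i2b-style truncation
// to 8 bits (257 & 0xFF == 1; likewise 511 truncates to 0xFF, i.e. -1 after sign
// extension). Forwarding the stored Value without the explicit Convert added above would
// incorrectly hand 257 to the subsequent load.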
push(type, replacement);
} else {
push(type, append(load));

@ -29,8 +29,18 @@
// ciCallSite
bool ciCallSite::is_constant_call_site() {
return klass()->is_subclass_of(CURRENT_ENV->ConstantCallSite_klass());
bool ciCallSite::is_fully_initialized_constant_call_site() {
if (klass()->is_subclass_of(CURRENT_ENV->ConstantCallSite_klass())) {
bool is_fully_initialized = _is_fully_initialized_cache;
if (!is_fully_initialized) { // changes monotonically: false => true
VM_ENTRY_MARK;
is_fully_initialized = (java_lang_invoke_ConstantCallSite::is_frozen(get_oop()) != JNI_FALSE);
_is_fully_initialized_cache = is_fully_initialized; // cache updated value
}
return is_fully_initialized;
} else {
return false;
}
}
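// Note (illustrative): caching without synchronization is safe here because the underlying
// isFrozen flag only ever transitions false -> true. A stale cached false merely triggers
// another read of the field under VM_ENTRY_MARK, while a cached true was necessarily
// observed from the object itself, so the cache can never report a call site as frozen
// before it actually is.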
// ------------------------------------------------------------------

@ -31,13 +31,16 @@
//
// The class represents a java.lang.invoke.CallSite object.
class ciCallSite : public ciInstance {
public:
ciCallSite(instanceHandle h_i) : ciInstance(h_i) {}
private:
bool _is_fully_initialized_cache;
public:
ciCallSite(instanceHandle h_i) : ciInstance(h_i), _is_fully_initialized_cache(false) {}
// What kind of ciObject is this?
bool is_call_site() const { return true; }
bool is_constant_call_site();
bool is_fully_initialized_constant_call_site();
// Return the target MethodHandle of this CallSite.
ciMethodHandle* get_target() const;

@ -55,6 +55,7 @@
#include "oops/metadata.hpp"
#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/recordComponent.hpp"
#include "oops/symbol.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
@ -3211,6 +3212,173 @@ u2 ClassFileParser::parse_classfile_nest_members_attribute(const ClassFileStream
return length;
}
// Record {
// u2 attribute_name_index;
// u4 attribute_length;
// u2 components_count;
// component_info components[components_count];
// }
// component_info {
// u2 name_index;
// u2 descriptor_index;
// u2 attributes_count;
// attribute_info attributes[attributes_count];
// }
u2 ClassFileParser::parse_classfile_record_attribute(const ClassFileStream* const cfs,
const ConstantPool* cp,
const u1* const record_attribute_start,
TRAPS) {
const u1* const current_mark = cfs->current();
int components_count = 0;
unsigned int calculate_attr_size = 0;
if (record_attribute_start != NULL) {
cfs->set_current(record_attribute_start);
cfs->guarantee_more(2, CHECK_0); // num of components
components_count = (int)cfs->get_u2_fast();
calculate_attr_size = 2;
}
Array<RecordComponent*>* const record_components =
MetadataFactory::new_array<RecordComponent*>(_loader_data, components_count, NULL, CHECK_0);
_record_components = record_components;
for (int x = 0; x < components_count; x++) {
cfs->guarantee_more(6, CHECK_0); // name_index, descriptor_index, attributes_count
const u2 name_index = cfs->get_u2_fast();
check_property(valid_symbol_at(name_index),
"Invalid constant pool index %u for name in Record attribute in class file %s",
name_index, CHECK_0);
const Symbol* const name = cp->symbol_at(name_index);
verify_legal_field_name(name, CHECK_0);
const u2 descriptor_index = cfs->get_u2_fast();
check_property(valid_symbol_at(descriptor_index),
"Invalid constant pool index %u for descriptor in Record attribute in class file %s",
descriptor_index, CHECK_0);
const Symbol* const descr = cp->symbol_at(descriptor_index);
verify_legal_field_signature(name, descr, CHECK_0);
const u2 attributes_count = cfs->get_u2_fast();
calculate_attr_size += 6;
u2 generic_sig_index = 0;
const u1* runtime_visible_annotations = NULL;
int runtime_visible_annotations_length = 0;
const u1* runtime_invisible_annotations = NULL;
int runtime_invisible_annotations_length = 0;
bool runtime_invisible_annotations_exists = false;
const u1* runtime_visible_type_annotations = NULL;
int runtime_visible_type_annotations_length = 0;
const u1* runtime_invisible_type_annotations = NULL;
int runtime_invisible_type_annotations_length = 0;
bool runtime_invisible_type_annotations_exists = false;
// Expected attributes for record components are Signature, Runtime(In)VisibleAnnotations,
// and Runtime(In)VisibleTypeAnnotations. Other attributes are ignored.
for (int y = 0; y < attributes_count; y++) {
cfs->guarantee_more(6, CHECK_0); // attribute_name_index, attribute_length
const u2 attribute_name_index = cfs->get_u2_fast();
const u4 attribute_length = cfs->get_u4_fast();
calculate_attr_size += 6;
check_property(
valid_symbol_at(attribute_name_index),
"Invalid Record attribute name index %u in class file %s",
attribute_name_index, CHECK_0);
const Symbol* const attribute_name = cp->symbol_at(attribute_name_index);
if (attribute_name == vmSymbols::tag_signature()) {
if (generic_sig_index != 0) {
classfile_parse_error(
"Multiple Signature attributes for Record component in class file %s",
CHECK_0);
}
if (attribute_length != 2) {
classfile_parse_error(
"Invalid Signature attribute length %u in Record component in class file %s",
attribute_length, CHECK_0);
}
generic_sig_index = parse_generic_signature_attribute(cfs, CHECK_0);
} else if (attribute_name == vmSymbols::tag_runtime_visible_annotations()) {
if (runtime_visible_annotations != NULL) {
classfile_parse_error(
"Multiple RuntimeVisibleAnnotations attributes for Record component in class file %s", CHECK_0);
}
runtime_visible_annotations_length = attribute_length;
runtime_visible_annotations = cfs->current();
assert(runtime_visible_annotations != NULL, "null record component visible annotation");
cfs->guarantee_more(runtime_visible_annotations_length, CHECK_0);
cfs->skip_u1_fast(runtime_visible_annotations_length);
} else if (attribute_name == vmSymbols::tag_runtime_invisible_annotations()) {
if (runtime_invisible_annotations_exists) {
classfile_parse_error(
"Multiple RuntimeInvisibleAnnotations attributes for Record component in class file %s", CHECK_0);
}
runtime_invisible_annotations_exists = true;
if (PreserveAllAnnotations) {
runtime_invisible_annotations_length = attribute_length;
runtime_invisible_annotations = cfs->current();
assert(runtime_invisible_annotations != NULL, "null record component invisible annotation");
}
cfs->skip_u1(attribute_length, CHECK_0);
} else if (attribute_name == vmSymbols::tag_runtime_visible_type_annotations()) {
if (runtime_visible_type_annotations != NULL) {
classfile_parse_error(
"Multiple RuntimeVisibleTypeAnnotations attributes for Record component in class file %s", CHECK_0);
}
runtime_visible_type_annotations_length = attribute_length;
runtime_visible_type_annotations = cfs->current();
assert(runtime_visible_type_annotations != NULL, "null record component visible type annotation");
cfs->guarantee_more(runtime_visible_type_annotations_length, CHECK_0);
cfs->skip_u1_fast(runtime_visible_type_annotations_length);
} else if (attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
if (runtime_invisible_type_annotations_exists) {
classfile_parse_error(
"Multiple RuntimeInvisibleTypeAnnotations attributes for Record component in class file %s", CHECK_0);
}
runtime_invisible_type_annotations_exists = true;
if (PreserveAllAnnotations) {
runtime_invisible_type_annotations_length = attribute_length;
runtime_invisible_type_annotations = cfs->current();
assert(runtime_invisible_type_annotations != NULL, "null record component invisible type annotation");
}
cfs->skip_u1(attribute_length, CHECK_0);
} else {
// Skip unknown attributes
cfs->skip_u1(attribute_length, CHECK_0);
}
calculate_attr_size += attribute_length;
} // End of attributes For loop
AnnotationArray* annotations = assemble_annotations(runtime_visible_annotations,
runtime_visible_annotations_length,
runtime_invisible_annotations,
runtime_invisible_annotations_length,
CHECK_0);
AnnotationArray* type_annotations = assemble_annotations(runtime_visible_type_annotations,
runtime_visible_type_annotations_length,
runtime_invisible_type_annotations,
runtime_invisible_type_annotations_length,
CHECK_0);
RecordComponent* record_component =
RecordComponent::allocate(_loader_data, name_index, descriptor_index,
attributes_count, generic_sig_index,
annotations, type_annotations, CHECK_0);
record_components->at_put(x, record_component);
} // End of component processing loop
// Restore buffer's current position.
cfs->set_current(current_mark);
return calculate_attr_size;
}
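// Worked example (illustrative): for a Record attribute describing two components that
// carry no per-component attributes, calculate_attr_size is
// 2 (components_count) + 2 * 6 (name_index + descriptor_index + attributes_count) = 14
// bytes, which must match the attribute_length that parse_classfile_attributes() later
// checks against when _need_verify is set.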
void ClassFileParser::parse_classfile_synthetic_attribute(TRAPS) {
set_class_synthetic_flag(true);
}
@ -3308,6 +3476,12 @@ void ClassFileParser::parse_classfile_bootstrap_methods_attribute(const ClassFil
CHECK);
}
bool ClassFileParser::supports_records() {
return _major_version == JAVA_14_VERSION &&
_minor_version == JAVA_PREVIEW_MINOR_VERSION &&
Arguments::enable_preview();
}
void ClassFileParser::parse_classfile_attributes(const ClassFileStream* const cfs,
ConstantPool* cp,
ClassFileParser::ClassAnnotationCollector* parsed_annotations,
@ -3326,6 +3500,7 @@ void ClassFileParser::parse_classfile_attributes(const ClassFileStream* const cf
bool parsed_innerclasses_attribute = false;
bool parsed_nest_members_attribute = false;
bool parsed_nest_host_attribute = false;
bool parsed_record_attribute = false;
bool parsed_enclosingmethod_attribute = false;
bool parsed_bootstrap_methods_attribute = false;
const u1* runtime_visible_annotations = NULL;
@ -3345,6 +3520,8 @@ void ClassFileParser::parse_classfile_attributes(const ClassFileStream* const cf
u2 enclosing_method_method_index = 0;
const u1* nest_members_attribute_start = NULL;
u4 nest_members_attribute_length = 0;
const u1* record_attribute_start = NULL;
u4 record_attribute_length = 0;
// Iterate over attributes
while (attributes_count--) {
@ -3527,6 +3704,38 @@ void ClassFileParser::parse_classfile_attributes(const ClassFileStream* const cf
"Nest-host class_info_index %u has bad constant type in class file %s",
class_info_index, CHECK);
_nest_host = class_info_index;
} else if (_major_version >= JAVA_14_VERSION) {
if (tag == vmSymbols::tag_record()) {
// Skip over Record attribute if not supported or if super class is
// not java.lang.Record.
if (supports_records() &&
cp->klass_name_at(_super_class_index) == vmSymbols::java_lang_Record()) {
if (parsed_record_attribute) {
classfile_parse_error("Multiple Record attributes in class file %s", CHECK);
}
// Check that class is final and not abstract.
if (!_access_flags.is_final() || _access_flags.is_abstract()) {
classfile_parse_error("Record attribute in non-final or abstract class file %s", CHECK);
}
parsed_record_attribute = true;
record_attribute_start = cfs->current();
record_attribute_length = attribute_length;
} else if (log_is_enabled(Info, class, record)) {
// Log why the Record attribute was ignored. Note that if the
// class file version is 58.65535 and --enable-preview wasn't
// specified then a java.lang.UnsupportedClassVersionError
// exception would have been thrown.
ResourceMark rm(THREAD);
log_info(class, record)("Ignoring Record attribute in class %s because %s",
_class_name->as_C_string(),
supports_records() ? "super type is not java.lang.Record" :
"class file version is not 58.65535");
}
cfs->skip_u1(attribute_length, CHECK);
} else {
// Unknown attribute
cfs->skip_u1(attribute_length, CHECK);
}
} else {
// Unknown attribute
cfs->skip_u1(attribute_length, CHECK);
@ -3578,6 +3787,19 @@ void ClassFileParser::parse_classfile_attributes(const ClassFileStream* const cf
}
}
if (parsed_record_attribute) {
const unsigned int calculated_attr_length = parse_classfile_record_attribute(
cfs,
cp,
record_attribute_start,
CHECK);
if (_need_verify) {
guarantee_property(record_attribute_length == calculated_attr_length,
"Record attribute has wrong length in class file %s",
CHECK);
}
}
if (_max_bootstrap_specifier_index >= 0) {
guarantee_property(parsed_bootstrap_methods_attribute,
"Missing BootstrapMethods attribute in class file %s", CHECK);
@ -3632,7 +3854,8 @@ void ClassFileParser::create_combined_annotations(TRAPS) {
// Transfer ownership of metadata allocated to the InstanceKlass.
void ClassFileParser::apply_parsed_class_metadata(
InstanceKlass* this_klass,
int java_fields_count, TRAPS) {
int java_fields_count,
TRAPS) {
assert(this_klass != NULL, "invariant");
_cp->set_pool_holder(this_klass);
@ -3644,6 +3867,7 @@ void ClassFileParser::apply_parsed_class_metadata(
this_klass->set_nest_host_index(_nest_host);
this_klass->set_local_interfaces(_local_interfaces);
this_klass->set_annotations(_combined_annotations);
this_klass->set_record_components(_record_components);
// Delay the setting of _transitive_interfaces until after initialize_supers() in
// fill_instance_klass(). It is because the _transitive_interfaces may be shared with
// its _super. If an OOM occurs while loading the current klass, its _super field
@ -4514,6 +4738,7 @@ static Array<InstanceKlass*>* compute_transitive_interfaces(const InstanceKlass*
static void check_super_class_access(const InstanceKlass* this_klass, TRAPS) {
assert(this_klass != NULL, "invariant");
const Klass* const super = this_klass->super();
if (super != NULL) {
// If the loader is not the boot loader then throw an exception if its
@ -4715,12 +4940,13 @@ void ClassFileParser::verify_legal_class_modifiers(jint flags, TRAPS) const {
const bool is_super = (flags & JVM_ACC_SUPER) != 0;
const bool is_enum = (flags & JVM_ACC_ENUM) != 0;
const bool is_annotation = (flags & JVM_ACC_ANNOTATION) != 0;
const bool major_gte_15 = _major_version >= JAVA_1_5_VERSION;
const bool major_gte_1_5 = _major_version >= JAVA_1_5_VERSION;
const bool major_gte_14 = _major_version >= JAVA_14_VERSION;
if ((is_abstract && is_final) ||
(is_interface && !is_abstract) ||
(is_interface && major_gte_15 && (is_super || is_enum)) ||
(!is_interface && major_gte_15 && is_annotation)) {
(is_interface && major_gte_1_5 && (is_super || is_enum)) ||
(!is_interface && major_gte_1_5 && is_annotation)) {
ResourceMark rm(THREAD);
Exceptions::fthrow(
THREAD_AND_LOCATION,
@ -4816,14 +5042,14 @@ void ClassFileParser::verify_legal_field_modifiers(jint flags,
const bool is_volatile = (flags & JVM_ACC_VOLATILE) != 0;
const bool is_transient = (flags & JVM_ACC_TRANSIENT) != 0;
const bool is_enum = (flags & JVM_ACC_ENUM) != 0;
const bool major_gte_15 = _major_version >= JAVA_1_5_VERSION;
const bool major_gte_1_5 = _major_version >= JAVA_1_5_VERSION;
bool is_illegal = false;
if (is_interface) {
if (!is_public || !is_static || !is_final || is_private ||
is_protected || is_volatile || is_transient ||
(major_gte_15 && is_enum)) {
(major_gte_1_5 && is_enum)) {
is_illegal = true;
}
} else { // not interface
@ -4859,7 +5085,7 @@ void ClassFileParser::verify_legal_method_modifiers(jint flags,
const bool is_strict = (flags & JVM_ACC_STRICT) != 0;
const bool is_synchronized = (flags & JVM_ACC_SYNCHRONIZED) != 0;
const bool is_protected = (flags & JVM_ACC_PROTECTED) != 0;
const bool major_gte_15 = _major_version >= JAVA_1_5_VERSION;
const bool major_gte_1_5 = _major_version >= JAVA_1_5_VERSION;
const bool major_gte_8 = _major_version >= JAVA_8_VERSION;
const bool is_initializer = (name == vmSymbols::object_initializer_name());
@ -4882,7 +5108,7 @@ void ClassFileParser::verify_legal_method_modifiers(jint flags,
(is_abstract && (is_private || is_static || is_strict))) {
is_illegal = true;
}
} else if (major_gte_15) {
} else if (major_gte_1_5) {
// Class file version in the interval [JAVA_1_5_VERSION, JAVA_8_VERSION)
if (!is_public || is_private || is_protected || is_static || is_final ||
is_synchronized || is_native || !is_abstract || is_strict) {
@ -4900,13 +5126,13 @@ void ClassFileParser::verify_legal_method_modifiers(jint flags,
} else {
if (is_initializer) {
if (is_static || is_final || is_synchronized || is_native ||
is_abstract || (major_gte_15 && is_bridge)) {
is_abstract || (major_gte_1_5 && is_bridge)) {
is_illegal = true;
}
} else { // not initializer
if (is_abstract) {
if ((is_final || is_native || is_private || is_static ||
(major_gte_15 && (is_synchronized || is_strict)))) {
(major_gte_1_5 && (is_synchronized || is_strict)))) {
is_illegal = true;
}
}
@ -5495,6 +5721,7 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik, bool changed_by_loa
assert(NULL == _nest_members, "invariant");
assert(NULL == _local_interfaces, "invariant");
assert(NULL == _combined_annotations, "invariant");
assert(NULL == _record_components, "invariant");
if (_has_final_method) {
ik->set_has_final_method();
@ -5776,6 +6003,7 @@ ClassFileParser::ClassFileParser(ClassFileStream* stream,
_inner_classes(NULL),
_nest_members(NULL),
_nest_host(0),
_record_components(NULL),
_local_interfaces(NULL),
_transitive_interfaces(NULL),
_combined_annotations(NULL),
@ -5886,6 +6114,7 @@ void ClassFileParser::clear_class_metadata() {
_combined_annotations = NULL;
_class_annotations = _class_type_annotations = NULL;
_fields_annotations = _fields_type_annotations = NULL;
_record_components = NULL;
}
// Destructor to clean up
@ -5913,6 +6142,10 @@ ClassFileParser::~ClassFileParser() {
MetadataFactory::free_array<u2>(_loader_data, _nest_members);
}
if (_record_components != NULL) {
InstanceKlass::deallocate_record_components(_loader_data, _record_components);
}
// Free interfaces
InstanceKlass::deallocate_interfaces(_loader_data, _super_klass,
_local_interfaces, _transitive_interfaces);

@ -42,6 +42,7 @@ class FieldInfo;
template <typename T>
class GrowableArray;
class InstanceKlass;
class RecordComponent;
class Symbol;
class TempNewSymbol;
@ -98,6 +99,7 @@ class ClassFileParser {
Array<u2>* _inner_classes;
Array<u2>* _nest_members;
u2 _nest_host;
Array<RecordComponent*>* _record_components;
Array<InstanceKlass*>* _local_interfaces;
Array<InstanceKlass*>* _transitive_interfaces;
Annotations* _combined_annotations;
@ -287,6 +289,13 @@ class ClassFileParser {
const u1* const nest_members_attribute_start,
TRAPS);
u2 parse_classfile_record_attribute(const ClassFileStream* const cfs,
const ConstantPool* cp,
const u1* const record_attribute_start,
TRAPS);
bool supports_records();
void parse_classfile_attributes(const ClassFileStream* const cfs,
ConstantPool* cp,
ClassAnnotationCollector* parsed_annotations,

@ -75,6 +75,8 @@
// Entry point in java.dll for path canonicalization
typedef int (*canonicalize_fn_t)(const char *orig, char *out, int len);
static canonicalize_fn_t CanonicalizeEntry = NULL;
// Entry points in zip.dll for loading zip/jar file entries
@ -980,7 +982,7 @@ void ClassLoader::load_java_library() {
vm_exit_during_initialization("Unable to load java library", NULL);
}
CanonicalizeEntry = CAST_TO_FN_PTR(canonicalize_fn_t, dll_lookup(javalib_handle, "Canonicalize", NULL));
CanonicalizeEntry = CAST_TO_FN_PTR(canonicalize_fn_t, dll_lookup(javalib_handle, "JDK_Canonicalize", NULL));
}
void ClassLoader::load_zip_library() {
@ -1643,13 +1645,12 @@ void ClassLoader::classLoader_init2(TRAPS) {
bool ClassLoader::get_canonical_path(const char* orig, char* out, int len) {
assert(orig != NULL && out != NULL && len > 0, "bad arguments");
JavaThread* THREAD = JavaThread::current();
JNIEnv* env = THREAD->jni_environment();
ResourceMark rm(THREAD);
// os::native_path writes into orig_copy
char* orig_copy = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, strlen(orig)+1);
strcpy(orig_copy, orig);
if ((CanonicalizeEntry)(env, os::native_path(orig_copy), out, len) < 0) {
if ((CanonicalizeEntry)(os::native_path(orig_copy), out, len) < 0) {
return false;
}
return true;

@ -50,6 +50,7 @@
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "oops/recordComponent.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/resolvedMethodTable.hpp"
@ -3148,6 +3149,64 @@ void java_lang_reflect_Field::set_annotations(oop field, oop value) {
field->obj_field_put(annotations_offset, value);
}
oop java_lang_reflect_RecordComponent::create(InstanceKlass* holder, RecordComponent* component, TRAPS) {
// Allocate java.lang.reflect.RecordComponent instance
HandleMark hm(THREAD);
InstanceKlass* ik = SystemDictionary::RecordComponent_klass();
assert(ik != NULL, "must be loaded");
ik->initialize(CHECK_NULL);
Handle element = ik->allocate_instance_handle(CHECK_NULL);
Handle decl_class(THREAD, holder->java_mirror());
java_lang_reflect_RecordComponent::set_clazz(element(), decl_class());
Symbol* name = holder->constants()->symbol_at(component->name_index()); // name_index is a utf8
oop component_name = StringTable::intern(name, CHECK_NULL);
java_lang_reflect_RecordComponent::set_name(element(), component_name);
Symbol* type = holder->constants()->symbol_at(component->descriptor_index());
Handle component_type_h =
SystemDictionary::find_java_mirror_for_type(type, holder, SignatureStream::NCDFError, CHECK_NULL);
java_lang_reflect_RecordComponent::set_type(element(), component_type_h());
Method* accessor_method = NULL;
{
// Prepend "()" to type to create the full method signature.
ResourceMark rm(THREAD);
int sig_len = type->utf8_length() + 3; // "()" and null char
char* sig = NEW_RESOURCE_ARRAY(char, sig_len);
jio_snprintf(sig, sig_len, "%c%c%s", JVM_SIGNATURE_FUNC, JVM_SIGNATURE_ENDFUNC, type->as_C_string());
TempNewSymbol full_sig = SymbolTable::new_symbol(sig);
accessor_method = holder->find_instance_method(name, full_sig);
}
if (accessor_method != NULL) {
methodHandle method(THREAD, accessor_method);
oop m = Reflection::new_method(method, false, CHECK_NULL);
java_lang_reflect_RecordComponent::set_accessor(element(), m);
} else {
java_lang_reflect_RecordComponent::set_accessor(element(), NULL);
}
int sig_index = component->generic_signature_index();
if (sig_index > 0) {
Symbol* sig = holder->constants()->symbol_at(sig_index); // sig_index is a utf8
oop component_sig = StringTable::intern(sig, CHECK_NULL);
java_lang_reflect_RecordComponent::set_signature(element(), component_sig);
} else {
java_lang_reflect_RecordComponent::set_signature(element(), NULL);
}
typeArrayOop annotation_oop = Annotations::make_java_array(component->annotations(), CHECK_NULL);
java_lang_reflect_RecordComponent::set_annotations(element(), annotation_oop);
typeArrayOop type_annotation_oop = Annotations::make_java_array(component->type_annotations(), CHECK_NULL);
java_lang_reflect_RecordComponent::set_typeAnnotations(element(), type_annotation_oop);
return element();
}
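// Worked example (illustrative): a record component "int x" has descriptor "I", so the
// constructed signature is "()I" and find_instance_method() resolves the accessor x()I;
// a component "String s" yields "()Ljava/lang/String;" and accessor s()Ljava/lang/String;.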
#define CONSTANTPOOL_FIELDS_DO(macro) \
macro(_oop_offset, k, "constantPoolOop", object_signature, false)
@ -3918,6 +3977,24 @@ oop java_lang_invoke_CallSite::context_no_keepalive(oop call_site) {
return dep_oop;
}
// Support for java_lang_invoke_ConstantCallSite
int java_lang_invoke_ConstantCallSite::_is_frozen_offset;
#define CONSTANTCALLSITE_FIELDS_DO(macro) \
macro(_is_frozen_offset, k, "isFrozen", bool_signature, false)
void java_lang_invoke_ConstantCallSite::compute_offsets() {
InstanceKlass* k = SystemDictionary::ConstantCallSite_klass();
CONSTANTCALLSITE_FIELDS_DO(FIELD_COMPUTE_OFFSET);
}
#if INCLUDE_CDS
void java_lang_invoke_ConstantCallSite::serialize_offsets(SerializeClosure* f) {
CONSTANTCALLSITE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
// Support for java_lang_invoke_MethodHandleNatives_CallSiteContext
int java_lang_invoke_MethodHandleNatives_CallSiteContext::_vmdependencies_offset;
@ -3995,17 +4072,20 @@ int java_lang_ClassLoader::nameAndId_offset = -1;
int java_lang_ClassLoader::unnamedModule_offset = -1;
ClassLoaderData* java_lang_ClassLoader::loader_data_acquire(oop loader) {
assert(loader != NULL && oopDesc::is_oop(loader), "loader must be oop");
assert(loader != NULL, "loader must not be NULL");
assert(oopDesc::is_oop(loader), "loader must be oop");
return HeapAccess<MO_ACQUIRE>::load_at(loader, _loader_data_offset);
}
ClassLoaderData* java_lang_ClassLoader::loader_data_raw(oop loader) {
assert(loader != NULL && oopDesc::is_oop(loader), "loader must be oop");
assert(loader != NULL, "loader must not be NULL");
assert(oopDesc::is_oop(loader), "loader must be oop");
return RawAccess<>::load_at(loader, _loader_data_offset);
}
void java_lang_ClassLoader::release_set_loader_data(oop loader, ClassLoaderData* new_data) {
assert(loader != NULL && oopDesc::is_oop(loader), "loader must be oop");
assert(loader != NULL, "loader must not be NULL");
assert(oopDesc::is_oop(loader), "loader must be oop");
HeapAccess<MO_RELEASE>::store_at(loader, _loader_data_offset, new_data);
}
@ -4290,6 +4370,13 @@ int java_lang_Short_ShortCache::_static_cache_offset;
int java_lang_Byte_ByteCache::_static_cache_offset;
int java_lang_Boolean::_static_TRUE_offset;
int java_lang_Boolean::_static_FALSE_offset;
int java_lang_reflect_RecordComponent::clazz_offset;
int java_lang_reflect_RecordComponent::name_offset;
int java_lang_reflect_RecordComponent::type_offset;
int java_lang_reflect_RecordComponent::accessor_offset;
int java_lang_reflect_RecordComponent::signature_offset;
int java_lang_reflect_RecordComponent::annotations_offset;
int java_lang_reflect_RecordComponent::typeAnnotations_offset;
@ -4641,6 +4728,55 @@ static int member_offset(int hardcoded_offset) {
return (hardcoded_offset * heapOopSize) + instanceOopDesc::base_offset_in_bytes();
}
#define RECORDCOMPONENT_FIELDS_DO(macro) \
macro(clazz_offset, k, "clazz", class_signature, false); \
macro(name_offset, k, "name", string_signature, false); \
macro(type_offset, k, "type", class_signature, false); \
macro(accessor_offset, k, "accessor", reflect_method_signature, false); \
macro(signature_offset, k, "signature", string_signature, false); \
macro(annotations_offset, k, "annotations", byte_array_signature, false); \
macro(typeAnnotations_offset, k, "typeAnnotations", byte_array_signature, false);
// Support for java_lang_reflect_RecordComponent
void java_lang_reflect_RecordComponent::compute_offsets() {
InstanceKlass* k = SystemDictionary::RecordComponent_klass();
RECORDCOMPONENT_FIELDS_DO(FIELD_COMPUTE_OFFSET);
}
#if INCLUDE_CDS
void java_lang_reflect_RecordComponent::serialize_offsets(SerializeClosure* f) {
RECORDCOMPONENT_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
void java_lang_reflect_RecordComponent::set_clazz(oop element, oop value) {
element->obj_field_put(clazz_offset, value);
}
void java_lang_reflect_RecordComponent::set_name(oop element, oop value) {
element->obj_field_put(name_offset, value);
}
void java_lang_reflect_RecordComponent::set_type(oop element, oop value) {
element->obj_field_put(type_offset, value);
}
void java_lang_reflect_RecordComponent::set_accessor(oop element, oop value) {
element->obj_field_put(accessor_offset, value);
}
void java_lang_reflect_RecordComponent::set_signature(oop element, oop value) {
element->obj_field_put(signature_offset, value);
}
void java_lang_reflect_RecordComponent::set_annotations(oop element, oop value) {
element->obj_field_put(annotations_offset, value);
}
void java_lang_reflect_RecordComponent::set_typeAnnotations(oop element, oop value) {
element->obj_field_put(typeAnnotations_offset, value);
}
// Compute hard-coded offsets
// Invoked before SystemDictionary::initialize, so pre-loaded classes
// are not available to determine the offset_of_static_fields.

@ -30,6 +30,8 @@
#include "oops/oop.hpp"
#include "runtime/os.hpp"
class RecordComponent;
// Interface for manipulating the basic Java classes.
//
// All dependencies on layout of actual Java classes should be kept here.
@ -66,12 +68,14 @@
f(java_lang_invoke_LambdaForm) \
f(java_lang_invoke_MethodType) \
f(java_lang_invoke_CallSite) \
f(java_lang_invoke_ConstantCallSite) \
f(java_lang_invoke_MethodHandleNatives_CallSiteContext) \
f(java_security_AccessControlContext) \
f(java_lang_reflect_AccessibleObject) \
f(java_lang_reflect_Method) \
f(java_lang_reflect_Constructor) \
f(java_lang_reflect_Field) \
f(java_lang_reflect_RecordComponent) \
f(java_nio_Buffer) \
f(reflect_ConstantPool) \
f(reflect_UnsafeStaticFieldAccessorImpl) \
@ -1226,6 +1230,28 @@ public:
static int target_offset_in_bytes() { return _target_offset; }
};
// Interface to java.lang.invoke.ConstantCallSite objects
class java_lang_invoke_ConstantCallSite: AllStatic {
friend class JavaClasses;
private:
static int _is_frozen_offset;
static void compute_offsets();
public:
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// Accessors
static jboolean is_frozen(oop site);
// Testers
static bool is_subclass(Klass* klass) {
return klass->is_subclass_of(SystemDictionary::ConstantCallSite_klass());
}
static bool is_instance(oop obj);
};
// Interface to java.lang.invoke.MethodHandleNatives$CallSiteContext objects
#define CALLSITECONTEXT_INJECTED_FIELDS(macro) \
@ -1460,6 +1486,39 @@ class java_lang_LiveStackFrameInfo: AllStatic {
friend class JavaClasses;
};
// Interface to java.lang.reflect.RecordComponent objects
class java_lang_reflect_RecordComponent: AllStatic {
private:
static int clazz_offset;
static int name_offset;
static int type_offset;
static int accessor_offset;
static int signature_offset;
static int annotations_offset;
static int typeAnnotations_offset;
// Setters
static void set_clazz(oop element, oop value);
static void set_name(oop element, oop value);
static void set_type(oop element, oop value);
static void set_accessor(oop element, oop value);
static void set_signature(oop element, oop value);
static void set_annotations(oop element, oop value);
static void set_typeAnnotations(oop element, oop value);
public:
// Create an instance of RecordComponent
static oop create(InstanceKlass* holder, RecordComponent* component, TRAPS);
static void compute_offsets();
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// Debugging
friend class JavaClasses;
};
// Interface to java.lang.AssertionStatusDirectives objects
class java_lang_AssertionStatusDirectives: AllStatic {

@ -179,6 +179,14 @@ inline bool java_lang_invoke_CallSite::is_instance(oop obj) {
return obj != NULL && is_subclass(obj->klass());
}
inline jboolean java_lang_invoke_ConstantCallSite::is_frozen(oop site) {
return site->bool_field(_is_frozen_offset);
}
inline bool java_lang_invoke_ConstantCallSite::is_instance(oop obj) {
return obj != NULL && is_subclass(obj->klass());
}
inline bool java_lang_invoke_MethodHandleNatives_CallSiteContext::is_instance(oop obj) {
return obj != NULL && is_subclass(obj->klass());
}

@ -119,6 +119,7 @@ class GCTimer;
do_klass(AccessController_klass, java_security_AccessController ) \
do_klass(SecureClassLoader_klass, java_security_SecureClassLoader ) \
do_klass(ClassNotFoundException_klass, java_lang_ClassNotFoundException ) \
do_klass(Record_klass, java_lang_Record ) \
do_klass(NoClassDefFoundError_klass, java_lang_NoClassDefFoundError ) \
do_klass(LinkageError_klass, java_lang_LinkageError ) \
do_klass(ClassCastException_klass, java_lang_ClassCastException ) \
@ -217,6 +218,9 @@ class GCTimer;
/* force inline of iterators */ \
do_klass(Iterator_klass, java_util_Iterator ) \
\
/* support for records */ \
do_klass(RecordComponent_klass, java_lang_reflect_RecordComponent ) \
\
/*end*/
class SystemDictionary : AllStatic {

@ -739,7 +739,7 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
StackMapTable stackmap_table(&reader, &current_frame, max_locals, max_stack,
code_data, code_length, CHECK_VERIFY(this));
LogTarget(Info, verification) lt;
LogTarget(Debug, verification) lt;
if (lt.is_enabled()) {
ResourceMark rm(THREAD);
LogStream ls(lt);
@ -783,7 +783,7 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
VerificationType type, type2;
VerificationType atype;
LogTarget(Info, verification) lt;
LogTarget(Debug, verification) lt;
if (lt.is_enabled()) {
ResourceMark rm(THREAD);
LogStream ls(lt);
@ -2648,9 +2648,9 @@ void ClassVerifier::verify_invoke_init(
verify_error(ErrorContext::bad_code(bci),
"Bad <init> method call from after the start of a try block");
return;
} else if (log_is_enabled(Info, verification)) {
} else if (log_is_enabled(Debug, verification)) {
ResourceMark rm(THREAD);
log_info(verification)("Survived call to ends_in_athrow(): %s",
log_debug(verification)("Survived call to ends_in_athrow(): %s",
current_class()->name()->as_C_string());
}
}

@ -94,6 +94,7 @@
template(java_lang_reflect_Field, "java/lang/reflect/Field") \
template(java_lang_reflect_Parameter, "java/lang/reflect/Parameter") \
template(java_lang_reflect_Array, "java/lang/reflect/Array") \
template(java_lang_reflect_RecordComponent, "java/lang/reflect/RecordComponent") \
template(java_lang_StringBuffer, "java/lang/StringBuffer") \
template(java_lang_StringBuilder, "java/lang/StringBuilder") \
template(java_lang_CharSequence, "java/lang/CharSequence") \
@ -127,6 +128,7 @@
template(jdk_internal_vm_PostVMInitHook, "jdk/internal/vm/PostVMInitHook") \
template(sun_net_www_ParseUtil, "sun/net/www/ParseUtil") \
template(java_util_Iterator, "java/util/Iterator") \
template(java_lang_Record, "java/lang/Record") \
\
template(jdk_internal_loader_ClassLoaders_AppClassLoader, "jdk/internal/loader/ClassLoaders$AppClassLoader") \
template(jdk_internal_loader_ClassLoaders_PlatformClassLoader, "jdk/internal/loader/ClassLoaders$PlatformClassLoader") \
@ -161,6 +163,7 @@
template(tag_deprecated, "Deprecated") \
template(tag_source_debug_extension, "SourceDebugExtension") \
template(tag_signature, "Signature") \
template(tag_record, "Record") \
template(tag_runtime_visible_annotations, "RuntimeVisibleAnnotations") \
template(tag_runtime_invisible_annotations, "RuntimeInvisibleAnnotations") \
template(tag_runtime_visible_parameter_annotations, "RuntimeVisibleParameterAnnotations") \
@ -562,6 +565,7 @@
template(char_StringBuffer_signature, "(C)Ljava/lang/StringBuffer;") \
template(int_String_signature, "(I)Ljava/lang/String;") \
template(boolean_boolean_int_signature, "(ZZ)I") \
template(reflect_method_signature, "Ljava/lang/reflect/Method;") \
/* signature symbols needed by intrinsics */ \
VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, template, VM_ALIAS_IGNORE) \
\
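The Record-related entries above slot into the `do_klass` and `template` X-macro tables, where a single list of entries is expanded in different ways (enumerators, name strings, initialization) depending on the macro the includer supplies. A generic illustration of that X-macro technique, deliberately not reproducing the real SystemDictionary/vmSymbols tables:

```cpp
#include <cstdio>

// X-macro list: each entry is expanded differently depending on how the
// caller defines TEMPLATE (entries here are illustrative).
#define TOY_SYMBOLS_DO(TEMPLATE)                        \
  TEMPLATE(java_lang_Record, "java/lang/Record")        \
  TEMPLATE(java_lang_String, "java/lang/String")

// Expansion 1: declare an enum constant per entry.
#define DECLARE_ENUM(name, str) name,
enum SymbolId { TOY_SYMBOLS_DO(DECLARE_ENUM) SYMBOL_COUNT };
#undef DECLARE_ENUM

// Expansion 2: build the matching string table from the same list.
#define DECLARE_STRING(name, str) str,
static const char* const symbol_names[] = { TOY_SYMBOLS_DO(DECLARE_STRING) };
#undef DECLARE_STRING

int main() {
  for (int i = 0; i < SYMBOL_COUNT; i++) {
    std::printf("%d -> %s\n", i, symbol_names[i]);
  }
  return 0;
}
```

Adding one line to the list, as the diff does for `java_lang_Record`, automatically keeps every expansion in sync.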

@ -147,6 +147,7 @@ public:
inline bool is_compiled_by_c2() const { return _type == compiler_c2; };
inline bool is_compiled_by_jvmci() const { return _type == compiler_jvmci; };
const char* compiler_name() const;
CompilerType compiler_type() const { return _type; }
// Casting
nmethod* as_nmethod_or_null() { return is_nmethod() ? (nmethod*) this : NULL; }

@ -30,6 +30,7 @@
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
@ -489,7 +490,20 @@ static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address add
if (nm != NULL) {
// Clean inline caches pointing to both zombie and not_entrant methods
if (clean_all || !nm->is_in_use() || nm->is_unloading() || (nm->method()->code() != nm)) {
if (!ic->set_to_clean(from->is_alive())) {
// Inline cache cleaning should only be initiated on CompiledMethods that have been
// observed to be is_alive(). However, with concurrent code cache unloading, it is
// possible that by now, the state has been racingly flipped to unloaded if the nmethod
// being cleaned is_unloading(). This is fine, because if that happens, then the inline
// caches have already been cleaned under the same CompiledICLocker that we now hold during
// inline cache cleaning, and we will simply walk the inline caches again, and likely not
// find much of interest to clean. However, this race prevents us from asserting that the
// nmethod is_alive(). The is_unloading() function is completely monotonic; once set due
// to an oop dying, it remains set forever until freed. Because of that, all unloaded
// nmethods are is_unloading(), but notably, an unloaded nmethod may also subsequently
// become zombie (when the sweeper converts it to zombie). Therefore, the most precise
// sanity check we can check for in this context is to not allow zombies.
assert(!from->is_zombie(), "should not clean inline caches on zombies");
if (!ic->set_to_clean(!from->is_unloading())) {
return false;
}
assert(ic->is_clean(), "nmethod " PTR_FORMAT "not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
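The comment in this hunk rests on `is_unloading()` being monotonic: once it flips to true it never reverts, so a concurrent unload between the earlier liveness check and the cleaning is harmless, and the only assertion that remains valid is that the nmethod is not yet a zombie. A toy model of that argument with invented types (`ToyMethod` is not HotSpot's CompiledMethod):

```cpp
#include <atomic>
#include <cassert>

// Hypothetical compiled-method states, loosely mirroring the diff's
// in_use / not_entrant / unloaded / zombie vocabulary.
enum State { in_use, not_entrant, unloaded, zombie };

struct ToyMethod {
  std::atomic<State> state{in_use};
  std::atomic<bool>  unloading{false};   // monotonic: set once, never cleared

  bool is_unloading() const { return unloading.load(std::memory_order_acquire); }
  bool is_zombie()    const { return state.load(std::memory_order_acquire) == zombie; }

  // Concurrent unloader: flips the monotonic flag, then the state.
  void start_unloading() {
    unloading.store(true, std::memory_order_release);
    state.store(unloaded, std::memory_order_release);
  }
};

// Cleaning path: the method may have been observed alive earlier, but by the
// time we get here it can already be is_unloading(). That race is tolerated;
// what must not have happened yet is the sweeper turning it into a zombie,
// so that is the only sanity check kept.
void clean_inline_caches(const ToyMethod& m) {
  assert(!m.is_zombie() && "should not clean inline caches on zombies");
  bool keep_transition_stub = !m.is_unloading();  // analogous to set_to_clean(!is_unloading())
  (void)keep_transition_stub;
}

int main() {
  ToyMethod m;
  m.start_unloading();      // racing unload between observation and cleaning
  clean_inline_caches(m);   // still fine: unloaded, but not (yet) a zombie
  return 0;
}
```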
@ -543,6 +557,18 @@ void CompiledMethod::cleanup_inline_caches(bool clean_all) {
return;
}
}
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm != NULL) {
// We want to keep an invariant that nmethods found through iteration of a Thread's
// nmethods at safepoints have gone through an entry barrier and are not armed.
// By calling this nmethod entry barrier from the sweeper, it plays along and acts
// like any other nmethod found on the stack of a thread (fewer surprises).
nmethod* nm = as_nmethod_or_null();
if (nm != NULL) {
bool alive = bs_nm->nmethod_entry_barrier(nm);
assert(alive, "should be alive");
}
}
InlineCacheBuffer::refill_ic_stubs();
}
}
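The second hunk has the sweeper run the nmethod entry barrier itself, so any nmethod it has processed looks the same as one reached from a thread's stack: already disarmed. A rough sketch of the arm/disarm idea with invented stand-in types (not the real BarrierSetNMethod interface):

```cpp
#include <cassert>

// Invented stand-ins: a per-nmethod "armed" bit and a barrier object that
// disarms it on first entry, returning whether the method is still alive.
struct ToyNMethod {
  bool armed = true;
  bool alive = true;
};

struct ToyBarrierSetNMethod {
  // Entry barrier: perform whatever per-GC fixups are needed, then disarm.
  bool nmethod_entry_barrier(ToyNMethod* nm) {
    if (nm->armed) {
      // e.g. heal embedded oops / patch the entry, then:
      nm->armed = false;
    }
    return nm->alive;
  }
};

// Sweeper-side cleaning: by running the same barrier here, every nmethod the
// sweeper has touched is guaranteed to be disarmed, matching what threads
// observe when they find the nmethod on their stacks at a safepoint.
void sweeper_clean(ToyBarrierSetNMethod* bs, ToyNMethod* nm) {
  if (bs != nullptr) {
    bool alive = bs->nmethod_entry_barrier(nm);
    assert(alive && "should be alive");
  }
}

int main() {
  ToyBarrierSetNMethod bs;
  ToyNMethod nm;
  sweeper_clean(&bs, &nm);
  assert(!nm.armed);  // invariant: swept nmethods are not armed
  return 0;
}
```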

@ -49,6 +49,7 @@
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiImpl.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/flags/flagSetting.hpp"
@ -58,6 +59,7 @@
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/serviceThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/vmThread.hpp"
@ -428,7 +430,8 @@ void nmethod::init_defaults() {
_has_flushed_dependencies = 0;
_lock_count = 0;
_stack_traversal_mark = 0;
_unload_reported = false; // jvmti state
_load_reported = false; // jvmti state
_unload_reported = false;
_is_far_code = false; // nmethods are located in CodeCache
#ifdef ASSERT
@ -436,7 +439,6 @@ void nmethod::init_defaults() {
#endif
_oops_do_mark_link = NULL;
_jmethod_id = NULL;
_osr_link = NULL;
#if INCLUDE_RTM_OPT
_rtm_state = NoRTM;
@ -1563,11 +1565,11 @@ void nmethod::flush_dependencies(bool delete_immediately) {
// post_compiled_method_load_event
// new method for install_code() path
// Transfer information from compilation to jvmti
void nmethod::post_compiled_method_load_event() {
void nmethod::post_compiled_method_load_event(JvmtiThreadState* state) {
// This is a bad time for a safepoint. We don't want
// this nmethod to get unloaded while we're queueing the event.
NoSafepointVerifier nsv;
// This is a bad time for a safepoint. We don't want
// this nmethod to get unloaded while we're queueing the event.
NoSafepointVerifier nsv;
Method* m = method();
HOTSPOT_COMPILED_METHOD_LOAD(
@ -1579,26 +1581,22 @@ void nmethod::post_compiled_method_load_event() {
m->signature()->utf8_length(),
insts_begin(), insts_size());
if (JvmtiExport::should_post_compiled_method_load() ||
JvmtiExport::should_post_compiled_method_unload()) {
get_and_cache_jmethod_id();
}
if (JvmtiExport::should_post_compiled_method_load()) {
// Let the Service thread (which is a real Java thread) post the event
MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
JvmtiDeferredEventQueue::enqueue(
JvmtiDeferredEvent::compiled_method_load_event(this));
}
}
// Only post unload events if load events are found.
set_load_reported();
// Keep sweeper from turning this into zombie until it is posted.
mark_as_seen_on_stack();
jmethodID nmethod::get_and_cache_jmethod_id() {
if (_jmethod_id == NULL) {
// Cache the jmethod_id since it can no longer be looked up once the
// method itself has been marked for unloading.
_jmethod_id = method()->jmethod_id();
// If a JavaThread hasn't been passed in, let the Service thread
// (which is a real Java thread) post the event
JvmtiDeferredEvent event = JvmtiDeferredEvent::compiled_method_load_event(this);
if (state == NULL) {
ServiceThread::enqueue_deferred_event(&event);
} else {
state->enqueue_event(&event);
}
}
return _jmethod_id;
}
void nmethod::post_compiled_method_unload() {
@ -1614,18 +1612,17 @@ void nmethod::post_compiled_method_unload() {
// If a JVMTI agent has enabled the CompiledMethodUnload event then
// post the event. Sometime later this nmethod will be made a zombie
// by the sweeper but the Method* will not be valid at that point.
// If the _jmethod_id is null then no load event was ever requested
// so don't bother posting the unload. The main reason for this is
// that the jmethodID is a weak reference to the Method* so if
// The jmethodID is a weak reference to the Method* so if
// it's being unloaded there's no way to look it up since the weak
// ref will have been cleared.
if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
// Don't bother posting the unload if the load event wasn't posted.
if (load_reported() && JvmtiExport::should_post_compiled_method_unload()) {
assert(!unload_reported(), "already unloaded");
JvmtiDeferredEvent event =
JvmtiDeferredEvent::compiled_method_unload_event(this,
_jmethod_id, insts_begin());
MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
JvmtiDeferredEventQueue::enqueue(event);
JvmtiDeferredEvent::compiled_method_unload_event(
method()->jmethod_id(), insts_begin());
ServiceThread::enqueue_deferred_event(&event);
}
// The JVMTI CompiledMethodUnload event can be enabled or disabled at
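The rewritten post_compiled_method_load_event() above chooses its queue from the optional JvmtiThreadState argument: with no state the deferred event goes to the service thread's global queue, otherwise it is buffered on the posting thread's own state. A small sketch of that dispatch shape using placeholder queue types rather than the real ServiceThread/JvmtiThreadState APIs:

```cpp
#include <deque>
#include <iostream>
#include <string>

// Placeholder for a deferred JVMTI event.
struct ToyEvent { std::string what; };

// Global queue drained by a dedicated service thread (placeholder).
static std::deque<ToyEvent> service_thread_queue;

// Per-thread JVMTI state with its own event buffer (placeholder).
struct ToyThreadState {
  std::deque<ToyEvent> local_events;
  void enqueue_event(const ToyEvent& e) { local_events.push_back(e); }
};

// Mirrors the diff's shape: if no thread state is supplied, hand the event
// to the service thread; otherwise buffer it on the posting thread.
void post_load_event(ToyThreadState* state) {
  ToyEvent event{"compiled_method_load"};
  if (state == nullptr) {
    service_thread_queue.push_back(event);
  } else {
    state->enqueue_event(event);
  }
}

int main() {
  ToyThreadState local;
  post_load_event(nullptr);   // install_code() path: the service thread posts it
  post_load_event(&local);    // caller-supplied state: posted from that thread later
  std::cout << service_thread_queue.size() << " global, "
            << local.local_events.size() << " local\n";  // 1 global, 1 local
  return 0;
}
```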

@ -30,6 +30,7 @@
class DepChange;
class DirectiveSet;
class DebugInformationRecorder;
class JvmtiThreadState;
// nmethods (native methods) are the compiled code versions of Java methods.
//
@ -71,7 +72,6 @@ class nmethod : public CompiledMethod {
private:
// Shared fields for all nmethod's
int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
jmethodID _jmethod_id; // Cache of method()->jmethod_id()
// To support simple linked-list chaining of nmethods:
nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
@ -227,8 +227,9 @@ class nmethod : public CompiledMethod {
// protected by CodeCache_lock
bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock)
// used by jvmti to track if an unload event has been posted for this nmethod.
// used by jvmti to track if an event has been posted for this nmethod.
bool _unload_reported;
bool _load_reported;
// Protected by CompiledMethod_lock
volatile signed char _state; // {not_installed, in_use, not_entrant, zombie, unloaded}
@ -482,10 +483,6 @@ class nmethod : public CompiledMethod {
bool make_not_used() { return make_not_entrant(); }
bool make_zombie() { return make_not_entrant_or_zombie(zombie); }
// used by jvmti to track if the unload event has been reported
bool unload_reported() { return _unload_reported; }
void set_unload_reported() { _unload_reported = true; }
int get_state() const {
return _state;
}
@ -621,6 +618,12 @@ public:
address* orig_pc_addr(const frame* fr);
// used by jvmti to track if the load and unload events have been reported
bool unload_reported() const { return _unload_reported; }
void set_unload_reported() { _unload_reported = true; }
bool load_reported() const { return _load_reported; }
void set_load_reported() { _load_reported = true; }
public:
// copying of debugging information
void copy_scopes_pcs(PcDesc* pcs, int count);
@ -631,8 +634,7 @@ public:
void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
// jvmti support:
void post_compiled_method_load_event();
jmethodID get_and_cache_jmethod_id();
void post_compiled_method_load_event(JvmtiThreadState* state = NULL);
// verify operations
void verify();
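With the cached _jmethod_id gone, the header now pairs _load_reported with _unload_reported so an unload event is only posted when the matching load event actually went out. A condensed, self-contained sketch of that flag lifecycle (names are illustrative, not the real nmethod interface):

```cpp
#include <cassert>

// Illustrative only: tracks whether the JVMTI load/unload events for one
// compiled method have been reported.
class ToyNMethod {
  bool _load_reported   = false;
  bool _unload_reported = false;

 public:
  void post_load_event()       { _load_reported = true; }  // "only post unload if load was posted"
  bool load_reported()   const { return _load_reported; }
  bool unload_reported() const { return _unload_reported; }

  void post_unload_event(bool agent_wants_unload) {
    if (load_reported() && agent_wants_unload) {
      assert(!unload_reported() && "already unloaded");
      // ...build the deferred event and hand it to the service thread...
      _unload_reported = true;
    }
  }
};

int main() {
  ToyNMethod a;               // never reported as loaded
  a.post_unload_event(true);
  assert(!a.unload_reported());

  ToyNMethod b;
  b.post_load_event();
  b.post_unload_event(true);
  assert(b.unload_reported());
  return 0;
}
```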

@ -2019,8 +2019,10 @@ void CompileBroker::post_compile(CompilerThread* thread, CompileTask* task, bool
static void post_compilation_event(EventCompilation* event, CompileTask* task) {
assert(event != NULL, "invariant");
assert(event->should_commit(), "invariant");
event->set_method(task->method());
assert(task != NULL, "invariant");
event->set_compileId(task->compile_id());
event->set_compiler(task->compiler()->type());
event->set_method(task->method());
event->set_compileLevel(task->comp_level());
event->set_succeded(task->is_success());
event->set_isOsr(task->osr_bci() != CompileBroker::standard_entry_bci);

@ -55,6 +55,7 @@
#include "include/jvm.h"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"

Some files were not shown because too many files have changed in this diff.