Phil Race 2018-05-15 10:13:52 -07:00
commit ca8b59370b
242 changed files with 5083 additions and 2356 deletions

View File

@ -13,3 +13,4 @@ test/nashorn/lib
NashornProfile.txt
.*/JTreport/.*
.*/JTwork/.*
.*/.git/.*

View File

@ -484,3 +484,4 @@ f7363de371c9a1f668bd0a01b7df3d1ddb9cc58b jdk-11+7
69d7398038c54774d9395b6810e0cca335edc02c jdk-11+10
e1e60f75cd39312a7f59d2a4f91d624e5aecc95e jdk-11+11
3ab6ba9f94a9045a526d645af26c933235371d6f jdk-11+12
758deedaae8406ae60147486107a54e9864aa7b0 jdk-11+13

View File

@ -121,11 +121,21 @@ else # not java.base
ifeq ($(OPENJDK_TARGET_OS), windows)
# Only java.base needs to include the MSVC*_DLLs. Make sure no other module
# tries to include them (typically imported ones).
ifneq ($(wildcard $(LIBS_DIR)/$(notdir $(MSVCR_DLL))), )
JMOD_FLAGS += --exclude '$(notdir $(MSVCR_DLL))'
ifneq ($(MSVCR_DLL), )
ifneq ($(wildcard $(LIBS_DIR)/$(notdir $(MSVCR_DLL))), )
JMOD_FLAGS += --exclude '$(notdir $(MSVCR_DLL))'
endif
endif
ifneq ($(wildcard $(LIBS_DIR)/$(notdir $(MSVCP_DLL))), )
JMOD_FLAGS += --exclude '$(notdir $(MSVCP_DLL))'
ifneq ($(MSVCP_DLL), )
ifneq ($(wildcard $(LIBS_DIR)/$(notdir $(MSVCP_DLL))), )
JMOD_FLAGS += --exclude '$(notdir $(MSVCP_DLL))'
endif
endif
ifneq ($(UCRT_DLL_DIR), )
UCRT_DLL_FILES := $(notdir $(wildcard $(UCRT_DLL_DIR)/*.dll))
ifneq ($(wildcard $(LIBS_DIR)/$(firstword $(UCRT_DLL_FILES))), )
JMOD_FLAGS += $(patsubst %, --exclude '%', $(UCRT_DLL_FILES))
endif
endif
endif
endif
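
The UCRT hunk above generalizes the existing MSVCR/MSVCP handling: every DLL basename found in $(UCRT_DLL_DIR) becomes its own --exclude flag, so the runtime DLLs ship only with java.base. A minimal Java sketch of that computation, with a made-up directory path, not code from this commit:

import java.io.File;
import java.util.ArrayList;
import java.util.List;

public class UcrtExcludeFlags {
    public static void main(String[] args) {
        // Hypothetical stand-in for $(UCRT_DLL_DIR).
        File ucrtDir = new File(args.length > 0 ? args[0] : "C:/devkit/10/Redist/ucrt/DLLs/x64");
        List<String> flags = new ArrayList<>();
        // $(wildcard $(UCRT_DLL_DIR)/*.dll), reduced to basenames as $(notdir ...) does.
        File[] dlls = ucrtDir.listFiles((dir, name) -> name.endsWith(".dll"));
        if (dlls != null) {
            for (File dll : dlls) {
                // $(patsubst %, --exclude '%', $(UCRT_DLL_FILES))
                flags.add("--exclude");
                flags.add(dll.getName());
            }
        }
        System.out.println(flags);
    }
}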

View File

@ -671,6 +671,8 @@ AC_DEFUN_ONCE([BASIC_SETUP_DEVKIT],
BASIC_EVAL_DEVKIT_VARIABLE([DEVKIT_MSVCR_DLL])
# Corresponds to --with-msvcp-dll
BASIC_EVAL_DEVKIT_VARIABLE([DEVKIT_MSVCP_DLL])
# Corresponds to --with-ucrt-dll-dir
BASIC_EVAL_DEVKIT_VARIABLE([DEVKIT_UCRT_DLL_DIR])
fi
AC_MSG_CHECKING([for devkit])

View File

@ -206,7 +206,7 @@ AC_DEFUN_ONCE([HOTSPOT_ENABLE_DISABLE_AOT],
if test "x$ENABLE_AOT" = "xtrue"; then
# Only enable AOT on X64 platforms.
if test "x$OPENJDK_TARGET_CPU" = "xx86_64"; then
if test "x$OPENJDK_TARGET_CPU" = "xx86_64" || test "x$OPENJDK_TARGET_CPU" = "xaarch64" ; then
if test -e "${TOPDIR}/src/jdk.aot"; then
if test -e "${TOPDIR}/src/jdk.internal.vm.compiler"; then
ENABLE_AOT="true"

View File

@ -736,6 +736,7 @@ LIBZ_LIBS:=@LIBZ_LIBS@
LIBZIP_CAN_USE_MMAP:=@LIBZIP_CAN_USE_MMAP@
MSVCR_DLL:=@MSVCR_DLL@
MSVCP_DLL:=@MSVCP_DLL@
UCRT_DLL_DIR:=@UCRT_DLL_DIR@
STLPORT_LIB:=@STLPORT_LIB@
####################################################

View File

@ -76,6 +76,7 @@ VS_VERSION_INTERNAL_2017=141
VS_MSVCR_2017=vcruntime140.dll
VS_MSVCP_2017=msvcp140.dll
VS_ENVVAR_2017="VS150COMNTOOLS"
VS_USE_UCRT_2017="true"
VS_VS_INSTALLDIR_2017="Microsoft Visual Studio/2017"
VS_EDITIONS_2017="BuildTools Community Professional Enterprise"
VS_SDK_INSTALLDIR_2017=
@ -264,6 +265,7 @@ AC_DEFUN([TOOLCHAIN_FIND_VISUAL_STUDIO],
eval VS_VERSION_INTERNAL="\${VS_VERSION_INTERNAL_${VS_VERSION}}"
eval MSVCR_NAME="\${VS_MSVCR_${VS_VERSION}}"
eval MSVCP_NAME="\${VS_MSVCP_${VS_VERSION}}"
eval USE_UCRT="\${VS_USE_UCRT_${VS_VERSION}}"
eval PLATFORM_TOOLSET="\${VS_VS_PLATFORM_NAME_${VS_VERSION}}"
VS_PATH="$TOOLCHAIN_PATH:$PATH"
@ -309,6 +311,7 @@ AC_DEFUN([TOOLCHAIN_FIND_VISUAL_STUDIO],
eval VS_VERSION_INTERNAL="\${VS_VERSION_INTERNAL_${VS_VERSION}}"
eval MSVCR_NAME="\${VS_MSVCR_${VS_VERSION}}"
eval MSVCP_NAME="\${VS_MSVCP_${VS_VERSION}}"
eval USE_UCRT="\${VS_USE_UCRT_${VS_VERSION}}"
# The rest of the variables are already evaled while probing
AC_MSG_NOTICE([Found $VS_DESCRIPTION])
break
@ -432,8 +435,11 @@ AC_DEFUN([TOOLCHAIN_SETUP_VISUAL_STUDIO_ENV],
VS_INCLUDE=`$ECHO "$VS_INCLUDE" | $SED -e 's/\\\\*;* *$//'`
VS_LIB=`$ECHO "$VS_LIB" | $SED 's/\\\\*;* *$//'`
VCINSTALLDIR=`$ECHO "$VCINSTALLDIR" | $SED 's/\\\\* *$//'`
WindowsSDKDir=`$ECHO "$WindowsSDKDir" | $SED 's/\\\\* *$//'`
WindowsSdkDir=`$ECHO "$WindowsSdkDir" | $SED 's/\\\\* *$//'`
WINDOWSSDKDIR=`$ECHO "$WINDOWSSDKDIR" | $SED 's/\\\\* *$//'`
if test -z "$WINDOWSSDKDIR"; then
WINDOWSSDKDIR="$WindowsSdkDir"
fi
# Remove any paths containing # (typically F#) as that messes up make. This
# is needed if visual studio was installed with F# support.
VS_PATH=`$ECHO "$VS_PATH" | $SED 's/[[^:#]]*#[^:]*://g'`
@ -539,7 +545,7 @@ AC_DEFUN([TOOLCHAIN_SETUP_MSVC_DLL],
if test "x$MSVC_DLL" = x; then
if test "x$VCINSTALLDIR" != x; then
CYGWIN_VC_INSTALL_DIR="$VCINSTALLDIR"
BASIC_WINDOWS_REWRITE_AS_UNIX_PATH(CYGWIN_VC_INSTALL_DIR)
BASIC_FIXUP_PATH(CYGWIN_VC_INSTALL_DIR)
if test "$VS_VERSION" -lt 2017; then
# Probe: Using well-known location from Visual Studio 12.0 and older
if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
@ -673,4 +679,41 @@ AC_DEFUN([TOOLCHAIN_SETUP_VS_RUNTIME_DLLS],
fi
AC_SUBST(MSVCP_DLL)
fi
AC_ARG_WITH(ucrt-dll-dir, [AS_HELP_STRING([--with-ucrt-dll-dir],
[path to Microsoft Windows Kit UCRT DLL dir (Windows only) @<:@probed@:>@])])
if test "x$USE_UCRT" = "xtrue"; then
AC_MSG_CHECKING([for UCRT DLL dir])
if test "x$with_ucrt_dll_dir" != x; then
if test -z "$(ls -d "$with_ucrt_dll_dir/"*.dll 2> /dev/null)"; then
AC_MSG_RESULT([no])
AC_MSG_ERROR([Could not find any dlls in $with_ucrt_dll_dir])
else
AC_MSG_RESULT([$with_ucrt_dll_dir])
UCRT_DLL_DIR="$with_ucrt_dll_dir"
BASIC_FIXUP_PATH([UCRT_DLL_DIR])
fi
elif test "x$DEVKIT_UCRT_DLL_DIR" != "x"; then
UCRT_DLL_DIR="$DEVKIT_UCRT_DLL_DIR"
AC_MSG_RESULT($UCRT_DLL_DIR)
else
CYGWIN_WINDOWSSDKDIR="${WINDOWSSDKDIR}"
BASIC_FIXUP_PATH([CYGWIN_WINDOWSSDKDIR])
dll_subdir=$OPENJDK_TARGET_CPU
if test "x$dll_subdir" = "xx86_64"; then
dll_subdir="x64"
fi
UCRT_DLL_DIR="$CYGWIN_WINDOWSSDKDIR/Redist/ucrt/DLLs/$dll_subdir"
if test -z "$(ls -d "$UCRT_DLL_DIR/"*.dll 2> /dev/null)"; then
AC_MSG_RESULT([no])
AC_MSG_ERROR([Could not find any dlls in $UCRT_DLL_DIR])
else
AC_MSG_RESULT($UCRT_DLL_DIR)
fi
fi
else
UCRT_DLL_DIR=
fi
AC_SUBST(UCRT_DLL_DIR)
])
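
The probe order implemented above, sketched in Java for readability (helper names are invented; only the fallback logic mirrors the m4 code): an explicit --with-ucrt-dll-dir wins but must actually contain DLLs, a devkit-provided DEVKIT_UCRT_DLL_DIR is trusted as-is, and otherwise the standard Windows SDK layout under $WINDOWSSDKDIR is probed.

import java.io.File;

public class UcrtProbe {
    static boolean hasDlls(File dir) {
        String[] names = dir.list((d, n) -> n.endsWith(".dll"));
        return names != null && names.length > 0;
    }

    // Returns the UCRT DLL directory or throws, following the configure logic.
    static File probe(File withFlag, File devkitDir, File windowsSdkDir, String cpu) {
        if (withFlag != null) {
            if (!hasDlls(withFlag)) {
                throw new IllegalStateException("Could not find any dlls in " + withFlag);
            }
            return withFlag;                                // --with-ucrt-dll-dir
        }
        if (devkitDir != null) {
            return devkitDir;                               // DEVKIT_UCRT_DLL_DIR
        }
        String subdir = cpu.equals("x86_64") ? "x64" : cpu; // dll_subdir rename
        File probed = new File(windowsSdkDir, "Redist/ucrt/DLLs/" + subdir);
        if (!hasDlls(probed)) {
            throw new IllegalStateException("Could not find any dlls in " + probed);
        }
        return probed;
    }
}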

View File

@ -65,6 +65,17 @@ ifeq ($(OPENJDK_TARGET_OS), windows)
MACRO := copy-and-chmod))
TARGETS += $(COPY_MSVCR) $(COPY_MSVCP)
ifneq ($(UCRT_DLL_DIR), )
$(eval $(call SetupCopyFiles, COPY_UCRT_DLLS, \
DEST := $(LIB_DST_DIR), \
SRC := $(UCRT_DLL_DIR), \
FILES := $(wildcard $(UCRT_DLL_DIR)/*.dll), \
MACRO := copy-and-chmod, \
))
TARGETS += $(COPY_UCRT_DLLS)
endif
endif
################################################################################
@ -117,23 +128,23 @@ else
$(RM) $(@)
# Now check for other permutations
ifeq ($(call check-jvm-variant, server), true)
$(PRINTF) "-server KNOWN\n">>$(@)
$(PRINTF) "-client ALIASED_TO -server\n">>$(@)
$(PRINTF) -- "-server KNOWN\n">>$(@)
$(PRINTF) -- "-client ALIASED_TO -server\n">>$(@)
ifeq ($(call check-jvm-variant, minimal), true)
$(PRINTF) "-minimal KNOWN\n">>$(@)
$(PRINTF) -- "-minimal KNOWN\n">>$(@)
endif
else
ifeq ($(call check-jvm-variant, client), true)
$(PRINTF) "-client KNOWN\n">>$(@)
$(PRINTF) "-server ALIASED_TO -client\n">>$(@)
$(PRINTF) -- "-client KNOWN\n">>$(@)
$(PRINTF) -- "-server ALIASED_TO -client\n">>$(@)
ifeq ($(call check-jvm-variant, minimal), true)
$(PRINTF) "-minimal KNOWN\n">>$(@)
$(PRINTF) -- "-minimal KNOWN\n">>$(@)
endif
else
ifeq ($(call check-jvm-variant, minimal), true)
$(PRINTF) "-minimal KNOWN\n">>$(@)
$(PRINTF) "-server ALIASED_TO -minimal\n">>$(@)
$(PRINTF) "-client ALIASED_TO -minimal\n">>$(@)
$(PRINTF) -- "-minimal KNOWN\n">>$(@)
$(PRINTF) -- "-server ALIASED_TO -minimal\n">>$(@)
$(PRINTF) -- "-client ALIASED_TO -minimal\n">>$(@)
endif
endif
endif

View File

@ -130,6 +130,8 @@ if [ ! -d $DEVKIT_ROOT/$SDK_VERSION ]; then
cp -r "$SDK_INSTALL_DIR/lib/$SDK_FULL_VERSION/um/x86" $DEVKIT_ROOT/$SDK_VERSION/lib/
cp -r "$SDK_INSTALL_DIR/lib/$SDK_FULL_VERSION/ucrt/x64" $DEVKIT_ROOT/$SDK_VERSION/lib/
cp -r "$SDK_INSTALL_DIR/lib/$SDK_FULL_VERSION/ucrt/x86" $DEVKIT_ROOT/$SDK_VERSION/lib/
mkdir -p $DEVKIT_ROOT/$SDK_VERSION/Redist
cp -r "$SDK_INSTALL_DIR/Redist/ucrt" $DEVKIT_ROOT/$SDK_VERSION/Redist/
mkdir -p $DEVKIT_ROOT/$SDK_VERSION/include
cp -r "$SDK_INSTALL_DIR/include/$SDK_FULL_VERSION/"* $DEVKIT_ROOT/$SDK_VERSION/include/
fi
@ -152,12 +154,14 @@ echo-info "DEVKIT_VS_INCLUDE_x86=\"\$DEVKIT_ROOT/VC/include;\$DEVKIT_ROOT/VC/atl
echo-info "DEVKIT_VS_LIB_x86=\"\$DEVKIT_ROOT/VC/lib/x86;\$DEVKIT_ROOT/VC/atlmfc/lib/x86;\$DEVKIT_ROOT/$SDK_VERSION/lib/x86\""
echo-info "DEVKIT_MSVCR_DLL_x86=\"\$DEVKIT_ROOT/VC/redist/x86/$MSVCR_DLL\""
echo-info "DEVKIT_MSVCP_DLL_x86=\"\$DEVKIT_ROOT/VC/redist/x86/$MSVCP_DLL\""
echo-info "DEVKIT_UCRT_DLL_DIR_x86=\"\$DEVKIT_ROOT/10/Redist/ucrt/DLLs/x86\""
echo-info ""
echo-info "DEVKIT_TOOLCHAIN_PATH_x86_64=\"\$DEVKIT_ROOT/VC/bin/x64:\$DEVKIT_ROOT/$SDK_VERSION/bin/x64:\$DEVKIT_ROOT/$SDK_VERSION/bin/x86\""
echo-info "DEVKIT_VS_INCLUDE_x86_64=\"\$DEVKIT_ROOT/VC/include;\$DEVKIT_ROOT/VC/atlmfc/include;\$DEVKIT_ROOT/$SDK_VERSION/include/shared;\$DEVKIT_ROOT/$SDK_VERSION/include/ucrt;\$DEVKIT_ROOT/$SDK_VERSION/include/um;\$DEVKIT_ROOT/$SDK_VERSION/include/winrt\""
echo-info "DEVKIT_VS_LIB_x86_64=\"\$DEVKIT_ROOT/VC/lib/x64;\$DEVKIT_ROOT/VC/atlmfc/lib/x64;\$DEVKIT_ROOT/$SDK_VERSION/lib/x64\""
echo-info "DEVKIT_MSVCR_DLL_x86_64=\"\$DEVKIT_ROOT/VC/redist/x64/$MSVCR_DLL\""
echo-info "DEVKIT_MSVCP_DLL_x86_64=\"\$DEVKIT_ROOT/VC/redist/x64/$MSVCP_DLL\""
echo-info "DEVKIT_UCRT_DLL_DIR_x86_64=\"\$DEVKIT_ROOT/10/Redist/ucrt/DLLs/x64\""
################################################################################
# Copy this script

View File

@ -128,8 +128,9 @@ endif
ifneq ($(call check-jvm-feature, aot), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_AOT=0
JVM_EXCLUDE_FILES += \
compiledIC_aot_x86_64.cpp compilerRuntime.cpp \
aotCodeHeap.cpp aotCompiledMethod.cpp aotLoader.cpp compiledIC_aot.cpp
compiledIC_aot_x86_64.cpp compiledIC_aot_aarch64.cpp \
compilerRuntime.cpp aotCodeHeap.cpp aotCompiledMethod.cpp \
aotLoader.cpp compiledIC_aot.cpp
endif
ifneq ($(call check-jvm-feature, cmsgc), true)

View File

@ -109,6 +109,7 @@ public class CLDRConverter {
private static final String[] AVAILABLE_TZIDS = TimeZone.getAvailableIDs();
private static String zoneNameTempFile;
private static String tzDataDir;
private static final Map<String, String> canonicalTZMap = new HashMap<>();
static enum DraftType {
UNCONFIRMED,
@ -439,6 +440,15 @@ public class CLDRConverter {
// Parse timezone
handlerTimeZone = new TimeZoneParseHandler();
parseLDMLFile(new File(TIMEZONE_SOURCE_FILE), handlerTimeZone);
// canonical tz name map
// alias -> primary
handlerTimeZone.getData().forEach((k, v) -> {
String[] ids = ((String)v).split("\\s");
for (int i = 1; i < ids.length; i++) {
canonicalTZMap.put(ids[i], ids[0]);
}
});
}
private static void parseLDMLFile(File srcfile, AbstractLDMLHandler handler) throws Exception {
@ -658,7 +668,27 @@ public class CLDRConverter {
handlerMetaZones.get(tzid) == null ||
handlerMetaZones.get(tzid) != null &&
map.get(METAZONE_ID_PREFIX + handlerMetaZones.get(tzid)) == null) {
// First, check the CLDR meta key
// First, check the alias
String canonID = canonicalTZMap.get(tzid);
if (canonID != null && !tzid.equals(canonID)) {
Object value = map.get(TIMEZONE_ID_PREFIX + canonID);
if (value != null) {
names.put(tzid, value);
return;
} else {
String meta = handlerMetaZones.get(canonID);
if (meta != null) {
value = map.get(METAZONE_ID_PREFIX + meta);
if (value != null) {
names.put(tzid, meta);
return;
}
}
}
}
// Check the CLDR meta key
Optional<Map.Entry<String, String>> cldrMeta =
handlerMetaZones.getData().entrySet().stream()
.filter(me ->
@ -666,7 +696,7 @@ public class CLDRConverter {
(String[])map.get(METAZONE_ID_PREFIX + me.getValue())))
.findAny();
cldrMeta.ifPresentOrElse(meta -> names.put(tzid, meta.getValue()), () -> {
// check the JRE meta key, add if there is not.
// Check the JRE meta key, add if there is not.
Optional<Map.Entry<String[], String>> jreMeta =
jreMetaMap.entrySet().stream()
.filter(jm -> Arrays.deepEquals(data, jm.getKey()))
@ -1024,16 +1054,9 @@ public class CLDRConverter {
}
private static Stream<String> zidMapEntry() {
Map<String, String> canonMap = new HashMap<>();
handlerTimeZone.getData().entrySet().stream()
.forEach(e -> {
String[] ids = ((String)e.getValue()).split("\\s");
for (int i = 1; i < ids.length; i++) {
canonMap.put(ids[i], ids[0]);
}});
return ZoneId.getAvailableZoneIds().stream()
.map(id -> {
String canonId = canonMap.getOrDefault(id, id);
String canonId = canonicalTZMap.getOrDefault(id, id);
String meta = handlerMetaZones.get(canonId);
String zone001 = handlerMetaZones.zidMap().get(meta);
return zone001 == null ? "" :
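
The new canonicalTZMap centralizes what zidMapEntry() previously computed locally: CLDR's timezone data maps a primary zone ID to a whitespace-separated list whose first token is the primary ID and the rest are aliases. A self-contained sketch of the table and a lookup (the sample entry is a real CLDR alias pair, used here only for illustration):

import java.util.HashMap;
import java.util.Map;

public class CanonicalTzMapDemo {
    public static void main(String[] args) {
        Map<String, Object> data = Map.of(
            "America/Argentina/Buenos_Aires",
            "America/Argentina/Buenos_Aires America/Buenos_Aires");
        Map<String, String> canonicalTZMap = new HashMap<>();
        data.forEach((k, v) -> {
            String[] ids = ((String) v).split("\\s");
            for (int i = 1; i < ids.length; i++) {
                canonicalTZMap.put(ids[i], ids[0]); // alias -> primary
            }
        });
        // An alias resolves to its primary ID; anything else maps to itself.
        System.out.println(canonicalTZMap.getOrDefault(
            "America/Buenos_Aires", "America/Buenos_Aires"));
        System.out.println(canonicalTZMap.getOrDefault(
            "Europe/Paris", "Europe/Paris"));
    }
}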

View File

@ -24,9 +24,7 @@
#
#javac configuration for "normal build" (these will be passed to the bootstrap compiler):
javac.opts = -XDignore.symbol.file=true -Xlint:all,-deprecation,-options,-exports -Werror -g:source,lines,vars
javac.source = 9
javac.target = 9
javac.opts = -XDignore.symbol.file=true -Xlint:all,-deprecation,-exports -Werror -g:source,lines,vars
#version used to compile build tools
javac.build.opts = -XDignore.symbol.file=true -Xlint:all,-deprecation,-options -Werror -g:source,lines,vars

View File

@ -232,7 +232,6 @@
<pathconvert pathsep=" " property="source.files" refid="source.fileset"/>
<echo file="${build.dir}/sources.txt">${source.files}</echo>
<exec executable="${langtools.jdk.home}/bin/javac" failonerror="true">
<arg line="-source ${javac.source} -target ${javac.target}" />
<arg value="-d" />
<arg value="${build.modules}" />
<arg line="${javac.opts}" />

View File

@ -41,6 +41,7 @@ $(eval $(call SetupBuildLauncher, jaotc, \
, \
JAVA_ARGS := --add-exports=jdk.internal.vm.ci/jdk.vm.ci.hotspot.aarch64=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
--add-exports=jdk.internal.vm.ci/jdk.vm.ci.hotspot.amd64=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
--add-exports=jdk.internal.vm.ci/jdk.vm.ci.hotspot.sparc=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
--add-exports=jdk.internal.vm.ci/jdk.vm.ci.meta=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
--add-exports=jdk.internal.vm.ci/jdk.vm.ci.runtime=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \

View File

@ -2410,7 +2410,8 @@ public:
#define INSN(NAME, opcode) \
void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
starti; \
f(0, 31), f(0b001110, 29, 24), f(0, 21), f(0b001110, 15, 10); \
f(0, 31), f(0b001110, 29, 24), f(0, 21), f(0, 15); \
f(opcode, 14, 12), f(0b10, 11, 10); \
rf(Vm, 16), rf(Vn, 5), rf(Vd, 0); \
f(T & 1, 30), f(T >> 1, 23, 22); \
}
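
The fix above re-packs the instruction word: bit 15 is now a fixed 0, the per-instruction opcode lands in bits 14..12, and bits 11..10 hold a constant 0b10, instead of the old fixed 0b001110 occupying bits 15..10. A toy Java model of the assembler's f(value, msb, lsb) field inserter (not HotSpot code; the opcode value is an example):

public class BitFieldDemo {
    // Insert value into bits msb..lsb of insn, in the spirit of Assembler::f.
    static int f(int insn, int value, int msb, int lsb) {
        int width = msb - lsb + 1;
        int mask = (1 << width) - 1;      // fields here are all narrower than 32 bits
        assert (value & ~mask) == 0 : "value too wide for field";
        return insn | ((value & mask) << lsb);
    }

    public static void main(String[] args) {
        int opcode = 0b101;               // example three-bit SIMD opcode
        int insn = 0;
        insn = f(insn, 0, 31, 31);
        insn = f(insn, 0b001110, 29, 24);
        insn = f(insn, 0, 21, 21);
        insn = f(insn, 0, 15, 15);        // bit 15 fixed to 0 by the fix
        insn = f(insn, opcode, 14, 12);   // opcode now in bits 14..12
        insn = f(insn, 0b10, 11, 10);     // constant trailing 0b10
        System.out.printf("%08x%n", insn);
    }
}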

View File

@ -48,11 +48,14 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
__ b(_continuation);
}
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
bool throw_index_out_of_bounds_exception)
: _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
, _index(index)
{
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
: _throw_index_out_of_bounds_exception(false), _index(index), _array(array) {
assert(info != NULL, "must have info");
_info = new CodeEmitInfo(info);
}
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
: _throw_index_out_of_bounds_exception(true), _index(index), _array(NULL) {
assert(info != NULL, "must have info");
_info = new CodeEmitInfo(info);
}
@ -69,14 +72,16 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
}
if (_index->is_cpu_register()) {
__ mov(rscratch1, _index->as_register());
__ mov(r22, _index->as_register());
} else {
__ mov(rscratch1, _index->as_jint());
__ mov(r22, _index->as_jint());
}
Runtime1::StubID stub_id;
if (_throw_index_out_of_bounds_exception) {
stub_id = Runtime1::throw_index_exception_id;
} else {
assert(_array != NULL, "sanity");
__ mov(r23, _array->as_pointer_register());
stub_id = Runtime1::throw_range_check_failed_id;
}
__ far_call(RuntimeAddress(Runtime1::entry_for(stub_id)), NULL, rscratch2);

View File

@ -2807,7 +2807,8 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
}
void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
assert(patch_code == lir_patch_none, "Patch code not supported");
__ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr()));
}

View File

@ -323,7 +323,7 @@ void Runtime1::initialize_pd() {
// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs an argument (passed in rscratch1)
// has_argument: true if the exception needs arguments (passed in r22 and r23)
OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
// make a frame and preserve the caller's caller-save registers
@ -332,7 +332,7 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address targe
if (!has_argument) {
call_offset = __ call_RT(noreg, noreg, target);
} else {
call_offset = __ call_RT(noreg, noreg, target, rscratch1);
call_offset = __ call_RT(noreg, noreg, target, r22, r23);
}
OopMapSet* oop_maps = new OopMapSet();
oop_maps->add_gc_map(call_offset, oop_map);

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -56,8 +56,17 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
}
// static stub relocation stores the instruction address of the call
__ relocate(static_stub_Relocation::spec(mark));
// static stub relocation also tags the Method* in the code-stream.
#if INCLUDE_AOT
// Don't create a Metadata reloc if we're generating immutable PIC.
if (cbuf.immutable_PIC()) {
__ movptr(rmethod, 0);
} else {
__ mov_metadata(rmethod, (Metadata*)NULL);
}
#else
__ mov_metadata(rmethod, (Metadata*)NULL);
#endif
__ movptr(rscratch1, 0);
__ br(rscratch1);
@ -83,6 +92,61 @@ int CompiledStaticCall::reloc_to_interp_stub() {
return 4; // 3 in emit_to_interp_stub + 1 in emit_call
}
#if INCLUDE_AOT
#define __ _masm.
void CompiledStaticCall::emit_to_aot_stub(CodeBuffer &cbuf, address mark) {
if (!UseAOT) {
return;
}
// Stub is fixed up when the corresponding call is converted from
// calling compiled code to calling aot code.
// mov r, imm64_aot_code_address
// jmp r
if (mark == NULL) {
mark = cbuf.insts_mark(); // Get mark within main instrs section.
}
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a stub.
MacroAssembler _masm(&cbuf);
address base =
__ start_a_stub(to_aot_stub_size());
guarantee(base != NULL, "out of space");
// Static stub relocation stores the instruction address of the call.
__ relocate(static_stub_Relocation::spec(mark, true /* is_aot */));
// Load destination AOT code address.
__ movptr(rscratch1, 0); // address is zapped till fixup time.
// This is recognized as unresolved by relocs/nativeinst/ic code.
__ br(rscratch1);
assert(__ pc() - base <= to_aot_stub_size(), "wrong stub size");
// Update current stubs pointer and restore insts_end.
__ end_a_stub();
}
#undef __
int CompiledStaticCall::to_aot_stub_size() {
if (UseAOT) {
return 5 * 4; // movz; movk; movk; movk; br
} else {
return 0;
}
}
// Relocation entries for call stub, compiled java to aot.
int CompiledStaticCall::reloc_to_aot_stub() {
if (UseAOT) {
return 5 * 4; // movz; movk; movk; movk; br
} else {
return 0;
}
}
#endif // INCLUDE_AOT
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub(false /* is_aot */);
guarantee(stub != NULL, "stub not found");
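
The 5 * 4 stub size follows from how AArch64 materializes a 64-bit address: one movz plus three movk, 16 bits at a time, then a br through the scratch register. A small sketch that derives the four immediates for a made-up target address (register name illustrative):

public class MovzMovkChunks {
    public static void main(String[] args) {
        long target = 0x0000_7f3a_1234_5678L; // hypothetical AOT code address
        for (int shift = 0; shift < 64; shift += 16) {
            int imm16 = (int) ((target >>> shift) & 0xffff);
            String op = (shift == 0) ? "movz" : "movk";
            System.out.printf("%s x8, #0x%04x, lsl #%d%n", op, imm16, shift);
        }
        System.out.println("br   x8");
    }
}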

View File

@ -0,0 +1,103 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "aot/compiledIC_aot.hpp"
#include "code/codeCache.hpp"
#include "memory/resourceArea.hpp"
void CompiledDirectStaticCall::set_to_far(const methodHandle& callee, address entry) {
if (TraceICs) {
ResourceMark rm;
tty->print_cr("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_far %s",
p2i(instruction_address()),
callee->name_and_sig_as_C_string());
}
set_destination_mt_safe(entry);
}
void CompiledPltStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
guarantee(stub != NULL, "stub not found");
if (TraceICs) {
ResourceMark rm;
tty->print_cr("CompiledPltStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
p2i(instruction_address()),
callee->name_and_sig_as_C_string());
}
// Creation also verifies the object.
NativeLoadGot* method_loader = nativeLoadGot_at(stub);
NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address());
intptr_t data = method_loader->data();
address destination = jump->destination();
assert(data == 0 || data == (intptr_t)callee(),
"a) MT-unsafe modification of inline cache");
assert(destination == (address)Universe::non_oop_word()
|| destination == entry,
"b) MT-unsafe modification of inline cache");
// Update stub.
method_loader->set_data((intptr_t)callee());
jump->set_jump_destination(entry);
// Update jump to call.
set_destination_mt_safe(stub);
}
#ifdef NEVER_CALLED
void CompiledPltStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
// Reset stub.
address stub = static_stub->addr();
assert(stub != NULL, "stub not found");
// Creation also verifies the object.
NativeLoadGot* method_loader = nativeLoadGot_at(stub);
NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address());
method_loader->set_data(0);
jump->set_jump_destination((address)-1);
}
#endif
#ifndef PRODUCT
void CompiledPltStaticCall::verify() {
// Verify call.
_call->verify();
#ifdef ASSERT
CodeBlob *cb = CodeCache::find_blob_unsafe((address) _call);
assert(cb && cb->is_aot(), "CompiledPltStaticCall can only be used on AOTCompiledMethod");
#endif
// Verify stub.
address stub = find_stub();
assert(stub != NULL, "no stub found for static call");
// Creation also verifies the object.
NativeLoadGot* method_loader = nativeLoadGot_at(stub);
NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address());
// Verify state.
assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}
#endif // !PRODUCT

View File

@ -116,7 +116,7 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
// Do we need to load the previous value?
if (obj != noreg) {
__ load_heap_oop(pre_val, Address(obj, 0));
__ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
}
// Is the previous value null?
@ -294,7 +294,7 @@ void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet deco
false /* expand_call */);
if (val == noreg) {
__ store_heap_oop_null(Address(r3, 0));
BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), noreg, noreg, noreg);
} else {
// G1 barrier needs uncompressed oop for region cross check.
Register new_val = val;
@ -302,7 +302,7 @@ void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet deco
new_val = rscratch2;
__ mov(new_val, val);
}
__ store_heap_oop(Address(r3, 0), val);
BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), val, noreg, noreg);
g1_write_barrier_post(masm,
r3 /* store_adr */,
new_val /* new_val */,

View File

@ -35,11 +35,21 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators,
bool on_heap = (decorators & IN_HEAP) != 0;
bool on_root = (decorators & IN_ROOT) != 0;
bool oop_not_null = (decorators & OOP_NOT_NULL) != 0;
switch (type) {
case T_OBJECT:
case T_ARRAY: {
if (on_heap) {
__ load_heap_oop(dst, src);
if (UseCompressedOops) {
__ ldrw(dst, src);
if (oop_not_null) {
__ decode_heap_oop_not_null(dst);
} else {
__ decode_heap_oop(dst);
}
} else {
__ ldr(dst, src);
}
} else {
assert(on_root, "why else?");
__ ldr(dst, src);
@ -57,8 +67,17 @@ void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators
switch (type) {
case T_OBJECT:
case T_ARRAY: {
val = val == noreg ? zr : val;
if (on_heap) {
__ store_heap_oop(dst, val);
if (UseCompressedOops) {
assert(!dst.uses(val), "not enough registers");
if (val != zr) {
__ encode_heap_oop(val);
}
__ strw(val, dst);
} else {
__ str(val, dst);
}
} else {
assert(on_root, "why else?");
__ str(val, dst);
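
Background for the UseCompressedOops branches above: a compressed oop is a 32-bit value that, after an optional shift, is an offset from the heap base, so loads pair ldrw with a decode and stores pair an encode with strw. A sketch of the arithmetic with invented base and shift values (the real decode also special-cases null):

public class CompressedOopDemo {
    static final long HEAP_BASE = 0x0000_0008_0000_0000L; // hypothetical heap base
    static final int  SHIFT     = 3;                      // 8-byte object alignment

    static int encode(long oop) {                         // cf. encode_heap_oop
        return (int) ((oop - HEAP_BASE) >>> SHIFT);
    }

    static long decode(int narrow) {                      // cf. decode_heap_oop
        return HEAP_BASE + (Integer.toUnsignedLong(narrow) << SHIFT);
    }

    public static void main(String[] args) {
        long oop = HEAP_BASE + 0x1234_5678L * 8;
        System.out.println(decode(encode(oop)) == oop);   // true
    }
}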

View File

@ -90,13 +90,14 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2) {
bool in_heap = (decorators & IN_HEAP) != 0;
bool on_array = (decorators & IN_HEAP_ARRAY) != 0;
bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
bool precise = on_array || on_anonymous;
if (val == noreg) {
__ store_heap_oop_null(dst);
} else {
__ store_heap_oop(dst, val);
bool needs_post_barrier = val != noreg && in_heap;
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, noreg, noreg);
if (needs_post_barrier) {
// flatten object address if needed
if (!precise || (dst.index() == noreg && dst.offset() == 0)) {
store_check(masm, dst.base(), dst);

View File

@ -278,8 +278,7 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
resolve_oop_handle(result, tmp);
// Add in the index
add(result, result, index);
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->load_at(this, IN_HEAP, T_OBJECT, result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)), tmp, /*tmp_thread*/ noreg);
load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}
void InterpreterMacroAssembler::load_resolved_klass_at_offset(

View File

@ -35,6 +35,9 @@ jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Hand
return pc_offset + NativeCall::instruction_size;
} else if (inst->is_general_jump()) {
return pc_offset + NativeGeneralJump::instruction_size;
} else if (NativeInstruction::is_adrp_at((address)inst)) {
// adrp; add; blr
return pc_offset + 3 * NativeInstruction::instruction_size;
} else {
JVMCI_ERROR_0("unsupported type of instruction for call site");
}
@ -81,7 +84,8 @@ void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, T
void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset, TRAPS) {
address pc = _instructions->start() + pc_offset;
NativeInstruction* inst = nativeInstruction_at(pc);
if (inst->is_adr_aligned() || inst->is_ldr_literal()) {
if (inst->is_adr_aligned() || inst->is_ldr_literal()
|| (NativeInstruction::maybe_cpool_ref(pc))) {
address dest = _constants->start() + data_offset;
_instructions->relocate(pc, section_word_Relocation::spec((address) dest, CodeBuffer::SECT_CONSTS));
TRACE_jvmci_3("relocating at " PTR_FORMAT " (+%d) with destination at %d", p2i(pc), pc_offset, data_offset);
@ -104,6 +108,10 @@ void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong forei
NativeGeneralJump* jump = nativeGeneralJump_at(pc);
jump->set_jump_destination((address) foreign_call_destination);
_instructions->relocate(jump->instruction_address(), runtime_call_Relocation::spec());
} else if (NativeInstruction::is_adrp_at((address)inst)) {
// adrp; add; blr
MacroAssembler::pd_patch_instruction_size((address)inst,
(address)foreign_call_destination);
} else {
JVMCI_ERROR("unknown call or jump instruction at " PTR_FORMAT, p2i(pc));
}

View File

@ -3975,41 +3975,48 @@ void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
movk(dst, nk & 0xffff);
}
void MacroAssembler::load_heap_oop(Register dst, Address src)
{
if (UseCompressedOops) {
ldrw(dst, src);
decode_heap_oop(dst);
void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
Register dst, Address src,
Register tmp1, Register thread_tmp) {
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
bool as_raw = (decorators & AS_RAW) != 0;
if (as_raw) {
bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
} else {
ldr(dst, src);
bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
}
}
void MacroAssembler::load_heap_oop_not_null(Register dst, Address src)
{
if (UseCompressedOops) {
ldrw(dst, src);
decode_heap_oop_not_null(dst);
void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
Address dst, Register src,
Register tmp1, Register thread_tmp) {
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
bool as_raw = (decorators & AS_RAW) != 0;
if (as_raw) {
bs->BarrierSetAssembler::store_at(this, decorators, type, dst, src, tmp1, thread_tmp);
} else {
ldr(dst, src);
bs->store_at(this, decorators, type, dst, src, tmp1, thread_tmp);
}
}
void MacroAssembler::store_heap_oop(Address dst, Register src) {
if (UseCompressedOops) {
assert(!dst.uses(src), "not enough registers");
encode_heap_oop(src);
strw(src, dst);
} else
str(src, dst);
void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
Register thread_tmp, DecoratorSet decorators) {
access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
}
void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
Register thread_tmp, DecoratorSet decorators) {
access_load_at(T_OBJECT, IN_HEAP | OOP_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
}
void MacroAssembler::store_heap_oop(Address dst, Register src, Register tmp1,
Register thread_tmp, DecoratorSet decorators) {
access_store_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
}
// Used for storing NULLs.
void MacroAssembler::store_heap_oop_null(Address dst) {
if (UseCompressedOops) {
strw(zr, dst);
} else
str(zr, dst);
access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg);
}
Address MacroAssembler::allocate_metadata_address(Metadata* obj) {

View File

@ -789,10 +789,19 @@ public:
void resolve_oop_handle(Register result, Register tmp = r5);
void load_mirror(Register dst, Register method, Register tmp = r5);
void load_heap_oop(Register dst, Address src);
void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
Register tmp1, Register tmp_thread);
void load_heap_oop_not_null(Register dst, Address src);
void store_heap_oop(Address dst, Register src);
void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
Register tmp1, Register tmp_thread);
void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
Register thread_tmp = noreg, DecoratorSet decorators = 0);
void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
Register thread_tmp = noreg, DecoratorSet decorators = 0);
void store_heap_oop(Address dst, Register src, Register tmp1 = noreg,
Register tmp_thread = noreg, DecoratorSet decorators = 0);
// currently unimplemented
// Used for storing NULL. All other oop constants should be

View File

@ -135,11 +135,11 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
// Load the invoker, as MH -> MH.form -> LF.vmentry
__ verify_oop(recv);
__ load_heap_oop(method_temp, Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())));
__ load_heap_oop(method_temp, Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())), temp2);
__ verify_oop(method_temp);
__ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
__ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())), temp2);
__ verify_oop(method_temp);
__ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())));
__ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())), temp2);
__ verify_oop(method_temp);
__ ldr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())));
@ -311,7 +311,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
Label L_ok;
Register temp2_defc = temp2;
__ load_heap_oop(temp2_defc, member_clazz);
__ load_heap_oop(temp2_defc, member_clazz, temp3);
load_klass_from_Class(_masm, temp2_defc);
__ verify_klass_ptr(temp2_defc);
__ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, L_ok);

View File

@ -36,7 +36,120 @@
#include "c1/c1_Runtime1.hpp"
#endif
void NativeCall::verify() { ; }
void NativeCall::verify() {
assert(NativeCall::is_call_at((address)this), "unexpected code at call site");
}
void NativeInstruction::wrote(int offset) {
ICache::invalidate_word(addr_at(offset));
}
void NativeLoadGot::report_and_fail() const {
tty->print_cr("Addr: " INTPTR_FORMAT, p2i(instruction_address()));
fatal("not an indirect rip mov to rbx");
}
void NativeLoadGot::verify() const {
assert(is_adrp_at((address)this), "must be adrp");
}
address NativeLoadGot::got_address() const {
return MacroAssembler::target_addr_for_insn((address)this);
}
intptr_t NativeLoadGot::data() const {
return *(intptr_t *) got_address();
}
address NativePltCall::destination() const {
NativeGotJump* jump = nativeGotJump_at(plt_jump());
return *(address*)MacroAssembler::target_addr_for_insn((address)jump);
}
address NativePltCall::plt_entry() const {
return MacroAssembler::target_addr_for_insn((address)this);
}
address NativePltCall::plt_jump() const {
address entry = plt_entry();
// Virtual PLT code has move instruction first
if (((NativeGotJump*)entry)->is_GotJump()) {
return entry;
} else {
return nativeLoadGot_at(entry)->next_instruction_address();
}
}
address NativePltCall::plt_load_got() const {
address entry = plt_entry();
if (!((NativeGotJump*)entry)->is_GotJump()) {
// Virtual PLT code has move instruction first
return entry;
} else {
// Static PLT code has move instruction second (from c2i stub)
return nativeGotJump_at(entry)->next_instruction_address();
}
}
address NativePltCall::plt_c2i_stub() const {
address entry = plt_load_got();
// This method should be called only for static calls which have a C2I stub.
NativeLoadGot* load = nativeLoadGot_at(entry);
return entry;
}
address NativePltCall::plt_resolve_call() const {
NativeGotJump* jump = nativeGotJump_at(plt_jump());
address entry = jump->next_instruction_address();
if (((NativeGotJump*)entry)->is_GotJump()) {
return entry;
} else {
// c2i stub 2 instructions
entry = nativeLoadGot_at(entry)->next_instruction_address();
return nativeGotJump_at(entry)->next_instruction_address();
}
}
void NativePltCall::reset_to_plt_resolve_call() {
set_destination_mt_safe(plt_resolve_call());
}
void NativePltCall::set_destination_mt_safe(address dest) {
// Rewriting the value in the GOT; it should always be aligned.
NativeGotJump* jump = nativeGotJump_at(plt_jump());
address* got = (address *) jump->got_address();
*got = dest;
}
void NativePltCall::set_stub_to_clean() {
NativeLoadGot* method_loader = nativeLoadGot_at(plt_c2i_stub());
NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address());
method_loader->set_data(0);
jump->set_jump_destination((address)-1);
}
void NativePltCall::verify() const {
assert(NativeCall::is_call_at((address)this), "unexpected code at call site");
}
address NativeGotJump::got_address() const {
return MacroAssembler::target_addr_for_insn((address)this);
}
address NativeGotJump::destination() const {
address *got_entry = (address *) got_address();
return *got_entry;
}
bool NativeGotJump::is_GotJump() const {
NativeInstruction *insn =
nativeInstruction_at(addr_at(3 * NativeInstruction::instruction_size));
return insn->encoding() == 0xd61f0200; // br x16
}
void NativeGotJump::verify() const {
assert(is_adrp_at((address)this), "must be adrp");
}
address NativeCall::destination() const {
address addr = (address)this;
@ -71,6 +184,7 @@ void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
ResourceMark rm;
int code_size = NativeInstruction::instruction_size;
address addr_call = addr_at(0);
bool reachable = Assembler::reachable_from_branch_at(addr_call, dest);
assert(NativeCall::is_call_at(addr_call), "unexpected code at call site");
// Patch the constant in the call's trampoline stub.
@ -81,7 +195,7 @@ void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
}
// Patch the call.
if (Assembler::reachable_from_branch_at(addr_call, dest)) {
if (reachable) {
set_destination(dest);
} else {
assert (trampoline_stub_addr != NULL, "we need a trampoline");
@ -103,9 +217,11 @@ address NativeCall::get_trampoline() {
is_NativeCallTrampolineStub_at(bl_destination))
return bl_destination;
// If the codeBlob is not a nmethod, this is because we get here from the
// CodeBlob constructor, which is called within the nmethod constructor.
return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code);
if (code->is_nmethod()) {
return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code);
}
return NULL;
}
// Inserts a native call instruction at a given pc
@ -340,9 +456,16 @@ void NativeIllegalInstruction::insert(address code_pos) {
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");
assert(nativeInstruction_at(verified_entry)->is_jump_or_nop()
|| nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(),
"Aarch64 cannot replace non-jump with jump");
#ifdef ASSERT
// This may be the temporary nmethod generated while we're AOT
// compiling. Such an nmethod doesn't begin with a NOP but with an ADRP.
if (! (CalculateClassFingerprint && UseAOT && is_adrp_at(verified_entry))) {
assert(nativeInstruction_at(verified_entry)->is_jump_or_nop()
|| nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(),
"Aarch64 cannot replace non-jump with jump");
}
#endif
// Patch this nmethod atomically.
if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
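
The magic number in is_GotJump() above is the fixed encoding of "br x16": a BR instruction is 0xd61f0000 with the source register number in bits 9..5. A one-line check:

public class BrEncoding {
    static int br(int rn) {
        return 0xd61f0000 | (rn << 5); // BR Xn: base opcode | Rn in bits 9..5
    }

    public static void main(String[] args) {
        System.out.printf("%08x%n", br(16)); // prints d61f0200
    }
}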

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -95,6 +95,8 @@ class NativeInstruction {
void set_ptr_at (int offset, address ptr) { *(address*) addr_at(offset) = ptr; }
void set_oop_at (int offset, oop o) { *(oop*) addr_at(offset) = o; }
void wrote(int offset);
public:
// unit test stuff
@ -148,6 +150,46 @@ inline NativeInstruction* nativeInstruction_at(uint32_t *address) {
return (NativeInstruction*)address;
}
class NativePltCall: public NativeInstruction {
public:
enum Arm_specific_constants {
instruction_size = 4,
instruction_offset = 0,
displacement_offset = 1,
return_address_offset = 4
};
address instruction_address() const { return addr_at(instruction_offset); }
address next_instruction_address() const { return addr_at(return_address_offset); }
address displacement_address() const { return addr_at(displacement_offset); }
int displacement() const { return (jint) int_at(displacement_offset); }
address return_address() const { return addr_at(return_address_offset); }
address destination() const;
address plt_entry() const;
address plt_jump() const;
address plt_load_got() const;
address plt_resolve_call() const;
address plt_c2i_stub() const;
void set_stub_to_clean();
void reset_to_plt_resolve_call();
void set_destination_mt_safe(address dest);
void verify() const;
};
inline NativePltCall* nativePltCall_at(address address) {
NativePltCall* call = (NativePltCall*) address;
#ifdef ASSERT
call->verify();
#endif
return call;
}
inline NativePltCall* nativePltCall_before(address addr) {
address at = addr - NativePltCall::instruction_size;
return nativePltCall_at(at);
}
inline NativeCall* nativeCall_at(address address);
// The NativeCall is an abstraction for accessing/manipulating native
// call instructions (used to manipulate inline caches, primitive &
@ -169,7 +211,7 @@ class NativeCall: public NativeInstruction {
address return_address() const { return addr_at(return_address_offset); }
address destination() const;
void set_destination(address dest) {
int offset = dest - instruction_address();
unsigned int insn = 0b100101 << 26;
assert((offset & 3) == 0, "should be");
@ -191,6 +233,12 @@ class NativeCall: public NativeInstruction {
return is_call_at(return_address - NativeCall::return_address_offset);
}
#if INCLUDE_AOT
static bool is_far_call(address instr, address target) {
return !Assembler::reachable_from_branch_at(instr, target);
}
#endif
// MT-safe patching of a call instruction.
static void insert(address code_pos, address entry);
@ -381,6 +429,39 @@ class NativeLoadAddress: public NativeInstruction {
static void test() {}
};
// adrp x16, #page
// add x16, x16, #offset
// ldr x16, [x16]
class NativeLoadGot: public NativeInstruction {
public:
enum AArch64_specific_constants {
instruction_length = 4 * NativeInstruction::instruction_size,
offset_offset = 0,
};
address instruction_address() const { return addr_at(0); }
address return_address() const { return addr_at(instruction_length); }
address got_address() const;
address next_instruction_address() const { return return_address(); }
intptr_t data() const;
void set_data(intptr_t data) {
intptr_t *addr = (intptr_t *) got_address();
*addr = data;
}
void verify() const;
private:
void report_and_fail() const;
};
inline NativeLoadGot* nativeLoadGot_at(address addr) {
NativeLoadGot* load = (NativeLoadGot*) addr;
#ifdef ASSERT
load->verify();
#endif
return load;
}
class NativeJump: public NativeInstruction {
public:
enum AArch64_specific_constants {
@ -441,6 +522,31 @@ inline NativeGeneralJump* nativeGeneralJump_at(address address) {
return jump;
}
class NativeGotJump: public NativeInstruction {
public:
enum AArch64_specific_constants {
instruction_size = 4 * NativeInstruction::instruction_size,
};
void verify() const;
address instruction_address() const { return addr_at(0); }
address destination() const;
address return_address() const { return addr_at(instruction_size); }
address got_address() const;
address next_instruction_address() const { return addr_at(instruction_size); }
bool is_GotJump() const;
void set_jump_destination(address dest) {
address* got = (address *)got_address();
*got = dest;
}
};
inline NativeGotJump* nativeGotJump_at(address addr) {
NativeGotJump* jump = (NativeGotJump*)(addr);
return jump;
}
class NativePopReg : public NativeInstruction {
public:
// Insert a pop instruction

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -560,7 +560,7 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
__ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));
#if INCLUDE_JVMCI
if (EnableJVMCI) {
if (EnableJVMCI || UseAOT) {
// check if this call should be routed towards a specific entry point
__ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
Label no_alternative_target;
@ -2278,7 +2278,7 @@ void SharedRuntime::generate_deopt_blob() {
// Setup code generation tools
int pad = 0;
#if INCLUDE_JVMCI
if (EnableJVMCI) {
if (EnableJVMCI || UseAOT) {
pad += 512; // Increase the buffer size when compiling for JVMCI
}
#endif
@ -2360,7 +2360,7 @@ void SharedRuntime::generate_deopt_blob() {
int implicit_exception_uncommon_trap_offset = 0;
int uncommon_trap_offset = 0;
if (EnableJVMCI) {
if (EnableJVMCI || UseAOT) {
implicit_exception_uncommon_trap_offset = __ pc() - start;
__ ldr(lr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
@ -2486,7 +2486,7 @@ void SharedRuntime::generate_deopt_blob() {
__ reset_last_Java_frame(false);
#if INCLUDE_JVMCI
if (EnableJVMCI) {
if (EnableJVMCI || UseAOT) {
__ bind(after_fetch_unroll_info_call);
}
#endif
@ -2644,7 +2644,7 @@ void SharedRuntime::generate_deopt_blob() {
_deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
_deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
#if INCLUDE_JVMCI
if (EnableJVMCI) {
if (EnableJVMCI || UseAOT) {
_deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
_deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
}

View File

@ -1816,13 +1816,13 @@ class StubGenerator: public StubCodeGenerator {
__ align(OptoLoopAlignment);
__ BIND(L_store_element);
__ store_heap_oop(__ post(to, UseCompressedOops ? 4 : 8), copied_oop); // store the oop
__ store_heap_oop(__ post(to, UseCompressedOops ? 4 : 8), copied_oop, noreg, noreg, AS_RAW); // store the oop
__ sub(count, count, 1);
__ cbz(count, L_do_card_marks);
// ======== loop entry is here ========
__ BIND(L_load_element);
__ load_heap_oop(copied_oop, __ post(from, UseCompressedOops ? 4 : 8)); // load the oop
__ load_heap_oop(copied_oop, __ post(from, UseCompressedOops ? 4 : 8), noreg, noreg, AS_RAW); // load the oop
__ cbz(copied_oop, L_store_element);
__ load_klass(r19_klass, copied_oop);// query the object klass

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -333,16 +333,17 @@ address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
return entry;
}
address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
const char* name) {
address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
address entry = __ pc();
// expression stack must be empty before entering the VM if an
// exception happened
__ empty_expression_stack();
// setup parameters
// ??? convention: expect aberrant index in register r1
__ movw(c_rarg2, r1);
__ mov(c_rarg1, (address)name);
// ??? convention: expect array in register r3
__ mov(c_rarg1, r3);
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::
@ -483,7 +484,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
#if INCLUDE_JVMCI
// Check if we need to take lock at entry of synchronized method. This can
// only occur on method entry so emit it only for vtos with step 0.
if (EnableJVMCI && state == vtos && step == 0) {
if ((EnableJVMCI || UseAOT) && state == vtos && step == 0) {
Label L;
__ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
__ cbz(rscratch1, L);

View File

@ -147,16 +147,14 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
Register val,
DecoratorSet decorators) {
assert(val == noreg || val == r0, "parameter is just for looks");
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->store_at(_masm, decorators, T_OBJECT, dst, val, /*tmp1*/ r10, /*tmp2*/ r1);
__ store_heap_oop(dst, val, r10, r1, decorators);
}
static void do_oop_load(InterpreterMacroAssembler* _masm,
Address src,
Register dst,
DecoratorSet decorators) {
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->load_at(_masm, decorators, T_OBJECT, dst, src, /*tmp1*/ r10, /*tmp_thread*/ r1);
__ load_heap_oop(dst, src, r10, r1, decorators);
}
Address TemplateTable::at_bcp(int offset) {
@ -747,6 +745,8 @@ void TemplateTable::index_check(Register array, Register index)
}
Label ok;
__ br(Assembler::LO, ok);
// ??? convention: move array into r3 for exception message
__ mov(r3, array);
__ mov(rscratch1, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
__ br(rscratch1);
__ bind(ok);

View File

@ -50,14 +50,18 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
// TODO: ARM - is it possible to inline these stubs into the main code stream?
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
bool throw_index_out_of_bounds_exception)
: _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
, _index(index)
{
_info = info == NULL ? NULL : new CodeEmitInfo(info);
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
: _throw_index_out_of_bounds_exception(false), _index(index), _array(array) {
assert(info != NULL, "must have info");
_info = new CodeEmitInfo(info);
}
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
: _throw_index_out_of_bounds_exception(true), _index(index), _array(NULL) {
assert(info != NULL, "must have info");
_info = new CodeEmitInfo(info);
}
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
@ -73,7 +77,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
return;
}
// Pass the array index on stack because all registers must be preserved
ce->verify_reserved_argument_area_size(1);
ce->verify_reserved_argument_area_size(_throw_index_out_of_bounds_exception ? 1 : 2);
if (_index->is_cpu_register()) {
__ str_32(_index->as_register(), Address(SP));
} else {
@ -87,6 +91,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
#endif
__ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
} else {
__ str(_array->as_pointer_register(), Address(SP, BytesPerWord)); // ??? Correct offset? Correct instruction?
__ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
}
ce->add_call_info_here(_info);

View File

@ -3285,7 +3285,8 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
}
void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
assert(patch_code == lir_patch_none, "Patch code not supported");
LIR_Address* addr = addr_opr->as_address_ptr();
if (addr->index()->is_illegal()) {
jint c = addr->disp();

View File

@ -366,11 +366,15 @@ void Runtime1::initialize_pd() {
OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
OopMap* oop_map = save_live_registers(sasm);
int call_offset;
if (has_argument) {
__ ldr(R1, Address(SP, arg1_offset));
__ ldr(R2, Address(SP, arg2_offset));
call_offset = __ call_RT(noreg, noreg, target, R1, R2);
} else {
call_offset = __ call_RT(noreg, noreg, target);
}
int call_offset = __ call_RT(noreg, noreg, target);
OopMapSet* oop_maps = new OopMapSet();
oop_maps->add_gc_map(call_offset, oop_map);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -91,6 +91,11 @@ int CompiledStaticCall::reloc_to_interp_stub() {
}
#endif // COMPILER2_OR_JVMCI
int CompiledStaticCall::to_trampoline_stub_size() {
// ARM doesn't use trampolines.
return 0;
}
// size of C2 call stub, compiled java to interpreter
int CompiledStaticCall::to_interp_stub_size() {
return 8 * NativeInstruction::instruction_size;

View File

@ -52,7 +52,7 @@ void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong forei
Unimplemented();
}
void CodeInstaller::pd_relocate_JavaMethod(Handle hotspot_method, jint pc_offset, TRAPS) {
void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &cbuf, Handle hotspot_method, jint pc_offset, TRAPS) {
Unimplemented();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -185,18 +185,16 @@ address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
return entry;
}
address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
address entry = __ pc();
// index is in R4_ArrayIndexOutOfBounds_index
InlinedString Lname(name);
// expression stack must be empty before entering the VM if an exception happened
__ empty_expression_stack();
// setup parameters
__ ldr_literal(R1, Lname);
// Array expected in R1.
__ mov(R2, R4_ArrayIndexOutOfBounds_index);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), R1, R2);
@ -204,7 +202,6 @@ address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(con
__ nop(); // to avoid filling CPU pipeline with invalid instructions
__ nop();
__ should_not_reach_here();
__ bind_literal(Lname);
return entry;
}

View File

@ -398,7 +398,7 @@ void TemplateTable::sipush() {
void TemplateTable::ldc(bool wide) {
transition(vtos, vtos);
Label fastCase, Done;
Label fastCase, Condy, Done;
const Register Rindex = R1_tmp;
const Register Rcpool = R2_tmp;
@ -450,15 +450,11 @@ void TemplateTable::ldc(bool wide) {
// int, float, String
__ bind(fastCase);
#ifdef ASSERT
{ Label L;
__ cmp(RtagType, JVM_CONSTANT_Integer);
__ cond_cmp(RtagType, JVM_CONSTANT_Float, ne);
__ b(L, eq);
__ stop("unexpected tag type in ldc");
__ bind(L);
}
#endif // ASSERT
__ cmp(RtagType, JVM_CONSTANT_Integer);
__ cond_cmp(RtagType, JVM_CONSTANT_Float, ne);
__ b(Condy, ne);
// itos, ftos
__ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
__ ldr_u32(R0_tos, Address(Rtemp, base_offset));
@ -466,6 +462,11 @@ void TemplateTable::ldc(bool wide) {
// floats and ints are placed on stack in the same way, so
// we can use push(itos) to transfer float value without VFP
__ push(itos);
__ b(Done);
__ bind(Condy);
condy_helper(Done);
__ bind(Done);
}
@ -489,6 +490,23 @@ void TemplateTable::fast_aldc(bool wide) {
__ call_VM(R0_tos, entry, R1);
__ bind(resolved);
{ // Check for the null sentinel.
// If we just called the VM, that already did the mapping for us,
// but it's harmless to retry.
Label notNull;
Register result = R0;
Register tmp = R1;
Register rarg = R2;
// Stash null_sentinel address to get its value later
__ mov_slow(rarg, (uintptr_t)Universe::the_null_sentinel_addr());
__ ldr(tmp, Address(rarg));
__ cmp(result, tmp);
__ b(notNull, ne);
__ mov(result, 0); // NULL object reference
__ bind(notNull);
}
if (VerifyOops) {
__ verify_oop(R0_tos);
}
@ -509,8 +527,9 @@ void TemplateTable::ldc2_w() {
__ add(Rbase, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
Label Condy, exit;
#ifdef __ABI_HARD__
Label Long, exit;
Label Long;
// get type from tags
__ add(Rtemp, Rtags, tags_offset);
__ ldrb(Rtemp, Address(Rtemp, Rindex));
@ -523,6 +542,8 @@ void TemplateTable::ldc2_w() {
__ bind(Long);
#endif
__ cmp(Rtemp, JVM_CONSTANT_Long);
__ b(Condy, ne);
#ifdef AARCH64
__ ldr(R0_tos, Address(Rbase, base_offset));
#else
@ -530,10 +551,115 @@ void TemplateTable::ldc2_w() {
__ ldr(R1_tos_hi, Address(Rbase, base_offset + 1 * wordSize));
#endif // AARCH64
__ push(ltos);
__ b(exit);
__ bind(Condy);
condy_helper(exit);
#ifdef __ABI_HARD__
__ bind(exit);
}
void TemplateTable::condy_helper(Label& Done)
{
Register obj = R0_tmp;
Register rtmp = R1_tmp;
Register flags = R2_tmp;
Register off = R3_tmp;
__ mov(rtmp, (int) bytecode());
__ call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rtmp);
__ get_vm_result_2(flags, rtmp);
// VMr = obj = base address to find primitive value to push
// VMr2 = flags = (tos, off) using format of CPCE::_flags
__ mov(off, flags);
#ifdef AARCH64
__ andr(off, off, (unsigned)ConstantPoolCacheEntry::field_index_mask);
#else
__ logical_shift_left( off, off, 32 - ConstantPoolCacheEntry::field_index_bits);
__ logical_shift_right(off, off, 32 - ConstantPoolCacheEntry::field_index_bits);
#endif
const Address field(obj, off);
__ logical_shift_right(flags, flags, ConstantPoolCacheEntry::tos_state_shift);
// Make sure we don't need to mask flags after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();
switch (bytecode()) {
case Bytecodes::_ldc:
case Bytecodes::_ldc_w:
{
// tos in (itos, ftos, stos, btos, ctos, ztos)
Label notIntFloat, notShort, notByte, notChar, notBool;
__ cmp(flags, itos);
__ cond_cmp(flags, ftos, ne);
__ b(notIntFloat, ne);
__ ldr(R0_tos, field);
__ push(itos);
__ b(Done);
__ bind(notIntFloat);
__ cmp(flags, stos);
__ b(notShort, ne);
__ ldrsh(R0_tos, field);
__ push(stos);
__ b(Done);
__ bind(notShort);
__ cmp(flags, btos);
__ b(notByte, ne);
__ ldrsb(R0_tos, field);
__ push(btos);
__ b(Done);
__ bind(notByte);
__ cmp(flags, ctos);
__ b(notChar, ne);
__ ldrh(R0_tos, field);
__ push(ctos);
__ b(Done);
__ bind(notChar);
__ cmp(flags, ztos);
__ b(notBool, ne);
__ ldrsb(R0_tos, field);
__ push(ztos);
__ b(Done);
__ bind(notBool);
break;
}
case Bytecodes::_ldc2_w:
{
Label notLongDouble;
__ cmp(flags, ltos);
__ cond_cmp(flags, dtos, ne);
__ b(notLongDouble, ne);
#ifdef AARCH64
__ ldr(R0_tos, field);
#else
__ add(rtmp, obj, wordSize);
__ ldr(R0_tos_lo, Address(obj, off));
__ ldr(R1_tos_hi, Address(rtmp, off));
#endif
__ push(ltos);
__ b(Done);
__ bind(notLongDouble);
break;
}
default:
ShouldNotReachHere();
}
__ stop("bad ldc/condy");
}
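
The two paired logical shifts in condy_helper recover the field offset from the low bits of the flags word, and the single right shift recovers the tos state from the top bits. A minimal standalone C++ sketch of that decoding, assuming a 32-bit flags word with a 4-bit tos state on top and a 16-bit offset below; the k-prefixed constants are hypothetical stand-ins for ConstantPoolCacheEntry's real values.

#include <cstdint>
#include <cstdio>

// Hypothetical layout mirroring the shifts above: a 4-bit tos state in the
// topmost bits of a 32-bit flags word, a 16-bit field offset in the low bits.
static const unsigned kTosStateBits   = 4;
static const unsigned kTosStateShift  = 32 - kTosStateBits;
static const unsigned kFieldIndexBits = 16;

static uint32_t decode_offset(uint32_t flags) {
  // Same trick as the paired logical shifts in condy_helper: shift left to
  // discard the high bits, then right to bring the field back down to bit 0.
  return (flags << (32 - kFieldIndexBits)) >> (32 - kFieldIndexBits);
}

static uint32_t decode_tos(uint32_t flags) {
  // Matches the single right shift; no mask is needed because the tos state
  // occupies the topmost bits (which is what verify_tos_state_shift() asserts).
  return flags >> kTosStateShift;
}

int main() {
  uint32_t flags = (7u << kTosStateShift) | 0x123;  // tos = 7, offset = 0x123
  printf("tos=%u offset=0x%x\n", decode_tos(flags), decode_offset(flags));
  return 0;
}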
@ -746,6 +872,7 @@ void TemplateTable::index_check_without_pop(Register array, Register index) {
// convention with generate_ArrayIndexOutOfBounds_handler()
__ mov(R4_ArrayIndexOutOfBounds_index, index, hs);
}
__ mov(R1, array, hs);
__ b(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, hs);
}

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* Copyright (c) 2012, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,10 +37,14 @@
#define __ ce->masm()->
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
bool throw_index_out_of_bounds_exception)
: _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
, _index(index) {
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
: _throw_index_out_of_bounds_exception(false), _index(index), _array(array) {
assert(info != NULL, "must have info");
_info = new CodeEmitInfo(info);
}
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
: _throw_index_out_of_bounds_exception(true), _index(index), _array(NULL) {
assert(info != NULL, "must have info");
_info = new CodeEmitInfo(info);
}
@ -68,12 +72,16 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
__ mtctr(R0);
Register index = R0; // pass in R0
Register index = R0;
if (_index->is_register()) {
__ extsw(index, _index->as_register());
} else {
__ load_const_optimized(index, _index->as_jint());
}
if (_array) {
__ std(_array->as_pointer_register(), -8, R1_SP);
}
__ std(index, -16, R1_SP);
__ bctrl();
ce->add_call_info_here(_info);

View File

@ -2925,7 +2925,8 @@ void LIR_Assembler::on_spin_wait() {
Unimplemented();
}
void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
assert(patch_code == lir_patch_none, "Patch code not supported");
LIR_Address* addr = addr_opr->as_address_ptr();
assert(addr->scale() == LIR_Address::times_1, "no scaling on this platform");
if (addr->index()->is_illegal()) {

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* Copyright (c) 2012, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -502,8 +502,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
case throw_range_check_failed_id:
{
__ set_info("range_check_failed", dont_gc_arguments); // Arguments will be discarded.
__ std(R0, -8, R1_SP); // Pass index on stack.
oop_maps = generate_exception_throw_with_stack_parms(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), 1);
oop_maps = generate_exception_throw_with_stack_parms(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), 2);
}
break;

View File

@ -1037,7 +1037,7 @@ bool followed_by_acquire(const Node *load) {
// So first get the Proj node, mem_proj, to use it to iterate forward.
Node *mem_proj = NULL;
for (DUIterator_Fast imax, i = mba->fast_outs(imax); i < imax; i++) {
mem_proj = mba->fast_out(i); // Throw out-of-bounds if proj not found
mem_proj = mba->fast_out(i); // Runs out of bounds and asserts if Proj not found.
assert(mem_proj->is_Proj(), "only projections here");
ProjNode *proj = mem_proj->as_Proj();
if (proj->_con == TypeFunc::Memory &&

View File

@ -564,13 +564,13 @@ address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
return entry;
}
address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
address entry = __ pc();
__ empty_expression_stack();
__ load_const_optimized(R4_ARG2, (address) name);
// R4_ARG2 already contains the array.
// Index is in R17_tos.
__ mr(R5_ARG3, R17_tos);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException));
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), R4_ARG2, R5_ARG3);
return entry;
}

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* Copyright (c) 2016, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,10 +39,14 @@
#undef CHECK_BAILOUT
#define CHECK_BAILOUT() { if (ce->compilation()->bailed_out()) return; }
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
bool throw_index_out_of_bounds_exception) :
_throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception),
_index(index) {
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
: _throw_index_out_of_bounds_exception(false), _index(index), _array(array) {
assert(info != NULL, "must have info");
_info = new CodeEmitInfo(info);
}
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
: _throw_index_out_of_bounds_exception(true), _index(index), _array(NULL) {
assert(info != NULL, "must have info");
_info = new CodeEmitInfo(info);
}
@ -71,6 +75,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
stub_id = Runtime1::throw_index_exception_id;
} else {
stub_id = Runtime1::throw_range_check_failed_id;
__ lgr_if_needed(Z_R0_scratch, _array->as_pointer_register());
}
ce->emit_call_c(Runtime1::entry_for (stub_id));
CHECK_BAILOUT();

View File

@ -2922,7 +2922,8 @@ void LIR_Assembler::on_spin_wait() {
Unimplemented();
}
void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
assert(patch_code == lir_patch_none, "Patch code not supported");
LIR_Address* addr = addr_opr->as_address_ptr();
assert(addr->scale() == LIR_Address::times_1, "scaling unsupported");
__ load_address(dest->as_pointer_register(), as_Address(addr));

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017 SAP SE. All rights reserved.
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -314,8 +314,8 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, RegisterSet reg
__ save_return_pc(return_pc);
// Push a new frame (includes stack linkage).
// use return_pc as scratch for push_frame. Z_R0_scratch (the default) and Z_R1_scratch are
// illegally used to pass parameters (SAPJVM extension) by RangeCheckStub::emit_code().
// Use return_pc as scratch for push_frame. Z_R0_scratch (the default) and Z_R1_scratch are
// illegally used to pass parameters by RangeCheckStub::emit_code().
__ push_frame(frame_size_in_bytes, return_pc);
// We have to restore return_pc right away.
// Nobody else will. Furthermore, return_pc isn't necessarily the default (Z_R14).

View File

@ -551,9 +551,10 @@ address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
//
// Args:
// Z_ARG2: oop of array
// Z_ARG3: aberrant index
//
address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char * name) {
address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
address entry = __ pc();
address excp = CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException);
@ -562,8 +563,7 @@ address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(con
__ empty_expression_stack();
// Setup parameters.
// Leave out the name and use register for array to create more detailed exceptions.
__ load_absolute_address(Z_ARG2, (address) name);
// Pass register with array to create more detailed exceptions.
__ call_VM(noreg, excp, Z_ARG2, Z_ARG3);
return entry;
}

View File

@ -784,7 +784,7 @@ void TemplateTable::index_check(Register array, Register index, unsigned int shi
__ z_cl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
__ z_brl(index_ok);
__ lgr_if_needed(Z_ARG3, index); // See generate_ArrayIndexOutOfBounds_handler().
// Give back the array to create more detailed exceptions.
// Pass the array to create more detailed exceptions.
__ lgr_if_needed(Z_ARG2, array); // See generate_ArrayIndexOutOfBounds_handler().
__ load_absolute_address(Z_R1_scratch,
Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);

View File

@ -35,15 +35,17 @@
#define __ ce->masm()->
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
bool throw_index_out_of_bounds_exception)
: _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
, _index(index)
{
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
: _throw_index_out_of_bounds_exception(false), _index(index), _array(array) {
assert(info != NULL, "must have info");
_info = new CodeEmitInfo(info);
}
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
: _throw_index_out_of_bounds_exception(true), _index(index), _array(NULL) {
assert(info != NULL, "must have info");
_info = new CodeEmitInfo(info);
}
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
@ -66,6 +68,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
if (_throw_index_out_of_bounds_exception) {
__ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
} else {
__ mov(_array->as_pointer_register(), G5);
__ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
}
__ delayed()->nop();

View File

@ -3195,7 +3195,7 @@ void LIR_Assembler::unpack64(LIR_Opr src, LIR_Opr dst) {
__ srl (rs, 0, rd->successor());
}
void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
const LIR_Address* addr = addr_opr->as_address_ptr();
assert(addr->scale() == LIR_Address::times_1, "can't handle complex addresses yet");
const Register dest_reg = dest->as_pointer_register();

View File

@ -302,7 +302,7 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address targe
if (!has_argument) {
call_offset = __ call_RT(noreg, noreg, target);
} else {
call_offset = __ call_RT(noreg, noreg, target, G4);
call_offset = __ call_RT(noreg, noreg, target, G4, G5);
}
OopMapSet* oop_maps = new OopMapSet();
oop_maps->add_gc_map(call_offset, oop_map);

View File

@ -881,27 +881,32 @@ void InterpreterMacroAssembler::index_check_without_pop(Register array, Register
assert_not_delayed();
verify_oop(array);
// sign extend since tos (index) can be a 32bit value
// Sign extend since tos (index) can be a 32bit value.
sra(index, G0, index);
// check array
// Check array.
Label ptr_ok;
tst(array);
throw_if_not_1_x( notZero, ptr_ok );
delayed()->ld( array, arrayOopDesc::length_offset_in_bytes(), tmp ); // check index
throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ptr_ok);
throw_if_not_1_x(notZero, ptr_ok);
delayed()->ld(array, arrayOopDesc::length_offset_in_bytes(), tmp); // Check index.
throw_if_not_2(Interpreter::_throw_NullPointerException_entry, G3_scratch, ptr_ok);
Label index_ok;
cmp(index, tmp);
throw_if_not_1_icc( lessUnsigned, index_ok );
if (index_shift > 0) delayed()->sll(index, index_shift, index);
else delayed()->add(array, index, res); // addr - const offset in index
// convention: move aberrant index into G3_scratch for exception message
mov(index, G3_scratch);
throw_if_not_2( Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, G4_scratch, index_ok);
throw_if_not_1_icc(lessUnsigned, index_ok);
if (index_shift > 0) {
delayed()->sll(index, index_shift, index);
} else {
delayed()->add(array, index, res); // addr - const offset in index
}
// Pass the array to create more detailed exceptions.
// Convention: move aberrant index into Otos_i for exception message.
mov(index, Otos_i);
mov(array, G3_scratch);
throw_if_not_2(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, G4_scratch, index_ok);
// add offset if didn't do it in delay slot
if (index_shift > 0) add(array, index, res); // addr - const offset in index
if (index_shift > 0) { add(array, index, res); } // addr - const offset in index
}

View File

@ -255,15 +255,14 @@ address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
}
address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
address entry = __ pc();
// expression stack must be empty before entering the VM if an exception happened
__ empty_expression_stack();
// Pass the array to create more detailed exceptions.
// convention: expect aberrant index in register G3_scratch, then shuffle the
// index to G4_scratch for the VM call
__ mov(G3_scratch, G4_scratch);
__ set((intptr_t)name, G3_scratch);
__ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch);
__ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, Otos_i);
__ should_not_reach_here();
return entry;
}

View File

@ -8981,6 +8981,13 @@ void Assembler::testq(Register dst, Register src) {
emit_arith(0x85, 0xC0, dst, src);
}
void Assembler::testq(Register dst, Address src) {
InstructionMark im(this);
prefixq(src, dst);
emit_int8((unsigned char)0x85);
emit_operand(dst, src);
}
void Assembler::xaddq(Address dst, Register src) {
InstructionMark im(this);
prefixq(dst, src);

View File

@ -1813,6 +1813,7 @@ private:
void testq(Register dst, int32_t imm32);
void testq(Register dst, Register src);
void testq(Register dst, Address src);
// BMI - count trailing zeros
void tzcntl(Register dst, Register src);

View File

@ -88,15 +88,17 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
__ jmp(_continuation);
}
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
bool throw_index_out_of_bounds_exception)
: _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
, _index(index)
{
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
: _throw_index_out_of_bounds_exception(false), _index(index), _array(array) {
assert(info != NULL, "must have info");
_info = new CodeEmitInfo(info);
}
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
: _throw_index_out_of_bounds_exception(true), _index(index), _array(NULL) {
assert(info != NULL, "must have info");
_info = new CodeEmitInfo(info);
}
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
@ -120,6 +122,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
stub_id = Runtime1::throw_index_exception_id;
} else {
stub_id = Runtime1::throw_range_check_failed_id;
ce->store_parameter(_array->as_pointer_register(), 1);
}
__ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
ce->add_call_info_here(_info);

View File

@ -3786,11 +3786,22 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
}
void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
assert(addr->is_address() && dest->is_register(), "check");
Register reg;
reg = dest->as_pointer_register();
__ lea(reg, as_Address(addr->as_address_ptr()));
void LIR_Assembler::leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
assert(src->is_address(), "must be an address");
assert(dest->is_register(), "must be a register");
PatchingStub* patch = NULL;
if (patch_code != lir_patch_none) {
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
}
Register reg = dest->as_pointer_register();
LIR_Address* addr = src->as_address_ptr();
__ lea(reg, as_Address(addr));
if (patch != NULL) {
patching_epilog(patch, patch_code, addr->base()->as_register(), info);
}
}

View File

@ -611,26 +611,29 @@ void Runtime1::initialize_pd() {
}
// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs an argument (passed on stack because registers must be preserved)
// Target: the entry point of the method that creates and posts the exception oop.
// has_argument: true if the exception needs arguments (passed on the stack because
// registers must be preserved).
OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
// preserve all registers
int num_rt_args = has_argument ? 2 : 1;
// Preserve all registers.
int num_rt_args = has_argument ? (2 + 1) : 1;
OopMap* oop_map = save_live_registers(sasm, num_rt_args);
// now all registers are saved and can be used freely
// verify that no old value is used accidentally
// Now all registers are saved and can be used freely.
// Verify that no old value is used accidentally.
__ invalidate_registers(true, true, true, true, true, true);
// registers used by this stub
// Registers used by this stub.
const Register temp_reg = rbx;
// load argument for exception that is passed as an argument into the stub
// Load arguments for exception that are passed as arguments into the stub.
if (has_argument) {
#ifdef _LP64
__ movptr(c_rarg1, Address(rbp, 2*BytesPerWord));
__ movptr(c_rarg2, Address(rbp, 3*BytesPerWord));
#else
__ movptr(temp_reg, Address(rbp, 3*BytesPerWord));
__ push(temp_reg);
__ movptr(temp_reg, Address(rbp, 2*BytesPerWord));
__ push(temp_reg);
#endif // _LP64

View File

@ -504,18 +504,15 @@ void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
void InterpreterMacroAssembler::load_resolved_reference_at_index(
Register result, Register index, Register tmp) {
assert_different_registers(result, index);
// convert from field index to resolved_references() index and from
// word index to byte offset. Since this is a java object, it can be compressed
shll(index, LogBytesPerHeapOop);
get_constant_pool(result);
// load pointer for resolved_references[] objArray
movptr(result, Address(result, ConstantPool::cache_offset_in_bytes()));
movptr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
resolve_oop_handle(result, tmp);
// Add in the index
addptr(result, index);
load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)), tmp);
load_heap_oop(result, Address(result, index,
UseCompressedOops ? Address::times_4 : Address::times_ptr,
arrayOopDesc::base_offset_in_bytes(T_OBJECT)), tmp);
}
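
The rewritten load folds the index scaling into the addressing mode (times_4 for compressed oops, times_ptr otherwise) instead of pre-shifting the index register. A small sketch of the equivalent offset arithmetic, with a hypothetical header size standing in for arrayOopDesc::base_offset_in_bytes(T_OBJECT):

#include <cstdio>

// Hypothetical array header size; the real value comes from
// arrayOopDesc::base_offset_in_bytes(T_OBJECT).
static const size_t kHeaderBytes = 16;

static size_t element_offset(size_t index, bool compressed_oops) {
  // A heap-oop slot is 4 bytes with compressed oops, pointer-sized
  // otherwise; the scaled Address form computes this inside the load.
  size_t slot = compressed_oops ? 4 : sizeof(void*);
  return kHeaderBytes + index * slot;
}

int main() {
  printf("offset of element 3: %zu (compressed), %zu (uncompressed)\n",
         element_offset(3, true), element_offset(3, false));
  return 0;
}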
// load cpool->resolved_klass_at(index)

View File

@ -836,6 +836,7 @@ class MacroAssembler: public Assembler {
void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }
void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); }
void testptr(Register src1, Register src2);
void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
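
For readers unfamiliar with these pointer-width wrappers, a minimal model of the LP64_ONLY/NOT_LP64 dispatch, with hypothetical macro definitions written out the way a 64-bit (_LP64) build would expand them:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for HotSpot's conditional-compilation macros,
// expanded here as a 64-bit (_LP64) build would expand them.
#define LP64_ONLY(code) code
#define NOT_LP64(code)

static void testq(uint64_t a, uint64_t b) {
  printf("testq %#llx, %#llx\n", (unsigned long long)a, (unsigned long long)b);
}
static void testl(uint32_t a, uint32_t b) {
  printf("testl %#x, %#x\n", a, b);
}

// Mirrors the testptr pattern above: one pointer-width entry point that
// picks the 64-bit instruction here and would pick testl on 32-bit.
static void testptr(uint64_t src1, uint64_t src2) {
  LP64_ONLY(testq(src1, src2)) NOT_LP64(testl((uint32_t)src1, (uint32_t)src2));
}

int main() {
  (void)&testl;  // unused in this 64-bit expansion
  testptr(0x10, 0x30);
  return 0;
}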

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -436,6 +436,8 @@ address NativeMovRegMem::next_instruction_address() const {
case instruction_code_reg2memb: // 0x88
case instruction_code_mem2regb: // 0x8a
case instruction_code_lea: // 0x8d
case instruction_code_float_s: // 0xd9 fld_s a
case instruction_code_float_d: // 0xdd fld_d a
@ -508,6 +510,9 @@ void NativeMovRegMem::verify() {
case instruction_code_xmm_lpd: // 0x12 movlpd xmm, a
break;
case instruction_code_lea: // 0x8d lea r, a
break;
default:
fatal ("not a mov [reg+offs], reg instruction");
}

View File

@ -354,6 +354,8 @@ class NativeMovRegMem: public NativeInstruction {
instruction_code_xmm_store = 0x11,
instruction_code_xmm_lpd = 0x12,
instruction_code_lea = 0x8d,
instruction_VEX_prefix_2bytes = Assembler::VEX_2bytes,
instruction_VEX_prefix_3bytes = Assembler::VEX_3bytes,
instruction_EVEX_prefix_4bytes = Assembler::EVEX_4bytes,

View File

@ -102,16 +102,16 @@ address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
return entry;
}
address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
const char* name) {
address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
address entry = __ pc();
// expression stack must be empty before entering the VM if an
// exception happened
// The expression stack must be empty before entering the VM if an
// exception happened.
__ empty_expression_stack();
// setup parameters
// ??? convention: expect aberrant index in register ebx
// Setup parameters.
// ??? convention: expect aberrant index in register ebx/rbx.
// Pass array to create more detailed exceptions.
Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
__ lea(rarg, ExternalAddress((address)name));
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::

View File

@ -757,11 +757,14 @@ void TemplateTable::index_check_without_pop(Register array, Register index) {
assert(rbx != array, "different registers");
__ movl(rbx, index);
}
__ jump_cc(Assembler::aboveEqual,
ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
Label skip;
__ jccb(Assembler::below, skip);
// Pass array to create more detailed exceptions.
__ mov(NOT_LP64(rax) LP64_ONLY(c_rarg1), array);
__ jump(ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
__ bind(skip);
}
void TemplateTable::iaload() {
transition(itos, itos);
// rax: index
@ -1109,8 +1112,6 @@ void TemplateTable::aastore() {
__ load_klass(rax, rdx);
__ movptr(rax, Address(rax,
ObjArrayKlass::element_klass_offset()));
// Compress array + index*oopSize + 12 into a single register. Frees rcx.
__ lea(rdx, element_address);
// Generate subtype check. Blows rcx, rdi
// Superklass in rax. Subklass in rbx.
@ -1125,8 +1126,9 @@ void TemplateTable::aastore() {
// Get the value we will store
__ movptr(rax, at_tos());
__ movl(rcx, at_tos_p1()); // index
// Now store using the appropriate barrier
do_oop_store(_masm, Address(rdx, 0), rax, IN_HEAP_ARRAY);
do_oop_store(_masm, element_address, rax, IN_HEAP_ARRAY);
__ jmp(done);
// Have a NULL in rax, rdx=array, ecx=index. Store NULL at ary[idx]

View File

@ -235,7 +235,12 @@ int AixAttachListener::init() {
if (res == 0) {
RESTARTABLE(::chmod(initial_path, S_IREAD|S_IWRITE), res);
if (res == 0) {
res = ::rename(initial_path, path);
// make sure the file is owned by the effective user and effective group
// e.g. the group could be inherited from the directory in case the s bit is set
RESTARTABLE(::chown(initial_path, geteuid(), getegid()), res);
if (res == 0) {
res = ::rename(initial_path, path);
}
}
}
if (res == -1) {
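
The BSD and Linux listeners below gain the same chown step, so one hedged sketch covers all three: the full create, chmod, chown, rename sequence with hypothetical paths and error handling pared down to early returns.

#include <cstdio>
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

// Sketch of the sequence the hunk above completes; the paths are
// hypothetical and every failure simply aborts the publication.
static int publish_attach_file(const char* initial_path, const char* path) {
  int fd = open(initial_path, O_CREAT | O_EXCL | O_WRONLY, 0600);
  if (fd < 0) return -1;
  close(fd);
  if (chmod(initial_path, S_IRUSR | S_IWUSR) != 0) return -1;
  // Re-own the file to the effective uid/gid: with a setgid directory the
  // file could otherwise inherit the directory's group.
  if (chown(initial_path, geteuid(), getegid()) != 0) return -1;
  // Only after mode and ownership are correct does the file appear under
  // the name the attach mechanism watches for.
  return rename(initial_path, path);
}

int main() {
  return publish_attach_file("/tmp/.attach_pid_tmp", "/tmp/.attach_pid12345");
}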

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1246,7 +1246,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
*sizep = size;
log_debug(perf, memops)("mapped " SIZE_FORMAT " bytes for vmid %d at "
INTPTR_FORMAT "\n", size, vmid, p2i((void*)mapAddress));
INTPTR_FORMAT, size, vmid, p2i((void*)mapAddress));
}
// create the PerfData memory region

View File

@ -215,7 +215,8 @@ int BsdAttachListener::init() {
RESTARTABLE(::chmod(initial_path, S_IREAD|S_IWRITE), res);
if (res == 0) {
// make sure the file is owned by the effective user and effective group
// (this is the default on linux, but not on mac os)
// e.g. default behavior on mac is that new files inherit the group of
// the directory that they are created in
RESTARTABLE(::chown(initial_path, geteuid(), getegid()), res);
if (res == 0) {
res = ::rename(initial_path, path);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1152,7 +1152,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
*sizep = size;
log_debug(perf, memops)("mapped " SIZE_FORMAT " bytes for vmid %d at "
INTPTR_FORMAT "\n", size, vmid, p2i((void*)mapAddress));
INTPTR_FORMAT, size, vmid, p2i((void*)mapAddress));
}
// create the PerfData memory region

View File

@ -215,7 +215,12 @@ int LinuxAttachListener::init() {
if (res == 0) {
RESTARTABLE(::chmod(initial_path, S_IREAD|S_IWRITE), res);
if (res == 0) {
res = ::rename(initial_path, path);
// make sure the file is owned by the effective user and effective group
// e.g. the group could be inherited from the directory in case the s bit is set
RESTARTABLE(::chown(initial_path, geteuid(), getegid()), res);
if (res == 0) {
res = ::rename(initial_path, path);
}
}
}
if (res == -1) {

View File

@ -1241,7 +1241,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
*sizep = size;
log_debug(perf, memops)("mapped " SIZE_FORMAT " bytes for vmid %d at "
INTPTR_FORMAT "\n", size, vmid, p2i((void*)mapAddress));
INTPTR_FORMAT, size, vmid, p2i((void*)mapAddress));
}
// create the PerfData memory region

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1186,7 +1186,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
*sizep = size;
log_debug(perf, memops)("mapped " SIZE_FORMAT " bytes for vmid %d at "
INTPTR_FORMAT "\n", size, vmid, (void*)mapAddress);
INTPTR_FORMAT, size, vmid, (void*)mapAddress);
}
// create the PerfData memory region

View File

@ -4417,10 +4417,11 @@ bool os::dir_is_empty(const char* path) {
return false;
}
strcpy(search_path, path);
os::native_path(search_path);
// Append "*", or possibly "\\*", to path
if (path[1] == ':' &&
(path[2] == '\0' ||
(path[2] == '\\' && path[3] == '\0'))) {
if (search_path[1] == ':' &&
(search_path[2] == '\0' ||
(search_path[2] == '\\' && search_path[3] == '\0'))) {
// No '\\' needed for cases like "Z:" or "Z:\"
strcat(search_path, "*");
}
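
The fix makes the drive-root test look at the canonicalized copy (search_path) rather than the caller's original string. A standalone sketch of that append logic; the non-root branch is an assumption based on the comment above ("Append "*", or possibly "\\*", to path"):

#include <cstdio>
#include <cstring>

// Sketch of the wildcard-append step, operating on the native-form copy
// as the fix requires.
static void append_wildcard(char* search_path) {
  size_t n = strlen(search_path);
  bool drive_root = n >= 2 && search_path[1] == ':' &&
                    (search_path[2] == '\0' ||
                     (search_path[2] == '\\' && search_path[3] == '\0'));
  // "Z:" and "Z:\" already denote the directory, so a bare "*" suffices;
  // other paths presumably need the separator first.
  strcat(search_path, drive_root ? "*" : "\\*");
}

int main() {
  char p1[16] = "Z:";
  char p2[16] = "Z:\\dir";
  append_wildcard(p1);
  append_wildcard(p2);
  printf("%s\n%s\n", p1, p2);
  return 0;
}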

View File

@ -1697,7 +1697,7 @@ static void open_file_mapping(const char* user, int vmid,
CloseHandle(fmh);
log_debug(perf, memops)("mapped " SIZE_FORMAT " bytes for vmid %d at "
INTPTR_FORMAT "\n", size, vmid, mapAddress);
INTPTR_FORMAT, size, vmid, mapAddress);
}
// this method unmaps the mapped view of the

View File

@ -382,6 +382,10 @@ class CodeBuffer: public StackObj {
address _last_insn; // used to merge consecutive memory barriers, loads or stores.
#if INCLUDE_AOT
bool _immutable_PIC;
#endif
address _decode_begin; // start address for decode
address decode_begin();
@ -396,6 +400,9 @@ class CodeBuffer: public StackObj {
_overflow_arena = NULL;
_code_strings = CodeStrings();
_last_insn = NULL;
#if INCLUDE_AOT
_immutable_PIC = false;
#endif
}
void initialize(address code_start, csize_t code_size) {
@ -629,6 +636,13 @@ class CodeBuffer: public StackObj {
// Log a little info about section usage in the CodeBuffer
void log_section_sizes(const char* name);
#if INCLUDE_AOT
// True if this is a code buffer used for immutable PIC, i.e. AOT
// compilation.
bool immutable_PIC() { return _immutable_PIC; }
void set_immutable_PIC(bool pic) { _immutable_PIC = pic; }
#endif
#ifndef PRODUCT
public:
// Printing / Decoding

View File

@ -147,10 +147,14 @@ class RangeCheckStub: public CodeStub {
private:
CodeEmitInfo* _info;
LIR_Opr _index;
LIR_Opr _array;
bool _throw_index_out_of_bounds_exception;
public:
RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, bool throw_index_out_of_bounds_exception = false);
// For ArrayIndexOutOfBoundsException.
RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array);
// For IndexOutOfBoundsException.
RangeCheckStub(CodeEmitInfo* info, LIR_Opr index);
virtual void emit_code(LIR_Assembler* e);
virtual CodeEmitInfo* info() const { return _info; }
virtual bool is_exception_throw_stub() const { return true; }
@ -158,6 +162,7 @@ class RangeCheckStub: public CodeStub {
virtual void visit(LIR_OpVisitState* visitor) {
visitor->do_slow_case(_info);
visitor->do_input(_index);
if (_array) { visitor->do_input(_array); }
}
#ifndef PRODUCT
virtual void print_name(outputStream* out) const { out->print("RangeCheckStub"); }
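
A hedged miniature of the new two-constructor API, using toy types in place of LIR_Opr and the LIR visitor: the array-carrying form drives the ArrayIndexOutOfBoundsException path, the index-only form the IndexOutOfBoundsException path, and the optional operand is reported to the visitor only when present.

#include <cstdio>

// Toy types standing in for LIR_Opr and the visitor; only the shape of
// the two-constructor API is modelled here.
struct Opr { const char* name; };

struct RangeCheckStubSketch {
  Opr* _index;
  Opr* _array;   // NULL for the index-only form
  bool _throw_index_out_of_bounds_exception;

  // For ArrayIndexOutOfBoundsException: carries the array operand.
  RangeCheckStubSketch(Opr* index, Opr* array)
    : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {}
  // For IndexOutOfBoundsException: index only.
  explicit RangeCheckStubSketch(Opr* index)
    : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {}

  template <typename Visitor> void visit(Visitor& v) {
    v.do_input(_index);
    if (_array) { v.do_input(_array); }   // mirrors the guarded visit above
  }
};

struct PrintVisitor {
  void do_input(Opr* o) { printf("input: %s\n", o->name); }
};

int main() {
  Opr idx = { "index" };
  Opr arr = { "array" };
  PrintVisitor v;
  RangeCheckStubSketch(&idx, &arr).visit(v);  // AIOOBE form: visits both
  RangeCheckStubSketch(&idx).visit(v);        // IOOBE form: index only
  return 0;
}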

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -2074,7 +2074,7 @@ class LIR_List: public CompilationResourceObj {
void branch_destination(Label* lbl) { append(new LIR_OpLabel(lbl)); }
void negate(LIR_Opr from, LIR_Opr to) { append(new LIR_Op1(lir_neg, from, to)); }
void leal(LIR_Opr from, LIR_Opr result_reg) { append(new LIR_Op1(lir_leal, from, result_reg)); }
void leal(LIR_Opr from, LIR_Opr result_reg, LIR_PatchCode patch_code = lir_patch_none, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_leal, from, result_reg, T_ILLEGAL, patch_code, info)); }
// result is a stack location for old backend and vreg for UseLinearScan
// stack_loc_temp is an illegal register for old backend

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -276,7 +276,8 @@ void LIR_Assembler::emit_lir_list(LIR_List* list) {
// branches since they include block and stub names. Also print
// patching moves since they generate funny looking code.
if (op->code() == lir_branch ||
(op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
(op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none) ||
(op->code() == lir_leal && op->as_Op1()->patch_code() != lir_patch_none)) {
stringStream st;
op->print_on(&st);
_masm->block_comment(st.as_string());
@ -554,7 +555,7 @@ void LIR_Assembler::emit_op1(LIR_Op1* op) {
break;
case lir_leal:
leal(op->in_opr(), op->result_opr());
leal(op->in_opr(), op->result_opr(), op->patch_code(), op->info());
break;
case lir_null_check: {

View File

@ -240,7 +240,7 @@ class LIR_Assembler: public CompilationResourceObj {
void align_call(LIR_Code code);
void negate(LIR_Opr left, LIR_Opr dest);
void leal(LIR_Opr left, LIR_Opr dest);
void leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info);
void rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info);

View File

@ -480,7 +480,7 @@ void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitI
void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
CodeStub* stub = new RangeCheckStub(range_check_info, index);
CodeStub* stub = new RangeCheckStub(range_check_info, index, array);
if (index->is_constant()) {
cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
index->as_jint(), null_check_info);
@ -494,7 +494,7 @@ void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
CodeStub* stub = new RangeCheckStub(info, index, true);
CodeStub* stub = new RangeCheckStub(info, index);
if (index->is_constant()) {
cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
__ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
@ -1592,7 +1592,7 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
if (GenerateRangeChecks && needs_range_check) {
if (use_length) {
__ cmp(lir_cond_belowEqual, length.result(), index.result());
__ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
__ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));
} else {
array_range_check(array.result(), index.result(), null_check_info, range_check_info);
// range_check also does the null check
@ -1756,7 +1756,7 @@ void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
LIR_Opr result = rlock_result(x);
if (GenerateRangeChecks) {
CodeEmitInfo* info = state_for(x);
CodeStub* stub = new RangeCheckStub(info, index.result(), true);
CodeStub* stub = new RangeCheckStub(info, index.result());
if (index.result()->is_constant()) {
cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
__ branch(lir_cond_belowEqual, T_INT, stub);
@ -1837,12 +1837,12 @@ void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
if (GenerateRangeChecks && needs_range_check) {
if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
__ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
__ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result(), array.result()));
} else if (use_length) {
// TODO: use a (modified) version of array_range_check that does not require a
// constant length to be loaded to a register
__ cmp(lir_cond_belowEqual, length.result(), index.result());
__ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
__ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));
} else {
array_range_check(array.result(), index.result(), null_check_info, range_check_info);
// The range check performs the null check, so clear it out for the load

View File

@ -641,10 +641,12 @@ address Runtime1::exception_handler_for_pc(JavaThread* thread) {
}
JRT_ENTRY(void, Runtime1::throw_range_check_exception(JavaThread* thread, int index))
JRT_ENTRY(void, Runtime1::throw_range_check_exception(JavaThread* thread, int index, arrayOopDesc* a))
NOT_PRODUCT(_throw_range_check_exception_count++;)
char message[jintAsStringSize];
sprintf(message, "%d", index);
const int len = 35;
assert(len < strlen("Index %d out of bounds for length %d"), "Must allocate more space for message.");
char message[2 * jintAsStringSize + len];
sprintf(message, "Index %d out of bounds for length %d", index, a->length());
SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message);
JRT_END
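
A quick standalone check of the buffer arithmetic above, assuming HotSpot's jintAsStringSize is 12 (sign, ten digits, terminator): two worst-case ints plus the 32 bytes of fixed format text fit comfortably in 2 * 12 + 35 bytes.

#include <cstdio>
#include <cstring>

// Hypothetical stand-ins for HotSpot's jintAsStringSize and the 35-byte
// allowance reserved above.
static const int kJintAsStringSize = 12;
static const int kLen = 35;

int main() {
  char message[2 * kJintAsStringSize + kLen];
  int index  = -2147483647 - 1;  // INT_MIN: the widest index rendering
  int length = 2147483647;       // INT_MAX: the widest length rendering
  snprintf(message, sizeof(message),
           "Index %d out of bounds for length %d", index, length);
  // Worst case: 11 + 11 digit characters plus 32 bytes of fixed text.
  printf("%zu bytes used of %zu\n", strlen(message), sizeof(message) - 1);
  return 0;
}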

View File

@ -149,7 +149,7 @@ class Runtime1: public AllStatic {
static address exception_handler_for_pc(JavaThread* thread);
static void throw_range_check_exception(JavaThread* thread, int index);
static void throw_range_check_exception(JavaThread* thread, int index, arrayOopDesc* a);
static void throw_index_exception(JavaThread* thread, int index);
static void throw_div0_exception(JavaThread* thread);
static void throw_null_pointer_exception(JavaThread* thread);

View File

@ -1552,56 +1552,63 @@ void ClassLoader::record_result(InstanceKlass* ik, const ClassFileStream* stream
PackageEntry* pkg_entry = ik->package();
if (FileMapInfo::get_number_of_shared_paths() > 0) {
char* canonical_path = NEW_RESOURCE_ARRAY(char, JVM_MAXPATHLEN);
char* canonical_path_table_entry = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, JVM_MAXPATHLEN);
// save the path from the file: protocol or the module name from the jrt: protocol
// if no protocol prefix is found, path is the same as stream->source()
char* path = skip_uri_protocol(src);
char* canonical_class_src_path = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, JVM_MAXPATHLEN);
if (!get_canonical_path(path, canonical_class_src_path, JVM_MAXPATHLEN)) {
tty->print_cr("Bad pathname %s. CDS dump aborted.", path);
vm_exit(1);
}
for (int i = 0; i < FileMapInfo::get_number_of_shared_paths(); i++) {
SharedClassPathEntry* ent = FileMapInfo::shared_path(i);
if (get_canonical_path(ent->name(), canonical_path, JVM_MAXPATHLEN)) {
// If the path (from the class stream source) is the same as the shared
// class or module path, then we have a match.
if (strcmp(canonical_path, os::native_path((char*)path)) == 0) {
// NULL pkg_entry and pkg_entry in an unnamed module implies the class
// is from the -cp or boot loader append path which consists of -Xbootclasspath/a
// and jvmti appended entries.
if ((pkg_entry == NULL) || (pkg_entry->in_unnamed_module())) {
// Ensure the index is within the -cp range before assigning
// to the classpath_index.
if (SystemDictionary::is_system_class_loader(loader) &&
(i >= ClassLoaderExt::app_class_paths_start_index()) &&
(i < ClassLoaderExt::app_module_paths_start_index())) {
if (!get_canonical_path(ent->name(), canonical_path_table_entry, JVM_MAXPATHLEN)) {
tty->print_cr("Bad pathname %s. CDS dump aborted.", ent->name());
vm_exit(1);
}
// If the path (from the class stream source) is the same as the shared
// class or module path, then we have a match.
if (strcmp(canonical_path_table_entry, canonical_class_src_path) == 0) {
// NULL pkg_entry and pkg_entry in an unnamed module implies the class
// is from the -cp or boot loader append path which consists of -Xbootclasspath/a
// and jvmti appended entries.
if ((pkg_entry == NULL) || (pkg_entry->in_unnamed_module())) {
// Ensure the index is within the -cp range before assigning
// to the classpath_index.
if (SystemDictionary::is_system_class_loader(loader) &&
(i >= ClassLoaderExt::app_class_paths_start_index()) &&
(i < ClassLoaderExt::app_module_paths_start_index())) {
classpath_index = i;
break;
} else {
if ((i >= 1) &&
(i < ClassLoaderExt::app_class_paths_start_index())) {
// The class must be from boot loader append path which consists of
// -Xbootclasspath/a and jvmti appended entries.
assert(loader == NULL, "sanity");
classpath_index = i;
break;
} else {
if ((i >= 1) &&
(i < ClassLoaderExt::app_class_paths_start_index())) {
// The class must be from boot loader append path which consists of
// -Xbootclasspath/a and jvmti appended entries.
assert(loader == NULL, "sanity");
classpath_index = i;
break;
}
}
} else {
// A class from a named module from the --module-path. Ensure the index is
// within the --module-path range before assigning to the classpath_index.
if ((pkg_entry != NULL) && !(pkg_entry->in_unnamed_module()) && (i > 0)) {
if (i >= ClassLoaderExt::app_module_paths_start_index() &&
i < FileMapInfo::get_number_of_shared_paths()) {
classpath_index = i;
break;
}
}
} else {
// A class from a named module from the --module-path. Ensure the index is
// within the --module-path range before assigning to the classpath_index.
if ((pkg_entry != NULL) && !(pkg_entry->in_unnamed_module()) && (i > 0)) {
if (i >= ClassLoaderExt::app_module_paths_start_index() &&
i < FileMapInfo::get_number_of_shared_paths()) {
classpath_index = i;
break;
}
}
}
// for index 0 and the stream->source() is the modules image or has the jrt: protocol.
// The class must be from the runtime modules image.
if (i == 0 && (is_modules_image(src) || string_starts_with(src, "jrt:"))) {
classpath_index = i;
break;
}
}
// for index 0 and the stream->source() is the modules image or has the jrt: protocol.
// The class must be from the runtime modules image.
if (i == 0 && (is_modules_image(src) || string_starts_with(src, "jrt:"))) {
classpath_index = i;
break;
}
}
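
For context on the canonicalization above, a hedged sketch of the protocol stripping the comment describes; only the two prefixes the surrounding code mentions are handled, and the real ClassLoader::skip_uri_protocol may accept additional forms.

#include <cstdio>
#include <cstring>

// Minimal model of skip_uri_protocol, under the assumption that "file:"
// and "jrt:" are the only prefixes that matter here.
static const char* skip_uri_protocol(const char* src) {
  if (strncmp(src, "file:", 5) == 0) return src + 5;
  if (strncmp(src, "jrt:", 4) == 0) return src + 4;
  return src;  // no protocol prefix: path is the same as stream->source()
}

int main() {
  printf("%s\n", skip_uri_protocol("file:/opt/app/lib/a.jar"));
  printf("%s\n", skip_uri_protocol("jrt:/java.base"));
  printf("%s\n", skip_uri_protocol("/plain/path"));
  return 0;
}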

View File

@ -79,12 +79,11 @@ void ClassLoaderExt::setup_app_search_path() {
}
void ClassLoaderExt::process_module_table(ModuleEntryTable* met, TRAPS) {
ResourceMark rm;
ResourceMark rm(THREAD);
for (int i = 0; i < met->table_size(); i++) {
for (ModuleEntry* m = met->bucket(i); m != NULL;) {
char* path = m->location()->as_C_string();
if (strncmp(path, "file:", 5) == 0 && ClassLoader::string_ends_with(path, ".jar")) {
m->print();
if (strncmp(path, "file:", 5) == 0) {
path = ClassLoader::skip_uri_protocol(path);
ClassLoader::setup_module_search_path(path, THREAD);
}

View File

@ -768,7 +768,7 @@ static void initialize_static_field(fieldDescriptor* fd, Handle mirror, TRAPS) {
{
assert(fd->signature() == vmSymbols::string_signature(),
"just checking");
if (DumpSharedSpaces && oopDesc::is_archive_object(mirror())) {
if (DumpSharedSpaces && MetaspaceShared::is_archive_object(mirror())) {
// Archive the String field and update the pointer.
oop s = mirror()->obj_field(fd->offset());
oop archived_s = StringTable::create_archived_string(s, CHECK);
@ -809,7 +809,7 @@ void java_lang_Class::fixup_mirror(Klass* k, TRAPS) {
if (MetaspaceShared::open_archive_heap_region_mapped()) {
oop m = k->archived_java_mirror();
assert(m != NULL, "archived mirror is NULL");
assert(oopDesc::is_archive_object(m), "must be archived mirror object");
assert(MetaspaceShared::is_archive_object(m), "must be archived mirror object");
Handle m_h(THREAD, m);
// restore_archived_mirror() clears the klass' _has_raw_archived_mirror flag
restore_archived_mirror(k, m_h, Handle(), Handle(), Handle(), CHECK);
@ -3556,34 +3556,6 @@ void java_lang_ref_SoftReference::set_clock(jlong value) {
base->long_field_put(static_clock_offset, value);
}
// Support for java_lang_ref_ReferenceQueue
oop java_lang_ref_ReferenceQueue::NULL_queue() {
InstanceKlass* ik = SystemDictionary::ReferenceQueue_klass();
oop mirror = ik->java_mirror();
return mirror->obj_field(static_NULL_queue_offset);
}
oop java_lang_ref_ReferenceQueue::ENQUEUED_queue() {
InstanceKlass* ik = SystemDictionary::ReferenceQueue_klass();
oop mirror = ik->java_mirror();
return mirror->obj_field(static_ENQUEUED_queue_offset);
}
void java_lang_ref_ReferenceQueue::compute_offsets() {
InstanceKlass* k = SystemDictionary::ReferenceQueue_klass();
compute_offset(static_NULL_queue_offset,
k,
vmSymbols::referencequeue_null_name(),
vmSymbols::referencequeue_signature(),
true /* is_static */);
compute_offset(static_ENQUEUED_queue_offset,
k,
vmSymbols::referencequeue_enqueued_name(),
vmSymbols::referencequeue_signature(),
true /* is_static */);
}
// Support for java_lang_invoke_DirectMethodHandle
int java_lang_invoke_DirectMethodHandle::_member_offset;
@ -4263,8 +4235,6 @@ int java_lang_ref_Reference::referent_offset;
int java_lang_ref_Reference::queue_offset;
int java_lang_ref_Reference::next_offset;
int java_lang_ref_Reference::discovered_offset;
int java_lang_ref_ReferenceQueue::static_NULL_queue_offset;
int java_lang_ref_ReferenceQueue::static_ENQUEUED_queue_offset;
int java_lang_ref_SoftReference::timestamp_offset;
int java_lang_ref_SoftReference::static_clock_offset;
int java_lang_ClassLoader::parent_offset;
@ -4509,7 +4479,6 @@ void JavaClasses::compute_offsets() {
java_lang_StackTraceElement::compute_offsets();
java_lang_StackFrameInfo::compute_offsets();
java_lang_LiveStackFrameInfo::compute_offsets();
java_lang_ref_ReferenceQueue::compute_offsets();
// generated interpreter code wants to know about the offsets we just computed:
AbstractAssembler::update_delayed_values();

View File

@ -946,20 +946,6 @@ class java_lang_ref_SoftReference: public java_lang_ref_Reference {
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
};
// Interface to java.lang.ref.ReferenceQueue objects
class java_lang_ref_ReferenceQueue: public AllStatic {
public:
static int static_NULL_queue_offset;
static int static_ENQUEUED_queue_offset;
// Accessors
static oop NULL_queue();
static oop ENQUEUED_queue();
static void compute_offsets();
};
// Interface to java.lang.invoke.MethodHandle objects
class MethodHandleEntry;

View File

@ -127,12 +127,6 @@ void java_lang_ref_Reference::set_discovered_raw(oop ref, oop value) {
HeapWord* java_lang_ref_Reference::discovered_addr_raw(oop ref) {
return ref->obj_field_addr_raw<HeapWord>(discovered_offset);
}
oop java_lang_ref_Reference::queue(oop ref) {
return ref->obj_field(queue_offset);
}
void java_lang_ref_Reference::set_queue(oop ref, oop value) {
return ref->obj_field_put(queue_offset, value);
}
bool java_lang_ref_Reference::is_phantom(oop ref) {
return InstanceKlass::cast(ref->klass())->reference_type() == REF_PHANTOM;
}

View File

@ -28,12 +28,13 @@
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
@ -44,9 +45,6 @@
#include "services/diagnosticCommand.hpp"
#include "utilities/hashtable.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1StringDedup.hpp"
#endif
// the number of buckets a thread claims
const int ClaimChunkSize = 32;
@ -260,14 +258,10 @@ oop StringTable::intern(Handle string_or_null, jchar* name,
string = java_lang_String::create_from_unicode(name, len, CHECK_NULL);
}
#if INCLUDE_G1GC
if (G1StringDedup::is_enabled()) {
// Deduplicate the string before it is interned. Note that we should never
// deduplicate a string after it has been interned. Doing so will counteract
// compiler optimizations done on e.g. interned string literals.
G1StringDedup::deduplicate(string());
}
#endif
// Deduplicate the string before it is interned. Note that we should never
// deduplicate a string after it has been interned. Doing so will counteract
// compiler optimizations done on e.g. interned string literals.
Universe::heap()->deduplicate_string(string());
// Grab the StringTable_lock before getting the_table() because it could
// change at safepoint.
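
The G1-specific #if block is replaced by a single virtual call, which the G1CollectedHeap hunk further down overrides. A rough model with hypothetical class names, assuming (as the virtual qualifier suggests) a no-op default on the shared heap base class:

#include <cstdio>

// Hypothetical names; the real hook is a virtual deduplicate_string on
// the heap, overridden by G1 as shown further down in this diff.
struct Heap {
  virtual ~Heap() {}
  virtual void deduplicate_string(const char*) { /* default: no-op */ }
};

struct DedupCapableHeap : Heap {
  void deduplicate_string(const char* s) override {
    printf("dedup request for \"%s\"\n", s);
  }
};

static void intern(Heap* heap, const char* s) {
  // Dedup must happen before interning; doing it afterwards would undo
  // optimizations applied to interned literals (see the comment above).
  heap->deduplicate_string(s);
}

int main() {
  DedupCapableHeap dedup_heap;
  Heap plain_heap;
  intern(&dedup_heap, "hello");
  intern(&plain_heap, "hello");  // no-op on a heap without dedup support
  return 0;
}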

View File

@ -2076,8 +2076,6 @@ void SystemDictionary::initialize_preloaded_classes(TRAPS) {
InstanceKlass::cast(WK_KLASS(FinalReference_klass))->set_reference_type(REF_FINAL);
InstanceKlass::cast(WK_KLASS(PhantomReference_klass))->set_reference_type(REF_PHANTOM);
initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(ReferenceQueue_klass), scan, CHECK);
// JSR 292 classes
WKID jsr292_group_start = WK_KLASS_ENUM_NAME(MethodHandle_klass);
WKID jsr292_group_end = WK_KLASS_ENUM_NAME(VolatileCallSite_klass);

View File

@ -135,7 +135,6 @@ class OopStorage;
do_klass(FinalReference_klass, java_lang_ref_FinalReference, Pre ) \
do_klass(PhantomReference_klass, java_lang_ref_PhantomReference, Pre ) \
do_klass(Finalizer_klass, java_lang_ref_Finalizer, Pre ) \
do_klass(ReferenceQueue_klass, java_lang_ref_ReferenceQueue, Pre ) \
\
do_klass(Thread_klass, java_lang_Thread, Pre ) \
do_klass(ThreadGroup_klass, java_lang_ThreadGroup, Pre ) \

View File

@ -86,7 +86,6 @@
template(java_lang_ref_FinalReference, "java/lang/ref/FinalReference") \
template(java_lang_ref_PhantomReference, "java/lang/ref/PhantomReference") \
template(java_lang_ref_Finalizer, "java/lang/ref/Finalizer") \
template(java_lang_ref_ReferenceQueue, "java/lang/ref/ReferenceQueue") \
template(java_lang_reflect_AccessibleObject, "java/lang/reflect/AccessibleObject") \
template(java_lang_reflect_Method, "java/lang/reflect/Method") \
template(java_lang_reflect_Constructor, "java/lang/reflect/Constructor") \
@ -439,8 +438,6 @@
template(module_entry_name, "module_entry") \
template(resolved_references_name, "<resolved_references>") \
template(init_lock_name, "<init_lock>") \
template(referencequeue_null_name, "NULL") \
template(referencequeue_enqueued_name, "ENQUEUED") \
\
/* name symbols needed by intrinsics */ \
VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, VM_SYMBOL_IGNORE, template, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE) \
@ -534,8 +531,6 @@
template(string_signature, "Ljava/lang/String;") \
template(string_array_signature, "[Ljava/lang/String;") \
template(reference_signature, "Ljava/lang/ref/Reference;") \
template(referencequeue_signature, "Ljava/lang/ref/ReferenceQueue;") \
template(sun_misc_Cleaner_signature, "Lsun/misc/Cleaner;") \
template(executable_signature, "Ljava/lang/reflect/Executable;") \
template(module_signature, "Ljava/lang/Module;") \
template(concurrenthashmap_signature, "Ljava/util/concurrent/ConcurrentHashMap;") \

View File

@ -5062,22 +5062,6 @@ void CMSRefProcTaskProxy::work(uint worker_id) {
assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
}
class CMSRefEnqueueTaskProxy: public AbstractGangTask {
typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
EnqueueTask& _task;
public:
CMSRefEnqueueTaskProxy(EnqueueTask& task)
: AbstractGangTask("Enqueue reference objects in parallel"),
_task(task)
{ }
virtual void work(uint worker_id)
{
_task.work(worker_id);
}
};
CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
_span(span),
@ -5147,16 +5131,6 @@ void CMSRefProcTaskExecutor::execute(ProcessTask& task)
workers->run_task(&rp_task);
}
void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
{
CMSHeap* heap = CMSHeap::heap();
WorkGang* workers = heap->workers();
assert(workers != NULL, "Need parallel worker threads.");
CMSRefEnqueueTaskProxy enq_task(task);
workers->run_task(&enq_task);
}
void CMSCollector::refProcessingWork() {
ResourceMark rm;
HandleMark hm;
@ -7149,7 +7123,7 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
// coalesced chunk to the appropriate free list.
if (inFreeRange()) {
assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
"freeFinger() " PTR_FORMAT " is out-of-bounds", p2i(freeFinger()));
"freeFinger() " PTR_FORMAT " is out of bounds", p2i(freeFinger()));
flush_cur_free_chunk(freeFinger(),
pointer_delta(addr, freeFinger()));
log_develop_trace(gc, sweep)("Sweep: last chunk: put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") [coalesced:%d]",

View File

@ -487,7 +487,6 @@ public:
// Executes a task using worker threads.
virtual void execute(ProcessTask& task);
virtual void execute(EnqueueTask& task);
private:
CMSCollector& _collector;
};

View File

@ -789,21 +789,6 @@ void ParNewRefProcTaskProxy::work(uint worker_id) {
par_scan_state.evacuate_followers_closure());
}
class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
EnqueueTask& _task;
public:
ParNewRefEnqueueTaskProxy(EnqueueTask& task)
: AbstractGangTask("ParNewGeneration parallel reference enqueue"),
_task(task)
{ }
virtual void work(uint worker_id) {
_task.work(worker_id);
}
};
void ParNewRefProcTaskExecutor::execute(ProcessTask& task) {
CMSHeap* gch = CMSHeap::heap();
WorkGang* workers = gch->workers();
@ -816,14 +801,6 @@ void ParNewRefProcTaskExecutor::execute(ProcessTask& task) {
_young_gen.promotion_failed());
}
void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) {
CMSHeap* gch = CMSHeap::heap();
WorkGang* workers = gch->workers();
assert(workers != NULL, "Need parallel worker threads.");
ParNewRefEnqueueTaskProxy enq_task(task);
workers->run_task(&enq_task);
}
void ParNewRefProcTaskExecutor::set_single_threaded_mode() {
_state_set.flush();
CMSHeap* heap = CMSHeap::heap();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -297,7 +297,6 @@ class ParNewRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
// Executes a task using worker threads.
virtual void execute(ProcessTask& task);
virtual void execute(EnqueueTask& task);
// Switch to single threaded mode.
virtual void set_single_threaded_mode();
};

View File

@ -64,7 +64,7 @@ inline HeapWord* G1Allocator::attempt_allocation_force(size_t word_size) {
inline PLAB* G1PLABAllocator::alloc_buffer(InCSetState dest) {
assert(dest.is_valid(),
"Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value());
"Allocation buffer index out of bounds: " CSETSTATE_FORMAT, dest.value());
assert(_alloc_buffers[dest.value()] != NULL,
"Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value());
return _alloc_buffers[dest.value()];

View File

@ -2164,6 +2164,14 @@ jlong G1CollectedHeap::millis_since_last_gc() {
return ret_val;
}
void G1CollectedHeap::deduplicate_string(oop str) {
assert(java_lang_String::is_instance(str), "invariant");
if (G1StringDedup::is_enabled()) {
G1StringDedup::deduplicate(str);
}
}
void G1CollectedHeap::prepare_for_verify() {
_verifier->prepare_for_verify();
}
@ -3783,7 +3791,6 @@ public:
// Executes the given task using concurrent marking worker threads.
virtual void execute(ProcessTask& task);
virtual void execute(EnqueueTask& task);
};
// Gang task for possibly parallel reference processing
@ -3848,35 +3855,6 @@ void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
_workers->run_task(&proc_task_proxy);
}
// Gang task for parallel reference enqueueing.
class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
EnqueueTask& _enq_task;
public:
G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
AbstractGangTask("Enqueue reference objects in parallel"),
_enq_task(enq_task)
{ }
virtual void work(uint worker_id) {
_enq_task.work(worker_id);
}
};
// Driver routine for parallel reference enqueueing.
// Creates an instance of the ref enqueueing gang
// task and has the worker threads execute it.
void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
assert(_workers != NULL, "Need parallel worker threads.");
G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
_workers->run_task(&enq_task_proxy);
}
// End of weak reference support closures
void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) {

View File

@ -1338,6 +1338,9 @@ public:
void redirty_logged_cards();
// Verification
// Deduplicate the string
virtual void deduplicate_string(oop str);
// Perform any cleanup actions necessary before allowing a verification.
virtual void prepare_for_verify();
@ -1363,6 +1366,8 @@ public:
virtual const char* const* concurrent_phases() const;
virtual bool request_concurrent_phase(const char* phase);
virtual WorkGang* get_safepoint_workers() { return _workers; }
// The methods below are here for convenience and dispatch the
// appropriate method depending on value of the given VerifyOption
// parameter. The values for that parameter, and their meanings,

Some files were not shown because too many files have changed in this diff.