Merge
This commit is contained in:
commit
e4b6aa6eea
@ -384,3 +384,4 @@ d7f519b004254b19e384131d9f0d0e40e31a0fd3 jdk-9+137
|
||||
7dcf453eacae79ee86a6bcc75fd0b546fc99b48a jdk-9+139
|
||||
a5815c6098a241d3a1df64d22b84b3524e4a77df jdk-9+140
|
||||
f64afae7f1a5608e438585bbf0bc23785e69cba0 jdk-9+141
|
||||
2b3e5caafe3594ea507c37675c4d3086f415dc64 jdk-9+142
|
||||
|
@ -759,6 +759,10 @@ AC_DEFUN([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK_HELPER],
|
||||
# on ppc we don't prevent gcc to omit frame pointer but do prevent strict aliasing
|
||||
$2CFLAGS_JDK="${$2CFLAGS_JDK} -fno-strict-aliasing"
|
||||
;;
|
||||
s390 )
|
||||
$2COMMON_CCXXFLAGS_JDK="[$]$2COMMON_CCXXFLAGS_JDK -fno-omit-frame-pointer -mbackchain -march=z10"
|
||||
$2CFLAGS_JDK="${$2CFLAGS_JDK} -fno-strict-aliasing"
|
||||
;;
|
||||
* )
|
||||
$2COMMON_CCXXFLAGS_JDK="[$]$2COMMON_CCXXFLAGS_JDK -fno-omit-frame-pointer"
|
||||
$2CFLAGS_JDK="${$2CFLAGS_JDK} -fno-strict-aliasing"
|
||||
@ -940,6 +944,10 @@ AC_DEFUN([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK_HELPER],
|
||||
# Use Power8, this is the first CPU to support PPC64 LE with ELFv2 ABI.
|
||||
$2JVM_CFLAGS="[$]$2JVM_CFLAGS -mcpu=power7 -mtune=power8"
|
||||
fi
|
||||
elif test "x$OPENJDK_$1_CPU" = xs390x; then
|
||||
if test "x$OPENJDK_$1_OS" = xlinux; then
|
||||
$2JVM_CFLAGS="[$]$2JVM_CFLAGS -mbackchain -march=z10"
|
||||
fi
|
||||
fi
|
||||
|
||||
if test "x$OPENJDK_$1_CPU_ENDIAN" = xlittle; then
|
||||
@ -999,6 +1007,7 @@ AC_DEFUN([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK_HELPER],
|
||||
|
||||
# Setup some hard coded includes
|
||||
$2COMMON_CCXXFLAGS_JDK="[$]$2COMMON_CCXXFLAGS_JDK \
|
||||
-I\$(SUPPORT_OUTPUTDIR)/modules_include/java.base \
|
||||
-I${JDK_TOPDIR}/src/java.base/share/native/include \
|
||||
-I${JDK_TOPDIR}/src/java.base/$OPENJDK_$1_OS/native/include \
|
||||
-I${JDK_TOPDIR}/src/java.base/$OPENJDK_$1_OS_TYPE/native/include \
|
||||
|
@ -5093,7 +5093,7 @@ VS_SDK_PLATFORM_NAME_2013=
|
||||
#CUSTOM_AUTOCONF_INCLUDE
|
||||
|
||||
# Do not change or remove the following line, it is needed for consistency checks:
|
||||
DATE_WHEN_GENERATED=1477108079
|
||||
DATE_WHEN_GENERATED=1478079760
|
||||
|
||||
###############################################################################
|
||||
#
|
||||
@ -49840,6 +49840,10 @@ $as_echo "$supports" >&6; }
|
||||
# on ppc we don't prevent gcc to omit frame pointer but do prevent strict aliasing
|
||||
CFLAGS_JDK="${CFLAGS_JDK} -fno-strict-aliasing"
|
||||
;;
|
||||
s390 )
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -fno-omit-frame-pointer -mbackchain -march=z10"
|
||||
CFLAGS_JDK="${CFLAGS_JDK} -fno-strict-aliasing"
|
||||
;;
|
||||
* )
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -fno-omit-frame-pointer"
|
||||
CFLAGS_JDK="${CFLAGS_JDK} -fno-strict-aliasing"
|
||||
@ -50122,6 +50126,10 @@ $as_echo "$as_me: GCC >= 6 detected; adding ${NO_DELETE_NULL_POINTER_CHECKS_CFLA
|
||||
# Use Power8, this is the first CPU to support PPC64 LE with ELFv2 ABI.
|
||||
JVM_CFLAGS="$JVM_CFLAGS -mcpu=power7 -mtune=power8"
|
||||
fi
|
||||
elif test "x$OPENJDK_TARGET_CPU" = xs390x; then
|
||||
if test "x$OPENJDK_TARGET_OS" = xlinux; then
|
||||
JVM_CFLAGS="$JVM_CFLAGS -mbackchain -march=z10"
|
||||
fi
|
||||
fi
|
||||
|
||||
if test "x$OPENJDK_TARGET_CPU_ENDIAN" = xlittle; then
|
||||
@ -50270,6 +50278,7 @@ $as_echo "$as_me: GCC >= 6 detected; adding ${NO_DELETE_NULL_POINTER_CHECKS_CFLA
|
||||
|
||||
# Setup some hard coded includes
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK \
|
||||
-I\$(SUPPORT_OUTPUTDIR)/modules_include/java.base \
|
||||
-I${JDK_TOPDIR}/src/java.base/share/native/include \
|
||||
-I${JDK_TOPDIR}/src/java.base/$OPENJDK_TARGET_OS/native/include \
|
||||
-I${JDK_TOPDIR}/src/java.base/$OPENJDK_TARGET_OS_TYPE/native/include \
|
||||
@ -50655,6 +50664,10 @@ $as_echo "$supports" >&6; }
|
||||
# on ppc we don't prevent gcc to omit frame pointer but do prevent strict aliasing
|
||||
OPENJDK_BUILD_CFLAGS_JDK="${OPENJDK_BUILD_CFLAGS_JDK} -fno-strict-aliasing"
|
||||
;;
|
||||
s390 )
|
||||
OPENJDK_BUILD_COMMON_CCXXFLAGS_JDK="$OPENJDK_BUILD_COMMON_CCXXFLAGS_JDK -fno-omit-frame-pointer -mbackchain -march=z10"
|
||||
OPENJDK_BUILD_CFLAGS_JDK="${OPENJDK_BUILD_CFLAGS_JDK} -fno-strict-aliasing"
|
||||
;;
|
||||
* )
|
||||
OPENJDK_BUILD_COMMON_CCXXFLAGS_JDK="$OPENJDK_BUILD_COMMON_CCXXFLAGS_JDK -fno-omit-frame-pointer"
|
||||
OPENJDK_BUILD_CFLAGS_JDK="${OPENJDK_BUILD_CFLAGS_JDK} -fno-strict-aliasing"
|
||||
@ -50937,6 +50950,10 @@ $as_echo "$as_me: GCC >= 6 detected; adding ${NO_DELETE_NULL_POINTER_CHECKS_CFLA
|
||||
# Use Power8, this is the first CPU to support PPC64 LE with ELFv2 ABI.
|
||||
OPENJDK_BUILD_JVM_CFLAGS="$OPENJDK_BUILD_JVM_CFLAGS -mcpu=power7 -mtune=power8"
|
||||
fi
|
||||
elif test "x$OPENJDK_BUILD_CPU" = xs390x; then
|
||||
if test "x$OPENJDK_BUILD_OS" = xlinux; then
|
||||
OPENJDK_BUILD_JVM_CFLAGS="$OPENJDK_BUILD_JVM_CFLAGS -mbackchain -march=z10"
|
||||
fi
|
||||
fi
|
||||
|
||||
if test "x$OPENJDK_BUILD_CPU_ENDIAN" = xlittle; then
|
||||
@ -51085,6 +51102,7 @@ $as_echo "$as_me: GCC >= 6 detected; adding ${NO_DELETE_NULL_POINTER_CHECKS_CFLA
|
||||
|
||||
# Setup some hard coded includes
|
||||
OPENJDK_BUILD_COMMON_CCXXFLAGS_JDK="$OPENJDK_BUILD_COMMON_CCXXFLAGS_JDK \
|
||||
-I\$(SUPPORT_OUTPUTDIR)/modules_include/java.base \
|
||||
-I${JDK_TOPDIR}/src/java.base/share/native/include \
|
||||
-I${JDK_TOPDIR}/src/java.base/$OPENJDK_BUILD_OS/native/include \
|
||||
-I${JDK_TOPDIR}/src/java.base/$OPENJDK_BUILD_OS_TYPE/native/include \
|
||||
|
@ -265,6 +265,10 @@ IMAGES_OUTPUTDIR=$(BUILD_OUTPUT)/images
|
||||
BUNDLES_OUTPUTDIR=$(BUILD_OUTPUT)/bundles
|
||||
TESTMAKE_OUTPUTDIR=$(BUILD_OUTPUT)/test-make
|
||||
MAKESUPPORT_OUTPUTDIR=$(BUILD_OUTPUT)/make-support
|
||||
|
||||
# By default, output javadoc directly into image
|
||||
JAVADOC_OUTPUTDIR = $(DOCS_IMAGE_DIR)
|
||||
|
||||
# This does not get overridden in a bootcycle build
|
||||
CONFIGURESUPPORT_OUTPUTDIR:=@CONFIGURESUPPORT_OUTPUTDIR@
|
||||
BUILDJDK_OUTPUTDIR=$(BUILD_OUTPUT)/buildjdk
|
||||
@ -788,7 +792,7 @@ INTERIM_IMAGE_DIR := $(SUPPORT_OUTPUTDIR)/interim-image
|
||||
|
||||
# Docs image
|
||||
DOCS_IMAGE_SUBDIR := docs
|
||||
DOCS_IMAGE_DIR := $(IMAGES_OUTPUTDIR)/$(DOCS_IMAGE_SUBDIR)
|
||||
DOCS_IMAGE_DIR = $(IMAGES_OUTPUTDIR)/$(DOCS_IMAGE_SUBDIR)
|
||||
|
||||
# Macosx bundles directory definitions
|
||||
JDK_MACOSX_BUNDLE_SUBDIR=jdk-bundle
|
||||
|
@ -544,3 +544,4 @@ fc0956308c7a586267c5dd35dff74f773aa9c3eb jdk-9+138
|
||||
08492e67bf3226784dab3bf9ae967382ddbc1af5 jdk-9+139
|
||||
fec31089c2ef5a12dd64f401b0bf2e00f56ee0d0 jdk-9+140
|
||||
160a00bc6ed0af1fdf8418fc65e6bddbbc0c536d jdk-9+141
|
||||
7b48d63dfd6b8e2657288de3d7b1f153dee02d7e jdk-9+142
|
||||
|
@ -135,14 +135,14 @@ TARGETS += $(JVMTI_OUTPUTDIR)/jvmtiEnvRecommended.cpp
|
||||
# Copy jvmti.h to include dir
|
||||
|
||||
# The file is the same regardless of jvm variant. Only let one do the copy.
|
||||
#ifeq ($(JVM_VARIANT), $(firstword $(JVM_VARIANTS)))
|
||||
# $(eval $(call SetupCopyFiles, COPY_JVMTI_H, \
|
||||
# DEST := $(SUPPORT_OUTPUTDIR)/modules_include/java.base, \
|
||||
# FILES := $(JVMTI_OUTPUTDIR)/jvmti.h, \
|
||||
# ))
|
||||
ifeq ($(JVM_VARIANT), $(firstword $(JVM_VARIANTS)))
|
||||
$(eval $(call SetupCopyFiles, COPY_JVMTI_H, \
|
||||
DEST := $(SUPPORT_OUTPUTDIR)/modules_include/java.base, \
|
||||
FILES := $(JVMTI_OUTPUTDIR)/jvmti.h, \
|
||||
))
|
||||
|
||||
# TARGETS += $(COPY_JVMTI_H)
|
||||
#endif
|
||||
TARGETS += $(COPY_JVMTI_H)
|
||||
endif
|
||||
|
||||
################################################################################
|
||||
# Create trace files in gensrc/tracefiles
|
||||
|
@ -176,6 +176,11 @@ endif
|
||||
|
||||
JVM_OPTIMIZATION ?= HIGHEST_JVM
|
||||
|
||||
# Need to set JVM_STRIPFLAGS to the default value from SPEC since the STRIPFLAGS
|
||||
# parameter to SetupNativeCompilation allows an empty value to override the
|
||||
# default.
|
||||
JVM_STRIPFLAGS ?= $(STRIPFLAGS)
|
||||
|
||||
################################################################################
|
||||
# Now set up the actual compilation of the main hotspot native library
|
||||
|
||||
@ -204,6 +209,7 @@ $(eval $(call SetupNativeCompilation, BUILD_LIBJVM, \
|
||||
OBJECT_DIR := $(JVM_OUTPUTDIR)/objs, \
|
||||
MAPFILE := $(JVM_MAPFILE), \
|
||||
USE_MAPFILE_FOR_SYMBOLS := true, \
|
||||
STRIPFLAGS := $(JVM_STRIPFLAGS), \
|
||||
EMBED_MANIFEST := true, \
|
||||
RC_FLAGS := $(JVM_RCFLAGS), \
|
||||
VERSIONINFO_RESOURCE := $(HOTSPOT_TOPDIR)/src/os/windows/vm/version.rc, \
|
||||
|
@ -59,6 +59,10 @@ endif
|
||||
|
||||
ifeq ($(call check-jvm-feature, minimal), true)
|
||||
JVM_CFLAGS_FEATURES += -DMINIMAL_JVM -DVMTYPE=\"Minimal\"
|
||||
ifeq ($(OPENJDK_TARGET_OS), linux)
|
||||
# Override the default -g with a more liberal strip policy for the minimal JVM
|
||||
JVM_STRIPFLAGS := --strip-unneeded
|
||||
endif
|
||||
endif
|
||||
|
||||
ifeq ($(call check-jvm-feature, dtrace), true)
|
||||
|
@ -45,6 +45,7 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC := \
|
||||
$(HOTSPOT_TOPDIR)/test/runtime/jni/8025979 \
|
||||
$(HOTSPOT_TOPDIR)/test/runtime/jni/8033445 \
|
||||
$(HOTSPOT_TOPDIR)/test/runtime/jni/checked \
|
||||
$(HOTSPOT_TOPDIR)/test/runtime/jni/PrivateInterfaceMethods \
|
||||
$(HOTSPOT_TOPDIR)/test/runtime/jni/ToStringInInterfaceTest \
|
||||
$(HOTSPOT_TOPDIR)/test/runtime/modules/getModuleJNI \
|
||||
$(HOTSPOT_TOPDIR)/test/runtime/SameObject \
|
||||
|
@ -3496,6 +3496,16 @@ bool Matcher::narrow_klass_use_complex_address() {
|
||||
return false;
|
||||
}
|
||||
|
||||
bool Matcher::const_oop_prefer_decode() {
|
||||
// Prefer ConN+DecodeN over ConP in simple compressed oops mode.
|
||||
return Universe::narrow_oop_base() == NULL;
|
||||
}
|
||||
|
||||
bool Matcher::const_klass_prefer_decode() {
|
||||
// Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
|
||||
return Universe::narrow_klass_base() == NULL;
|
||||
}
|
||||
|
||||
// Is it better to copy float constants, or load them directly from
|
||||
// memory? Intel can load a float constant from a direct address,
|
||||
// requiring no extra registers. Most RISCs will have to materialize
|
||||
@ -15502,6 +15512,24 @@ instruct string_indexof_conLU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
|
||||
ins_pipe(pipe_class_memory);
|
||||
%}
|
||||
|
||||
instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
|
||||
iRegI_R0 result, iRegI tmp1, iRegI tmp2,
|
||||
iRegI tmp3, rFlagsReg cr)
|
||||
%{
|
||||
match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
|
||||
effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
|
||||
TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
|
||||
|
||||
format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %}
|
||||
|
||||
ins_encode %{
|
||||
__ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
|
||||
$result$$Register, $tmp1$$Register, $tmp2$$Register,
|
||||
$tmp3$$Register);
|
||||
%}
|
||||
ins_pipe(pipe_class_memory);
|
||||
%}
|
||||
|
||||
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
|
||||
iRegI_R0 result, rFlagsReg cr)
|
||||
%{
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -130,8 +130,8 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
|
||||
if (_num_fp_args < Argument::n_float_register_parameters_c) {
|
||||
__ ldrs(as_FloatRegister(_num_fp_args++), src);
|
||||
} else {
|
||||
__ ldrh(r0, src);
|
||||
__ strh(r0, Address(to(), _stack_offset));
|
||||
__ ldrw(r0, src);
|
||||
__ strw(r0, Address(to(), _stack_offset));
|
||||
_stack_offset += wordSize;
|
||||
_num_fp_args++;
|
||||
}
|
||||
@ -349,7 +349,7 @@ class SlowSignatureHandler
|
||||
_num_fp_args++;
|
||||
} else {
|
||||
*_to++ = from_obj;
|
||||
_num_int_args++;
|
||||
_num_fp_args++;
|
||||
}
|
||||
}
|
||||
|
||||
@ -364,7 +364,7 @@ class SlowSignatureHandler
|
||||
_num_fp_args++;
|
||||
} else {
|
||||
*_to++ = from_obj;
|
||||
_num_int_args++;
|
||||
_num_fp_args++;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -60,12 +60,12 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS)
|
||||
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS) {
|
||||
address pc = _instructions->start() + pc_offset;
|
||||
if (HotSpotMetaspaceConstantImpl::compressed(constant)) {
|
||||
narrowKlass narrowOop = record_narrow_metadata_reference(constant, CHECK);
|
||||
narrowKlass narrowOop = record_narrow_metadata_reference(_instructions, pc, constant, CHECK);
|
||||
TRACE_jvmci_3("relocating (narrow metaspace constant) at " PTR_FORMAT "/0x%x", p2i(pc), narrowOop);
|
||||
Unimplemented();
|
||||
} else {
|
||||
NativeMovConstReg* move = nativeMovConstReg_at(pc);
|
||||
void* reference = record_metadata_reference(constant, CHECK);
|
||||
void* reference = record_metadata_reference(_instructions, pc, constant, CHECK);
|
||||
move->set_data((intptr_t) reference);
|
||||
TRACE_jvmci_3("relocating (metaspace constant) at " PTR_FORMAT "/" PTR_FORMAT, p2i(pc), p2i(reference));
|
||||
}
|
||||
|
@ -4508,6 +4508,67 @@ void MacroAssembler::string_indexof(Register str2, Register str1,
|
||||
typedef void (MacroAssembler::* chr_insn)(Register Rt, const Address &adr);
|
||||
typedef void (MacroAssembler::* uxt_insn)(Register Rd, Register Rn);
|
||||
|
||||
void MacroAssembler::string_indexof_char(Register str1, Register cnt1,
|
||||
Register ch, Register result,
|
||||
Register tmp1, Register tmp2, Register tmp3)
|
||||
{
|
||||
Label CH1_LOOP, HAS_ZERO, DO1_SHORT, DO1_LOOP, MATCH, NOMATCH, DONE;
|
||||
Register cnt1_neg = cnt1;
|
||||
Register ch1 = rscratch1;
|
||||
Register result_tmp = rscratch2;
|
||||
|
||||
cmp(cnt1, 4);
|
||||
br(LT, DO1_SHORT);
|
||||
|
||||
orr(ch, ch, ch, LSL, 16);
|
||||
orr(ch, ch, ch, LSL, 32);
|
||||
|
||||
sub(cnt1, cnt1, 4);
|
||||
mov(result_tmp, cnt1);
|
||||
lea(str1, Address(str1, cnt1, Address::uxtw(1)));
|
||||
sub(cnt1_neg, zr, cnt1, LSL, 1);
|
||||
|
||||
mov(tmp3, 0x0001000100010001);
|
||||
|
||||
BIND(CH1_LOOP);
|
||||
ldr(ch1, Address(str1, cnt1_neg));
|
||||
eor(ch1, ch, ch1);
|
||||
sub(tmp1, ch1, tmp3);
|
||||
orr(tmp2, ch1, 0x7fff7fff7fff7fff);
|
||||
bics(tmp1, tmp1, tmp2);
|
||||
br(NE, HAS_ZERO);
|
||||
adds(cnt1_neg, cnt1_neg, 8);
|
||||
br(LT, CH1_LOOP);
|
||||
|
||||
cmp(cnt1_neg, 8);
|
||||
mov(cnt1_neg, 0);
|
||||
br(LT, CH1_LOOP);
|
||||
b(NOMATCH);
|
||||
|
||||
BIND(HAS_ZERO);
|
||||
rev(tmp1, tmp1);
|
||||
clz(tmp1, tmp1);
|
||||
add(cnt1_neg, cnt1_neg, tmp1, LSR, 3);
|
||||
b(MATCH);
|
||||
|
||||
BIND(DO1_SHORT);
|
||||
mov(result_tmp, cnt1);
|
||||
lea(str1, Address(str1, cnt1, Address::uxtw(1)));
|
||||
sub(cnt1_neg, zr, cnt1, LSL, 1);
|
||||
BIND(DO1_LOOP);
|
||||
ldrh(ch1, Address(str1, cnt1_neg));
|
||||
cmpw(ch, ch1);
|
||||
br(EQ, MATCH);
|
||||
adds(cnt1_neg, cnt1_neg, 2);
|
||||
br(LT, DO1_LOOP);
|
||||
BIND(NOMATCH);
|
||||
mov(result, -1);
|
||||
b(DONE);
|
||||
BIND(MATCH);
|
||||
add(result, result_tmp, cnt1_neg, ASR, 1);
|
||||
BIND(DONE);
|
||||
}
|
||||
|
||||
// Compare strings.
|
||||
void MacroAssembler::string_compare(Register str1, Register str2,
|
||||
Register cnt1, Register cnt2, Register result,
|
||||
|
@ -1229,6 +1229,9 @@ public:
|
||||
Register tmp1, Register tmp2,
|
||||
Register tmp3, Register tmp4,
|
||||
int int_cnt1, Register result, int ae);
|
||||
void string_indexof_char(Register str1, Register cnt1,
|
||||
Register ch, Register result,
|
||||
Register tmp1, Register tmp2, Register tmp3);
|
||||
private:
|
||||
void add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
|
||||
Register src1, Register src2);
|
||||
|
@ -989,7 +989,16 @@ static void object_move(MacroAssembler* masm,
|
||||
|
||||
// A float arg may have to do float reg int reg conversion
|
||||
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
|
||||
if (src.first() != dst.first()) {
|
||||
assert(src.first()->is_stack() && dst.first()->is_stack() ||
|
||||
src.first()->is_reg() && dst.first()->is_reg(), "Unexpected error");
|
||||
if (src.first()->is_stack()) {
|
||||
if (dst.first()->is_stack()) {
|
||||
__ ldrw(rscratch1, Address(rfp, reg2offset_in(src.first())));
|
||||
__ strw(rscratch1, Address(sp, reg2offset_out(dst.first())));
|
||||
} else {
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
} else if (src.first() != dst.first()) {
|
||||
if (src.is_single_phys_reg() && dst.is_single_phys_reg())
|
||||
__ fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
|
||||
else
|
||||
@ -1023,7 +1032,16 @@ static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
|
||||
|
||||
// A double move
|
||||
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
|
||||
if (src.first() != dst.first()) {
|
||||
assert(src.first()->is_stack() && dst.first()->is_stack() ||
|
||||
src.first()->is_reg() && dst.first()->is_reg(), "Unexpected error");
|
||||
if (src.first()->is_stack()) {
|
||||
if (dst.first()->is_stack()) {
|
||||
__ ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
|
||||
__ str(rscratch1, Address(sp, reg2offset_out(dst.first())));
|
||||
} else {
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
} else if (src.first() != dst.first()) {
|
||||
if (src.is_single_phys_reg() && dst.is_single_phys_reg())
|
||||
__ fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
|
||||
else
|
||||
|
@ -2743,7 +2743,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
|
||||
|
||||
Label L_loadkeys_44, L_loadkeys_52, L_aes_loop, L_rounds_44, L_rounds_52;
|
||||
Label L_loadkeys_44, L_loadkeys_52, L_aes_loop, L_rounds_44, L_rounds_52, _L_finish;
|
||||
|
||||
const Register from = c_rarg0; // source array address
|
||||
const Register to = c_rarg1; // destination array address
|
||||
@ -2754,9 +2754,12 @@ class StubGenerator: public StubCodeGenerator {
|
||||
const Register keylen = rscratch1;
|
||||
|
||||
address start = __ pc();
|
||||
|
||||
__ enter();
|
||||
|
||||
__ mov(rscratch2, len_reg);
|
||||
__ subsw(rscratch2, len_reg, zr);
|
||||
__ br(Assembler::LE, _L_finish);
|
||||
|
||||
__ ldrw(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
|
||||
|
||||
__ ld1(v0, __ T16B, rvec);
|
||||
@ -2814,11 +2817,13 @@ class StubGenerator: public StubCodeGenerator {
|
||||
__ eor(v0, __ T16B, v0, v31);
|
||||
|
||||
__ st1(v0, __ T16B, __ post(to, 16));
|
||||
__ sub(len_reg, len_reg, 16);
|
||||
__ cbnz(len_reg, L_aes_loop);
|
||||
|
||||
__ subw(len_reg, len_reg, 16);
|
||||
__ cbnzw(len_reg, L_aes_loop);
|
||||
|
||||
__ st1(v0, __ T16B, rvec);
|
||||
|
||||
__ BIND(_L_finish);
|
||||
__ mov(r0, rscratch2);
|
||||
|
||||
__ leave();
|
||||
@ -2844,7 +2849,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
|
||||
|
||||
Label L_loadkeys_44, L_loadkeys_52, L_aes_loop, L_rounds_44, L_rounds_52;
|
||||
Label L_loadkeys_44, L_loadkeys_52, L_aes_loop, L_rounds_44, L_rounds_52, _L_finish;
|
||||
|
||||
const Register from = c_rarg0; // source array address
|
||||
const Register to = c_rarg1; // destination array address
|
||||
@ -2855,9 +2860,12 @@ class StubGenerator: public StubCodeGenerator {
|
||||
const Register keylen = rscratch1;
|
||||
|
||||
address start = __ pc();
|
||||
|
||||
__ enter();
|
||||
|
||||
__ mov(rscratch2, len_reg);
|
||||
__ subsw(rscratch2, len_reg, zr);
|
||||
__ br(Assembler::LE, _L_finish);
|
||||
|
||||
__ ldrw(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
|
||||
|
||||
__ ld1(v2, __ T16B, rvec);
|
||||
@ -2920,11 +2928,12 @@ class StubGenerator: public StubCodeGenerator {
|
||||
__ st1(v0, __ T16B, __ post(to, 16));
|
||||
__ orr(v2, __ T16B, v1, v1);
|
||||
|
||||
__ sub(len_reg, len_reg, 16);
|
||||
__ cbnz(len_reg, L_aes_loop);
|
||||
__ subw(len_reg, len_reg, 16);
|
||||
__ cbnzw(len_reg, L_aes_loop);
|
||||
|
||||
__ st1(v2, __ T16B, rvec);
|
||||
|
||||
__ BIND(_L_finish);
|
||||
__ mov(r0, rscratch2);
|
||||
|
||||
__ leave();
|
||||
|
@ -454,8 +454,9 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
|
||||
__ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
|
||||
|
||||
#if INCLUDE_JVMCI
|
||||
// Check if we need to take lock at entry of synchronized method.
|
||||
if (UseJVMCICompiler) {
|
||||
// Check if we need to take lock at entry of synchronized method. This can
|
||||
// only occur on method entry so emit it only for vtos with step 0.
|
||||
if (UseJVMCICompiler && state == vtos && step == 0) {
|
||||
Label L;
|
||||
__ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
|
||||
__ cbz(rscratch1, L);
|
||||
@ -464,8 +465,17 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
|
||||
// Take lock.
|
||||
lock_method();
|
||||
__ bind(L);
|
||||
}
|
||||
} else {
|
||||
#ifdef ASSERT
|
||||
if (UseJVMCICompiler) {
|
||||
Label L;
|
||||
__ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
|
||||
__ cbz(rscratch1, L);
|
||||
__ stop("unexpected pending monitor in deopt entry");
|
||||
__ bind(L);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
// handle exceptions
|
||||
{
|
||||
Label L;
|
||||
|
@ -3717,19 +3717,15 @@ void TemplateTable::monitorenter()
|
||||
|
||||
// allocate one if there's no free slot
|
||||
{
|
||||
Label entry, loop, no_adjust;
|
||||
Label entry, loop;
|
||||
// 1. compute new pointers // rsp: old expression stack top
|
||||
__ ldr(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom
|
||||
__ sub(esp, esp, entry_size); // move expression stack top
|
||||
__ sub(esp, esp, entry_size); // move expression stack top
|
||||
__ sub(c_rarg1, c_rarg1, entry_size); // move expression stack bottom
|
||||
__ mov(c_rarg3, esp); // set start value for copy loop
|
||||
__ str(c_rarg1, monitor_block_bot); // set new monitor block bottom
|
||||
|
||||
__ cmp(sp, c_rarg3); // Check if we need to move sp
|
||||
__ br(Assembler::LO, no_adjust); // to allow more stack space
|
||||
// for our new esp
|
||||
__ sub(sp, sp, 2 * wordSize);
|
||||
__ bind(no_adjust);
|
||||
__ sub(sp, sp, entry_size); // make room for the monitor
|
||||
|
||||
__ b(entry);
|
||||
// 2. move expression stack contents
|
||||
|
@ -64,17 +64,16 @@ void C1_MacroAssembler::explicit_null_check(Register base) {
|
||||
|
||||
|
||||
void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
|
||||
assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
|
||||
// Avoid stack bang as first instruction. It may get overwritten by patch_verified_entry.
|
||||
const Register return_pc = R20;
|
||||
mflr(return_pc);
|
||||
|
||||
// Make sure there is enough stack space for this method's activation.
|
||||
assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
|
||||
generate_stack_overflow_check(bang_size_in_bytes);
|
||||
|
||||
// Create the frame.
|
||||
const Register return_pc = R0;
|
||||
|
||||
mflr(return_pc);
|
||||
// Get callers sp.
|
||||
std(return_pc, _abi(lr), R1_SP); // SP->lr = return_pc
|
||||
push_frame(frame_size_in_bytes, R0); // SP -= frame_size_in_bytes
|
||||
std(return_pc, _abi(lr), R1_SP); // SP->lr = return_pc
|
||||
push_frame(frame_size_in_bytes, R0); // SP -= frame_size_in_bytes
|
||||
}
|
||||
|
||||
|
||||
|
@ -1097,21 +1097,19 @@ EmitCallOffsets emit_call_with_trampoline_stub(MacroAssembler &_masm, address en
|
||||
// No entry point given, use the current pc.
|
||||
if (entry_point == NULL) entry_point = __ pc();
|
||||
|
||||
if (!Compile::current()->in_scratch_emit_size()) {
|
||||
// Put the entry point as a constant into the constant pool.
|
||||
const address entry_point_toc_addr = __ address_constant(entry_point, RelocationHolder::none);
|
||||
if (entry_point_toc_addr == NULL) {
|
||||
ciEnv::current()->record_out_of_memory_failure();
|
||||
return offsets;
|
||||
}
|
||||
const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
|
||||
|
||||
// Emit the trampoline stub which will be related to the branch-and-link below.
|
||||
CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, offsets.insts_call_instruction_offset);
|
||||
if (ciEnv::current()->failing()) { return offsets; } // Code cache may be full.
|
||||
__ relocate(rtype);
|
||||
// Put the entry point as a constant into the constant pool.
|
||||
const address entry_point_toc_addr = __ address_constant(entry_point, RelocationHolder::none);
|
||||
if (entry_point_toc_addr == NULL) {
|
||||
ciEnv::current()->record_out_of_memory_failure();
|
||||
return offsets;
|
||||
}
|
||||
|
||||
const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
|
||||
|
||||
// Emit the trampoline stub which will be related to the branch-and-link below.
|
||||
CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, offsets.insts_call_instruction_offset);
|
||||
if (ciEnv::current()->failing()) { return offsets; } // Code cache may be full.
|
||||
__ relocate(rtype);
|
||||
|
||||
// Note: At this point we do not have the address of the trampoline
|
||||
// stub, and the entry point might be too far away for bl, so __ pc()
|
||||
// serves as dummy and the bl will be patched later.
|
||||
@ -2166,6 +2164,16 @@ bool Matcher::narrow_klass_use_complex_address() {
|
||||
return false;
|
||||
}
|
||||
|
||||
bool Matcher::const_oop_prefer_decode() {
|
||||
// Prefer ConN+DecodeN over ConP in simple compressed oops mode.
|
||||
return Universe::narrow_oop_base() == NULL;
|
||||
}
|
||||
|
||||
bool Matcher::const_klass_prefer_decode() {
|
||||
// Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
|
||||
return Universe::narrow_klass_base() == NULL;
|
||||
}
|
||||
|
||||
// Is it better to copy float constants, or load them directly from memory?
|
||||
// Intel can load a float constant from a direct address, requiring no
|
||||
// extra registers. Most RISCs will have to materialize an address into a
|
||||
@ -2424,23 +2432,21 @@ encode %{
|
||||
MacroAssembler _masm(&cbuf);
|
||||
int toc_offset = 0;
|
||||
|
||||
if (!ra_->C->in_scratch_emit_size()) {
|
||||
address const_toc_addr;
|
||||
// Create a non-oop constant, no relocation needed.
|
||||
// If it is an IC, it has a virtual_call_Relocation.
|
||||
const_toc_addr = __ long_constant((jlong)$src$$constant);
|
||||
if (const_toc_addr == NULL) {
|
||||
ciEnv::current()->record_out_of_memory_failure();
|
||||
return;
|
||||
}
|
||||
|
||||
// Get the constant's TOC offset.
|
||||
toc_offset = __ offset_to_method_toc(const_toc_addr);
|
||||
|
||||
// Keep the current instruction offset in mind.
|
||||
((loadConLNode*)this)->_cbuf_insts_offset = __ offset();
|
||||
address const_toc_addr;
|
||||
// Create a non-oop constant, no relocation needed.
|
||||
// If it is an IC, it has a virtual_call_Relocation.
|
||||
const_toc_addr = __ long_constant((jlong)$src$$constant);
|
||||
if (const_toc_addr == NULL) {
|
||||
ciEnv::current()->record_out_of_memory_failure();
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
// Get the constant's TOC offset.
|
||||
toc_offset = __ offset_to_method_toc(const_toc_addr);
|
||||
|
||||
// Keep the current instruction offset in mind.
|
||||
((loadConLNode*)this)->_cbuf_insts_offset = __ offset();
|
||||
|
||||
__ ld($dst$$Register, toc_offset, $toc$$Register);
|
||||
%}
|
||||
|
||||
@ -2576,32 +2582,30 @@ encode %{
|
||||
MacroAssembler _masm(&cbuf);
|
||||
int toc_offset = 0;
|
||||
|
||||
if (!ra_->C->in_scratch_emit_size()) {
|
||||
intptr_t val = $src$$constant;
|
||||
relocInfo::relocType constant_reloc = $src->constant_reloc(); // src
|
||||
address const_toc_addr;
|
||||
if (constant_reloc == relocInfo::oop_type) {
|
||||
// Create an oop constant and a corresponding relocation.
|
||||
AddressLiteral a = __ allocate_oop_address((jobject)val);
|
||||
const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
|
||||
__ relocate(a.rspec());
|
||||
} else if (constant_reloc == relocInfo::metadata_type) {
|
||||
AddressLiteral a = __ constant_metadata_address((Metadata *)val);
|
||||
const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
|
||||
__ relocate(a.rspec());
|
||||
} else {
|
||||
// Create a non-oop constant, no relocation needed.
|
||||
const_toc_addr = __ long_constant((jlong)$src$$constant);
|
||||
}
|
||||
|
||||
if (const_toc_addr == NULL) {
|
||||
ciEnv::current()->record_out_of_memory_failure();
|
||||
return;
|
||||
}
|
||||
// Get the constant's TOC offset.
|
||||
toc_offset = __ offset_to_method_toc(const_toc_addr);
|
||||
intptr_t val = $src$$constant;
|
||||
relocInfo::relocType constant_reloc = $src->constant_reloc(); // src
|
||||
address const_toc_addr;
|
||||
if (constant_reloc == relocInfo::oop_type) {
|
||||
// Create an oop constant and a corresponding relocation.
|
||||
AddressLiteral a = __ allocate_oop_address((jobject)val);
|
||||
const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
|
||||
__ relocate(a.rspec());
|
||||
} else if (constant_reloc == relocInfo::metadata_type) {
|
||||
AddressLiteral a = __ constant_metadata_address((Metadata *)val);
|
||||
const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
|
||||
__ relocate(a.rspec());
|
||||
} else {
|
||||
// Create a non-oop constant, no relocation needed.
|
||||
const_toc_addr = __ long_constant((jlong)$src$$constant);
|
||||
}
|
||||
|
||||
if (const_toc_addr == NULL) {
|
||||
ciEnv::current()->record_out_of_memory_failure();
|
||||
return;
|
||||
}
|
||||
// Get the constant's TOC offset.
|
||||
toc_offset = __ offset_to_method_toc(const_toc_addr);
|
||||
|
||||
__ ld($dst$$Register, toc_offset, $toc$$Register);
|
||||
%}
|
||||
|
||||
@ -3272,28 +3276,26 @@ encode %{
|
||||
} else {
|
||||
// Remember the offset not the address.
|
||||
const int start_offset = __ offset();
|
||||
|
||||
// The trampoline stub.
|
||||
if (!Compile::current()->in_scratch_emit_size()) {
|
||||
// No entry point given, use the current pc.
|
||||
// Make sure branch fits into
|
||||
if (entry_point == 0) entry_point = __ pc();
|
||||
// No entry point given, use the current pc.
|
||||
// Make sure branch fits into
|
||||
if (entry_point == 0) entry_point = __ pc();
|
||||
|
||||
// Put the entry point as a constant into the constant pool.
|
||||
const address entry_point_toc_addr = __ address_constant(entry_point, RelocationHolder::none);
|
||||
if (entry_point_toc_addr == NULL) {
|
||||
ciEnv::current()->record_out_of_memory_failure();
|
||||
return;
|
||||
}
|
||||
const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
|
||||
|
||||
|
||||
// Emit the trampoline stub which will be related to the branch-and-link below.
|
||||
CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
|
||||
if (ciEnv::current()->failing()) { return; } // Code cache may be full.
|
||||
int method_index = resolved_method_index(cbuf);
|
||||
__ relocate(_optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
|
||||
: static_call_Relocation::spec(method_index));
|
||||
// Put the entry point as a constant into the constant pool.
|
||||
const address entry_point_toc_addr = __ address_constant(entry_point, RelocationHolder::none);
|
||||
if (entry_point_toc_addr == NULL) {
|
||||
ciEnv::current()->record_out_of_memory_failure();
|
||||
return;
|
||||
}
|
||||
const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
|
||||
|
||||
// Emit the trampoline stub which will be related to the branch-and-link below.
|
||||
CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
|
||||
if (ciEnv::current()->failing()) { return; } // Code cache may be full.
|
||||
int method_index = resolved_method_index(cbuf);
|
||||
__ relocate(_optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
|
||||
: static_call_Relocation::spec(method_index));
|
||||
|
||||
// The real call.
|
||||
// Note: At this point we do not have the address of the trampoline
|
||||
|
@ -2550,7 +2550,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
|
||||
__ lbzx(R17_tos, Rclass_or_obj, Roffset);
|
||||
__ extsb(R17_tos, R17_tos);
|
||||
__ push(ztos);
|
||||
if (!is_static) {
|
||||
if (!is_static && rc == may_rewrite) {
|
||||
// use btos rewriting, no truncating to t/f bit is needed for getfield.
|
||||
patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
|
||||
}
|
||||
@ -2874,7 +2874,9 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
|
||||
if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
|
||||
__ andi(R17_tos, R17_tos, 0x1);
|
||||
__ stbx(R17_tos, Rclass_or_obj, Roffset);
|
||||
if (!is_static) { patch_bytecode(Bytecodes::_fast_zputfield, Rbc, Rscratch, true, byte_no); }
|
||||
if (!is_static && rc == may_rewrite) {
|
||||
patch_bytecode(Bytecodes::_fast_zputfield, Rbc, Rscratch, true, byte_no);
|
||||
}
|
||||
if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
__ beq(CR_is_vol, Lvolatile); // Volatile?
|
||||
}
|
||||
|
@ -71,7 +71,7 @@ void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, T
|
||||
if (HotSpotMetaspaceConstantImpl::compressed(constant)) {
|
||||
#ifdef _LP64
|
||||
NativeMovConstReg32* move = nativeMovConstReg32_at(pc);
|
||||
narrowKlass narrowOop = record_narrow_metadata_reference(constant, CHECK);
|
||||
narrowKlass narrowOop = record_narrow_metadata_reference(_instructions, pc, constant, CHECK);
|
||||
move->set_data((intptr_t)narrowOop);
|
||||
TRACE_jvmci_3("relocating (narrow metaspace constant) at " PTR_FORMAT "/0x%x", p2i(pc), narrowOop);
|
||||
#else
|
||||
@ -79,7 +79,7 @@ void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, T
|
||||
#endif
|
||||
} else {
|
||||
NativeMovConstReg* move = nativeMovConstReg_at(pc);
|
||||
void* reference = record_metadata_reference(constant, CHECK);
|
||||
void* reference = record_metadata_reference(_instructions, pc, constant, CHECK);
|
||||
move->set_data((intptr_t)reference);
|
||||
TRACE_jvmci_3("relocating (metaspace constant) at " PTR_FORMAT "/" PTR_FORMAT, p2i(pc), p2i(reference));
|
||||
}
|
||||
|
@ -2003,6 +2003,20 @@ bool Matcher::narrow_klass_use_complex_address() {
|
||||
return false;
|
||||
}
|
||||
|
||||
bool Matcher::const_oop_prefer_decode() {
|
||||
// TODO: Check if loading ConP from TOC in heap-based mode is better:
|
||||
// Prefer ConN+DecodeN over ConP in simple compressed oops mode.
|
||||
// return Universe::narrow_oop_base() == NULL;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool Matcher::const_klass_prefer_decode() {
|
||||
// TODO: Check if loading ConP from TOC in heap-based mode is better:
|
||||
// Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
|
||||
// return Universe::narrow_klass_base() == NULL;
|
||||
return true;
|
||||
}
|
||||
|
||||
// Is it better to copy float constants, or load them directly from memory?
|
||||
// Intel can load a float constant from a direct address, requiring no
|
||||
// extra registers. Most RISCs will have to materialize an address into a
|
||||
|
@ -384,8 +384,9 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
|
||||
address entry = __ pc();
|
||||
__ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
|
||||
#if INCLUDE_JVMCI
|
||||
// Check if we need to take lock at entry of synchronized method.
|
||||
if (UseJVMCICompiler) {
|
||||
// Check if we need to take lock at entry of synchronized method. This can
|
||||
// only occur on method entry so emit it only for vtos with step 0.
|
||||
if (UseJVMCICompiler && state == vtos && step == 0) {
|
||||
Label L;
|
||||
Address pending_monitor_enter_addr(G2_thread, JavaThread::pending_monitorenter_offset());
|
||||
__ ldbool(pending_monitor_enter_addr, Gtemp); // Load if pending monitor enter
|
||||
@ -395,6 +396,17 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
|
||||
// Take lock.
|
||||
lock_method();
|
||||
__ bind(L);
|
||||
} else {
|
||||
#ifdef ASSERT
|
||||
if (UseJVMCICompiler) {
|
||||
Label L;
|
||||
Address pending_monitor_enter_addr(G2_thread, JavaThread::pending_monitorenter_offset());
|
||||
__ ldbool(pending_monitor_enter_addr, Gtemp); // Load if pending monitor enter
|
||||
__ cmp_and_br_short(Gtemp, G0, Assembler::equal, Assembler::pn, L);
|
||||
__ stop("unexpected pending monitor in deopt entry");
|
||||
__ bind(L);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
{ Label L;
|
||||
|
@ -84,7 +84,6 @@
|
||||
declare_constant(VM_Version::sun4v_m) \
|
||||
declare_constant(VM_Version::blk_init_instructions_m) \
|
||||
declare_constant(VM_Version::fmaf_instructions_m) \
|
||||
declare_constant(VM_Version::fmau_instructions_m) \
|
||||
declare_constant(VM_Version::sparc64_family_m) \
|
||||
declare_constant(VM_Version::M_family_m) \
|
||||
declare_constant(VM_Version::T_family_m) \
|
||||
|
@ -179,7 +179,7 @@ void VM_Version::initialize() {
|
||||
assert((OptoLoopAlignment % relocInfo::addr_unit()) == 0, "alignment is not a multiple of NOP size");
|
||||
|
||||
char buf[512];
|
||||
jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
|
||||
jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
|
||||
(has_v9() ? ", v9" : (has_v8() ? ", v8" : "")),
|
||||
(has_hardware_popc() ? ", popc" : ""),
|
||||
(has_vis1() ? ", vis1" : ""),
|
||||
@ -193,6 +193,7 @@ void VM_Version::initialize() {
|
||||
(has_sha512() ? ", sha512" : ""),
|
||||
(has_crc32c() ? ", crc32c" : ""),
|
||||
(is_ultra3() ? ", ultra3" : ""),
|
||||
(has_sparc5_instr() ? ", sparc5" : ""),
|
||||
(is_sun4v() ? ", sun4v" : ""),
|
||||
(is_niagara_plus() ? ", niagara_plus" : (is_niagara() ? ", niagara" : "")),
|
||||
(is_sparc64() ? ", sparc64" : ""),
|
||||
@ -487,16 +488,11 @@ int VM_Version::parse_features(const char* implementation) {
|
||||
if (strstr(impl, "SPARC-T1") != NULL) {
|
||||
features |= T1_model_m;
|
||||
}
|
||||
} else if (strstr(impl, "SUN4V-CPU") != NULL) {
|
||||
// Generic or migration class LDOM
|
||||
features |= T_family_m;
|
||||
} else {
|
||||
if (strstr(impl, "SPARC") == NULL) {
|
||||
#ifndef PRODUCT
|
||||
// kstat on Solaris 8 virtual machines (branded zones)
|
||||
// returns "(unsupported)" implementation. Solaris 8 is not
|
||||
// supported anymore, but include this check to be on the
|
||||
// safe side.
|
||||
warning("Can't parse CPU implementation = '%s', assume generic SPARC", impl);
|
||||
#endif
|
||||
}
|
||||
log_info(os, cpu)("Failed to parse CPU implementation = '%s'", impl);
|
||||
}
|
||||
os::free((void*)impl);
|
||||
return features;
|
||||
|
@ -34,30 +34,29 @@ class VM_Version: public Abstract_VM_Version {
|
||||
|
||||
protected:
|
||||
enum Feature_Flag {
|
||||
v8_instructions = 0,
|
||||
hardware_mul32 = 1,
|
||||
hardware_div32 = 2,
|
||||
hardware_fsmuld = 3,
|
||||
hardware_popc = 4,
|
||||
v9_instructions = 5,
|
||||
vis1_instructions = 6,
|
||||
vis2_instructions = 7,
|
||||
sun4v_instructions = 8,
|
||||
v8_instructions = 0,
|
||||
hardware_mul32 = 1,
|
||||
hardware_div32 = 2,
|
||||
hardware_fsmuld = 3,
|
||||
hardware_popc = 4,
|
||||
v9_instructions = 5,
|
||||
vis1_instructions = 6,
|
||||
vis2_instructions = 7,
|
||||
sun4v_instructions = 8,
|
||||
blk_init_instructions = 9,
|
||||
fmaf_instructions = 10,
|
||||
fmau_instructions = 11,
|
||||
vis3_instructions = 12,
|
||||
cbcond_instructions = 13,
|
||||
sparc64_family = 14,
|
||||
M_family = 15,
|
||||
T_family = 16,
|
||||
T1_model = 17,
|
||||
sparc5_instructions = 18,
|
||||
aes_instructions = 19,
|
||||
sha1_instruction = 20,
|
||||
sha256_instruction = 21,
|
||||
sha512_instruction = 22,
|
||||
crc32c_instruction = 23
|
||||
fmaf_instructions = 10,
|
||||
vis3_instructions = 11,
|
||||
cbcond_instructions = 12,
|
||||
sparc64_family = 13,
|
||||
M_family = 14,
|
||||
T_family = 15,
|
||||
T1_model = 16,
|
||||
sparc5_instructions = 17,
|
||||
aes_instructions = 18,
|
||||
sha1_instruction = 19,
|
||||
sha256_instruction = 20,
|
||||
sha512_instruction = 21,
|
||||
crc32c_instruction = 22
|
||||
};
|
||||
|
||||
enum Feature_Flag_Set {
|
||||
@ -75,7 +74,6 @@ protected:
|
||||
sun4v_m = 1 << sun4v_instructions,
|
||||
blk_init_instructions_m = 1 << blk_init_instructions,
|
||||
fmaf_instructions_m = 1 << fmaf_instructions,
|
||||
fmau_instructions_m = 1 << fmau_instructions,
|
||||
vis3_instructions_m = 1 << vis3_instructions,
|
||||
cbcond_instructions_m = 1 << cbcond_instructions,
|
||||
sparc64_family_m = 1 << sparc64_family,
|
||||
|
@ -89,14 +89,14 @@ void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, T
|
||||
if (HotSpotMetaspaceConstantImpl::compressed(constant)) {
|
||||
#ifdef _LP64
|
||||
address operand = Assembler::locate_operand(pc, Assembler::narrow_oop_operand);
|
||||
*((narrowKlass*) operand) = record_narrow_metadata_reference(constant, CHECK);
|
||||
*((narrowKlass*) operand) = record_narrow_metadata_reference(_instructions, operand, constant, CHECK);
|
||||
TRACE_jvmci_3("relocating (narrow metaspace constant) at " PTR_FORMAT "/" PTR_FORMAT, p2i(pc), p2i(operand));
|
||||
#else
|
||||
JVMCI_ERROR("compressed Klass* on 32bit");
|
||||
#endif
|
||||
} else {
|
||||
address operand = Assembler::locate_operand(pc, Assembler::imm_operand);
|
||||
*((void**) operand) = record_metadata_reference(constant, CHECK);
|
||||
*((void**) operand) = record_metadata_reference(_instructions, operand, constant, CHECK);
|
||||
TRACE_jvmci_3("relocating (metaspace constant) at " PTR_FORMAT "/" PTR_FORMAT, p2i(pc), p2i(operand));
|
||||
}
|
||||
}
|
||||
|
@ -254,8 +254,9 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
|
||||
const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
|
||||
NOT_LP64(__ get_thread(thread));
|
||||
#if INCLUDE_JVMCI
|
||||
// Check if we need to take lock at entry of synchronized method.
|
||||
if (UseJVMCICompiler) {
|
||||
// Check if we need to take lock at entry of synchronized method. This can
|
||||
// only occur on method entry so emit it only for vtos with step 0.
|
||||
if (UseJVMCICompiler && state == vtos && step == 0) {
|
||||
Label L;
|
||||
__ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
|
||||
__ jcc(Assembler::zero, L);
|
||||
@ -266,6 +267,16 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
|
||||
// Take lock.
|
||||
lock_method();
|
||||
__ bind(L);
|
||||
} else {
|
||||
#ifdef ASSERT
|
||||
if (UseJVMCICompiler) {
|
||||
Label L;
|
||||
__ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
|
||||
__ jccb(Assembler::zero, L);
|
||||
__ stop("unexpected pending monitor in deopt entry");
|
||||
__ bind(L);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
// handle exceptions
|
||||
|
@ -1452,6 +1452,15 @@ bool Matcher::narrow_klass_use_complex_address() {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool Matcher::const_oop_prefer_decode() {
|
||||
ShouldNotCallThis();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool Matcher::const_klass_prefer_decode() {
|
||||
ShouldNotCallThis();
|
||||
return true;
|
||||
}
|
||||
|
||||
// Is it better to copy float constants, or load them directly from memory?
|
||||
// Intel can load a float constant from a direct address, requiring no
|
||||
|
@ -1660,6 +1660,19 @@ bool Matcher::narrow_klass_use_complex_address() {
|
||||
return (LogKlassAlignmentInBytes <= 3);
|
||||
}
|
||||
|
||||
bool Matcher::const_oop_prefer_decode() {
|
||||
// Prefer ConN+DecodeN over ConP.
|
||||
return true;
|
||||
}
|
||||
|
||||
bool Matcher::const_klass_prefer_decode() {
|
||||
// TODO: Either support matching DecodeNKlass (heap-based) in operand
|
||||
// or condisider the following:
|
||||
// Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
|
||||
//return Universe::narrow_klass_base() == NULL;
|
||||
return true;
|
||||
}
|
||||
|
||||
// Is it better to copy float constants, or load them directly from
|
||||
// memory? Intel can load a float constant from a direct address,
|
||||
// requiring no extra registers. Most RISCs will have to materialize
|
||||
|
@ -96,9 +96,6 @@ public class SPARCHotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFacto
|
||||
if ((config.vmVersionFeatures & config.sparcFmafInstructions) != 0) {
|
||||
features.add(CPUFeature.FMAF);
|
||||
}
|
||||
if ((config.vmVersionFeatures & config.sparcFmauInstructions) != 0) {
|
||||
features.add(CPUFeature.FMAU);
|
||||
}
|
||||
if ((config.vmVersionFeatures & config.sparcSparc64Family) != 0) {
|
||||
features.add(CPUFeature.SPARC64_FAMILY);
|
||||
}
|
||||
|
@ -55,7 +55,6 @@ class SPARCHotSpotVMConfig extends HotSpotVMConfigAccess {
|
||||
final int sparcSun4v = getConstant("VM_Version::sun4v_m", Integer.class);
|
||||
final int sparcBlkInitInstructions = getConstant("VM_Version::blk_init_instructions_m", Integer.class);
|
||||
final int sparcFmafInstructions = getConstant("VM_Version::fmaf_instructions_m", Integer.class);
|
||||
final int sparcFmauInstructions = getConstant("VM_Version::fmau_instructions_m", Integer.class);
|
||||
final int sparcSparc64Family = getConstant("VM_Version::sparc64_family_m", Integer.class);
|
||||
final int sparcMFamily = getConstant("VM_Version::M_family_m", Integer.class);
|
||||
final int sparcTFamily = getConstant("VM_Version::T_family_m", Integer.class);
|
||||
|
@ -20,17 +20,13 @@
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package jdk.vm.ci.hotspot.services;
|
||||
package jdk.vm.ci.hotspot;
|
||||
|
||||
/**
|
||||
* An empty implementation for {@link EventProvider}. This implementation is used when no logging is
|
||||
* requested.
|
||||
*/
|
||||
final class EmptyEventProvider extends EventProvider {
|
||||
|
||||
EmptyEventProvider() {
|
||||
super(null);
|
||||
}
|
||||
final class EmptyEventProvider implements EventProvider {
|
||||
|
||||
static InternalError shouldNotReachHere() {
|
||||
throw new InternalError("should not reach here");
|
@ -20,58 +20,36 @@
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package jdk.vm.ci.hotspot.services;
|
||||
package jdk.vm.ci.hotspot;
|
||||
|
||||
import jdk.vm.ci.hotspot.services.EmptyEventProvider.EmptyCompilationEvent;
|
||||
import jdk.vm.ci.hotspot.services.EmptyEventProvider.EmptyCompilerFailureEvent;
|
||||
import jdk.vm.ci.hotspot.EmptyEventProvider.EmptyCompilationEvent;
|
||||
import jdk.vm.ci.hotspot.EmptyEventProvider.EmptyCompilerFailureEvent;
|
||||
import jdk.vm.ci.services.JVMCIPermission;
|
||||
|
||||
/**
|
||||
* Service-provider class for logging compiler related events.
|
||||
*/
|
||||
public abstract class EventProvider {
|
||||
|
||||
private static Void checkPermission() {
|
||||
SecurityManager sm = System.getSecurityManager();
|
||||
if (sm != null) {
|
||||
sm.checkPermission(new JVMCIPermission());
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
EventProvider(Void ignore) {
|
||||
}
|
||||
|
||||
/**
|
||||
* Initializes a new instance of this class.
|
||||
*
|
||||
* @throws SecurityException if a security manager has been installed and it denies
|
||||
* {@link JVMCIPermission}
|
||||
*/
|
||||
protected EventProvider() {
|
||||
this(checkPermission());
|
||||
}
|
||||
public interface EventProvider {
|
||||
|
||||
/**
|
||||
* Creates and returns an empty implementation for {@link EventProvider}. This implementation
|
||||
* can be used when no logging is requested.
|
||||
*/
|
||||
public static EventProvider createEmptyEventProvider() {
|
||||
static EventProvider createEmptyEventProvider() {
|
||||
return new EmptyEventProvider();
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates and returns an empty implementation for {@link CompilationEvent}.
|
||||
*/
|
||||
public static CompilationEvent createEmptyCompilationEvent() {
|
||||
static CompilationEvent createEmptyCompilationEvent() {
|
||||
return new EmptyCompilationEvent();
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates and returns an empty implementation for {@link CompilationEvent}.
|
||||
*/
|
||||
public static CompilerFailureEvent createEmptyCompilerFailureEvent() {
|
||||
static CompilerFailureEvent createEmptyCompilerFailureEvent() {
|
||||
return new EmptyCompilerFailureEvent();
|
||||
}
|
||||
|
@ -26,8 +26,10 @@ import jdk.vm.ci.code.CompilationRequest;
|
||||
import jdk.vm.ci.common.JVMCIError;
|
||||
import jdk.vm.ci.hotspot.HotSpotJVMCIRuntime.Option;
|
||||
import jdk.vm.ci.runtime.JVMCICompiler;
|
||||
import jdk.vm.ci.runtime.JVMCICompilerFactory;
|
||||
import jdk.vm.ci.runtime.JVMCIRuntime;
|
||||
import jdk.vm.ci.runtime.services.JVMCICompilerFactory;
|
||||
import jdk.vm.ci.services.JVMCIServiceLocator;
|
||||
import jdk.vm.ci.services.JVMCIPermission;
|
||||
import jdk.vm.ci.services.Services;
|
||||
|
||||
final class HotSpotJVMCICompilerConfig {
|
||||
@ -37,7 +39,7 @@ final class HotSpotJVMCICompilerConfig {
|
||||
* to perform a compilation. This allows the reflective parts of the JVMCI API to be used
|
||||
* without requiring a compiler implementation to be available.
|
||||
*/
|
||||
private static class DummyCompilerFactory extends JVMCICompilerFactory implements JVMCICompiler {
|
||||
private static class DummyCompilerFactory implements JVMCICompilerFactory, JVMCICompiler {
|
||||
|
||||
public HotSpotCompilationRequestResult compileMethod(CompilationRequest request) {
|
||||
throw new JVMCIError("no JVMCI compiler selected");
|
||||
@ -63,15 +65,16 @@ final class HotSpotJVMCICompilerConfig {
|
||||
* Gets the selected system compiler factory.
|
||||
*
|
||||
* @return the selected system compiler factory
|
||||
* @throws SecurityException if a security manager is present and it denies
|
||||
* {@link JVMCIPermission} for any {@link JVMCIServiceLocator} loaded by this method
|
||||
*/
|
||||
static JVMCICompilerFactory getCompilerFactory() {
|
||||
if (compilerFactory == null) {
|
||||
JVMCICompilerFactory factory = null;
|
||||
String compilerName = Option.Compiler.getString();
|
||||
if (compilerName != null) {
|
||||
for (JVMCICompilerFactory f : Services.load(JVMCICompilerFactory.class)) {
|
||||
for (JVMCICompilerFactory f : JVMCIServiceLocator.getProviders(JVMCICompilerFactory.class)) {
|
||||
if (f.getCompilerName().equals(compilerName)) {
|
||||
Services.exportJVMCITo(f.getClass());
|
||||
factory = f;
|
||||
}
|
||||
}
|
||||
@ -80,8 +83,9 @@ final class HotSpotJVMCICompilerConfig {
|
||||
}
|
||||
} else {
|
||||
// Auto select a single available compiler
|
||||
for (JVMCICompilerFactory f : Services.load(JVMCICompilerFactory.class)) {
|
||||
for (JVMCICompilerFactory f : JVMCIServiceLocator.getProviders(JVMCICompilerFactory.class)) {
|
||||
if (factory == null) {
|
||||
Services.exportJVMCITo(f.getClass());
|
||||
factory = f;
|
||||
} else {
|
||||
// Multiple factories seen - cancel auto selection
|
||||
|
@ -20,14 +20,14 @@
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package jdk.vm.ci.hotspot.services;
|
||||
package jdk.vm.ci.hotspot;
|
||||
|
||||
import jdk.vm.ci.runtime.services.JVMCICompilerFactory;
|
||||
import jdk.vm.ci.runtime.JVMCICompilerFactory;
|
||||
|
||||
/**
|
||||
* HotSpot extensions to {@link JVMCICompilerFactory}.
|
||||
*/
|
||||
public abstract class HotSpotJVMCICompilerFactory extends JVMCICompilerFactory {
|
||||
public abstract class HotSpotJVMCICompilerFactory implements JVMCICompilerFactory {
|
||||
|
||||
/**
|
||||
* Gets 0 or more prefixes identifying classes that should by compiled by C1 in simple mode
|
@ -27,13 +27,11 @@ import static jdk.vm.ci.common.InitTimer.timer;
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.io.PrintStream;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.ServiceLoader;
|
||||
import java.util.TreeMap;
|
||||
|
||||
import jdk.internal.misc.VM;
|
||||
@ -43,16 +41,15 @@ import jdk.vm.ci.code.CompiledCode;
|
||||
import jdk.vm.ci.code.InstalledCode;
|
||||
import jdk.vm.ci.common.InitTimer;
|
||||
import jdk.vm.ci.common.JVMCIError;
|
||||
import jdk.vm.ci.hotspot.services.HotSpotJVMCICompilerFactory;
|
||||
import jdk.vm.ci.hotspot.services.HotSpotJVMCICompilerFactory.CompilationLevel;
|
||||
import jdk.vm.ci.hotspot.services.HotSpotVMEventListener;
|
||||
import jdk.vm.ci.hotspot.HotSpotJVMCICompilerFactory.CompilationLevel;
|
||||
import jdk.vm.ci.meta.JavaKind;
|
||||
import jdk.vm.ci.meta.JavaType;
|
||||
import jdk.vm.ci.meta.ResolvedJavaType;
|
||||
import jdk.vm.ci.runtime.JVMCI;
|
||||
import jdk.vm.ci.runtime.JVMCIBackend;
|
||||
import jdk.vm.ci.runtime.JVMCICompiler;
|
||||
import jdk.vm.ci.runtime.services.JVMCICompilerFactory;
|
||||
import jdk.vm.ci.runtime.JVMCICompilerFactory;
|
||||
import jdk.vm.ci.services.JVMCIServiceLocator;
|
||||
import jdk.vm.ci.services.Services;
|
||||
|
||||
/**
|
||||
@ -90,14 +87,17 @@ public final class HotSpotJVMCIRuntime implements HotSpotJVMCIRuntimeProvider {
|
||||
* A list of all supported JVMCI options.
|
||||
*/
|
||||
public enum Option {
|
||||
// @formatter:off
|
||||
Compiler(String.class, null, "Selects the system compiler."),
|
||||
// Note: The following one is not used (see InitTimer.ENABLED). It is added here
|
||||
// so that -Djvmci.PrintFlags=true shows the option.
|
||||
InitTimer(boolean.class, false, "Specifies if initialization timing is enabled."),
|
||||
PrintConfig(boolean.class, false, "Prints VM configuration available via JVMCI and exits."),
|
||||
PrintFlags(boolean.class, false, "Prints all JVMCI flags and exits."),
|
||||
ShowFlags(boolean.class, false, "Prints all JVMCI flags and continues."),
|
||||
TraceMethodDataFilter(String.class, null, "");
|
||||
// so that -XX:+JVMCIPrintProperties shows the option.
|
||||
InitTimer(Boolean.class, false, "Specifies if initialization timing is enabled."),
|
||||
PrintConfig(Boolean.class, false, "Prints VM configuration available via JVMCI."),
|
||||
TraceMethodDataFilter(String.class, null,
|
||||
"Enables tracing of profiling info when read by JVMCI.",
|
||||
"Empty value: trace all methods",
|
||||
"Non-empty value: trace methods whose fully qualified name contains the value.");
|
||||
// @formatter:on
|
||||
|
||||
/**
|
||||
* The prefix for system properties that are JVMCI options.
|
||||
@ -113,25 +113,25 @@ public final class HotSpotJVMCIRuntime implements HotSpotJVMCIRuntimeProvider {
|
||||
private Object value;
|
||||
private final Object defaultValue;
|
||||
private boolean isDefault;
|
||||
private final String help;
|
||||
private final String[] helpLines;
|
||||
|
||||
Option(Class<?> type, Object defaultValue, String help) {
|
||||
Option(Class<?> type, Object defaultValue, String... helpLines) {
|
||||
assert Character.isUpperCase(name().charAt(0)) : "Option name must start with upper-case letter: " + name();
|
||||
this.type = type;
|
||||
this.value = UNINITIALIZED;
|
||||
this.defaultValue = defaultValue;
|
||||
this.help = help;
|
||||
this.helpLines = helpLines;
|
||||
}
|
||||
|
||||
@SuppressFBWarnings(value = "ES_COMPARING_STRINGS_WITH_EQ", justification = "sentinel must be String since it's a static final in an enum")
|
||||
private Object getValue() {
|
||||
if (value == UNINITIALIZED) {
|
||||
String propertyValue = VM.getSavedProperty(JVMCI_OPTION_PROPERTY_PREFIX + name());
|
||||
String propertyValue = VM.getSavedProperty(getPropertyName());
|
||||
if (propertyValue == null) {
|
||||
this.value = defaultValue;
|
||||
this.isDefault = true;
|
||||
} else {
|
||||
if (type == boolean.class) {
|
||||
if (type == Boolean.class) {
|
||||
this.value = Boolean.parseBoolean(propertyValue);
|
||||
} else if (type == String.class) {
|
||||
this.value = propertyValue;
|
||||
@ -146,6 +146,13 @@ public final class HotSpotJVMCIRuntime implements HotSpotJVMCIRuntimeProvider {
|
||||
return value;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the name of system property from which this option gets its value.
|
||||
*/
|
||||
public String getPropertyName() {
|
||||
return JVMCI_OPTION_PROPERTY_PREFIX + name();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the option's value as boolean.
|
||||
*
|
||||
@ -165,16 +172,31 @@ public final class HotSpotJVMCIRuntime implements HotSpotJVMCIRuntimeProvider {
|
||||
}
|
||||
|
||||
/**
|
||||
* Prints all option flags to {@code out}.
|
||||
* Prints a description of the properties used to configure shared JVMCI code.
|
||||
*
|
||||
* @param out stream to print to
|
||||
*/
|
||||
public static void printFlags(PrintStream out) {
|
||||
out.println("[List of JVMCI options]");
|
||||
for (Option option : values()) {
|
||||
public static void printProperties(PrintStream out) {
|
||||
out.println("[JVMCI properties]");
|
||||
int typeWidth = 0;
|
||||
int nameWidth = 0;
|
||||
Option[] values = values();
|
||||
for (Option option : values) {
|
||||
typeWidth = Math.max(typeWidth, option.type.getSimpleName().length());
|
||||
nameWidth = Math.max(nameWidth, option.getPropertyName().length());
|
||||
}
|
||||
for (Option option : values) {
|
||||
Object value = option.getValue();
|
||||
String assign = option.isDefault ? ":=" : " =";
|
||||
out.printf("%9s %-40s %s %-14s %s%n", option.type.getSimpleName(), option, assign, value, option.help);
|
||||
if (value instanceof String) {
|
||||
value = '"' + String.valueOf(value) + '"';
|
||||
}
|
||||
String assign = option.isDefault ? " =" : ":=";
|
||||
String format = "%" + (typeWidth + 1) + "s %-" + (nameWidth + 1) + "s %s %s%n";
|
||||
out.printf(format, option.type.getSimpleName(), option.getPropertyName(), assign, value);
|
||||
String helpFormat = "%" + (typeWidth + 1) + "s %s%n";
|
||||
for (String line : option.helpLines) {
|
||||
out.printf(helpFormat, "", line);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -221,11 +243,7 @@ public final class HotSpotJVMCIRuntime implements HotSpotJVMCIRuntimeProvider {
        if (vmEventListeners == null) {
            synchronized (this) {
                if (vmEventListeners == null) {
                    List<HotSpotVMEventListener> listeners = new ArrayList<>();
                    for (HotSpotVMEventListener vmEventListener : ServiceLoader.load(HotSpotVMEventListener.class)) {
                        listeners.add(vmEventListener);
                    }
                    vmEventListeners = listeners;
                    vmEventListeners = JVMCIServiceLocator.getProviders(HotSpotVMEventListener.class);
                }
            }
        }

@ -239,7 +257,6 @@ public final class HotSpotJVMCIRuntime implements HotSpotJVMCIRuntimeProvider {
|
||||
@SuppressWarnings("unused") private final String[] trivialPrefixes;
|
||||
|
||||
@SuppressWarnings("try")
|
||||
@SuppressFBWarnings(value = "DM_EXIT", justification = "PrintFlags is meant to exit the VM")
|
||||
private HotSpotJVMCIRuntime() {
|
||||
compilerToVm = new CompilerToVM();
|
||||
|
||||
@ -261,20 +278,6 @@ public final class HotSpotJVMCIRuntime implements HotSpotJVMCIRuntimeProvider {
|
||||
|
||||
metaAccessContext = new HotSpotJVMCIMetaAccessContext();
|
||||
|
||||
boolean printFlags = Option.PrintFlags.getBoolean();
|
||||
boolean showFlags = Option.ShowFlags.getBoolean();
|
||||
if (printFlags || showFlags) {
|
||||
Option.printFlags(System.out);
|
||||
if (printFlags) {
|
||||
System.exit(0);
|
||||
}
|
||||
}
|
||||
|
||||
if (Option.PrintConfig.getBoolean()) {
|
||||
printConfig(configStore, compilerToVm);
|
||||
System.exit(0);
|
||||
}
|
||||
|
||||
compilerFactory = HotSpotJVMCICompilerConfig.getCompilerFactory();
|
||||
if (compilerFactory instanceof HotSpotJVMCICompilerFactory) {
|
||||
hsCompilerFactory = (HotSpotJVMCICompilerFactory) compilerFactory;
|
||||
@ -298,6 +301,16 @@ public final class HotSpotJVMCIRuntime implements HotSpotJVMCIRuntimeProvider {
|
||||
trivialPrefixes = null;
|
||||
compilationLevelAdjustment = config.compLevelAdjustmentNone;
|
||||
}
|
||||
|
||||
if (config.getFlag("JVMCIPrintProperties", Boolean.class)) {
|
||||
PrintStream out = new PrintStream(getLogStream());
|
||||
Option.printProperties(out);
|
||||
compilerFactory.printProperties(out);
|
||||
}
|
||||
|
||||
if (Option.PrintConfig.getBoolean()) {
|
||||
printConfig(configStore, compilerToVm);
|
||||
}
|
||||
}
|
||||
|
||||
private JVMCIBackend registerBackend(JVMCIBackend backend) {
|
||||
|
@ -53,9 +53,9 @@ final class HotSpotMethodData {
|
||||
* Reference to the C++ MethodData object.
|
||||
*/
|
||||
final long metaspaceMethodData;
|
||||
@SuppressWarnings("unused") private final HotSpotResolvedJavaMethodImpl method;
|
||||
private final HotSpotResolvedJavaMethodImpl method;
|
||||
|
||||
public HotSpotMethodData(long metaspaceMethodData, HotSpotResolvedJavaMethodImpl method) {
|
||||
HotSpotMethodData(long metaspaceMethodData, HotSpotResolvedJavaMethodImpl method) {
|
||||
this.metaspaceMethodData = metaspaceMethodData;
|
||||
this.method = method;
|
||||
}
|
||||
@ -107,6 +107,18 @@ final class HotSpotMethodData {
|
||||
return UNSAFE.getByte(metaspaceMethodData + config.methodDataOopTrapHistoryOffset + config.deoptReasonOSROffset + reasonIndex) & 0xFF;
|
||||
}
|
||||
|
||||
public int getDecompileCount() {
|
||||
return UNSAFE.getInt(metaspaceMethodData + config.methodDataDecompiles);
|
||||
}
|
||||
|
||||
public int getOverflowRecompileCount() {
|
||||
return UNSAFE.getInt(metaspaceMethodData + config.methodDataOverflowRecompiles);
|
||||
}
|
||||
|
||||
public int getOverflowTrapCount() {
|
||||
return UNSAFE.getInt(metaspaceMethodData + config.methodDataOverflowTraps);
|
||||
}
|
||||
|
||||
public HotSpotMethodDataAccessor getNormalData(int position) {
|
||||
if (position >= normalDataSize()) {
|
||||
return null;
|
||||
@ -214,6 +226,12 @@ final class HotSpotMethodData {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
String nl = String.format("%n");
|
||||
String nlIndent = String.format("%n%38s", "");
|
||||
sb.append("Raw method data for ");
|
||||
sb.append(method.format("%H.%n(%p)"));
|
||||
sb.append(":");
|
||||
sb.append(nl);
|
||||
sb.append(String.format("nof_decompiles(%d) nof_overflow_recompiles(%d) nof_overflow_traps(%d)%n",
|
||||
getDecompileCount(), getOverflowRecompileCount(), getOverflowTrapCount()));
|
||||
if (hasNormalData()) {
|
||||
int pos = 0;
|
||||
HotSpotMethodDataAccessor data;
|
||||
@ -427,6 +445,10 @@ final class HotSpotMethodData {
|
||||
|
||||
protected abstract long getTypesNotRecordedExecutionCount(HotSpotMethodData data, int position);
|
||||
|
||||
public int getNonprofiledCount(HotSpotMethodData data, int position) {
|
||||
return data.readUnsignedIntAsSignedInt(position, NONPROFILED_COUNT_OFFSET);
|
||||
}
|
||||
|
||||
private JavaTypeProfile createTypeProfile(TriState nullSeen, RawItemProfile<ResolvedJavaType> profile) {
|
||||
if (profile.entries <= 0 || profile.totalCount <= 0) {
|
||||
return null;
|
||||
@ -462,7 +484,7 @@ final class HotSpotMethodData {
|
||||
TriState nullSeen = getNullSeen(data, pos);
|
||||
TriState exceptionSeen = getExceptionSeen(data, pos);
|
||||
sb.append(format("count(%d) null_seen(%s) exception_seen(%s) nonprofiled_count(%d) entries(%d)", getCounterValue(data, pos), nullSeen, exceptionSeen,
|
||||
getTypesNotRecordedExecutionCount(data, pos), profile.entries));
|
||||
getNonprofiledCount(data, pos), profile.entries));
|
||||
for (int i = 0; i < profile.entries; i++) {
|
||||
long count = profile.counts[i];
|
||||
sb.append(format("%n %s (%d, %4.2f)", profile.items[i].toJavaName(), count, (double) count / profile.totalCount));
|
||||
@ -490,7 +512,7 @@ final class HotSpotMethodData {
|
||||
|
||||
@Override
|
||||
protected long getTypesNotRecordedExecutionCount(HotSpotMethodData data, int position) {
|
||||
return data.readUnsignedIntAsSignedInt(position, NONPROFILED_COUNT_OFFSET);
|
||||
return getNonprofiledCount(data, position);
|
||||
}
|
||||
}
|
||||
|
||||
@ -788,7 +810,8 @@ final class HotSpotMethodData {
|
||||
|
||||
@Override
|
||||
public StringBuilder appendTo(StringBuilder sb, HotSpotMethodData data, int pos) {
|
||||
return null;
|
||||
sb.append("unknown profile data with tag: " + tag);
|
||||
return sb;
|
||||
}
|
||||
}
|
||||
|
||||
@ -822,10 +845,10 @@ final class HotSpotMethodData {
|
||||
private static boolean checkAccessorTags() {
|
||||
int expectedTag = 0;
|
||||
for (HotSpotMethodDataAccessor accessor : PROFILE_DATA_ACCESSORS) {
|
||||
if (expectedTag ==0 ) {
|
||||
if (expectedTag == 0) {
|
||||
assert accessor == null;
|
||||
} else {
|
||||
assert accessor.tag == expectedTag: expectedTag + " != " + accessor.tag + " " + accessor;
|
||||
assert accessor.tag == expectedTag : expectedTag + " != " + accessor.tag + " " + accessor;
|
||||
}
|
||||
expectedTag++;
|
||||
}
|
||||
|
@ -57,6 +57,18 @@ public final class HotSpotProfilingInfo implements ProfilingInfo {
|
||||
return method.getCodeSize();
|
||||
}
|
||||
|
||||
public int getDecompileCount() {
|
||||
return methodData.getDecompileCount();
|
||||
}
|
||||
|
||||
public int getOverflowRecompileCount() {
|
||||
return methodData.getOverflowRecompileCount();
|
||||
}
|
||||
|
||||
public int getOverflowTrapCount() {
|
||||
return methodData.getOverflowTrapCount();
|
||||
}
|
||||
|
||||
@Override
|
||||
public JavaTypeProfile getTypeProfile(int bci) {
|
||||
if (!isMature) {
|
||||
|
@ -434,7 +434,6 @@ final class HotSpotResolvedJavaMethodImpl extends HotSpotMethod implements HotSp
|
||||
methodData = new HotSpotMethodData(metaspaceMethodData, this);
|
||||
String methodDataFilter = Option.TraceMethodDataFilter.getString();
|
||||
if (methodDataFilter != null && this.format("%H.%n").contains(methodDataFilter)) {
|
||||
System.out.println("Raw method data for " + this.format("%H.%n(%p)") + ":");
|
||||
System.out.println(methodData.toString());
|
||||
}
|
||||
}
|
||||
|
@ -160,6 +160,10 @@ class HotSpotVMConfig extends HotSpotVMConfigAccess {
|
||||
final int methodDataOopTrapHistoryOffset = getFieldOffset("MethodData::_trap_hist._array[0]", Integer.class, "u1");
|
||||
final int methodDataIRSizeOffset = getFieldOffset("MethodData::_jvmci_ir_size", Integer.class, "int");
|
||||
|
||||
final int methodDataDecompiles = getFieldOffset("MethodData::_nof_decompiles", Integer.class, "uint");
|
||||
final int methodDataOverflowRecompiles = getFieldOffset("MethodData::_nof_overflow_recompiles", Integer.class, "uint");
|
||||
final int methodDataOverflowTraps = getFieldOffset("MethodData::_nof_overflow_traps", Integer.class, "uint");
|
||||
|
||||
final int nmethodCompLevelOffset = getFieldOffset("nmethod::_comp_level", Integer.class, "int");
|
||||
|
||||
final int compilationLevelNone = getConstant("CompLevel_none", Integer.class);
|
||||
|
@ -20,59 +20,35 @@
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package jdk.vm.ci.hotspot.services;
package jdk.vm.ci.hotspot;

import jdk.vm.ci.code.CompiledCode;
import jdk.vm.ci.code.InstalledCode;
import jdk.vm.ci.hotspot.HotSpotCodeCacheProvider;
import jdk.vm.ci.services.JVMCIPermission;

/**
 * Service-provider class for responding to VM events.
 * Listener for responding to VM events.
 */
public abstract class HotSpotVMEventListener {

    private static Void checkPermission() {
        SecurityManager sm = System.getSecurityManager();
        if (sm != null) {
            sm.checkPermission(new JVMCIPermission());
        }
        return null;
    }

    @SuppressWarnings("unused")
    HotSpotVMEventListener(Void ignore) {
    }

    /**
     * Initializes a new instance of this class.
     *
     * @throws SecurityException if a security manager has been installed and it denies
     *             {@link JVMCIPermission}
     */
    protected HotSpotVMEventListener() {
        this(checkPermission());
    }
public interface HotSpotVMEventListener {

    /**
     * Notifies this client that the VM is shutting down.
     */
    public void notifyShutdown() {
    default void notifyShutdown() {
    }

    /**
     * Notify on successful install into the code cache.
     *
     * @param hotSpotCodeCacheProvider
     * @param installedCode
     * @param compiledCode
     * @param hotSpotCodeCacheProvider the code cache into which the code was installed
     * @param installedCode the code that was installed
     * @param compiledCode the compiled code from which {@code installedCode} was produced
     */
    public void notifyInstall(HotSpotCodeCacheProvider hotSpotCodeCacheProvider, InstalledCode installedCode, CompiledCode compiledCode) {
    default void notifyInstall(HotSpotCodeCacheProvider hotSpotCodeCacheProvider, InstalledCode installedCode, CompiledCode compiledCode) {
    }

    /**
     * Notify on completion of a bootstrap.
     */
    public void notifyBootstrapFinished() {
    default void notifyBootstrapFinished() {
    }
}
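
As a usage sketch (not part of this change): with HotSpotVMEventListener now an interface whose notifications are default methods, a client overrides only the events it cares about. The package and class names below (com.example.jvmci, InstallCountingListener) are illustrative assumptions, not names from this changeset.

// Illustrative only: a listener that counts code installs and reports on shutdown.
package com.example.jvmci;

import java.util.concurrent.atomic.AtomicLong;

import jdk.vm.ci.code.CompiledCode;
import jdk.vm.ci.code.InstalledCode;
import jdk.vm.ci.hotspot.HotSpotCodeCacheProvider;
import jdk.vm.ci.hotspot.HotSpotVMEventListener;

public final class InstallCountingListener implements HotSpotVMEventListener {

    private final AtomicLong installs = new AtomicLong();

    @Override
    public void notifyInstall(HotSpotCodeCacheProvider codeCache, InstalledCode installedCode, CompiledCode compiledCode) {
        // Called by the runtime after each successful install into the code cache.
        installs.incrementAndGet();
    }

    @Override
    public void notifyShutdown() {
        System.out.println("JVMCI code installs observed: " + installs.get());
    }

    // notifyBootstrapFinished() keeps its empty default body from the interface.
}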
@ -20,54 +20,38 @@
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package jdk.vm.ci.runtime.services;
package jdk.vm.ci.runtime;

import jdk.vm.ci.runtime.JVMCICompiler;
import jdk.vm.ci.runtime.JVMCIRuntime;
import jdk.vm.ci.services.JVMCIPermission;
import java.io.PrintStream;

/**
 * Service-provider class for creating JVMCI compilers.
 * Factory for creating JVMCI compilers.
 */
public abstract class JVMCICompilerFactory {

    private static Void checkPermission() {
        SecurityManager sm = System.getSecurityManager();
        if (sm != null) {
            sm.checkPermission(new JVMCIPermission());
        }
        return null;
    }

    @SuppressWarnings("unused")
    private JVMCICompilerFactory(Void ignore) {
    }

    /**
     * Initializes a new instance of this class.
     *
     * @throws SecurityException if a security manager has been installed and it denies
     *             {@link JVMCIPermission}
     */
    protected JVMCICompilerFactory() {
        this(checkPermission());
    }
public interface JVMCICompilerFactory {

    /**
     * Get the name of this compiler. The name is used by JVMCI to determine which factory to use.
     */
    public abstract String getCompilerName();
    String getCompilerName();

    /**
     * Notifies this object that it has been selected to {@linkplain #createCompiler(JVMCIRuntime)
     * create} a compiler and it should now perform any heavy weight initialization that it deferred
     * during construction.
     */
    public void onSelection() {
    default void onSelection() {
    }

    /**
     * Create a new instance of a {@link JVMCICompiler}.
     */
    public abstract JVMCICompiler createCompiler(JVMCIRuntime runtime);
    JVMCICompiler createCompiler(JVMCIRuntime runtime);

    /**
     * Prints a description of the properties used to configure this compiler.
     *
     * @param out where to print the message
     */
    default void printProperties(PrintStream out) {
    }
}
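
A minimal factory written against the new interface form, again illustrative only: the class name and the "example" compiler name are assumptions. The name returned by getCompilerName() is what the jvmci.Compiler system property (the Compiler option above) would select; createCompiler would return the client's JVMCICompiler, which this sketch deliberately does not implement.

// Illustrative only: skeleton of a compiler factory for the reworked interface.
package com.example.jvmci;

import java.io.PrintStream;

import jdk.vm.ci.runtime.JVMCICompiler;
import jdk.vm.ci.runtime.JVMCICompilerFactory;
import jdk.vm.ci.runtime.JVMCIRuntime;

public final class ExampleCompilerFactory implements JVMCICompilerFactory {

    @Override
    public String getCompilerName() {
        // Selected via -Djvmci.Compiler=example (the Compiler option shown above).
        return "example";
    }

    @Override
    public JVMCICompiler createCompiler(JVMCIRuntime runtime) {
        // A real factory would construct and return its JVMCICompiler here.
        throw new UnsupportedOperationException("compiler construction not shown in this sketch");
    }

    @Override
    public void printProperties(PrintStream out) {
        // Contributes to the -XX:+JVMCIPrintProperties output shown earlier in this change.
        out.println("[example compiler properties]");
        out.println("    (none)");
    }
}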
@ -0,0 +1,83 @@
/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package jdk.vm.ci.services;

import java.util.ArrayList;
import java.util.List;

/**
 * Service-provider class for the runtime to locate providers of JVMCI services where the latter are
 * not in packages exported by the JVMCI module. As part of instantiating
 * {@link JVMCIServiceLocator}, all JVMCI packages will be {@linkplain Services#exportJVMCITo(Class)
 * exported} to the module defining the class of the instantiated object.
 *
 * While the {@link #getProvider(Class)} method can be used directly, it's usually easier to use
 * {@link #getProviders(Class)}.
 */
public abstract class JVMCIServiceLocator {

    private static Void checkPermission() {
        SecurityManager sm = System.getSecurityManager();
        if (sm != null) {
            sm.checkPermission(new JVMCIPermission());
        }
        return null;
    }

    @SuppressWarnings("unused")
    private JVMCIServiceLocator(Void ignore) {
    }

    /**
     * Creates a capability for accessing JVMCI. Once successfully instantiated, JVMCI exports all
     * its packages to the module defining the type of this object.
     *
     * @throws SecurityException if a security manager has been installed and it denies
     *             {@link JVMCIPermission}
     */
    protected JVMCIServiceLocator() {
        this(checkPermission());
        Services.exportJVMCITo(getClass());
    }

    /**
     * Gets the provider of the service defined by {@code service} or {@code null} if this object
     * does not have a provider for {@code service}.
     */
    public abstract <S> S getProvider(Class<S> service);

    /**
     * Gets the providers of the service defined by {@code service} by querying the
     * {@link JVMCIServiceLocator} providers obtained by {@link Services#load(Class)}.
     */
    public static <S> List<S> getProviders(Class<S> service) {
        List<S> providers = new ArrayList<>();
        for (JVMCIServiceLocator access : Services.load(JVMCIServiceLocator.class)) {
            S provider = access.getProvider(service);
            if (provider != null) {
                providers.add(provider);
            }
        }
        return providers;
    }
}
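
Tying the two sketches above together: a hypothetical locator through which JVMCI would discover this client's services. getProviders() iterates every locator found via Services.load and calls getProvider() once per requested service type; returning null simply means "no provider from this locator". The class reuses the two illustrative classes introduced above and is not part of this changeset.

// Illustrative only: single discovery entry point for the example client.
package com.example.jvmci;

import jdk.vm.ci.hotspot.HotSpotVMEventListener;
import jdk.vm.ci.runtime.JVMCICompilerFactory;
import jdk.vm.ci.services.JVMCIServiceLocator;

public final class ExampleServiceLocator extends JVMCIServiceLocator {

    @Override
    public <S> S getProvider(Class<S> service) {
        if (service == JVMCICompilerFactory.class) {
            return service.cast(new ExampleCompilerFactory());
        }
        if (service == HotSpotVMEventListener.class) {
            return service.cast(new InstallCountingListener());
        }
        // No provider for any other service type from this locator.
        return null;
    }
}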
@ -349,7 +349,6 @@ public class SPARC extends Architecture {
        SUN4V,
        BLK_INIT_INSTRUCTIONS,
        FMAF,
        FMAU,
        SPARC64_FAMILY,
        M_FAMILY,
        T_FAMILY,

@ -25,12 +25,9 @@

module jdk.vm.ci {
    exports jdk.vm.ci.services;
    exports jdk.vm.ci.runtime.services;
    exports jdk.vm.ci.hotspot.services;

    uses jdk.vm.ci.hotspot.services.HotSpotVMEventListener;
    uses jdk.vm.ci.services.JVMCIServiceLocator;
    uses jdk.vm.ci.hotspot.HotSpotJVMCIBackendFactory;
    uses jdk.vm.ci.runtime.services.JVMCICompilerFactory;

    provides jdk.vm.ci.hotspot.HotSpotJVMCIBackendFactory with
        jdk.vm.ci.hotspot.aarch64.AArch64HotSpotJVMCIBackendFactory;

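
For completeness, a sketch of what the client side of this wiring could look like as a module declaration. The module and class names are the illustrative ones used above; since only jdk.vm.ci.services stays unconditionally exported after this change, compiling against jdk.vm.ci.hotspot or jdk.vm.ci.runtime types would additionally need something like --add-exports at build time, with the runtime export handled by JVMCIServiceLocator's Services.exportJVMCITo call.

// Hypothetical module descriptor for the example JVMCI client sketched above.
module com.example.jvmci {
    requires jdk.vm.ci;

    // JVMCI discovers the locator via Services.load(JVMCIServiceLocator.class);
    // the locator in turn supplies the compiler factory and VM event listener.
    provides jdk.vm.ci.services.JVMCIServiceLocator with
        com.example.jvmci.ExampleServiceLocator;
}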
@ -2563,7 +2563,7 @@ bool os::get_page_info(char *start, page_info* info) {
|
||||
uint64_t outdata[2];
|
||||
uint_t validity = 0;
|
||||
|
||||
if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
|
||||
if (meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -2601,7 +2601,7 @@ char *os::scan_pages(char *start, char* end, page_info* page_expected,
|
||||
addrs_count++;
|
||||
}
|
||||
|
||||
if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
|
||||
if (meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -4160,9 +4160,6 @@ void os::Solaris::install_signal_handlers() {
|
||||
void report_error(const char* file_name, int line_no, const char* title,
|
||||
const char* format, ...);
|
||||
|
||||
// (Static) wrapper for getisax(2) call.
|
||||
os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
|
||||
|
||||
// (Static) wrappers for the liblgrp API
|
||||
os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
|
||||
os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
|
||||
@ -4174,9 +4171,6 @@ os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
|
||||
os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
|
||||
os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;
|
||||
|
||||
// (Static) wrapper for meminfo() call.
|
||||
os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
|
||||
|
||||
static address resolve_symbol_lazy(const char* name) {
|
||||
address addr = (address) dlsym(RTLD_DEFAULT, name);
|
||||
if (addr == NULL) {
|
||||
@ -4300,27 +4294,6 @@ bool os::Solaris::liblgrp_init() {
|
||||
return false;
|
||||
}
|
||||
|
||||
void os::Solaris::misc_sym_init() {
|
||||
address func;
|
||||
|
||||
// getisax
|
||||
func = resolve_symbol_lazy("getisax");
|
||||
if (func != NULL) {
|
||||
os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
|
||||
}
|
||||
|
||||
// meminfo
|
||||
func = resolve_symbol_lazy("meminfo");
|
||||
if (func != NULL) {
|
||||
os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
|
||||
}
|
||||
}
|
||||
|
||||
uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
|
||||
assert(_getisax != NULL, "_getisax not set");
|
||||
return _getisax(array, n);
|
||||
}
|
||||
|
||||
// int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
|
||||
typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
|
||||
static pset_getloadavg_type pset_getloadavg_ptr = NULL;
|
||||
@ -4351,10 +4324,6 @@ void os::init(void) {
|
||||
|
||||
Solaris::initialize_system_info();
|
||||
|
||||
// Initialize misc. symbols as soon as possible, so we can use them
|
||||
// if we need them.
|
||||
Solaris::misc_sym_init();
|
||||
|
||||
int fd = ::open("/dev/zero", O_RDWR);
|
||||
if (fd < 0) {
|
||||
fatal("os::init: cannot open /dev/zero (%s)", os::strerror(errno));
|
||||
|
@ -73,8 +73,6 @@ class Solaris {
|
||||
LGRP_VIEW_OS // what's available to operating system
|
||||
} lgrp_view_t;
|
||||
|
||||
typedef uint_t (*getisax_func_t)(uint32_t* array, uint_t n);
|
||||
|
||||
typedef lgrp_id_t (*lgrp_home_func_t)(idtype_t idtype, id_t id);
|
||||
typedef lgrp_cookie_t (*lgrp_init_func_t)(lgrp_view_t view);
|
||||
typedef int (*lgrp_fini_func_t)(lgrp_cookie_t cookie);
|
||||
@ -86,11 +84,6 @@ class Solaris {
|
||||
lgrp_rsrc_t type);
|
||||
typedef int (*lgrp_nlgrps_func_t)(lgrp_cookie_t cookie);
|
||||
typedef int (*lgrp_cookie_stale_func_t)(lgrp_cookie_t cookie);
|
||||
typedef int (*meminfo_func_t)(const uint64_t inaddr[], int addr_count,
|
||||
const uint_t info_req[], int info_count,
|
||||
uint64_t outdata[], uint_t validity[]);
|
||||
|
||||
static getisax_func_t _getisax;
|
||||
|
||||
static lgrp_home_func_t _lgrp_home;
|
||||
static lgrp_init_func_t _lgrp_init;
|
||||
@ -102,8 +95,6 @@ class Solaris {
|
||||
static lgrp_cookie_stale_func_t _lgrp_cookie_stale;
|
||||
static lgrp_cookie_t _lgrp_cookie;
|
||||
|
||||
static meminfo_func_t _meminfo;
|
||||
|
||||
// Large Page Support
|
||||
static bool is_valid_page_size(size_t bytes);
|
||||
static size_t page_size_for_alignment(size_t alignment);
|
||||
@ -191,8 +182,6 @@ class Solaris {
|
||||
static void libthread_init();
|
||||
static void synchronization_init();
|
||||
static bool liblgrp_init();
|
||||
// Load miscellaneous symbols.
|
||||
static void misc_sym_init();
|
||||
// This boolean allows users to forward their own non-matching signals
|
||||
// to JVM_handle_solaris_signal, harmlessly.
|
||||
static bool signal_handlers_are_installed;
|
||||
@ -272,17 +261,6 @@ class Solaris {
|
||||
}
|
||||
static lgrp_cookie_t lgrp_cookie() { return _lgrp_cookie; }
|
||||
|
||||
static bool supports_getisax() { return _getisax != NULL; }
|
||||
static uint_t getisax(uint32_t* array, uint_t n);
|
||||
|
||||
static void set_meminfo(meminfo_func_t func) { _meminfo = func; }
|
||||
static int meminfo (const uint64_t inaddr[], int addr_count,
|
||||
const uint_t info_req[], int info_count,
|
||||
uint64_t outdata[], uint_t validity[]) {
|
||||
return _meminfo != NULL ? _meminfo(inaddr, addr_count, info_req, info_count,
|
||||
outdata, validity) : -1;
|
||||
}
|
||||
|
||||
static sigset_t* unblocked_signals();
|
||||
static sigset_t* vm_signals();
|
||||
static sigset_t* allowdebug_blocked_signals();
|
||||
|
@ -784,7 +784,7 @@ void os::set_native_thread_name(const char *name) {
|
||||
|
||||
__try {
|
||||
RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
|
||||
} __except(EXCEPTION_CONTINUE_EXECUTION) {}
|
||||
} __except(EXCEPTION_EXECUTE_HANDLER) {}
|
||||
}
|
||||
|
||||
bool os::distribute_processes(uint length, uint* distribution) {
|
||||
|
@ -1404,12 +1404,14 @@ static HANDLE open_sharedmem_object(const char* objectname, DWORD ofm_access, TR
|
||||
objectname); /* name for object */
|
||||
|
||||
if (fmh == NULL) {
|
||||
DWORD lasterror = GetLastError();
|
||||
if (PrintMiscellaneous && Verbose) {
|
||||
warning("OpenFileMapping failed for shared memory object %s:"
|
||||
" lasterror = %d\n", objectname, GetLastError());
|
||||
" lasterror = %d\n", objectname, lasterror);
|
||||
}
|
||||
THROW_MSG_(vmSymbols::java_lang_Exception(),
|
||||
"Could not open PerfMemory", INVALID_HANDLE_VALUE);
|
||||
THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
|
||||
err_msg("Could not open PerfMemory, error %d", lasterror),
|
||||
INVALID_HANDLE_VALUE);
|
||||
}
|
||||
|
||||
return fmh;;
|
||||
|
@ -343,8 +343,15 @@ public:
|
||||
#define _SC_L2CACHE_LINESZ 527 /* Size of L2 cache line */
|
||||
#endif
|
||||
|
||||
// Hardware capability bits that appeared after Solaris 11.1
|
||||
#ifndef AV_SPARC_FMAF
|
||||
#define AV_SPARC_FMAF 0x00000100 /* Fused Multiply-Add */
|
||||
#endif
|
||||
#ifndef AV2_SPARC_SPARC5
|
||||
#define AV2_SPARC_SPARC5 0x00000008 /* The 29 new fp and sub instructions */
|
||||
#endif
|
||||
|
||||
int VM_Version::platform_features(int features) {
|
||||
assert(os::Solaris::supports_getisax(), "getisax() must be available");
|
||||
|
||||
// Check 32-bit architecture.
|
||||
if (Sysinfo(SI_ARCHITECTURE_32).match("sparc")) {
|
||||
@ -357,119 +364,75 @@ int VM_Version::platform_features(int features) {
|
||||
}
|
||||
|
||||
// Extract valid instruction set extensions.
|
||||
uint_t avs[2];
|
||||
uint_t avn = os::Solaris::getisax(avs, 2);
|
||||
assert(avn <= 2, "should return two or less av's");
|
||||
uint_t av = avs[0];
|
||||
uint_t avs[AV_HW2_IDX + 1];
|
||||
uint_t avn = getisax(avs, ARRAY_SIZE(avs));
|
||||
|
||||
log_info(os, cpu)("getisax(2) returned: " PTR32_FORMAT, av);
|
||||
if (avn > 1) {
|
||||
log_info(os, cpu)(" " PTR32_FORMAT, avs[1]);
|
||||
log_info(os, cpu)("getisax(2) returned %d words:", avn);
|
||||
for (int i = 0; i < avn; i++) {
|
||||
log_info(os, cpu)(" word %d: " PTR32_FORMAT, i, avs[i]);
|
||||
}
|
||||
|
||||
if (av & AV_SPARC_MUL32) features |= hardware_mul32_m;
|
||||
if (av & AV_SPARC_DIV32) features |= hardware_div32_m;
|
||||
if (av & AV_SPARC_FSMULD) features |= hardware_fsmuld_m;
|
||||
if (av & AV_SPARC_V8PLUS) features |= v9_instructions_m;
|
||||
if (av & AV_SPARC_POPC) features |= hardware_popc_m;
|
||||
if (av & AV_SPARC_VIS) features |= vis1_instructions_m;
|
||||
if (av & AV_SPARC_VIS2) features |= vis2_instructions_m;
|
||||
if (avn > 1) {
|
||||
uint_t av2 = avs[1];
|
||||
#ifndef AV2_SPARC_SPARC5
|
||||
#define AV2_SPARC_SPARC5 0x00000008 /* The 29 new fp and sub instructions */
|
||||
#endif
|
||||
if (av2 & AV2_SPARC_SPARC5) features |= sparc5_instructions_m;
|
||||
uint_t av1 = avs[AV_HW1_IDX];
|
||||
if (av1 & AV_SPARC_MUL32) features |= hardware_mul32_m;
|
||||
if (av1 & AV_SPARC_DIV32) features |= hardware_div32_m;
|
||||
if (av1 & AV_SPARC_FSMULD) features |= hardware_fsmuld_m;
|
||||
if (av1 & AV_SPARC_V8PLUS) features |= v9_instructions_m;
|
||||
if (av1 & AV_SPARC_POPC) features |= hardware_popc_m;
|
||||
if (av1 & AV_SPARC_VIS) features |= vis1_instructions_m;
|
||||
if (av1 & AV_SPARC_VIS2) features |= vis2_instructions_m;
|
||||
if (av1 & AV_SPARC_ASI_BLK_INIT) features |= blk_init_instructions_m;
|
||||
if (av1 & AV_SPARC_FMAF) features |= fmaf_instructions_m;
|
||||
if (av1 & AV_SPARC_VIS3) features |= vis3_instructions_m;
|
||||
if (av1 & AV_SPARC_CBCOND) features |= cbcond_instructions_m;
|
||||
if (av1 & AV_SPARC_CRC32C) features |= crc32c_instruction_m;
|
||||
if (av1 & AV_SPARC_AES) features |= aes_instructions_m;
|
||||
if (av1 & AV_SPARC_SHA1) features |= sha1_instruction_m;
|
||||
if (av1 & AV_SPARC_SHA256) features |= sha256_instruction_m;
|
||||
if (av1 & AV_SPARC_SHA512) features |= sha512_instruction_m;
|
||||
|
||||
if (avn > AV_HW2_IDX) {
|
||||
uint_t av2 = avs[AV_HW2_IDX];
|
||||
if (av2 & AV2_SPARC_SPARC5) features |= sparc5_instructions_m;
|
||||
}
|
||||
|
||||
// We only build on Solaris 10 and up, but some of the values below
|
||||
// are not defined on all versions of Solaris 10, so we define them,
|
||||
// if necessary.
|
||||
#ifndef AV_SPARC_ASI_BLK_INIT
|
||||
#define AV_SPARC_ASI_BLK_INIT 0x0080 /* ASI_BLK_INIT_xxx ASI */
|
||||
#endif
|
||||
if (av & AV_SPARC_ASI_BLK_INIT) features |= blk_init_instructions_m;
|
||||
|
||||
#ifndef AV_SPARC_FMAF
|
||||
#define AV_SPARC_FMAF 0x0100 /* Fused Multiply-Add */
|
||||
#endif
|
||||
if (av & AV_SPARC_FMAF) features |= fmaf_instructions_m;
|
||||
|
||||
#ifndef AV_SPARC_FMAU
|
||||
#define AV_SPARC_FMAU 0x0200 /* Unfused Multiply-Add */
|
||||
#endif
|
||||
if (av & AV_SPARC_FMAU) features |= fmau_instructions_m;
|
||||
|
||||
#ifndef AV_SPARC_VIS3
|
||||
#define AV_SPARC_VIS3 0x0400 /* VIS3 instruction set extensions */
|
||||
#endif
|
||||
if (av & AV_SPARC_VIS3) features |= vis3_instructions_m;
|
||||
|
||||
#ifndef AV_SPARC_CBCOND
|
||||
#define AV_SPARC_CBCOND 0x10000000 /* compare and branch instrs supported */
|
||||
#endif
|
||||
if (av & AV_SPARC_CBCOND) features |= cbcond_instructions_m;
|
||||
|
||||
#ifndef AV_SPARC_CRC32C
|
||||
#define AV_SPARC_CRC32C 0x20000000 /* crc32c instruction supported */
|
||||
#endif
|
||||
if (av & AV_SPARC_CRC32C) features |= crc32c_instruction_m;
|
||||
|
||||
#ifndef AV_SPARC_AES
|
||||
#define AV_SPARC_AES 0x00020000 /* aes instrs supported */
|
||||
#endif
|
||||
if (av & AV_SPARC_AES) features |= aes_instructions_m;
|
||||
|
||||
#ifndef AV_SPARC_SHA1
|
||||
#define AV_SPARC_SHA1 0x00400000 /* sha1 instruction supported */
|
||||
#endif
|
||||
if (av & AV_SPARC_SHA1) features |= sha1_instruction_m;
|
||||
|
||||
#ifndef AV_SPARC_SHA256
|
||||
#define AV_SPARC_SHA256 0x00800000 /* sha256 instruction supported */
|
||||
#endif
|
||||
if (av & AV_SPARC_SHA256) features |= sha256_instruction_m;
|
||||
|
||||
#ifndef AV_SPARC_SHA512
|
||||
#define AV_SPARC_SHA512 0x01000000 /* sha512 instruction supported */
|
||||
#endif
|
||||
if (av & AV_SPARC_SHA512) features |= sha512_instruction_m;
|
||||
|
||||
// Determine the machine type.
|
||||
if (Sysinfo(SI_MACHINE).match("sun4v")) {
|
||||
features |= sun4v_m;
|
||||
}
|
||||
|
||||
bool use_solaris_12_api = false;
|
||||
Sysinfo impl(SI_CPUBRAND);
|
||||
if (impl.valid()) {
|
||||
// If SI_CPUBRAND works, that means Solaris 12 API to get the cache line sizes
|
||||
// is available to us as well
|
||||
use_solaris_12_api = true;
|
||||
features |= parse_features(impl.value());
|
||||
// If SI_CPUBRAND works, that means Solaris 12 API to get the cache line sizes
|
||||
// is available to us as well
|
||||
Sysinfo cpu_info(SI_CPUBRAND);
|
||||
bool use_solaris_12_api = cpu_info.valid();
|
||||
const char* impl;
|
||||
int impl_m = 0;
|
||||
if (use_solaris_12_api) {
|
||||
impl = cpu_info.value();
|
||||
log_info(os, cpu)("Parsing CPU implementation from %s", impl);
|
||||
impl_m = parse_features(impl);
|
||||
} else {
|
||||
// Otherwise use kstat to determine the machine type.
|
||||
kstat_ctl_t* kc = kstat_open();
|
||||
kstat_t* ksp = kstat_lookup(kc, (char*)"cpu_info", -1, NULL);
|
||||
const char* implementation;
|
||||
bool has_implementation = false;
|
||||
if (ksp != NULL) {
|
||||
if (kstat_read(kc, ksp, NULL) != -1 && ksp->ks_data != NULL) {
|
||||
kstat_named_t* knm = (kstat_named_t *)ksp->ks_data;
|
||||
for (int i = 0; i < ksp->ks_ndata; i++) {
|
||||
if (strcmp((const char*)&(knm[i].name),"implementation") == 0) {
|
||||
implementation = KSTAT_NAMED_STR_PTR(&knm[i]);
|
||||
has_implementation = true;
|
||||
log_info(os, cpu)("cpu_info.implementation: %s", implementation);
|
||||
features |= parse_features(implementation);
|
||||
break;
|
||||
if (kc != NULL) {
|
||||
kstat_t* ksp = kstat_lookup(kc, (char*)"cpu_info", -1, NULL);
|
||||
if (ksp != NULL) {
|
||||
if (kstat_read(kc, ksp, NULL) != -1 && ksp->ks_data != NULL) {
|
||||
kstat_named_t* knm = (kstat_named_t *)ksp->ks_data;
|
||||
for (int i = 0; i < ksp->ks_ndata; i++) {
|
||||
if (strcmp((const char*)&(knm[i].name), "implementation") == 0) {
|
||||
impl = KSTAT_NAMED_STR_PTR(&knm[i]);
|
||||
log_info(os, cpu)("Parsing CPU implementation from %s", impl);
|
||||
impl_m = parse_features(impl);
|
||||
break;
|
||||
}
|
||||
}
|
||||
} // for(
|
||||
}
|
||||
}
|
||||
kstat_close(kc);
|
||||
}
|
||||
assert(has_implementation, "unknown cpu info (changed kstat interface?)");
|
||||
kstat_close(kc);
|
||||
}
|
||||
assert(impl_m != 0, "Unknown CPU implementation %s", impl);
|
||||
features |= impl_m;
|
||||
|
||||
bool is_sun4v = (features & sun4v_m) != 0;
|
||||
if (use_solaris_12_api && is_sun4v) {
|
||||
|
@ -153,6 +153,8 @@ void AbstractAssembler::generate_stack_overflow_check(int frame_size_in_bytes) {
|
||||
|
||||
void Label::add_patch_at(CodeBuffer* cb, int branch_loc) {
|
||||
assert(_loc == -1, "Label is unbound");
|
||||
// Don't add patch locations during scratch emit.
|
||||
if (cb->insts()->scratch_emit()) { return; }
|
||||
if (_patch_index < PatchCacheSize) {
|
||||
_patches[_patch_index] = branch_loc;
|
||||
} else {
|
||||
|
@ -331,6 +331,8 @@ void CodeSection::relocate(address at, relocInfo::relocType rtype, int format, j
|
||||
}
|
||||
|
||||
void CodeSection::relocate(address at, RelocationHolder const& spec, int format) {
|
||||
// Do not relocate in scratch buffers.
|
||||
if (scratch_emit()) { return; }
|
||||
Relocation* reloc = spec.reloc();
|
||||
relocInfo::relocType rtype = (relocInfo::relocType) reloc->type();
|
||||
if (rtype == relocInfo::none) return;
|
||||
|
@ -92,6 +92,7 @@ class CodeSection VALUE_OBJ_CLASS_SPEC {
|
||||
address _locs_point; // last relocated position (grows upward)
|
||||
bool _locs_own; // did I allocate the locs myself?
|
||||
bool _frozen; // no more expansion of this section
|
||||
bool _scratch_emit; // Buffer is used for scratch emit, don't relocate.
|
||||
char _index; // my section number (SECT_INST, etc.)
|
||||
CodeBuffer* _outer; // enclosing CodeBuffer
|
||||
|
||||
@ -108,6 +109,7 @@ class CodeSection VALUE_OBJ_CLASS_SPEC {
|
||||
_locs_point = NULL;
|
||||
_locs_own = false;
|
||||
_frozen = false;
|
||||
_scratch_emit = false;
|
||||
debug_only(_index = (char)-1);
|
||||
debug_only(_outer = (CodeBuffer*)badAddress);
|
||||
}
|
||||
@ -166,6 +168,10 @@ class CodeSection VALUE_OBJ_CLASS_SPEC {
|
||||
bool is_frozen() const { return _frozen; }
|
||||
bool has_locs() const { return _locs_end != NULL; }
|
||||
|
||||
// Mark scratch buffer.
|
||||
void set_scratch_emit() { _scratch_emit = true; }
|
||||
bool scratch_emit() { return _scratch_emit; }
|
||||
|
||||
CodeBuffer* outer() const { return _outer; }
|
||||
|
||||
// is a given address in this section? (2nd version is end-inclusive)
|
||||
|
@ -1493,6 +1493,21 @@ void GraphBuilder::method_return(Value x, bool ignore_return) {
|
||||
// Check to see whether we are inlining. If so, Return
|
||||
// instructions become Gotos to the continuation point.
|
||||
if (continuation() != NULL) {
|
||||
|
||||
int invoke_bci = state()->caller_state()->bci();
|
||||
|
||||
if (x != NULL && !ignore_return) {
|
||||
ciMethod* caller = state()->scope()->caller()->method();
|
||||
Bytecodes::Code invoke_raw_bc = caller->raw_code_at_bci(invoke_bci);
|
||||
if (invoke_raw_bc == Bytecodes::_invokehandle || invoke_raw_bc == Bytecodes::_invokedynamic) {
|
||||
ciType* declared_ret_type = caller->get_declared_signature_at_bci(invoke_bci)->return_type();
|
||||
if (declared_ret_type->is_klass() && x->exact_type() == NULL &&
|
||||
x->declared_type() != declared_ret_type && declared_ret_type != compilation()->env()->Object_klass()) {
|
||||
x = append(new TypeCast(declared_ret_type->as_klass(), x, copy_state_before()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
assert(!method()->is_synchronized() || InlineSynchronizedMethods, "can not inline synchronized methods yet");
|
||||
|
||||
if (compilation()->env()->dtrace_method_probes()) {
|
||||
@ -1516,7 +1531,6 @@ void GraphBuilder::method_return(Value x, bool ignore_return) {
|
||||
// State at end of inlined method is the state of the caller
|
||||
// without the method parameters on stack, including the
|
||||
// return value, if any, of the inlined method on operand stack.
|
||||
int invoke_bci = state()->caller_state()->bci();
|
||||
set_state(state()->caller_state()->copy_for_parsing());
|
||||
if (x != NULL) {
|
||||
if (!ignore_return) {
|
||||
@ -1929,7 +1943,7 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
|
||||
// number of implementors for decl_interface is 0 or 1. If
|
||||
// it's 0 then no class implements decl_interface and there's
|
||||
// no point in inlining.
|
||||
if (!holder->is_loaded() || decl_interface->nof_implementors() != 1 || decl_interface->has_default_methods()) {
|
||||
if (!holder->is_loaded() || decl_interface->nof_implementors() != 1 || decl_interface->has_nonstatic_concrete_methods()) {
|
||||
singleton = NULL;
|
||||
}
|
||||
}
|
||||
@ -4308,7 +4322,7 @@ void GraphBuilder::print_stats() {
|
||||
void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder, Values* obj_args, bool inlined) {
|
||||
assert(known_holder == NULL || (known_holder->is_instance_klass() &&
|
||||
(!known_holder->is_interface() ||
|
||||
((ciInstanceKlass*)known_holder)->has_default_methods())), "should be default method");
|
||||
((ciInstanceKlass*)known_holder)->has_nonstatic_concrete_methods())), "should be non-static concrete method");
|
||||
if (known_holder != NULL) {
|
||||
if (known_holder->exact_klass() == NULL) {
|
||||
known_holder = compilation()->cha_exact_type(known_holder);
|
||||
|
@ -360,7 +360,8 @@ void Invoke::state_values_do(ValueVisitor* f) {
|
||||
}
|
||||
|
||||
ciType* Invoke::declared_type() const {
|
||||
ciType *t = _target->signature()->return_type();
|
||||
ciSignature* declared_signature = state()->scope()->method()->get_declared_signature_at_bci(state()->bci());
|
||||
ciType *t = declared_signature->return_type();
|
||||
assert(t->basic_type() != T_VOID, "need return value of void method?");
|
||||
return t;
|
||||
}
|
||||
|
@ -58,7 +58,7 @@ ciInstanceKlass::ciInstanceKlass(KlassHandle h_k) :
|
||||
_init_state = ik->init_state();
|
||||
_nonstatic_field_size = ik->nonstatic_field_size();
|
||||
_has_nonstatic_fields = ik->has_nonstatic_fields();
|
||||
_has_default_methods = ik->has_default_methods();
|
||||
_has_nonstatic_concrete_methods = ik->has_nonstatic_concrete_methods();
|
||||
_is_anonymous = ik->is_anonymous();
|
||||
_nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields:
|
||||
_has_injected_fields = -1;
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -52,7 +52,7 @@ private:
|
||||
bool _has_finalizer;
|
||||
bool _has_subklass;
|
||||
bool _has_nonstatic_fields;
|
||||
bool _has_default_methods;
|
||||
bool _has_nonstatic_concrete_methods;
|
||||
bool _is_anonymous;
|
||||
|
||||
ciFlags _flags;
|
||||
@ -174,9 +174,9 @@ public:
|
||||
return 2;
|
||||
}
|
||||
}
|
||||
bool has_default_methods() {
|
||||
bool has_nonstatic_concrete_methods() {
|
||||
assert(is_loaded(), "must be loaded");
|
||||
return _has_default_methods;
|
||||
return _has_nonstatic_concrete_methods;
|
||||
}
|
||||
|
||||
bool is_anonymous() {
|
||||
|
@ -256,6 +256,14 @@ class ciMethod : public ciMetadata {
|
||||
return get_method_at_bci(bci, ignored_will_link, &ignored_declared_signature);
|
||||
}
|
||||
|
||||
ciSignature* get_declared_signature_at_bci(int bci) {
|
||||
bool ignored_will_link;
|
||||
ciSignature* declared_signature;
|
||||
get_method_at_bci(bci, ignored_will_link, &declared_signature);
|
||||
assert(declared_signature != NULL, "cannot be null");
|
||||
return declared_signature;
|
||||
}
|
||||
|
||||
// Given a certain calling environment, find the monomorphic target
|
||||
// for the call. Return NULL if the call is not monomorphic in
|
||||
// its calling environment.
|
||||
|
@ -798,11 +798,11 @@ static bool put_after_lookup(const Symbol* name, const Symbol* sig, NameSigHash*
|
||||
void ClassFileParser::parse_interfaces(const ClassFileStream* const stream,
|
||||
const int itfs_len,
|
||||
ConstantPool* const cp,
|
||||
bool* const has_default_methods,
|
||||
bool* const has_nonstatic_concrete_methods,
|
||||
TRAPS) {
|
||||
assert(stream != NULL, "invariant");
|
||||
assert(cp != NULL, "invariant");
|
||||
assert(has_default_methods != NULL, "invariant");
|
||||
assert(has_nonstatic_concrete_methods != NULL, "invariant");
|
||||
|
||||
if (itfs_len == 0) {
|
||||
_local_interfaces = Universe::the_empty_klass_array();
|
||||
@ -844,8 +844,8 @@ void ClassFileParser::parse_interfaces(const ClassFileStream* const stream,
|
||||
"Implementing class");
|
||||
}
|
||||
|
||||
if (InstanceKlass::cast(interf())->has_default_methods()) {
|
||||
*has_default_methods = true;
|
||||
if (InstanceKlass::cast(interf())->has_nonstatic_concrete_methods()) {
|
||||
*has_nonstatic_concrete_methods = true;
|
||||
}
|
||||
_local_interfaces->at_put(index, interf());
|
||||
}
|
||||
@ -2830,12 +2830,12 @@ void ClassFileParser::parse_methods(const ClassFileStream* const cfs,
|
||||
bool is_interface,
|
||||
AccessFlags* promoted_flags,
|
||||
bool* has_final_method,
|
||||
bool* declares_default_methods,
|
||||
bool* declares_nonstatic_concrete_methods,
|
||||
TRAPS) {
|
||||
assert(cfs != NULL, "invariant");
|
||||
assert(promoted_flags != NULL, "invariant");
|
||||
assert(has_final_method != NULL, "invariant");
|
||||
assert(declares_default_methods != NULL, "invariant");
|
||||
assert(declares_nonstatic_concrete_methods != NULL, "invariant");
|
||||
|
||||
assert(NULL == _methods, "invariant");
|
||||
|
||||
@ -2860,11 +2860,11 @@ void ClassFileParser::parse_methods(const ClassFileStream* const cfs,
|
||||
if (method->is_final()) {
|
||||
*has_final_method = true;
|
||||
}
|
||||
// declares_default_methods: declares concrete instance methods, any access flags
|
||||
// declares_nonstatic_concrete_methods: declares concrete instance methods, any access flags
|
||||
// used for interface initialization, and default method inheritance analysis
|
||||
if (is_interface && !(*declares_default_methods)
|
||||
if (is_interface && !(*declares_nonstatic_concrete_methods)
|
||||
&& !method->is_abstract() && !method->is_static()) {
|
||||
*declares_default_methods = true;
|
||||
*declares_nonstatic_concrete_methods = true;
|
||||
}
|
||||
_methods->at_put(index, method);
|
||||
}
|
||||
@ -5250,8 +5250,8 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik, bool changed_by_loa
|
||||
|
||||
ik->set_minor_version(_minor_version);
|
||||
ik->set_major_version(_major_version);
|
||||
ik->set_has_default_methods(_has_default_methods);
|
||||
ik->set_declares_default_methods(_declares_default_methods);
|
||||
ik->set_has_nonstatic_concrete_methods(_has_nonstatic_concrete_methods);
|
||||
ik->set_declares_nonstatic_concrete_methods(_declares_nonstatic_concrete_methods);
|
||||
|
||||
if (_host_klass != NULL) {
|
||||
assert (ik->is_anonymous(), "should be the same");
|
||||
@ -5311,12 +5311,9 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik, bool changed_by_loa
|
||||
// check if this class overrides any final method
|
||||
check_final_method_override(ik, CHECK);
|
||||
|
||||
// check that if this class is an interface then it doesn't have static methods
|
||||
if (ik->is_interface()) {
|
||||
/* An interface in a JAVA 8 classfile can be static */
|
||||
if (_major_version < JAVA_8_VERSION) {
|
||||
check_illegal_static_method(ik, CHECK);
|
||||
}
|
||||
// reject static interface methods prior to Java 8
|
||||
if (ik->is_interface() && _major_version < JAVA_8_VERSION) {
|
||||
check_illegal_static_method(ik, CHECK);
|
||||
}
|
||||
|
||||
// Obtain this_klass' module entry
|
||||
@ -5336,9 +5333,9 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik, bool changed_by_loa
|
||||
|
||||
assert(_all_mirandas != NULL, "invariant");
|
||||
|
||||
// Generate any default methods - default methods are interface methods
|
||||
// that have a default implementation. This is new with Lambda project.
|
||||
if (_has_default_methods ) {
|
||||
// Generate any default methods - default methods are public interface methods
|
||||
// that have a default implementation. This is new with Java 8.
|
||||
if (_has_nonstatic_concrete_methods) {
|
||||
DefaultMethods::generate_default_methods(ik,
|
||||
_all_mirandas,
|
||||
CHECK);
|
||||
@ -5523,8 +5520,8 @@ ClassFileParser::ClassFileParser(ClassFileStream* stream,
|
||||
_java_fields_count(0),
|
||||
_need_verify(false),
|
||||
_relax_verify(false),
|
||||
_has_default_methods(false),
|
||||
_declares_default_methods(false),
|
||||
_has_nonstatic_concrete_methods(false),
|
||||
_declares_nonstatic_concrete_methods(false),
|
||||
_has_final_method(false),
|
||||
_has_finalizer(false),
|
||||
_has_empty_finalizer(false),
|
||||
@ -5778,9 +5775,22 @@ void ClassFileParser::parse_stream(const ClassFileStream* const stream,
|
||||
// Anonymous classes such as generated LambdaForm classes are also not included.
|
||||
if (SystemDictionaryShared::is_sharing_possible(_loader_data) &&
|
||||
_host_klass == NULL) {
|
||||
oop class_loader = _loader_data->class_loader();
|
||||
ResourceMark rm(THREAD);
|
||||
classlist_file->print_cr("%s", _class_name->as_C_string());
|
||||
classlist_file->flush();
|
||||
// For the boot and platform class loaders, check if the class is not found in the
|
||||
// java runtime image. Additional check for the boot class loader is if the class
|
||||
// is not found in the boot loader's appended entries. This indicates that the class
|
||||
// is not useable during run time, such as the ones found in the --patch-module entries,
|
||||
// so it should not be included in the classlist file.
|
||||
if (((class_loader == NULL && !ClassLoader::contains_append_entry(stream->source())) ||
|
||||
SystemDictionary::is_platform_class_loader(class_loader)) &&
|
||||
!ClassLoader::is_jrt(stream->source())) {
|
||||
tty->print_cr("skip writing class %s from source %s to classlist file",
|
||||
_class_name->as_C_string(), stream->source());
|
||||
} else {
|
||||
classlist_file->print_cr("%s", _class_name->as_C_string());
|
||||
classlist_file->flush();
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
@ -5798,7 +5808,7 @@ void ClassFileParser::parse_stream(const ClassFileStream* const stream,
|
||||
parse_interfaces(stream,
|
||||
_itfs_len,
|
||||
cp,
|
||||
&_has_default_methods,
|
||||
&_has_nonstatic_concrete_methods,
|
||||
CHECK);
|
||||
|
||||
assert(_local_interfaces != NULL, "invariant");
|
||||
@ -5821,7 +5831,7 @@ void ClassFileParser::parse_stream(const ClassFileStream* const stream,
|
||||
_access_flags.is_interface(),
|
||||
&promoted_flags,
|
||||
&_has_final_method,
|
||||
&_declares_default_methods,
|
||||
&_declares_nonstatic_concrete_methods,
|
||||
CHECK);
|
||||
|
||||
assert(_methods != NULL, "invariant");
|
||||
@ -5829,8 +5839,8 @@ void ClassFileParser::parse_stream(const ClassFileStream* const stream,
|
||||
// promote flags from parse_methods() to the klass' flags
|
||||
_access_flags.add_promoted_flags(promoted_flags.as_int());
|
||||
|
||||
if (_declares_default_methods) {
|
||||
_has_default_methods = true;
|
||||
if (_declares_nonstatic_concrete_methods) {
|
||||
_has_nonstatic_concrete_methods = true;
|
||||
}
|
||||
|
||||
// Additional attributes/annotations
|
||||
@ -5884,8 +5894,8 @@ void ClassFileParser::post_process_parsed_stream(const ClassFileStream* const st
|
||||
}
|
||||
|
||||
if (_super_klass != NULL) {
|
||||
if (_super_klass->has_default_methods()) {
|
||||
_has_default_methods = true;
|
||||
if (_super_klass->has_nonstatic_concrete_methods()) {
|
||||
_has_nonstatic_concrete_methods = true;
|
||||
}
|
||||
|
||||
if (_super_klass->is_interface()) {
|
||||
|
@ -139,8 +139,8 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
|
||||
bool _need_verify;
|
||||
bool _relax_verify;
|
||||
|
||||
bool _has_default_methods;
|
||||
bool _declares_default_methods;
|
||||
bool _has_nonstatic_concrete_methods;
|
||||
bool _declares_nonstatic_concrete_methods;
|
||||
bool _has_final_method;
|
||||
|
||||
// precomputed flags
|
||||
@ -186,7 +186,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
|
||||
void parse_interfaces(const ClassFileStream* const stream,
|
||||
const int itfs_len,
|
||||
ConstantPool* const cp,
|
||||
bool* has_default_methods,
|
||||
bool* has_nonstatic_concrete_methods,
|
||||
TRAPS);
|
||||
|
||||
const InstanceKlass* parse_super_class(ConstantPool* const cp,
|
||||
@ -224,7 +224,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
|
||||
bool is_interface,
|
||||
AccessFlags* const promoted_flags,
|
||||
bool* const has_final_method,
|
||||
bool* const declares_default_methods,
|
||||
bool* const declares_nonstatic_concrete_methods,
|
||||
TRAPS);
|
||||
|
||||
const u2* parse_exception_table(const ClassFileStream* const stream,
|
||||
|
@ -81,7 +81,6 @@ typedef void * * (JNICALL *ZipOpen_t)(const char *name, char **pmsg);
|
||||
typedef void (JNICALL *ZipClose_t)(jzfile *zip);
|
||||
typedef jzentry* (JNICALL *FindEntry_t)(jzfile *zip, const char *name, jint *sizeP, jint *nameLen);
|
||||
typedef jboolean (JNICALL *ReadEntry_t)(jzfile *zip, jzentry *entry, unsigned char *buf, char *namebuf);
|
||||
typedef jboolean (JNICALL *ReadMappedEntry_t)(jzfile *zip, jzentry *entry, unsigned char **buf, char *namebuf);
|
||||
typedef jzentry* (JNICALL *GetNextEntry_t)(jzfile *zip, jint n);
|
||||
typedef jboolean (JNICALL *ZipInflateFully_t)(void *inBuf, jlong inLen, void *outBuf, jlong outLen, char **pmsg);
|
||||
typedef jint (JNICALL *Crc32_t)(jint crc, const jbyte *buf, jint len);
|
||||
@ -91,7 +90,6 @@ static ZipOpen_t ZipOpen = NULL;
|
||||
static ZipClose_t ZipClose = NULL;
|
||||
static FindEntry_t FindEntry = NULL;
|
||||
static ReadEntry_t ReadEntry = NULL;
|
||||
static ReadMappedEntry_t ReadMappedEntry = NULL;
|
||||
static GetNextEntry_t GetNextEntry = NULL;
|
||||
static canonicalize_fn_t CanonicalizeEntry = NULL;
|
||||
static ZipInflateFully_t ZipInflateFully = NULL;
|
||||
@ -353,15 +351,10 @@ u1* ClassPathZipEntry::open_entry(const char* name, jint* filesize, bool nul_ter
|
||||
filename = NEW_RESOURCE_ARRAY(char, name_len + 1);
|
||||
}
|
||||
|
||||
// file found, get pointer to the entry in mmapped jar file.
|
||||
if (ReadMappedEntry == NULL ||
|
||||
!(*ReadMappedEntry)(_zip, entry, &buffer, filename)) {
|
||||
// mmapped access not available, perhaps due to compression,
|
||||
// read contents into resource array
|
||||
int size = (*filesize) + ((nul_terminate) ? 1 : 0);
|
||||
buffer = NEW_RESOURCE_ARRAY(u1, size);
|
||||
if (!(*ReadEntry)(_zip, entry, buffer, filename)) return NULL;
|
||||
}
|
||||
// read contents into resource array
|
||||
int size = (*filesize) + ((nul_terminate) ? 1 : 0);
|
||||
buffer = NEW_RESOURCE_ARRAY(u1, size);
|
||||
if (!(*ReadEntry)(_zip, entry, buffer, filename)) return NULL;
|
||||
|
||||
// return result
|
||||
if (nul_terminate) {
|
||||
@ -952,11 +945,11 @@ ClassPathZipEntry* ClassLoader::create_class_path_zip_entry(const char *path, bo
|
||||
}
|
||||
|
||||
// returns true if entry already on class path
|
||||
bool ClassLoader::contains_entry(ClassPathEntry *entry) {
|
||||
bool ClassLoader::contains_append_entry(const char* name) {
|
||||
ClassPathEntry* e = _first_append_entry;
|
||||
while (e != NULL) {
|
||||
// assume zip entries have been canonicalized
|
||||
if (strcmp(entry->name(), e->name()) == 0) {
|
||||
if (strcmp(name, e->name()) == 0) {
|
||||
return true;
|
||||
}
|
||||
e = e->next();
|
||||
@ -998,7 +991,7 @@ bool ClassLoader::update_class_path_entry_list(const char *path,
|
||||
|
||||
// Do not reorder the bootclasspath which would break get_system_package().
|
||||
// Add new entry to linked list
|
||||
if (!check_for_duplicates || !contains_entry(new_entry)) {
|
||||
if (!check_for_duplicates || !contains_append_entry(new_entry->name())) {
|
||||
ClassLoaderExt::add_class_path_entry(path, check_for_duplicates, new_entry);
|
||||
}
|
||||
return true;
|
||||
@ -1079,7 +1072,6 @@ void ClassLoader::load_zip_library() {
|
||||
ZipClose = CAST_TO_FN_PTR(ZipClose_t, os::dll_lookup(handle, "ZIP_Close"));
|
||||
FindEntry = CAST_TO_FN_PTR(FindEntry_t, os::dll_lookup(handle, "ZIP_FindEntry"));
|
||||
ReadEntry = CAST_TO_FN_PTR(ReadEntry_t, os::dll_lookup(handle, "ZIP_ReadEntry"));
|
||||
ReadMappedEntry = CAST_TO_FN_PTR(ReadMappedEntry_t, os::dll_lookup(handle, "ZIP_ReadMappedEntry"));
|
||||
GetNextEntry = CAST_TO_FN_PTR(GetNextEntry_t, os::dll_lookup(handle, "ZIP_GetNextEntry"));
|
||||
ZipInflateFully = CAST_TO_FN_PTR(ZipInflateFully_t, os::dll_lookup(handle, "ZIP_InflateFully"));
|
||||
Crc32 = CAST_TO_FN_PTR(Crc32_t, os::dll_lookup(handle, "ZIP_CRC32"));
|
||||
@ -2049,7 +2041,6 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
|
||||
if (nm != NULL && !m->is_method_handle_intrinsic()) {
|
||||
// Throw out the code so that the code cache doesn't fill up
|
||||
nm->make_not_entrant();
|
||||
m->clear_code();
|
||||
}
|
||||
CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_full_optimization,
|
||||
methodHandle(), 0, CompileTask::Reason_CTW, THREAD);
|
||||
@ -2068,7 +2059,6 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
|
||||
if (nm != NULL && !m->is_method_handle_intrinsic()) {
|
||||
// Throw out the code so that the code cache doesn't fill up
|
||||
nm->make_not_entrant();
|
||||
m->clear_code();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -451,7 +451,7 @@ class ClassLoader: AllStatic {
|
||||
static void set_first_append_entry(ClassPathEntry* entry);
|
||||
|
||||
// indicates if class path already contains a entry (exact match by name)
|
||||
static bool contains_entry(ClassPathEntry* entry);
|
||||
static bool contains_append_entry(const char* name);
|
||||
|
||||
// adds a class path list
|
||||
static void add_to_list(ClassPathEntry* new_entry);
|
||||
|
@ -639,7 +639,6 @@ const char* ClassLoaderData::loader_name() {
|
||||
#undef CLD_DUMP_KLASSES
|
||||
|
||||
void ClassLoaderData::dump(outputStream * const out) {
|
||||
ResourceMark rm;
|
||||
out->print("ClassLoaderData CLD: " PTR_FORMAT ", loader: " PTR_FORMAT ", loader_klass: " PTR_FORMAT " %s {",
|
||||
p2i(this), p2i((void *)class_loader()),
|
||||
p2i(class_loader() != NULL ? class_loader()->klass() : NULL), loader_name());
|
||||
@ -656,7 +655,6 @@ void ClassLoaderData::dump(outputStream * const out) {
|
||||
|
||||
#ifdef CLD_DUMP_KLASSES
|
||||
if (Verbose) {
|
||||
ResourceMark rm;
|
||||
Klass* k = _klasses;
|
||||
while (k != NULL) {
|
||||
out->print_cr("klass " PTR_FORMAT ", %s, CT: %d, MUT: %d", k, k->name()->as_C_string(),
|
||||
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -914,7 +914,7 @@ static void create_defaults_and_exceptions(
BytecodeBuffer buffer;

if (log_is_enabled(Debug, defaultmethods)) {
ResourceMark rm;
ResourceMark rm(THREAD);
outputStream* logstream = Log(defaultmethods)::debug_stream();
logstream->print("for slot: ");
slot->print_on(logstream);
@ -929,6 +929,7 @@ static void create_defaults_and_exceptions(
if (method->has_target()) {
Method* selected = method->get_selected_target();
if (selected->method_holder()->is_interface()) {
assert(!selected->is_private(), "pushing private interface method as default");
defaults.push(selected);
}
} else if (method->throws_exception()) {

@ -780,19 +780,26 @@ void java_lang_Class::set_mirror_module_field(KlassHandle k, Handle mirror, Hand
// Put the class on the fixup_module_list to patch later when the java.lang.reflect.Module
// for java.base is known.
assert(!Universe::is_module_initialized(), "Incorrect java.lang.reflect.Module pre module system initialization");
MutexLocker m1(Module_lock, THREAD);
// Keep list of classes needing java.base module fixup
if (!ModuleEntryTable::javabase_defined()) {
if (fixup_module_field_list() == NULL) {
GrowableArray<Klass*>* list =
new (ResourceObj::C_HEAP, mtModule) GrowableArray<Klass*>(500, true);
set_fixup_module_field_list(list);

bool javabase_was_defined = false;
{
MutexLocker m1(Module_lock, THREAD);
// Keep list of classes needing java.base module fixup
if (!ModuleEntryTable::javabase_defined()) {
if (fixup_module_field_list() == NULL) {
GrowableArray<Klass*>* list =
new (ResourceObj::C_HEAP, mtModule) GrowableArray<Klass*>(500, true);
set_fixup_module_field_list(list);
}
k->class_loader_data()->inc_keep_alive();
fixup_module_field_list()->push(k());
} else {
javabase_was_defined = true;
}
k->class_loader_data()->inc_keep_alive();
fixup_module_field_list()->push(k());
} else {
// java.base was defined at some point between calling create_mirror()
// and obtaining the Module_lock, patch this particular class with java.base.
}

// If java.base was already defined then patch this particular class with java.base.
if (javabase_was_defined) {
ModuleEntry *javabase_entry = ModuleEntryTable::javabase_moduleEntry();
assert(javabase_entry != NULL && javabase_entry->module() != NULL,
"Setting class module field, java.base should be defined");

@ -74,7 +74,7 @@ instanceKlassHandle KlassFactory::check_shared_class_file_load_hook(
(SharedClassPathEntry*)FileMapInfo::shared_classpath(path_index);
ClassFileStream* stream = new ClassFileStream(ptr,
end_ptr - ptr,
ent->_name,
ent == NULL ? NULL : ent->_name,
ClassFileStream::verify);
ClassFileParser parser(stream,
class_name,

@ -368,9 +368,6 @@ void ModuleEntryTable::finalize_javabase(Handle module_handle, Symbol* version,

// Store pointer to the ModuleEntry for java.base in the java.lang.reflect.Module object.
java_lang_reflect_Module::set_module_entry(module_handle(), jb_module);

// Patch any previously loaded classes' module field with java.base's java.lang.reflect.Module.
patch_javabase_entries(module_handle);
}

// Within java.lang.Class instances there is a java.lang.reflect.Module field
@ -378,7 +375,6 @@ void ModuleEntryTable::finalize_javabase(Handle module_handle, Symbol* version,
// definition, classes needing their module field set are added to the fixup_module_list.
// Their module field is set once java.base's java.lang.reflect.Module is known to the VM.
void ModuleEntryTable::patch_javabase_entries(Handle module_handle) {
assert(Module_lock->owned_by_self(), "should have the Module_lock");
if (module_handle.is_null()) {
fatal("Unable to patch the module field of classes loaded prior to java.base's definition, invalid java.lang.reflect.Module");
}

@ -244,6 +244,12 @@ static void define_javabase_module(jobject module, jstring version,
"Module java.base is already defined");
}

// Only the thread that actually defined the base module will get here,
// so no locking is needed.

// Patch any previously loaded class's module field with java.base's java.lang.reflect.Module.
ModuleEntryTable::patch_javabase_entries(module_handle);

log_debug(modules)("define_javabase_module(): Definition of module: java.base,"
" version: %s, location: %s, package #: %d",
module_version != NULL ? module_version : "NULL",

@ -1234,7 +1234,7 @@ bool SystemDictionary::is_shared_class_visible(Symbol* class_name,
SharedClassPathEntry* ent =
(SharedClassPathEntry*)FileMapInfo::shared_classpath(path_index);
if (!Universe::is_module_initialized()) {
assert(ent->is_jrt(),
assert(ent != NULL && ent->is_jrt(),
"Loading non-bootstrap classes before the module system is initialized");
assert(class_loader.is_null(), "sanity");
return true;
@ -1257,6 +1257,7 @@ bool SystemDictionary::is_shared_class_visible(Symbol* class_name,
}

if (class_loader.is_null()) {
assert(ent != NULL, "Shared class for NULL classloader must have valid SharedClassPathEntry");
// The NULL classloader can load archived class originated from the
// "modules" jimage and the -Xbootclasspath/a. For class from the
// "modules" jimage, the PackageEntry/ModuleEntry must be defined

@ -226,7 +226,7 @@ class SystemDictionary : AllStatic {
WKID_LIMIT,

#if INCLUDE_JVMCI
FIRST_JVMCI_WKID = WK_KLASS_ENUM_NAME(HotSpotCompiledCode_klass),
FIRST_JVMCI_WKID = WK_KLASS_ENUM_NAME(JVMCI_klass),
LAST_JVMCI_WKID = WK_KLASS_ENUM_NAME(Value_klass),
#endif

@ -1252,7 +1252,7 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
if (method() != NULL && (method()->code() == this ||
method()->from_compiled_entry() == verified_entry_point())) {
HandleMark hm;
method()->clear_code();
method()->clear_code(false /* already owns Patching_lock */);
}
} // leave critical region under Patching_lock

@ -2340,13 +2340,11 @@ void CMSCollector::verify_after_remark_work_1() {
{
StrongRootsScope srs(1);

gch->gen_process_roots(&srs,
GenCollectedHeap::OldGen,
gch->cms_process_roots(&srs,
true, // young gen as roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&notOlder,
NULL,
NULL);
}

@ -2414,13 +2412,11 @@ void CMSCollector::verify_after_remark_work_2() {
{
StrongRootsScope srs(1);

gch->gen_process_roots(&srs,
GenCollectedHeap::OldGen,
gch->cms_process_roots(&srs,
true, // young gen as roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&notOlder,
NULL,
&cld_closure);
}

@ -2903,13 +2899,11 @@ void CMSCollector::checkpointRootsInitialWork() {

StrongRootsScope srs(1);

gch->gen_process_roots(&srs,
GenCollectedHeap::OldGen,
gch->cms_process_roots(&srs,
true, // young gen as roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&notOlder,
NULL,
&cld_closure);
}
}
@ -4290,13 +4284,11 @@ void CMSParInitialMarkTask::work(uint worker_id) {

CLDToOopClosure cld_closure(&par_mri_cl, true);

gch->gen_process_roots(_strong_roots_scope,
GenCollectedHeap::OldGen,
gch->cms_process_roots(_strong_roots_scope,
false, // yg was scanned above
GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
_collector->should_unload_classes(),
&par_mri_cl,
NULL,
&cld_closure);
assert(_collector->should_unload_classes()
|| (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
@ -4421,13 +4413,11 @@ void CMSParRemarkTask::work(uint worker_id) {
// ---------- remaining roots --------------
_timer.reset();
_timer.start();
gch->gen_process_roots(_strong_roots_scope,
GenCollectedHeap::OldGen,
gch->cms_process_roots(_strong_roots_scope,
false, // yg was scanned above
GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
_collector->should_unload_classes(),
&par_mrias_cl,
NULL,
NULL); // The dirty klasses will be handled below

assert(_collector->should_unload_classes()
@ -4970,13 +4960,11 @@ void CMSCollector::do_remark_non_parallel() {
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
StrongRootsScope srs(1);

gch->gen_process_roots(&srs,
GenCollectedHeap::OldGen,
gch->cms_process_roots(&srs,
true, // young gen as roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&mrias_cl,
NULL,
NULL); // The dirty klasses will be handled below

assert(should_unload_classes()

@ -605,14 +605,10 @@ void ParNewGenTask::work(uint worker_id) {
false);

par_scan_state.start_strong_roots();
gch->gen_process_roots(_strong_roots_scope,
GenCollectedHeap::YoungGen,
true, // Process younger gens, if any, as strong roots.
GenCollectedHeap::SO_ScavengeCodeCache,
GenCollectedHeap::StrongAndWeakRoots,
&par_scan_state.to_space_root_closure(),
&par_scan_state.older_gen_closure(),
&cld_scan_closure);
gch->young_process_roots(_strong_roots_scope,
&par_scan_state.to_space_root_closure(),
&par_scan_state.older_gen_closure(),
&cld_scan_closure);

par_scan_state.end_strong_roots();

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -53,99 +53,4 @@ void G1BiasedMappedArrayBase::verify_biased_index_inclusive_end(idx_t biased_ind
biased_index, bias(), length());
}

class TestMappedArray : public G1BiasedMappedArray<int> {
protected:
virtual int default_value() const { return 0xBAADBABE; }
public:
static void test_biasedarray() {
const size_t REGION_SIZE_IN_WORDS = 512;
const size_t NUM_REGIONS = 20;
HeapWord* fake_heap = (HeapWord*)LP64_ONLY(0xBAAA00000) NOT_LP64(0xBA000000); // Any value that is non-zero

TestMappedArray array;
array.initialize(fake_heap, fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS,
REGION_SIZE_IN_WORDS * HeapWordSize);
// Check address calculation (bounds)
assert(array.bottom_address_mapped() == fake_heap,
"bottom mapped address should be " PTR_FORMAT ", but is " PTR_FORMAT, p2i(fake_heap), p2i(array.bottom_address_mapped()));
assert(array.end_address_mapped() == (fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS), "must be");

int* bottom = array.address_mapped_to(fake_heap);
assert((void*)bottom == (void*) array.base(), "must be");
int* end = array.address_mapped_to(fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS);
assert((void*)end == (void*)(array.base() + array.length()), "must be");
// The entire array should contain default value elements
for (int* current = bottom; current < end; current++) {
assert(*current == array.default_value(), "must be");
}

// Test setting values in the table

HeapWord* region_start_address = fake_heap + REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2);
HeapWord* region_end_address = fake_heap + (REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2) + REGION_SIZE_IN_WORDS - 1);

// Set/get by address tests: invert some value; first retrieve one
int actual_value = array.get_by_index(NUM_REGIONS / 2);
array.set_by_index(NUM_REGIONS / 2, ~actual_value);
// Get the same value by address, should correspond to the start of the "region"
int value = array.get_by_address(region_start_address);
assert(value == ~actual_value, "must be");
// Get the same value by address, at one HeapWord before the start
value = array.get_by_address(region_start_address - 1);
assert(value == array.default_value(), "must be");
// Get the same value by address, at the end of the "region"
value = array.get_by_address(region_end_address);
assert(value == ~actual_value, "must be");
// Make sure the next value maps to another index
value = array.get_by_address(region_end_address + 1);
assert(value == array.default_value(), "must be");

// Reset the value in the array
array.set_by_address(region_start_address + (region_end_address - region_start_address) / 2, actual_value);

// The entire array should have the default value again
for (int* current = bottom; current < end; current++) {
assert(*current == array.default_value(), "must be");
}

// Set/get by index tests: invert some value
idx_t index = NUM_REGIONS / 2;
actual_value = array.get_by_index(index);
array.set_by_index(index, ~actual_value);

value = array.get_by_index(index);
assert(value == ~actual_value, "must be");

value = array.get_by_index(index - 1);
assert(value == array.default_value(), "must be");

value = array.get_by_index(index + 1);
assert(value == array.default_value(), "must be");

array.set_by_index(0, 0);
value = array.get_by_index(0);
assert(value == 0, "must be");

array.set_by_index(array.length() - 1, 0);
value = array.get_by_index(array.length() - 1);
assert(value == 0, "must be");

array.set_by_index(index, 0);

// The array should have three zeros, and default values otherwise
size_t num_zeros = 0;
for (int* current = bottom; current < end; current++) {
assert(*current == array.default_value() || *current == 0, "must be");
if (*current == 0) {
num_zeros++;
}
}
assert(num_zeros == 3, "must be");
}
};

void TestG1BiasedArray_test() {
TestMappedArray::test_biasedarray();
}

#endif

@ -648,15 +648,10 @@ void DefNewGeneration::collect(bool full,
// See: CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel.
StrongRootsScope srs(0);

gch->gen_process_roots(&srs,
GenCollectedHeap::YoungGen,
true, // Process younger gens, if any,
// as strong roots.
GenCollectedHeap::SO_ScavengeCodeCache,
GenCollectedHeap::StrongAndWeakRoots,
&fsc_with_no_gc_barrier,
&fsc_with_gc_barrier,
&cld_scan_closure);
gch->young_process_roots(&srs,
&fsc_with_no_gc_barrier,
&fsc_with_gc_barrier,
&cld_scan_closure);
}

// "evacuate followers".

@ -196,14 +196,13 @@ void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
{
StrongRootsScope srs(1);

gch->gen_process_roots(&srs,
GenCollectedHeap::OldGen,
false, // Younger gens are not roots.
GenCollectedHeap::SO_None,
ClassUnloading,
&follow_root_closure,
&follow_root_closure,
&follow_cld_closure);
gch->full_process_roots(&srs,
false, // not the adjust phase
GenCollectedHeap::SO_None,
ClassUnloading, // only strong roots if ClassUnloading
// is enabled
&follow_root_closure,
&follow_cld_closure);
}

// Process reference objects found during marking
@ -295,14 +294,12 @@ void GenMarkSweep::mark_sweep_phase3() {
{
StrongRootsScope srs(1);

gch->gen_process_roots(&srs,
GenCollectedHeap::OldGen,
false, // Younger gens are not roots.
GenCollectedHeap::SO_AllCodeCache,
GenCollectedHeap::StrongAndWeakRoots,
&adjust_pointer_closure,
&adjust_pointer_closure,
&adjust_cld_closure);
gch->full_process_roots(&srs,
true, // this is the adjust phase
GenCollectedHeap::SO_AllCodeCache,
false, // all roots
&adjust_pointer_closure,
&adjust_cld_closure);
}

gch->gen_process_weak_roots(&adjust_pointer_closure);

@ -35,7 +35,6 @@
#include "prims/jni_md.h"
#include "utilities/ticks.hpp"

#define LOG_STOP_TIME_FORMAT "(%.3fs, %.3fs) %.3fms"
#define LOG_STOP_HEAP_FORMAT SIZE_FORMAT "M->" SIZE_FORMAT "M(" SIZE_FORMAT "M)"

inline void GCTraceTimeImpl::log_start(jlong start_counter) {
@ -46,7 +45,7 @@ inline void GCTraceTimeImpl::log_start(jlong start_counter) {
if (_gc_cause != GCCause::_no_gc) {
out.print(" (%s)", GCCause::to_string(_gc_cause));
}
out.print_cr(" (%.3fs)", TimeHelper::counter_to_seconds(start_counter));
out.cr();
}
}

@ -71,7 +70,7 @@ inline void GCTraceTimeImpl::log_stop(jlong start_counter, jlong stop_counter) {
out.print(" " LOG_STOP_HEAP_FORMAT, used_before_m, used_m, capacity_m);
}

out.print_cr(" " LOG_STOP_TIME_FORMAT, start_time_in_secs, stop_time_in_secs, duration_in_ms);
out.print_cr(" %.3fms", duration_in_ms);
}

inline void GCTraceTimeImpl::time_stamp(Ticks& ticks) {
@ -117,7 +116,7 @@ template <LogLevelType Level, LogTagType T0, LogTagType T1, LogTagType T2, LogTa
GCTraceConcTimeImpl<Level, T0, T1, T2, T3, T4, GuardTag>::GCTraceConcTimeImpl(const char* title) :
_enabled(LogImpl<T0, T1, T2, T3, T4, GuardTag>::is_level(Level)), _start_time(os::elapsed_counter()), _title(title) {
if (_enabled) {
LogImpl<T0, T1, T2, T3, T4>::template write<Level>("%s (%.3fs)", _title, TimeHelper::counter_to_seconds(_start_time));
LogImpl<T0, T1, T2, T3, T4>::template write<Level>("%s", _title);
}
}

@ -125,11 +124,8 @@ template <LogLevelType Level, LogTagType T0, LogTagType T1, LogTagType T2, LogTa
GCTraceConcTimeImpl<Level, T0, T1, T2, T3, T4, GuardTag>::~GCTraceConcTimeImpl() {
if (_enabled) {
jlong stop_time = os::elapsed_counter();
LogImpl<T0, T1, T2, T3, T4>::template write<Level>("%s " LOG_STOP_TIME_FORMAT,
_title,
TimeHelper::counter_to_seconds(_start_time),
TimeHelper::counter_to_seconds(stop_time),
TimeHelper::counter_to_millis(stop_time - _start_time));
LogImpl<T0, T1, T2, T3, T4>::template write<Level>("%s %0.3fms", _title,
TimeHelper::counter_to_millis(stop_time - _start_time));
}
}

@ -613,16 +613,6 @@ void GenCollectedHeap::process_roots(StrongRootsScope* scope,
SystemDictionary::roots_oops_do(strong_roots, weak_roots);
}

// All threads execute the following. A specific chunk of buckets
// from the StringTable are the individual tasks.
if (weak_roots != NULL) {
if (is_par) {
StringTable::possibly_parallel_oops_do(weak_roots);
} else {
StringTable::oops_do(weak_roots);
}
}

if (!_process_strong_tasks->is_task_claimed(GCH_PS_CodeCache_oops_do)) {
if (so & SO_ScavengeCodeCache) {
assert(code_roots != NULL, "must supply closure for code cache");
@ -644,46 +634,82 @@ void GenCollectedHeap::process_roots(StrongRootsScope* scope,
}
}

void GenCollectedHeap::gen_process_roots(StrongRootsScope* scope,
GenerationType type,
void GenCollectedHeap::process_string_table_roots(StrongRootsScope* scope,
OopClosure* root_closure) {
assert(root_closure != NULL, "Must be set");
// All threads execute the following. A specific chunk of buckets
// from the StringTable are the individual tasks.
if (scope->n_threads() > 1) {
StringTable::possibly_parallel_oops_do(root_closure);
} else {
StringTable::oops_do(root_closure);
}
}

void GenCollectedHeap::young_process_roots(StrongRootsScope* scope,
OopsInGenClosure* root_closure,
OopsInGenClosure* old_gen_closure,
CLDClosure* cld_closure) {
MarkingCodeBlobClosure mark_code_closure(root_closure, CodeBlobToOopClosure::FixRelocations);

process_roots(scope, SO_ScavengeCodeCache, root_closure, root_closure,
cld_closure, cld_closure, &mark_code_closure);
process_string_table_roots(scope, root_closure);

if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
root_closure->reset_generation();
}

// When collection is parallel, all threads get to cooperate to do
// old generation scanning.
old_gen_closure->set_generation(_old_gen);
rem_set()->younger_refs_iterate(_old_gen, old_gen_closure, scope->n_threads());
old_gen_closure->reset_generation();

_process_strong_tasks->all_tasks_completed(scope->n_threads());
}

void GenCollectedHeap::cms_process_roots(StrongRootsScope* scope,
bool young_gen_as_roots,
ScanningOption so,
bool only_strong_roots,
OopsInGenClosure* not_older_gens,
OopsInGenClosure* older_gens,
OopsInGenClosure* root_closure,
CLDClosure* cld_closure) {
const bool is_adjust_phase = !only_strong_roots && !young_gen_as_roots;

bool is_moving_collection = false;
if (type == YoungGen || is_adjust_phase) {
// young collections are always moving
is_moving_collection = true;
}

MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
OopsInGenClosure* weak_roots = only_strong_roots ? NULL : not_older_gens;
MarkingCodeBlobClosure mark_code_closure(root_closure, !CodeBlobToOopClosure::FixRelocations);
OopsInGenClosure* weak_roots = only_strong_roots ? NULL : root_closure;
CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;

process_roots(scope, so,
not_older_gens, weak_roots,
cld_closure, weak_cld_closure,
&mark_code_closure);

if (young_gen_as_roots) {
if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
if (type == OldGen) {
not_older_gens->set_generation(_young_gen);
_young_gen->oop_iterate(not_older_gens);
}
not_older_gens->reset_generation();
}
process_roots(scope, so, root_closure, weak_roots, cld_closure, weak_cld_closure, &mark_code_closure);
if (!only_strong_roots) {
process_string_table_roots(scope, root_closure);
}
// When collection is parallel, all threads get to cooperate to do
// old generation scanning.
if (type == YoungGen) {
older_gens->set_generation(_old_gen);
rem_set()->younger_refs_iterate(_old_gen, older_gens, scope->n_threads());
older_gens->reset_generation();

if (young_gen_as_roots &&
!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
root_closure->set_generation(_young_gen);
_young_gen->oop_iterate(root_closure);
root_closure->reset_generation();
}

_process_strong_tasks->all_tasks_completed(scope->n_threads());
}

void GenCollectedHeap::full_process_roots(StrongRootsScope* scope,
bool is_adjust_phase,
ScanningOption so,
bool only_strong_roots,
OopsInGenClosure* root_closure,
CLDClosure* cld_closure) {
MarkingCodeBlobClosure mark_code_closure(root_closure, is_adjust_phase);
OopsInGenClosure* weak_roots = only_strong_roots ? NULL : root_closure;
CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;

process_roots(scope, so, root_closure, weak_roots, cld_closure, weak_cld_closure, &mark_code_closure);
if (is_adjust_phase) {
// We never treat the string table as roots during marking
// for the full gc, so we only need to process it during
// the adjust phase.
process_string_table_roots(scope, root_closure);
}

_process_strong_tasks->all_tasks_completed(scope->n_threads());

@ -374,16 +374,7 @@ public:
// asserted to be this type.
static GenCollectedHeap* heap();

// Invoke the "do_oop" method of one of the closures "not_older_gens"
// or "older_gens" on root locations for the generations depending on
// the type. (The "older_gens" closure is used for scanning references
// from older generations; "not_older_gens" is used everywhere else.)
// If "younger_gens_as_roots" is false, younger generations are
// not scanned as roots; in this case, the caller must be arranging to
// scan the younger generations itself. (For example, a generation might
// explicitly mark reachable objects in younger generations, to avoid
// excess storage retention.)
// The "so" argument determines which of the roots
// The ScanningOption determines which of the roots
// the closure is applied to:
// "SO_None" does none;
enum ScanningOption {
@ -401,19 +392,34 @@ public:
CLDClosure* weak_cld_closure,
CodeBlobToOopClosure* code_roots);

public:
static const bool StrongAndWeakRoots = false;
static const bool StrongRootsOnly = true;
void process_string_table_roots(StrongRootsScope* scope,
OopClosure* root_closure);

void gen_process_roots(StrongRootsScope* scope,
GenerationType type,
public:
void young_process_roots(StrongRootsScope* scope,
OopsInGenClosure* root_closure,
OopsInGenClosure* old_gen_closure,
CLDClosure* cld_closure);

// If "young_gen_as_roots" is false, younger generations are
// not scanned as roots; in this case, the caller must be arranging to
// scan the younger generations itself. (For example, a generation might
// explicitly mark reachable objects in younger generations, to avoid
// excess storage retention.)
void cms_process_roots(StrongRootsScope* scope,
bool young_gen_as_roots,
ScanningOption so,
bool only_strong_roots,
OopsInGenClosure* not_older_gens,
OopsInGenClosure* older_gens,
OopsInGenClosure* root_closure,
CLDClosure* cld_closure);

void full_process_roots(StrongRootsScope* scope,
bool is_adjust_phase,
ScanningOption so,
bool only_strong_roots,
OopsInGenClosure* root_closure,
CLDClosure* cld_closure);

// Apply "root_closure" to all the weak roots of the system.
// These include JNI weak roots, string table,
// and referents of reachable weak refs.

@ -40,6 +40,7 @@

class InvocationCounter VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
friend class JVMCIVMStructs;
friend class ciReplay;
private: // bit no: |31 3| 2 | 1 0 |
unsigned int _counter; // format: [count|carry|state]

@ -858,8 +858,10 @@ methodHandle LinkResolver::resolve_interface_method(const LinkInfo& link_info, B
}

if (log_develop_is_enabled(Trace, itables)) {
trace_method_resolution("invokeinterface resolved method: caller-class",
link_info.current_klass(), resolved_klass,
char buf[200];
jio_snprintf(buf, sizeof(buf), "%s resolved interface method: caller-class:",
Bytecodes::name(code));
trace_method_resolution(buf, link_info.current_klass(), resolved_klass,
resolved_method, true);
}

@ -1424,7 +1426,7 @@ void LinkResolver::runtime_resolve_interface_method(CallInfo& result,
}

if (log_develop_is_enabled(Trace, itables)) {
trace_method_resolution("invokeinterface selected method: receiver-class",
trace_method_resolution("invokeinterface selected method: receiver-class:",
recv_klass, resolved_klass, sel_method, true);
}
// setup result

@ -172,7 +172,7 @@ OopMap* CodeInstaller::create_oop_map(Handle debug_info, TRAPS) {
return map;
}

void* CodeInstaller::record_metadata_reference(Handle constant, TRAPS) {
void* CodeInstaller::record_metadata_reference(CodeSection* section, address dest, Handle constant, TRAPS) {
/*
* This method needs to return a raw (untyped) pointer, since the value of a pointer to the base
* class is in general not equal to the pointer of the subclass. When patching metaspace pointers,
@ -184,12 +184,14 @@ void* CodeInstaller::record_metadata_reference(Handle constant, TRAPS) {
Klass* klass = java_lang_Class::as_Klass(HotSpotResolvedObjectTypeImpl::javaClass(obj));
assert(!HotSpotMetaspaceConstantImpl::compressed(constant), "unexpected compressed klass pointer %s @ " INTPTR_FORMAT, klass->name()->as_C_string(), p2i(klass));
int index = _oop_recorder->find_index(klass);
section->relocate(dest, metadata_Relocation::spec(index));
TRACE_jvmci_3("metadata[%d of %d] = %s", index, _oop_recorder->metadata_count(), klass->name()->as_C_string());
return klass;
} else if (obj->is_a(HotSpotResolvedJavaMethodImpl::klass())) {
Method* method = (Method*) (address) HotSpotResolvedJavaMethodImpl::metaspaceMethod(obj);
assert(!HotSpotMetaspaceConstantImpl::compressed(constant), "unexpected compressed method pointer %s @ " INTPTR_FORMAT, method->name()->as_C_string(), p2i(method));
int index = _oop_recorder->find_index(method);
section->relocate(dest, metadata_Relocation::spec(index));
TRACE_jvmci_3("metadata[%d of %d] = %s", index, _oop_recorder->metadata_count(), method->name()->as_C_string());
return method;
} else {
@ -198,7 +200,7 @@ void* CodeInstaller::record_metadata_reference(Handle constant, TRAPS) {
}

#ifdef _LP64
narrowKlass CodeInstaller::record_narrow_metadata_reference(Handle constant, TRAPS) {
narrowKlass CodeInstaller::record_narrow_metadata_reference(CodeSection* section, address dest, Handle constant, TRAPS) {
oop obj = HotSpotMetaspaceConstantImpl::metaspaceObject(constant);
assert(HotSpotMetaspaceConstantImpl::compressed(constant), "unexpected uncompressed pointer");

@ -208,6 +210,7 @@ narrowKlass CodeInstaller::record_narrow_metadata_reference(Handle constant, TRA

Klass* klass = java_lang_Class::as_Klass(HotSpotResolvedObjectTypeImpl::javaClass(obj));
int index = _oop_recorder->find_index(klass);
section->relocate(dest, metadata_Relocation::spec(index));
TRACE_jvmci_3("narrowKlass[%d of %d] = %s", index, _oop_recorder->metadata_count(), klass->name()->as_C_string());
return Klass::encode_klass(klass);
}
@ -701,12 +704,12 @@ JVMCIEnv::CodeInstallResult CodeInstaller::initialize_buffer(CodeBuffer& buffer,
if (constant->is_a(HotSpotMetaspaceConstantImpl::klass())) {
if (HotSpotMetaspaceConstantImpl::compressed(constant)) {
#ifdef _LP64
*((narrowKlass*) dest) = record_narrow_metadata_reference(constant, CHECK_OK);
*((narrowKlass*) dest) = record_narrow_metadata_reference(_constants, dest, constant, CHECK_OK);
#else
JVMCI_ERROR_OK("unexpected compressed Klass* in 32-bit mode");
#endif
} else {
*((void**) dest) = record_metadata_reference(constant, CHECK_OK);
*((void**) dest) = record_metadata_reference(_constants, dest, constant, CHECK_OK);
}
} else if (constant->is_a(HotSpotObjectConstantImpl::klass())) {
Handle obj = HotSpotObjectConstantImpl::object(constant);

@ -189,9 +189,9 @@ protected:
ScopeValue* get_scope_value(Handle value, BasicType type, GrowableArray<ScopeValue*>* objects, ScopeValue* &second, TRAPS);
MonitorValue* get_monitor_value(Handle value, GrowableArray<ScopeValue*>* objects, TRAPS);

void* record_metadata_reference(Handle constant, TRAPS);
void* record_metadata_reference(CodeSection* section, address dest, Handle constant, TRAPS);
#ifdef _LP64
narrowKlass record_narrow_metadata_reference(Handle constant, TRAPS);
narrowKlass record_narrow_metadata_reference(CodeSection* section, address dest, Handle constant, TRAPS);
#endif

// extract the fields of the HotSpotCompiledCode

@ -640,8 +640,6 @@ JVM_ENTRY(jobject, JVM_GetJVMCIRuntime(JNIEnv *env, jclass c))
JVM_END

Handle JVMCIRuntime::callStatic(const char* className, const char* methodName, const char* signature, JavaCallArguments* args, TRAPS) {
guarantee(!_HotSpotJVMCIRuntime_initialized, "cannot reinitialize HotSpotJVMCIRuntime");

TempNewSymbol name = SymbolTable::new_symbol(className, CHECK_(Handle()));
KlassHandle klass = SystemDictionary::resolve_or_fail(name, true, CHECK_(Handle()));
TempNewSymbol runtime = SymbolTable::new_symbol(methodName, CHECK_(Handle()));
@ -656,42 +654,37 @@ Handle JVMCIRuntime::callStatic(const char* className, const char* methodName, c
}

void JVMCIRuntime::initialize_HotSpotJVMCIRuntime(TRAPS) {
if (JNIHandles::resolve(_HotSpotJVMCIRuntime_instance) == NULL) {
ResourceMark rm;
#ifdef ASSERT
// This should only be called in the context of the JVMCI class being initialized
TempNewSymbol name = SymbolTable::new_symbol("jdk/vm/ci/runtime/JVMCI", CHECK);
Klass* k = SystemDictionary::resolve_or_null(name, CHECK);
instanceKlassHandle klass = InstanceKlass::cast(k);
assert(klass->is_being_initialized() && klass->is_reentrant_initialization(THREAD),
"HotSpotJVMCIRuntime initialization should only be triggered through JVMCI initialization");
#endif
guarantee(!_HotSpotJVMCIRuntime_initialized, "cannot reinitialize HotSpotJVMCIRuntime");
JVMCIRuntime::initialize_well_known_classes(CHECK);
// This should only be called in the context of the JVMCI class being initialized
instanceKlassHandle klass = InstanceKlass::cast(SystemDictionary::JVMCI_klass());
guarantee(klass->is_being_initialized() && klass->is_reentrant_initialization(THREAD),
"HotSpotJVMCIRuntime initialization should only be triggered through JVMCI initialization");

Handle result = callStatic("jdk/vm/ci/hotspot/HotSpotJVMCIRuntime",
"runtime",
"()Ljdk/vm/ci/hotspot/HotSpotJVMCIRuntime;", NULL, CHECK);
objArrayOop trivial_prefixes = HotSpotJVMCIRuntime::trivialPrefixes(result);
if (trivial_prefixes != NULL) {
char** prefixes = NEW_C_HEAP_ARRAY(char*, trivial_prefixes->length(), mtCompiler);
for (int i = 0; i < trivial_prefixes->length(); i++) {
oop str = trivial_prefixes->obj_at(i);
if (str == NULL) {
THROW(vmSymbols::java_lang_NullPointerException());
} else {
prefixes[i] = strdup(java_lang_String::as_utf8_string(str));
}
Handle result = callStatic("jdk/vm/ci/hotspot/HotSpotJVMCIRuntime",
"runtime",
"()Ljdk/vm/ci/hotspot/HotSpotJVMCIRuntime;", NULL, CHECK);
objArrayOop trivial_prefixes = HotSpotJVMCIRuntime::trivialPrefixes(result);
if (trivial_prefixes != NULL) {
char** prefixes = NEW_C_HEAP_ARRAY(char*, trivial_prefixes->length(), mtCompiler);
for (int i = 0; i < trivial_prefixes->length(); i++) {
oop str = trivial_prefixes->obj_at(i);
if (str == NULL) {
THROW(vmSymbols::java_lang_NullPointerException());
} else {
prefixes[i] = strdup(java_lang_String::as_utf8_string(str));
}
_trivial_prefixes = prefixes;
_trivial_prefixes_count = trivial_prefixes->length();
}
int adjustment = HotSpotJVMCIRuntime::compilationLevelAdjustment(result);
assert(adjustment >= JVMCIRuntime::none &&
adjustment <= JVMCIRuntime::by_full_signature,
"compilation level adjustment out of bounds");
_comp_level_adjustment = (CompLevelAdjustment) adjustment;
_HotSpotJVMCIRuntime_initialized = true;
_HotSpotJVMCIRuntime_instance = JNIHandles::make_global(result());
_trivial_prefixes = prefixes;
_trivial_prefixes_count = trivial_prefixes->length();
}
int adjustment = HotSpotJVMCIRuntime::compilationLevelAdjustment(result);
assert(adjustment >= JVMCIRuntime::none &&
adjustment <= JVMCIRuntime::by_full_signature,
"compilation level adjustment out of bounds");
_comp_level_adjustment = (CompLevelAdjustment) adjustment;
_HotSpotJVMCIRuntime_initialized = true;
_HotSpotJVMCIRuntime_instance = JNIHandles::make_global(result());
}

void JVMCIRuntime::initialize_JVMCI(TRAPS) {

@ -85,6 +85,7 @@ bool JVMCIGlobals::check_jvmci_flags_are_consistent() {
CHECK_NOT_SET(JVMCIUseFastLocking, EnableJVMCI)
CHECK_NOT_SET(JVMCINMethodSizeLimit, EnableJVMCI)
CHECK_NOT_SET(MethodProfileWidth, EnableJVMCI)
CHECK_NOT_SET(JVMCIPrintProperties, EnableJVMCI)
CHECK_NOT_SET(TraceUncollectedSpeculations, EnableJVMCI)

#ifndef PRODUCT

@ -49,6 +49,9 @@
experimental(bool, UseJVMCICompiler, false, \
"Use JVMCI as the default compiler") \
\
experimental(bool, JVMCIPrintProperties, false, \
"Prints properties used by the JVMCI compiler") \
\
experimental(bool, BootstrapJVMCI, false, \
"Bootstrap JVMCI before running Java main method") \
\

@ -29,6 +29,7 @@
#else
#define JVMCI_WK_KLASSES_DO(do_klass) \
/* JVMCI classes. These are loaded on-demand. */ \
do_klass(JVMCI_klass, jdk_vm_ci_runtime_JVMCI, Jvmci) \
do_klass(HotSpotCompiledCode_klass, jdk_vm_ci_hotspot_HotSpotCompiledCode, Jvmci) \
do_klass(HotSpotCompiledCode_Comment_klass, jdk_vm_ci_hotspot_HotSpotCompiledCode_Comment, Jvmci) \
do_klass(HotSpotCompiledNmethod_klass, jdk_vm_ci_hotspot_HotSpotCompiledNmethod, Jvmci) \

@ -169,6 +169,8 @@
nonstatic_field(JVMCIEnv, _task, CompileTask*) \
nonstatic_field(JVMCIEnv, _jvmti_can_hotswap_or_post_breakpoint, bool) \
\
nonstatic_field(InvocationCounter, _counter, unsigned int) \
\
nonstatic_field(Klass, _secondary_super_cache, Klass*) \
nonstatic_field(Klass, _secondary_supers, Array<Klass*>*) \
nonstatic_field(Klass, _super, Klass*) \
@ -199,13 +201,34 @@
volatile_nonstatic_field(Method, _code, CompiledMethod*) \
volatile_nonstatic_field(Method, _from_compiled_entry, address) \
\
nonstatic_field(MethodCounters, _nmethod_age, int) \
nonstatic_field(MethodCounters, _interpreter_invocation_limit, int) \
nonstatic_field(MethodCounters, _interpreter_backward_branch_limit, int) \
nonstatic_field(MethodCounters, _interpreter_profile_limit, int) \
nonstatic_field(MethodCounters, _invoke_mask, int) \
nonstatic_field(MethodCounters, _backedge_mask, int) \
nonstatic_field(MethodCounters, _interpreter_invocation_count, int) \
nonstatic_field(MethodCounters, _interpreter_throwout_count, u2) \
JVMTI_ONLY(nonstatic_field(MethodCounters, _number_of_breakpoints, u2)) \
nonstatic_field(MethodCounters, _invocation_counter, InvocationCounter) \
nonstatic_field(MethodCounters, _backedge_counter, InvocationCounter) \
\
nonstatic_field(MethodData, _size, int) \
nonstatic_field(MethodData, _method, Method*) \
nonstatic_field(MethodData, _data_size, int) \
nonstatic_field(MethodData, _data[0], intptr_t) \
nonstatic_field(MethodData, _parameters_type_data_di, int) \
nonstatic_field(MethodData, _nof_decompiles, uint) \
nonstatic_field(MethodData, _nof_overflow_recompiles, uint) \
nonstatic_field(MethodData, _nof_overflow_traps, uint) \
nonstatic_field(MethodData, _trap_hist._array[0], u1) \
nonstatic_field(MethodData, _eflags, intx) \
nonstatic_field(MethodData, _arg_local, intx) \
nonstatic_field(MethodData, _arg_stack, intx) \
nonstatic_field(MethodData, _arg_returned, intx) \
nonstatic_field(MethodData, _tenure_traps, uint) \
nonstatic_field(MethodData, _invoke_mask, int) \
nonstatic_field(MethodData, _backedge_mask, int) \
nonstatic_field(MethodData, _jvmci_ir_size, int) \
\
nonstatic_field(nmethod, _verified_entry_point, address) \
@ -290,6 +313,7 @@
declare_toplevel_type(ExceptionTableElement) \
declare_toplevel_type(Flag) \
declare_toplevel_type(Flag*) \
declare_toplevel_type(InvocationCounter) \
declare_toplevel_type(JVMCIEnv) \
declare_toplevel_type(LocalVariableTableElement) \
declare_toplevel_type(narrowKlass) \
@ -688,7 +712,6 @@
declare_constant(VM_Version::sun4v_m) \
declare_constant(VM_Version::blk_init_instructions_m) \
declare_constant(VM_Version::fmaf_instructions_m) \
declare_constant(VM_Version::fmau_instructions_m) \
declare_constant(VM_Version::sparc64_family_m) \
declare_constant(VM_Version::M_family_m) \
declare_constant(VM_Version::T_family_m) \

@ -29,6 +29,7 @@
#define JVMCI_VM_SYMBOLS_DO(template, do_alias)
#else
#define JVMCI_VM_SYMBOLS_DO(template, do_alias) \
template(jdk_vm_ci_runtime_JVMCI, "jdk/vm/ci/runtime/JVMCI") \
template(jdk_vm_ci_hotspot_HotSpotCompiledCode, "jdk/vm/ci/hotspot/HotSpotCompiledCode") \
template(jdk_vm_ci_hotspot_HotSpotCompiledCode_Comment, "jdk/vm/ci/hotspot/HotSpotCompiledCode$Comment") \
template(jdk_vm_ci_hotspot_HotSpotCompiledNmethod, "jdk/vm/ci/hotspot/HotSpotCompiledNmethod") \

File diff suppressed because it is too large
@ -97,11 +97,7 @@ static bool is_regular_file(const char* filename) {
if (ret != 0) {
return false;
}
#ifdef _WINDOWS
return (st.st_mode & S_IFMT) == _S_IFREG;
#else
return S_ISREG(st.st_mode);
#endif
return (st.st_mode & S_IFMT) == S_IFREG;
}

// Try to find the next number that should be used for file rotation.

@ -40,6 +40,7 @@
LOG_TAG(attach) \
LOG_TAG(barrier) \
LOG_TAG(biasedlocking) \
LOG_TAG(blocks) \
LOG_TAG(bot) \
LOG_TAG(breakpoint) \
LOG_TAG(census) \
@ -105,6 +106,7 @@
LOG_TAG(scavenge) \
LOG_TAG(scrub) \
LOG_TAG(stacktrace) \
LOG_TAG(stackwalk) \
LOG_TAG(start) \
LOG_TAG(startuptime) \
LOG_TAG(state) \

@ -263,7 +263,7 @@ void FileMapInfo::allocate_classpath_entry_table() {
} else {
struct stat st;
if (os::stat(name, &st) == 0) {
if ((st.st_mode & S_IFDIR) == S_IFDIR) {
if ((st.st_mode & S_IFMT) == S_IFDIR) {
if (!os::dir_is_empty(name)) {
ClassLoader::exit_with_path_failure(
"Cannot have non-empty directory in archived classpaths", name);

@ -283,11 +283,15 @@ public:
bool validate_classpath_entry_table();

static SharedClassPathEntry* shared_classpath(int index) {
if (index < 0) {
return NULL;
}
char* p = (char*)_classpath_entry_table;
p += _classpath_entry_size * index;
return (SharedClassPathEntry*)p;
}
static const char* shared_classpath_name(int index) {
assert(index >= 0, "Sanity");
return shared_classpath(index)->_name;
}

@ -249,10 +249,65 @@ class ChunkManager : public CHeapObj<mtInternal> {
|
||||
void print_on(outputStream* st) const;
|
||||
};
|
||||
|
||||
class SmallBlocks : public CHeapObj<mtClass> {
|
||||
const static uint _small_block_max_size = sizeof(TreeChunk<Metablock, FreeList<Metablock> >)/HeapWordSize;
|
||||
const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;
|
||||
|
||||
private:
|
||||
FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];
|
||||
|
||||
FreeList<Metablock>& list_at(size_t word_size) {
|
||||
assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
|
||||
return _small_lists[word_size - _small_block_min_size];
|
||||
}
|
||||
|
||||
public:
|
||||
SmallBlocks() {
|
||||
for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
|
||||
uint k = i - _small_block_min_size;
|
||||
_small_lists[k].set_size(i);
|
||||
}
|
||||
}
|
||||
|
||||
size_t total_size() const {
|
||||
size_t result = 0;
|
||||
for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
|
||||
uint k = i - _small_block_min_size;
|
||||
result = result + _small_lists[k].count() * _small_lists[k].size();
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
static uint small_block_max_size() { return _small_block_max_size; }
|
||||
static uint small_block_min_size() { return _small_block_min_size; }
|
||||
|
||||
MetaWord* get_block(size_t word_size) {
|
||||
if (list_at(word_size).count() > 0) {
|
||||
MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
|
||||
return new_block;
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
void return_block(Metablock* free_chunk, size_t word_size) {
|
||||
list_at(word_size).return_chunk_at_head(free_chunk, false);
|
||||
assert(list_at(word_size).count() > 0, "Should have a chunk");
|
||||
}
|
||||
|
||||
void print_on(outputStream* st) const {
|
||||
st->print_cr("SmallBlocks:");
|
||||
for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
|
||||
uint k = i - _small_block_min_size;
|
||||
st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Used to manage the free list of Metablocks (a block corresponds
|
||||
// to the allocation of a quantum of metadata).
|
||||
class BlockFreelist VALUE_OBJ_CLASS_SPEC {
|
||||
class BlockFreelist : public CHeapObj<mtClass> {
|
||||
BlockTreeDictionary* const _dictionary;
|
||||
SmallBlocks* _small_blocks;
|
||||
|
||||
// Only allocate and split from freelist if the size of the allocation
|
||||
// is at least 1/4th the size of the available block.
|
||||
@ -260,6 +315,12 @@ class BlockFreelist VALUE_OBJ_CLASS_SPEC {
|
||||
|
||||
// Accessors
|
||||
BlockTreeDictionary* dictionary() const { return _dictionary; }
|
||||
SmallBlocks* small_blocks() {
|
||||
if (_small_blocks == NULL) {
|
||||
_small_blocks = new SmallBlocks();
|
||||
}
|
||||
return _small_blocks;
|
||||
}
|
||||
|
||||
public:
|
||||
BlockFreelist();
|
||||
@ -269,8 +330,15 @@ class BlockFreelist VALUE_OBJ_CLASS_SPEC {
|
||||
MetaWord* get_block(size_t word_size);
|
||||
void return_block(MetaWord* p, size_t word_size);
|
||||
|
||||
size_t total_size() { return dictionary()->total_size(); }
|
||||
size_t total_size() const {
|
||||
size_t result = dictionary()->total_size();
|
||||
if (_small_blocks != NULL) {
|
||||
result = result + _small_blocks->total_size();
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
static size_t min_dictionary_size() { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
|
||||
void print_on(outputStream* st) const;
|
||||
};
|
||||
|
||||
@ -629,7 +697,7 @@ class SpaceManager : public CHeapObj<mtClass> {
|
||||
// are assumed to be in chunks in use by the SpaceManager
|
||||
// and all chunks in use by a SpaceManager are freed when
|
||||
// the class loader using the SpaceManager is collected.
|
||||
BlockFreelist _block_freelists;
|
||||
BlockFreelist* _block_freelists;
|
||||
|
||||
// protects virtualspace and chunk expansions
|
||||
static const char* _expand_lock_name;
|
||||
@ -643,9 +711,7 @@ class SpaceManager : public CHeapObj<mtClass> {
|
||||
_chunks_in_use[index] = v;
|
||||
}
|
||||
|
||||
BlockFreelist* block_freelists() const {
|
||||
return (BlockFreelist*) &_block_freelists;
|
||||
}
|
||||
BlockFreelist* block_freelists() const { return _block_freelists; }
|
||||
|
||||
Metaspace::MetadataType mdtype() { return _mdtype; }
|
||||
|
||||
@ -763,7 +829,9 @@ class SpaceManager : public CHeapObj<mtClass> {
|
||||
void verify_allocated_blocks_words();
|
||||
#endif
|
||||
|
||||
size_t get_raw_word_size(size_t word_size) {
|
||||
// This adjusts the size given to be greater than the minimum allocation size in
|
||||
// words for data in metaspace. Esentially the minimum size is currently 3 words.
|
||||
size_t get_allocation_word_size(size_t word_size) {
|
||||
size_t byte_size = word_size * BytesPerWord;
|
||||
|
||||
size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
|
||||
@ -807,20 +875,45 @@ void VirtualSpaceNode::verify_container_count() {
|
||||
|
||||
// BlockFreelist methods
|
||||
|
||||
BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()) {}
|
||||
BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}
|
||||
|
||||
BlockFreelist::~BlockFreelist() {
|
||||
delete _dictionary;
|
||||
if (_small_blocks != NULL) {
|
||||
delete _small_blocks;
|
||||
}
|
||||
}
|
||||
|
||||
void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
|
||||
assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");
|
||||
|
||||
Metablock* free_chunk = ::new (p) Metablock(word_size);
|
||||
if (word_size < SmallBlocks::small_block_max_size()) {
|
||||
small_blocks()->return_block(free_chunk, word_size);
|
||||
} else {
|
||||
dictionary()->return_chunk(free_chunk);
|
||||
}
|
||||
log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
|
||||
SIZE_FORMAT, p2i(free_chunk), word_size);
|
||||
}
|
||||
|
||||
MetaWord* BlockFreelist::get_block(size_t word_size) {
|
||||
if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
|
||||
// Dark matter. Too small for dictionary.
|
||||
assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");
|
||||
|
||||
// Try small_blocks first.
|
||||
if (word_size < SmallBlocks::small_block_max_size()) {
|
||||
// Don't create small_blocks() until needed. small_blocks() allocates the small block list for
|
||||
// this space manager.
|
||||
MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
|
||||
if (new_block != NULL) {
|
||||
log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
|
||||
p2i(new_block), word_size);
|
||||
return new_block;
|
||||
}
|
||||
}
|
||||
|
||||
if (word_size < BlockFreelist::min_dictionary_size()) {
|
||||
// If allocation in small blocks fails, this is Dark Matter. Too small for dictionary.
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -839,15 +932,20 @@ MetaWord* BlockFreelist::get_block(size_t word_size) {
|
||||
MetaWord* new_block = (MetaWord*)free_block;
|
||||
assert(block_size >= word_size, "Incorrect size of block from freelist");
|
||||
const size_t unused = block_size - word_size;
|
||||
if (unused >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
|
||||
if (unused >= SmallBlocks::small_block_min_size()) {
|
||||
return_block(new_block + word_size, unused);
|
||||
}
|
||||
|
||||
log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
|
||||
p2i(new_block), word_size);
|
||||
return new_block;
|
||||
}
|
||||
|
||||
void BlockFreelist::print_on(outputStream* st) const {
|
||||
dictionary()->print_free_lists(st);
|
||||
if (_small_blocks != NULL) {
|
||||
_small_blocks->print_on(st);
|
||||
}
|
||||
}
|
||||
|
||||
// VirtualSpaceNode methods
|
||||
@ -2075,6 +2173,7 @@ SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
|
||||
_allocated_blocks_words(0),
|
||||
_allocated_chunks_words(0),
|
||||
_allocated_chunks_count(0),
|
||||
_block_freelists(NULL),
|
||||
_lock(lock)
|
||||
{
|
||||
initialize();
|
||||
@ -2164,8 +2263,10 @@ SpaceManager::~SpaceManager() {
|
||||
log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
|
||||
ResourceMark rm;
|
||||
locked_print_chunks_in_use_on(log.trace_stream());
|
||||
if (block_freelists() != NULL) {
|
||||
block_freelists()->print_on(log.trace_stream());
|
||||
}
|
||||
}
|
||||
|
||||
// Have to update before the chunks_in_use lists are emptied
|
||||
// below.
|
||||
@ -2215,6 +2316,10 @@ SpaceManager::~SpaceManager() {
|
||||
}
|
||||
log.trace("updated dictionary count " SIZE_FORMAT " %s", chunk_manager()->humongous_dictionary()->total_count(), chunk_size_name(HumongousIndex));
|
||||
chunk_manager()->slow_locked_verify();
|
||||
|
||||
if (_block_freelists != NULL) {
|
||||
delete _block_freelists;
|
||||
}
|
||||
}
|
||||
|
||||
const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
|
||||
@ -2253,10 +2358,12 @@ ChunkIndex ChunkManager::list_index(size_t size) {
|
||||
|
||||
void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
|
||||
assert_lock_strong(_lock);
|
||||
size_t raw_word_size = get_raw_word_size(word_size);
|
||||
size_t min_size = TreeChunk<Metablock, FreeList<Metablock> >::min_size();
|
||||
assert(raw_word_size >= min_size,
|
||||
"Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size);
|
||||
// Allocations and deallocations are in raw_word_size
|
||||
size_t raw_word_size = get_allocation_word_size(word_size);
|
||||
// Lazily create a block_freelist
|
||||
if (block_freelists() == NULL) {
|
||||
_block_freelists = new BlockFreelist();
|
||||
}
|
||||
block_freelists()->return_block(p, raw_word_size);
|
||||
}
|
||||
|
||||
@ -2312,8 +2419,9 @@ void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
|
||||
void SpaceManager::retire_current_chunk() {
|
||||
if (current_chunk() != NULL) {
|
||||
size_t remaining_words = current_chunk()->free_word_size();
|
||||
if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
|
||||
block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
|
||||
if (remaining_words >= BlockFreelist::min_dictionary_size()) {
|
||||
MetaWord* ptr = current_chunk()->allocate(remaining_words);
|
||||
deallocate(ptr, remaining_words);
|
||||
inc_used_metrics(remaining_words);
|
||||
}
|
||||
}
|
||||

@ -2350,7 +2458,7 @@ Metachunk* SpaceManager::get_new_chunk(size_t word_size,
 * will be made to allocate a small chunk.
 */
MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {
  size_t raw_word_size = get_raw_word_size(word_size);
  size_t raw_word_size = get_allocation_word_size(word_size);

  if (raw_word_size + Metachunk::overhead() > small_chunk_size()) {
    return NULL;

@ -2380,8 +2488,7 @@ MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {

MetaWord* SpaceManager::allocate(size_t word_size) {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);

  size_t raw_word_size = get_raw_word_size(word_size);
  size_t raw_word_size = get_allocation_word_size(word_size);
  BlockFreelist* fl = block_freelists();
  MetaWord* p = NULL;
  // Allocation from the dictionary is expensive in the sense that

@ -2389,7 +2496,7 @@ MetaWord* SpaceManager::allocate(size_t word_size) {
  // from the dictionary until it starts to get fat. Is this
  // a reasonable policy? Maybe an skinny dictionary is fast enough
  // for allocations. Do some profiling. JJJ
  if (fl->total_size() > allocation_from_dictionary_limit) {
  if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
    p = fl->get_block(raw_word_size);
  }
  if (p == NULL) {
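
Because the freelist is now created lazily, SpaceManager::allocate() above has to tolerate a NULL block_freelists() and only consults it once it has grown past allocation_from_dictionary_limit, otherwise falling through to the current chunk. The standalone sketch below shows that shape; the limit value and the bump allocator are illustrative assumptions, not HotSpot code.

    #include <cstddef>
    #include <cstdint>

    struct FreelistSketch {
      size_t total_words = 0;
      void* get_block(size_t /*words*/) { return nullptr; }  // dictionary lookup elided
    };

    static const size_t kDictionaryLimitWords = 4 * 1024;    // assumed threshold

    void* allocate_sketch(FreelistSketch* fl, size_t raw_word_size) {
      static uint64_t chunk[64 * 1024];   // stand-in for the current Metachunk
      static size_t   top = 0;

      void* p = nullptr;
      // Only search the dictionary if it exists and has become "fat" enough to be worth it.
      if (fl != nullptr && fl->total_words > kDictionaryLimitWords) {
        p = fl->get_block(raw_word_size);
      }
      if (p == nullptr && top + raw_word_size <= sizeof(chunk) / sizeof(chunk[0])) {
        p = &chunk[top];                  // bump allocation from the current chunk
        top += raw_word_size;
      }
      return p;                           // NULL here would mean "get a new chunk"
    }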

@ -2441,7 +2548,7 @@ void SpaceManager::verify() {
  // If there are blocks in the dictionary, then
  // verification of chunks does not work since
  // being in the dictionary alters a chunk.
  if (block_freelists()->total_size() == 0) {
  if (block_freelists() != NULL && block_freelists()->total_size() == 0) {
    for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
      Metachunk* curr = chunks_in_use(i);
      while (curr != NULL) {

@ -2499,7 +2606,7 @@ void SpaceManager::dump(outputStream* const out) const {
  }

  if (log_is_enabled(Trace, gc, metaspace, freelist)) {
    block_freelists()->print_on(out);
    if (block_freelists() != NULL) block_freelists()->print_on(out);
  }

  size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();

@ -3410,18 +3517,11 @@ void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
         || Thread::current()->is_VM_thread(), "should be the VM thread");

  if (DumpSharedSpaces && PrintSharedSpaces) {
    record_deallocation(ptr, vsm()->get_raw_word_size(word_size));
    record_deallocation(ptr, vsm()->get_allocation_word_size(word_size));
  }

  MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);

  if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    // Dark matter. Too small for dictionary.
#ifdef ASSERT
    Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
#endif
    return;
  }
  if (is_class && using_class_space()) {
    class_vsm()->deallocate(ptr, word_size);
  } else {

@ -3451,7 +3551,7 @@ MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
      report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
    }
    if (PrintSharedSpaces) {
      space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
      space->record_allocation(result, type, space->vsm()->get_allocation_word_size(word_size));
    }

    // Zero initialize.

@ -3509,10 +3609,11 @@ void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_s

  // If result is still null, we are out of memory.
  Log(gc, metaspace, freelist) log;
  if (log.is_trace()) {
    log.trace("Metaspace allocation failed for size " SIZE_FORMAT, word_size);
  if (log.is_info()) {
    log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
             is_class_space_allocation(mdtype) ? "class" : "data", word_size);
    ResourceMark rm;
    outputStream* out = log.trace_stream();
    outputStream* out = log.info_stream();
    if (loader_data->metaspace_or_null() != NULL) {
      loader_data->dump(out);
    }

@ -368,23 +368,36 @@ AnnotationArray** ConstMethod::default_annotations_addr() const {
  return (AnnotationArray**)constMethod_end() - offset;
}

Array<u1>* copy_annotations(ClassLoaderData* loader_data, AnnotationArray* from, TRAPS) {
  int length = from->length();
  Array<u1>* a = MetadataFactory::new_array<u1>(loader_data, length, 0, CHECK_NULL);
  memcpy((void*)a->adr_at(0), (void*)from->adr_at(0), length);
  return a;
}

// copy annotations from 'cm' to 'this'
void ConstMethod::copy_annotations_from(ConstMethod* cm) {
// Must make copy because these are deallocated with their constMethod, if redefined.
void ConstMethod::copy_annotations_from(ClassLoaderData* loader_data, ConstMethod* cm, TRAPS) {
  Array<u1>* a;
  if (cm->has_method_annotations()) {
    assert(has_method_annotations(), "should be allocated already");
    set_method_annotations(cm->method_annotations());
    a = copy_annotations(loader_data, cm->method_annotations(), CHECK);
    set_method_annotations(a);
  }
  if (cm->has_parameter_annotations()) {
    assert(has_parameter_annotations(), "should be allocated already");
    set_parameter_annotations(cm->parameter_annotations());
    a = copy_annotations(loader_data, cm->parameter_annotations(), CHECK);
    set_parameter_annotations(a);
  }
  if (cm->has_type_annotations()) {
    assert(has_type_annotations(), "should be allocated already");
    set_type_annotations(cm->type_annotations());
    a = copy_annotations(loader_data, cm->type_annotations(), CHECK);
    set_type_annotations(a);
  }
  if (cm->has_default_annotations()) {
    assert(has_default_annotations(), "should be allocated already");
    set_default_annotations(cm->default_annotations());
    a = copy_annotations(loader_data, cm->default_annotations(), CHECK);
    set_default_annotations(a);
  }
}
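
The new file-scope copy_annotations() helper above gives the receiving ConstMethod its own copy of each annotation array instead of sharing the redefined method's arrays, so freeing the old ConstMethod cannot leave dangling pointers. Below is a simplified standalone sketch of that allocate-and-copy step; the struct and names are illustrative, not HotSpot metadata types.

    #include <cstdint>
    #include <cstring>

    struct ByteArraySketch {
      uint8_t* data;
      int      length;
    };

    // Analogous in spirit to MetadataFactory::new_array<u1>() followed by memcpy() in the hunk above.
    ByteArraySketch copy_bytes(const ByteArraySketch& from) {
      ByteArraySketch a;
      a.length = from.length;
      a.data   = new uint8_t[from.length];
      std::memcpy(a.data, from.data, from.length);
      return a;
    }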

@ -469,7 +469,7 @@ public:
  }

  // Copy annotations from other ConstMethod
  void copy_annotations_from(ConstMethod* cm);
  void copy_annotations_from(ClassLoaderData* loader_data, ConstMethod* cm, TRAPS);

  // byte codes
  void set_code(address code) {

@ -674,20 +674,20 @@ void InstanceKlass::link_methods(TRAPS) {

// Eagerly initialize superinterfaces that declare default methods (concrete instance: any access)
void InstanceKlass::initialize_super_interfaces(instanceKlassHandle this_k, TRAPS) {
  assert (this_k->has_default_methods(), "caller should have checked this");
  assert (this_k->has_nonstatic_concrete_methods(), "caller should have checked this");
  for (int i = 0; i < this_k->local_interfaces()->length(); ++i) {
    Klass* iface = this_k->local_interfaces()->at(i);
    InstanceKlass* ik = InstanceKlass::cast(iface);

    // Initialization is depth first search ie. we start with top of the inheritance tree
    // has_default_methods drives searching superinterfaces since it
    // means has_default_methods in its superinterface hierarchy
    if (ik->has_default_methods()) {
    // has_nonstatic_concrete_methods drives searching superinterfaces since it
    // means has_nonstatic_concrete_methods in its superinterface hierarchy
    if (ik->has_nonstatic_concrete_methods()) {
      ik->initialize_super_interfaces(ik, CHECK);
    }

    // Only initialize() interfaces that "declare" concrete methods.
    if (ik->should_be_initialized() && ik->declares_default_methods()) {
    if (ik->should_be_initialized() && ik->declares_nonstatic_concrete_methods()) {
      ik->initialize(CHECK);
    }
  }
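
The initialize_super_interfaces() hunk above only renames the has_default_methods/declares_default_methods queries to their nonstatic-concrete counterparts, but the traversal shape is easy to miss in diff form: descend depth-first into superinterfaces whose hierarchy contains non-static concrete methods, and call initialize() only on those that declare such methods directly. The standalone sketch below illustrates that recursion; the struct and flags are illustrative, not HotSpot types.

    #include <vector>

    struct IfaceSketch {
      std::vector<IfaceSketch*> supers;
      bool has_nonstatic_concrete_methods      = false;  // anywhere in its superinterface hierarchy
      bool declares_nonstatic_concrete_methods = false;  // declared directly on this interface
      bool initialized                         = false;
    };

    void initialize_super_interfaces_sketch(IfaceSketch* k) {
      for (IfaceSketch* ik : k->supers) {
        // Depth first: a superinterface's own supers are handled before it is.
        if (ik->has_nonstatic_concrete_methods) {
          initialize_super_interfaces_sketch(ik);
        }
        // Only initialize interfaces that themselves declare concrete instance methods.
        if (!ik->initialized && ik->declares_nonstatic_concrete_methods) {
          ik->initialized = true;   // stand-in for ik->initialize(CHECK)
        }
      }
    }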

@ -761,11 +761,11 @@ void InstanceKlass::initialize_impl(instanceKlassHandle this_k, TRAPS) {
    if (super_klass != NULL && super_klass->should_be_initialized()) {
      super_klass->initialize(THREAD);
    }
    // If C implements any interfaces that declares a non-abstract, non-static method,
    // If C implements any interface that declares a non-static, concrete method,
    // the initialization of C triggers initialization of its super interfaces.
    // Only need to recurse if has_default_methods which includes declaring and
    // inheriting default methods
    if (!HAS_PENDING_EXCEPTION && this_k->has_default_methods()) {
    // Only need to recurse if has_nonstatic_concrete_methods which includes declaring and
    // having a superinterface that declares, non-static, concrete methods
    if (!HAS_PENDING_EXCEPTION && this_k->has_nonstatic_concrete_methods()) {
      this_k->initialize_super_interfaces(this_k, THREAD);
    }