Merge
commit 0603a70b79

.hgtags (4 lines changed)
@@ -140,3 +140,7 @@ b5060eae3b32fd9f884a09774338cd8186d7fafa jdk8-b15
736a63b854f321c7824b7e47890135f80aee05e3 jdk8-b16
f0eccb2946986fb9626efde7d8ed9c8192623f5c jdk8-b17
885050364691ac1ac978305c63f3368a197fb04d jdk8-b18
0ff7113a0882ec82d642cb9f0297b4e497807ced jdk8-b19
6561530ea757c3f3a6fb171c9cc7b3885cdeca85 jdk8-b20
b3a426170188f52981cf4573a2f14d487fddab0d jdk8-b21
e8f03541af27e38aafb619b96863e17f65ffe53b jdk8-b22
@@ -140,3 +140,7 @@ a4f28069d44a379cda99dd1d921d19f819726d22 jdk8-b15
4e06ae613e99549835896720c7a68c29ad5543f5 jdk8-b17
4e06ae613e99549835896720c7a68c29ad5543f5 jdk8-b16
7010bd24cdd07bc7daef80702f39124854dec36c jdk8-b18
237bc29afbfc6f56a4fe4a6008e2befb59c44bac jdk8-b19
5a5eaf6374bcbe23530899579fed17a05b7705f3 jdk8-b20
cc771d92284f71765eca14d6d08703c4af254c04 jdk8-b21
7ad075c809952e355d25030605da6af30456ed74 jdk8-b22
@@ -140,3 +140,7 @@ e59c47de1ad8982ff3b0e843773a6902b36c2337 jdk8-b14
82dc033975bb9b553b4ef97b6d483eda8de32e0f jdk8-b17
82dc033975bb9b553b4ef97b6d483eda8de32e0f jdk8-b16
312cf15d16577ef198b033d2a4cc0a52369b7343 jdk8-b18
e1366c5d84ef984095a332bcee70b3938232d07d jdk8-b19
51d8b6cb18c0978ecfa4f33e1537d35ee01b69fa jdk8-b20
f157fc2a71a38ce44007a6f18d5b011824dce705 jdk8-b21
a11d0062c445d5f36651c78650ab88aa594bcbff jdk8-b22
@@ -207,3 +207,9 @@ d1f29d4e0bc60e8bd7ae961f1306d8ab33290212 jdk8-b16
a2fef924d8e6f37dac2a887315e3502876cc8e24 hs23-b08
61165f53f1656b9f99e4fb806429bf98b99d59c3 jdk8-b18
4bcf61041217f8677dcec18e90e9196acc945bba hs23-b09
9232e0ecbc2cec54dcc8f93004fb00c214446460 jdk8-b19
fe2c8764998112b7fefcd7d41599714813ae4327 jdk8-b20
9952d1c439d64c5fd4ad1236a63a62bd5a49d4c3 jdk8-b21
513351373923f74a7c91755748b95c9771e59f96 hs23-b10
24727fb37561779077fdfa5a33342246f20e5c0f jdk8-b22
dcc292399a39113957eebbd3e487b7e05e2c79fc hs23-b11
@@ -367,7 +367,7 @@ endif

$(EXPORT_LIB_DIR)/%.jar: $(GEN_DIR)/%.jar
	$(install-file)

# Include files (jvmti.h, jvmticmlr.h, jni.h, $(JDK_INCLUDE_SUBDIR)/jni_md.h, jmm.h)
# Include files (jvmti.h, jvmticmlr.h, jni.h, $(JDK_INCLUDE_SUBDIR)/jni_md.h, jmm.h, jfr.h)
$(EXPORT_INCLUDE_DIR)/%: $(GEN_DIR)/jvmtifiles/%
	$(install-file)

@@ -384,6 +384,16 @@ $(EXPORT_INCLUDE_DIR)/$(JDK_INCLUDE_SUBDIR)/jni_md.h: $(HS_JNI_ARCH_SRC)
$(EXPORT_INCLUDE_DIR)/%: $(HS_SRC_DIR)/share/vm/services/%
	$(install-file)

JFR_EXISTS=$(shell if [ -d $(HS_ALT_SRC) ]; then echo 1; else echo 0; fi)
# export jfr.h
ifeq ($(JFR_EXISTS),1)
$(EXPORT_INCLUDE_DIR)/%: $(HS_ALT_SRC)/share/vm/jfr/agent/%
	$(install-file)
else
$(EXPORT_INCLUDE_DIR)/jfr.h:

endif

# Doc files (jvmti.html)
$(EXPORT_DOCS_DIR)/platform/jvmti/%: $(DOCS_DIR)/%
	$(install-file)
@@ -96,6 +96,10 @@ ifdef DEFAULT_LIBPATH
CPPFLAGS += -DDEFAULT_LIBPATH="\"$(DEFAULT_LIBPATH)\""
endif

ifndef JAVASE_EMBEDDED
CFLAGS += -DINCLUDE_TRACE
endif

# CFLAGS_WARN holds compiler options to suppress/enable warnings.
CFLAGS += $(CFLAGS_WARN/BYFILE)

@@ -147,6 +151,12 @@ SOURCE_PATHS+=$(HS_COMMON_SRC)/os/posix/vm
SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm
SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm

ifndef JAVASE_EMBEDDED
SOURCE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
  find $(HS_ALT_SRC)/share/vm/jfr -type d; \
fi)
endif

CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
CORE_PATHS+=$(GENERATED)/jvmtifiles
@@ -294,3 +294,7 @@ EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jvmticmlr.h
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jni.h
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/$(JDK_INCLUDE_SUBDIR)/jni_md.h
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jmm.h

ifndef JAVASE_EMBEDDED
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jfr.h
endif
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2011

HS_MAJOR_VER=23
HS_MINOR_VER=0
HS_BUILD_NUMBER=10
HS_BUILD_NUMBER=12

JDK_MAJOR_VER=1
JDK_MINOR_VER=8
@@ -98,6 +98,10 @@ CPPFLAGS = \
  ${JRE_VERSION} \
  ${VM_DISTRO}

ifndef JAVASE_EMBEDDED
CFLAGS += -DINCLUDE_TRACE
endif

# CFLAGS_WARN holds compiler options to suppress/enable warnings.
CFLAGS += $(CFLAGS_WARN/BYFILE)

@@ -143,6 +147,12 @@ SOURCE_PATHS+=$(HS_COMMON_SRC)/os/posix/vm
SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm
SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm

ifndef JAVASE_EMBEDDED
SOURCE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
  find $(HS_ALT_SRC)/share/vm/jfr -type d; \
fi)
endif

CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
CORE_PATHS+=$(GENERATED)/jvmtifiles
@@ -93,7 +93,7 @@ CFLAGS += $(CFLAGS_WARN)
CFLAGS += $(CFLAGS/NOEX)

# Extra flags from gnumake's invocation or environment
CFLAGS += $(EXTRA_CFLAGS)
CFLAGS += $(EXTRA_CFLAGS) -DINCLUDE_TRACE

# Math Library (libm.so), do not use -lm.
# There might be two versions of libm.so on the build system:

@@ -160,6 +160,10 @@ SOURCE_PATHS+=$(HS_COMMON_SRC)/os/posix/vm
SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm
SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm

SOURCE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
  find $(HS_ALT_SRC)/share/vm/jfr -type d; \
fi)

CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
CORE_PATHS+=$(GENERATED)/jvmtifiles
@@ -35,6 +35,8 @@ cl 2>&1 | grep "IA-64" >NUL
if %errorlevel% == 0 goto isia64
cl 2>&1 | grep "AMD64" >NUL
if %errorlevel% == 0 goto amd64
cl 2>&1 | grep "x64" >NUL
if %errorlevel% == 0 goto amd64
set ARCH=x86
set BUILDARCH=i486
set Platform_arch=x86
@@ -73,6 +73,13 @@ done

BASE_PATHS="${BASE_PATHS} ${GENERATED}/jvmtifiles"

if [ -d "${ALTSRC}/share/vm/jfr" ]; then
  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/agent"
  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/agent/isolated_deps/util"
  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/jvm"
  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr"
fi

CORE_PATHS="${BASE_PATHS}"
# shared is already in BASE_PATHS. Should add vm/memory but that one is also in BASE_PATHS.
if [ -d "${ALTSRC}/share/vm/gc_implementation" ]; then
@@ -58,7 +58,8 @@ ProjectCreatorIncludesPRIVATE=\
  -absoluteInclude $(HOTSPOTBUILDSPACE)/%f/generated \
  -ignorePath $(HOTSPOTBUILDSPACE)/%f/generated \
  -ignorePath src\share\vm\adlc \
  -ignorePath src\share\vm\shark
  -ignorePath src\share\vm\shark \
  -ignorePath posix

# This is referenced externally by both the IDE and batch builds
ProjectCreatorOptions=
@@ -88,7 +89,7 @@ ProjectCreatorIDEOptions=\
  -jdkTargetRoot $(HOTSPOTJDKDIST) \
  -define ALIGN_STACK_FRAMES \
  -define VM_LITTLE_ENDIAN \
  -prelink "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME) $(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(LINK_VER)" \
  -prelink "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME) set JAVA_HOME=$(HOTSPOTJDKDIST) $(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(LINK_VER)" \
  -postbuild "" "Building hotspot.exe..." "cd $(HOTSPOTBUILDSPACE)\%f\%b set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME) nmake -f $(HOTSPOTWORKSPACE)\make\windows\projectfiles\common\Makefile LOCAL_MAKE=$(HOTSPOTBUILDSPACE)\%f\local.make JAVA_HOME=$(HOTSPOTJDKDIST) launcher" \
  -ignoreFile jsig.c \
  -ignoreFile jvmtiEnvRecommended.cpp \
@@ -19,7 +19,7 @@
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
#

# Resource file containing VERSIONINFO
@@ -30,7 +30,7 @@ Res_Files=.\version.res
COMMONSRC=$(WorkSpace)\src
ALTSRC=$(WorkSpace)\src\closed

!ifdef RELEASE
!ifdef RELEASE
!ifdef DEVELOP
CPP_FLAGS=$(CPP_FLAGS) /D "DEBUG"
!else
@@ -74,6 +74,10 @@ CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_BUILD_TARGET=\"$(BUILD_FLAVOR)\""
CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_BUILD_USER=\"$(BuildUser)\""
CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_VM_DISTRO=\"$(HOTSPOT_VM_DISTRO)\""

!ifndef JAVASE_EMBEDDED
CPP_FLAGS=$(CPP_FLAGS) /D "INCLUDE_TRACE"
!endif

CPP_FLAGS=$(CPP_FLAGS) $(CPP_INCLUDE_DIRS)

# Define that so jni.h is on correct side
@@ -97,7 +101,7 @@ AGCT_EXPORT=/export:AsyncGetCallTrace
!endif

# If you modify exports below please do the corresponding changes in
# src/share/tools/ProjectCreator/WinGammaPlatformVC7.java
# src/share/tools/ProjectCreator/WinGammaPlatformVC7.java
LINK_FLAGS=$(LINK_FLAGS) $(STACK_SIZE) /subsystem:windows /dll /base:0x8000000 \
  /export:JNI_GetDefaultJavaVMInitArgs \
  /export:JNI_CreateJavaVM \
@@ -170,6 +174,7 @@ VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/oops
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/prims
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/runtime
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/services
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/trace
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/utilities
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/libadt
VM_PATH=$(VM_PATH);$(WorkSpace)/src/os/windows/vm
@@ -177,6 +182,13 @@ VM_PATH=$(VM_PATH);$(WorkSpace)/src/os_cpu/windows_$(Platform_arch)/vm
VM_PATH=$(VM_PATH);$(WorkSpace)/src/cpu/$(Platform_arch)/vm
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/opto

!if exists($(ALTSRC)\share\vm\jfr)
VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/agent
VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/agent/isolated_deps/util
VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/jvm
VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr
!endif

VM_PATH={$(VM_PATH)}

# Special case files not using precompiled header files.
@@ -263,6 +275,9 @@ bytecodeInterpreterWithChecks.obj: ..\generated\jvmtifiles\bytecodeInterpreterWi
{$(COMMONSRC)\share\vm\services}.cpp.obj::
	$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<

{$(COMMONSRC)\share\vm\trace}.cpp.obj::
	$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<

{$(COMMONSRC)\share\vm\utilities}.cpp.obj::
	$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<

@@ -340,6 +355,9 @@ bytecodeInterpreterWithChecks.obj: ..\generated\jvmtifiles\bytecodeInterpreterWi
{$(ALTSRC)\share\vm\services}.cpp.obj::
	$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<

{$(ALTSRC)\share\vm\trace}.cpp.obj::
	$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<

{$(ALTSRC)\share\vm\utilities}.cpp.obj::
	$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<

@@ -371,6 +389,18 @@ bytecodeInterpreterWithChecks.obj: ..\generated\jvmtifiles\bytecodeInterpreterWi
{..\generated\jvmtifiles}.cpp.obj::
	$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<

{$(ALTSRC)\share\vm\jfr}.cpp.obj::
	$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<

{$(ALTSRC)\share\vm\jfr\agent}.cpp.obj::
	$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<

{$(ALTSRC)\share\vm\jfr\agent\isolated_deps\util}.cpp.obj::
	$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<

{$(ALTSRC)\share\vm\jfr\jvm}.cpp.obj::
	$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<

default::

_build_pch_file.obj:
@@ -391,7 +391,7 @@ int LIR_Assembler::emit_exception_handler() {
  __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  __ should_not_reach_here();
  assert(code_offset() - offset <= exception_handler_size, "overflow");
  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
  __ end_a_stub();

  return offset;
@@ -474,8 +474,7 @@ int LIR_Assembler::emit_deopt_handler() {
  AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
  __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
  __ delayed()->nop();
  assert(code_offset() - offset <= deopt_handler_size, "overflow");
  debug_only(__ stop("should have gone to the caller");)
  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
  __ end_a_stub();

  return offset;
@@ -69,7 +69,7 @@ enum {
#else
  call_stub_size = 20,
#endif // _LP64
  exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(10*4),
  deopt_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(10*4) };
  exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(128),
  deopt_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(64) };

#endif // CPU_SPARC_VM_C1_LIRASSEMBLER_SPARC_HPP
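The hunks above pair two changes: the stub-size budgets shrink (NOT_DEBUG(128)/NOT_DEBUG(64)), and the overflow check is promoted from assert to guarantee so it survives product builds. A minimal stand-alone sketch of that distinction, assuming nothing from HotSpot (my_assert and my_guarantee are illustrative stand-ins, not the VM's macros):

  #include <cstdio>
  #include <cstdlib>

  #ifdef DEBUG
  // debug flavor: the check is compiled in, like HotSpot's assert()
  #define my_assert(cond, msg) \
    do { if (!(cond)) { fprintf(stderr, "assert failed: %s\n", msg); abort(); } } while (0)
  #else
  // product flavor: the check vanishes entirely
  #define my_assert(cond, msg) ((void)0)
  #endif

  // guarantee flavor: checked in every build, debug or product
  #define my_guarantee(cond, msg) \
    do { if (!(cond)) { fprintf(stderr, "guarantee failed: %s\n", msg); abort(); } } while (0)

  int main() {
    int code_bytes_used = 120;
    int handler_size = 128;  // cf. the tighter NOT_DEBUG(128) budget above
    my_assert(code_bytes_used <= handler_size, "overflow");     // debug-only
    my_guarantee(code_bytes_used <= handler_size, "overflow");  // also in product
    return 0;
  }

With a tighter size budget, a guarantee means an emitted handler that outgrows its reserved slot fails fast even in release bits instead of silently corrupting the code buffer.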
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -810,7 +810,7 @@ intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
}

#ifdef ASSERT
#ifndef PRODUCT

#define DESCRIBE_FP_OFFSET(name) \
  values.describe(frame_no, fp() + frame::name##_offset, #name)
@@ -820,11 +820,19 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
    values.describe(frame_no, sp() + w, err_msg("register save area word %d", w), 1);
  }

  if (is_interpreted_frame()) {
  if (is_ricochet_frame()) {
    MethodHandles::RicochetFrame::describe(this, values, frame_no);
  } else if (is_interpreted_frame()) {
    DESCRIBE_FP_OFFSET(interpreter_frame_d_scratch_fp);
    DESCRIBE_FP_OFFSET(interpreter_frame_l_scratch_fp);
    DESCRIBE_FP_OFFSET(interpreter_frame_padding);
    DESCRIBE_FP_OFFSET(interpreter_frame_oop_temp);

    // esp, according to Lesp (e.g. not depending on bci), if seems valid
    intptr_t* esp = *interpreter_frame_esp_addr();
    if ((esp >= sp()) && (esp < fp())) {
      values.describe(-1, esp, "*Lesp");
    }
  }

  if (!is_compiled_frame()) {
@@ -844,4 +852,3 @@ intptr_t *frame::initial_deoptimization_info() {
  // unused... but returns fp() to minimize changes introduced by 7087445
  return fp();
}
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -82,6 +82,8 @@ inline address* frame::O0_addr() const { return (address*) &younger_sp()[ I0->s

inline intptr_t* frame::sender_sp() const { return fp(); }

inline intptr_t* frame::real_fp() const { return fp(); }

// Used only in frame::oopmapreg_to_location
// This return a value in VMRegImpl::slot_size
inline int frame::pd_oop_map_offset_adjustment() const {
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -177,7 +177,7 @@ void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm,
  BLOCK_COMMENT("ricochet_blob.bounce");

  if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm);
  trace_method_handle(_masm, "ricochet_blob.bounce");
  trace_method_handle(_masm, "return/ricochet_blob.bounce");

  __ JMP(L1_continuation, 0);
  __ delayed()->nop();
@@ -268,14 +268,16 @@ void MethodHandles::RicochetFrame::leave_ricochet_frame(MacroAssembler* _masm,
}

// Emit code to verify that FP is pointing at a valid ricochet frame.
#ifdef ASSERT
#ifndef PRODUCT
enum {
  ARG_LIMIT = 255, SLOP = 45,
  // use this parameter for checking for garbage stack movements:
  UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP)
  // the slop defends against false alarms due to fencepost errors
};
#endif

#ifdef ASSERT
void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) {
  // The stack should look like this:
  //    ... keep1 | dest=42 | keep2 | magic | handler | magic | recursive args | [RF]
@@ -1000,32 +1002,143 @@ void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type,
  BLOCK_COMMENT("} move_return_value");
}

#ifndef PRODUCT
void MethodHandles::RicochetFrame::describe(const frame* fr, FrameValues& values, int frame_no) {
  RicochetFrame* rf = new RicochetFrame(*fr);

  // ricochet slots (kept in registers for sparc)
  values.describe(frame_no, rf->register_addr(I5_savedSP), err_msg("exact_sender_sp reg for #%d", frame_no));
  values.describe(frame_no, rf->register_addr(L5_conversion), err_msg("conversion reg for #%d", frame_no));
  values.describe(frame_no, rf->register_addr(L4_saved_args_base), err_msg("saved_args_base reg for #%d", frame_no));
  values.describe(frame_no, rf->register_addr(L3_saved_args_layout), err_msg("saved_args_layout reg for #%d", frame_no));
  values.describe(frame_no, rf->register_addr(L2_saved_target), err_msg("saved_target reg for #%d", frame_no));
  values.describe(frame_no, rf->register_addr(L1_continuation), err_msg("continuation reg for #%d", frame_no));

  // relevant ricochet targets (in caller frame)
  values.describe(-1, rf->saved_args_base(), err_msg("*saved_args_base for #%d", frame_no));
  values.describe(-1, (intptr_t *)(STACK_BIAS+(uintptr_t)rf->exact_sender_sp()), err_msg("*exact_sender_sp+STACK_BIAS for #%d", frame_no));
}
#endif // ASSERT

#ifndef PRODUCT
extern "C" void print_method_handle(oop mh);
void trace_method_handle_stub(const char* adaptername,
                              oopDesc* mh,
                              intptr_t* saved_sp) {
                              intptr_t* saved_sp,
                              intptr_t* args,
                              intptr_t* tracing_fp) {
  bool has_mh = (strstr(adaptername, "return/") == NULL); // return adapters don't have mh
  tty->print_cr("MH %s mh="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT, adaptername, (intptr_t) mh, saved_sp);
  if (has_mh)

  tty->print_cr("MH %s mh="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT " args=" INTPTR_FORMAT, adaptername, (intptr_t) mh, saved_sp, args);

  if (Verbose) {
    // dumping last frame with frame::describe

    JavaThread* p = JavaThread::active();

    ResourceMark rm;
    PRESERVE_EXCEPTION_MARK; // may not be needed by safer and unexpensive here
    FrameValues values;

    // Note: We want to allow trace_method_handle from any call site.
    // While trace_method_handle creates a frame, it may be entered
    // without a valid return PC in O7 (e.g. not just after a call).
    // Walking that frame could lead to failures due to that invalid PC.
    // => carefully detect that frame when doing the stack walking

    // walk up to the right frame using the "tracing_fp" argument
    intptr_t* cur_sp = StubRoutines::Sparc::flush_callers_register_windows_func()();
    frame cur_frame(cur_sp, frame::unpatchable, NULL);

    while (cur_frame.fp() != (intptr_t *)(STACK_BIAS+(uintptr_t)tracing_fp)) {
      cur_frame = os::get_sender_for_C_frame(&cur_frame);
    }

    // safely create a frame and call frame::describe
    intptr_t *dump_sp = cur_frame.sender_sp();
    intptr_t *dump_fp = cur_frame.link();

    bool walkable = has_mh; // whether the traced frame shoud be walkable

    // the sender for cur_frame is the caller of trace_method_handle
    if (walkable) {
      // The previous definition of walkable may have to be refined
      // if new call sites cause the next frame constructor to start
      // failing. Alternatively, frame constructors could be
      // modified to support the current or future non walkable
      // frames (but this is more intrusive and is not considered as
      // part of this RFE, which will instead use a simpler output).
      frame dump_frame = frame(dump_sp,
                               cur_frame.sp(), // younger_sp
                               false); // no adaptation
      dump_frame.describe(values, 1);
    } else {
      // Robust dump for frames which cannot be constructed from sp/younger_sp
      // Add descriptions without building a Java frame to avoid issues
      values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>");
      values.describe(-1, dump_sp, "sp");
    }

    bool has_args = has_mh; // whether Gargs is meaningful

    // mark args, if seems valid (may not be valid for some adapters)
    if (has_args) {
      if ((args >= dump_sp) && (args < dump_fp)) {
        values.describe(-1, args, "*G4_args");
      }
    }

    // mark saved_sp, if seems valid (may not be valid for some adapters)
    intptr_t *unbiased_sp = (intptr_t *)(STACK_BIAS+(uintptr_t)saved_sp);
    if ((unbiased_sp >= dump_sp - UNREASONABLE_STACK_MOVE) && (unbiased_sp < dump_fp)) {
      values.describe(-1, unbiased_sp, "*saved_sp+STACK_BIAS");
    }

    // Note: the unextended_sp may not be correct
    tty->print_cr("  stack layout:");
    values.print(p);
  }

  if (has_mh) {
    print_method_handle(mh);
  }
}

void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  if (!TraceMethodHandles) return;
  BLOCK_COMMENT("trace_method_handle {");
  // save: Gargs, O5_savedSP
  __ save_frame(16);
  __ save_frame(16); // need space for saving required FPU state

  __ set((intptr_t) adaptername, O0);
  __ mov(G3_method_handle, O1);
  __ mov(I5_savedSP, O2);
  __ mov(Gargs, O3);
  __ mov(I6, O4); // frame identifier for safe stack walking

  // Save scratched registers that might be needed. Robustness is more
  // important than optimizing the saves for this debug only code.

  // save FP result, valid at some call sites (adapter_opt_return_float, ...)
  Address d_save(FP, -sizeof(jdouble) + STACK_BIAS);
  __ stf(FloatRegisterImpl::D, Ftos_d, d_save);
  // Safely save all globals but G2 (handled by call_VM_leaf) and G7
  // (OS reserved).
  __ mov(G3_method_handle, L3);
  __ mov(Gargs, L4);
  __ mov(G5_method_type, L5);
  __ call_VM_leaf(L7, CAST_FROM_FN_PTR(address, trace_method_handle_stub));
  __ mov(G6, L6);
  __ mov(G1, L1);

  __ call_VM_leaf(L2 /* for G2 */, CAST_FROM_FN_PTR(address, trace_method_handle_stub));

  __ mov(L3, G3_method_handle);
  __ mov(L4, Gargs);
  __ mov(L5, G5_method_type);
  __ mov(L6, G6);
  __ mov(L1, G1);
  __ ldf(FloatRegisterImpl::D, d_save, Ftos_d);

  __ restore();
  BLOCK_COMMENT("} trace_method_handle");
}
@@ -1045,7 +1158,7 @@ int MethodHandles::adapter_conversion_ops_supported_mask() {
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS)
         // OP_COLLECT_ARGS is below...
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS)
         |(!UseRicochetFrames ? 0 :
         |(
           java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() <= 0 ? 0 :
           ((1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF)
           |(1<<java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS)
@@ -1250,7 +1363,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
      move_typed_arg(_masm, arg_type, false,
                     prim_value_addr,
                     Address(O0_argslot, 0),
                     O2_scratch); // must be an even register for !_LP64 long moves (uses O2/O3)
                     O2_scratch); // must be an even register for !_LP64 long moves (uses O2/O3)
    }

    if (direct_to_method) {
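The tracing code above walks sender frames until it finds the frame whose FP matches the marker the assembly stub handed down (I6 on SPARC), and only then starts describing the stack. A toy model of that search, with a plain linked struct standing in for real native frames (nothing below is VM API; it only illustrates the walk-to-marker idea):

  #include <cstdio>

  // Toy stand-in for a native stack frame: each frame links to its sender,
  // mirroring a saved-FP chain.
  struct Frame {
    Frame* sender;
    const char* name;
  };

  // Follow sender links until we reach the frame matching the marker passed
  // down by the tracing stub, then dump starting from its sender.
  static Frame* find_dump_start(Frame* cur, Frame* marker_fp) {
    while (cur != marker_fp) {
      cur = cur->sender;  // assumes the marker really is on the chain
    }
    return cur->sender;   // the caller of the tracing stub
  }

  int main() {
    Frame caller  = { nullptr, "caller of trace stub" };
    Frame tracing = { &caller, "trace_method_handle stub frame" };
    Frame current = { &tracing, "current C frame" };
    Frame* start = find_dump_start(&current, &tracing);
    printf("describe frames starting at: %s\n", start->name);
    return 0;
  }

The point of the marker is robustness: the walk does not depend on a valid return PC in the current frame, which is exactly the failure mode the comments above warn about.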
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -145,6 +145,8 @@ class RicochetFrame : public ResourceObj {
  }

  static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN;

  static void describe(const frame* fr, FrameValues& values, int frame_no) PRODUCT_RETURN;
};

// Additional helper methods for MethodHandles code generation:
@@ -9283,6 +9283,7 @@ instruct cmpD_reg(iRegI dst, regD src1, regD src2, flagsRegF0 fcc0) %{
// (compare 'operand indIndex' and 'instruct addP_reg_reg' above)
instruct jumpXtnd(iRegX switch_val, o7RegI table) %{
  match(Jump switch_val);
  effect(TEMP table);

  ins_cost(350);

@@ -10273,24 +10274,24 @@ instruct partialSubtypeCheck_vs_zero( flagsRegP pcc, o1RegP sub, o2RegP super, i
// ============================================================================
// inlined locking and unlocking

instruct cmpFastLock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, o7RegP scratch ) %{
instruct cmpFastLock(flagsRegP pcc, iRegP object, o1RegP box, iRegP scratch2, o7RegP scratch ) %{
  match(Set pcc (FastLock object box));

  effect(KILL scratch, TEMP scratch2);
  effect(TEMP scratch2, USE_KILL box, KILL scratch);
  ins_cost(100);

  format %{ "FASTLOCK $object, $box; KILL $scratch, $scratch2, $box" %}
  format %{ "FASTLOCK $object,$box\t! kills $box,$scratch,$scratch2" %}
  ins_encode( Fast_Lock(object, box, scratch, scratch2) );
  ins_pipe(long_memory_op);
%}


instruct cmpFastUnlock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, o7RegP scratch ) %{
instruct cmpFastUnlock(flagsRegP pcc, iRegP object, o1RegP box, iRegP scratch2, o7RegP scratch ) %{
  match(Set pcc (FastUnlock object box));
  effect(KILL scratch, TEMP scratch2);
  effect(TEMP scratch2, USE_KILL box, KILL scratch);
  ins_cost(100);

  format %{ "FASTUNLOCK $object, $box; KILL $scratch, $scratch2, $box" %}
  format %{ "FASTUNLOCK $object,$box\t! kills $box,$scratch,$scratch2" %}
  ins_encode( Fast_Unlock(object, box, scratch, scratch2) );
  ins_pipe(long_memory_op);
%}
@@ -406,7 +406,7 @@ int LIR_Assembler::emit_exception_handler() {
  // search an exception handler (rax: exception oop, rdx: throwing pc)
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
  __ should_not_reach_here();
  assert(code_offset() - offset <= exception_handler_size, "overflow");
  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
  __ end_a_stub();

  return offset;
@@ -490,8 +490,7 @@ int LIR_Assembler::emit_deopt_handler() {

  __ pushptr(here.addr());
  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(code_offset() - offset <= deopt_handler_size, "overflow");
  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
  __ end_a_stub();

  return offset;
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -651,13 +651,15 @@ intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
  return &interpreter_frame_tos_address()[index];
}

#ifdef ASSERT
#ifndef PRODUCT

#define DESCRIBE_FP_OFFSET(name) \
  values.describe(frame_no, fp() + frame::name##_offset, #name)

void frame::describe_pd(FrameValues& values, int frame_no) {
  if (is_interpreted_frame()) {
  if (is_ricochet_frame()) {
    MethodHandles::RicochetFrame::describe(this, values, frame_no);
  } else if (is_interpreted_frame()) {
    DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_method);
@@ -667,7 +669,6 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
    DESCRIBE_FP_OFFSET(interpreter_frame_bcx);
    DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
  }

}
#endif

@@ -675,3 +676,21 @@ intptr_t *frame::initial_deoptimization_info() {
  // used to reset the saved FP
  return fp();
}

intptr_t* frame::real_fp() const {
  if (_cb != NULL) {
    // use the frame size if valid
    int size = _cb->frame_size();
    if ((size > 0) &&
        (! is_ricochet_frame())) {
      // Work-around: ricochet explicitly excluded because frame size is not
      // constant for the ricochet blob but its frame_size could not, for
      // some reasons, be declared as <= 0. This potentially confusing
      // size declaration should be fixed as another CR.
      return unextended_sp() + size;
    }
  }
  // else rely on fp()
  assert(! is_compiled_frame(), "unknown compiled frame size");
  return fp();
}
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -188,6 +188,7 @@
  frame(intptr_t* sp, intptr_t* fp);

  // accessors for the instance variables
  // Note: not necessarily the real 'frame pointer' (see real_fp)
  intptr_t* fp() const { return _fp; }

  inline address* sender_pc_addr() const;
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -279,14 +279,16 @@ void MethodHandles::RicochetFrame::leave_ricochet_frame(MacroAssembler* _masm,
}

// Emit code to verify that RBP is pointing at a valid ricochet frame.
#ifdef ASSERT
#ifndef PRODUCT
enum {
  ARG_LIMIT = 255, SLOP = 4,
  // use this parameter for checking for garbage stack movements:
  UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP)
  // the slop defends against false alarms due to fencepost errors
};
#endif

#ifdef ASSERT
void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) {
  // The stack should look like this:
  //    ... keep1 | dest=42 | keep2 | RF | magic | handler | magic | recursive args |
@@ -990,6 +992,26 @@ void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type,
  BLOCK_COMMENT("} move_return_value");
}

#ifndef PRODUCT
#define DESCRIBE_RICOCHET_OFFSET(rf, name) \
  values.describe(frame_no, (intptr_t *) (((uintptr_t)rf) + MethodHandles::RicochetFrame::name##_offset_in_bytes()), #name)

void MethodHandles::RicochetFrame::describe(const frame* fr, FrameValues& values, int frame_no) {
  address bp = (address) fr->fp();
  RicochetFrame* rf = (RicochetFrame*)(bp - sender_link_offset_in_bytes());

  // ricochet slots
  DESCRIBE_RICOCHET_OFFSET(rf, exact_sender_sp);
  DESCRIBE_RICOCHET_OFFSET(rf, conversion);
  DESCRIBE_RICOCHET_OFFSET(rf, saved_args_base);
  DESCRIBE_RICOCHET_OFFSET(rf, saved_args_layout);
  DESCRIBE_RICOCHET_OFFSET(rf, saved_target);
  DESCRIBE_RICOCHET_OFFSET(rf, continuation);

  // relevant ricochet targets (in caller frame)
  values.describe(-1, rf->saved_args_base(), err_msg("*saved_args_base for #%d", frame_no));
}
#endif // ASSERT

#ifndef PRODUCT
extern "C" void print_method_handle(oop mh);
@@ -1001,11 +1023,12 @@ void trace_method_handle_stub(const char* adaptername,
                              intptr_t* saved_bp) {
  // called as a leaf from native code: do not block the JVM!
  bool has_mh = (strstr(adaptername, "return/") == NULL); // return adapters don't have rcx_mh

  intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
  intptr_t* base_sp = last_sp;
  typedef MethodHandles::RicochetFrame RicochetFrame;
  RicochetFrame* rfp = (RicochetFrame*)((address)saved_bp - RicochetFrame::sender_link_offset_in_bytes());
  if (!UseRicochetFrames || Universe::heap()->is_in((address) rfp->saved_args_base())) {
  if (Universe::heap()->is_in((address) rfp->saved_args_base())) {
    // Probably an interpreter frame.
    base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
  }
@@ -1030,13 +1053,64 @@ void trace_method_handle_stub(const char* adaptername,
  tty->cr();
  if (last_sp != saved_sp && last_sp != NULL)
    tty->print_cr("*** last_sp="PTR_FORMAT, (intptr_t)last_sp);
  int stack_dump_count = 16;
  if (stack_dump_count < (int)(saved_bp + 2 - saved_sp))
    stack_dump_count = (int)(saved_bp + 2 - saved_sp);
  if (stack_dump_count > 64) stack_dump_count = 48;
  for (i = 0; i < stack_dump_count; i += 4) {
    tty->print_cr(" dump at SP[%d] "PTR_FORMAT": "PTR_FORMAT" "PTR_FORMAT" "PTR_FORMAT" "PTR_FORMAT,
                  i, (intptr_t) &entry_sp[i+0], entry_sp[i+0], entry_sp[i+1], entry_sp[i+2], entry_sp[i+3]);

  {
    // dumping last frame with frame::describe

    JavaThread* p = JavaThread::active();

    ResourceMark rm;
    PRESERVE_EXCEPTION_MARK; // may not be needed by safer and unexpensive here
    FrameValues values;

    // Note: We want to allow trace_method_handle from any call site.
    // While trace_method_handle creates a frame, it may be entered
    // without a PC on the stack top (e.g. not just after a call).
    // Walking that frame could lead to failures due to that invalid PC.
    // => carefully detect that frame when doing the stack walking

    // Current C frame
    frame cur_frame = os::current_frame();

    // Robust search of trace_calling_frame (independant of inlining).
    // Assumes saved_regs comes from a pusha in the trace_calling_frame.
    assert(cur_frame.sp() < saved_regs, "registers not saved on stack ?");
    frame trace_calling_frame = os::get_sender_for_C_frame(&cur_frame);
    while (trace_calling_frame.fp() < saved_regs) {
      trace_calling_frame = os::get_sender_for_C_frame(&trace_calling_frame);
    }

    // safely create a frame and call frame::describe
    intptr_t *dump_sp = trace_calling_frame.sender_sp();
    intptr_t *dump_fp = trace_calling_frame.link();

    bool walkable = has_mh; // whether the traced frame shoud be walkable

    if (walkable) {
      // The previous definition of walkable may have to be refined
      // if new call sites cause the next frame constructor to start
      // failing. Alternatively, frame constructors could be
      // modified to support the current or future non walkable
      // frames (but this is more intrusive and is not considered as
      // part of this RFE, which will instead use a simpler output).
      frame dump_frame = frame(dump_sp, dump_fp);
      dump_frame.describe(values, 1);
    } else {
      // Stack may not be walkable (invalid PC above FP):
      // Add descriptions without building a Java frame to avoid issues
      values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>");
      values.describe(-1, dump_sp, "sp for #1");
    }

    // mark saved_sp if seems valid
    if (has_mh) {
      if ((saved_sp >= dump_sp - UNREASONABLE_STACK_MOVE) && (saved_sp < dump_fp)) {
        values.describe(-1, saved_sp, "*saved_sp");
      }
    }

    tty->print_cr("  stack layout:");
    values.print(p);
  }
  if (has_mh)
    print_method_handle(mh);
@@ -1066,26 +1140,49 @@ void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) {
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  if (!TraceMethodHandles) return;
  BLOCK_COMMENT("trace_method_handle {");
  __ push(rax);
  __ lea(rax, Address(rsp, wordSize * NOT_LP64(6) LP64_ONLY(14))); // entry_sp
  __ pusha();
  __ mov(rbx, rsp);
  __ enter();
  __ andptr(rsp, -16); // align stack if needed for FPU state
  __ pusha();
  __ mov(rbx, rsp); // for retreiving saved_regs
  // Note: saved_regs must be in the entered frame for the
  // robust stack walking implemented in trace_method_handle_stub.

  // save FP result, valid at some call sites (adapter_opt_return_float, ...)
  __ increment(rsp, -2 * wordSize);
  if (UseSSE >= 2) {
    __ movdbl(Address(rsp, 0), xmm0);
  } else if (UseSSE == 1) {
    __ movflt(Address(rsp, 0), xmm0);
  } else {
    __ fst_d(Address(rsp, 0));
  }

  // incoming state:
  // rcx: method handle
  // r13 or rsi: saved sp
  // To avoid calling convention issues, build a record on the stack and pass the pointer to that instead.
  // Note: fix the increment below if pushing more arguments
  __ push(rbp);               // saved_bp
  __ push(rsi);               // saved_sp
  __ push(rax);               // entry_sp
  __ push(saved_last_sp_register()); // saved_sp
  __ push(rbp);               // entry_sp (with extra align space)
  __ push(rbx);               // pusha saved_regs
  __ push(rcx);               // mh
  __ push(rcx);               // adaptername
  __ push(rcx);               // slot for adaptername
  __ movptr(Address(rsp, 0), (intptr_t) adaptername);
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub_wrapper), rsp);
  __ leave();
  __ increment(rsp, 6 * wordSize); // MethodHandleStubArguments

  if (UseSSE >= 2) {
    __ movdbl(xmm0, Address(rsp, 0));
  } else if (UseSSE == 1) {
    __ movflt(xmm0, Address(rsp, 0));
  } else {
    __ fld_d(Address(rsp, 0));
  }
  __ increment(rsp, 2 * wordSize);

  __ popa();
  __ pop(rax);
  __ leave();
  BLOCK_COMMENT("} trace_method_handle");
}
#endif //PRODUCT
@@ -1104,7 +1201,7 @@ int MethodHandles::adapter_conversion_ops_supported_mask() {
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS)
         //OP_COLLECT_ARGS is below...
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS)
         |(!UseRicochetFrames ? 0 :
         |(
           java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() <= 0 ? 0 :
           ((1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF)
           |(1<<java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS)
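The DESCRIBE_RICOCHET_OFFSET macro above leans on token pasting (##) to build the offset expression and stringizing (#) to build the label, so each field is named exactly once at the call site. A self-contained sketch of the same macro trick using standard offsetof and a hypothetical ToyFrame struct (not the VM's RicochetFrame):

  #include <cstddef>
  #include <cstdio>

  // Stand-in struct; the real RicochetFrame fields live in the VM.
  struct ToyFrame {
    void* exact_sender_sp;
    void* conversion;
    void* saved_target;
  };

  // ## pastes the field name into offsetof, # turns it into the label,
  // so one macro argument yields both the address and the description.
  #define DESCRIBE_OFFSET(base, name) \
    printf("%p  %s\n", (void*)((char*)(base) + offsetof(ToyFrame, name)), #name)

  int main() {
    ToyFrame tf = { nullptr, nullptr, nullptr };
    DESCRIBE_OFFSET(&tf, exact_sender_sp);
    DESCRIBE_OFFSET(&tf, conversion);
    DESCRIBE_OFFSET(&tf, saved_target);
    return 0;
  }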
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -224,6 +224,8 @@ class RicochetFrame {
  }

  static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN;

  static void describe(const frame* fr, FrameValues& values, int frame_no) PRODUCT_RETURN;
};

// Additional helper methods for MethodHandles code generation:
@@ -13435,20 +13435,20 @@ instruct RethrowException()
// inlined locking and unlocking


instruct cmpFastLock( eFlagsReg cr, eRegP object, eRegP box, eAXRegI tmp, eRegP scr) %{
instruct cmpFastLock( eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eRegP scr) %{
  match( Set cr (FastLock object box) );
  effect( TEMP tmp, TEMP scr );
  effect( TEMP tmp, TEMP scr, USE_KILL box );
  ins_cost(300);
  format %{ "FASTLOCK $object, $box KILLS $tmp,$scr" %}
  format %{ "FASTLOCK $object,$box\t! kills $box,$tmp,$scr" %}
  ins_encode( Fast_Lock(object,box,tmp,scr) );
  ins_pipe( pipe_slow );
%}

instruct cmpFastUnlock( eFlagsReg cr, eRegP object, eAXRegP box, eRegP tmp ) %{
  match( Set cr (FastUnlock object box) );
  effect( TEMP tmp );
  effect( TEMP tmp, USE_KILL box );
  ins_cost(300);
  format %{ "FASTUNLOCK $object, $box, $tmp" %}
  format %{ "FASTUNLOCK $object,$box\t! kills $box,$tmp" %}
  ins_encode( Fast_Unlock(object,box,tmp) );
  ins_pipe( pipe_slow );
%}
@@ -11511,13 +11511,13 @@ instruct jmpConUCF2_short(cmpOpUCF2 cop, rFlagsRegUCF cmp, label labl) %{
// inlined locking and unlocking

instruct cmpFastLock(rFlagsReg cr,
                     rRegP object, rRegP box, rax_RegI tmp, rRegP scr)
                     rRegP object, rbx_RegP box, rax_RegI tmp, rRegP scr)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP scr);
  effect(TEMP tmp, TEMP scr, USE_KILL box);

  ins_cost(300);
  format %{ "fastlock $object,$box,$tmp,$scr" %}
  format %{ "fastlock $object,$box\t! kills $box,$tmp,$scr" %}
  ins_encode(Fast_Lock(object, box, tmp, scr));
  ins_pipe(pipe_slow);
%}
@@ -11526,10 +11526,10 @@ instruct cmpFastUnlock(rFlagsReg cr,
                       rRegP object, rax_RegP box, rRegP tmp)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp);
  effect(TEMP tmp, USE_KILL box);

  ins_cost(300);
  format %{ "fastunlock $object, $box, $tmp" %}
  format %{ "fastunlock $object,$box\t! kills $box,$tmp" %}
  ins_encode(Fast_Unlock(object, box, tmp));
  ins_pipe(pipe_slow);
%}
@@ -418,7 +418,7 @@ void ZeroFrame::identify_vp_word(int frame_index,
  }
}

#ifdef ASSERT
#ifndef PRODUCT

void frame::describe_pd(FrameValues& values, int frame_no) {
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -72,6 +72,10 @@ inline intptr_t* frame::sender_sp() const {
  return fp() + 1;
}

inline intptr_t* frame::real_fp() const {
  return fp();
}

inline intptr_t* frame::link() const {
  ShouldNotCallThis();
}
@@ -29,43 +29,3 @@ enum /* platform_dependent_constants */ {
  adapter_code_size = 0
};

#define TARGET_ARCH_NYI_6939861 1
// ..#ifdef TARGET_ARCH_NYI_6939861
// .. // Here are some backward compatible declarations until the 6939861 ports are updated.
// .. #define _adapter_flyby (_EK_LIMIT + 10)
// .. #define _adapter_ricochet (_EK_LIMIT + 11)
// .. #define _adapter_opt_spread_1 _adapter_opt_spread_1_ref
// .. #define _adapter_opt_spread_more _adapter_opt_spread_ref
// .. enum {
// .. _INSERT_NO_MASK = -1,
// .. _INSERT_REF_MASK = 0,
// .. _INSERT_INT_MASK = 1,
// .. _INSERT_LONG_MASK = 3
// .. };
// .. static void get_ek_bound_mh_info(EntryKind ek, BasicType& arg_type, int& arg_mask, int& arg_slots) {
// .. arg_type = ek_bound_mh_arg_type(ek);
// .. arg_mask = 0;
// .. arg_slots = type2size[arg_type];;
// .. }
// .. static void get_ek_adapter_opt_swap_rot_info(EntryKind ek, int& swap_bytes, int& rotate) {
// .. int swap_slots = ek_adapter_opt_swap_slots(ek);
// .. rotate = ek_adapter_opt_swap_mode(ek);
// .. swap_bytes = swap_slots * Interpreter::stackElementSize;
// .. }
// .. static int get_ek_adapter_opt_spread_info(EntryKind ek) {
// .. return ek_adapter_opt_spread_count(ek);
// .. }
// ..
// .. static void insert_arg_slots(MacroAssembler* _masm,
// .. RegisterOrConstant arg_slots,
// .. int arg_mask,
// .. Register argslot_reg,
// .. Register temp_reg, Register temp2_reg, Register temp3_reg = noreg);
// ..
// .. static void remove_arg_slots(MacroAssembler* _masm,
// .. RegisterOrConstant arg_slots,
// .. Register argslot_reg,
// .. Register temp_reg, Register temp2_reg, Register temp3_reg = noreg);
// ..
// .. static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
// ..#endif //TARGET_ARCH_NYI_6939861
hotspot/src/os/bsd/vm/decoder_machO.cpp (new file, 31 lines)
@@ -0,0 +1,31 @@
/*
 * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#ifdef __APPLE__
#include "decoder_machO.hpp"
#endif
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -22,45 +22,21 @@
 *
 */

#include "prims/jvm.h"
#include "utilities/decoder.hpp"

#include <cxxabi.h>
#ifndef OS_BSD_VM_DECODER_MACHO_HPP
#define OS_BSD_VM_DECODER_MACHO_HPP

#ifdef __APPLE__

void Decoder::initialize() {
  _initialized = true;
}

void Decoder::uninitialize() {
  _initialized = false;
}

bool Decoder::can_decode_C_frame_in_vm() {
  return false;
}

Decoder::decoder_status Decoder::decode(address addr, const char* filepath, char *buf, int buflen, int *offset) {
  return symbol_not_found;
}
#include "utilities/decoder.hpp"

// Just a placehold for now
class MachODecoder: public NullDecoder {
public:
  MachODecoder() { }
  ~MachODecoder() { }
};

#endif

bool Decoder::demangle(const char* symbol, char *buf, int buflen) {
  int status;
  char* result;
  size_t size = (size_t)buflen;
#endif // OS_BSD_VM_DECODER_MACHO_HPP

  // Don't pass buf to __cxa_demangle. In case of the 'buf' is too small,
  // __cxa_demangle will call system "realloc" for additional memory, which
  // may use different malloc/realloc mechanism that allocates 'buf'.
  if ((result = abi::__cxa_demangle(symbol, NULL, NULL, &status)) != NULL) {
    jio_snprintf(buf, buflen, "%s", result);
    // call c library's free
    ::free(result);
    return true;
  }
  return false;
}
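The demangling code being moved here follows one subtle rule: let __cxa_demangle allocate its own output buffer, then release it with the C library's free, so two allocators never touch the same block. A stand-alone sketch of that usage, assuming a GCC/Clang toolchain with cxxabi.h (the demangle helper and the sample mangled name are illustrative, not HotSpot code):

  #include <cxxabi.h>
  #include <cstdio>
  #include <cstdlib>

  static bool demangle(const char* symbol, char* buf, int buflen) {
    int status = 0;
    // Pass NULL for the output buffer: __cxa_demangle then allocates with
    // its own malloc/realloc, avoiding the allocator-mixing hazard the
    // original comment warns about.
    char* result = abi::__cxa_demangle(symbol, NULL, NULL, &status);
    if (result != NULL) {
      snprintf(buf, (size_t)buflen, "%s", result);
      ::free(result);  // must be the C library's free
      return true;
    }
    return false;
  }

  int main() {
    char buf[256];
    if (demangle("_ZNSt6vectorIiSaIiEE9push_backERKi", buf, sizeof(buf))) {
      printf("%s\n", buf);  // std::vector<int, ...>::push_back(int const&)
    }
    return 0;
  }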
@@ -1920,7 +1920,7 @@ bool os::dll_address_to_function_name(address addr, char *buf,
    return true;
  } else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
    if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
        dlinfo.dli_fname, buf, buflen, offset) == Decoder::no_error) {
        buf, buflen, offset, dlinfo.dli_fname)) {
      return true;
    }
  }
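These os::dll_address_to_function_name hunks (here and in the Linux/Solaris files below) sit on top of dladdr: the dynamic symbol table is tried first, and the decoder is only consulted as a fallback, now keyed by the module path from dli_fname. A sketch of the underlying lookup, under the assumption of a POSIX system with dladdr available (link with -ldl on older glibc; the helper name is made up for illustration):

  #include <dlfcn.h>
  #include <cstdio>

  // dladdr consults the loaded modules' dynamic symbol tables; a full
  // decoder would fall back to parsing the file named by dli_fname.
  static bool address_to_function_name(void* addr, char* buf, int buflen,
                                       int* offset) {
    Dl_info info;
    if (dladdr(addr, &info) != 0 && info.dli_sname != NULL) {
      snprintf(buf, (size_t)buflen, "%s", info.dli_sname);
      if (offset != NULL) {
        *offset = (int)((char*)addr - (char*)info.dli_saddr);
      }
      return true;
    }
    return false;
  }

  int main() {
    char buf[128];
    int off = 0;
    if (address_to_function_name((void*)&printf, buf, sizeof(buf), &off)) {
      printf("%s+%d\n", buf, off);
    }
    return 0;
  }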
@@ -23,11 +23,11 @@
 */

#include "prims/jvm.h"
#include "utilities/decoder.hpp"
#include "utilities/decoder_elf.hpp"

#include <cxxabi.h>

bool Decoder::demangle(const char* symbol, char *buf, int buflen) {
bool ElfDecoder::demangle(const char* symbol, char *buf, int buflen) {
  int status;
  char* result;
  size_t size = (size_t)buflen;
@@ -43,3 +43,4 @@ bool Decoder::demangle(const char* symbol, char *buf, int buflen) {
  }
  return false;
}
@@ -1732,7 +1732,7 @@ bool os::dll_address_to_function_name(address addr, char *buf,
    return true;
  } else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
    if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
        dlinfo.dli_fname, buf, buflen, offset) == Decoder::no_error) {
        buf, buflen, offset, dlinfo.dli_fname)) {
      return true;
    }
  }
@@ -22,10 +22,11 @@
 *
 */

#include "utilities/decoder.hpp"
#include "utilities/decoder_elf.hpp"

#include <demangle.h>

bool Decoder::demangle(const char* symbol, char *buf, int buflen) {
bool ElfDecoder::demangle(const char* symbol, char *buf, int buflen) {
  return !cplus_demangle(symbol, buf, (size_t)buflen);
}
@@ -1997,7 +1997,7 @@ bool os::dll_address_to_function_name(address addr, char *buf,
  }
  if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
    if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
        dlinfo.dli_fname, buf, buflen, offset) == Decoder::no_error) {
        buf, buflen, offset, dlinfo.dli_fname)) {
      return true;
    }
  }
@@ -2015,7 +2015,7 @@ bool os::dll_address_to_function_name(address addr, char *buf,
    return true;
  } else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
    if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
        dlinfo.dli_fname, buf, buflen, offset) == Decoder::no_error) {
        buf, buflen, offset, dlinfo.dli_fname)) {
      return true;
    }
  }
@ -1,5 +1,5 @@
|
||||
/*
|
||||
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -24,22 +24,24 @@

#include "precompiled.hpp"
#include "prims/jvm.h"
#include "runtime/os.hpp"
#include "utilities/decoder.hpp"
#include "decoder_windows.hpp"

HMODULE                  Decoder::_dbghelp_handle = NULL;
bool                     Decoder::_can_decode_in_vm = false;
pfn_SymGetSymFromAddr64  Decoder::_pfnSymGetSymFromAddr64 = NULL;
pfn_UndecorateSymbolName Decoder::_pfnUndecorateSymbolName = NULL;
WindowsDecoder::WindowsDecoder() {
  _dbghelp_handle = NULL;
  _can_decode_in_vm = false;
  _pfnSymGetSymFromAddr64 = NULL;
  _pfnUndecorateSymbolName = NULL;

void Decoder::initialize() {
  if (!_initialized) {
    _initialized = true;
    _decoder_status = no_error;
    initialize();
  }

  HINSTANCE handle = os::win32::load_Windows_dll("dbghelp.dll", NULL, 0);
void WindowsDecoder::initialize() {
  if (!has_error() && _dbghelp_handle == NULL) {
    HMODULE handle = ::LoadLibrary("dbghelp.dll");
    if (!handle) {
      _decoder_status = helper_not_found;
      return;
      return;
    }

    _dbghelp_handle = handle;
@ -70,32 +72,29 @@ void Decoder::initialize() {

    // find out if jvm.dll contains private symbols, by decoding
    // current function and comparing the result
    address addr = (address)Decoder::initialize;
    address addr = (address)Decoder::decode;
    char buf[MAX_PATH];
    if (decode(addr, buf, sizeof(buf), NULL) == no_error) {
      _can_decode_in_vm = !strcmp(buf, "Decoder::initialize");
    if (decode(addr, buf, sizeof(buf), NULL)) {
      _can_decode_in_vm = !strcmp(buf, "Decoder::decode");
    }
  }
}

void Decoder::uninitialize() {
  assert(_initialized, "Decoder not yet initialized");
void WindowsDecoder::uninitialize() {
  _pfnSymGetSymFromAddr64 = NULL;
  _pfnUndecorateSymbolName = NULL;
  if (_dbghelp_handle != NULL) {
    ::FreeLibrary(_dbghelp_handle);
  }
  _initialized = false;
  _dbghelp_handle = NULL;
}

bool Decoder::can_decode_C_frame_in_vm() {
  initialize();
  return _can_decode_in_vm;
bool WindowsDecoder::can_decode_C_frame_in_vm() const {
  return (!has_error() && _can_decode_in_vm);
}


Decoder::decoder_status Decoder::decode(address addr, char *buf, int buflen, int *offset) {
  assert(_initialized, "Decoder not yet initialized");
bool WindowsDecoder::decode(address addr, char *buf, int buflen, int* offset, const char* modulepath) {
  if (_pfnSymGetSymFromAddr64 != NULL) {
    PIMAGEHLP_SYMBOL64 pSymbol;
    char symbolInfo[MAX_PATH + sizeof(IMAGEHLP_SYMBOL64)];
@ -105,19 +104,20 @@ Decoder::decoder_status Decoder::decode(address addr, char *buf, int buflen, int
    DWORD64 displacement;
    if (_pfnSymGetSymFromAddr64(::GetCurrentProcess(), (DWORD64)addr, &displacement, pSymbol)) {
      if (buf != NULL) {
        if (!demangle(pSymbol->Name, buf, buflen)) {
        if (!demangle(pSymbol->Name, buf, buflen)) {
          jio_snprintf(buf, buflen, "%s", pSymbol->Name);
        }
      }
      if (offset != NULL) *offset = (int)displacement;
      return no_error;
      if (offset != NULL) *offset = (int)displacement;
      return true;
    }
  }
  return helper_not_found;
  if (buf != NULL && buflen > 0) buf[0] = '\0';
  if (offset != NULL) *offset = -1;
  return false;
}

bool Decoder::demangle(const char* symbol, char *buf, int buflen) {
  assert(_initialized, "Decoder not yet initialized");
bool WindowsDecoder::demangle(const char* symbol, char *buf, int buflen) {
  return _pfnUndecorateSymbolName != NULL &&
         _pfnUndecorateSymbolName(symbol, buf, buflen, UNDNAME_COMPLETE);
}
61
hotspot/src/os/windows/vm/decoder_windows.hpp
Normal file
@ -0,0 +1,61 @@
/*
 * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_WINDOWS_VM_DECODER_WINDOWS_HPP
#define OS_WINDOWS_VM_DECODER_WINDOWS_HPP

#include <windows.h>
#include <imagehlp.h>

#include "utilities/decoder.hpp"

// functions needed for decoding symbols
typedef DWORD (WINAPI *pfn_SymSetOptions)(DWORD);
typedef BOOL  (WINAPI *pfn_SymInitialize)(HANDLE, PCTSTR, BOOL);
typedef BOOL  (WINAPI *pfn_SymGetSymFromAddr64)(HANDLE, DWORD64, PDWORD64, PIMAGEHLP_SYMBOL64);
typedef DWORD (WINAPI *pfn_UndecorateSymbolName)(const char*, char*, DWORD, DWORD);

class WindowsDecoder: public NullDecoder {

public:
  WindowsDecoder();
  ~WindowsDecoder() { uninitialize(); };

  bool can_decode_C_frame_in_vm() const;
  bool demangle(const char* symbol, char *buf, int buflen);
  bool decode(address addr, char *buf, int buflen, int* offset, const char* modulepath = NULL);

private:
  void initialize();
  void uninitialize();

private:
  HMODULE                  _dbghelp_handle;
  bool                     _can_decode_in_vm;
  pfn_SymGetSymFromAddr64  _pfnSymGetSymFromAddr64;
  pfn_UndecorateSymbolName _pfnUndecorateSymbolName;
};

#endif // OS_WINDOWS_VM_DECODER_WINDOWS_HPP
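Note on the new WindowsDecoder above: it resolves dbghelp entry points at runtime through the typedef'd function pointers instead of linking against dbghelp.dll. The following standalone C++ sketch (illustrative only, not part of the changeset; it uses only documented Win32 calls) shows the load-and-resolve pattern the class relies on, including the graceful fallback when the DLL or an export is missing:

// Minimal sketch of dynamic dbghelp loading, assuming a Windows toolchain.
#include <windows.h>
#include <imagehlp.h>
#include <cstdio>

typedef BOOL (WINAPI *pfn_SymGetSymFromAddr64)(HANDLE, DWORD64, PDWORD64, PIMAGEHLP_SYMBOL64);

int main() {
  HMODULE handle = ::LoadLibrary("dbghelp.dll");
  if (handle == NULL) {
    std::puts("helper_not_found");   // mirrors the _decoder_status fallback
    return 1;
  }
  pfn_SymGetSymFromAddr64 fn =
      (pfn_SymGetSymFromAddr64)::GetProcAddress(handle, "SymGetSymFromAddr64");
  std::printf("SymGetSymFromAddr64 %s\n", fn != NULL ? "resolved" : "missing");
  ::FreeLibrary(handle);             // mirrors WindowsDecoder::uninitialize()
  return 0;
}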
@ -1391,7 +1391,7 @@ bool os::dll_address_to_library_name(address addr, char* buf,

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  if (Decoder::decode(addr, buf, buflen, offset) == Decoder::no_error) {
  if (Decoder::decode(addr, buf, buflen, offset)) {
    return true;
  }
  if (offset != NULL) *offset = -1;
@ -1354,9 +1354,10 @@ class LIR_OpBranch: public LIR_Op {
  CodeStub* _stub;  // if this is a branch to a stub, this is the stub

 public:
  LIR_OpBranch(LIR_Condition cond, Label* lbl)
  LIR_OpBranch(LIR_Condition cond, BasicType type, Label* lbl)
    : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL)
    , _cond(cond)
    , _type(type)
    , _label(lbl)
    , _block(NULL)
    , _ublock(NULL)
@ -2053,7 +2054,7 @@ class LIR_List: public CompilationResourceObj {
  void jump(CodeStub* stub) {
    append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, stub));
  }
  void branch(LIR_Condition cond, Label* lbl) { append(new LIR_OpBranch(cond, lbl)); }
  void branch(LIR_Condition cond, BasicType type, Label* lbl) { append(new LIR_OpBranch(cond, type, lbl)); }
  void branch(LIR_Condition cond, BasicType type, BlockBegin* block) {
    assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
    append(new LIR_OpBranch(cond, type, block));
@ -2350,7 +2350,7 @@ void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegi
    } else {
      LabelObj* L = new LabelObj();
      __ cmp(lir_cond_less, value, low_key);
      __ branch(lir_cond_less, L->label());
      __ branch(lir_cond_less, T_INT, L->label());
      __ cmp(lir_cond_lessEqual, value, high_key);
      __ branch(lir_cond_lessEqual, T_INT, dest);
      __ branch_destination(L->label());
@ -413,8 +413,9 @@ static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, meth
    }
    bci = branch_bci + offset;
  }

  assert(!HAS_PENDING_EXCEPTION, "Should not have any exceptions pending");
  osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, nm, THREAD);
  assert(!HAS_PENDING_EXCEPTION, "Event handler should not throw any exceptions");
  return osr_nm;
}

@ -1589,7 +1589,7 @@ ciTypeFlow::Block::Block(ciTypeFlow* outer,
  _next = NULL;
  _on_work_list = false;
  _backedge_copy = false;
  _exception_entry = false;
  _has_monitorenter = false;
  _trap_bci = -1;
  _trap_index = 0;
  df_init();
@ -2182,6 +2182,10 @@ bool ciTypeFlow::clone_loop_heads(Loop* lp, StateVector* temp_vector, JsrSet* te
        !head->is_clonable_exit(lp))
      continue;

    // Avoid BoxLock merge.
    if (EliminateNestedLocks && head->has_monitorenter())
      continue;

    // check not already cloned
    if (head->backedge_copy_count() != 0)
      continue;
@ -2322,6 +2326,10 @@ void ciTypeFlow::flow_block(ciTypeFlow::Block* block,
      // Watch for bailouts.
      if (failing()) return;

      if (str.cur_bc() == Bytecodes::_monitorenter) {
        block->set_has_monitorenter();
      }

      if (res) {

        // We have encountered a trap.  Record it in this block.
@ -544,15 +544,19 @@ public:
    // Has this block been cloned for a loop backedge?
    bool                             _backedge_copy;

    // This block is entry to irreducible loop.
    bool                             _irreducible_entry;

    // This block has monitor entry point.
    bool                             _has_monitorenter;

    // A pointer used for our internal work list
    Block*                 _next;
    bool                   _on_work_list;      // on the work list
    Block*                           _next;
    Block*                           _rpo_next; // Reverse post order list

    // Loop info
    Loop*                            _loop;      // nearest loop
    bool                   _irreducible_entry;   // entry to irreducible loop
    bool                   _exception_entry;     // entry to exception handler

    ciBlock* ciblock() const     { return _ciblock; }
    StateVector* state() const   { return _state; }
@ -689,6 +693,8 @@ public:
    bool   is_loop_head() const          { return _loop && _loop->head() == this; }
    void   set_irreducible_entry(bool c) { _irreducible_entry = c; }
    bool   is_irreducible_entry() const  { return _irreducible_entry; }
    void   set_has_monitorenter()        { _has_monitorenter = true; }
    bool   has_monitorenter() const      { return _has_monitorenter; }
    bool   is_visited() const            { return has_pre_order(); }
    bool   is_post_visited() const       { return has_post_order(); }
    bool   is_clonable_exit(Loop* lp);
@ -1347,7 +1347,13 @@ class BacktraceBuilder: public StackObj {
    return _backtrace();
  }

  inline void push(methodOop method, short bci, TRAPS) {
  inline void push(methodOop method, int bci, TRAPS) {
    // Smear the -1 bci to 0 since the array only holds unsigned
    // shorts.  The later line number lookup would just smear the -1
    // to a 0 even if it could be recorded.
    if (bci == SynchronizationEntryBCI) bci = 0;
    assert(bci == (jushort)bci, "doesn't fit");

    if (_index >= trace_chunk_size) {
      methodHandle mhandle(THREAD, method);
      expand(CHECK);
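Why the smear matters: the backtrace chunk stores bytecode indices in an unsigned 16-bit array, so the synthetic -1 bci would otherwise wrap to 65535. A minimal standalone C++ sketch (illustrative, not HotSpot code; the sentinel value mirrors SynchronizationEntryBCI from the diff):

#include <cassert>
#include <cstdint>
#include <cstdio>

const int kSynchronizationEntryBCI = -1;  // assumed sentinel, as in the diff

uint16_t store_bci(int bci) {
  if (bci == kSynchronizationEntryBCI) bci = 0;   // smear -1 to 0
  assert(bci == (uint16_t)bci && "doesn't fit");  // value must survive narrowing
  return (uint16_t)bci;
}

int main() {
  std::printf("%u\n", store_bci(-1));   // prints 0, not 65535
  std::printf("%u\n", store_bci(42));   // prints 42
  return 0;
}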
@ -1574,8 +1580,13 @@ void java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(Handle t
  int chunk_count = 0;

  for (;!st.at_end(); st.next()) {
    // add element
    bcis->ushort_at_put(chunk_count, st.bci());
    // Add entry and smear the -1 bci to 0 since the array only holds
    // unsigned shorts.  The later line number lookup would just smear
    // the -1 to a 0 even if it could be recorded.
    int bci = st.bci();
    if (bci == SynchronizationEntryBCI) bci = 0;
    assert(bci == (jushort)bci, "doesn't fit");
    bcis->ushort_at_put(chunk_count, bci);
    methods->obj_at_put(chunk_count, st.method());

    chunk_count++;
@ -204,6 +204,24 @@ Symbol* SymbolTable::lookup_only(const char* name, int len,
  return s;
}

// Look up the address of the literal in the SymbolTable for this Symbol*
// Do not create any new symbols
// Do not increment the reference count to keep this alive
Symbol** SymbolTable::lookup_symbol_addr(Symbol* sym){
  unsigned int hash = hash_symbol((char*)sym->bytes(), sym->utf8_length());
  int index = the_table()->hash_to_index(hash);

  for (HashtableEntry<Symbol*>* e = the_table()->bucket(index); e != NULL; e = e->next()) {
    if (e->hash() == hash) {
      Symbol* literal_sym = e->literal();
      if (sym == literal_sym) {
        return e->literal_addr();
      }
    }
  }
  return NULL;
}

// Suggestion: Push unicode-based lookup all the way into the hashing
// and probing logic, so there is no need for convert_to_utf8 until
// an actual new Symbol* is created.
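The new lookup_symbol_addr walks one hash bucket and compares pointer identity only, returning the address of the stored slot rather than the symbol itself. A standalone sketch of that pattern (illustrative types, not the VM's Hashtable):

#include <cstddef>
#include <vector>

struct Entry { unsigned hash; const void* literal; Entry* next; };

struct Table {
  std::vector<Entry*> buckets;
  explicit Table(size_t n) : buckets(n, nullptr) {}

  // Returns the address of the matching entry's literal slot, or nullptr.
  // No insertion, no reference-count traffic -- just like the diff.
  const void** lookup_addr(unsigned hash, const void* sym) {
    for (Entry* e = buckets[hash % buckets.size()]; e != nullptr; e = e->next) {
      if (e->hash == hash && e->literal == sym) {  // identity, as in sym == literal_sym
        return &e->literal;
      }
    }
    return nullptr;
  }
};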
@ -144,6 +144,9 @@ public:

  static void release(Symbol* sym);

  // Look up the address of the literal in the SymbolTable for this Symbol*
  static Symbol** lookup_symbol_addr(Symbol* sym);

  // jchar (utf16) version of lookups
  static Symbol* lookup_unicode(const jchar* name, int len, TRAPS);
  static Symbol* lookup_only_unicode(const jchar* name, int len, unsigned int& hash);
@ -2131,6 +2131,12 @@ void SystemDictionary::update_dictionary(int d_index, unsigned int d_hash,
    }
  }

  // Assign a classid if one has not already been assigned.  The
  // counter does not need to be atomically incremented since this
  // is only done while holding the SystemDictionary_lock.
  // All loaded classes get a unique ID.
  TRACE_INIT_ID(k);

  // Check for a placeholder. If there, remove it and make a
  // new system dictionary entry.
  placeholders()->find_and_remove(p_index, p_hash, name, class_loader, THREAD);
@ -961,7 +961,7 @@ void CompileBroker::compile_method_base(methodHandle method,
                                        methodHandle hot_method,
                                        int hot_count,
                                        const char* comment,
                                        TRAPS) {
                                        Thread* thread) {
  // do nothing if compiler thread(s) is not available
  if (!_initialized ) {
    return;
@ -1037,7 +1037,7 @@ void CompileBroker::compile_method_base(methodHandle method,

  // Acquire our lock.
  {
    MutexLocker locker(queue->lock(), THREAD);
    MutexLocker locker(queue->lock(), thread);

    // Make sure the method has not slipped into the queues since
    // last we checked; note that those checks were "fast bail-outs".
@ -1119,7 +1119,7 @@ void CompileBroker::compile_method_base(methodHandle method,
nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
                                       int comp_level,
                                       methodHandle hot_method, int hot_count,
                                       const char* comment, TRAPS) {
                                       const char* comment, Thread* THREAD) {
  // make sure arguments make sense
  assert(method->method_holder()->klass_part()->oop_is_instance(), "not an instance method");
  assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range");
@ -1173,10 +1173,10 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
  assert(!HAS_PENDING_EXCEPTION, "No exception should be present");
  // some prerequisites that are compiler specific
  if (compiler(comp_level)->is_c2() || compiler(comp_level)->is_shark()) {
    method->constants()->resolve_string_constants(CHECK_0);
    method->constants()->resolve_string_constants(CHECK_AND_CLEAR_NULL);
    // Resolve all classes seen in the signature of the method
    // we are compiling.
    methodOopDesc::load_signature_classes(method, CHECK_0);
    methodOopDesc::load_signature_classes(method, CHECK_AND_CLEAR_NULL);
  }

  // If the method is native, do the lookup in the thread requesting
@ -1230,7 +1230,7 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
      return NULL;
    }
  } else {
    compile_method_base(method, osr_bci, comp_level, hot_method, hot_count, comment, CHECK_0);
    compile_method_base(method, osr_bci, comp_level, hot_method, hot_count, comment, THREAD);
  }

  // return requested nmethod
@ -333,7 +333,7 @@ class CompileBroker: AllStatic {
                                  methodHandle hot_method,
                                  int hot_count,
                                  const char* comment,
                                  TRAPS);
                                  Thread* thread);
  static CompileQueue* compile_queue(int comp_level) {
    if (is_c2_compile(comp_level)) return _c2_method_queue;
    if (is_c1_compile(comp_level)) return _c1_method_queue;
@ -363,7 +363,7 @@ class CompileBroker: AllStatic {
                                 int comp_level,
                                 methodHandle hot_method,
                                 int hot_count,
                                 const char* comment, TRAPS);
                                 const char* comment, Thread* thread);

  static void compiler_thread_loop();

@ -859,7 +859,9 @@ IRT_ENTRY(nmethod*,
  const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : InvocationEntryBci;
  const int bci = branch_bcp != NULL ? method->bci_from(fr.interpreter_frame_bcp()) : InvocationEntryBci;

  assert(!HAS_PENDING_EXCEPTION, "Should not have any exceptions pending");
  nmethod* osr_nm = CompilationPolicy::policy()->event(method, method, branch_bci, bci, CompLevel_none, NULL, thread);
  assert(!HAS_PENDING_EXCEPTION, "Event handler should not throw any exceptions");

  if (osr_nm != NULL) {
    // We may need to do on-stack replacement which requires that no
@ -158,6 +158,9 @@ klassOop Klass::base_create_klass_oop(KlassHandle& klass, int size,
  kl->set_next_sibling(NULL);
  kl->set_alloc_count(0);
  kl->set_alloc_size(0);
#ifdef TRACE_SET_KLASS_TRACE_ID
  TRACE_SET_KLASS_TRACE_ID(kl, 0);
#endif

  kl->set_prototype_header(markOopDesc::prototype());
  kl->set_biased_lock_revocation_count(0);
@ -33,6 +33,7 @@
#include "oops/klassPS.hpp"
#include "oops/oop.hpp"
#include "runtime/orderAccess.hpp"
#include "trace/traceMacros.hpp"
#include "utilities/accessFlags.hpp"
#ifndef SERIALGC
#include "gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp"
@ -80,6 +81,7 @@
//    [last_biased_lock_bulk_revocation_time] (64 bits)
//    [prototype_header]
//    [biased_lock_revocation_count]
//    [trace_id]


// Forward declarations.
@ -263,6 +265,9 @@ class Klass : public Klass_vtbl {
  markOop  _prototype_header;   // Used when biased locking is both enabled and disabled for this type
  jint     _biased_lock_revocation_count;

#ifdef TRACE_DEFINE_KLASS_TRACE_ID
  TRACE_DEFINE_KLASS_TRACE_ID;
#endif
 public:

  // returns the enclosing klassOop
@ -683,6 +688,9 @@ class Klass : public Klass_vtbl {
  jlong last_biased_lock_bulk_revocation_time() { return _last_biased_lock_bulk_revocation_time; }
  void  set_last_biased_lock_bulk_revocation_time(jlong cur_time) { _last_biased_lock_bulk_revocation_time = cur_time; }

#ifdef TRACE_DEFINE_KLASS_METHODS
  TRACE_DEFINE_KLASS_METHODS;
#endif

  // garbage collection support
  virtual void follow_weak_klass_links(
@ -83,6 +83,7 @@ methodOop methodKlass::allocate(constMethodHandle xconst,
  m->set_max_stack(0);
  m->set_max_locals(0);
  m->set_intrinsic_id(vmIntrinsics::_none);
  m->set_jfr_towrite(false);
  m->set_method_data(NULL);
  m->set_interpreter_throwout_count(0);
  m->set_vtable_index(methodOopDesc::garbage_vtable_index);
@ -77,7 +77,7 @@
// | method_size             | max_stack                  |
// | max_locals              | size_of_parameters         |
// |------------------------------------------------------|
// | intrinsic_id, (unused)  |  throwout_count            |
// |intrinsic_id|   flags    |  throwout_count            |
// |------------------------------------------------------|
// | num_breakpoints         |  (unused)                  |
// |------------------------------------------------------|
@ -124,6 +124,8 @@ class methodOopDesc : public oopDesc {
  u2                _max_locals;                 // Number of local variables used by this method
  u2                _size_of_parameters;         // size of the parameter block (receiver + arguments) in words
  u1                _intrinsic_id;               // vmSymbols::intrinsic_id (0 == _none)
  u1                _jfr_towrite : 1,            // Flags
                                 : 7;
  u2                _interpreter_throwout_count; // Count of times method was exited via exception while interpreting
  u2                _number_of_breakpoints;      // fullspeed debugging support
  InvocationCounter _invocation_counter;         // Incremented before each activation of the method - used to trigger frequency-based optimizations
@ -225,6 +227,7 @@ class methodOopDesc : public oopDesc {
  void clear_number_of_breakpoints()             { _number_of_breakpoints = 0; }

  // index into instanceKlass methods() array
  // note: also used by jfr
  u2 method_idnum() const           { return constMethod()->method_idnum(); }
  void set_method_idnum(u2 idnum)   { constMethod()->set_method_idnum(idnum); }

@ -650,6 +653,9 @@ class methodOopDesc : public oopDesc {
  void init_intrinsic_id();     // updates from _none if a match
  static vmSymbols::SID klass_id_for_intrinsics(klassOop holder);

  bool jfr_towrite()                 { return _jfr_towrite; }
  void set_jfr_towrite(bool towrite) { _jfr_towrite = towrite; }

  // On-stack replacement support
  bool has_osr_nmethod(int level, bool match_level) {
    return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
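Layout note: the new JFR flag is carved out of a single byte as a 1-bit bitfield with 7 bits reserved, so methodOopDesc pays for one flags byte rather than a full bool per future flag. A standalone sketch of the trick (illustrative struct, not the VM type):

#include <cstdint>
#include <cstdio>

struct MethodFlags {
  uint8_t intrinsic_id;      // plays the role of _intrinsic_id
  uint8_t jfr_towrite : 1,   // plays the role of _jfr_towrite
                      : 7;   // reserved for future flags
};

int main() {
  MethodFlags f = {0, 0};
  f.jfr_towrite = 1;                       // set_jfr_towrite(true)
  std::printf("size=%zu towrite=%u\n", sizeof(MethodFlags), (unsigned)f.jfr_towrite);
  return 0;  // two bytes total: the flag bits share one byte
}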
@ -284,13 +284,13 @@ class Block : public CFGElement {
  // helper function that adds caller save registers to MachProjNode
  void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe);
  // Schedule a call next in the block
  uint sched_call(Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, int *ready_cnt, MachCallNode *mcall, VectorSet &next_call);
  uint sched_call(Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);

  // Perform basic-block local scheduling
  Node *select(PhaseCFG *cfg, Node_List &worklist, int *ready_cnt, VectorSet &next_call, uint sched_slot);
  Node *select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot);
  void set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs );
  void needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs);
  bool schedule_local(PhaseCFG *cfg, Matcher &m, int *ready_cnt, VectorSet &next_call);
  bool schedule_local(PhaseCFG *cfg, Matcher &m, GrowableArray<int> &ready_cnt, VectorSet &next_call);
  // Cleanup if any code lands between a Call and his Catch
  void call_catch_cleanup(Block_Array &bbs);
  // Detect implicit-null-check opportunities.  Basically, find NULL checks
@ -426,6 +426,9 @@
  product(bool, EliminateLocks, true,                                       \
          "Coarsen locks when possible")                                    \
                                                                            \
  product(bool, EliminateNestedLocks, true,                                 \
          "Eliminate nested locks of the same object when possible")        \
                                                                            \
  notproduct(bool, PrintLockStatistics, false,                              \
          "Print precise statistics on the dynamic lock usage")             \
                                                                            \
@ -400,10 +400,10 @@ void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st)
      Node *box = mcall->monitor_box(this, i);
      Node *obj = mcall->monitor_obj(this, i);
      if ( OptoReg::is_valid(regalloc->get_reg_first(box)) ) {
        while( !box->is_BoxLock() )  box = box->in(1);
        box = BoxLockNode::box_node(box);
        format_helper( regalloc, st, box, "MON-BOX[", i, &scobjs );
      } else {
        OptoReg::Name box_reg = BoxLockNode::stack_slot(box);
        OptoReg::Name box_reg = BoxLockNode::reg(box);
        st->print(" MON-BOX%d=%s+%d",
                  i,
                  OptoReg::regname(OptoReg::c_frame_pointer),
@ -411,8 +411,7 @@ void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st)
      }
      const char* obj_msg = "MON-OBJ[";
      if (EliminateLocks) {
        while( !box->is_BoxLock() )  box = box->in(1);
        if (box->as_BoxLock()->is_eliminated())
        if (BoxLockNode::box_node(box)->is_eliminated())
          obj_msg = "MON-OBJ(LOCK ELIMINATED)[";
      }
      format_helper( regalloc, st, obj, obj_msg, i, &scobjs );
@ -1387,8 +1386,9 @@ bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
      Node *n = ctrl_proj->in(0);
      if (n != NULL && n->is_Unlock()) {
        UnlockNode *unlock = n->as_Unlock();
        if ((lock->obj_node() == unlock->obj_node()) &&
            (lock->box_node() == unlock->box_node()) && !unlock->is_eliminated()) {
        if (lock->obj_node()->eqv_uncast(unlock->obj_node()) &&
            BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) &&
            !unlock->is_eliminated()) {
          lock_ops.append(unlock);
          return true;
        }
@ -1431,8 +1431,8 @@ LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
  }
  if (ctrl->is_Lock()) {
    LockNode *lock = ctrl->as_Lock();
    if ((lock->obj_node() == unlock->obj_node()) &&
        (lock->box_node() == unlock->box_node())) {
    if (lock->obj_node()->eqv_uncast(unlock->obj_node()) &&
        BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) {
      lock_result = lock;
    }
  }
@ -1462,8 +1462,9 @@ bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* loc
  }
  if (lock1_node != NULL && lock1_node->is_Lock()) {
    LockNode *lock1 = lock1_node->as_Lock();
    if ((lock->obj_node() == lock1->obj_node()) &&
        (lock->box_node() == lock1->box_node()) && !lock1->is_eliminated()) {
    if (lock->obj_node()->eqv_uncast(lock1->obj_node()) &&
        BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) &&
        !lock1->is_eliminated()) {
      lock_ops.append(lock1);
      return true;
    }
@ -1507,19 +1508,16 @@ bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNod
void AbstractLockNode::create_lock_counter(JVMState* state) {
  _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter);
}
#endif

void AbstractLockNode::set_eliminated() {
  _eliminate = true;
#ifndef PRODUCT
void AbstractLockNode::set_eliminated_lock_counter() {
  if (_counter) {
    // Update the counter to indicate that this lock was eliminated.
    // The counter update code will stay around even though the
    // optimizer will eliminate the lock operation itself.
    _counter->set_tag(NamedCounter::EliminatedLockCounter);
  }
#endif
}
#endif

//=============================================================================
Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
@ -1535,7 +1533,7 @@ Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // prevents macro expansion from expanding the lock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  if (can_reshape && EliminateLocks && (!is_eliminated() || is_coarsened())) {
  if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
    //
    // If we are locking an unescaped object, the lock/unlock is unnecessary
    //
@ -1544,16 +1542,11 @@ Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
    if (cgr != NULL)
      es = cgr->escape_state(obj_node());
    if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
      if (!is_eliminated()) {
        // Mark it eliminated to update any counters
        this->set_eliminated();
      } else {
        assert(is_coarsened(), "sanity");
        // The lock could be marked eliminated by lock coarsening
        // code during first IGVN before EA. Clear coarsened flag
        // to eliminate all associated locks/unlocks.
        this->clear_coarsened();
      }
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could be marked eliminated by lock coarsening
      // code during first IGVN before EA. Replace coarsened flag
      // to eliminate all associated locks/unlocks.
      this->set_non_esc_obj();
      return result;
    }

@ -1613,8 +1606,7 @@ Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
        for (int i = 0; i < lock_ops.length(); i++) {
          AbstractLockNode* lock = lock_ops.at(i);

          // Mark it eliminated to update any counters
          lock->set_eliminated();
          // Mark it eliminated by coarsening and update any counters
          lock->set_coarsened();
        }
      } else if (ctrl->is_Region() &&
@ -1631,6 +1623,40 @@ Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  return result;
}

//=============================================================================
bool LockNode::is_nested_lock_region() {
  BoxLockNode* box = box_node()->as_BoxLock();
  int stk_slot = box->stack_slot();
  if (stk_slot <= 0)
    return false; // External lock or it is not Box (Phi node).

  // Ignore complex cases: merged locks or multiple locks.
  Node* obj = obj_node();
  LockNode* unique_lock = NULL;
  if (!box->is_simple_lock_region(&unique_lock, obj) ||
      (unique_lock != this)) {
    return false;
  }

  // Look for external lock for the same object.
  SafePointNode* sfn = this->as_SafePoint();
  JVMState* youngest_jvms = sfn->jvms();
  int max_depth = youngest_jvms->depth();
  for (int depth = 1; depth <= max_depth; depth++) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    int num_mon = jvms->nof_monitors();
    // Loop over monitors
    for (int idx = 0; idx < num_mon; idx++) {
      Node* obj_node = sfn->monitor_obj(jvms, idx);
      BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock();
      if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) {
        return true;
      }
    }
  }
  return false;
}

//=============================================================================
uint UnlockNode::size_of() const { return sizeof(*this); }

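The nested-lock check above hinges on one invariant: an enclosing scope's monitor for the same object occupies a shallower stack slot. A standalone C++ sketch of that walk (the types are invented stand-ins, not C2 classes):

#include <vector>

struct Monitor { const void* obj; int stack_slot; };
// One entry per inlining depth; each scope may hold several monitors.
using ScopeChain = std::vector<std::vector<Monitor>>;

bool is_nested_lock(const ScopeChain& scopes, const void* obj, int my_slot) {
  if (my_slot <= 0) return false;        // external lock, not a box stack slot
  for (const auto& scope : scopes) {     // every depth of the JVM state chain
    for (const Monitor& m : scope) {
      if (m.stack_slot < my_slot && m.obj == obj) {
        return true;                     // same object already locked outside
      }
    }
  }
  return false;
}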
@ -1649,7 +1675,7 @@ Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // modify the graph, the value returned from this function is the
  // one computed above.
  // Escape state is defined after Parse phase.
  if (can_reshape && EliminateLocks && (!is_eliminated() || is_coarsened())) {
  if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
    //
    // If we are unlocking an unescaped object, the lock/unlock is unnecessary.
    //
@ -1658,16 +1684,11 @@ Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
    if (cgr != NULL)
      es = cgr->escape_state(obj_node());
    if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
      if (!is_eliminated()) {
        // Mark it eliminated to update any counters
        this->set_eliminated();
      } else {
        assert(is_coarsened(), "sanity");
        // The lock could be marked eliminated by lock coarsening
        // code during first IGVN before EA. Clear coarsened flag
        // to eliminate all associated locks/unlocks.
        this->clear_coarsened();
      }
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could be marked eliminated by lock coarsening
      // code during first IGVN before EA. Replace coarsened flag
      // to eliminate all associated locks/unlocks.
      this->set_non_esc_obj();
    }
  }
  return result;
@ -840,8 +840,12 @@ public:
//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  bool _eliminate;    // indicates this lock can be safely eliminated
  bool _coarsened;    // indicates this lock was coarsened
  enum {
    Regular = 0,  // Normal lock
    NonEscObj,    // Lock is used for non escaping object
    Coarsened,    // Lock was coarsened
    Nested        // Nested lock
  } _kind;
#ifndef PRODUCT
  NamedCounter* _counter;
#endif
@ -858,12 +862,13 @@ protected:
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _coarsened(false),
      _eliminate(false)
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = NULL;
@ -873,20 +878,23 @@ public:
  Node *   obj_node() const       {return in(TypeFunc::Parms + 0); }
  Node *   box_node() const       {return in(TypeFunc::Parms + 1); }
  Node *   fastlock_node() const  {return in(TypeFunc::Parms + 2); }
  void     set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC;}

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated() {return _eliminate; }
  // mark node as eliminated and update the counter if there is one
  void set_eliminated();
  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  bool is_coarsened() { return _coarsened; }
  void set_coarsened() { _coarsened = true; }
  void clear_coarsened() { _coarsened = false; }
  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested; set_eliminated_lock_counter(); }

  // locking does not modify its arguments
  virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase){ return false;}

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
@ -936,6 +944,8 @@ public:
  virtual void clone_jvms() {
    set_jvms(jvms()->clone_deep(Compile::current()));
  }

  bool is_nested_lock_region(); // Is this Lock nested?
};

//------------------------------Unlock---------------------------------------
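The _kind change replaces two independent bools with one enum, so "eliminated" is simply "not Regular" and the reason for elimination is never ambiguous. A standalone sketch of the state model (illustrative class, not the C2 node; the counter update is reduced to a comment):

#include <cstdio>

class LockState {
 public:
  enum Kind { Regular = 0, NonEscObj, Coarsened, Nested };

  LockState() : _kind(Regular) {}

  bool is_eliminated()  const { return _kind != Regular; }
  bool is_non_esc_obj() const { return _kind == NonEscObj; }
  bool is_coarsened()   const { return _kind == Coarsened; }
  bool is_nested()      const { return _kind == Nested; }

  // In the VM each setter also bumps the "eliminated lock" statistics counter.
  void set_non_esc_obj() { _kind = NonEscObj; }
  void set_coarsened()   { _kind = Coarsened; }
  void set_nested()      { _kind = Nested; }

 private:
  Kind _kind;
};

int main() {
  LockState lock;
  lock.set_coarsened();  // coarsening implies eliminated, with the reason kept
  std::printf("eliminated=%d coarsened=%d nested=%d\n",
              lock.is_eliminated(), lock.is_coarsened(), lock.is_nested());
  return 0;
}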
@ -1597,7 +1597,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  bool is_loop = (r->is_Loop() && r->req() == 3);
  // Then, check if there is a data loop when phi references itself directly
  // or through other data nodes.
  if (is_loop && !phase->eqv_uncast(uin, in(LoopNode::EntryControl)) ||
  if (is_loop && !uin->eqv_uncast(in(LoopNode::EntryControl)) ||
      !is_loop && is_unsafe_data_reference(uin)) {
    // Break this data loop to avoid creation of a dead loop.
    if (can_reshape) {
@ -485,7 +485,11 @@ private:
    return yank_if_dead(old, current_block, &value, &regnd);
  }

  int yank_if_dead( Node *old, Block *current_block, Node_List *value, Node_List *regnd );
  int yank_if_dead( Node *old, Block *current_block, Node_List *value, Node_List *regnd ) {
    return yank_if_dead_recurse(old, old, current_block, value, regnd);
  }
  int yank_if_dead_recurse(Node *old, Node *orig_old, Block *current_block,
                           Node_List *value, Node_List *regnd);
  int yank( Node *old, Block *current_block, Node_List *value, Node_List *regnd );
  int elide_copy( Node *n, int k, Block *current_block, Node_List &value, Node_List &regnd, bool can_change_regs );
  int use_prior_register( Node *copy, uint idx, Node *def, Block *current_block, Node_List &value, Node_List &regnd );
@ -1842,20 +1842,15 @@ bool ConnectionGraph::compute_escape() {
      Node *n = C->macro_node(i);
      if (n->is_AbstractLock()) { // Lock and Unlock nodes
        AbstractLockNode* alock = n->as_AbstractLock();
        if (!alock->is_eliminated() || alock->is_coarsened()) {
        if (!alock->is_non_esc_obj()) {
          PointsToNode::EscapeState es = escape_state(alock->obj_node());
          assert(es != PointsToNode::UnknownEscape, "should know");
          if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
            if (!alock->is_eliminated()) {
              // Mark it eliminated to update any counters
              alock->set_eliminated();
            } else {
              // The lock could be marked eliminated by lock coarsening
              // code during first IGVN before EA. Clear coarsened flag
              // to eliminate all associated locks/unlocks and relock
              // during deoptimization.
              alock->clear_coarsened();
            }
            assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
            // The lock could be marked eliminated by lock coarsening
            // code during first IGVN before EA. Replace coarsened flag
            // to eliminate all associated locks/unlocks.
            alock->set_non_esc_obj();
          }
        }
      }
@ -1344,8 +1344,8 @@ void PhaseCFG::GlobalCodeMotion( Matcher &matcher, uint unique, Node_List &proj_

  // Schedule locally.  Right now a simple topological sort.
  // Later, do a real latency aware scheduler.
  int *ready_cnt = NEW_RESOURCE_ARRAY(int,C->unique());
  memset( ready_cnt, -1, C->unique() * sizeof(int) );
  uint max_idx = C->unique();
  GrowableArray<int> ready_cnt(max_idx, max_idx, -1);
  visited.Clear();
  for (i = 0; i < _num_blocks; i++) {
    if (!_blocks[i]->schedule_local(this, matcher, ready_cnt, visited)) {
@ -404,7 +404,7 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
// remaining cases (most), choose the instruction with the greatest latency
// (that is, the most number of pseudo-cycles required to the end of the
// routine). If there is a tie, choose the instruction with the most inputs.
Node *Block::select(PhaseCFG *cfg, Node_List &worklist, int *ready_cnt, VectorSet &next_call, uint sched_slot) {
Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) {

  // If only a single entry on the stack, use it
  uint cnt = worklist.size();
@ -465,7 +465,7 @@ Node *Block::select(PhaseCFG *cfg, Node_List &worklist, int *ready_cnt, VectorSe

      // More than this instruction pending for successor to be ready,
      // don't choose this if other opportunities are ready
      if (ready_cnt[use->_idx] > 1)
      if (ready_cnt.at(use->_idx) > 1)
        n_choice = 1;
    }

@ -565,7 +565,7 @@ void Block::add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_p


//------------------------------sched_call-------------------------------------
uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, int *ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
  RegMask regs;

  // Schedule all the users of the call right now.  All the users are
@ -574,8 +574,9 @@ uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_
  for (DUIterator_Fast imax, i = mcall->fast_outs(imax); i < imax; i++) {
    Node* n = mcall->fast_out(i);
    assert( n->is_MachProj(), "" );
    --ready_cnt[n->_idx];
    assert( !ready_cnt[n->_idx], "" );
    int n_cnt = ready_cnt.at(n->_idx)-1;
    ready_cnt.at_put(n->_idx, n_cnt);
    assert( n_cnt == 0, "" );
    // Schedule next to call
    _nodes.map(node_cnt++, n);
    // Collect defined registers
@ -590,7 +591,9 @@ uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_
      Node* m = n->fast_out(j); // Get user
      if( bbs[m->_idx] != this ) continue;
      if( m->is_Phi() ) continue;
      if( !--ready_cnt[m->_idx] )
      int m_cnt = ready_cnt.at(m->_idx)-1;
      ready_cnt.at_put(m->_idx, m_cnt);
      if( m_cnt == 0 )
        worklist.push(m);
    }

@ -655,7 +658,7 @@ uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_

//------------------------------schedule_local---------------------------------
// Topological sort within a block.  Someday become a real scheduler.
bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, VectorSet &next_call) {
bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &ready_cnt, VectorSet &next_call) {
  // Already "sorted" are the block start Node (as the first entry), and
  // the block-ending Node and any trailing control projections.  We leave
  // these alone.  PhiNodes and ParmNodes are made to follow the block start
@ -695,7 +698,7 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, Vect
      if( m && cfg->_bbs[m->_idx] == this && !m->is_top() )
        local++;   // One more block-local input
    }
    ready_cnt[n->_idx] = local; // Count em up
    ready_cnt.at_put(n->_idx, local); // Count em up

#ifdef ASSERT
    if( UseConcMarkSweepGC || UseG1GC ) {
@ -729,7 +732,7 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, Vect
    }
  }
  for(uint i2=i; i2<_nodes.size(); i2++ ) // Trailing guys get zapped count
    ready_cnt[_nodes[i2]->_idx] = 0;
    ready_cnt.at_put(_nodes[i2]->_idx, 0);

  // All the prescheduled guys do not hold back internal nodes
  uint i3;
@ -737,8 +740,10 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, Vect
    Node *n = _nodes[i3];       // Get pre-scheduled
    for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
      Node* m = n->fast_out(j);
      if( cfg->_bbs[m->_idx] ==this ) // Local-block user
        ready_cnt[m->_idx]--;   // Fix ready count
      if( cfg->_bbs[m->_idx] ==this ) { // Local-block user
        int m_cnt = ready_cnt.at(m->_idx)-1;
        ready_cnt.at_put(m->_idx, m_cnt);   // Fix ready count
      }
    }
  }

@ -747,7 +752,7 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, Vect
  Node_List worklist;
  for(uint i4=i3; i4<node_cnt; i4++ ) {    // Put ready guys on worklist
    Node *m = _nodes[i4];
    if( !ready_cnt[m->_idx] ) {   // Zero ready count?
    if( !ready_cnt.at(m->_idx) ) {   // Zero ready count?
      if (m->is_iteratively_computed()) {
        // Push induction variable increments last to allow other uses
        // of the phi to be scheduled first. The select() method breaks
@ -775,14 +780,14 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, Vect
    for (uint j=0; j<_nodes.size(); j++) {
      Node     *n = _nodes[j];
      int     idx = n->_idx;
      tty->print("#   ready cnt:%3d  ", ready_cnt[idx]);
      tty->print("#   ready cnt:%3d  ", ready_cnt.at(idx));
      tty->print("latency:%3d  ", cfg->_node_latency->at_grow(idx));
      tty->print("%4d: %s\n", idx, n->Name());
    }
  }
#endif

  uint max_idx = matcher.C->unique();
  uint max_idx = (uint)ready_cnt.length();
  // Pull from worklist and schedule
  while( worklist.size() ) {    // Worklist is not ready

@ -840,11 +845,13 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, Vect
      Node* m = n->fast_out(i5); // Get user
      if( cfg->_bbs[m->_idx] != this ) continue;
      if( m->is_Phi() ) continue;
      if (m->_idx > max_idx) { // new node, skip it
      if (m->_idx >= max_idx) { // new node, skip it
        assert(m->is_MachProj() && n->is_Mach() && n->as_Mach()->has_call(), "unexpected node types");
        continue;
      }
      if( !--ready_cnt[m->_idx] )
      int m_cnt = ready_cnt.at(m->_idx)-1;
      ready_cnt.at_put(m->_idx, m_cnt);
      if( m_cnt == 0 )
        worklist.push(m);
    }
  }
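The scheduling change above swaps a raw int* buffer for a length-aware container, which is what makes the new "m->_idx >= max_idx" guard possible. A standalone sketch with std::vector standing in for C2's GrowableArray<int>:

#include <cstdio>
#include <vector>

int main() {
  const unsigned unique = 8;               // C->unique() at sizing time
  std::vector<int> ready_cnt(unique, -1);  // GrowableArray(max, max, -1)

  ready_cnt.at(3) = 2;                     // ready_cnt.at_put(3, 2)
  int m_cnt = ready_cnt.at(3) - 1;         // decrement one ready count
  ready_cnt.at(3) = m_cnt;

  unsigned max_idx = (unsigned)ready_cnt.size();
  unsigned new_node_idx = 9;               // e.g. a MachProj made mid-scheduling
  if (new_node_idx >= max_idx) {
    std::puts("new node, skip it");        // mirrors the m->_idx >= max_idx check
  }
  std::printf("ready_cnt[3]=%d\n", ready_cnt.at(3));
  return 0;  // a raw int* would have allowed a silent out-of-bounds read here
}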
@ -819,7 +819,7 @@ inline Node* LibraryCallKit::generate_limit_guard(Node* offset,
  if (stopped())
    return NULL;                // already stopped
  bool zero_offset = _gvn.type(offset) == TypeInt::ZERO;
  if (zero_offset && _gvn.eqv_uncast(subseq_length, array_length))
  if (zero_offset && subseq_length->eqv_uncast(array_length))
    return NULL;                // common case of whole-array copy
  Node* last = subseq_length;
  if (!zero_offset)             // last += offset
@ -4667,7 +4667,7 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
  if (ReduceBulkZeroing
      && !ZeroTLAB              // pointless if already zeroed
      && basic_elem_type != T_CONFLICT // avoid corner case
      && !_gvn.eqv_uncast(src, dest)
      && !src->eqv_uncast(dest)
      && ((alloc = tightly_coupled_allocation(dest, slow_region))
          != NULL)
      && _gvn.find_int_con(alloc->in(AllocateNode::ALength), 1) > 0
@ -4745,7 +4745,7 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
    // copy_length is 0.
    if (!stopped() && dest_uninitialized) {
      Node* dest_length = alloc->in(AllocateNode::ALength);
      if (_gvn.eqv_uncast(copy_length, dest_length)
      if (copy_length->eqv_uncast(dest_length)
          || _gvn.find_int_con(dest_length, 1) <= 0) {
        // There is no zeroing to do. No need for a secondary raw memory barrier.
      } else {
@ -4791,7 +4791,7 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
    // with its attendant messy index arithmetic, and upgrade
    // the copy to a more hardware-friendly word size of 64 bits.
    Node* tail_ctl = NULL;
    if (!stopped() && !_gvn.eqv_uncast(dest_tail, dest_length)) {
    if (!stopped() && !dest_tail->eqv_uncast(dest_length)) {
      Node* cmp_lt  = _gvn.transform( new(C,3) CmpINode(dest_tail, dest_length) );
      Node* bol_lt  = _gvn.transform( new(C,2) BoolNode(cmp_lt, BoolTest::lt) );
      tail_ctl = generate_slow_guard(bol_lt, NULL);
@ -49,18 +49,22 @@ BoxLockNode::BoxLockNode( int slot ) : Node( Compile::current()->root() ),

//-----------------------------hash--------------------------------------------
uint BoxLockNode::hash() const {
  if (EliminateNestedLocks)
    return NO_HASH; // Each locked region has own BoxLock node
  return Node::hash() + _slot + (_is_eliminated ? Compile::current()->fixed_slots() : 0);
}

//------------------------------cmp--------------------------------------------
uint BoxLockNode::cmp( const Node &n ) const {
  if (EliminateNestedLocks)
    return (&n == this); // Always fail except on self
  const BoxLockNode &bn = (const BoxLockNode &)n;
  return bn._slot == _slot && bn._is_eliminated == _is_eliminated;
}

OptoReg::Name BoxLockNode::stack_slot(Node* box_node) {
  // Chase down the BoxNode
  while (!box_node->is_BoxLock()) {
BoxLockNode* BoxLockNode::box_node(Node* box) {
  // Chase down the BoxNode after RA which may spill box nodes.
  while (!box->is_BoxLock()) {
    //    if (box_node->is_SpillCopy()) {
    //      Node *m = box_node->in(1);
    //      if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_StoreP) {
@ -68,10 +72,64 @@ OptoReg::Name BoxLockNode::stack_slot(Node* box_node) {
    //        continue;
    //      }
    //    }
    assert(box_node->is_SpillCopy() || box_node->is_Phi(), "Bad spill of Lock.");
    box_node = box_node->in(1);
    assert(box->is_SpillCopy() || box->is_Phi(), "Bad spill of Lock.");
    // Only BoxLock nodes with the same stack slot are merged.
    // So it is enough to trace one path to find the slot value.
    box = box->in(1);
  }
  return box_node->in_RegMask(0).find_first_elem();
  return box->as_BoxLock();
}

OptoReg::Name BoxLockNode::reg(Node* box) {
  return box_node(box)->in_RegMask(0).find_first_elem();
}

// Is BoxLock node used for one simple lock region (same box and obj)?
bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj) {
  LockNode* lock = NULL;
  bool has_one_lock = false;
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    assert(!n->is_Phi(), "should not merge BoxLock nodes");
    if (n->is_AbstractLock()) {
      AbstractLockNode* alock = n->as_AbstractLock();
      // Check lock's box since box could be referenced by Lock's debug info.
      if (alock->box_node() == this) {
        if (alock->obj_node()->eqv_uncast(obj)) {
          if ((unique_lock != NULL) && alock->is_Lock()) {
            if (lock == NULL) {
              lock = alock->as_Lock();
              has_one_lock = true;
            } else if (lock != alock->as_Lock()) {
              has_one_lock = false;
            }
          }
        } else {
          return false; // Different objects
        }
      }
    }
  }
#ifdef ASSERT
  // Verify that FastLock and Safepoint reference only this lock region.
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    if (n->is_FastLock()) {
      FastLockNode* flock = n->as_FastLock();
      assert((flock->box_node() == this) && flock->obj_node()->eqv_uncast(obj),"");
    }
    // Don't check monitor info in safepoints since the referenced object could
    // be different from the locked object. It could be Phi node of different
    // cast nodes which point to this locked object.
    // We assume that no other objects could be referenced in monitor info
    // associated with this BoxLock node because all associated locks and
    // unlocks reference only this one object.
  }
#endif
  if (unique_lock != NULL && has_one_lock) {
    *unique_lock = lock;
  }
  return true;
}

//=============================================================================
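The chase in the new BoxLockNode::box_node is a plain pointer walk: after register allocation a box reference may sit behind a chain of spill copies, and the canonical box is found by following in(1). A standalone sketch with invented stand-in types:

#include <cassert>
#include <cstdio>

struct Node {
  bool is_box;
  Node* in1;   // plays the role of box->in(1)
};

Node* box_node(Node* box) {
  while (!box->is_box) {
    assert(box->in1 != nullptr && "Bad spill of Lock.");
    // Only boxes with the same stack slot are merged, so one path suffices.
    box = box->in1;
  }
  return box;
}

int main() {
  Node real_box = {true, nullptr};
  Node spill2   = {false, &real_box};
  Node spill1   = {false, &spill2};
  std::printf("found box: %p\n", (void*)box_node(&spill1));
  return 0;
}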
@ -49,11 +49,11 @@

//------------------------------BoxLockNode------------------------------------
class BoxLockNode : public Node {
public:
  const int _slot;
  RegMask   _inmask;
  bool      _is_eliminated;  // indicates this lock was safely eliminated
  const int _slot;           // stack slot
  RegMask   _inmask;         // OptoReg corresponding to stack slot
  bool      _is_eliminated;  // Associated locks were safely eliminated

public:
  BoxLockNode( int lock );
  virtual int Opcode() const;
  virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
@ -66,11 +66,19 @@ public:
  virtual const class Type *bottom_type() const { return TypeRawPtr::BOTTOM; }
  virtual uint ideal_reg() const { return Op_RegP; }

  static OptoReg::Name stack_slot(Node* box_node);
  static OptoReg::Name reg(Node* box_node);
  static BoxLockNode* box_node(Node* box_node);
  static bool same_slot(Node* box1, Node* box2) {
    return box1->as_BoxLock()->_slot == box2->as_BoxLock()->_slot;
  }
  int stack_slot() const { return _slot; }

  bool is_eliminated() { return _is_eliminated; }
  bool is_eliminated() const { return _is_eliminated; }
  // mark lock as eliminated.
  void set_eliminated() { _is_eliminated = true; }

  // Is BoxLock node used for one simple lock region?
  bool is_simple_lock_region(LockNode** unique_lock, Node* obj);

#ifndef PRODUCT
  virtual void format( PhaseRegAlloc *, outputStream *st ) const;
@ -91,6 +99,7 @@ public:
  }
  Node* obj_node() const { return in(1); }
  Node* box_node() const { return in(2); }
  void  set_box_node(Node* box) { set_req(2, box); }

  // FastLock and FastUnlockNode do not hash, we need one for each corresponding
  // LockNode/UnLockNode to avoid creating Phi's.
@ -3278,16 +3278,7 @@ void PhaseIdealLoop::build_loop_late_post( Node *n ) {
#ifdef ASSERT
  if (legal->is_Start() && !early->is_Root()) {
    // Bad graph. Print idom path and fail.
    tty->print_cr( "Bad graph detected in build_loop_late");
    tty->print("n: ");n->dump(); tty->cr();
    tty->print("early: ");early->dump(); tty->cr();
    int ct = 0;
    Node *dbg_legal = LCA;
    while(!dbg_legal->is_Start() && ct < 100) {
      tty->print("idom[%d] ",ct); dbg_legal->dump(); tty->cr();
      ct++;
      dbg_legal = idom(dbg_legal);
    }
    dump_bad_graph(n, early, LCA);
    assert(false, "Bad graph detected in build_loop_late");
  }
#endif
@ -3337,6 +3328,88 @@ void PhaseIdealLoop::build_loop_late_post( Node *n ) {
  chosen_loop->_body.push(n);// Collect inner loops
}

#ifdef ASSERT
void PhaseIdealLoop::dump_bad_graph(Node* n, Node* early, Node* LCA) {
  tty->print_cr( "Bad graph detected in build_loop_late");
  tty->print("n: "); n->dump();
  tty->print("early(n): "); early->dump();
  if (n->in(0) != NULL  && !n->in(0)->is_top() &&
      n->in(0) != early && !n->in(0)->is_Root()) {
    tty->print("n->in(0): "); n->in(0)->dump();
  }
  for (uint i = 1; i < n->req(); i++) {
    Node* in1 = n->in(i);
    if (in1 != NULL && in1 != n && !in1->is_top()) {
      tty->print("n->in(%d): ", i); in1->dump();
      Node* in1_early = get_ctrl(in1);
      tty->print("early(n->in(%d)): ", i); in1_early->dump();
      if (in1->in(0) != NULL     && !in1->in(0)->is_top() &&
          in1->in(0) != in1_early && !in1->in(0)->is_Root()) {
        tty->print("n->in(%d)->in(0): ", i); in1->in(0)->dump();
      }
      for (uint j = 1; j < in1->req(); j++) {
        Node* in2 = in1->in(j);
        if (in2 != NULL && in2 != n && in2 != in1 && !in2->is_top()) {
          tty->print("n->in(%d)->in(%d): ", i, j); in2->dump();
          Node* in2_early = get_ctrl(in2);
          tty->print("early(n->in(%d)->in(%d)): ", i, j); in2_early->dump();
          if (in2->in(0) != NULL     && !in2->in(0)->is_top() &&
              in2->in(0) != in2_early && !in2->in(0)->is_Root()) {
            tty->print("n->in(%d)->in(%d)->in(0): ", i, j); in2->in(0)->dump();
          }
        }
      }
    }
  }
  tty->cr();
  tty->print("LCA(n): "); LCA->dump();
  for (uint i = 0; i < n->outcnt(); i++) {
    Node* u1 = n->raw_out(i);
    if (u1 == n)
      continue;
    tty->print("n->out(%d): ", i); u1->dump();
    if (u1->is_CFG()) {
      for (uint j = 0; j < u1->outcnt(); j++) {
        Node* u2 = u1->raw_out(j);
        if (u2 != u1 && u2 != n && u2->is_CFG()) {
          tty->print("n->out(%d)->out(%d): ", i, j); u2->dump();
        }
      }
    } else {
      Node* u1_later = get_ctrl(u1);
      tty->print("later(n->out(%d)): ", i); u1_later->dump();
      if (u1->in(0) != NULL     && !u1->in(0)->is_top() &&
          u1->in(0) != u1_later && !u1->in(0)->is_Root()) {
        tty->print("n->out(%d)->in(0): ", i); u1->in(0)->dump();
      }
      for (uint j = 0; j < u1->outcnt(); j++) {
        Node* u2 = u1->raw_out(j);
        if (u2 == n || u2 == u1)
          continue;
        tty->print("n->out(%d)->out(%d): ", i, j); u2->dump();
        if (!u2->is_CFG()) {
          Node* u2_later = get_ctrl(u2);
          tty->print("later(n->out(%d)->out(%d)): ", i, j); u2_later->dump();
          if (u2->in(0) != NULL     && !u2->in(0)->is_top() &&
              u2->in(0) != u2_later && !u2->in(0)->is_Root()) {
            tty->print("n->out(%d)->in(0): ", i); u2->in(0)->dump();
          }
        }
      }
    }
  }
  tty->cr();
  int ct = 0;
  Node *dbg_legal = LCA;
  while(!dbg_legal->is_Start() && ct < 100) {
    tty->print("idom[%d] ",ct); dbg_legal->dump();
    ct++;
    dbg_legal = idom(dbg_legal);
  }
  tty->cr();
}
#endif

#ifndef PRODUCT
//------------------------------dump-------------------------------------------
void PhaseIdealLoop::dump( ) const {
@ -1040,6 +1040,10 @@ public:
  bool created_loop_node()     { return _created_loop_node; }
  void register_new_node( Node *n, Node *blk );

#ifdef ASSERT
  void dump_bad_graph(Node* n, Node* early, Node* LCA);
#endif

#ifndef PRODUCT
  void dump( ) const;
  void dump( IdealLoopTree *loop, uint rpo_idx, Node_List &rpo_list ) const;
@ -819,6 +819,8 @@ void PhaseIdealLoop::split_if_with_blocks_post( Node *n ) {
    if( iff->is_If() ) {        // Classic split-if?
      if( iff->in(0) != n_ctrl ) return; // Compare must be in same blk as if
    } else if (iff->is_CMove()) { // Trying to split-up a CMOVE
      // Can't split CMove with different control edge.
      if (iff->in(0) != NULL && iff->in(0) != n_ctrl ) return;
      if( get_ctrl(iff->in(2)) == n_ctrl ||
          get_ctrl(iff->in(3)) == n_ctrl )
        return;                 // Inputs not yet split-up
@ -937,7 +939,7 @@ void PhaseIdealLoop::split_if_with_blocks_post( Node *n ) {
  }
  bool did_break = (i < imax);  // Did we break out of the previous loop?
  if (!did_break && n->outcnt() > 1) { // All uses in outer loops!
    Node *late_load_ctrl;
    Node *late_load_ctrl = NULL;
    if (n->is_Load()) {
      // If n is a load, get and save the result from get_late_ctrl(),
      // to be later used in calculating the control for n's clones.
@ -1789,7 +1789,8 @@ void PhaseMacroExpand::expand_allocate_array(AllocateArrayNode *alloc) {
                                          slow_call_address);
}

//-----------------------mark_eliminated_locking_nodes-----------------------
//-------------------mark_eliminated_box----------------------------------
//
// During EA obj may point to several objects but after few ideal graph
// transformations (CCP) it may point to only one non escaping object
// (but still using phi), corresponding locks and unlocks will be marked
@ -1800,62 +1801,148 @@ void PhaseMacroExpand::expand_allocate_array(AllocateArrayNode *alloc) {
// marked for elimination since new obj has no escape information.
// Mark all associated (same box and obj) lock and unlock nodes for
// elimination if some of them marked already.
void PhaseMacroExpand::mark_eliminated_locking_nodes(AbstractLockNode *alock) {
  if (!alock->is_eliminated()) {
void PhaseMacroExpand::mark_eliminated_box(Node* oldbox, Node* obj) {
  if (oldbox->as_BoxLock()->is_eliminated())
    return; // This BoxLock node was processed already.

  // New implementation (EliminateNestedLocks) has separate BoxLock
  // node for each locked region so mark all associated locks/unlocks as
  // eliminated even if different objects are referenced in one locked region
  // (for example, OSR compilation of nested loop inside locked scope).
  if (EliminateNestedLocks ||
      oldbox->as_BoxLock()->is_simple_lock_region(NULL, obj)) {
    // Box is used only in one lock region. Mark this box as eliminated.
    _igvn.hash_delete(oldbox);
    oldbox->as_BoxLock()->set_eliminated(); // This changes box's hash value
    _igvn.hash_insert(oldbox);

    for (uint i = 0; i < oldbox->outcnt(); i++) {
      Node* u = oldbox->raw_out(i);
      if (u->is_AbstractLock() && !u->as_AbstractLock()->is_non_esc_obj()) {
        AbstractLockNode* alock = u->as_AbstractLock();
        // Check lock's box since box could be referenced by Lock's debug info.
        if (alock->box_node() == oldbox) {
          // Mark eliminated all related locks and unlocks.
          alock->set_non_esc_obj();
        }
      }
    }
    return;
  }
  if (!alock->is_coarsened()) { // Eliminated by EA
    // Create new "eliminated" BoxLock node and use it
    // in monitor debug info for the same object.
    BoxLockNode* oldbox = alock->box_node()->as_BoxLock();
    Node* obj = alock->obj_node();
    if (!oldbox->is_eliminated()) {
      BoxLockNode* newbox = oldbox->clone()->as_BoxLock();

  // Create new "eliminated" BoxLock node and use it in monitor debug info
  // instead of oldbox for the same object.
  BoxLockNode* newbox = oldbox->clone()->as_BoxLock();

  // Note: BoxLock node is marked eliminated only here and it is used
  // to indicate that all associated lock and unlock nodes are marked
  // for elimination.
  newbox->set_eliminated();
  transform_later(newbox);

  // Replace old box node with new box for all users of the same object.
  for (uint i = 0; i < oldbox->outcnt();) {
    bool next_edge = true;

    Node* u = oldbox->raw_out(i);
    if (u->is_AbstractLock()) {
      AbstractLockNode* alock = u->as_AbstractLock();
      if (alock->box_node() == oldbox && alock->obj_node()->eqv_uncast(obj)) {
        // Replace Box and mark eliminated all related locks and unlocks.
        alock->set_non_esc_obj();
        _igvn.hash_delete(alock);
        alock->set_box_node(newbox);
        _igvn._worklist.push(alock);
        next_edge = false;
      }
    }
    if (u->is_FastLock() && u->as_FastLock()->obj_node()->eqv_uncast(obj)) {
      FastLockNode* flock = u->as_FastLock();
      assert(flock->box_node() == oldbox, "sanity");
      _igvn.hash_delete(flock);
      flock->set_box_node(newbox);
      _igvn._worklist.push(flock);
      next_edge = false;
    }

    // Replace old box in monitor debug info.
    if (u->is_SafePoint() && u->as_SafePoint()->jvms()) {
      SafePointNode* sfn = u->as_SafePoint();
      JVMState* youngest_jvms = sfn->jvms();
      int max_depth = youngest_jvms->depth();
      for (int depth = 1; depth <= max_depth; depth++) {
        JVMState* jvms = youngest_jvms->of_depth(depth);
        int num_mon = jvms->nof_monitors();
        // Loop over monitors
        for (int idx = 0; idx < num_mon; idx++) {
          Node* obj_node = sfn->monitor_obj(jvms, idx);
          Node* box_node = sfn->monitor_box(jvms, idx);
          if (box_node == oldbox && obj_node->eqv_uncast(obj)) {
            int j = jvms->monitor_box_offset(idx);
            _igvn.hash_delete(u);
            u->set_req(j, newbox);
            _igvn._worklist.push(u);
            next_edge = false;
          }
        }
      }
    }
    if (next_edge) i++;
  }
}

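Reviewer note: the replacement loop above rewires out-edges of oldbox while it is iterating them (set_box_node / set_req remove the current edge), so the index only advances when nothing was removed at the current slot. A minimal standalone sketch of that deferred-increment idiom, using a plain vector in place of the node's out-edge array (all names illustrative, not HotSpot API):

#include <vector>
#include <cassert>

// Remove entries that match a predicate while iterating, advancing the
// index only when the current slot was left intact (mirrors the
// "if (next_edge) i++;" pattern in mark_eliminated_box).
static void rewire_matching(std::vector<int>& out_edges) {
  for (size_t i = 0; i < out_edges.size();) {
    bool next_edge = true;
    if (out_edges[i] % 2 == 0) {                  // "this use references oldbox"
      out_edges.erase(out_edges.begin() + i);     // rewiring removes the edge
      next_edge = false;                          // same slot now holds a new edge
    }
    if (next_edge) i++;
  }
}

int main() {
  std::vector<int> edges = {1, 2, 3, 4, 5};
  rewire_matching(edges);
  assert(edges.size() == 3); // 2 and 4 removed, none skipped
  return 0;
}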
//-----------------------mark_eliminated_locking_nodes-----------------------
void PhaseMacroExpand::mark_eliminated_locking_nodes(AbstractLockNode *alock) {
  if (EliminateNestedLocks) {
    if (alock->is_nested()) {
      assert(alock->box_node()->as_BoxLock()->is_eliminated(), "sanity");
      return;
    } else if (!alock->is_non_esc_obj()) { // Not eliminated or coarsened
      // Only Lock node has JVMState needed here.
      if (alock->jvms() != NULL && alock->as_Lock()->is_nested_lock_region()) {
        // Mark eliminated related nested locks and unlocks.
        Node* obj = alock->obj_node();
        BoxLockNode* box_node = alock->box_node()->as_BoxLock();
        assert(!box_node->is_eliminated(), "should not be marked yet");
        // Note: BoxLock node is marked eliminated only here
        // and it is used to indicate that all associated lock
        // and unlock nodes are marked for elimination.
        newbox->set_eliminated();
        transform_later(newbox);
        // Replace old box node with new box for all users
        // of the same object.
        for (uint i = 0; i < oldbox->outcnt();) {

          bool next_edge = true;
          Node* u = oldbox->raw_out(i);
          if (u->is_AbstractLock() &&
              u->as_AbstractLock()->obj_node() == obj &&
              u->as_AbstractLock()->box_node() == oldbox) {
            // Mark all associated locks and unlocks.
            u->as_AbstractLock()->set_eliminated();
            _igvn.hash_delete(u);
            u->set_req(TypeFunc::Parms + 1, newbox);
            next_edge = false;
        box_node->set_eliminated(); // Box's hash is always NO_HASH here
        for (uint i = 0; i < box_node->outcnt(); i++) {
          Node* u = box_node->raw_out(i);
          if (u->is_AbstractLock()) {
            alock = u->as_AbstractLock();
            if (alock->box_node() == box_node) {
              // Verify that this Box is referenced only by related locks.
              assert(alock->obj_node()->eqv_uncast(obj), "");
              // Mark all related locks and unlocks.
              alock->set_nested();
            }
          }
          // Replace old box in monitor debug info.
          if (u->is_SafePoint() && u->as_SafePoint()->jvms()) {
            SafePointNode* sfn = u->as_SafePoint();
            JVMState* youngest_jvms = sfn->jvms();
            int max_depth = youngest_jvms->depth();
            for (int depth = 1; depth <= max_depth; depth++) {
              JVMState* jvms = youngest_jvms->of_depth(depth);
              int num_mon = jvms->nof_monitors();
              // Loop over monitors
              for (int idx = 0; idx < num_mon; idx++) {
                Node* obj_node = sfn->monitor_obj(jvms, idx);
                Node* box_node = sfn->monitor_box(jvms, idx);
                if (box_node == oldbox && obj_node == obj) {
                  int j = jvms->monitor_box_offset(idx);
                  _igvn.hash_delete(u);
                  u->set_req(j, newbox);
                  next_edge = false;
                }
              } // for (int idx = 0;
            } // for (int depth = 1;
          } // if (u->is_SafePoint()
          if (next_edge) i++;
        } // for (uint i = 0; i < oldbox->outcnt();)
      } // if (!oldbox->is_eliminated())
    } // if (!alock->is_coarsened())
  }
}
      return;
    }
    // Process locks for non escaping object
    assert(alock->is_non_esc_obj(), "");
  } // EliminateNestedLocks

  if (alock->is_non_esc_obj()) { // Lock is used for non escaping object
    // Look for all locks of this object and mark them and
    // corresponding BoxLock nodes as eliminated.
    Node* obj = alock->obj_node();
    for (uint j = 0; j < obj->outcnt(); j++) {
      Node* o = obj->raw_out(j);
      if (o->is_AbstractLock() &&
          o->as_AbstractLock()->obj_node()->eqv_uncast(obj)) {
        alock = o->as_AbstractLock();
        Node* box = alock->box_node();
        // Replace old box node with new eliminated box for all users
        // of the same object and mark related locks as eliminated.
        mark_eliminated_box(box, obj);
      }
    }
  }
}

// we have determined that this lock/unlock can be eliminated, we simply
@ -1870,7 +1957,7 @@ bool PhaseMacroExpand::eliminate_locking_node(AbstractLockNode *alock) {
    return false;
  }
#ifdef ASSERT
  if (alock->is_Lock() && !alock->is_coarsened()) {
  if (!alock->is_coarsened()) {
    // Check that new "eliminated" BoxLock node is created.
    BoxLockNode* oldbox = alock->box_node()->as_BoxLock();
    assert(oldbox->is_eliminated(), "should be done already");
@ -1962,6 +2049,8 @@ void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
  Node* box = lock->box_node();
  Node* flock = lock->fastlock_node();

  assert(!box->as_BoxLock()->is_eliminated(), "sanity");

  // Make the merge point
  Node *region;
  Node *mem_phi;
@ -2196,6 +2285,8 @@ void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) {
  Node* obj = unlock->obj_node();
  Node* box = unlock->box_node();

  assert(!box->as_BoxLock()->is_eliminated(), "sanity");

  // No need for a null check on unlock

  // Make the merge point

@ -92,6 +92,7 @@ private:
  void process_users_of_allocation(AllocateNode *alloc);

  void eliminate_card_mark(Node *cm);
  void mark_eliminated_box(Node* box, Node* obj);
  void mark_eliminated_locking_nodes(AbstractLockNode *alock);
  bool eliminate_locking_node(AbstractLockNode *alock);
  void expand_lock_node(LockNode *lock);

@ -1718,8 +1718,10 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
  bool is_instance = (tinst != NULL) && tinst->is_known_instance_field();
  if (ReduceFieldZeroing || is_instance) {
    Node* value = can_see_stored_value(mem,phase);
    if (value != NULL && value->is_Con())
    if (value != NULL && value->is_Con()) {
      assert(value->bottom_type()->higher_equal(_type),"sanity");
      return value->bottom_type();
    }
  }

  if (is_instance) {
@ -1759,6 +1761,20 @@ Node *LoadBNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  return LoadNode::Ideal(phase, can_reshape);
}

const Type* LoadBNode::Value(PhaseTransform *phase) const {
  Node* mem = in(MemNode::Memory);
  Node* value = can_see_stored_value(mem,phase);
  if (value != NULL && value->is_Con() &&
      !value->bottom_type()->higher_equal(_type)) {
    // If the input to the store does not fit with the load's result type,
    // it must be truncated. We can't delay until Ideal call since
    // a singleton Value is needed for split_thru_phi optimization.
    int con = value->get_int();
    return TypeInt::make((con << 24) >> 24);
  }
  return LoadNode::Value(phase);
}

//--------------------------LoadUBNode::Ideal-------------------------------------
//
// If the previous store is to the same address as this load,
@ -1775,6 +1791,20 @@ Node* LoadUBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  return LoadNode::Ideal(phase, can_reshape);
}

const Type* LoadUBNode::Value(PhaseTransform *phase) const {
  Node* mem = in(MemNode::Memory);
  Node* value = can_see_stored_value(mem,phase);
  if (value != NULL && value->is_Con() &&
      !value->bottom_type()->higher_equal(_type)) {
    // If the input to the store does not fit with the load's result type,
    // it must be truncated. We can't delay until Ideal call since
    // a singleton Value is needed for split_thru_phi optimization.
    int con = value->get_int();
    return TypeInt::make(con & 0xFF);
  }
  return LoadNode::Value(phase);
}

//--------------------------LoadUSNode::Ideal-------------------------------------
//
// If the previous store is to the same address as this load,
@ -1791,6 +1821,20 @@ Node *LoadUSNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  return LoadNode::Ideal(phase, can_reshape);
}

const Type* LoadUSNode::Value(PhaseTransform *phase) const {
  Node* mem = in(MemNode::Memory);
  Node* value = can_see_stored_value(mem,phase);
  if (value != NULL && value->is_Con() &&
      !value->bottom_type()->higher_equal(_type)) {
    // If the input to the store does not fit with the load's result type,
    // it must be truncated. We can't delay until Ideal call since
    // a singleton Value is needed for split_thru_phi optimization.
    int con = value->get_int();
    return TypeInt::make(con & 0xFFFF);
  }
  return LoadNode::Value(phase);
}

//--------------------------LoadSNode::Ideal--------------------------------------
//
// If the previous store is to the same address as this load,
@ -1809,6 +1853,20 @@ Node *LoadSNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  return LoadNode::Ideal(phase, can_reshape);
}

const Type* LoadSNode::Value(PhaseTransform *phase) const {
  Node* mem = in(MemNode::Memory);
  Node* value = can_see_stored_value(mem,phase);
  if (value != NULL && value->is_Con() &&
      !value->bottom_type()->higher_equal(_type)) {
    // If the input to the store does not fit with the load's result type,
    // it must be truncated. We can't delay until Ideal call since
    // a singleton Value is needed for split_thru_phi optimization.
    int con = value->get_int();
    return TypeInt::make((con << 16) >> 16);
  }
  return LoadNode::Value(phase);
}

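Reviewer note: all four Value() methods added above truncate the stored constant to the load's width; the shift pair sign-extends for the signed loads (byte, short) while the mask zero-extends for the unsigned ones. A standalone check of the arithmetic (plain C++; like the VM code, it assumes 32-bit int with wrap-around left shift and arithmetic right shift):

#include <cassert>

int main() {
  int con = 0x1FF;                     // does not fit in a signed byte
  assert(((con << 24) >> 24) == -1);   // LoadB:  sign-extend low 8 bits
  assert((con & 0xFF)        == 255);  // LoadUB: zero-extend low 8 bits

  con = 0x1FFFF;                       // does not fit in 16 bits
  assert((con & 0xFFFF)      == 65535);          // LoadUS: zero-extend
  assert(((con << 16) >> 16) == -1);             // LoadS:  sign-extend
  return 0;
}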
//=============================================================================
//----------------------------LoadKlassNode::make------------------------------
// Polymorphic factory method:
@ -2201,7 +2259,7 @@ Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // unsafe if I have intervening uses...  Also disallowed for StoreCM
  // since they must follow each StoreP operation.  Redundant StoreCMs
  // are eliminated just before matching in final_graph_reshape.
  if (mem->is_Store() && phase->eqv_uncast(mem->in(MemNode::Address), address) &&
  if (mem->is_Store() && mem->in(MemNode::Address)->eqv_uncast(address) &&
      mem->Opcode() != Op_StoreCM) {
    // Looking at a dead closed cycle of memory?
    assert(mem != mem->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
@ -2274,16 +2332,16 @@ Node *StoreNode::Identity( PhaseTransform *phase ) {

  // Load then Store?  Then the Store is useless
  if (val->is_Load() &&
      phase->eqv_uncast( val->in(MemNode::Address), adr ) &&
      phase->eqv_uncast( val->in(MemNode::Memory ), mem ) &&
      val->in(MemNode::Address)->eqv_uncast(adr) &&
      val->in(MemNode::Memory )->eqv_uncast(mem) &&
      val->as_Load()->store_Opcode() == Opcode()) {
    return mem;
  }

  // Two stores in a row of the same value?
  if (mem->is_Store() &&
      phase->eqv_uncast( mem->in(MemNode::Address), adr ) &&
      phase->eqv_uncast( mem->in(MemNode::ValueIn), val ) &&
      mem->in(MemNode::Address)->eqv_uncast(adr) &&
      mem->in(MemNode::ValueIn)->eqv_uncast(val) &&
      mem->Opcode() == Opcode()) {
    return mem;
  }

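Reviewer note: the two Identity rules above now compare addresses and values through casts. The first rule says storing back a value just loaded from the same address and memory state leaves memory unchanged, so the store collapses to its input memory. A minimal source-level illustration of the pattern the rule removes (hypothetical snippet, not HotSpot code):

static int slot;

int load_then_store() {
  int v = slot;   // Load from 'slot'
  slot = v;       // Store of the just-loaded value: a no-op the
                  // optimizer can drop (the "Load then Store" rule)
  return v;
}

int main() {
  slot = 42;
  return load_then_store() == 42 ? 0 : 1;
}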
@ -215,6 +215,7 @@ public:
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};
@ -228,6 +229,7 @@ public:
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};
@ -241,10 +243,25 @@ public:
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadSNode--------------------------------------
// Load a short (16bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
@ -433,19 +450,6 @@ public:
};


//------------------------------LoadSNode--------------------------------------
// Load a short (16bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------StoreNode--------------------------------------
// Store value; requires Store, Address and Value
class StoreNode : public MemNode {

@ -833,8 +833,20 @@ Node* Node::uncast() const {

//---------------------------uncast_helper-------------------------------------
Node* Node::uncast_helper(const Node* p) {
  uint max_depth = 3;
  for (uint i = 0; i < max_depth; i++) {
#ifdef ASSERT
  uint depth_count = 0;
  const Node* orig_p = p;
#endif

  while (true) {
#ifdef ASSERT
    if (depth_count >= K) {
      orig_p->dump(4);
      if (p != orig_p)
        p->dump(1);
    }
    assert(depth_count++ < K, "infinite loop in Node::uncast_helper");
#endif
    if (p == NULL || p->req() != 2) {
      break;
    } else if (p->is_ConstraintCast()) {

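Reviewer note: the rewritten helper drops the fixed depth-3 bound and walks casts until a non-cast node is reached; a debug-only counter dumps the chain and asserts if the walk ever reaches K steps, which would indicate a cycle. The same idiom in a standalone form (illustrative types, not the VM's):

#include <cassert>

struct Link { Link* next; };

// Follow a chain expected to terminate; in debug builds, bound the walk
// and fail loudly instead of hanging on a cyclic structure.
static Link* follow(Link* p) {
  const int K = 1024;
  int depth_count = 0;
  while (p->next != nullptr) {
    assert(depth_count++ < K && "infinite loop in follow");
    p = p->next;
  }
  return p;
}

int main() {
  Link c{nullptr}, b{&c}, a{&b};
  assert(follow(&a) == &c);
  return 0;
}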
@ -429,6 +429,10 @@ protected:

  // Strip away casting.  (It is depth-limited.)
  Node* uncast() const;
  // Return whether two Nodes are equivalent, after stripping casting.
  bool eqv_uncast(const Node* n) const {
    return (this->uncast() == n->uncast());
  }

private:
  static Node* uncast_helper(const Node* n);

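Reviewer note: with eqv_uncast now a Node member, call sites no longer need a PhaseTransform; the commit rewrites phase->eqv_uncast(a, b) to a->eqv_uncast(b) throughout (see the memnode.cpp and subnode.cpp hunks). A reduced model of the API shape (toy types only, not the real Node class):

#include <cassert>

struct Node {
  Node* cast_input = nullptr;            // non-null models a ConstraintCast
  const Node* uncast() const {           // strip casts (depth-limited in the VM)
    const Node* p = this;
    while (p->cast_input != nullptr) p = p->cast_input;
    return p;
  }
  bool eqv_uncast(const Node* n) const { return uncast() == n->uncast(); }
};

int main() {
  Node base;
  Node cast1; cast1.cast_input = &base;
  Node cast2; cast2.cast_input = &base;
  assert(cast1.eqv_uncast(&cast2));      // equivalent after stripping casts
  return 0;
}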
@ -924,10 +924,10 @@ void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
        scval = new ConstantOopWriteValue(tp->is_oopptr()->const_oop()->constant_encoding());
      }

      OptoReg::Name box_reg = BoxLockNode::stack_slot(box_node);
      OptoReg::Name box_reg = BoxLockNode::reg(box_node);
      Location basic_lock = Location::new_stk_loc(Location::normal,_regalloc->reg2offset(box_reg));
      while( !box_node->is_BoxLock() )  box_node = box_node->in(1);
      monarray->append(new MonitorValue(scval, basic_lock, box_node->as_BoxLock()->is_eliminated()));
      bool eliminated = (box_node->is_BoxLock() && box_node->as_BoxLock()->is_eliminated());
      monarray->append(new MonitorValue(scval, basic_lock, eliminated));
    }

    // We dump the object pool first, since deoptimization reads it in first.

@ -1604,7 +1604,16 @@ void Parse::merge_common(Parse::Block* target, int pnum) {
          continue;
        default:                // All normal stuff
          if (phi == NULL) {
            if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
            const JVMState* jvms = map()->jvms();
            if (EliminateNestedLocks &&
                jvms->is_mon(j) && jvms->is_monitor_box(j)) {
              // BoxLock nodes are not commoning.
              // Use old BoxLock node as merged box.
              assert(newin->jvms()->is_monitor_box(j), "sanity");
              // This assert also tests that nodes are BoxLock.
              assert(BoxLockNode::same_slot(n, m), "sanity");
              C->gvn_replace_by(n, m);
            } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
              phi = ensure_phi(j, nophi);
            }
          }

@ -71,14 +71,14 @@ void Parse::do_checkcast() {
  // Throw uncommon trap if class is not loaded or the value we are casting
  // _from_ is not loaded, and value is not null.  If the value _is_ NULL,
  // then the checkcast does nothing.
  const TypeInstPtr *tp = _gvn.type(obj)->isa_instptr();
  if (!will_link || (tp && !tp->is_loaded())) {
  const TypeOopPtr *tp = _gvn.type(obj)->isa_oopptr();
  if (!will_link || (tp && tp->klass() && !tp->klass()->is_loaded())) {
    if (C->log() != NULL) {
      if (!will_link) {
        C->log()->elem("assert_null reason='checkcast' klass='%d'",
                       C->log()->identify(klass));
      }
      if (tp && !tp->is_loaded()) {
      if (tp && tp->klass() && !tp->klass()->is_loaded()) {
        // %%% Cannot happen?
        C->log()->elem("assert_null reason='checkcast source' klass='%d'",
                       C->log()->identify(tp->klass()));

@ -256,11 +256,6 @@ public:
  // For pessimistic optimizations this is simply pointer equivalence.
  bool eqv(const Node* n1, const Node* n2) const { return n1 == n2; }

  // Return whether two Nodes are equivalent, after stripping casting.
  bool eqv_uncast(const Node* n1, const Node* n2) const {
    return eqv(n1->uncast(), n2->uncast());
  }

  // For pessimistic passes, the return type must monotonically narrow.
  // For optimistic passes, the return type must monotonically widen.
  // It is possible to get into a "death march" in either type of pass,

@ -89,32 +89,62 @@ int PhaseChaitin::yank( Node *old, Block *current_block, Node_List *value, Node_
  return blk_adjust;
}

#ifdef ASSERT
static bool expected_yanked_node(Node *old, Node *orig_old) {
  // This code is expected only next original nodes:
  // - load from constant table node which may have next data input nodes:
  //     MachConstantBase, Phi, MachTemp, MachSpillCopy
  // - load constant node which may have next data input nodes:
  //     MachTemp, MachSpillCopy
  // - MachSpillCopy
  // - MachProj and Copy dead nodes
  if (old->is_MachSpillCopy()) {
    return true;
  } else if (old->is_Con()) {
    return true;
  } else if (old->is_MachProj()) { // Dead kills projection of Con node
    return (old == orig_old);
  } else if (old->is_Copy()) { // Dead copy of a callee-save value
    return (old == orig_old);
  } else if (old->is_MachTemp()) {
    return orig_old->is_Con();
  } else if (old->is_Phi() || old->is_MachConstantBase()) {
    return (orig_old->is_Con() && orig_old->is_MachConstant());
  }
  return false;
}
#endif

//------------------------------yank_if_dead-----------------------------------
// Removed an edge from 'old'.  Yank if dead.  Return adjustment counts to
// Removed edges from 'old'.  Yank if dead.  Return adjustment counts to
// iterators in the current block.
int PhaseChaitin::yank_if_dead( Node *old, Block *current_block, Node_List *value, Node_List *regnd ) {
int PhaseChaitin::yank_if_dead_recurse(Node *old, Node *orig_old, Block *current_block,
                                       Node_List *value, Node_List *regnd) {
  int blk_adjust=0;
  while (old->outcnt() == 0 && old != C->top()) {
  if (old->outcnt() == 0 && old != C->top()) {
#ifdef ASSERT
    if (!expected_yanked_node(old, orig_old)) {
      tty->print_cr("==============================================");
      tty->print_cr("orig_old:");
      orig_old->dump();
      tty->print_cr("old:");
      old->dump();
      assert(false, "unexpected yanked node");
    }
    if (old->is_Con())
      orig_old = old; // Reset to satisfy expected nodes checks.
#endif
    blk_adjust += yank(old, current_block, value, regnd);

    Node *tmp = NULL;
    for (uint i = 1; i < old->req(); i++) {
      if (old->in(i)->is_MachTemp()) {
        // handle TEMP inputs
        Node* machtmp = old->in(i);
        if (machtmp->outcnt() == 1) {
          assert(machtmp->unique_out() == old, "sanity");
          blk_adjust += yank(machtmp, current_block, value, regnd);
          machtmp->disconnect_inputs(NULL);
        }
      } else {
        assert(tmp == NULL, "can't handle more non MachTemp inputs");
        tmp = old->in(i);
      Node* n = old->in(i);
      if (n != NULL) {
        old->set_req(i, NULL);
        blk_adjust += yank_if_dead_recurse(n, orig_old, current_block, value, regnd);
      }
    }
    // Disconnect control and remove precedence edges if any exist
    old->disconnect_inputs(NULL);
    if( !tmp ) break;
    old = tmp;
  }
  return blk_adjust;
}

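Reviewer note: yank_if_dead_recurse generalizes the old single-chain while loop. Each input edge is cleared and the same dead test is re-applied to the input, so a whole dead expression tree (a constant load plus its MachTemp inputs, say) unravels in one call. A toy version of that recursion over a use-counted DAG (illustrative names only; the bookkeeping is simplified relative to the VM):

#include <vector>
#include <cassert>

struct N {
  std::vector<N*> in;   // input edges
  int outcnt = 0;       // number of users
  bool yanked = false;
};

static int yank_if_dead_recurse(N* old_n) {
  int removed = 0;
  if (old_n->outcnt == 0 && !old_n->yanked) {
    old_n->yanked = true;       // "yank" the dead node
    removed++;
    for (N* n : old_n->in) {    // detach inputs, then recurse into each
      if (n != nullptr) {
        n->outcnt--;
        removed += yank_if_dead_recurse(n);
      }
    }
    old_n->in.clear();
  }
  return removed;
}

int main() {
  N leaf, mid, top;
  mid.in = {&leaf}; leaf.outcnt = 1;
  top.in = {&mid};  mid.outcnt  = 1;
  top.outcnt = 0;               // top just lost its last user
  assert(yank_if_dead_recurse(&top) == 3); // whole chain unravels
  return 0;
}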
@ -91,7 +91,7 @@ const Type *SubNode::Value( PhaseTransform *phase ) const {

  // Not correct for SubFnode and AddFNode (must check for infinity)
  // Equal?  Subtract is zero
  if (phase->eqv_uncast(in1, in2))  return add_id();
  if (in1->eqv_uncast(in2))  return add_id();

  // Either input is BOTTOM ==> the result is the local BOTTOM
  if( t1 == Type::BOTTOM || t2 == Type::BOTTOM )

@ -48,6 +48,7 @@
#include "oops/typeArrayOop.hpp"
#include "prims/jni.h"
#include "prims/jniCheck.hpp"
#include "prims/jniExport.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
@ -66,6 +67,8 @@
#include "runtime/signature.hpp"
#include "runtime/vm_operations.hpp"
#include "services/runtimeService.hpp"
#include "trace/tracing.hpp"
#include "trace/traceEventTypes.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
@ -5139,6 +5142,11 @@ _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_CreateJavaVM(JavaVM **vm, void **penv, v
    if (JvmtiExport::should_post_thread_life()) {
       JvmtiExport::post_thread_start(thread);
    }

    EVENT_BEGIN(TraceEventThreadStart, event);
    EVENT_COMMIT(event,
        EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(thread->threadObj())));

    // Check if we should compile all classes on bootclasspath
    NOT_PRODUCT(if (CompileTheWorld) ClassLoader::compile_the_world();)
    // Since this is not a JVM_ENTRY we have to set the thread state manually before leaving.
@ -5337,6 +5345,10 @@ static jint attach_current_thread(JavaVM *vm, void **penv, void *_args, bool dae
      JvmtiExport::post_thread_start(thread);
  }

  EVENT_BEGIN(TraceEventThreadStart, event);
  EVENT_COMMIT(event,
      EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(thread->threadObj())));

  *(JNIEnv**)penv = thread->jni_environment();

  // Now leaving the VM, so change thread_state. This is normally automatically taken care
@ -5464,8 +5476,7 @@ jint JNICALL jni_GetEnv(JavaVM *vm, void **penv, jint version) {
    return ret;
  }

  if (JvmtiExport::is_jvmti_version(version)) {
    ret = JvmtiExport::get_jvmti_interface(vm, penv, version);
  if (JniExportedInterface::GetExportedInterface(vm, penv, version, &ret)) {
    return ret;
  }

42
hotspot/src/share/vm/prims/jniExport.hpp
Normal file
42
hotspot/src/share/vm/prims/jniExport.hpp
Normal file
@ -0,0 +1,42 @@
/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_PRIMS_JNI_EXPORT_HPP
#define SHARE_VM_PRIMS_JNI_EXPORT_HPP

#include "prims/jni.h"
#include "prims/jvmtiExport.hpp"

class JniExportedInterface {
 public:
  static bool GetExportedInterface(JavaVM* vm, void** penv, jint version, jint* iface) {
    if (JvmtiExport::is_jvmti_version(version)) {
      *iface = JvmtiExport::get_jvmti_interface(vm, penv, version);
      return true;
    }
    return false;
  }
};

#endif // SHARE_VM_PRIMS_JNI_EXPORT_HPP
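Reviewer note: jni_GetEnv (in the hunk above this file) now routes version probes through this single helper instead of testing JvmtiExport inline: if the requested version names an exported (JVMTI) interface, the helper stores the result and returns true; otherwise the caller falls through to the plain JNI version checks. A standalone mock of that contract (stand-in types and a simplified version mask, not the real JNI/JVMTI headers):

#include <cassert>

typedef int jint;
static const jint JNI_EVERSION = -3;

struct MockExport {
  // Assumption: JVMTI version numbers carry 0x3 in the top nibble,
  // as the real is_jvmti_version check does.
  static bool is_jvmti_version(jint v) { return (v & 0x70000000) == 0x30000000; }
  static bool GetExportedInterface(jint version, jint* iface) {
    if (is_jvmti_version(version)) { *iface = 0; return true; } // "JNI_OK"
    return false;
  }
};

int main() {
  jint ret = JNI_EVERSION;
  if (MockExport::GetExportedInterface(0x30000000, &ret)) {
    assert(ret == 0);                 // JVMTI request handled by the helper
  }
  assert(!MockExport::GetExportedInterface(0x00010006, &ret)); // plain JNI falls through
  return 0;
}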
@ -194,9 +194,6 @@ bool MethodHandles::spot_check_entry_names() {
// MethodHandles::generate_adapters
//
void MethodHandles::generate_adapters() {
#ifdef TARGET_ARCH_NYI_6939861
  if (FLAG_IS_DEFAULT(UseRicochetFrames))  UseRicochetFrames = false;
#endif
  if (!EnableInvokeDynamic || SystemDictionary::MethodHandle_klass() == NULL)  return;

  assert(_adapter_code == NULL, "generate only once");
@ -230,18 +227,6 @@ void MethodHandlesAdapterGenerator::generate() {
}


#ifdef TARGET_ARCH_NYI_6939861
// these defs belong in methodHandles_<arch>.cpp
frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap *map) {
  ShouldNotCallThis();
  return fr;
}
void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* f, const RegisterMap* reg_map) {
  ShouldNotCallThis();
}
#endif //TARGET_ARCH_NYI_6939861


//------------------------------------------------------------------------------
// MethodHandles::ek_supported
//
@ -251,28 +236,11 @@ bool MethodHandles::ek_supported(MethodHandles::EntryKind ek) {
  case _adapter_unused_13:
    return false;  // not defined yet
  case _adapter_prim_to_ref:
    return UseRicochetFrames && conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF);
    return conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF);
  case _adapter_collect_args:
    return UseRicochetFrames && conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS);
    return conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS);
  case _adapter_fold_args:
    return UseRicochetFrames && conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS);
  case _adapter_opt_return_any:
    return UseRicochetFrames;
#ifdef TARGET_ARCH_NYI_6939861
  // ports before 6939861 supported only three kinds of spread ops
  case _adapter_spread_args:
    // restrict spreads to three kinds:
    switch (ek) {
    case _adapter_opt_spread_0:
    case _adapter_opt_spread_1:
    case _adapter_opt_spread_more:
      break;
    default:
      return false;
      break;
    }
    break;
#endif //TARGET_ARCH_NYI_6939861
    return conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS);
  }
  return true;
}
@ -1988,9 +1956,6 @@ void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) {
  case _adapter_prim_to_ref:     // boxer MH to use
  case _adapter_collect_args:    // method handle which collects the args
  case _adapter_fold_args:       // method handle which collects the args
    if (!UseRicochetFrames) {
      { err = "box/collect/fold operators are not supported"; break; }
    }
    if (!java_lang_invoke_MethodHandle::is_instance(argument()))
      { err = "MethodHandle adapter argument required"; break; }
    arg_mtype = Handle(THREAD, java_lang_invoke_MethodHandle::type(argument()));
@ -2370,7 +2335,6 @@ void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnu

  case _adapter_prim_to_ref:
    {
      assert(UseRicochetFrames, "else don't come here");
      // vminfo will be the location to insert the return value
      vminfo = argslot;
      ek_opt = _adapter_opt_collect_ref;
@ -2436,20 +2400,6 @@ void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnu

  case _adapter_spread_args:
    {
#ifdef TARGET_ARCH_NYI_6939861
      // ports before 6939861 supported only three kinds of spread ops
      if (!UseRicochetFrames) {
        int array_size = slots_pushed + 1;
        assert(array_size >= 0, "");
        vminfo = array_size;
        switch (array_size) {
        case 0:  ek_opt = _adapter_opt_spread_0;    break;
        case 1:  ek_opt = _adapter_opt_spread_1;    break;
        default: ek_opt = _adapter_opt_spread_more; break;
        }
        break;
      }
#endif //TARGET_ARCH_NYI_6939861
      // vminfo will be the required length of the array
      int array_size = (slots_pushed + 1) / (type2size[dest] == 2 ? 2 : 1);
      vminfo = array_size;
@ -2494,7 +2444,6 @@ void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnu

  case _adapter_collect_args:
    {
      assert(UseRicochetFrames, "else don't come here");
      int elem_slots = argument_slot_count(java_lang_invoke_MethodHandle::type(argument()));
      // vminfo will be the location to insert the return value
      vminfo = argslot;
@ -2563,7 +2512,6 @@ void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnu

  case _adapter_fold_args:
    {
      assert(UseRicochetFrames, "else don't come here");
      int elem_slots = argument_slot_count(java_lang_invoke_MethodHandle::type(argument()));
      // vminfo will be the location to insert the return value
      vminfo = argslot + elem_slots;

@ -738,46 +738,6 @@ public:
#ifdef TARGET_ARCH_ppc
# include "methodHandles_ppc.hpp"
#endif

#ifdef TARGET_ARCH_NYI_6939861
  // Here are some backward compatible declarations until the 6939861 ports are updated.
  #define _adapter_flyby    (_EK_LIMIT + 10)
  #define _adapter_ricochet (_EK_LIMIT + 11)
  #define _adapter_opt_spread_1    _adapter_opt_spread_1_ref
  #define _adapter_opt_spread_more _adapter_opt_spread_ref
  enum {
    _INSERT_NO_MASK   = -1,
    _INSERT_REF_MASK  = 0,
    _INSERT_INT_MASK  = 1,
    _INSERT_LONG_MASK = 3
  };
  static void get_ek_bound_mh_info(EntryKind ek, BasicType& arg_type, int& arg_mask, int& arg_slots) {
    arg_type = ek_bound_mh_arg_type(ek);
    arg_mask = 0;
    arg_slots = type2size[arg_type];;
  }
  static void get_ek_adapter_opt_swap_rot_info(EntryKind ek, int& swap_bytes, int& rotate) {
    int swap_slots = ek_adapter_opt_swap_slots(ek);
    rotate = ek_adapter_opt_swap_mode(ek);
    swap_bytes = swap_slots * Interpreter::stackElementSize;
  }
  static int get_ek_adapter_opt_spread_info(EntryKind ek) {
    return ek_adapter_opt_spread_count(ek);
  }

  static void insert_arg_slots(MacroAssembler* _masm,
                               RegisterOrConstant arg_slots,
                               int arg_mask,
                               Register argslot_reg,
                               Register temp_reg, Register temp2_reg, Register temp3_reg = noreg);

  static void remove_arg_slots(MacroAssembler* _masm,
                               RegisterOrConstant arg_slots,
                               Register argslot_reg,
                               Register temp_reg, Register temp2_reg, Register temp3_reg = noreg);

  static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
#endif //TARGET_ARCH_NYI_6939861
};


@ -271,13 +271,10 @@ bool AdvancedThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
}

// Create MDO if necessary.
void AdvancedThresholdPolicy::create_mdo(methodHandle mh, TRAPS) {
void AdvancedThresholdPolicy::create_mdo(methodHandle mh, JavaThread* THREAD) {
  if (mh->is_native() || mh->is_abstract() || mh->is_accessor()) return;
  if (mh->method_data() == NULL) {
    methodOopDesc::build_interpreter_method_data(mh, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      CLEAR_PENDING_EXCEPTION;
    }
    methodOopDesc::build_interpreter_method_data(mh, CHECK_AND_CLEAR);
  }
}

@ -426,22 +423,22 @@ CompLevel AdvancedThresholdPolicy::loop_event(methodOop method, CompLevel cur_le
}

// Update the rate and submit compile
void AdvancedThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel level, TRAPS) {
void AdvancedThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread) {
  int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
  update_rate(os::javaTimeMillis(), mh());
  CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", THREAD);
  CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", thread);
}

// Handle the invocation event.
void AdvancedThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
                                                      CompLevel level, nmethod* nm, TRAPS) {
                                                      CompLevel level, nmethod* nm, JavaThread* thread) {
  if (should_create_mdo(mh(), level)) {
    create_mdo(mh, THREAD);
    create_mdo(mh, thread);
  }
  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
    CompLevel next_level = call_event(mh(), level);
    if (next_level != level) {
      compile(mh, InvocationEntryBci, next_level, THREAD);
      compile(mh, InvocationEntryBci, next_level, thread);
    }
  }
}
@ -449,13 +446,13 @@ void AdvancedThresholdPolicy::method_invocation_event(methodHandle mh, methodHan
// Handle the back branch event. Notice that we can compile the method
// with a regular entry from here.
void AdvancedThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
                                                       int bci, CompLevel level, nmethod* nm, TRAPS) {
                                                       int bci, CompLevel level, nmethod* nm, JavaThread* thread) {
  if (should_create_mdo(mh(), level)) {
    create_mdo(mh, THREAD);
    create_mdo(mh, thread);
  }
  // Check if MDO should be created for the inlined method
  if (should_create_mdo(imh(), level)) {
    create_mdo(imh, THREAD);
    create_mdo(imh, thread);
  }

  if (is_compilation_enabled()) {
@ -463,7 +460,7 @@ void AdvancedThresholdPolicy::method_back_branch_event(methodHandle mh, methodHa
    CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
    // At the very least compile the OSR version
    if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_osr_level != level) {
      compile(imh, bci, next_osr_level, THREAD);
      compile(imh, bci, next_osr_level, thread);
    }

    // Use loop event as an opportunity to also check if there's been
@ -502,14 +499,14 @@ void AdvancedThresholdPolicy::method_back_branch_event(methodHandle mh, methodHa
        next_level = CompLevel_full_profile;
      }
      if (cur_level != next_level) {
        compile(mh, InvocationEntryBci, next_level, THREAD);
        compile(mh, InvocationEntryBci, next_level, thread);
      }
    }
  } else {
    cur_level = comp_level(imh());
    next_level = call_event(imh(), cur_level);
    if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_level != cur_level) {
      compile(imh, InvocationEntryBci, next_level, THREAD);
      compile(imh, InvocationEntryBci, next_level, thread);
    }
  }
}

@ -197,7 +197,7 @@ class AdvancedThresholdPolicy : public SimpleThresholdPolicy {
  // determines whether we should do that.
  inline bool should_create_mdo(methodOop method, CompLevel cur_level);
  // Create MDO if necessary.
  void create_mdo(methodHandle mh, TRAPS);
  void create_mdo(methodHandle mh, JavaThread* thread);
  // Is method profiled enough?
  bool is_method_profiled(methodOop method);

@ -208,12 +208,12 @@ protected:
  jlong start_time() const     { return _start_time; }

  // Submit a given method for compilation (and update the rate).
  virtual void submit_compile(methodHandle mh, int bci, CompLevel level, TRAPS);
  virtual void submit_compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread);
  // event() from SimpleThresholdPolicy would call these.
  virtual void method_invocation_event(methodHandle method, methodHandle inlinee,
                                       CompLevel level, nmethod* nm, TRAPS);
                                       CompLevel level, nmethod* nm, JavaThread* thread);
  virtual void method_back_branch_event(methodHandle method, methodHandle inlinee,
                                        int bci, CompLevel level, nmethod* nm, TRAPS);
                                        int bci, CompLevel level, nmethod* nm, JavaThread* thread);
public:
  AdvancedThresholdPolicy() : _start_time(0) { }
  // Select task is called by CompileBroker. We should return a task or NULL.

@ -3164,6 +3164,9 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
  if (!UseBiasedLocking || EmitSync != 0) {
    UseOptoBiasInlining = false;
  }
  if (!EliminateLocks) {
    EliminateNestedLocks = false;
  }
#endif

  if (PrintAssembly && FLAG_IS_DEFAULT(DebugNonSafepoints)) {

@ -306,29 +306,27 @@ bool NonTieredCompPolicy::is_mature(methodOop method) {
  return (current >= initial + target);
}

nmethod* NonTieredCompPolicy::event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) {
nmethod* NonTieredCompPolicy::event(methodHandle method, methodHandle inlinee, int branch_bci,
                                    int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread) {
  assert(comp_level == CompLevel_none, "This should be only called from the interpreter");
  NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci));
  if (JvmtiExport::can_post_interpreter_events()) {
    assert(THREAD->is_Java_thread(), "Wrong type of thread");
    if (((JavaThread*)THREAD)->is_interp_only_mode()) {
      // If certain JVMTI events (e.g. frame pop event) are requested then the
      // thread is forced to remain in interpreted code. This is
      // implemented partly by a check in the run_compiled_code
      // section of the interpreter whether we should skip running
      // compiled code, and partly by skipping OSR compiles for
      // interpreted-only threads.
      if (bci != InvocationEntryBci) {
        reset_counter_for_back_branch_event(method);
        return NULL;
      }
  if (JvmtiExport::can_post_interpreter_events() && thread->is_interp_only_mode()) {
    // If certain JVMTI events (e.g. frame pop event) are requested then the
    // thread is forced to remain in interpreted code. This is
    // implemented partly by a check in the run_compiled_code
    // section of the interpreter whether we should skip running
    // compiled code, and partly by skipping OSR compiles for
    // interpreted-only threads.
    if (bci != InvocationEntryBci) {
      reset_counter_for_back_branch_event(method);
      return NULL;
    }
  }
  if (bci == InvocationEntryBci) {
    // when code cache is full, compilation gets switched off, UseCompiler
    // is set to false
    if (!method->has_compiled_code() && UseCompiler) {
      method_invocation_event(method, CHECK_NULL);
      method_invocation_event(method, thread);
    } else {
      // Force counter overflow on method entry, even if no compilation
      // happened.  (The method_invocation_event call does this also.)
@ -344,7 +342,7 @@ nmethod* NonTieredCompPolicy::event(methodHandle method, methodHandle inlinee, i
    NOT_PRODUCT(trace_osr_request(method, osr_nm, bci));
    // when code cache is full, we should not compile any more...
    if (osr_nm == NULL && UseCompiler) {
      method_back_branch_event(method, bci, CHECK_NULL);
      method_back_branch_event(method, bci, thread);
      osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
    }
    if (osr_nm == NULL) {
@ -395,7 +393,7 @@ void NonTieredCompPolicy::trace_osr_request(methodHandle method, nmethod* osr, i

// SimpleCompPolicy - compile current method

void SimpleCompPolicy::method_invocation_event( methodHandle m, TRAPS) {
void SimpleCompPolicy::method_invocation_event(methodHandle m, JavaThread* thread) {
  int hot_count = m->invocation_count();
  reset_counter_for_invocation_event(m);
  const char* comment = "count";
@ -405,18 +403,18 @@ void SimpleCompPolicy::method_invocation_event( methodHandle m, TRAPS) {
    if (nm == NULL ) {
      const char* comment = "count";
      CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_highest_tier,
                                    m, hot_count, comment, CHECK);
                                    m, hot_count, comment, thread);
    }
  }
}

void SimpleCompPolicy::method_back_branch_event(methodHandle m, int bci, TRAPS) {
void SimpleCompPolicy::method_back_branch_event(methodHandle m, int bci, JavaThread* thread) {
  int hot_count = m->backedge_count();
  const char* comment = "backedge_count";

  if (is_compilation_enabled() && !m->is_not_osr_compilable() && can_be_compiled(m)) {
    CompileBroker::compile_method(m, bci, CompLevel_highest_tier,
                                  m, hot_count, comment, CHECK);
                                  m, hot_count, comment, thread);
    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true));)
  }
}
@ -427,14 +425,13 @@ const char* StackWalkCompPolicy::_msg = NULL;


// Consider m for compilation
void StackWalkCompPolicy::method_invocation_event(methodHandle m, TRAPS) {
void StackWalkCompPolicy::method_invocation_event(methodHandle m, JavaThread* thread) {
  int hot_count = m->invocation_count();
  reset_counter_for_invocation_event(m);
  const char* comment = "count";

  if (is_compilation_enabled() && m->code() == NULL && can_be_compiled(m)) {
    ResourceMark rm(THREAD);
    JavaThread *thread = (JavaThread*)THREAD;
    ResourceMark rm(thread);
    frame fr = thread->last_frame();
    assert(fr.is_interpreted_frame(), "must be interpreted");
    assert(fr.interpreter_frame_method() == m(), "bad method");
@ -461,17 +458,17 @@ void StackWalkCompPolicy::method_invocation_event(methodHandle m, TRAPS) {
    assert(top != NULL, "findTopInlinableFrame returned null");
    if (TraceCompilationPolicy) top->print();
    CompileBroker::compile_method(top->top_method(), InvocationEntryBci, CompLevel_highest_tier,
                                  m, hot_count, comment, CHECK);
                                  m, hot_count, comment, thread);
  }
}

void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int bci, TRAPS) {
void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int bci, JavaThread* thread) {
  int hot_count = m->backedge_count();
  const char* comment = "backedge_count";

  if (is_compilation_enabled() && !m->is_not_osr_compilable() && can_be_compiled(m)) {
    CompileBroker::compile_method(m, bci, CompLevel_highest_tier, m, hot_count, comment, CHECK);
    CompileBroker::compile_method(m, bci, CompLevel_highest_tier, m, hot_count, comment, thread);

    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true));)
  }

@ -64,7 +64,7 @@ public:
  virtual int compiler_count(CompLevel comp_level) = 0;
  // main notification entry, return a pointer to an nmethod if the OSR is required,
  // returns NULL otherwise.
  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) = 0;
  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread) = 0;
  // safepoint() is called at the end of the safepoint
  virtual void do_safepoint_work() = 0;
  // reprofile request
@ -105,15 +105,15 @@ public:
  virtual bool is_mature(methodOop method);
  virtual void initialize();
  virtual CompileTask* select_task(CompileQueue* compile_queue);
  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS);
  virtual void method_invocation_event(methodHandle m, TRAPS) = 0;
  virtual void method_back_branch_event(methodHandle m, int bci, TRAPS) = 0;
  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread);
  virtual void method_invocation_event(methodHandle m, JavaThread* thread) = 0;
  virtual void method_back_branch_event(methodHandle m, int bci, JavaThread* thread) = 0;
};

class SimpleCompPolicy : public NonTieredCompPolicy {
 public:
  virtual void method_invocation_event(methodHandle m, TRAPS);
  virtual void method_back_branch_event(methodHandle m, int bci, TRAPS);
  virtual void method_invocation_event(methodHandle m, JavaThread* thread);
  virtual void method_back_branch_event(methodHandle m, int bci, JavaThread* thread);
};

// StackWalkCompPolicy - existing C2 policy
@ -121,8 +121,8 @@ class SimpleCompPolicy : public NonTieredCompPolicy {
#ifdef COMPILER2
class StackWalkCompPolicy : public NonTieredCompPolicy {
 public:
  virtual void method_invocation_event(methodHandle m, TRAPS);
  virtual void method_back_branch_event(methodHandle m, int bci, TRAPS);
  virtual void method_invocation_event(methodHandle m, JavaThread* thread);
  virtual void method_back_branch_event(methodHandle m, int bci, JavaThread* thread);

 private:
  RFrame* findTopInlinableFrame(GrowableArray<RFrame*>* stack);

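Reviewer note: the compilation-policy hunks above systematically replace the TRAPS/CHECK exception-plumbing macros with an explicit JavaThread* parameter, since these notification paths neither throw nor check pending exceptions; callers now pass the thread straight through instead of routing it via THREAD. A reduced model of the refactor (toy types; the real TRAPS machinery is only sketched in comments):

#include <cassert>

struct JavaThread { bool interp_only = false; };

// Before: void method_invocation_event(methodHandle m, TRAPS);
//   TRAPS expands to a trailing "JavaThread* THREAD" plus exception
//   bookkeeping, and calls must go through CHECK macros.
// After: the thread is an ordinary parameter on a path that never throws.
static int compiled = 0;

static void submit_compile(JavaThread* thread) {
  if (!thread->interp_only) compiled++;
}

static void method_invocation_event(JavaThread* thread) {
  submit_compile(thread);   // pass 'thread' through, no CHECK needed
}

int main() {
  JavaThread t;
  method_invocation_event(&t);
  assert(compiled == 1);
  return 0;
}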
@ -211,7 +211,7 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
#ifdef COMPILER2
  // Reallocate the non-escaping objects and restore their fields. Then
  // relock objects if synchronization on them was eliminated.
  if (DoEscapeAnalysis) {
  if (DoEscapeAnalysis || EliminateNestedLocks) {
    if (EliminateAllocations) {
      assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames");
      GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();

@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -1315,7 +1315,6 @@ bool frame::verify_return_pc(address x) {
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef ASSERT
|
||||
void frame::interpreter_frame_verify_monitor(BasicObjectLock* value) const {
|
||||
assert(is_interpreted_frame(), "Not an interpreted frame");
|
||||
@ -1331,27 +1330,35 @@ void frame::interpreter_frame_verify_monitor(BasicObjectLock* value) const {
|
||||
guarantee((current - low_mark) % monitor_size == 0 , "Misaligned bottom of BasicObjectLock*");
|
||||
guarantee( current >= low_mark , "Current BasicObjectLock* below than low_mark");
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
#ifndef PRODUCT
|
||||
void frame::describe(FrameValues& values, int frame_no) {
|
||||
// boundaries: sp and the 'real' frame pointer
|
||||
values.describe(-1, sp(), err_msg("sp for #%d", frame_no), 1);
|
||||
intptr_t* frame_pointer = real_fp(); // Note: may differ from fp()
|
||||
|
||||
// print frame info at the highest boundary
|
||||
intptr_t* info_address = MAX2(sp(), frame_pointer);
|
||||
|
||||
if (info_address != frame_pointer) {
|
||||
// print frame_pointer explicitly if not marked by the frame info
|
||||
values.describe(-1, frame_pointer, err_msg("frame pointer for #%d", frame_no), 1);
|
||||
}
|
||||
|
||||
if (is_entry_frame() || is_compiled_frame() || is_interpreted_frame() || is_native_frame()) {
|
||||
// Label values common to most frames
|
||||
values.describe(-1, unextended_sp(), err_msg("unextended_sp for #%d", frame_no));
|
||||
values.describe(-1, sp(), err_msg("sp for #%d", frame_no));
|
||||
if (is_compiled_frame()) {
|
||||
values.describe(-1, sp() + _cb->frame_size(), err_msg("computed fp for #%d", frame_no));
|
||||
} else {
|
||||
values.describe(-1, fp(), err_msg("fp for #%d", frame_no));
|
||||
}
|
||||
}
|
||||
|
||||
if (is_interpreted_frame()) {
|
||||
methodOop m = interpreter_frame_method();
|
||||
int bci = interpreter_frame_bci();
|
||||
|
||||
// Label the method and current bci
|
||||
values.describe(-1, MAX2(sp(), fp()),
|
||||
values.describe(-1, info_address,
|
||||
FormatBuffer<1024>("#%d method %s @ %d", frame_no, m->name_and_sig_as_C_string(), bci), 2);
|
||||
values.describe(-1, MAX2(sp(), fp()),
|
||||
values.describe(-1, info_address,
|
||||
err_msg("- %d locals %d max stack", m->max_locals(), m->max_stack()), 1);
|
||||
if (m->max_locals() > 0) {
|
||||
intptr_t* l0 = interpreter_frame_local_at(0);
|
||||
@@ -1383,21 +1390,36 @@ void frame::describe(FrameValues& values, int frame_no) {
    }
  } else if (is_entry_frame()) {
    // For now just label the frame
-    values.describe(-1, MAX2(sp(), fp()), err_msg("#%d entry frame", frame_no), 2);
+    values.describe(-1, info_address, err_msg("#%d entry frame", frame_no), 2);
  } else if (is_compiled_frame()) {
    // For now just label the frame
    nmethod* nm = cb()->as_nmethod_or_null();
-    values.describe(-1, MAX2(sp(), fp()),
+    values.describe(-1, info_address,
                    FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method %s%s", frame_no,
                                       nm, nm->method()->name_and_sig_as_C_string(),
-                                      is_deoptimized_frame() ? " (deoptimized" : ""), 2);
+                                      (_deopt_state == is_deoptimized) ?
+                                      " (deoptimized)" :
+                                      ((_deopt_state == unknown) ? " (state unknown)" : "")),
+                   2);
  } else if (is_native_frame()) {
    // For now just label the frame
    nmethod* nm = cb()->as_nmethod_or_null();
-    values.describe(-1, MAX2(sp(), fp()),
+    values.describe(-1, info_address,
                    FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for native method %s", frame_no,
                                       nm, nm->method()->name_and_sig_as_C_string()), 2);
+  } else if (is_ricochet_frame()) {
+    values.describe(-1, info_address, err_msg("#%d ricochet frame", frame_no), 2);
+  } else {
+    // provide default info if not handled before
+    char *info = (char *) "special frame";
+    if ((_cb != NULL) &&
+        (_cb->name() != NULL)) {
+      info = (char *)_cb->name();
+    }
+    values.describe(-1, info_address, err_msg("#%d <%s>", frame_no, info), 2);
  }
+
+  // platform dependent additional data
+  describe_pd(values, frame_no);
 }

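The multi-line replacement above distinguishes three deoptimization states rather than two, and in passing fixes the old label text, which had dropped the closing parenthesis in " (deoptimized". Restated as a standalone helper (the enum mirrors the deopt_state enum in frame.hpp; the helper itself is hypothetical):

#include <cstdio>

// Mirrors HotSpot's frame.hpp enum; the helper is illustrative only.
enum deopt_state { not_deoptimized, is_deoptimized, unknown };

const char* deopt_suffix(deopt_state s) {
    return (s == is_deoptimized) ? " (deoptimized)"
         : (s == unknown)        ? " (state unknown)"
                                 : "";
}

int main() {
    std::printf("#1 nmethod for method Foo.bar()V%s\n",
                deopt_suffix(unknown)); // appends " (state unknown)"
}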
@@ -1414,7 +1436,7 @@ StackFrameStream::StackFrameStream(JavaThread *thread, bool update) : _reg_map(t
 }


-#ifdef ASSERT
+#ifndef PRODUCT

 void FrameValues::describe(int owner, intptr_t* location, const char* description, int priority) {
   FrameValue fv;
@@ -1427,6 +1449,7 @@ void FrameValues::describe(int owner, intptr_t* location, const char* descriptio
 }


+#ifdef ASSERT
 void FrameValues::validate() {
   _values.sort(compare);
   bool error = false;
@@ -1452,7 +1475,7 @@ void FrameValues::validate() {
   }
   assert(!error, "invalid layout");
 }
-
+#endif // ASSERT

 void FrameValues::print(JavaThread* thread) {
   _values.sort(compare);
@@ -1501,4 +1524,4 @@ void FrameValues::print(JavaThread* thread) {
   }
 }

-#endif
+#endif // ndef PRODUCT
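Taken together, these hunks narrow validate() to debug builds while keeping describe() and print() available in all non-product builds. A self-contained sketch of the collect-sort-validate-print protocol, with simplified stand-in types (not HotSpot's FrameValues; priorities omitted):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

// Simplified stand-ins for FrameValue/FrameValues.
struct FrameValueSketch {
    intptr_t*   location;    // stack slot being annotated
    const char* description; // label to print for that slot
    int         owner;       // frame number claiming the slot, -1 = shared
};

struct FrameValuesSketch {
    std::vector<FrameValueSketch> values;

    void describe(int owner, intptr_t* location, const char* description) {
        values.push_back(FrameValueSketch{location, description, owner});
    }

    // Debug-only in the real code (#ifdef ASSERT): after sorting by
    // address, entries landing on the same slot must not be claimed
    // by two different frames.
    void validate() {
        std::sort(values.begin(), values.end(),
                  [](const FrameValueSketch& a, const FrameValueSketch& b) {
                      return a.location < b.location;
                  });
        for (size_t i = 1; i < values.size(); i++) {
            if (values[i].location == values[i - 1].location) {
                assert(values[i].owner == -1 || values[i - 1].owner == -1 ||
                       values[i].owner == values[i - 1].owner);
            }
        }
    }

    void print() {
        for (const FrameValueSketch& v : values) {
            std::printf("%p: %s\n", (void*)v.location, v.description);
        }
    }
};

int main() {
    intptr_t slots[4] = {0};
    FrameValuesSketch values;
    values.describe(-1, &slots[2], "sp for #0");
    values.describe(0,  &slots[3], "fp for #0");
    values.validate();
    values.print();
}

In HotSpot itself the validation also catches overlap between adjacent frames; the sketch keeps only the flavor of the check.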
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -221,6 +221,15 @@ class frame VALUE_OBJ_CLASS_SPEC {
   // returns the stack pointer of the calling frame
   intptr_t* sender_sp() const;

+  // Returns the real 'frame pointer' for the current frame.
+  // This is the value expected by the platform ABI when it defines a
+  // frame pointer register. It may differ from the effective value of
+  // the FP register when that register is used in the JVM for other
+  // purposes (like compiled frames on some platforms).
+  // On other platforms, it is defined so that the stack area used by
+  // this frame goes from real_fp() to sp().
+  intptr_t* real_fp() const;
+
   // Deoptimization info, if needed (platform dependent).
   // Stored in the initial_info field of the unroll info, to be used by
   // the platform dependent deoptimization blobs.
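The new comment pins down both definitions of real_fp(). For the second case, where there is no usable frame-pointer register, the relationship is simple enough to state in code; a hypothetical helper, assuming a downward-growing stack and a known frame size (neither name exists in HotSpot):

#include <cstddef>
#include <cstdint>

// Hypothetical: on a platform without a dedicated frame-pointer
// register, the 'real' frame pointer is one word past the frame, so
// the frame's stack area is exactly [sp, real_fp).
intptr_t* real_fp_of(intptr_t* sp, size_t frame_size_in_words) {
    return sp + frame_size_in_words;
}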
@@ -485,7 +494,7 @@ class frame VALUE_OBJ_CLASS_SPEC {

 };

-#ifdef ASSERT
+#ifndef PRODUCT
 // A simple class to describe a location on the stack
 class FrameValue VALUE_OBJ_CLASS_SPEC {
  public:
@@ -515,7 +524,9 @@ class FrameValues {
   // Used by frame functions to describe locations.
   void describe(int owner, intptr_t* location, const char* description, int priority = 0);

+#ifdef ASSERT
   void validate();
+#endif
   void print(JavaThread* thread);
 };

@@ -3574,7 +3574,7 @@ class CommandLineFlags {
           "Threshold at which tier 3 compilation is invoked (invocation "    \
           "minimum must be satisfied.")                                      \
                                                                              \
-  product(intx, Tier3BackEdgeThreshold, 7000,                                \
+  product(intx, Tier3BackEdgeThreshold, 60000,                               \
           "Back edge threshold at which tier 3 OSR compilation is invoked")  \
                                                                              \
   product(intx, Tier4InvocationThreshold, 5000,                              \
@@ -3826,10 +3826,6 @@ class CommandLineFlags {
   develop(bool, StressMethodHandleWalk, false,                               \
           "Process all method handles with MethodHandleWalk")                \
                                                                              \
-  diagnostic(bool, UseRicochetFrames, true,                                  \
-          "use ricochet stack frames for method handle combination, "        \
-          "if the platform supports them")                                   \
-                                                                             \
   experimental(bool, TrustFinalNonStaticFields, false,                       \
           "trust final non-static declarations for constant folding")        \
                                                                              \

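The Tier3BackEdgeThreshold bump (7000 to 60000) makes tier-3 OSR compilation kick in far later. A hedged sketch of how a back-edge counter gates OSR (grossly simplified next to HotSpot's threshold policies; every name here except the flag is hypothetical):

#include <cstdio>

const long Tier3BackEdgeThreshold = 60000; // new default from this diff

// Hypothetical interpreter hook: called on every taken loop back edge.
void on_back_edge(long& backedge_count, int loop_head_bci) {
    if (++backedge_count >= Tier3BackEdgeThreshold) {
        // Request an on-stack-replacement (OSR) compile so the hot loop
        // can continue in compiled code without re-entering the method.
        std::printf("request tier-3 OSR compile at bci %d\n", loop_head_bci);
        backedge_count = 0; // hand off to the compiler's own policy
    }
}

int main() {
    long count = 0;
    for (int i = 0; i < 120000; i++) on_back_edge(count, 42);
}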
@@ -57,6 +57,8 @@
 #include "runtime/task.hpp"
 #include "runtime/timer.hpp"
 #include "runtime/vm_operations.hpp"
+#include "trace/tracing.hpp"
+#include "trace/traceEventTypes.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/histogram.hpp"
@@ -502,6 +504,11 @@ void before_exit(JavaThread * thread) {
   if (JvmtiExport::should_post_thread_life()) {
     JvmtiExport::post_thread_end(thread);
   }

+  EVENT_BEGIN(TraceEventThreadEnd, event);
+  EVENT_COMMIT(event,
+      EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(thread->threadObj())));
+
   // Always call even when there are not JVMTI environments yet, since environments
   // may be attached late and JVMTI must track phases of VM execution
   JvmtiExport::post_vm_death();
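EVENT_BEGIN/EVENT_SET/EVENT_COMMIT come from the newly included trace headers, and when tracing is not compiled in they collapse to nothing. A plausible reconstruction of their shape, hedged: the real definitions live in trace/traceMacros.hpp and may differ in detail.

// Hypothetical reconstruction; INCLUDE_TRACE is the define HotSpot
// uses to compile tracing support in.
#ifdef INCLUDE_TRACE
#define EVENT_BEGIN(type, name)       type name;
#define EVENT_SET(name, field, value) name.set_##field(value);
#define EVENT_COMMIT(name, ...)       { __VA_ARGS__; name.commit(); }
#else
#define EVENT_BEGIN(type, name)
#define EVENT_SET(name, field, value)
#define EVENT_COMMIT(name, ...)
#endif

Under these shapes, the before_exit() lines declare a TraceEventThreadEnd, set its javalangthread field, and commit it; with tracing off the whole block disappears, including the EVENT_SET nested inside EVENT_COMMIT's arguments.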
@@ -134,6 +134,12 @@ Monitor* GCTaskManager_lock = NULL;

 Mutex*   Management_lock = NULL;
 Monitor* Service_lock = NULL;
+Mutex*   Stacktrace_lock = NULL;
+
+Monitor* JfrQuery_lock = NULL;
+Monitor* JfrMsg_lock = NULL;
+Mutex*   JfrBuffer_lock = NULL;
+Mutex*   JfrStream_lock = NULL;

 #define MAX_NUM_MUTEX 128
 static Monitor * _mutex_array[MAX_NUM_MUTEX];
@@ -209,6 +215,7 @@ void mutex_init() {
   def(Patching_lock              , Mutex  , special,     true ); // used for safepointing and code patching.
   def(ObjAllocPost_lock          , Monitor, special,     false);
   def(Service_lock               , Monitor, special,     true ); // used for service thread operations
+  def(Stacktrace_lock            , Mutex,   special,     true ); // used for JFR stacktrace database
   def(JmethodIdCreation_lock     , Mutex  , leaf,        true ); // used for creating jmethodIDs.

   def(SystemDictionary_lock      , Monitor, leaf,        true ); // lookups done by VM thread
@@ -273,6 +280,11 @@ void mutex_init() {
   def(Debug3_lock                , Mutex  , nonleaf+4,   true );
   def(ProfileVM_lock             , Monitor, nonleaf+4,   false); // used for profiling of the VMThread
   def(CompileThread_lock         , Monitor, nonleaf+5,   false);
+
+  def(JfrQuery_lock              , Monitor, nonleaf,     true);  // JFR locks, keep these in consecutive order
+  def(JfrMsg_lock                , Monitor, nonleaf+2,   true);
+  def(JfrBuffer_lock             , Mutex,   nonleaf+3,   true);
+  def(JfrStream_lock             , Mutex,   nonleaf+4,   true);
 }

 GCMutexLocker::GCMutexLocker(Monitor * mutex) {
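The ranks given to the four JFR locks encode nesting: in this era of HotSpot, debug builds assert that a thread only acquires a lock ranked strictly below every lock it already holds, so the higher-ranked JfrStream_lock must be the outermost of the group. A standalone sketch of that rank check (simplified stand-ins, not HotSpot's Mutex):

#include <cassert>
#include <vector>

struct RankedMutex { int rank; };

// Per-thread stack of ranks currently held; since acquisition must be
// in strictly decreasing rank order, the back is always the minimum.
thread_local std::vector<int> held_ranks;

void acquire(const RankedMutex& m) {
    assert((held_ranks.empty() || m.rank < held_ranks.back()) &&
           "lock rank order violated -- possible deadlock");
    held_ranks.push_back(m.rank);
    // ... take the underlying OS lock here ...
}

void release(const RankedMutex& m) {
    assert(!held_ranks.empty() && held_ranks.back() == m.rank);
    held_ranks.pop_back();
    // ... release the underlying OS lock here ...
}

int main() {
    const int nonleaf = 100; // arbitrary base rank for the sketch
    RankedMutex JfrStream{nonleaf + 4}, JfrBuffer{nonleaf + 3};
    acquire(JfrStream);  // higher rank first: outermost lock
    acquire(JfrBuffer);  // lower rank nested inside: OK
    release(JfrBuffer);
    release(JfrStream);
}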
@@ -136,6 +136,12 @@ extern Mutex* HotCardCache_lock; // protects the hot card cache

 extern Mutex*   Management_lock;   // a lock used to serialize JVM management
 extern Monitor* Service_lock;      // a lock used for service thread operation
+extern Mutex*   Stacktrace_lock;   // used to guard access to the stacktrace table
+
+extern Monitor* JfrQuery_lock;     // protects JFR use
+extern Monitor* JfrMsg_lock;       // protects JFR messaging
+extern Mutex*   JfrBuffer_lock;    // protects JFR buffer operations
+extern Mutex*   JfrStream_lock;    // protects JFR stream access

 // A MutexLocker provides mutual exclusion with respect to a given mutex
 // for the scope which contains the locker. The lock is an OS lock, not
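The MutexLocker named in that comment is scope-based. A hedged usage sketch with one of the new locks, using std::lock_guard as a stand-in for HotSpot's MutexLocker (the guarded counter is invented for illustration; only the lock name comes from the diff):

#include <mutex>

std::mutex JfrBuffer_lock_sketch; // stand-in for JfrBuffer_lock
int buffered_events = 0;          // hypothetical state the lock guards

void append_event() {
    std::lock_guard<std::mutex> ml(JfrBuffer_lock_sketch);
    buffered_events++; // mutate guarded state
} // unlocked automatically when ml goes out of scope

int main() {
    append_event();
}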
Some files were not shown because too many files have changed in this diff.