BEGIN WARNING: At this time (Spring/Summer 2010) JDK 7 is starting a transition to
@@ -971,14 +972,13 @@ So for now you should be able to build with either VS2003 or VS2010.
We do not guarantee that VS2008 will work, although there is sufficient
makefile support to make at least basic JDK builds plausible.
Visual Studio 2010 Express compilers are now able to build all the
-open source repositories, but this is 32 bit only, since
-we have not yet seen the 7.1 Windows SDK with the 64 bit
-compilers. END WARNING.
+open source repositories, but this is 32 bit only. To build 64 bit
+Windows binaries, use the 7.1 Windows SDK. END WARNING.
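A quick way to confirm which compiler a given command prompt will pick
up is to run cl with no arguments and check the target named in its
banner. This is only an illustrative sanity check, not part of the
documented build steps, and the exact banner wording may vary between
compiler releases:

    rem Illustrative check only; banner wording may differ by release.
    cl
    rem A 32 bit VS2010 Express prompt reports something like:
    rem   Microsoft (R) 32-bit C/C++ Optimizing Compiler ... for 80x86
    rem A 64 bit Windows 7.1 SDK prompt reports something like:
    rem   Microsoft (R) C/C++ Optimizing Compiler ... for x64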
The 32-bit OpenJDK Windows build
requires
Microsoft Visual Studio C++ 2010 (VS2010) Professional
- Edition compiler.
+ Edition or Express compiler.
The compiler and other tools are expected to reside
in the location defined by the variable
VS100COMNTOOLS which
@@ -1001,14 +1001,33 @@ compilers. END WARNING.
The path /usr/bin must be after the path to the
Visual Studio product.
- On X64, the set up is much the same in VS2010
+      For X64 builds, when using the VS2010 Professional
+      compiler, the 64 bit setup is much the same as the 32 bit setup,
except that you run amd64\VCVARS64.BAT
to set the compiler environment variables.
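For example, from a plain command prompt the 64 bit environment can be
set by calling that script through the VS100COMNTOOLS variable. This is
only a sketch and assumes the usual VS2010 Professional layout, with
VS100COMNTOOLS ending in a trailing backslash as it normally does:

    rem Sketch only; adjust the relative path if your layout differs.
    call "%VS100COMNTOOLS%..\..\VC\bin\amd64\VCVARS64.BAT"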
- Previously 64 builds had used the 64 bit compiler in
- an unbundled Windows SDK but this is no longer necessary.
+      Previously 64 bit builds used the 64 bit compiler in
+      an unbundled Windows SDK, but this is no longer necessary if
+      you have VS2010 Professional.
+      What was tested was directly setting up LIB, INCLUDE and
+      PATH based on the installation directories, using the
+      DOS short names appropriate for the system (you will need
+      to set them for your own installation, not just blindly copy this), e.g.:
+
+ set VSINSTALLDIR=c:\PROGRA~2\MICROS~1.0
+ set WindowsSdkDir=c:\PROGRA~1\MICROS~1\Windows\v7.1
+ set PATH=%VSINSTALLDIR%\vc\bin\amd64;%VSINSTALLDIR%\Common7\IDE;%WindowsSdkDir%\bin;%PATH%
+ set INCLUDE=%VSINSTALLDIR%\vc\include;%WindowsSdkDir%\include
+ set LIB=%VSINSTALLDIR%\vc\lib\amd64;%WindowsSdkDir%\lib\x64
+
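After setting the variables it may help to confirm that the 64 bit
compiler and SDK import libraries are the ones actually found. The
following is only a sketch under the same assumptions as the example
above; the paths on your system will differ:

    rem Sketch only: sanity-check the 64 bit tool chain.
    where cl
    cl 2>&1 | findstr /i "x64"
    if not exist "%WindowsSdkDir%\lib\x64\kernel32.lib" echo 64 bit SDK libraries not found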
diff --git a/corba/.hgtags b/corba/.hgtags
index ee410d0fdcf..dc12ef037cf 100644
--- a/corba/.hgtags
+++ b/corba/.hgtags
@@ -71,3 +71,5 @@ bcd2fc089227559ac5be927923609fac29f067fa jdk7-b91
533c11186b44e3a02d6c5fe69a73260505fcfe5e jdk7-b94
06dbf406818c789bb586c1de4c002024cd26ecd2 jdk7-b95
edc2a2659c77dabc55cb55bb617bad89e3a05bb3 jdk7-b96
+4ec9d59374caa1e5d72fa802291b4d66955a4936 jdk7-b97
+3b99409057e4c255da946f9f540d051a5ef4ab23 jdk7-b98
diff --git a/hotspot/.hgtags b/hotspot/.hgtags
index c56e818cbeb..c1b84f69bd2 100644
--- a/hotspot/.hgtags
+++ b/hotspot/.hgtags
@@ -100,3 +100,5 @@ d38f45079fe98792a7381dbb4b64f5b589ec8c58 jdk7-b94
91d861ba858daca645993a1ab6ba2fa06a8f4a5b jdk7-b95
573e8ea5fd68e8e51eb6308d283ac3b3889d15e0 jdk7-b96
573e8ea5fd68e8e51eb6308d283ac3b3889d15e0 hs19-b02
+5f42499e57adc16380780f40541e1a66cd601891 jdk7-b97
+8a045b3f5c13eaad92ff4baf15ca671845fcad1a jdk7-b98
diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java
index 2c7b98b36f1..be3e853882c 100644
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java
@@ -42,8 +42,6 @@ public class CodeBlob extends VMObject {
private static CIntegerField instructionsOffsetField;
private static CIntegerField frameCompleteOffsetField;
private static CIntegerField dataOffsetField;
- private static CIntegerField oopsOffsetField;
- private static CIntegerField oopsLengthField;
private static CIntegerField frameSizeField;
private static AddressField oopMapsField;
@@ -72,8 +70,6 @@ public class CodeBlob extends VMObject {
frameCompleteOffsetField = type.getCIntegerField("_frame_complete_offset");
instructionsOffsetField = type.getCIntegerField("_instructions_offset");
dataOffsetField = type.getCIntegerField("_data_offset");
- oopsOffsetField = type.getCIntegerField("_oops_offset");
- oopsLengthField = type.getCIntegerField("_oops_length");
frameSizeField = type.getCIntegerField("_frame_size");
oopMapsField = type.getAddressField("_oop_maps");
@@ -131,19 +127,10 @@ public class CodeBlob extends VMObject {
return headerBegin().addOffsetTo(sizeField.getValue(addr));
}
- public Address oopsBegin() {
- return headerBegin().addOffsetTo(oopsOffsetField.getValue(addr));
- }
-
- public Address oopsEnd() {
- return oopsBegin().addOffsetTo(getOopsLength());
- }
-
// Offsets
public int getRelocationOffset() { return (int) headerSizeField.getValue(addr); }
public int getInstructionsOffset() { return (int) instructionsOffsetField.getValue(addr); }
public int getDataOffset() { return (int) dataOffsetField.getValue(addr); }
- public int getOopsOffset() { return (int) oopsOffsetField.getValue(addr); }
// Sizes
public int getSize() { return (int) sizeField.getValue(addr); }
@@ -157,19 +144,9 @@ public class CodeBlob extends VMObject {
// FIXME: add relocationContains
public boolean instructionsContains(Address addr) { return instructionsBegin().lessThanOrEqual(addr) && instructionsEnd().greaterThan(addr); }
public boolean dataContains(Address addr) { return dataBegin().lessThanOrEqual(addr) && dataEnd().greaterThan(addr); }
- public boolean oopsContains(Address addr) { return oopsBegin().lessThanOrEqual(addr) && oopsEnd().greaterThan(addr); }
public boolean contains(Address addr) { return instructionsContains(addr); }
public boolean isFrameCompleteAt(Address a) { return instructionsContains(a) && a.minus(instructionsBegin()) >= frameCompleteOffsetField.getValue(addr); }
- /** Support for oops in scopes and relocs. Note: index 0 is reserved for null. */
- public OopHandle getOopAt(int index) {
- if (index == 0) return null;
- if (Assert.ASSERTS_ENABLED) {
- Assert.that(index > 0 && index <= getOopsLength(), "must be a valid non-zero index");
- }
- return oopsBegin().getOopHandleAt((index - 1) * VM.getVM().getOopSize());
- }
-
// Reclamation support (really only used by the nmethods, but in order to get asserts to work
// in the CodeCache they are defined virtual here)
public boolean isZombie() { return false; }
@@ -223,18 +200,8 @@ public class CodeBlob extends VMObject {
}
protected void printComponentsOn(PrintStream tty) {
- // FIXME: add relocation information
tty.println(" instructions: [" + instructionsBegin() + ", " + instructionsEnd() + "), " +
" data: [" + dataBegin() + ", " + dataEnd() + "), " +
- " oops: [" + oopsBegin() + ", " + oopsEnd() + "), " +
" frame size: " + getFrameSize());
}
-
- //--------------------------------------------------------------------------------
- // Internals only below this point
- //
-
- private int getOopsLength() {
- return (int) oopsLengthField.getValue(addr);
- }
}
diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java
index f1357edca85..e5b0a72718e 100644
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,6 +49,7 @@ public class NMethod extends CodeBlob {
private static CIntegerField deoptOffsetField;
private static CIntegerField origPCOffsetField;
private static CIntegerField stubOffsetField;
+ private static CIntegerField oopsOffsetField;
private static CIntegerField scopesDataOffsetField;
private static CIntegerField scopesPCsOffsetField;
private static CIntegerField dependenciesOffsetField;
@@ -98,6 +99,7 @@ public class NMethod extends CodeBlob {
deoptOffsetField = type.getCIntegerField("_deoptimize_offset");
origPCOffsetField = type.getCIntegerField("_orig_pc_offset");
stubOffsetField = type.getCIntegerField("_stub_offset");
+ oopsOffsetField = type.getCIntegerField("_oops_offset");
scopesDataOffsetField = type.getCIntegerField("_scopes_data_offset");
scopesPCsOffsetField = type.getCIntegerField("_scopes_pcs_offset");
dependenciesOffsetField = type.getCIntegerField("_dependencies_offset");
@@ -141,7 +143,9 @@ public class NMethod extends CodeBlob {
public Address exceptionBegin() { return headerBegin().addOffsetTo(getExceptionOffset()); }
public Address deoptBegin() { return headerBegin().addOffsetTo(getDeoptOffset()); }
public Address stubBegin() { return headerBegin().addOffsetTo(getStubOffset()); }
- public Address stubEnd() { return headerBegin().addOffsetTo(getScopesDataOffset()); }
+ public Address stubEnd() { return headerBegin().addOffsetTo(getOopsOffset()); }
+ public Address oopsBegin() { return headerBegin().addOffsetTo(getOopsOffset()); }
+ public Address oopsEnd() { return headerBegin().addOffsetTo(getScopesDataOffset()); }
public Address scopesDataBegin() { return headerBegin().addOffsetTo(getScopesDataOffset()); }
public Address scopesDataEnd() { return headerBegin().addOffsetTo(getScopesPCsOffset()); }
public Address scopesPCsBegin() { return headerBegin().addOffsetTo(getScopesPCsOffset()); }
@@ -156,6 +160,7 @@ public class NMethod extends CodeBlob {
public int constantsSize() { return (int) constantsEnd() .minus(constantsBegin()); }
public int codeSize() { return (int) codeEnd() .minus(codeBegin()); }
public int stubSize() { return (int) stubEnd() .minus(stubBegin()); }
+ public int oopsSize() { return (int) oopsEnd() .minus(oopsBegin()); }
public int scopesDataSize() { return (int) scopesDataEnd() .minus(scopesDataBegin()); }
public int scopesPCsSize() { return (int) scopesPCsEnd() .minus(scopesPCsBegin()); }
public int dependenciesSize() { return (int) dependenciesEnd().minus(dependenciesBegin()); }
@@ -178,6 +183,7 @@ public class NMethod extends CodeBlob {
public boolean constantsContains (Address addr) { return constantsBegin() .lessThanOrEqual(addr) && constantsEnd() .greaterThan(addr); }
public boolean codeContains (Address addr) { return codeBegin() .lessThanOrEqual(addr) && codeEnd() .greaterThan(addr); }
public boolean stubContains (Address addr) { return stubBegin() .lessThanOrEqual(addr) && stubEnd() .greaterThan(addr); }
+ public boolean oopsContains (Address addr) { return oopsBegin() .lessThanOrEqual(addr) && oopsEnd() .greaterThan(addr); }
public boolean scopesDataContains (Address addr) { return scopesDataBegin() .lessThanOrEqual(addr) && scopesDataEnd() .greaterThan(addr); }
public boolean scopesPCsContains (Address addr) { return scopesPCsBegin() .lessThanOrEqual(addr) && scopesPCsEnd() .greaterThan(addr); }
public boolean handlerTableContains(Address addr) { return handlerTableBegin().lessThanOrEqual(addr) && handlerTableEnd().greaterThan(addr); }
@@ -187,6 +193,15 @@ public class NMethod extends CodeBlob {
public Address getEntryPoint() { return entryPointField.getValue(addr); }
public Address getVerifiedEntryPoint() { return verifiedEntryPointField.getValue(addr); }
+ /** Support for oops in scopes and relocs. Note: index 0 is reserved for null. */
+ public OopHandle getOopAt(int index) {
+ if (index == 0) return null;
+ if (Assert.ASSERTS_ENABLED) {
+ Assert.that(index > 0 && index <= oopsSize(), "must be a valid non-zero index");
+ }
+ return oopsBegin().getOopHandleAt((index - 1) * VM.getVM().getOopSize());
+ }
+
// FIXME: add interpreter_entry_point()
// FIXME: add lazy_interpreter_entry_point() for C2
@@ -338,6 +353,14 @@ public class NMethod extends CodeBlob {
printOn(System.out);
}
+ protected void printComponentsOn(PrintStream tty) {
+ // FIXME: add relocation information
+ tty.println(" instructions: [" + instructionsBegin() + ", " + instructionsEnd() + "), " +
+ " data: [" + dataBegin() + ", " + dataEnd() + "), " +
+ " oops: [" + oopsBegin() + ", " + oopsEnd() + "), " +
+ " frame size: " + getFrameSize());
+ }
+
public String toString() {
Method method = getMethod();
return "NMethod for " +
@@ -367,6 +390,7 @@ public class NMethod extends CodeBlob {
private int getExceptionOffset() { return (int) exceptionOffsetField .getValue(addr); }
private int getDeoptOffset() { return (int) deoptOffsetField .getValue(addr); }
private int getStubOffset() { return (int) stubOffsetField .getValue(addr); }
+ private int getOopsOffset() { return (int) oopsOffsetField .getValue(addr); }
private int getScopesDataOffset() { return (int) scopesDataOffsetField .getValue(addr); }
private int getScopesPCsOffset() { return (int) scopesPCsOffsetField .getValue(addr); }
private int getDependenciesOffset() { return (int) dependenciesOffsetField.getValue(addr); }
diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/CompactibleFreeListSpace.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/CompactibleFreeListSpace.java
index d3c4a7c38ff..2733af50aa1 100644
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/CompactibleFreeListSpace.java
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/CompactibleFreeListSpace.java
@@ -73,18 +73,11 @@ public class CompactibleFreeListSpace extends CompactibleSpace {
public CompactibleFreeListSpace(Address addr) {
super(addr);
- if ( VM.getVM().isLP64() ) {
- heapWordSize = 8;
- IndexSetStart = 1;
- IndexSetStride = 1;
- }
- else {
- heapWordSize = 4;
- IndexSetStart = 2;
- IndexSetStride = 2;
- }
-
- IndexSetSize = 257;
+ VM vm = VM.getVM();
+ heapWordSize = vm.getHeapWordSize();
+ IndexSetStart = vm.getMinObjAlignmentInBytes() / heapWordSize;
+ IndexSetStride = IndexSetStart;
+ IndexSetSize = 257;
}
// Accessing block offset table
diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Oop.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Oop.java
index 7e1f2a37680..02ba1eaff90 100644
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Oop.java
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Oop.java
@@ -128,7 +128,7 @@ public class Oop {
// Align the object size.
public static long alignObjectSize(long size) {
- return VM.getVM().alignUp(size, VM.getVM().getMinObjAlignment());
+ return VM.getVM().alignUp(size, VM.getVM().getMinObjAlignmentInBytes());
}
// All vm's align longs, so pad out certain offsets.
diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java
index c35a47fcf65..3d2afd9d754 100644
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java
@@ -93,6 +93,7 @@ public class VM {
/** alignment constants */
private boolean isLP64;
private int bytesPerLong;
+ private int objectAlignmentInBytes;
private int minObjAlignmentInBytes;
private int logMinObjAlignmentInBytes;
private int heapWordSize;
@@ -313,9 +314,6 @@ public class VM {
isLP64 = debugger.getMachineDescription().isLP64();
}
bytesPerLong = db.lookupIntConstant("BytesPerLong").intValue();
- minObjAlignmentInBytes = db.lookupIntConstant("MinObjAlignmentInBytes").intValue();
- // minObjAlignment = db.lookupIntConstant("MinObjAlignment").intValue();
- logMinObjAlignmentInBytes = db.lookupIntConstant("LogMinObjAlignmentInBytes").intValue();
heapWordSize = db.lookupIntConstant("HeapWordSize").intValue();
oopSize = db.lookupIntConstant("oopSize").intValue();
@@ -323,6 +321,15 @@ public class VM {
uintxType = db.lookupType("uintx");
boolType = (CIntegerType) db.lookupType("bool");
+ minObjAlignmentInBytes = getObjectAlignmentInBytes();
+ if (minObjAlignmentInBytes == 8) {
+ logMinObjAlignmentInBytes = 3;
+ } else if (minObjAlignmentInBytes == 16) {
+ logMinObjAlignmentInBytes = 4;
+ } else {
+ throw new RuntimeException("Object alignment " + minObjAlignmentInBytes + " not yet supported");
+ }
+
if (isCompressedOopsEnabled()) {
// Size info for oops within java objects is fixed
heapOopSize = (int)getIntSize();
@@ -492,10 +499,6 @@ public class VM {
}
/** Get minimum object alignment in bytes. */
- public int getMinObjAlignment() {
- return minObjAlignmentInBytes;
- }
-
public int getMinObjAlignmentInBytes() {
return minObjAlignmentInBytes;
}
@@ -754,6 +757,14 @@ public class VM {
return compressedOopsEnabled.booleanValue();
}
+ public int getObjectAlignmentInBytes() {
+ if (objectAlignmentInBytes == 0) {
+ Flag flag = getCommandLineFlag("ObjectAlignmentInBytes");
+ objectAlignmentInBytes = (flag == null) ? 8 : (int)flag.getIntx();
+ }
+ return objectAlignmentInBytes;
+ }
+
// returns null, if not available.
public Flag[] getCommandLineFlags() {
if (commandLineFlags == null) {
diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/PointerFinder.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/PointerFinder.java
index efe628ba1bd..b1e3c4acc9a 100644
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/PointerFinder.java
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/PointerFinder.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -98,7 +98,12 @@ public class PointerFinder {
}
loc.inBlobInstructions = loc.blob.instructionsContains(a);
loc.inBlobData = loc.blob.dataContains(a);
- loc.inBlobOops = loc.blob.oopsContains(a);
+
+ if (loc.blob.isNMethod()) {
+ NMethod nm = (NMethod) loc.blob;
+ loc.inBlobOops = nm.oopsContains(a);
+ }
+
loc.inBlobUnknownLocation = (!(loc.inBlobInstructions ||
loc.inBlobData ||
loc.inBlobOops));
diff --git a/hotspot/make/hotspot_version b/hotspot/make/hotspot_version
index 2443d73e50c..b1c39386c5c 100644
--- a/hotspot/make/hotspot_version
+++ b/hotspot/make/hotspot_version
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2010
HS_MAJOR_VER=19
HS_MINOR_VER=0
-HS_BUILD_NUMBER=02
+HS_BUILD_NUMBER=03
JDK_MAJOR_VER=1
JDK_MINOR_VER=7
diff --git a/hotspot/make/solaris/makefiles/defs.make b/hotspot/make/solaris/makefiles/defs.make
index 831c648f7c9..9e1d981cee0 100644
--- a/hotspot/make/solaris/makefiles/defs.make
+++ b/hotspot/make/solaris/makefiles/defs.make
@@ -80,12 +80,10 @@ ifeq ($(ARCH_DATA_MODEL), 32)
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.so
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_db.so
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_dtrace.so
- ifeq ($(ARCH),sparc)
- EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_db.so
- EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_db.so
- EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_dtrace.so
- EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_dtrace.so
- endif
+ EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_db.so
+ EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_db.so
+ EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_dtrace.so
+ EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_dtrace.so
endif
EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.so
diff --git a/hotspot/make/windows/makefiles/defs.make b/hotspot/make/windows/makefiles/defs.make
index 7bb84ea2fd6..851fe4f4832 100644
--- a/hotspot/make/windows/makefiles/defs.make
+++ b/hotspot/make/windows/makefiles/defs.make
@@ -69,8 +69,20 @@ ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) AMD64),)
MAKE_ARGS += Platform_arch_model=x86_64
endif
+ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) EM64T),)
+ ARCH_DATA_MODEL=64
+ PLATFORM=windows-amd64
+ VM_PLATFORM=windows_amd64
+ HS_ARCH=x86
+ MAKE_ARGS += LP64=1
+ MAKE_ARGS += ARCH=x86
+ MAKE_ARGS += BUILDARCH=amd64
+ MAKE_ARGS += Platform_arch=x86
+ MAKE_ARGS += Platform_arch_model=x86_64
+endif
+
# NB later OS versions than 2003 may report "Intel64"
-ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) "EM64T\|Intel64"),)
+ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) Intel64),)
ARCH_DATA_MODEL=64
PLATFORM=windows-amd64
VM_PLATFORM=windows_amd64
diff --git a/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp b/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp
index 248c1fd1a38..2f95cb9e8bd 100644
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp
@@ -87,6 +87,7 @@ REGISTER_DECLARATION(Register, Gtemp , G5);
// JSR 292 fixed register usages:
REGISTER_DECLARATION(Register, G5_method_type , G5);
REGISTER_DECLARATION(Register, G3_method_handle , G3);
+REGISTER_DECLARATION(Register, L7_mh_SP_save , L7);
// The compiler requires that G5_megamorphic_method is G5_inline_cache_klass,
// because a single patchable "set" instruction (NativeMovConstReg,
diff --git a/hotspot/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp b/hotspot/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp
index 203aa2934ee..e81b874a403 100644
--- a/hotspot/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -181,8 +181,8 @@ bool FrameMap::is_caller_save_register (Register r) {
}
-void FrameMap::init () {
- if (_init_done) return;
+void FrameMap::initialize() {
+ assert(!_init_done, "once");
int i=0;
// Register usage:
@@ -345,6 +345,13 @@ LIR_Opr FrameMap::stack_pointer() {
}
+// JSR 292
+LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
+ assert(L7 == L7_mh_SP_save, "must be same register");
+ return L7_opr;
+}
+
+
bool FrameMap::validate_frame() {
int max_offset = in_bytes(framesize_in_bytes());
int java_index = 0;
diff --git a/hotspot/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp b/hotspot/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp
index c14bbedd8a7..592d30b4103 100644
--- a/hotspot/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp
@@ -143,6 +143,3 @@
static bool is_caller_save_register (LIR_Opr reg);
static bool is_caller_save_register (Register r);
-
- // JSR 292
- static LIR_Opr& method_handle_invoke_SP_save_opr() { return L7_opr; }
diff --git a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
index 881d1012310..4b489b81dfc 100644
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
@@ -736,7 +736,8 @@ void LIR_Assembler::align_call(LIR_Code) {
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
__ call(op->addr(), rtype);
- // the peephole pass fills the delay slot
+ // The peephole pass fills the delay slot, add_call_info is done in
+ // LIR_Assembler::emit_delay.
}
@@ -745,7 +746,8 @@ void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
__ set_oop((jobject)Universe::non_oop_word(), G5_inline_cache_reg);
__ relocate(rspec);
__ call(op->addr(), relocInfo::none);
- // the peephole pass fills the delay slot
+ // The peephole pass fills the delay slot, add_call_info is done in
+ // LIR_Assembler::emit_delay.
}
@@ -766,16 +768,6 @@ void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
}
-void LIR_Assembler::preserve_SP(LIR_OpJavaCall* op) {
- Unimplemented();
-}
-
-
-void LIR_Assembler::restore_SP(LIR_OpJavaCall* op) {
- Unimplemented();
-}
-
-
// load with 32-bit displacement
int LIR_Assembler::load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo *info) {
int load_offset = code_offset();
@@ -2934,7 +2926,7 @@ void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
// we may also be emitting the call info for the instruction
// which we are the delay slot of.
- CodeEmitInfo * call_info = op->call_info();
+ CodeEmitInfo* call_info = op->call_info();
if (call_info) {
add_call_info(code_offset(), call_info);
}
@@ -3159,6 +3151,7 @@ void LIR_Assembler::peephole(LIR_List* lir) {
tty->print_cr("delayed");
inst->at(i - 1)->print();
inst->at(i)->print();
+ tty->cr();
}
#endif
continue;
@@ -3174,8 +3167,8 @@ void LIR_Assembler::peephole(LIR_List* lir) {
case lir_static_call:
case lir_virtual_call:
case lir_icvirtual_call:
- case lir_optvirtual_call: {
- LIR_Op* delay_op = NULL;
+ case lir_optvirtual_call:
+ case lir_dynamic_call: {
LIR_Op* prev = inst->at(i - 1);
if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL &&
(op->code() != lir_virtual_call ||
@@ -3192,15 +3185,14 @@ void LIR_Assembler::peephole(LIR_List* lir) {
tty->print_cr("delayed");
inst->at(i - 1)->print();
inst->at(i)->print();
+ tty->cr();
}
#endif
continue;
}
- if (!delay_op) {
- delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
- inst->insert_before(i + 1, delay_op);
- }
+ LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
+ inst->insert_before(i + 1, delay_op);
break;
}
}
diff --git a/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp b/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp
index 7c300acd659..a8b7b08154d 100644
--- a/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp
@@ -221,7 +221,7 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
if (needs_card_mark) {
LIR_Opr ptr = new_pointer_register();
__ add(base_opr, LIR_OprFact::intptrConst(offset), ptr);
- return new LIR_Address(ptr, 0, type);
+ return new LIR_Address(ptr, type);
} else {
return new LIR_Address(base_opr, offset, type);
}
@@ -231,7 +231,7 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
void LIRGenerator::increment_counter(address counter, int step) {
LIR_Opr pointer = new_pointer_register();
__ move(LIR_OprFact::intptrConst(counter), pointer);
- LIR_Address* addr = new LIR_Address(pointer, 0, T_INT);
+ LIR_Address* addr = new LIR_Address(pointer, T_INT);
increment_counter(addr, step);
}
@@ -1159,7 +1159,7 @@ void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
if (type == T_ARRAY || type == T_OBJECT) {
LIR_Opr tmp = new_pointer_register();
__ add(base_op, index_op, tmp);
- addr = new LIR_Address(tmp, 0, type);
+ addr = new LIR_Address(tmp, type);
} else {
addr = new LIR_Address(base_op, index_op, type);
}
diff --git a/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp b/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp
index a6106b0c812..e1e4c3ac17e 100644
--- a/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp
@@ -679,8 +679,15 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
G2_thread, Oissuing_pc->after_save());
__ verify_not_null_oop(Oexception->after_save());
- __ jmp(O0, 0);
- __ delayed()->restore();
+
+ // Restore SP from L7 if the exception PC is a MethodHandle call site.
+ __ mov(O0, G5); // Save the target address.
+ __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0);
+ __ tst(L0); // Condition codes are preserved over the restore.
+ __ restore();
+
+ __ jmp(G5, 0);
+ __ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP); // Restore SP if required.
}
break;
diff --git a/hotspot/src/cpu/sparc/vm/copy_sparc.hpp b/hotspot/src/cpu/sparc/vm/copy_sparc.hpp
index abc7bf6bbeb..e082ab8a687 100644
--- a/hotspot/src/cpu/sparc/vm/copy_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/copy_sparc.hpp
@@ -154,7 +154,7 @@ static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
}
static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value) {
- assert(MinObjAlignmentInBytes == BytesPerLong, "need alternate implementation");
+ assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
julong* to = (julong*)tohw;
julong v = ((julong)value << 32) | value;
@@ -162,7 +162,7 @@ static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value)
// and be equal to 0 on 64-bit platform.
size_t odd = count % (BytesPerLong / HeapWordSize) ;
- size_t aligned_count = align_object_size(count - odd) / HeapWordsPerLong;
+ size_t aligned_count = align_object_offset(count - odd) / HeapWordsPerLong;
julong* end = ((julong*)tohw) + aligned_count - 1;
while (to <= end) {
DEBUG_ONLY(count -= BytesPerLong / HeapWordSize ;)
diff --git a/hotspot/src/cpu/sparc/vm/frame_sparc.cpp b/hotspot/src/cpu/sparc/vm/frame_sparc.cpp
index 7a1dc4a4ad6..5e573155743 100644
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.cpp
@@ -336,9 +336,11 @@ frame::frame(intptr_t* sp, unpatchable_t, address pc, CodeBlob* cb) {
#endif // ASSERT
}
-frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_adjusted_stack) {
- _sp = sp;
- _younger_sp = younger_sp;
+frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_is_interpreted) :
+ _sp(sp),
+ _younger_sp(younger_sp),
+ _deopt_state(unknown),
+ _sp_adjustment_by_callee(0) {
if (younger_sp == NULL) {
// make a deficient frame which doesn't know where its PC is
_pc = NULL;
@@ -352,20 +354,32 @@ frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_adjusted_sta
// wrong. (the _last_native_pc will have the right value)
// So do not put add any asserts on the _pc here.
}
- if (younger_frame_adjusted_stack) {
- // compute adjustment to this frame's SP made by its interpreted callee
- _sp_adjustment_by_callee = (intptr_t*)((intptr_t)younger_sp[I5_savedSP->sp_offset_in_saved_window()] +
- STACK_BIAS) - sp;
- } else {
- _sp_adjustment_by_callee = 0;
+
+ if (_pc != NULL)
+ _cb = CodeCache::find_blob(_pc);
+
+ // Check for MethodHandle call sites.
+ if (_cb != NULL) {
+ nmethod* nm = _cb->as_nmethod_or_null();
+ if (nm != NULL) {
+ if (nm->is_deopt_mh_entry(_pc) || nm->is_method_handle_return(_pc)) {
+ _sp_adjustment_by_callee = (intptr_t*) ((intptr_t) sp[L7_mh_SP_save->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
+ // The SP is already adjusted by this MH call site, don't
+ // overwrite this value with the wrong interpreter value.
+ younger_frame_is_interpreted = false;
+ }
+ }
}
- _deopt_state = unknown;
+ if (younger_frame_is_interpreted) {
+ // compute adjustment to this frame's SP made by its interpreted callee
+ _sp_adjustment_by_callee = (intptr_t*) ((intptr_t) younger_sp[I5_savedSP->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
+ }
- // It is important that frame be fully construct when we do this lookup
- // as get_original_pc() needs correct value for unextended_sp()
+ // It is important that the frame is fully constructed when we do
+ // this lookup as get_deopt_original_pc() needs a correct value for
+ // unextended_sp() which uses _sp_adjustment_by_callee.
if (_pc != NULL) {
- _cb = CodeCache::find_blob(_pc);
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
_pc = original_pc;
@@ -462,9 +476,8 @@ frame frame::sender(RegisterMap* map) const {
if (is_entry_frame()) return sender_for_entry_frame(map);
- intptr_t* younger_sp = sp();
- intptr_t* sp = sender_sp();
- bool adjusted_stack = false;
+ intptr_t* younger_sp = sp();
+ intptr_t* sp = sender_sp();
// Note: The version of this operation on any platform with callee-save
// registers must update the register map (if not null).
@@ -483,8 +496,8 @@ frame frame::sender(RegisterMap* map) const {
// interpreted but its pc is in the code cache (for c1 -> osr_frame_return_id stub), so it must be
// explicitly recognized.
- adjusted_stack = is_interpreted_frame();
- if (adjusted_stack) {
+ bool frame_is_interpreted = is_interpreted_frame();
+ if (frame_is_interpreted) {
map->make_integer_regs_unsaved();
map->shift_window(sp, younger_sp);
} else if (_cb != NULL) {
@@ -503,7 +516,7 @@ frame frame::sender(RegisterMap* map) const {
}
}
}
- return frame(sp, younger_sp, adjusted_stack);
+ return frame(sp, younger_sp, frame_is_interpreted);
}
diff --git a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp
index b1cb39216f5..179d459eb10 100644
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp
@@ -720,25 +720,30 @@ void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(
void InterpreterMacroAssembler::get_cache_index_at_bcp(Register cache, Register tmp,
- int bcp_offset, bool giant_index) {
+ int bcp_offset, size_t index_size) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
- if (!giant_index) {
+ if (index_size == sizeof(u2)) {
get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
- } else {
+ } else if (index_size == sizeof(u4)) {
assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
get_4_byte_integer_at_bcp(bcp_offset, cache, tmp);
assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
xor3(tmp, -1, tmp); // convert to plain index
+ } else if (index_size == sizeof(u1)) {
+ assert(EnableMethodHandles, "tiny index used only for EnableMethodHandles");
+ ldub(Lbcp, bcp_offset, tmp);
+ } else {
+ ShouldNotReachHere();
}
}
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp,
- int bcp_offset, bool giant_index) {
+ int bcp_offset, size_t index_size) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
assert_different_registers(cache, tmp);
assert_not_delayed();
- get_cache_index_at_bcp(cache, tmp, bcp_offset, giant_index);
+ get_cache_index_at_bcp(cache, tmp, bcp_offset, index_size);
// convert from field index to ConstantPoolCacheEntry index and from
// word index to byte offset
sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
@@ -747,12 +752,15 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Regis
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
- int bcp_offset, bool giant_index) {
+ int bcp_offset, size_t index_size) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
assert_different_registers(cache, tmp);
assert_not_delayed();
- assert(!giant_index,"NYI");
- get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
+ if (index_size == sizeof(u2)) {
+ get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
+ } else {
+ ShouldNotReachHere(); // other sizes not supported here
+ }
// convert from field index to ConstantPoolCacheEntry index
// and from word index to byte offset
sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
diff --git a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp
index 7f18e6ef66b..66a60635e40 100644
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp
@@ -182,9 +182,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
Register Rdst,
setCCOrNot should_set_CC = dont_set_CC );
- void get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
- void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
- void get_cache_index_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
+ void get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
+ void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
+ void get_cache_index_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
// common code
diff --git a/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp b/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp
index 248fa7ed54c..887bfd63e63 100644
--- a/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp
@@ -375,10 +375,10 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
Register O0_scratch = O0_argslot;
int stackElementSize = Interpreter::stackElementSize;
- // Make space on the stack for the arguments.
- __ sub(SP, 4*stackElementSize, SP);
- __ sub(Gargs, 3*stackElementSize, Gargs);
- //__ sub(Lesp, 3*stackElementSize, Lesp);
+ // Make space on the stack for the arguments and set Gargs
+ // correctly.
+ __ sub(SP, 4*stackElementSize, SP); // Keep stack aligned.
+ __ add(SP, (frame::varargs_offset)*wordSize - 1*Interpreter::stackElementSize + STACK_BIAS + BytesPerWord, Gargs);
// void raiseException(int code, Object actual, Object required)
__ st( O1_scratch, Address(Gargs, 2*stackElementSize)); // code
diff --git a/hotspot/src/cpu/sparc/vm/nativeInst_sparc.cpp b/hotspot/src/cpu/sparc/vm/nativeInst_sparc.cpp
index 76cc86cd32c..37b4bb89107 100644
--- a/hotspot/src/cpu/sparc/vm/nativeInst_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/nativeInst_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -321,7 +321,8 @@ void NativeMovConstReg::set_data(intptr_t x) {
set_long_at(add_offset, set_data32_simm13( long_at(add_offset), x));
// also store the value into an oop_Relocation cell, if any
- CodeBlob* nm = CodeCache::find_blob(instruction_address());
+ CodeBlob* cb = CodeCache::find_blob(instruction_address());
+ nmethod* nm = cb ? cb->as_nmethod_or_null() : NULL;
if (nm != NULL) {
RelocIterator iter(nm, instruction_address(), next_instruction_address());
oop* oop_addr = NULL;
@@ -430,7 +431,8 @@ void NativeMovConstRegPatching::set_data(int x) {
set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));
// also store the value into an oop_Relocation cell, if any
- CodeBlob* nm = CodeCache::find_blob(instruction_address());
+ CodeBlob* cb = CodeCache::find_blob(instruction_address());
+ nmethod* nm = cb ? cb->as_nmethod_or_null() : NULL;
if (nm != NULL) {
RelocIterator iter(nm, instruction_address(), next_instruction_address());
oop* oop_addr = NULL;
diff --git a/hotspot/src/cpu/sparc/vm/register_definitions_sparc.cpp b/hotspot/src/cpu/sparc/vm/register_definitions_sparc.cpp
index 1d2db4c0e9c..a726217fd0a 100644
--- a/hotspot/src/cpu/sparc/vm/register_definitions_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/register_definitions_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -142,9 +142,12 @@ REGISTER_DEFINITION(Register, G1_scratch);
REGISTER_DEFINITION(Register, G3_scratch);
REGISTER_DEFINITION(Register, G4_scratch);
REGISTER_DEFINITION(Register, Gtemp);
+REGISTER_DEFINITION(Register, Lentry_args);
+
+// JSR 292
REGISTER_DEFINITION(Register, G5_method_type);
REGISTER_DEFINITION(Register, G3_method_handle);
-REGISTER_DEFINITION(Register, Lentry_args);
+REGISTER_DEFINITION(Register, L7_mh_SP_save);
#ifdef CC_INTERP
REGISTER_DEFINITION(Register, Lstate);
diff --git a/hotspot/src/cpu/sparc/vm/runtime_sparc.cpp b/hotspot/src/cpu/sparc/vm/runtime_sparc.cpp
index e19058741c2..e6f641e26b7 100644
--- a/hotspot/src/cpu/sparc/vm/runtime_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/runtime_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -116,6 +116,11 @@ void OptoRuntime::generate_exception_blob() {
__ mov(O0, G3_scratch); // Move handler address to temp
__ restore();
+ // Restore SP from L7 if the exception PC is a MethodHandle call site.
+ __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), O7);
+ __ tst(O7);
+ __ movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP);
+
// G3_scratch contains handler address
// Since this may be the deopt blob we must set O7 to look like we returned
// from the original pc that threw the exception
diff --git a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
index 9d39bee00ff..ee836d6c7b4 100644
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
@@ -908,26 +908,13 @@ void AdapterGenerator::gen_i2c_adapter(
// O0-O5 - Outgoing args in compiled layout
// O6 - Adjusted or restored SP
// O7 - Valid return address
- // L0-L7, I0-I7 - Caller's temps (no frame pushed yet)
+ // L0-L7, I0-I7 - Caller's temps (no frame pushed yet)
// F0-F7 - more outgoing args
// Gargs is the incoming argument base, and also an outgoing argument.
__ sub(Gargs, BytesPerWord, Gargs);
-#ifdef ASSERT
- {
- // on entry OsavedSP and SP should be equal
- Label ok;
- __ cmp(O5_savedSP, SP);
- __ br(Assembler::equal, false, Assembler::pt, ok);
- __ delayed()->nop();
- __ stop("I5_savedSP not set");
- __ should_not_reach_here();
- __ bind(ok);
- }
-#endif
-
// ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
// WITH O7 HOLDING A VALID RETURN PC
//
diff --git a/hotspot/src/cpu/sparc/vm/sparc.ad b/hotspot/src/cpu/sparc/vm/sparc.ad
index 8def93fc58c..50c0b5429c0 100644
--- a/hotspot/src/cpu/sparc/vm/sparc.ad
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad
@@ -534,7 +534,10 @@ bool can_branch_register( Node *bol, Node *cmp ) {
// The "return address" is the address of the call instruction, plus 8.
int MachCallStaticJavaNode::ret_addr_offset() {
- return NativeCall::instruction_size; // call; delay slot
+ int offset = NativeCall::instruction_size; // call; delay slot
+ if (_method_handle_invoke)
+ offset += 4; // restore SP
+ return offset;
}
int MachCallDynamicJavaNode::ret_addr_offset() {
@@ -818,6 +821,10 @@ void emit_form3_mem_reg(CodeBuffer &cbuf, const MachNode* n, int primary, int te
!(n->ideal_Opcode()==Op_ConvI2D && ld_op==Op_LoadF) &&
!(n->ideal_Opcode()==Op_PrefetchRead && ld_op==Op_LoadI) &&
!(n->ideal_Opcode()==Op_PrefetchWrite && ld_op==Op_LoadI) &&
+ !(n->ideal_Opcode()==Op_Load2I && ld_op==Op_LoadD) &&
+ !(n->ideal_Opcode()==Op_Load4C && ld_op==Op_LoadD) &&
+ !(n->ideal_Opcode()==Op_Load4S && ld_op==Op_LoadD) &&
+ !(n->ideal_Opcode()==Op_Load8B && ld_op==Op_LoadD) &&
!(n->rule() == loadUB_rule)) {
verify_oops_warning(n, n->ideal_Opcode(), ld_op);
}
@@ -829,6 +836,9 @@ void emit_form3_mem_reg(CodeBuffer &cbuf, const MachNode* n, int primary, int te
!(n->ideal_Opcode()==Op_StoreI && st_op==Op_StoreF) &&
!(n->ideal_Opcode()==Op_StoreF && st_op==Op_StoreI) &&
!(n->ideal_Opcode()==Op_StoreL && st_op==Op_StoreI) &&
+ !(n->ideal_Opcode()==Op_Store2I && st_op==Op_StoreD) &&
+ !(n->ideal_Opcode()==Op_Store4C && st_op==Op_StoreD) &&
+ !(n->ideal_Opcode()==Op_Store8B && st_op==Op_StoreD) &&
!(n->ideal_Opcode()==Op_StoreD && st_op==Op_StoreI && n->rule() == storeD0_rule)) {
verify_oops_warning(n, n->ideal_Opcode(), st_op);
}
@@ -1750,6 +1760,12 @@ const int Matcher::init_array_short_size = 8 * BytesPerLong;
// registers? True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;
+bool Matcher::narrow_oop_use_complex_address() {
+ NOT_LP64(ShouldNotCallThis());
+ assert(UseCompressedOops, "only for compressed oops code");
+ return false;
+}
+
// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers. Most RISCs will have to materialize an address into a
@@ -1858,7 +1874,7 @@ RegMask Matcher::modL_proj_mask() {
}
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
- return RegMask();
+ return L7_REGP_mask;
}
%}
@@ -2441,6 +2457,16 @@ encode %{
/*preserve_g2=*/true, /*force far call*/true);
%}
+ enc_class preserve_SP %{
+ MacroAssembler _masm(&cbuf);
+ __ mov(SP, L7_mh_SP_save);
+ %}
+
+ enc_class restore_SP %{
+ MacroAssembler _masm(&cbuf);
+ __ mov(L7_mh_SP_save, SP);
+ %}
+
enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
// CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
// who we intended to call.
@@ -9213,6 +9239,7 @@ instruct safePoint_poll(iRegP poll) %{
// Call Java Static Instruction
instruct CallStaticJavaDirect( method meth ) %{
match(CallStaticJava);
+ predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
effect(USE meth);
size(8);
@@ -9223,6 +9250,20 @@ instruct CallStaticJavaDirect( method meth ) %{
ins_pipe(simple_call);
%}
+// Call Java Static Instruction (method handle version)
+instruct CallStaticJavaHandle(method meth, l7RegP l7_mh_SP_save) %{
+ match(CallStaticJava);
+ predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
+ effect(USE meth, KILL l7_mh_SP_save);
+
+ size(8);
+ ins_cost(CALL_COST);
+ format %{ "CALL,static/MethodHandle" %}
+ ins_encode(preserve_SP, Java_Static_Call(meth), restore_SP, call_epilog);
+ ins_pc_relative(1);
+ ins_pipe(simple_call);
+%}
+
// Call Java Dynamic Instruction
instruct CallDynamicJavaDirect( method meth ) %{
match(CallDynamicJava);
diff --git a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp
index e016a96f7d6..65404a44d66 100644
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp
@@ -2911,16 +2911,6 @@ class StubGenerator: public StubCodeGenerator {
// arraycopy stubs used by compilers
generate_arraycopy_stubs();
- // generic method handle stubs
- if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) {
- for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
- ek < MethodHandles::_EK_LIMIT;
- ek = MethodHandles::EntryKind(1 + (int)ek)) {
- StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
- MethodHandles::generate_method_handle_stub(_masm, ek);
- }
- }
-
// Don't initialize the platform math functions since sparc
// doesn't have intrinsics for these operations.
}
diff --git a/hotspot/src/cpu/sparc/vm/stubRoutines_sparc.hpp b/hotspot/src/cpu/sparc/vm/stubRoutines_sparc.hpp
index 1c539537562..466e30e6249 100644
--- a/hotspot/src/cpu/sparc/vm/stubRoutines_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/stubRoutines_sparc.hpp
@@ -43,7 +43,7 @@ enum /* platform_dependent_constants */ {
// MethodHandles adapters
enum method_handles_platform_dependent_constants {
- method_handles_adapters_code_size = 5000
+ method_handles_adapters_code_size = 6000
};
class Sparc {
diff --git a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp
index 6dcddc713de..57da8ed2b92 100644
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp
@@ -204,7 +204,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
// out of the main line of code...
if (EnableInvokeDynamic) {
__ bind(L_giant_index);
- __ get_cache_and_index_at_bcp(cache, G1_scratch, 1, true);
+ __ get_cache_and_index_at_bcp(cache, G1_scratch, 1, sizeof(u4));
__ ba(false, L_got_cache);
__ delayed()->nop();
}
diff --git a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp
index 82403d220a1..5edeadaccfc 100644
--- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp
@@ -1949,23 +1949,30 @@ void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constrain
}
// ----------------------------------------------------------------------------
-void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
- assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
- bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);
-
+void TemplateTable::resolve_cache_and_index(int byte_no,
+ Register result,
+ Register Rcache,
+ Register index,
+ size_t index_size) {
// Depends on cpCacheOop layout!
- const int shift_count = (1 + byte_no)*BitsPerByte;
Label resolved;
- __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
- if (is_invokedynamic) {
- // We are resolved if the f1 field contains a non-null CallSite object.
+ __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
+ if (byte_no == f1_oop) {
+ // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
+ // This kind of CP cache entry does not need to match the flags byte, because
+ // there is a 1-1 relation between bytecode type and CP entry type.
+ assert_different_registers(result, Rcache);
__ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
- ConstantPoolCacheEntry::f1_offset(), Lbyte_code);
- __ tst(Lbyte_code);
+ ConstantPoolCacheEntry::f1_offset(), result);
+ __ tst(result);
__ br(Assembler::notEqual, false, Assembler::pt, resolved);
__ delayed()->set((int)bytecode(), O1);
} else {
+ assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
+ assert(result == noreg, ""); //else change code for setting result
+ const int shift_count = (1 + byte_no)*BitsPerByte;
+
__ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::indices_offset(), Lbyte_code);
@@ -1992,7 +1999,10 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
// first time invocation - must resolve first
__ call_VM(noreg, entry, O1);
// Update registers with resolved info
- __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
+ __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
+ if (result != noreg)
+ __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
+ ConstantPoolCacheEntry::f1_offset(), result);
__ bind(resolved);
}
@@ -2001,7 +2011,8 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
Register Ritable_index,
Register Rflags,
bool is_invokevirtual,
- bool is_invokevfinal) {
+ bool is_invokevfinal,
+ bool is_invokedynamic) {
// Uses both G3_scratch and G4_scratch
Register Rcache = G3_scratch;
Register Rscratch = G4_scratch;
@@ -2025,11 +2036,15 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
if (is_invokevfinal) {
__ get_cache_and_index_at_bcp(Rcache, Rscratch, 1);
+ __ ld_ptr(Rcache, method_offset, Rmethod);
+ } else if (byte_no == f1_oop) {
+ // Resolved f1_oop goes directly into 'method' register.
+ resolve_cache_and_index(byte_no, Rmethod, Rcache, Rscratch, sizeof(u4));
} else {
- resolve_cache_and_index(byte_no, Rcache, Rscratch);
+ resolve_cache_and_index(byte_no, noreg, Rcache, Rscratch, sizeof(u2));
+ __ ld_ptr(Rcache, method_offset, Rmethod);
}
- __ ld_ptr(Rcache, method_offset, Rmethod);
if (Ritable_index != noreg) {
__ ld_ptr(Rcache, index_offset, Ritable_index);
}
@@ -2110,7 +2125,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
Register Rflags = G1_scratch;
ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
- resolve_cache_and_index(byte_no, Rcache, index);
+ resolve_cache_and_index(byte_no, noreg, Rcache, index, sizeof(u2));
jvmti_post_field_access(Rcache, index, is_static, false);
load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
@@ -2475,7 +2490,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
Register Rflags = G1_scratch;
ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
- resolve_cache_and_index(byte_no, Rcache, index);
+ resolve_cache_and_index(byte_no, noreg, Rcache, index, sizeof(u2));
jvmti_post_field_mod(Rcache, index, is_static);
load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
@@ -2816,6 +2831,7 @@ void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Regist
void TemplateTable::invokevirtual(int byte_no) {
transition(vtos, vtos);
+ assert(byte_no == f2_byte, "use this argument");
Register Rscratch = G3_scratch;
Register Rtemp = G4_scratch;
@@ -2823,7 +2839,7 @@ void TemplateTable::invokevirtual(int byte_no) {
Register Rrecv = G5_method;
Label notFinal;
- load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true);
+ load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false);
__ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
// Check for vfinal
@@ -2864,9 +2880,10 @@ void TemplateTable::invokevirtual(int byte_no) {
void TemplateTable::fast_invokevfinal(int byte_no) {
transition(vtos, vtos);
+ assert(byte_no == f2_byte, "use this argument");
load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true,
- /*is_invokevfinal*/true);
+ /*is_invokevfinal*/true, false);
__ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
invokevfinal_helper(G3_scratch, Lscratch);
}
@@ -2901,12 +2918,13 @@ void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
void TemplateTable::invokespecial(int byte_no) {
transition(vtos, vtos);
+ assert(byte_no == f1_byte, "use this argument");
Register Rscratch = G3_scratch;
Register Rtemp = G4_scratch;
Register Rret = Lscratch;
- load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, false);
+ load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, /*virtual*/ false, false, false);
__ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
__ verify_oop(G5_method);
@@ -2934,12 +2952,13 @@ void TemplateTable::invokespecial(int byte_no) {
void TemplateTable::invokestatic(int byte_no) {
transition(vtos, vtos);
+ assert(byte_no == f1_byte, "use this argument");
Register Rscratch = G3_scratch;
Register Rtemp = G4_scratch;
Register Rret = Lscratch;
- load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, false);
+ load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, /*virtual*/ false, false, false);
__ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
__ verify_oop(G5_method);
@@ -2992,6 +3011,7 @@ void TemplateTable::invokeinterface_object_method(Register RklassOop,
void TemplateTable::invokeinterface(int byte_no) {
transition(vtos, vtos);
+ assert(byte_no == f1_byte, "use this argument");
Register Rscratch = G4_scratch;
Register Rret = G3_scratch;
@@ -3001,7 +3021,7 @@ void TemplateTable::invokeinterface(int byte_no) {
Register Rflags = O1;
assert_different_registers(Rscratch, G5_method);
- load_invoke_cp_cache_entry(byte_no, Rinterface, Rindex, Rflags, false);
+ load_invoke_cp_cache_entry(byte_no, Rinterface, Rindex, Rflags, /*virtual*/ false, false, false);
__ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
// get receiver
@@ -3118,6 +3138,7 @@ void TemplateTable::invokeinterface(int byte_no) {
void TemplateTable::invokedynamic(int byte_no) {
transition(vtos, vtos);
+ assert(byte_no == f1_oop, "use this argument");
if (!EnableInvokeDynamic) {
// We should not encounter this bytecode if !EnableInvokeDynamic.
@@ -3132,7 +3153,6 @@ void TemplateTable::invokedynamic(int byte_no) {
// G5: CallSite object (f1)
// XX: unused (f2)
- // G3: receiver address
// XX: flags (unused)
Register G5_callsite = G5_method;
@@ -3140,7 +3160,8 @@ void TemplateTable::invokedynamic(int byte_no) {
Register Rtemp = G1_scratch;
Register Rret = Lscratch;
- load_invoke_cp_cache_entry(byte_no, G5_callsite, noreg, Rret, false);
+ load_invoke_cp_cache_entry(byte_no, G5_callsite, noreg, Rret,
+ /*virtual*/ false, /*vfinal*/ false, /*indy*/ true);
__ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
__ verify_oop(G5_callsite);
diff --git a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp
index d61eba158f8..ba44fe6a52b 100644
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp
@@ -65,13 +65,6 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseInlineCaches, false);
}
#ifdef _LP64
- // Single issue niagara1 is slower for CompressedOops
- // but niagaras after that it's fine.
- if (!is_niagara1_plus()) {
- if (FLAG_IS_DEFAULT(UseCompressedOops)) {
- FLAG_SET_ERGO(bool, UseCompressedOops, false);
- }
- }
// 32-bit oops don't make sense for the 64-bit VM on sparc
// since the 32-bit VM has the same registers and smaller objects.
Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
diff --git a/hotspot/src/cpu/x86/vm/assembler_x86.cpp b/hotspot/src/cpu/x86/vm/assembler_x86.cpp
index 1b50044007e..ab758216c57 100644
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp
@@ -7643,6 +7643,9 @@ void MacroAssembler::verify_oop(Register reg, const char* s) {
// Pass register number to verify_oop_subroutine
char* b = new char[strlen(s) + 50];
sprintf(b, "verify_oop: %s: %s", reg->name(), s);
+#ifdef _LP64
+ push(rscratch1); // save r10, trashed by movptr()
+#endif
push(rax); // save rax,
push(reg); // pass register argument
ExternalAddress buffer((address) b);
@@ -7653,6 +7656,7 @@ void MacroAssembler::verify_oop(Register reg, const char* s) {
// call indirectly to solve generation ordering problem
movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
call(rax);
+ // Caller pops the arguments (oop, message) and restores rax, r10
}
@@ -7767,6 +7771,9 @@ void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
char* b = new char[strlen(s) + 50];
sprintf(b, "verify_oop_addr: %s", s);
+#ifdef _LP64
+ push(rscratch1); // save r10, trashed by movptr()
+#endif
push(rax); // save rax,
// addr may contain rsp so we will have to adjust it based on the push
// we just did
@@ -7789,7 +7796,7 @@ void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
// call indirectly to solve generation ordering problem
movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
call(rax);
- // Caller pops the arguments and restores rax, from the stack
+ // Caller pops the arguments (addr, message) and restores rax, r10.
}
void MacroAssembler::verify_tlab() {
@@ -8185,9 +8192,14 @@ void MacroAssembler::load_prototype_header(Register dst, Register src) {
assert (Universe::heap() != NULL, "java heap should be initialized");
movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
if (Universe::narrow_oop_shift() != 0) {
- assert(Address::times_8 == LogMinObjAlignmentInBytes &&
- Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
- movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
+ assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+ if (LogMinObjAlignmentInBytes == Address::times_8) {
+ movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
+ } else {
+ // OK to use shift since we don't need to preserve flags.
+ shlq(dst, LogMinObjAlignmentInBytes);
+ movq(dst, Address(r12_heapbase, dst, Address::times_1, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
+ }
} else {
movq(dst, Address(dst, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
}
@@ -8361,31 +8373,43 @@ void MacroAssembler::decode_heap_oop(Register r) {
}
void MacroAssembler::decode_heap_oop_not_null(Register r) {
+ // Note: it will change flags
assert (UseCompressedOops, "should only be used for compressed headers");
assert (Universe::heap() != NULL, "java heap should be initialized");
// Cannot assert, unverified entry point counts instructions (see .ad file)
// vtableStubs also counts instructions in pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop.
if (Universe::narrow_oop_shift() != 0) {
- assert (Address::times_8 == LogMinObjAlignmentInBytes &&
- Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
- // Don't use Shift since it modifies flags.
- leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
+ assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+ shlq(r, LogMinObjAlignmentInBytes);
+ if (Universe::narrow_oop_base() != NULL) {
+ addq(r, r12_heapbase);
+ }
} else {
assert (Universe::narrow_oop_base() == NULL, "sanity");
}
}
void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
+ // Note: it will change flags
assert (UseCompressedOops, "should only be used for compressed headers");
assert (Universe::heap() != NULL, "java heap should be initialized");
// Cannot assert, unverified entry point counts instructions (see .ad file)
// vtableStubs also counts instructions in pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop.
if (Universe::narrow_oop_shift() != 0) {
- assert (Address::times_8 == LogMinObjAlignmentInBytes &&
- Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
- leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
+ assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+ if (LogMinObjAlignmentInBytes == Address::times_8) {
+ leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
+ } else {
+ if (dst != src) {
+ movq(dst, src);
+ }
+ shlq(dst, LogMinObjAlignmentInBytes);
+ if (Universe::narrow_oop_base() != NULL) {
+ addq(dst, r12_heapbase);
+ }
+ }
} else if (dst != src) {
assert (Universe::narrow_oop_base() == NULL, "sanity");
movq(dst, src);
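
The assembler_x86.cpp hunks drop the assumption that the compressed-oop shift is always 3 (times_8). When LogMinObjAlignmentInBytes is 3 the decode can still be folded into a single leaq; otherwise it becomes an explicit shift plus base add, which is why the decode_heap_oop_not_null variants are now annotated as changing flags. A stand-alone sketch of the decode being generated (illustrative only; null checks and register aliasing are omitted):

    #include <cstdint>

    uintptr_t decode_heap_oop_not_null(uint32_t narrow_oop,
                                       uintptr_t heap_base,   // r12_heapbase
                                       unsigned  shift) {     // LogMinObjAlignmentInBytes
        // shift == 3 maps onto lea's times_8 scaling and leaves flags alone;
        // any other shift needs shl + add, which clobbers the flags register.
        return ((uintptr_t) narrow_oop << shift) + heap_base;
    }
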
diff --git a/hotspot/src/cpu/x86/vm/assembler_x86.hpp b/hotspot/src/cpu/x86/vm/assembler_x86.hpp
index 2d24be625e4..5384f6bf46b 100644
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp
@@ -135,6 +135,9 @@ REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved
#endif // _LP64
+// JSR 292 fixed register usages:
+REGISTER_DECLARATION(Register, rbp_mh_SP_save, rbp);
+
// Address is an abstraction used to represent a memory location
// using any of the amd64 addressing modes with one object.
//
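
rbp_mh_SP_save is a fixed alias for RBP used by the JSR 292 support: compiled call sites that might invoke a method handle save the caller's RSP there, and the exception/deopt paths restore RSP from it when JavaThread::is_method_handle_return is set. A toy model of that protocol with made-up frame fields (the real code is in the preserve_SP/restore_SP enc_class blocks and the generate_exception_blob hunks below):

    #include <cstdint>

    struct ToyFrame { uintptr_t rsp; uintptr_t rbp_mh_SP_save; };

    // before the call: movptr(rbp_mh_SP_save, rsp)
    void preserve_SP(ToyFrame& f) { f.rbp_mh_SP_save = f.rsp; }

    // on the return/exception path: cmovptr(notEqual, rsp, rbp_mh_SP_save)
    void restore_SP_if_mh_return(ToyFrame& f, bool is_method_handle_return) {
        if (is_method_handle_return) f.rsp = f.rbp_mh_SP_save;
    }
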
diff --git a/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.cpp b/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.cpp
index 55d2435d384..20e78c242c5 100644
--- a/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -136,8 +136,8 @@ XMMRegister FrameMap::nr2xmmreg(int rnr) {
// FrameMap
//--------------------------------------------------------
-void FrameMap::init() {
- if (_init_done) return;
+void FrameMap::initialize() {
+ assert(!_init_done, "once");
assert(nof_cpu_regs == LP64_ONLY(16) NOT_LP64(8), "wrong number of CPU registers");
map_register(0, rsi); rsi_opr = LIR_OprFact::single_cpu(0);
@@ -309,6 +309,13 @@ LIR_Opr FrameMap::stack_pointer() {
}
+// JSR 292
+LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
+ assert(rbp == rbp_mh_SP_save, "must be same register");
+ return rbp_opr;
+}
+
+
bool FrameMap::validate_frame() {
return true;
}
diff --git a/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.hpp b/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.hpp
index cd2a8223099..c479663ca6e 100644
--- a/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.hpp
+++ b/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.hpp
@@ -126,6 +126,3 @@
assert(i >= 0 && i < nof_caller_save_xmm_regs, "out of bounds");
return _caller_save_xmm_regs[i];
}
-
- // JSR 292
- static LIR_Opr& method_handle_invoke_SP_save_opr() { return rbp_opr; }
diff --git a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
index 6e59c45860f..39ca0e2d8bb 100644
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
@@ -2462,9 +2462,18 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
}
#endif // _LP64
} else {
+#ifdef _LP64
+ Register r_lo;
+ if (right->type() == T_OBJECT || right->type() == T_ARRAY) {
+ r_lo = right->as_register();
+ } else {
+ r_lo = right->as_register_lo();
+ }
+#else
Register r_lo = right->as_register_lo();
Register r_hi = right->as_register_hi();
assert(l_lo != r_hi, "overwriting registers");
+#endif
switch (code) {
case lir_logic_and:
__ andptr(l_lo, r_lo);
@@ -2784,7 +2793,7 @@ void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
"must be aligned");
__ call(AddressLiteral(op->addr(), rtype));
- add_call_info(code_offset(), op->info(), op->is_method_handle_invoke());
+ add_call_info(code_offset(), op->info());
}
@@ -2795,7 +2804,7 @@ void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
(__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
"must be aligned");
__ call(AddressLiteral(op->addr(), rh));
- add_call_info(code_offset(), op->info(), op->is_method_handle_invoke());
+ add_call_info(code_offset(), op->info());
}
@@ -2805,16 +2814,6 @@ void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
}
-void LIR_Assembler::preserve_SP(LIR_OpJavaCall* op) {
- __ movptr(FrameMap::method_handle_invoke_SP_save_opr()->as_register(), rsp);
-}
-
-
-void LIR_Assembler::restore_SP(LIR_OpJavaCall* op) {
- __ movptr(rsp, FrameMap::method_handle_invoke_SP_save_opr()->as_register());
-}
-
-
void LIR_Assembler::emit_static_call_stub() {
address call_pc = __ pc();
address stub = __ start_a_stub(call_stub_size);
diff --git a/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp b/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp
index f5581e9d380..a37aab25aa6 100644
--- a/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp
@@ -175,7 +175,7 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
// store and again for the card mark.
LIR_Opr tmp = new_pointer_register();
__ leal(LIR_OprFact::address(addr), tmp);
- return new LIR_Address(tmp, 0, type);
+ return new LIR_Address(tmp, type);
} else {
return addr;
}
@@ -185,7 +185,7 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
void LIRGenerator::increment_counter(address counter, int step) {
LIR_Opr pointer = new_pointer_register();
__ move(LIR_OprFact::intptrConst(counter), pointer);
- LIR_Address* addr = new LIR_Address(pointer, 0, T_INT);
+ LIR_Address* addr = new LIR_Address(pointer, T_INT);
increment_counter(addr, step);
}
diff --git a/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp b/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp
index 42a50df460d..4bfcdb84f33 100644
--- a/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp
@@ -782,7 +782,7 @@ void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
// Restore SP from BP if the exception PC is a MethodHandle call site.
NOT_LP64(__ get_thread(thread);)
__ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
- __ cmovptr(Assembler::notEqual, rsp, rbp);
+ __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
// continue at exception handler (return address removed)
// note: do *not* remove arguments when unwinding the
@@ -1581,7 +1581,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ should_not_reach_here();
break;
}
-
__ push(rax);
__ push(rdx);
@@ -1605,8 +1604,8 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
// Can we store original value in the thread's buffer?
- LP64_ONLY(__ movslq(tmp, queue_index);)
#ifdef _LP64
+ __ movslq(tmp, queue_index);
__ cmpq(tmp, 0);
#else
__ cmpl(queue_index, 0);
@@ -1628,13 +1627,33 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ jmp(done);
__ bind(runtime);
- // load the pre-value
__ push(rcx);
+#ifdef _LP64
+ __ push(r8);
+ __ push(r9);
+ __ push(r10);
+ __ push(r11);
+# ifndef _WIN64
+ __ push(rdi);
+ __ push(rsi);
+# endif
+#endif
+ // load the pre-value
f.load_argument(0, rcx);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
+#ifdef _LP64
+# ifndef _WIN64
+ __ pop(rsi);
+ __ pop(rdi);
+# endif
+ __ pop(r11);
+ __ pop(r10);
+ __ pop(r9);
+ __ pop(r8);
+#endif
__ pop(rcx);
-
__ bind(done);
+
__ pop(rdx);
__ pop(rax);
}
@@ -1664,13 +1683,13 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
PtrQueue::byte_offset_of_buf()));
__ push(rax);
- __ push(rdx);
+ __ push(rcx);
NOT_LP64(__ get_thread(thread);)
ExternalAddress cardtable((address)ct->byte_map_base);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
- const Register card_addr = rdx;
+ const Register card_addr = rcx;
#ifdef _LP64
const Register tmp = rscratch1;
f.load_argument(0, card_addr);
@@ -1679,7 +1698,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
// get the address of the card
__ addq(card_addr, tmp);
#else
- const Register card_index = rdx;
+ const Register card_index = rcx;
f.load_argument(0, card_index);
__ shrl(card_index, CardTableModRefBS::card_shift);
@@ -1716,12 +1735,32 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ jmp(done);
__ bind(runtime);
- NOT_LP64(__ push(rcx);)
+ __ push(rdx);
+#ifdef _LP64
+ __ push(r8);
+ __ push(r9);
+ __ push(r10);
+ __ push(r11);
+# ifndef _WIN64
+ __ push(rdi);
+ __ push(rsi);
+# endif
+#endif
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
- NOT_LP64(__ pop(rcx);)
-
- __ bind(done);
+#ifdef _LP64
+# ifndef _WIN64
+ __ pop(rsi);
+ __ pop(rdi);
+# endif
+ __ pop(r11);
+ __ pop(r10);
+ __ pop(r9);
+ __ pop(r8);
+#endif
__ pop(rdx);
+ __ bind(done);
+
+ __ pop(rcx);
__ pop(rax);
}
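
The extra pushes and pops added around the two call_VM_leaf calls in the G1 barrier stubs spill the registers the C ABI lets the callee clobber; which registers those are differs between Windows and the System V ABI, hence the nested _WIN64 guards. For reference, a sketch of the volatile integer register sets being protected (standard x86_64 ABI facts, not HotSpot code):

    #ifdef _WIN64
    // Win64: rsi and rdi are callee-saved, so they need no spill here.
    static const char* const volatile_gprs[] =
        { "rax", "rcx", "rdx", "r8", "r9", "r10", "r11" };
    #else
    // System V (Linux/Solaris x86_64): rsi and rdi are caller-saved as well.
    static const char* const volatile_gprs[] =
        { "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11" };
    #endif
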
diff --git a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp
index ec2fd1a2477..aae41ec4c08 100644
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp
@@ -189,11 +189,11 @@ void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, i
}
-void InterpreterMacroAssembler::get_cache_index_at_bcp(Register reg, int bcp_offset, bool giant_index) {
+void InterpreterMacroAssembler::get_cache_index_at_bcp(Register reg, int bcp_offset, size_t index_size) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
- if (!giant_index) {
+ if (index_size == sizeof(u2)) {
load_unsigned_short(reg, Address(rsi, bcp_offset));
- } else {
+ } else if (index_size == sizeof(u4)) {
assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
movl(reg, Address(rsi, bcp_offset));
// Check if the secondary index definition is still ~x, otherwise
@@ -201,14 +201,19 @@ void InterpreterMacroAssembler::get_cache_index_at_bcp(Register reg, int bcp_off
// plain index.
assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
notl(reg); // convert to plain index
+ } else if (index_size == sizeof(u1)) {
+ assert(EnableMethodHandles, "tiny index used only for EnableMethodHandles");
+ load_unsigned_byte(reg, Address(rsi, bcp_offset));
+ } else {
+ ShouldNotReachHere();
}
}
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register index,
- int bcp_offset, bool giant_index) {
+ int bcp_offset, size_t index_size) {
assert(cache != index, "must use different registers");
- get_cache_index_at_bcp(index, bcp_offset, giant_index);
+ get_cache_index_at_bcp(index, bcp_offset, index_size);
movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
shlptr(index, 2); // convert from field index to ConstantPoolCacheEntry index
@@ -216,9 +221,9 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Regis
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
- int bcp_offset, bool giant_index) {
+ int bcp_offset, size_t index_size) {
assert(cache != tmp, "must use different register");
- get_cache_index_at_bcp(tmp, bcp_offset, giant_index);
+ get_cache_index_at_bcp(tmp, bcp_offset, index_size);
assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
// convert from field index to ConstantPoolCacheEntry index
// and from word offset to byte offset
diff --git a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp
index b97b60fcbbd..97f044c9066 100644
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp
@@ -76,9 +76,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
void get_cpool_and_tags(Register cpool, Register tags) { get_constant_pool(cpool); movptr(tags, Address(cpool, constantPoolOopDesc::tags_offset_in_bytes()));
}
void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
- void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, bool giant_index = false);
- void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
- void get_cache_index_at_bcp(Register index, int bcp_offset, bool giant_index = false);
+ void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size = sizeof(u2));
+ void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
+ void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
// Expression stack
void f2ieee(); // truncate ftos to 32bits
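
get_cache_index_at_bcp now takes an index_size instead of a giant_index flag, so the interpreter can read 1-, 2-, or 4-byte constant-pool-cache indexes from the bytecode stream: the 1-byte form is for the EnableMethodHandles fast paths, the 4-byte form is invokedynamic's secondary index, which is stored bit-inverted. A stand-alone model of the dispatch, assuming a plain byte buffer in place of the bcp register:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    uint32_t get_cache_index(const uint8_t* bcp, int bcp_offset, size_t index_size) {
        if (index_size == sizeof(uint16_t)) {          // normal 2-byte index
            uint16_t v; std::memcpy(&v, bcp + bcp_offset, sizeof v); return v;
        } else if (index_size == sizeof(uint32_t)) {   // invokedynamic secondary index
            uint32_t v; std::memcpy(&v, bcp + bcp_offset, sizeof v);
            return ~v;                                 // stored as ~index (see the assert in the patch)
        } else if (index_size == sizeof(uint8_t)) {    // method handle 1-byte index
            return bcp[bcp_offset];
        }
        assert(false && "unsupported index size");
        return 0;
    }
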
diff --git a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp
index 3ed22820222..50b0b4cd88b 100644
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp
@@ -187,11 +187,11 @@ void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(
void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
int bcp_offset,
- bool giant_index) {
+ size_t index_size) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
- if (!giant_index) {
+ if (index_size == sizeof(u2)) {
load_unsigned_short(index, Address(r13, bcp_offset));
- } else {
+ } else if (index_size == sizeof(u4)) {
assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
movl(index, Address(r13, bcp_offset));
// Check if the secondary index definition is still ~x, otherwise
@@ -199,6 +199,11 @@ void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
// plain index.
assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
notl(index); // convert to plain index
+ } else if (index_size == sizeof(u1)) {
+ assert(EnableMethodHandles, "tiny index used only for EnableMethodHandles");
+ load_unsigned_byte(index, Address(r13, bcp_offset));
+ } else {
+ ShouldNotReachHere();
}
}
@@ -206,9 +211,9 @@ void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
Register index,
int bcp_offset,
- bool giant_index) {
+ size_t index_size) {
assert(cache != index, "must use different registers");
- get_cache_index_at_bcp(index, bcp_offset, giant_index);
+ get_cache_index_at_bcp(index, bcp_offset, index_size);
movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
// convert from field index to ConstantPoolCacheEntry index
@@ -219,9 +224,9 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
Register tmp,
int bcp_offset,
- bool giant_index) {
+ size_t index_size) {
assert(cache != tmp, "must use different register");
- get_cache_index_at_bcp(tmp, bcp_offset, giant_index);
+ get_cache_index_at_bcp(tmp, bcp_offset, index_size);
assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
// convert from field index to ConstantPoolCacheEntry index
// and from word offset to byte offset
diff --git a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp
index 6b8a84421bf..8eb9537501a 100644
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp
@@ -95,10 +95,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
void get_cache_and_index_at_bcp(Register cache, Register index,
- int bcp_offset, bool giant_index = false);
+ int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
- int bcp_offset, bool giant_index = false);
- void get_cache_index_at_bcp(Register index, int bcp_offset, bool giant_index = false);
+ int bcp_offset, size_t index_size = sizeof(u2));
+ void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
void pop_ptr(Register r = rax);
diff --git a/hotspot/src/cpu/x86/vm/register_definitions_x86.cpp b/hotspot/src/cpu/x86/vm/register_definitions_x86.cpp
index 070c75e969d..ee19fdd26cc 100644
--- a/hotspot/src/cpu/x86/vm/register_definitions_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/register_definitions_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -115,3 +115,6 @@ REGISTER_DEFINITION(MMXRegister, mmx4 );
REGISTER_DEFINITION(MMXRegister, mmx5 );
REGISTER_DEFINITION(MMXRegister, mmx6 );
REGISTER_DEFINITION(MMXRegister, mmx7 );
+
+// JSR 292
+REGISTER_DEFINITION(Register, rbp_mh_SP_save);
diff --git a/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp b/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp
index 44b365d1b45..9a32a67fe74 100644
--- a/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp
+++ b/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -117,7 +117,7 @@ void OptoRuntime::generate_exception_blob() {
// Restore SP from BP if the exception PC is a MethodHandle call site.
__ cmpl(Address(rcx, JavaThread::is_method_handle_return_offset()), 0);
- __ cmovptr(Assembler::notEqual, rsp, rbp);
+ __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
// We have a handler in rax, (could be deopt blob)
// rdx - throwing pc, deopt blob will need it.
diff --git a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
index c90b322b1c2..4f679636492 100644
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
@@ -3305,7 +3305,7 @@ void OptoRuntime::generate_exception_blob() {
// Restore SP from BP if the exception PC is a MethodHandle call site.
__ cmpl(Address(r15_thread, JavaThread::is_method_handle_return_offset()), 0);
- __ cmovptr(Assembler::notEqual, rsp, rbp);
+ __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
// We have a handler in rax (could be deopt blob).
__ mov(r8, rax);
diff --git a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp
index bf7e1278bca..9dacdcaf316 100644
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp
@@ -914,6 +914,7 @@ class StubGenerator: public StubCodeGenerator {
// * [tos + 5]: error message (char*)
// * [tos + 6]: object to verify (oop)
// * [tos + 7]: saved rax - saved by caller and bashed
+ // * [tos + 8]: saved r10 (rscratch1) - saved by caller
// * = popped on exit
address generate_verify_oop() {
StubCodeMark mark(this, "StubRoutines", "verify_oop");
@@ -934,6 +935,7 @@ class StubGenerator: public StubCodeGenerator {
// After previous pushes.
oop_to_verify = 6 * wordSize,
saved_rax = 7 * wordSize,
+ saved_r10 = 8 * wordSize,
// Before the call to MacroAssembler::debug(), see below.
return_addr = 16 * wordSize,
@@ -983,15 +985,17 @@ class StubGenerator: public StubCodeGenerator {
// return if everything seems ok
__ bind(exit);
__ movptr(rax, Address(rsp, saved_rax)); // get saved rax back
+ __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
__ pop(c_rarg3); // restore c_rarg3
__ pop(c_rarg2); // restore c_rarg2
__ pop(r12); // restore r12
__ popf(); // restore flags
- __ ret(3 * wordSize); // pop caller saved stuff
+ __ ret(4 * wordSize); // pop caller saved stuff
// handle errors
__ bind(error);
__ movptr(rax, Address(rsp, saved_rax)); // get saved rax back
+ __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
__ pop(c_rarg3); // get saved c_rarg3 back
__ pop(c_rarg2); // get saved c_rarg2 back
__ pop(r12); // get saved r12 back
@@ -1009,6 +1013,7 @@ class StubGenerator: public StubCodeGenerator {
// * [tos + 17] error message (char*)
// * [tos + 18] object to verify (oop)
// * [tos + 19] saved rax - saved by caller and bashed
+ // * [tos + 20] saved r10 (rscratch1) - saved by caller
// * = popped on exit
__ movptr(c_rarg0, Address(rsp, error_msg)); // pass address of error message
@@ -1021,7 +1026,7 @@ class StubGenerator: public StubCodeGenerator {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
__ mov(rsp, r12); // restore rsp
__ popa(); // pop registers (includes r12)
- __ ret(3 * wordSize); // pop caller saved stuff
+ __ ret(4 * wordSize); // pop caller saved stuff
return start;
}
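
With r10 (rscratch1) now saved by every verify_oop caller, the 64-bit verify_oop stub sees one more caller-pushed word, so its ret immediate grows from 3 to 4 words. A trivial check of that arithmetic (nothing HotSpot-specific beyond the 8-byte word size):

    #include <cstdio>

    int main() {
        const int wordSize = 8;                 // x86_64
        const int caller_pushed_words = 4;      // message, oop, saved rax, saved r10
        std::printf("ret imm16 = %d bytes\n", caller_pushed_words * wordSize);  // 32
        return 0;
    }
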
diff --git a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
index 58b9374b433..c7fbd4b15b4 100644
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
@@ -214,7 +214,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
__ jcc(Assembler::equal, L_giant_index);
}
- __ get_cache_and_index_at_bcp(rbx, rcx, 1, false);
+ __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
__ bind(L_got_cache);
__ movl(rbx, Address(rbx, rcx,
Address::times_ptr, constantPoolCacheOopDesc::base_offset() +
@@ -226,7 +226,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
// out of the main line of code...
if (EnableInvokeDynamic) {
__ bind(L_giant_index);
- __ get_cache_and_index_at_bcp(rbx, rcx, 1, true);
+ __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
__ jmp(L_got_cache);
}
diff --git a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp
index 0678b0b1349..1d4266e4f6a 100644
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp
@@ -192,7 +192,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
__ jcc(Assembler::equal, L_giant_index);
}
- __ get_cache_and_index_at_bcp(rbx, rcx, 1, false);
+ __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
__ bind(L_got_cache);
__ movl(rbx, Address(rbx, rcx,
Address::times_ptr,
@@ -205,7 +205,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
// out of the main line of code...
if (EnableInvokeDynamic) {
__ bind(L_giant_index);
- __ get_cache_and_index_at_bcp(rbx, rcx, 1, true);
+ __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
__ jmp(L_got_cache);
}
diff --git a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp
index acf85ee269c..1051d73aa35 100644
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp
@@ -2012,22 +2012,29 @@ void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constrain
__ membar(order_constraint);
}
-void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
- assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
- bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);
-
+void TemplateTable::resolve_cache_and_index(int byte_no,
+ Register result,
+ Register Rcache,
+ Register index,
+ size_t index_size) {
Register temp = rbx;
- assert_different_registers(Rcache, index, temp);
+ assert_different_registers(result, Rcache, index, temp);
- const int shift_count = (1 + byte_no)*BitsPerByte;
Label resolved;
- __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
- if (is_invokedynamic) {
- // we are resolved if the f1 field contains a non-null CallSite object
- __ cmpptr(Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()), (int32_t) NULL_WORD);
+ __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
+ if (byte_no == f1_oop) {
+ // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
+ // This kind of CP cache entry does not need to match the flags byte, because
+ // there is a 1-1 relation between bytecode type and CP entry type.
+ assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
+ __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
+ __ testptr(result, result);
__ jcc(Assembler::notEqual, resolved);
} else {
+ assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
+ assert(result == noreg, ""); //else change code for setting result
+ const int shift_count = (1 + byte_no)*BitsPerByte;
__ movl(temp, Address(Rcache, index, Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
__ shrl(temp, shift_count);
// have we resolved this bytecode?
@@ -2053,7 +2060,9 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
__ movl(temp, (int)bytecode());
__ call_VM(noreg, entry, temp);
// Update registers with resolved info
- __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
+ __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
+ if (result != noreg)
+ __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
__ bind(resolved);
}
@@ -2087,7 +2096,8 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
Register itable_index,
Register flags,
bool is_invokevirtual,
- bool is_invokevfinal /*unused*/) {
+ bool is_invokevfinal /*unused*/,
+ bool is_invokedynamic) {
// setup registers
const Register cache = rcx;
const Register index = rdx;
@@ -2109,13 +2119,18 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::f2_offset());
- resolve_cache_and_index(byte_no, cache, index);
-
- __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
+ if (byte_no == f1_oop) {
+ // Resolved f1_oop goes directly into 'method' register.
+ assert(is_invokedynamic, "");
+ resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
+ } else {
+ resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
+ __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
+ }
if (itable_index != noreg) {
__ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
}
- __ movl(flags , Address(cache, index, Address::times_ptr, flags_offset ));
+ __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
}
@@ -2169,7 +2184,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
const Register off = rbx;
const Register flags = rax;
- resolve_cache_and_index(byte_no, cache, index);
+ resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
jvmti_post_field_access(cache, index, is_static, false);
load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
@@ -2378,7 +2393,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
const Register off = rbx;
const Register flags = rax;
- resolve_cache_and_index(byte_no, cache, index);
+ resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
jvmti_post_field_mod(cache, index, is_static);
load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
@@ -2815,10 +2830,11 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
// save 'interpreter return address'
__ save_bcp();
- load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual);
+ load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
// load receiver if needed (note: no return address pushed yet)
if (load_receiver) {
+ assert(!is_invokedynamic, "");
__ movl(recv, flags);
__ andl(recv, 0xFF);
// recv count is 0 based?
@@ -2910,6 +2926,7 @@ void TemplateTable::invokevirtual_helper(Register index, Register recv,
void TemplateTable::invokevirtual(int byte_no) {
transition(vtos, vtos);
+ assert(byte_no == f2_byte, "use this argument");
prepare_invoke(rbx, noreg, byte_no);
// rbx,: index
@@ -2922,6 +2939,7 @@ void TemplateTable::invokevirtual(int byte_no) {
void TemplateTable::invokespecial(int byte_no) {
transition(vtos, vtos);
+ assert(byte_no == f1_byte, "use this argument");
prepare_invoke(rbx, noreg, byte_no);
// do the call
__ verify_oop(rbx);
@@ -2932,6 +2950,7 @@ void TemplateTable::invokespecial(int byte_no) {
void TemplateTable::invokestatic(int byte_no) {
transition(vtos, vtos);
+ assert(byte_no == f1_byte, "use this argument");
prepare_invoke(rbx, noreg, byte_no);
// do the call
__ verify_oop(rbx);
@@ -2942,12 +2961,14 @@ void TemplateTable::invokestatic(int byte_no) {
void TemplateTable::fast_invokevfinal(int byte_no) {
transition(vtos, vtos);
+ assert(byte_no == f2_byte, "use this argument");
__ stop("fast_invokevfinal not used on x86");
}
void TemplateTable::invokeinterface(int byte_no) {
transition(vtos, vtos);
+ assert(byte_no == f1_byte, "use this argument");
prepare_invoke(rax, rbx, byte_no);
// rax,: Interface
@@ -3036,11 +3057,11 @@ void TemplateTable::invokedynamic(int byte_no) {
return;
}
+ assert(byte_no == f1_oop, "use this argument");
prepare_invoke(rax, rbx, byte_no);
// rax: CallSite object (f1)
// rbx: unused (f2)
- // rcx: receiver address
// rdx: flags (unused)
if (ProfileInterpreter) {
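
resolve_cache_and_index now has two "already resolved?" tests: ordinary entries still compare the per-bytecode byte packed into the indices word, while f1_oop entries (invokedynamic) count as resolved exactly when f1 holds a non-null object, since bytecode type and entry type correspond one to one there. A simplified stand-alone model of both tests (the layout and the f1_oop sentinel value are assumptions):

    #include <cstddef>
    #include <cstdint>

    struct CPCacheEntry {
        void*    f1;        // resolved object (e.g. CallSite) for f1_oop entries
        uint32_t indices;   // packed per-bytecode bytes for ordinary entries
    };

    enum { f1_byte = 1, f2_byte = 2, f1_oop = 0x100 /* sentinel value assumed */ };

    bool is_resolved(const CPCacheEntry& e, int byte_no, int bytecode) {
        if (byte_no == f1_oop) {
            return e.f1 != NULL;                     // non-null f1 means resolved
        }
        const int shift_count = (1 + byte_no) * 8;   // BitsPerByte
        return (int)((e.indices >> shift_count) & 0xFF) == bytecode;
    }
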
diff --git a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp
index 2bc2bf4e8ff..c262875222e 100644
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp
@@ -2015,21 +2015,28 @@ void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits
}
}
-void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
- assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
- bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);
-
+void TemplateTable::resolve_cache_and_index(int byte_no,
+ Register result,
+ Register Rcache,
+ Register index,
+ size_t index_size) {
const Register temp = rbx;
- assert_different_registers(Rcache, index, temp);
+ assert_different_registers(result, Rcache, index, temp);
- const int shift_count = (1 + byte_no) * BitsPerByte;
Label resolved;
- __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
- if (is_invokedynamic) {
- // we are resolved if the f1 field contains a non-null CallSite object
- __ cmpptr(Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()), (int32_t) NULL_WORD);
+ __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
+ if (byte_no == f1_oop) {
+ // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
+ // This kind of CP cache entry does not need to match the flags byte, because
+ // there is a 1-1 relation between bytecode type and CP entry type.
+ assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
+ __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
+ __ testptr(result, result);
__ jcc(Assembler::notEqual, resolved);
} else {
+ assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
+ assert(result == noreg, ""); //else change code for setting result
+ const int shift_count = (1 + byte_no) * BitsPerByte;
__ movl(temp, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
__ shrl(temp, shift_count);
// have we resolved this bytecode?
@@ -2064,7 +2071,9 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
__ call_VM(noreg, entry, temp);
// Update registers with resolved info
- __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
+ __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
+ if (result != noreg)
+ __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
__ bind(resolved);
}
@@ -2100,7 +2109,8 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
Register itable_index,
Register flags,
bool is_invokevirtual,
- bool is_invokevfinal /*unused*/) {
+ bool is_invokevfinal, /*unused*/
+ bool is_invokedynamic) {
// setup registers
const Register cache = rcx;
const Register index = rdx;
@@ -2120,15 +2130,18 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::f2_offset());
- resolve_cache_and_index(byte_no, cache, index);
-
- assert(wordSize == 8, "adjust code below");
- __ movptr(method, Address(cache, index, Address::times_8, method_offset));
- if (itable_index != noreg) {
- __ movptr(itable_index,
- Address(cache, index, Address::times_8, index_offset));
+ if (byte_no == f1_oop) {
+ // Resolved f1_oop goes directly into 'method' register.
+ assert(is_invokedynamic, "");
+ resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
+ } else {
+ resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
+ __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
}
- __ movl(flags , Address(cache, index, Address::times_8, flags_offset));
+ if (itable_index != noreg) {
+ __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
+ }
+ __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
}
@@ -2187,7 +2200,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
const Register flags = rax;
const Register bc = c_rarg3; // uses same reg as obj, so don't mix them
- resolve_cache_and_index(byte_no, cache, index);
+ resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
jvmti_post_field_access(cache, index, is_static, false);
load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
@@ -2390,7 +2403,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
const Register flags = rax;
const Register bc = c_rarg3;
- resolve_cache_and_index(byte_no, cache, index);
+ resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
jvmti_post_field_mod(cache, index, is_static);
load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
@@ -2815,10 +2828,11 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
// save 'interpreter return address'
__ save_bcp();
- load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual);
+ load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
// load receiver if needed (note: no return address pushed yet)
if (load_receiver) {
+ assert(!is_invokedynamic, "");
__ movl(recv, flags);
__ andl(recv, 0xFF);
Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1));
@@ -2914,6 +2928,7 @@ void TemplateTable::invokevirtual_helper(Register index,
void TemplateTable::invokevirtual(int byte_no) {
transition(vtos, vtos);
+ assert(byte_no == f2_byte, "use this argument");
prepare_invoke(rbx, noreg, byte_no);
// rbx: index
@@ -2926,6 +2941,7 @@ void TemplateTable::invokevirtual(int byte_no) {
void TemplateTable::invokespecial(int byte_no) {
transition(vtos, vtos);
+ assert(byte_no == f1_byte, "use this argument");
prepare_invoke(rbx, noreg, byte_no);
// do the call
__ verify_oop(rbx);
@@ -2936,6 +2952,7 @@ void TemplateTable::invokespecial(int byte_no) {
void TemplateTable::invokestatic(int byte_no) {
transition(vtos, vtos);
+ assert(byte_no == f1_byte, "use this argument");
prepare_invoke(rbx, noreg, byte_no);
// do the call
__ verify_oop(rbx);
@@ -2945,11 +2962,13 @@ void TemplateTable::invokestatic(int byte_no) {
void TemplateTable::fast_invokevfinal(int byte_no) {
transition(vtos, vtos);
+ assert(byte_no == f2_byte, "use this argument");
__ stop("fast_invokevfinal not used on amd64");
}
void TemplateTable::invokeinterface(int byte_no) {
transition(vtos, vtos);
+ assert(byte_no == f1_byte, "use this argument");
prepare_invoke(rax, rbx, byte_no);
// rax: Interface
@@ -3027,6 +3046,7 @@ void TemplateTable::invokeinterface(int byte_no) {
void TemplateTable::invokedynamic(int byte_no) {
transition(vtos, vtos);
+ assert(byte_no == f1_oop, "use this argument");
if (!EnableInvokeDynamic) {
// We should not encounter this bytecode if !EnableInvokeDynamic.
@@ -3039,6 +3059,7 @@ void TemplateTable::invokedynamic(int byte_no) {
return;
}
+ assert(byte_no == f1_oop, "use this argument");
prepare_invoke(rax, rbx, byte_no);
// rax: CallSite object (f1)
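
The byte_no asserts sprinkled through both templateTable files pin down which cp-cache operand each invoke bytecode consults. As a reference sketch (the names come from the patch; the numeric values are assumptions):

    enum CPCacheByte { f1_byte = 1, f2_byte = 2, f1_oop = 0x100 /* assumed */ };
    enum InvokeKind  { iv_virtual, iv_special, iv_static, iv_interface, iv_dynamic, iv_vfinal };

    inline CPCacheByte byte_no_for(InvokeKind k) {
        switch (k) {
            case iv_virtual:
            case iv_vfinal:  return f2_byte;  // method (or final method) lives in f2
            case iv_dynamic: return f1_oop;   // resolved CallSite oop lives in f1
            default:         return f1_byte;  // special / static / interface use f1
        }
    }
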
diff --git a/hotspot/src/cpu/x86/vm/x86_32.ad b/hotspot/src/cpu/x86/vm/x86_32.ad
index 96785136118..b4c57056f45 100644
--- a/hotspot/src/cpu/x86/vm/x86_32.ad
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad
@@ -1377,6 +1377,12 @@ const int Matcher::init_array_short_size = 8 * BytesPerLong;
// registers? True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = true;
+bool Matcher::narrow_oop_use_complex_address() {
+ ShouldNotCallThis();
+ return true;
+}
+
+
// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers. Most RISCs will have to materialize an address into a
@@ -1841,14 +1847,14 @@ encode %{
MacroAssembler _masm(&cbuf);
// RBP is preserved across all calls, even compiled calls.
// Use it to preserve RSP in places where the callee might change the SP.
- __ movptr(rbp, rsp);
+ __ movptr(rbp_mh_SP_save, rsp);
debug_only(int off1 = cbuf.code_size());
assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
%}
enc_class restore_SP %{
MacroAssembler _masm(&cbuf);
- __ movptr(rsp, rbp);
+ __ movptr(rsp, rbp_mh_SP_save);
%}
enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
@@ -13570,7 +13576,7 @@ instruct CallStaticJavaDirect(method meth) %{
// Call Java Static Instruction (method handle version)
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
-instruct CallStaticJavaHandle(method meth, eBPRegP ebp) %{
+instruct CallStaticJavaHandle(method meth, eBPRegP ebp_mh_SP_save) %{
match(CallStaticJava);
predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
effect(USE meth);
diff --git a/hotspot/src/cpu/x86/vm/x86_64.ad b/hotspot/src/cpu/x86/vm/x86_64.ad
index 6590af0ac10..ba91cb79915 100644
--- a/hotspot/src/cpu/x86/vm/x86_64.ad
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad
@@ -1851,29 +1851,24 @@ uint reloc_java_to_interp()
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
if (UseCompressedOops) {
- st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t", oopDesc::klass_offset_in_bytes());
+ st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
if (Universe::narrow_oop_shift() != 0) {
- st->print_cr("leaq rscratch1, [r12_heapbase, r, Address::times_8, 0]");
+ st->print_cr("\tdecode_heap_oop_not_null rscratch1, rscratch1");
}
- st->print_cr("cmpq rax, rscratch1\t # Inline cache check");
+ st->print_cr("\tcmpq rax, rscratch1\t # Inline cache check");
} else {
- st->print_cr("cmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t"
- "# Inline cache check", oopDesc::klass_offset_in_bytes());
+ st->print_cr("\tcmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t"
+ "# Inline cache check");
}
st->print_cr("\tjne SharedRuntime::_ic_miss_stub");
- st->print_cr("\tnop");
- if (!OptoBreakpoint) {
- st->print_cr("\tnop");
- }
+ st->print_cr("\tnop\t# nops to align entry point");
}
#endif
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
MacroAssembler masm(&cbuf);
-#ifdef ASSERT
uint code_size = cbuf.code_size();
-#endif
if (UseCompressedOops) {
masm.load_klass(rscratch1, j_rarg0);
masm.cmpptr(rax, rscratch1);
@@ -1884,33 +1879,21 @@ void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
/* WARNING these NOPs are critical so that verified entry point is properly
- aligned for patching by NativeJump::patch_verified_entry() */
- int nops_cnt = 1;
- if (!OptoBreakpoint) {
+ 4 bytes aligned for patching by NativeJump::patch_verified_entry() */
+ int nops_cnt = 4 - ((cbuf.code_size() - code_size) & 0x3);
+ if (OptoBreakpoint) {
// Leave space for int3
- nops_cnt += 1;
+ nops_cnt -= 1;
}
- if (UseCompressedOops) {
- // ??? divisible by 4 is aligned?
- nops_cnt += 1;
- }
- masm.nop(nops_cnt);
-
- assert(cbuf.code_size() - code_size == size(ra_),
- "checking code size of inline cache node");
+ nops_cnt &= 0x3; // Do not add nops if code is aligned.
+ if (nops_cnt > 0)
+ masm.nop(nops_cnt);
}
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
- if (UseCompressedOops) {
- if (Universe::narrow_oop_shift() == 0) {
- return OptoBreakpoint ? 15 : 16;
- } else {
- return OptoBreakpoint ? 19 : 20;
- }
- } else {
- return OptoBreakpoint ? 11 : 12;
- }
+ return MachNode::size(ra_); // too many variables; just compute it
+ // the hard way
}
@@ -2054,6 +2037,11 @@ const int Matcher::init_array_short_size = 8 * BytesPerLong;
// into registers? True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = true;
+bool Matcher::narrow_oop_use_complex_address() {
+ assert(UseCompressedOops, "only for compressed oops code");
+ return (LogMinObjAlignmentInBytes <= 3);
+}
+
// Is it better to copy float constants, or load them directly from
// memory? Intel can load a float constant from a direct address,
// requiring no extra registers. Most RISCs will have to materialize
@@ -2635,14 +2623,14 @@ encode %{
MacroAssembler _masm(&cbuf);
// RBP is preserved across all calls, even compiled calls.
// Use it to preserve RSP in places where the callee might change the SP.
- __ movptr(rbp, rsp);
+ __ movptr(rbp_mh_SP_save, rsp);
debug_only(int off1 = cbuf.code_size());
assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
%}
enc_class restore_SP %{
MacroAssembler _masm(&cbuf);
- __ movptr(rsp, rbp);
+ __ movptr(rsp, rbp_mh_SP_save);
%}
enc_class Java_Static_Call(method meth)
@@ -5127,7 +5115,7 @@ operand indPosIndexScaleOffset(any_RegP reg, immL32 off, rRegI idx, immI2 scale)
// Note: x86 architecture doesn't support "scale * index + offset" without a base
// we can't free r12 even with Universe::narrow_oop_base() == NULL.
operand indCompressedOopOffset(rRegN reg, immL32 off) %{
- predicate(UseCompressedOops && (Universe::narrow_oop_shift() != 0));
+ predicate(UseCompressedOops && (Universe::narrow_oop_shift() == Address::times_8));
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP (DecodeN reg) off);
@@ -7742,10 +7730,11 @@ instruct decodeHeapOop(rRegP dst, rRegN src, rFlagsReg cr) %{
ins_pipe(ialu_reg_long);
%}
-instruct decodeHeapOop_not_null(rRegP dst, rRegN src) %{
+instruct decodeHeapOop_not_null(rRegP dst, rRegN src, rFlagsReg cr) %{
predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant);
match(Set dst (DecodeN src));
+ effect(KILL cr);
format %{ "decode_heap_oop_not_null $dst,$src" %}
ins_encode %{
Register s = $src$$Register;
@@ -12604,7 +12593,7 @@ instruct CallStaticJavaDirect(method meth) %{
// Call Java Static Instruction (method handle version)
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
-instruct CallStaticJavaHandle(method meth, rbp_RegP rbp) %{
+instruct CallStaticJavaHandle(method meth, rbp_RegP rbp_mh_SP_save) %{
match(CallStaticJava);
predicate(((CallStaticJavaNode*) n)->is_method_handle_invoke());
effect(USE meth);
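
Two x86_64.ad changes are worth calling out: narrow_oop_use_complex_address() only allows folding the oop decode into an addressing mode when the shift still fits lea scaling (LogMinObjAlignmentInBytes <= 3), and MachUEPNode::emit now pads the inline-cache check to the next 4-byte boundary from its measured size instead of a hard-coded nop count. A stand-alone model of that padding computation, where 'emitted' stands for cbuf.code_size() - code_size:

    int uep_padding(int emitted, bool opto_breakpoint) {
        int nops_cnt = 4 - (emitted & 0x3);   // bytes to the next 4-byte boundary
        if (opto_breakpoint) nops_cnt -= 1;   // leave room for the int3
        nops_cnt &= 0x3;                      // already aligned: emit no nops
        return nops_cnt;
    }
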
diff --git a/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp b/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp
index 603ef0891b2..d39ffdd3fb7 100644
--- a/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp
+++ b/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -232,12 +232,11 @@ int generateJvmOffsets(GEN_variant gen_variant) {
GEN_OFFS(CodeBlob, _header_size);
GEN_OFFS(CodeBlob, _instructions_offset);
GEN_OFFS(CodeBlob, _data_offset);
- GEN_OFFS(CodeBlob, _oops_offset);
- GEN_OFFS(CodeBlob, _oops_length);
GEN_OFFS(CodeBlob, _frame_size);
printf("\n");
GEN_OFFS(nmethod, _method);
+ GEN_OFFS(nmethod, _oops_offset);
GEN_OFFS(nmethod, _scopes_data_offset);
GEN_OFFS(nmethod, _scopes_pcs_offset);
GEN_OFFS(nmethod, _handler_table_offset);
diff --git a/hotspot/src/os/solaris/dtrace/libjvm_db.c b/hotspot/src/os/solaris/dtrace/libjvm_db.c
index be3b99b580c..691cbb7be8f 100644
--- a/hotspot/src/os/solaris/dtrace/libjvm_db.c
+++ b/hotspot/src/os/solaris/dtrace/libjvm_db.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -130,7 +130,7 @@ typedef struct Nmethod_t {
int32_t scopes_data_beg; /* _scopes_data_offset */
int32_t scopes_data_end;
int32_t oops_beg; /* _oops_offset */
- int32_t oops_len; /* _oops_length */
+ int32_t oops_end;
int32_t scopes_pcs_beg; /* _scopes_pcs_offset */
int32_t scopes_pcs_end;
@@ -597,9 +597,9 @@ static int nmethod_info(Nmethod_t *N)
CHECK_FAIL(err);
/* Oops */
- err = ps_pread(J->P, nm + OFFSET_CodeBlob_oops_offset, &N->oops_beg, SZ32);
+ err = ps_pread(J->P, nm + OFFSET_nmethod_oops_offset, &N->oops_beg, SZ32);
CHECK_FAIL(err);
- err = ps_pread(J->P, nm + OFFSET_CodeBlob_oops_length, &N->oops_len, SZ32);
+ err = ps_pread(J->P, nm + OFFSET_nmethod_scopes_data_offset, &N->oops_end, SZ32);
CHECK_FAIL(err);
/* scopes_pcs */
@@ -624,8 +624,8 @@ static int nmethod_info(Nmethod_t *N)
fprintf(stderr, "\t nmethod_info: orig_pc_offset: %#x \n",
N->orig_pc_offset);
- fprintf(stderr, "\t nmethod_info: oops_beg: %#x, oops_len: %#x\n",
- N->oops_beg, N->oops_len);
+ fprintf(stderr, "\t nmethod_info: oops_beg: %#x, oops_end: %#x\n",
+ N->oops_beg, N->oops_end);
fprintf(stderr, "\t nmethod_info: scopes_data_beg: %#x, scopes_data_end: %#x\n",
N->scopes_data_beg, N->scopes_data_end);
@@ -959,8 +959,8 @@ static int scopeDesc_chain(Nmethod_t *N) {
err = scope_desc_at(N, decode_offset, vf);
CHECK_FAIL(err);
- if (vf->methodIdx > N->oops_len) {
- fprintf(stderr, "\t scopeDesc_chain: (methodIdx > oops_len) !\n");
+ if (vf->methodIdx > ((N->oops_end - N->oops_beg) / POINTER_SIZE)) {
+ fprintf(stderr, "\t scopeDesc_chain: (methodIdx > oops length) !\n");
return -1;
}
err = read_pointer(N->J, N->nm + N->oops_beg + (vf->methodIdx-1)*POINTER_SIZE,
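
libjvm_db.c tracks the relocation of the oops array from CodeBlob into nmethod: the array now starts at _oops_offset and ends where _scopes_data_offset begins, so its length is derived rather than read from the removed _oops_length field. A small sketch of the bounds check the rewritten scopeDesc_chain performs (C-compatible, values illustrative):

    #include <stdint.h>
    #include <stddef.h>

    static int method_index_in_range(int32_t oops_beg, int32_t oops_end,
                                     int32_t methodIdx, size_t pointer_size) {
        int32_t oops_len = (oops_end - oops_beg) / (int32_t) pointer_size;
        return methodIdx <= oops_len;   /* methodIdx is 1-based in the caller */
    }
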
diff --git a/hotspot/src/share/vm/asm/codeBuffer.hpp b/hotspot/src/share/vm/asm/codeBuffer.hpp
index f3748c364d4..2f1a8d3c8fe 100644
--- a/hotspot/src/share/vm/asm/codeBuffer.hpp
+++ b/hotspot/src/share/vm/asm/codeBuffer.hpp
@@ -510,9 +510,9 @@ class CodeBuffer: public StackObj {
copy_relocations_to(blob);
copy_code_to(blob);
}
- void copy_oops_to(CodeBlob* blob) {
+ void copy_oops_to(nmethod* nm) {
if (!oop_recorder()->is_unused()) {
- oop_recorder()->copy_to(blob);
+ oop_recorder()->copy_to(nm);
}
}
diff --git a/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp b/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp
index 00b8173f1fb..5607fc00fa8 100644
--- a/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp
+++ b/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp
@@ -26,9 +26,11 @@
#include "incls/_c1_Canonicalizer.cpp.incl"
-static void do_print_value(Value* vp) {
- (*vp)->print_line();
-}
+class PrintValueVisitor: public ValueVisitor {
+ void visit(Value* vp) {
+ (*vp)->print_line();
+ }
+};
void Canonicalizer::set_canonical(Value x) {
assert(x != NULL, "value must exist");
@@ -37,10 +39,11 @@ void Canonicalizer::set_canonical(Value x) {
// in the instructions).
if (canonical() != x) {
if (PrintCanonicalization) {
- canonical()->input_values_do(do_print_value);
+ PrintValueVisitor do_print_value;
+ canonical()->input_values_do(&do_print_value);
canonical()->print_line();
tty->print_cr("canonicalized to:");
- x->input_values_do(do_print_value);
+ x->input_values_do(&do_print_value);
x->print_line();
tty->cr();
}
@@ -202,7 +205,7 @@ void Canonicalizer::do_StoreField (StoreField* x) {
// limit this optimization to current block
if (value != NULL && in_current_block(conv)) {
set_canonical(new StoreField(x->obj(), x->offset(), x->field(), value, x->is_static(),
- x->lock_stack(), x->state_before(), x->is_loaded(), x->is_initialized()));
+ x->lock_stack(), x->state_before(), x->is_loaded(), x->is_initialized()));
return;
}
}
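
c1_Canonicalizer is the first of several files in this changeset that replace bare function-pointer callbacks (do_print_value) with ValueVisitor objects, so a traversal can carry state instead of relying on globals. A minimal stand-alone sketch of the pattern, with a simplified Instruction stand-in (the real interface is declared elsewhere in c1):

    #include <cstdio>

    class Instruction {
     public:
        explicit Instruction(int id) : _id(id) {}
        void print_line() const { std::printf("i%d\n", _id); }
     private:
        int _id;
    };
    typedef Instruction* Value;            // in c1 a Value is an Instruction*

    class ValueVisitor {                   // the new callback interface
     public:
        virtual void visit(Value* vp) = 0;
        virtual ~ValueVisitor() {}
    };

    class PrintValueVisitor : public ValueVisitor {
        virtual void visit(Value* vp) { (*vp)->print_line(); }
    };
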
diff --git a/hotspot/src/share/vm/c1/c1_Compilation.cpp b/hotspot/src/share/vm/c1/c1_Compilation.cpp
index 2bf8902941c..d6f72b8d1b6 100644
--- a/hotspot/src/share/vm/c1/c1_Compilation.cpp
+++ b/hotspot/src/share/vm/c1/c1_Compilation.cpp
@@ -66,9 +66,6 @@ class PhaseTraceTime: public TraceTime {
}
};
-Arena* Compilation::_arena = NULL;
-Compilation* Compilation::_compilation = NULL;
-
// Implementation of Compilation
@@ -238,9 +235,23 @@ void Compilation::emit_code_epilog(LIR_Assembler* assembler) {
}
+void Compilation::setup_code_buffer(CodeBuffer* code, int call_stub_estimate) {
+ // Preinitialize the consts section to some large size:
+ int locs_buffer_size = 20 * (relocInfo::length_limit + sizeof(relocInfo));
+ char* locs_buffer = NEW_RESOURCE_ARRAY(char, locs_buffer_size);
+ code->insts()->initialize_shared_locs((relocInfo*)locs_buffer,
+ locs_buffer_size / sizeof(relocInfo));
+ code->initialize_consts_size(Compilation::desired_max_constant_size());
+ // Call stubs + deopt/exception handler
+ code->initialize_stubs_size((call_stub_estimate * LIR_Assembler::call_stub_size) +
+ LIR_Assembler::exception_handler_size +
+ LIR_Assembler::deopt_handler_size);
+}
+
+
int Compilation::emit_code_body() {
// emit code
- Runtime1::setup_code_buffer(code(), allocator()->num_calls());
+ setup_code_buffer(code(), allocator()->num_calls());
code()->initialize_oop_recorder(env()->oop_recorder());
_masm = new C1_MacroAssembler(code());
@@ -422,7 +433,8 @@ void Compilation::generate_exception_handler_table() {
}
-Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* method, int osr_bci)
+Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* method,
+ int osr_bci, BufferBlob* buffer_blob)
: _compiler(compiler)
, _env(env)
, _method(method)
@@ -437,8 +449,10 @@ Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* metho
, _bailout_msg(NULL)
, _exception_info_list(NULL)
, _allocator(NULL)
-, _code(Runtime1::get_buffer_blob()->instructions_begin(),
- Runtime1::get_buffer_blob()->instructions_size())
+, _next_id(0)
+, _next_block_id(0)
+, _code(buffer_blob->instructions_begin(),
+ buffer_blob->instructions_size())
, _current_instruction(NULL)
#ifndef PRODUCT
, _last_instruction_printed(NULL)
@@ -446,17 +460,15 @@ Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* metho
{
PhaseTraceTime timeit(_t_compile);
- assert(_arena == NULL, "shouldn't only one instance of Compilation in existence at a time");
_arena = Thread::current()->resource_area();
- _compilation = this;
+ _env->set_compiler_data(this);
_exception_info_list = new ExceptionInfoList();
_implicit_exception_table.set_size(0);
compile_method();
}
Compilation::~Compilation() {
- _arena = NULL;
- _compilation = NULL;
+ _env->set_compiler_data(NULL);
}
diff --git a/hotspot/src/share/vm/c1/c1_Compilation.hpp b/hotspot/src/share/vm/c1/c1_Compilation.hpp
index 9d24b24a79b..86d6cbf7a05 100644
--- a/hotspot/src/share/vm/c1/c1_Compilation.hpp
+++ b/hotspot/src/share/vm/c1/c1_Compilation.hpp
@@ -53,15 +53,11 @@ define_stack(ExceptionInfoList, ExceptionInfoArray)
class Compilation: public StackObj {
friend class CompilationResourceObj;
- private:
-
- static Arena* _arena;
- static Arena* arena() { return _arena; }
-
- static Compilation* _compilation;
-
private:
// compilation specifics
+ Arena* _arena;
+ int _next_id;
+ int _next_block_id;
AbstractCompiler* _compiler;
ciEnv* _env;
ciMethod* _method;
@@ -108,10 +104,14 @@ class Compilation: public StackObj {
public:
// creation
- Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* method, int osr_bci);
+ Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* method,
+ int osr_bci, BufferBlob* buffer_blob);
~Compilation();
- static Compilation* current_compilation() { return _compilation; }
+
+ static Compilation* current() {
+ return (Compilation*) ciEnv::current()->compiler_data();
+ }
// accessors
ciEnv* env() const { return _env; }
@@ -128,6 +128,15 @@ class Compilation: public StackObj {
CodeBuffer* code() { return &_code; }
C1_MacroAssembler* masm() const { return _masm; }
CodeOffsets* offsets() { return &_offsets; }
+ Arena* arena() { return _arena; }
+
+ // Instruction ids
+ int get_next_id() { return _next_id++; }
+ int number_of_instructions() const { return _next_id; }
+
+ // BlockBegin ids
+ int get_next_block_id() { return _next_block_id++; }
+ int number_of_blocks() const { return _next_block_id; }
// setters
void set_has_exception_handlers(bool f) { _has_exception_handlers = f; }
@@ -158,6 +167,15 @@ class Compilation: public StackObj {
bool bailed_out() const { return _bailout_msg != NULL; }
const char* bailout_msg() const { return _bailout_msg; }
+ static int desired_max_code_buffer_size() {
+ return (int) NMethodSizeLimit; // default 256K or 512K
+ }
+ static int desired_max_constant_size() {
+ return (int) NMethodSizeLimit / 10; // about 25K
+ }
+
+ static void setup_code_buffer(CodeBuffer* cb, int call_stub_estimate);
+
// timers
static void print_timers();
@@ -203,7 +221,10 @@ class InstructionMark: public StackObj {
// Base class for objects allocated by the compiler in the compilation arena
class CompilationResourceObj ALLOCATION_SUPER_CLASS_SPEC {
public:
- void* operator new(size_t size) { return Compilation::arena()->Amalloc(size); }
+ void* operator new(size_t size) { return Compilation::current()->arena()->Amalloc(size); }
+ void* operator new(size_t size, Arena* arena) {
+ return arena->Amalloc(size);
+ }
void operator delete(void* p) {} // nothing to do
};
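
The CompilationResourceObj hunk above adds an arena-placement operator new(size_t, Arena*), so objects such as the Range and Interval sentinels further down can be allocated into an explicitly chosen, long-lived arena rather than the current compilation's. A toy, self-contained illustration of the pattern; the Arena below is a deliberately simplified stand-in for HotSpot's.

    #include <cstddef>
    #include <cstdlib>
    #include <vector>

    // Toy allocator standing in for HotSpot's Arena: it hands out blocks
    // and frees everything at once when it is destroyed.
    class Arena {
      std::vector<void*> _chunks;
     public:
      void* Amalloc(size_t size) {
        void* p = std::malloc(size);
        _chunks.push_back(p);
        return p;
      }
      ~Arena() { for (void* p : _chunks) std::free(p); }
    };

    class ResourceObj {
     public:
      // Arena-placement form: the caller chooses the arena explicitly.
      void* operator new(size_t size, Arena* arena) { return arena->Amalloc(size); }
      void operator delete(void*) {}   // the arena owns the storage; nothing to free
    };

    struct Range : ResourceObj {
      int from, to;
      Range(int f, int t) : from(f), to(t) {}
    };

    int main() {
      Arena arena;
      Range* sentinel = new (&arena) Range(0, 0);  // mirrors "new (arena) Range(...)"
      (void)sentinel;                              // reclaimed when the arena dies
    }
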
diff --git a/hotspot/src/share/vm/c1/c1_Compiler.cpp b/hotspot/src/share/vm/c1/c1_Compiler.cpp
index 53216e39925..958a080c80a 100644
--- a/hotspot/src/share/vm/c1/c1_Compiler.cpp
+++ b/hotspot/src/share/vm/c1/c1_Compiler.cpp
@@ -27,9 +27,6 @@
volatile int Compiler::_runtimes = uninitialized;
-volatile bool Compiler::_compiling = false;
-
-
Compiler::Compiler() {
}
@@ -39,47 +36,62 @@ Compiler::~Compiler() {
}
+void Compiler::initialize_all() {
+ BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
+ Arena* arena = new Arena();
+ Runtime1::initialize(buffer_blob);
+ FrameMap::initialize();
+ // initialize data structures
+ ValueType::initialize(arena);
+ // Instruction::initialize();
+ // BlockBegin::initialize();
+ GraphBuilder::initialize();
+ // note: to use more than one instance of LinearScan at a time, this call has to
+ // stay outside of the LinearScan constructor, which is why it now runs here:
+ Interval::initialize(arena);
+}
+
+
void Compiler::initialize() {
if (_runtimes != initialized) {
- initialize_runtimes( Runtime1::initialize, &_runtimes);
+ initialize_runtimes( initialize_all, &_runtimes);
}
mark_initialized();
}
+BufferBlob* Compiler::build_buffer_blob() {
+ // setup CodeBuffer. Preallocate a BufferBlob of size
+ // NMethodSizeLimit plus some extra space for constants.
+ int code_buffer_size = Compilation::desired_max_code_buffer_size() +
+ Compilation::desired_max_constant_size();
+ BufferBlob* blob = BufferBlob::create("Compiler1 temporary CodeBuffer",
+ code_buffer_size);
+ guarantee(blob != NULL, "must create initial code buffer");
+ return blob;
+}
+
+
void Compiler::compile_method(ciEnv* env, ciMethod* method, int entry_bci) {
+ // Allocate buffer blob once at startup since allocation for each
+ // compilation seems to be too expensive (at least on Intel win32).
+ BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
+ if (buffer_blob == NULL) {
+ buffer_blob = build_buffer_blob();
+ CompilerThread::current()->set_buffer_blob(buffer_blob);
+ }
if (!is_initialized()) {
initialize();
}
// invoke compilation
-#ifdef TIERED
- // We are thread in native here...
- CompilerThread* thread = CompilerThread::current();
- {
- ThreadInVMfromNative tv(thread);
- MutexLocker only_one (C1_lock, thread);
- while ( _compiling) {
- C1_lock->wait();
- }
- _compiling = true;
- }
-#endif // TIERED
{
// We are nested here because we need for the destructor
// of Compilation to occur before we release the any
// competing compiler thread
ResourceMark rm;
- Compilation c(this, env, method, entry_bci);
+ Compilation c(this, env, method, entry_bci, buffer_blob);
}
-#ifdef TIERED
- {
- ThreadInVMfromNative tv(thread);
- MutexLocker only_one (C1_lock, thread);
- _compiling = false;
- C1_lock->notify();
- }
-#endif // TIERED
}
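
compile_method above now keeps one BufferBlob cached on the CompilerThread and builds it lazily on first use, since allocating it for every compilation was found to be too expensive. A minimal sketch of that lazy per-thread cache, using a thread_local in place of the CompilerThread get/set accessors; the 256K figure is illustrative, not the real sizing.

    #include <cassert>
    #include <cstddef>

    struct BufferBlob {
      explicit BufferBlob(size_t size) : size(size) {}
      size_t size;
    };

    static BufferBlob* build_buffer_blob() {
      // The real code sizes this as desired_max_code_buffer_size() +
      // desired_max_constant_size(); the constant here is just a placeholder.
      return new BufferBlob(256 * 1024);
    }

    static BufferBlob* get_thread_buffer_blob() {
      // One blob per compiler thread, created on first use and reused afterwards.
      static thread_local BufferBlob* blob = nullptr;
      if (blob == nullptr) {
        blob = build_buffer_blob();
        assert(blob != nullptr && "must create initial code buffer");
      }
      return blob;
    }

    int main() {
      BufferBlob* a = get_thread_buffer_blob();
      BufferBlob* b = get_thread_buffer_blob();
      assert(a == b);   // later compilations on this thread reuse the same blob
    }
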
diff --git a/hotspot/src/share/vm/c1/c1_Compiler.hpp b/hotspot/src/share/vm/c1/c1_Compiler.hpp
index 3f7d1f94db0..a8e6eacd748 100644
--- a/hotspot/src/share/vm/c1/c1_Compiler.hpp
+++ b/hotspot/src/share/vm/c1/c1_Compiler.hpp
@@ -31,10 +31,6 @@ class Compiler: public AbstractCompiler {
// Tracks whether runtime has been initialized
static volatile int _runtimes;
- // In tiered it is possible for multiple threads to want to do compilation
- // only one can enter c1 at a time
- static volatile bool _compiling;
-
public:
// Creation
Compiler();
@@ -47,6 +43,7 @@ class Compiler: public AbstractCompiler {
virtual bool is_c1() { return true; };
#endif // TIERED
+ BufferBlob* build_buffer_blob();
// Missing feature tests
virtual bool supports_native() { return true; }
@@ -58,6 +55,7 @@ class Compiler: public AbstractCompiler {
// Initialization
virtual void initialize();
+ static void initialize_all();
// Compilation entry point for methods
virtual void compile_method(ciEnv* env, ciMethod* target, int entry_bci);
diff --git a/hotspot/src/share/vm/c1/c1_FrameMap.cpp b/hotspot/src/share/vm/c1/c1_FrameMap.cpp
index d9ff44afa69..55fd77b9809 100644
--- a/hotspot/src/share/vm/c1/c1_FrameMap.cpp
+++ b/hotspot/src/share/vm/c1/c1_FrameMap.cpp
@@ -153,7 +153,7 @@ int FrameMap::_cpu_reg2rnr [FrameMap::nof_cpu_regs];
FrameMap::FrameMap(ciMethod* method, int monitors, int reserved_argument_area_size) {
- if (!_init_done) init();
+ assert(_init_done, "should already be completed");
_framesize = -1;
_num_spills = -1;
diff --git a/hotspot/src/share/vm/c1/c1_FrameMap.hpp b/hotspot/src/share/vm/c1/c1_FrameMap.hpp
index 73172973e34..4c4c99a7492 100644
--- a/hotspot/src/share/vm/c1/c1_FrameMap.hpp
+++ b/hotspot/src/share/vm/c1/c1_FrameMap.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -150,6 +150,9 @@ class FrameMap : public CompilationResourceObj {
// Opr representing the stack_pointer on this platform
static LIR_Opr stack_pointer();
+ // JSR 292
+ static LIR_Opr method_handle_invoke_SP_save_opr();
+
static BasicTypeArray* signature_type_array_for(const ciMethod* method);
static BasicTypeArray* signature_type_array_for(const char * signature);
@@ -232,7 +235,7 @@ class FrameMap : public CompilationResourceObj {
return _caller_save_fpu_regs[i];
}
- static void init();
+ static void initialize();
};
// CallingConvention
diff --git a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp
index ba8b85442b2..9ffb2ad083e 100644
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp
@@ -2438,13 +2438,13 @@ BlockEnd* GraphBuilder::iterate_bytecodes_for_block(int bci) {
case Bytecodes::_invokestatic : // fall through
case Bytecodes::_invokedynamic : // fall through
case Bytecodes::_invokeinterface: invoke(code); break;
- case Bytecodes::_new : new_instance(s.get_index_big()); break;
+ case Bytecodes::_new : new_instance(s.get_index_u2()); break;
case Bytecodes::_newarray : new_type_array(); break;
case Bytecodes::_anewarray : new_object_array(); break;
case Bytecodes::_arraylength : ipush(append(new ArrayLength(apop(), lock_stack()))); break;
case Bytecodes::_athrow : throw_op(s.cur_bci()); break;
- case Bytecodes::_checkcast : check_cast(s.get_index_big()); break;
- case Bytecodes::_instanceof : instance_of(s.get_index_big()); break;
+ case Bytecodes::_checkcast : check_cast(s.get_index_u2()); break;
+ case Bytecodes::_instanceof : instance_of(s.get_index_u2()); break;
// Note: we do not have special handling for the monitorenter bytecode if DeoptC1 && DeoptOnAsyncException
case Bytecodes::_monitorenter : monitorenter(apop(), s.cur_bci()); break;
case Bytecodes::_monitorexit : monitorexit (apop(), s.cur_bci()); break;
@@ -2530,16 +2530,10 @@ void GraphBuilder::iterate_all_blocks(bool start_in_current_block_for_inlining)
}
-bool GraphBuilder::_is_initialized = false;
bool GraphBuilder::_can_trap [Bytecodes::number_of_java_codes];
bool GraphBuilder::_is_async[Bytecodes::number_of_java_codes];
void GraphBuilder::initialize() {
- // make sure initialization happens only once (need a
- // lock here, if we allow the compiler to be re-entrant)
- if (is_initialized()) return;
- _is_initialized = true;
-
// the following bytecodes are assumed to potentially
// throw exceptions in compiled code - note that e.g.
// monitorexit & the return bytecodes do not throw
@@ -2855,7 +2849,6 @@ GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
BlockList* bci2block = blm.bci2block();
BlockBegin* start_block = bci2block->at(0);
- assert(is_initialized(), "GraphBuilder must have been initialized");
push_root_scope(scope, bci2block, start_block);
// setup state for std entry
diff --git a/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp b/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp
index 52fe6d7fba2..4ce9dd2bdf3 100644
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp
@@ -162,7 +162,6 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
};
// for all GraphBuilders
- static bool _is_initialized; // true if trap tables were initialized, false otherwise
static bool _can_trap[Bytecodes::number_of_java_codes];
static bool _is_async[Bytecodes::number_of_java_codes];
@@ -268,7 +267,6 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
Instruction* append_split(StateSplit* instr);
// other helpers
- static bool is_initialized() { return _is_initialized; }
static bool is_async(Bytecodes::Code code) {
assert(0 <= code && code < Bytecodes::number_of_java_codes, "illegal bytecode");
return _is_async[code];
diff --git a/hotspot/src/share/vm/c1/c1_IR.cpp b/hotspot/src/share/vm/c1/c1_IR.cpp
index a534634e9c8..4df75f40f66 100644
--- a/hotspot/src/share/vm/c1/c1_IR.cpp
+++ b/hotspot/src/share/vm/c1/c1_IR.cpp
@@ -230,7 +230,8 @@ CodeEmitInfo::CodeEmitInfo(int bci, ValueStack* stack, XHandlers* exception_hand
, _stack(stack)
, _exception_handlers(exception_handlers)
, _next(NULL)
- , _id(-1) {
+ , _id(-1)
+ , _is_method_handle_invoke(false) {
assert(_stack != NULL, "must be non null");
assert(_bci == SynchronizationEntryBCI || Bytecodes::is_defined(scope()->method()->java_code_at_bci(_bci)), "make sure bci points at a real bytecode");
}
@@ -241,7 +242,8 @@ CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, bool lock_stack_only)
, _exception_handlers(NULL)
, _bci(info->_bci)
, _scope_debug_info(NULL)
- , _oop_map(NULL) {
+ , _oop_map(NULL)
+ , _is_method_handle_invoke(info->_is_method_handle_invoke) {
if (lock_stack_only) {
if (info->_stack != NULL) {
_stack = info->_stack->copy_locks();
@@ -259,10 +261,10 @@ CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, bool lock_stack_only)
}
-void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool is_method_handle_invoke) {
+void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset) {
// record the safepoint before recording the debug info for enclosing scopes
recorder->add_safepoint(pc_offset, _oop_map->deep_copy());
- _scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/, is_method_handle_invoke);
+ _scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/, _is_method_handle_invoke);
recorder->end_safepoint(pc_offset);
}
@@ -285,11 +287,6 @@ void CodeEmitInfo::add_register_oop(LIR_Opr opr) {
IR::IR(Compilation* compilation, ciMethod* method, int osr_bci) :
_locals_size(in_WordSize(-1))
, _num_loops(0) {
- // initialize data structures
- ValueType::initialize();
- Instruction::initialize();
- BlockBegin::initialize();
- GraphBuilder::initialize();
// setup IR fields
_compilation = compilation;
_top_scope = new IRScope(compilation, NULL, -1, method, osr_bci, true);
@@ -379,15 +376,15 @@ void IR::split_critical_edges() {
}
-class UseCountComputer: public AllStatic {
+class UseCountComputer: public ValueVisitor, BlockClosure {
private:
- static void update_use_count(Value* n) {
+ void visit(Value* n) {
// Local instructions and Phis for expression stack values at the
// start of basic blocks are not added to the instruction list
if ((*n)->bci() == -99 && (*n)->as_Local() == NULL &&
(*n)->as_Phi() == NULL) {
assert(false, "a node was not appended to the graph");
- Compilation::current_compilation()->bailout("a node was not appended to the graph");
+ Compilation::current()->bailout("a node was not appended to the graph");
}
// use n's input if not visited before
if (!(*n)->is_pinned() && !(*n)->has_uses()) {
@@ -400,31 +397,31 @@ class UseCountComputer: public AllStatic {
(*n)->_use_count++;
}
- static Values* worklist;
- static int depth;
+ Values* worklist;
+ int depth;
enum {
max_recurse_depth = 20
};
- static void uses_do(Value* n) {
+ void uses_do(Value* n) {
depth++;
if (depth > max_recurse_depth) {
// don't allow the traversal to recurse too deeply
worklist->push(*n);
} else {
- (*n)->input_values_do(update_use_count);
+ (*n)->input_values_do(this);
// special handling for some instructions
if ((*n)->as_BlockEnd() != NULL) {
// note on BlockEnd:
// must 'use' the stack only if the method doesn't
// terminate, however, in those cases stack is empty
- (*n)->state_values_do(update_use_count);
+ (*n)->state_values_do(this);
}
}
depth--;
}
- static void basic_compute_use_count(BlockBegin* b) {
+ void block_do(BlockBegin* b) {
depth = 0;
// process all pinned nodes as the roots of expression trees
for (Instruction* n = b; n != NULL; n = n->next()) {
@@ -447,18 +444,19 @@ class UseCountComputer: public AllStatic {
assert(depth == 0, "should have counted back down");
}
+ UseCountComputer() {
+ worklist = new Values();
+ depth = 0;
+ }
+
public:
static void compute(BlockList* blocks) {
- worklist = new Values();
- blocks->blocks_do(basic_compute_use_count);
- worklist = NULL;
+ UseCountComputer ucc;
+ blocks->iterate_backward(&ucc);
}
};
-Values* UseCountComputer::worklist = NULL;
-int UseCountComputer::depth = 0;
-
// helper macro for short definition of trace-output inside code
#ifndef PRODUCT
#define TRACE_LINEAR_SCAN(level, code) \
@@ -1300,7 +1298,7 @@ void IR::verify() {
#endif // PRODUCT
-void SubstitutionResolver::substitute(Value* v) {
+void SubstitutionResolver::visit(Value* v) {
Value v0 = *v;
if (v0) {
Value vs = v0->subst();
@@ -1311,20 +1309,22 @@ void SubstitutionResolver::substitute(Value* v) {
}
#ifdef ASSERT
-void check_substitute(Value* v) {
- Value v0 = *v;
- if (v0) {
- Value vs = v0->subst();
- assert(vs == v0, "missed substitution");
+class SubstitutionChecker: public ValueVisitor {
+ void visit(Value* v) {
+ Value v0 = *v;
+ if (v0) {
+ Value vs = v0->subst();
+ assert(vs == v0, "missed substitution");
+ }
}
-}
+};
#endif
void SubstitutionResolver::block_do(BlockBegin* block) {
Instruction* last = NULL;
for (Instruction* n = block; n != NULL;) {
- n->values_do(substitute);
+ n->values_do(this);
// need to remove this instruction from the instruction stream
if (n->subst() != n) {
assert(last != NULL, "must have last");
@@ -1336,8 +1336,9 @@ void SubstitutionResolver::block_do(BlockBegin* block) {
}
#ifdef ASSERT
- if (block->state()) block->state()->values_do(check_substitute);
- block->block_values_do(check_substitute);
- if (block->end() && block->end()->state()) block->end()->state()->values_do(check_substitute);
+ SubstitutionChecker check_substitute;
+ if (block->state()) block->state()->values_do(&check_substitute);
+ block->block_values_do(&check_substitute);
+ if (block->end() && block->end()->state()) block->end()->state()->values_do(&check_substitute);
#endif
}
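
UseCountComputer above is converted from an AllStatic class with static callback functions and static worklist/depth fields into an ordinary object that implements both ValueVisitor and BlockClosure, so all traversal state is per-instance and the pass becomes re-entrant. The same refactoring in miniature, with heavily simplified stand-in types rather than the real IR classes:

    #include <cstdio>
    #include <vector>

    struct Value { int use_count = 0; };

    // The visitor interface this changeset introduces.
    class ValueVisitor {
     public:
      virtual ~ValueVisitor() = default;
      virtual void visit(Value* v) = 0;
    };

    struct Block { std::vector<Value*> values; };

    class BlockClosure {
     public:
      virtual ~BlockClosure() = default;
      virtual void block_do(Block* b) = 0;
    };

    // Before: static callbacks plus a static worklist. After: one stack-allocated
    // object carries the whole traversal state.
    class UseCountComputer : public ValueVisitor, public BlockClosure {
      void visit(Value* v) override { v->use_count++; }
      void block_do(Block* b) override {
        for (Value* v : b->values) visit(v);
      }
     public:
      static void compute(std::vector<Block*>& blocks) {
        UseCountComputer ucc;                        // no globals: re-entrant per thread
        for (auto it = blocks.rbegin(); it != blocks.rend(); ++it)
          ucc.block_do(*it);                         // mirrors blocks->iterate_backward(&ucc)
      }
    };

    int main() {
      Value v;
      Block b;
      b.values.push_back(&v);
      std::vector<Block*> blocks{&b};
      UseCountComputer::compute(blocks);
      std::printf("use_count = %d\n", v.use_count); // prints 1
    }
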
diff --git a/hotspot/src/share/vm/c1/c1_IR.hpp b/hotspot/src/share/vm/c1/c1_IR.hpp
index 3130c381a8b..05ef1789f85 100644
--- a/hotspot/src/share/vm/c1/c1_IR.hpp
+++ b/hotspot/src/share/vm/c1/c1_IR.hpp
@@ -269,6 +269,7 @@ class CodeEmitInfo: public CompilationResourceObj {
int _bci;
CodeEmitInfo* _next;
int _id;
+ bool _is_method_handle_invoke; // true if the associated call site is a MethodHandle call site.
FrameMap* frame_map() const { return scope()->compilation()->frame_map(); }
Compilation* compilation() const { return scope()->compilation(); }
@@ -287,7 +288,8 @@ class CodeEmitInfo: public CompilationResourceObj {
, _stack(NULL)
, _exception_handlers(NULL)
, _next(NULL)
- , _id(-1) {
+ , _id(-1)
+ , _is_method_handle_invoke(false) {
}
// make a copy
@@ -302,13 +304,16 @@ class CodeEmitInfo: public CompilationResourceObj {
int bci() const { return _bci; }
void add_register_oop(LIR_Opr opr);
- void record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool is_method_handle_invoke = false);
+ void record_debug_info(DebugInformationRecorder* recorder, int pc_offset);
CodeEmitInfo* next() const { return _next; }
void set_next(CodeEmitInfo* next) { _next = next; }
int id() const { return _id; }
void set_id(int id) { _id = id; }
+
+ bool is_method_handle_invoke() const { return _is_method_handle_invoke; }
+ void set_is_method_handle_invoke(bool x) { _is_method_handle_invoke = x; }
};
@@ -366,8 +371,8 @@ class IR: public CompilationResourceObj {
// instructions from the instruction list.
//
-class SubstitutionResolver: public BlockClosure {
- static void substitute(Value* v);
+class SubstitutionResolver: public BlockClosure, ValueVisitor {
+ virtual void visit(Value* v);
public:
SubstitutionResolver(IR* hir) {
diff --git a/hotspot/src/share/vm/c1/c1_Instruction.cpp b/hotspot/src/share/vm/c1/c1_Instruction.cpp
index f7bdfad85e9..018047a3519 100644
--- a/hotspot/src/share/vm/c1/c1_Instruction.cpp
+++ b/hotspot/src/share/vm/c1/c1_Instruction.cpp
@@ -29,8 +29,6 @@
// Implementation of Instruction
-int Instruction::_next_id = 0;
-
#ifdef ASSERT
void Instruction::create_hi_word() {
assert(type()->is_double_word() && _hi_word == NULL, "only double word has high word");
@@ -193,22 +191,22 @@ ciType* CheckCast::exact_type() const {
}
-void ArithmeticOp::other_values_do(void f(Value*)) {
+void ArithmeticOp::other_values_do(ValueVisitor* f) {
if (lock_stack() != NULL) lock_stack()->values_do(f);
}
-void NullCheck::other_values_do(void f(Value*)) {
+void NullCheck::other_values_do(ValueVisitor* f) {
lock_stack()->values_do(f);
}
-void AccessArray::other_values_do(void f(Value*)) {
+void AccessArray::other_values_do(ValueVisitor* f) {
if (lock_stack() != NULL) lock_stack()->values_do(f);
}
// Implementation of AccessField
-void AccessField::other_values_do(void f(Value*)) {
+void AccessField::other_values_do(ValueVisitor* f) {
if (state_before() != NULL) state_before()->values_do(f);
if (lock_stack() != NULL) lock_stack()->values_do(f);
}
@@ -270,7 +268,7 @@ bool LogicOp::is_commutative() const {
// Implementation of CompareOp
-void CompareOp::other_values_do(void f(Value*)) {
+void CompareOp::other_values_do(ValueVisitor* f) {
if (state_before() != NULL) state_before()->values_do(f);
}
@@ -302,12 +300,12 @@ IRScope* StateSplit::scope() const {
}
-void StateSplit::state_values_do(void f(Value*)) {
+void StateSplit::state_values_do(ValueVisitor* f) {
if (state() != NULL) state()->values_do(f);
}
-void BlockBegin::state_values_do(void f(Value*)) {
+void BlockBegin::state_values_do(ValueVisitor* f) {
StateSplit::state_values_do(f);
if (is_set(BlockBegin::exception_entry_flag)) {
@@ -318,13 +316,13 @@ void BlockBegin::state_values_do(void f(Value*)) {
}
-void MonitorEnter::state_values_do(void f(Value*)) {
+void MonitorEnter::state_values_do(ValueVisitor* f) {
StateSplit::state_values_do(f);
_lock_stack_before->values_do(f);
}
-void Intrinsic::state_values_do(void f(Value*)) {
+void Intrinsic::state_values_do(ValueVisitor* f) {
StateSplit::state_values_do(f);
if (lock_stack() != NULL) lock_stack()->values_do(f);
}
@@ -349,8 +347,9 @@ Invoke::Invoke(Bytecodes::Code code, ValueType* result_type, Value recv, Values*
assert(args != NULL, "args must exist");
#ifdef ASSERT
- values_do(assert_value);
-#endif // ASSERT
+ AssertValues assert_value;
+ values_do(&assert_value);
+#endif
// provide an initial guess of signature size.
_signature = new BasicTypeList(number_of_arguments() + (has_receiver() ? 1 : 0));
@@ -368,7 +367,7 @@ Invoke::Invoke(Bytecodes::Code code, ValueType* result_type, Value recv, Values*
}
-void Invoke::state_values_do(void f(Value*)) {
+void Invoke::state_values_do(ValueVisitor* f) {
StateSplit::state_values_do(f);
if (state_before() != NULL) state_before()->values_do(f);
if (state() != NULL) state()->values_do(f);
@@ -500,30 +499,27 @@ BlockBegin* Constant::compare(Instruction::Condition cond, Value right,
}
-void Constant::other_values_do(void f(Value*)) {
+void Constant::other_values_do(ValueVisitor* f) {
if (state() != NULL) state()->values_do(f);
}
// Implementation of NewArray
-void NewArray::other_values_do(void f(Value*)) {
+void NewArray::other_values_do(ValueVisitor* f) {
if (state_before() != NULL) state_before()->values_do(f);
}
// Implementation of TypeCheck
-void TypeCheck::other_values_do(void f(Value*)) {
+void TypeCheck::other_values_do(ValueVisitor* f) {
if (state_before() != NULL) state_before()->values_do(f);
}
// Implementation of BlockBegin
-int BlockBegin::_next_block_id = 0;
-
-
void BlockBegin::set_end(BlockEnd* end) {
assert(end != NULL, "should not reset block end to NULL");
BlockEnd* old_end = _end;
@@ -738,7 +734,7 @@ void BlockBegin::iterate_postorder(BlockClosure* closure) {
}
-void BlockBegin::block_values_do(void f(Value*)) {
+void BlockBegin::block_values_do(ValueVisitor* f) {
for (Instruction* n = this; n != NULL; n = n->next()) n->values_do(f);
}
@@ -930,7 +926,7 @@ void BlockList::blocks_do(void f(BlockBegin*)) {
}
-void BlockList::values_do(void f(Value*)) {
+void BlockList::values_do(ValueVisitor* f) {
for (int i = length() - 1; i >= 0; i--) at(i)->block_values_do(f);
}
@@ -973,7 +969,7 @@ void BlockEnd::substitute_sux(BlockBegin* old_sux, BlockBegin* new_sux) {
}
-void BlockEnd::other_values_do(void f(Value*)) {
+void BlockEnd::other_values_do(ValueVisitor* f) {
if (state_before() != NULL) state_before()->values_do(f);
}
@@ -1012,6 +1008,6 @@ int Phi::operand_count() const {
// Implementation of Throw
-void Throw::state_values_do(void f(Value*)) {
+void Throw::state_values_do(ValueVisitor* f) {
BlockEnd::state_values_do(f);
}
diff --git a/hotspot/src/share/vm/c1/c1_Instruction.hpp b/hotspot/src/share/vm/c1/c1_Instruction.hpp
index 98e19f11473..98e9d41bc8a 100644
--- a/hotspot/src/share/vm/c1/c1_Instruction.hpp
+++ b/hotspot/src/share/vm/c1/c1_Instruction.hpp
@@ -116,6 +116,13 @@ class BlockClosure: public CompilationResourceObj {
};
+// A simple closure class for visiting the values of an Instruction
+class ValueVisitor: public StackObj {
+ public:
+ virtual void visit(Value* v) = 0;
+};
+
+
// Some array and list classes
define_array(BlockBeginArray, BlockBegin*)
define_stack(_BlockList, BlockBeginArray)
@@ -129,7 +136,7 @@ class BlockList: public _BlockList {
void iterate_forward(BlockClosure* closure);
void iterate_backward(BlockClosure* closure);
void blocks_do(void f(BlockBegin*));
- void values_do(void f(Value*));
+ void values_do(ValueVisitor* f);
void print(bool cfg_only = false, bool live_only = false) PRODUCT_RETURN;
};
@@ -264,8 +271,6 @@ class InstructionVisitor: public StackObj {
class Instruction: public CompilationResourceObj {
private:
- static int _next_id; // the node counter
-
int _id; // the unique instruction id
int _bci; // the instruction bci
int _use_count; // the number of instructions refering to this value (w/o prev/next); only roots can have use count = 0 or > 1
@@ -283,6 +288,7 @@ class Instruction: public CompilationResourceObj {
#endif
friend class UseCountComputer;
+ friend class BlockBegin;
protected:
void set_bci(int bci) { assert(bci == SynchronizationEntryBCI || bci >= 0, "illegal bci"); _bci = bci; }
@@ -292,6 +298,13 @@ class Instruction: public CompilationResourceObj {
}
public:
+ void* operator new(size_t size) {
+ Compilation* c = Compilation::current();
+ void* res = c->arena()->Amalloc(size);
+ ((Instruction*)res)->_id = c->get_next_id();
+ return res;
+ }
+
enum InstructionFlag {
NeedsNullCheckFlag = 0,
CanTrapFlag,
@@ -338,13 +351,13 @@ class Instruction: public CompilationResourceObj {
static Condition negate(Condition cond);
// initialization
- static void initialize() { _next_id = 0; }
- static int number_of_instructions() { return _next_id; }
+ static int number_of_instructions() {
+ return Compilation::current()->number_of_instructions();
+ }
// creation
Instruction(ValueType* type, bool type_is_constant = false, bool create_hi = true)
- : _id(_next_id++)
- , _bci(-99)
+ : _bci(-99)
, _use_count(0)
, _pin_state(0)
, _type(type)
@@ -479,10 +492,10 @@ class Instruction: public CompilationResourceObj {
virtual bool can_trap() const { return false; }
- virtual void input_values_do(void f(Value*)) = 0;
- virtual void state_values_do(void f(Value*)) { /* usually no state - override on demand */ }
- virtual void other_values_do(void f(Value*)) { /* usually no other - override on demand */ }
- void values_do(void f(Value*)) { input_values_do(f); state_values_do(f); other_values_do(f); }
+ virtual void input_values_do(ValueVisitor* f) = 0;
+ virtual void state_values_do(ValueVisitor* f) { /* usually no state - override on demand */ }
+ virtual void other_values_do(ValueVisitor* f) { /* usually no other - override on demand */ }
+ void values_do(ValueVisitor* f) { input_values_do(f); state_values_do(f); other_values_do(f); }
virtual ciType* exact_type() const { return NULL; }
virtual ciType* declared_type() const { return NULL; }
@@ -517,9 +530,12 @@ class Instruction: public CompilationResourceObj {
// Debugging support
+
#ifdef ASSERT
- static void assert_value(Value* x) { assert((*x) != NULL, "value must exist"); }
- #define ASSERT_VALUES values_do(assert_value);
+class AssertValues: public ValueVisitor {
+ void visit(Value* x) { assert((*x) != NULL, "value must exist"); }
+};
+ #define ASSERT_VALUES { AssertValues assert_value; values_do(&assert_value); }
#else
#define ASSERT_VALUES
#endif // ASSERT
@@ -555,7 +571,7 @@ LEAF(HiWord, Instruction)
void make_illegal() { set_type(illegalType); }
// generic
- virtual void input_values_do(void f(Value*)) { ShouldNotReachHere(); }
+ virtual void input_values_do(ValueVisitor* f) { ShouldNotReachHere(); }
};
@@ -615,7 +631,7 @@ LEAF(Phi, Instruction)
}
// generic
- virtual void input_values_do(void f(Value*)) {
+ virtual void input_values_do(ValueVisitor* f) {
}
};
@@ -635,7 +651,7 @@ LEAF(Local, Instruction)
int java_index() const { return _java_index; }
// generic
- virtual void input_values_do(void f(Value*)) { /* no values */ }
+ virtual void input_values_do(ValueVisitor* f) { /* no values */ }
};
@@ -663,8 +679,8 @@ LEAF(Constant, Instruction)
// generic
virtual bool can_trap() const { return state() != NULL; }
- virtual void input_values_do(void f(Value*)) { /* no values */ }
- virtual void other_values_do(void f(Value*));
+ virtual void input_values_do(ValueVisitor* f) { /* no values */ }
+ virtual void other_values_do(ValueVisitor* f);
virtual intx hash() const;
virtual bool is_equal(Value v) const;
@@ -734,8 +750,8 @@ BASE(AccessField, Instruction)
// generic
virtual bool can_trap() const { return needs_null_check() || needs_patching(); }
- virtual void input_values_do(void f(Value*)) { f(&_obj); }
- virtual void other_values_do(void f(Value*));
+ virtual void input_values_do(ValueVisitor* f) { f->visit(&_obj); }
+ virtual void other_values_do(ValueVisitor* f);
};
@@ -776,7 +792,7 @@ LEAF(StoreField, AccessField)
bool needs_write_barrier() const { return check_flag(NeedsWriteBarrierFlag); }
// generic
- virtual void input_values_do(void f(Value*)) { AccessField::input_values_do(f); f(&_value); }
+ virtual void input_values_do(ValueVisitor* f) { AccessField::input_values_do(f); f->visit(&_value); }
};
@@ -804,8 +820,8 @@ BASE(AccessArray, Instruction)
// generic
virtual bool can_trap() const { return needs_null_check(); }
- virtual void input_values_do(void f(Value*)) { f(&_array); }
- virtual void other_values_do(void f(Value*));
+ virtual void input_values_do(ValueVisitor* f) { f->visit(&_array); }
+ virtual void other_values_do(ValueVisitor* f);
};
@@ -857,7 +873,7 @@ BASE(AccessIndexed, AccessArray)
bool compute_needs_range_check();
// generic
- virtual void input_values_do(void f(Value*)) { AccessArray::input_values_do(f); f(&_index); if (_length != NULL) f(&_length); }
+ virtual void input_values_do(ValueVisitor* f) { AccessArray::input_values_do(f); f->visit(&_index); if (_length != NULL) f->visit(&_length); }
};
@@ -909,7 +925,7 @@ LEAF(StoreIndexed, AccessIndexed)
bool needs_store_check() const { return check_flag(NeedsStoreCheckFlag); }
// generic
- virtual void input_values_do(void f(Value*)) { AccessIndexed::input_values_do(f); f(&_value); }
+ virtual void input_values_do(ValueVisitor* f) { AccessIndexed::input_values_do(f); f->visit(&_value); }
};
@@ -927,7 +943,7 @@ LEAF(NegateOp, Instruction)
Value x() const { return _x; }
// generic
- virtual void input_values_do(void f(Value*)) { f(&_x); }
+ virtual void input_values_do(ValueVisitor* f) { f->visit(&_x); }
};
@@ -956,7 +972,7 @@ BASE(Op2, Instruction)
// generic
virtual bool is_commutative() const { return false; }
- virtual void input_values_do(void f(Value*)) { f(&_x); f(&_y); }
+ virtual void input_values_do(ValueVisitor* f) { f->visit(&_x); f->visit(&_y); }
};
@@ -982,7 +998,7 @@ LEAF(ArithmeticOp, Op2)
// generic
virtual bool is_commutative() const;
virtual bool can_trap() const;
- virtual void other_values_do(void f(Value*));
+ virtual void other_values_do(ValueVisitor* f);
HASHING3(Op2, true, op(), x()->subst(), y()->subst())
};
@@ -1023,7 +1039,7 @@ LEAF(CompareOp, Op2)
// generic
HASHING3(Op2, true, op(), x()->subst(), y()->subst())
- virtual void other_values_do(void f(Value*));
+ virtual void other_values_do(ValueVisitor* f);
};
@@ -1051,7 +1067,7 @@ LEAF(IfOp, Op2)
Value fval() const { return _fval; }
// generic
- virtual void input_values_do(void f(Value*)) { Op2::input_values_do(f); f(&_tval); f(&_fval); }
+ virtual void input_values_do(ValueVisitor* f) { Op2::input_values_do(f); f->visit(&_tval); f->visit(&_fval); }
};
@@ -1071,7 +1087,7 @@ LEAF(Convert, Instruction)
Value value() const { return _value; }
// generic
- virtual void input_values_do(void f(Value*)) { f(&_value); }
+ virtual void input_values_do(ValueVisitor* f) { f->visit(&_value); }
HASHING2(Convert, true, op(), value()->subst())
};
@@ -1100,8 +1116,8 @@ LEAF(NullCheck, Instruction)
// generic
virtual bool can_trap() const { return check_flag(CanTrapFlag); /* null-check elimination sets to false */ }
- virtual void input_values_do(void f(Value*)) { f(&_obj); }
- virtual void other_values_do(void f(Value*));
+ virtual void input_values_do(ValueVisitor* f) { f->visit(&_obj); }
+ virtual void other_values_do(ValueVisitor* f);
HASHING1(NullCheck, true, obj()->subst())
};
@@ -1127,8 +1143,8 @@ BASE(StateSplit, Instruction)
void set_state(ValueStack* state) { _state = state; }
// generic
- virtual void input_values_do(void f(Value*)) { /* no values */ }
- virtual void state_values_do(void f(Value*));
+ virtual void input_values_do(ValueVisitor* f) { /* no values */ }
+ virtual void state_values_do(ValueVisitor* f);
};
@@ -1169,12 +1185,12 @@ LEAF(Invoke, StateSplit)
// generic
virtual bool can_trap() const { return true; }
- virtual void input_values_do(void f(Value*)) {
+ virtual void input_values_do(ValueVisitor* f) {
StateSplit::input_values_do(f);
- if (has_receiver()) f(&_recv);
- for (int i = 0; i < _args->length(); i++) f(_args->adr_at(i));
+ if (has_receiver()) f->visit(&_recv);
+ for (int i = 0; i < _args->length(); i++) f->visit(_args->adr_at(i));
}
- virtual void state_values_do(void f(Value*));
+ virtual void state_values_do(ValueVisitor *f);
};
@@ -1212,8 +1228,8 @@ BASE(NewArray, StateSplit)
// generic
virtual bool can_trap() const { return true; }
- virtual void input_values_do(void f(Value*)) { StateSplit::input_values_do(f); f(&_length); }
- virtual void other_values_do(void f(Value*));
+ virtual void input_values_do(ValueVisitor* f) { StateSplit::input_values_do(f); f->visit(&_length); }
+ virtual void other_values_do(ValueVisitor* f);
};
@@ -1262,7 +1278,7 @@ LEAF(NewMultiArray, NewArray)
int rank() const { return dims()->length(); }
// generic
- virtual void input_values_do(void f(Value*)) {
+ virtual void input_values_do(ValueVisitor* f) {
// NOTE: we do not call NewArray::input_values_do since "length"
// is meaningless for a multi-dimensional array; passing the
// zeroth element down to NewArray as its length is a bad idea
@@ -1270,7 +1286,7 @@ LEAF(NewMultiArray, NewArray)
// get updated, and the value must not be traversed twice. Was bug
// - kbr 4/10/2001
StateSplit::input_values_do(f);
- for (int i = 0; i < _dims->length(); i++) f(_dims->adr_at(i));
+ for (int i = 0; i < _dims->length(); i++) f->visit(_dims->adr_at(i));
}
};
@@ -1300,8 +1316,8 @@ BASE(TypeCheck, StateSplit)
// generic
virtual bool can_trap() const { return true; }
- virtual void input_values_do(void f(Value*)) { StateSplit::input_values_do(f); f(&_obj); }
- virtual void other_values_do(void f(Value*));
+ virtual void input_values_do(ValueVisitor* f) { StateSplit::input_values_do(f); f->visit(&_obj); }
+ virtual void other_values_do(ValueVisitor* f);
};
@@ -1366,7 +1382,7 @@ BASE(AccessMonitor, StateSplit)
int monitor_no() const { return _monitor_no; }
// generic
- virtual void input_values_do(void f(Value*)) { StateSplit::input_values_do(f); f(&_obj); }
+ virtual void input_values_do(ValueVisitor* f) { StateSplit::input_values_do(f); f->visit(&_obj); }
};
@@ -1385,7 +1401,7 @@ LEAF(MonitorEnter, AccessMonitor)
// accessors
ValueStack* lock_stack_before() const { return _lock_stack_before; }
- virtual void state_values_do(void f(Value*));
+ virtual void state_values_do(ValueVisitor* f);
// generic
virtual bool can_trap() const { return true; }
@@ -1454,11 +1470,11 @@ LEAF(Intrinsic, StateSplit)
// generic
virtual bool can_trap() const { return check_flag(CanTrapFlag); }
- virtual void input_values_do(void f(Value*)) {
+ virtual void input_values_do(ValueVisitor* f) {
StateSplit::input_values_do(f);
- for (int i = 0; i < _args->length(); i++) f(_args->adr_at(i));
+ for (int i = 0; i < _args->length(); i++) f->visit(_args->adr_at(i));
}
- virtual void state_values_do(void f(Value*));
+ virtual void state_values_do(ValueVisitor* f);
};
@@ -1467,8 +1483,6 @@ class LIR_List;
LEAF(BlockBegin, StateSplit)
private:
- static int _next_block_id; // the block counter
-
int _block_id; // the unique block id
int _depth_first_number; // number of this block in a depth-first ordering
int _linear_scan_number; // number of this block in linear-scan ordering
@@ -1510,14 +1524,22 @@ LEAF(BlockBegin, StateSplit)
friend class SuxAndWeightAdjuster;
public:
+ void* operator new(size_t size) {
+ Compilation* c = Compilation::current();
+ void* res = c->arena()->Amalloc(size);
+ ((BlockBegin*)res)->_id = c->get_next_id();
+ ((BlockBegin*)res)->_block_id = c->get_next_block_id();
+ return res;
+ }
+
// initialization/counting
- static void initialize() { _next_block_id = 0; }
- static int number_of_blocks() { return _next_block_id; }
+ static int number_of_blocks() {
+ return Compilation::current()->number_of_blocks();
+ }
// creation
BlockBegin(int bci)
: StateSplit(illegalType)
- , _block_id(_next_block_id++)
, _depth_first_number(-1)
, _linear_scan_number(-1)
, _loop_depth(0)
@@ -1592,7 +1614,7 @@ LEAF(BlockBegin, StateSplit)
void init_stores_to_locals(int locals_count) { _stores_to_locals = BitMap(locals_count); _stores_to_locals.clear(); }
// generic
- virtual void state_values_do(void f(Value*));
+ virtual void state_values_do(ValueVisitor* f);
// successors and predecessors
int number_of_sux() const;
@@ -1646,7 +1668,7 @@ LEAF(BlockBegin, StateSplit)
void iterate_preorder (BlockClosure* closure);
void iterate_postorder (BlockClosure* closure);
- void block_values_do(void f(Value*));
+ void block_values_do(ValueVisitor* f);
// loops
void set_loop_index(int ix) { _loop_index = ix; }
@@ -1698,7 +1720,7 @@ BASE(BlockEnd, StateSplit)
void set_begin(BlockBegin* begin);
// generic
- virtual void other_values_do(void f(Value*));
+ virtual void other_values_do(ValueVisitor* f);
// successors
int number_of_sux() const { return _sux != NULL ? _sux->length() : 0; }
@@ -1787,7 +1809,7 @@ LEAF(If, BlockEnd)
void set_profiled_bci(int bci) { _profiled_bci = bci; }
// generic
- virtual void input_values_do(void f(Value*)) { BlockEnd::input_values_do(f); f(&_x); f(&_y); }
+ virtual void input_values_do(ValueVisitor* f) { BlockEnd::input_values_do(f); f->visit(&_x); f->visit(&_y); }
};
@@ -1841,7 +1863,7 @@ LEAF(IfInstanceOf, BlockEnd)
}
// generic
- virtual void input_values_do(void f(Value*)) { BlockEnd::input_values_do(f); f(&_obj); }
+ virtual void input_values_do(ValueVisitor* f) { BlockEnd::input_values_do(f); f->visit(&_obj); }
};
@@ -1863,7 +1885,7 @@ BASE(Switch, BlockEnd)
int length() const { return number_of_sux() - 1; }
// generic
- virtual void input_values_do(void f(Value*)) { BlockEnd::input_values_do(f); f(&_tag); }
+ virtual void input_values_do(ValueVisitor* f) { BlockEnd::input_values_do(f); f->visit(&_tag); }
};
@@ -1916,9 +1938,9 @@ LEAF(Return, BlockEnd)
bool has_result() const { return result() != NULL; }
// generic
- virtual void input_values_do(void f(Value*)) {
+ virtual void input_values_do(ValueVisitor* f) {
BlockEnd::input_values_do(f);
- if (has_result()) f(&_result);
+ if (has_result()) f->visit(&_result);
}
};
@@ -1938,8 +1960,8 @@ LEAF(Throw, BlockEnd)
// generic
virtual bool can_trap() const { return true; }
- virtual void input_values_do(void f(Value*)) { BlockEnd::input_values_do(f); f(&_exception); }
- virtual void state_values_do(void f(Value*));
+ virtual void input_values_do(ValueVisitor* f) { BlockEnd::input_values_do(f); f->visit(&_exception); }
+ virtual void state_values_do(ValueVisitor* f);
};
@@ -1971,7 +1993,7 @@ LEAF(OsrEntry, Instruction)
#endif
// generic
- virtual void input_values_do(void f(Value*)) { }
+ virtual void input_values_do(ValueVisitor* f) { }
};
@@ -1984,7 +2006,7 @@ LEAF(ExceptionObject, Instruction)
}
// generic
- virtual void input_values_do(void f(Value*)) { }
+ virtual void input_values_do(ValueVisitor* f) { }
};
@@ -2008,7 +2030,7 @@ LEAF(RoundFP, Instruction)
Value input() const { return _input; }
// generic
- virtual void input_values_do(void f(Value*)) { f(&_input); }
+ virtual void input_values_do(ValueVisitor* f) { f->visit(&_input); }
};
@@ -2033,8 +2055,8 @@ BASE(UnsafeOp, Instruction)
BasicType basic_type() { return _basic_type; }
// generic
- virtual void input_values_do(void f(Value*)) { }
- virtual void other_values_do(void f(Value*)) { }
+ virtual void input_values_do(ValueVisitor* f) { }
+ virtual void other_values_do(ValueVisitor* f) { }
};
@@ -2078,9 +2100,9 @@ BASE(UnsafeRawOp, UnsafeOp)
void set_log2_scale(int log2_scale) { _log2_scale = log2_scale; }
// generic
- virtual void input_values_do(void f(Value*)) { UnsafeOp::input_values_do(f);
- f(&_base);
- if (has_index()) f(&_index); }
+ virtual void input_values_do(ValueVisitor* f) { UnsafeOp::input_values_do(f);
+ f->visit(&_base);
+ if (has_index()) f->visit(&_index); }
};
@@ -2128,8 +2150,8 @@ LEAF(UnsafePutRaw, UnsafeRawOp)
Value value() { return _value; }
// generic
- virtual void input_values_do(void f(Value*)) { UnsafeRawOp::input_values_do(f);
- f(&_value); }
+ virtual void input_values_do(ValueVisitor* f) { UnsafeRawOp::input_values_do(f);
+ f->visit(&_value); }
};
@@ -2149,9 +2171,9 @@ BASE(UnsafeObjectOp, UnsafeOp)
Value offset() { return _offset; }
bool is_volatile() { return _is_volatile; }
// generic
- virtual void input_values_do(void f(Value*)) { UnsafeOp::input_values_do(f);
- f(&_object);
- f(&_offset); }
+ virtual void input_values_do(ValueVisitor* f) { UnsafeOp::input_values_do(f);
+ f->visit(&_object);
+ f->visit(&_offset); }
};
@@ -2180,8 +2202,8 @@ LEAF(UnsafePutObject, UnsafeObjectOp)
Value value() { return _value; }
// generic
- virtual void input_values_do(void f(Value*)) { UnsafeObjectOp::input_values_do(f);
- f(&_value); }
+ virtual void input_values_do(ValueVisitor* f) { UnsafeObjectOp::input_values_do(f);
+ f->visit(&_value); }
};
@@ -2238,7 +2260,7 @@ LEAF(ProfileCall, Instruction)
Value recv() { return _recv; }
ciKlass* known_holder() { return _known_holder; }
- virtual void input_values_do(void f(Value*)) { if (_recv != NULL) f(&_recv); }
+ virtual void input_values_do(ValueVisitor* f) { if (_recv != NULL) f->visit(&_recv); }
};
@@ -2266,7 +2288,7 @@ LEAF(ProfileCounter, Instruction)
int offset() { return _offset; }
int increment() { return _increment; }
- virtual void input_values_do(void f(Value*)) { f(&_mdo); }
+ virtual void input_values_do(ValueVisitor* f) { f->visit(&_mdo); }
};
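
In c1_Instruction.hpp the per-VM static id counters are replaced by per-compilation counters: Instruction and BlockBegin define an operator new that allocates from Compilation::current()->arena() and stamps the id into the object before the constructor runs. A small standalone sketch of that trick; malloc stands in for the arena, and it relies, exactly as the patch does, on the constructor leaving _id untouched.

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    // Per-compilation state that hands out instruction ids.
    struct Compilation {
      int next_id = 0;
      int get_next_id() { return next_id++; }
      static thread_local Compilation* current;   // stands in for Compilation::current()
    };
    thread_local Compilation* Compilation::current = nullptr;

    class Instruction {
      int _id;
     public:
      // Allocate and stamp the id before the constructor runs, as in the patch.
      // The constructor must not reinitialize _id for this to work.
      void* operator new(size_t size) {
        Compilation* c = Compilation::current;
        void* res = std::malloc(size);              // the real code uses c->arena()
        static_cast<Instruction*>(res)->_id = c->get_next_id();
        return res;
      }
      void operator delete(void* p) { std::free(p); }
      Instruction() { /* deliberately leaves _id alone */ }
      int id() const { return _id; }
    };

    int main() {
      Compilation comp;
      Compilation::current = &comp;
      Instruction* a = new Instruction();
      Instruction* b = new Instruction();
      std::printf("%d %d\n", a->id(), b->id());     // 0 1
      delete a;
      delete b;
    }
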
diff --git a/hotspot/src/share/vm/c1/c1_LIR.cpp b/hotspot/src/share/vm/c1/c1_LIR.cpp
index 86815794c29..6aa0f5e0f22 100644
--- a/hotspot/src/share/vm/c1/c1_LIR.cpp
+++ b/hotspot/src/share/vm/c1/c1_LIR.cpp
@@ -715,7 +715,10 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
}
if (opJavaCall->_info) do_info(opJavaCall->_info);
- if (opJavaCall->is_method_handle_invoke()) do_temp(FrameMap::method_handle_invoke_SP_save_opr());
+ if (opJavaCall->is_method_handle_invoke()) {
+ opJavaCall->_method_handle_invoke_SP_save_opr = FrameMap::method_handle_invoke_SP_save_opr();
+ do_temp(opJavaCall->_method_handle_invoke_SP_save_opr);
+ }
do_call();
if (opJavaCall->_result->is_valid()) do_output(opJavaCall->_result);
diff --git a/hotspot/src/share/vm/c1/c1_LIR.hpp b/hotspot/src/share/vm/c1/c1_LIR.hpp
index 504b9f38b9c..aabcfaa84f2 100644
--- a/hotspot/src/share/vm/c1/c1_LIR.hpp
+++ b/hotspot/src/share/vm/c1/c1_LIR.hpp
@@ -505,15 +505,22 @@ class LIR_Address: public LIR_OprPtr {
, _type(type)
, _disp(0) { verify(); }
- LIR_Address(LIR_Opr base, int disp, BasicType type):
+ LIR_Address(LIR_Opr base, intx disp, BasicType type):
_base(base)
, _index(LIR_OprDesc::illegalOpr())
, _scale(times_1)
, _type(type)
, _disp(disp) { verify(); }
+ LIR_Address(LIR_Opr base, BasicType type):
+ _base(base)
+ , _index(LIR_OprDesc::illegalOpr())
+ , _scale(times_1)
+ , _type(type)
+ , _disp(0) { verify(); }
+
#ifdef X86
- LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, int disp, BasicType type):
+ LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, intx disp, BasicType type):
_base(base)
, _index(index)
, _scale(scale)
@@ -1033,8 +1040,9 @@ class LIR_OpJavaCall: public LIR_OpCall {
friend class LIR_OpVisitState;
private:
- ciMethod* _method;
- LIR_Opr _receiver;
+ ciMethod* _method;
+ LIR_Opr _receiver;
+ LIR_Opr _method_handle_invoke_SP_save_opr; // Used in LIR_OpVisitState::visit to store the reference to FrameMap::method_handle_invoke_SP_save_opr.
public:
LIR_OpJavaCall(LIR_Code code, ciMethod* method,
@@ -1043,14 +1051,18 @@ class LIR_OpJavaCall: public LIR_OpCall {
CodeEmitInfo* info)
: LIR_OpCall(code, addr, result, arguments, info)
, _receiver(receiver)
- , _method(method) { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
+ , _method(method)
+ , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
+ { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
LIR_OpJavaCall(LIR_Code code, ciMethod* method,
LIR_Opr receiver, LIR_Opr result, intptr_t vtable_offset,
LIR_OprList* arguments, CodeEmitInfo* info)
: LIR_OpCall(code, (address)vtable_offset, result, arguments, info)
, _receiver(receiver)
- , _method(method) { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
+ , _method(method)
+ , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
+ { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
LIR_Opr receiver() const { return _receiver; }
ciMethod* method() const { return _method; }
diff --git a/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp b/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp
index 741fe2873e5..75c713d501b 100644
--- a/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp
+++ b/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp
@@ -301,9 +301,9 @@ void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
}
-void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool is_method_handle_invoke) {
+void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
flush_debug_info(pc_offset);
- cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset, is_method_handle_invoke);
+ cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
if (cinfo->exception_handlers() != NULL) {
compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
}
@@ -413,12 +413,6 @@ void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
verify_oop_map(op->info());
- // JSR 292
- // Preserve the SP over MethodHandle call sites.
- if (op->is_method_handle_invoke()) {
- preserve_SP(op);
- }
-
if (os::is_MP()) {
// must align calls sites, otherwise they can't be updated atomically on MP hardware
align_call(op->code());
@@ -444,10 +438,6 @@ void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
default: ShouldNotReachHere();
}
- if (op->is_method_handle_invoke()) {
- restore_SP(op);
- }
-
#if defined(X86) && defined(TIERED)
// C2 leave fpu stack dirty clean it
if (UseSSE < 2) {
diff --git a/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp b/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp
index ad7e53930ad..e40ebd51d1e 100644
--- a/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp
+++ b/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp
@@ -84,7 +84,7 @@ class LIR_Assembler: public CompilationResourceObj {
Address as_Address_hi(LIR_Address* addr);
// debug information
- void add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool is_method_handle_invoke = false);
+ void add_call_info(int pc_offset, CodeEmitInfo* cinfo);
void add_debug_info_for_branch(CodeEmitInfo* info);
void add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo);
void add_debug_info_for_div0_here(CodeEmitInfo* info);
@@ -212,10 +212,6 @@ class LIR_Assembler: public CompilationResourceObj {
void ic_call( LIR_OpJavaCall* op);
void vtable_call( LIR_OpJavaCall* op);
- // JSR 292
- void preserve_SP(LIR_OpJavaCall* op);
- void restore_SP( LIR_OpJavaCall* op);
-
void osr_entry();
void build_frame();
diff --git a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp
index 469dd84a000..36b46eeaa9f 100644
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp
@@ -304,7 +304,7 @@ void LIRGenerator::block_do_prolog(BlockBegin* block) {
__ branch_destination(block->label());
if (LIRTraceExecution &&
- Compilation::current_compilation()->hir()->start()->block_id() != block->block_id() &&
+ Compilation::current()->hir()->start()->block_id() != block->block_id() &&
!block->is_set(BlockBegin::exception_entry_flag)) {
assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
trace_block_entry(block);
@@ -1309,7 +1309,7 @@ void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patc
__ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
if (!addr_opr->is_address()) {
assert(addr_opr->is_register(), "must be");
- addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, 0, T_OBJECT));
+ addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
}
CodeStub* slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code,
info);
@@ -1325,7 +1325,7 @@ void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_Opr
new_val->as_constant_ptr()->as_jobject() == NULL) return;
if (!new_val->is_register()) {
- LIR_Opr new_val_reg = new_pointer_register();
+ LIR_Opr new_val_reg = new_register(T_OBJECT);
if (new_val->is_constant()) {
__ move(new_val, new_val_reg);
} else {
@@ -1337,7 +1337,7 @@ void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_Opr
if (addr->is_address()) {
LIR_Address* address = addr->as_address_ptr();
- LIR_Opr ptr = new_pointer_register();
+ LIR_Opr ptr = new_register(T_OBJECT);
if (!address->index()->is_valid() && address->disp() == 0) {
__ move(address->base(), ptr);
} else {
@@ -1350,7 +1350,6 @@ void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_Opr
LIR_Opr xor_res = new_pointer_register();
LIR_Opr xor_shift_res = new_pointer_register();
-
if (TwoOperandLIRForm ) {
__ move(addr, xor_res);
__ logical_xor(xor_res, new_val, xor_res);
@@ -1368,7 +1367,7 @@ void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_Opr
}
if (!new_val->is_register()) {
- LIR_Opr new_val_reg = new_pointer_register();
+ LIR_Opr new_val_reg = new_register(T_OBJECT);
__ leal(new_val, new_val_reg);
new_val = new_val_reg;
}
@@ -1377,7 +1376,7 @@ void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_Opr
__ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));
CodeStub* slow = new G1PostBarrierStub(addr, new_val);
- __ branch(lir_cond_notEqual, T_INT, slow);
+ __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
__ branch_destination(slow->continuation());
}
@@ -2371,9 +2370,17 @@ void LIRGenerator::do_Invoke(Invoke* x) {
bool optimized = x->target_is_loaded() && x->target_is_final();
assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
+ // JSR 292
+ // Preserve the SP over MethodHandle call sites.
+ ciMethod* target = x->target();
+ if (target->is_method_handle_invoke()) {
+ info->set_is_method_handle_invoke(true);
+ __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
+ }
+
switch (x->code()) {
case Bytecodes::_invokestatic:
- __ call_static(x->target(), result_register,
+ __ call_static(target, result_register,
SharedRuntime::get_resolve_static_call_stub(),
arg_list, info);
break;
@@ -2383,17 +2390,17 @@ void LIRGenerator::do_Invoke(Invoke* x) {
// for final target we still produce an inline cache, in order
// to be able to call mixed mode
if (x->code() == Bytecodes::_invokespecial || optimized) {
- __ call_opt_virtual(x->target(), receiver, result_register,
+ __ call_opt_virtual(target, receiver, result_register,
SharedRuntime::get_resolve_opt_virtual_call_stub(),
arg_list, info);
} else if (x->vtable_index() < 0) {
- __ call_icvirtual(x->target(), receiver, result_register,
+ __ call_icvirtual(target, receiver, result_register,
SharedRuntime::get_resolve_virtual_call_stub(),
arg_list, info);
} else {
int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
- __ call_virtual(x->target(), receiver, result_register, vtable_offset, arg_list, info);
+ __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
}
break;
case Bytecodes::_invokedynamic: {
@@ -2432,7 +2439,7 @@ void LIRGenerator::do_Invoke(Invoke* x) {
// Load target MethodHandle from CallSite object.
__ load(new LIR_Address(tmp, java_dyn_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
- __ call_dynamic(x->target(), receiver, result_register,
+ __ call_dynamic(target, receiver, result_register,
SharedRuntime::get_resolve_opt_virtual_call_stub(),
arg_list, info);
break;
@@ -2442,6 +2449,12 @@ void LIRGenerator::do_Invoke(Invoke* x) {
break;
}
+ // JSR 292
+ // Restore the SP after MethodHandle call sites.
+ if (target->is_method_handle_invoke()) {
+ __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
+ }
+
if (x->type()->is_float() || x->type()->is_double()) {
// Force rounding of results from non-strictfp when in strictfp
// scope (or when we don't know the strictness of the callee, to
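
The do_Invoke hunk above moves the JSR 292 stack-pointer bookkeeping from the assembler into the LIR generator: the CodeEmitInfo is flagged as a method-handle invoke, and explicit moves save and restore SP around the call, replacing the removed preserve_SP/restore_SP hooks. A schematic sketch of the generated bracketing; the LIRList type and operand names here are illustrative, not the real LIR API.

    #include <cstdio>

    // Minimal stand-in: an "LIR list" that just prints what it would emit.
    struct LIRList {
      void move(const char* from, const char* to) { std::printf("move %s -> %s\n", from, to); }
      void call(const char* target)               { std::printf("call %s\n", target); }
    };

    struct CodeEmitInfo {
      bool is_method_handle_invoke = false;   // new flag carried into debug-info recording
    };

    // Sketch of the generator-side bracketing added by this change.
    void emit_invoke(LIRList& lir, CodeEmitInfo& info, bool target_is_method_handle) {
      if (target_is_method_handle) {
        info.is_method_handle_invoke = true;
        lir.move("SP", "method_handle_invoke_SP_save");   // preserve SP before the call
      }
      lir.call("resolved_target");
      if (target_is_method_handle) {
        lir.move("method_handle_invoke_SP_save", "SP");   // restore SP after the call
      }
    }

    int main() {
      LIRList lir;
      CodeEmitInfo info;
      emit_invoke(lir, info, /*target_is_method_handle=*/true);
    }
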
diff --git a/hotspot/src/share/vm/c1/c1_LinearScan.cpp b/hotspot/src/share/vm/c1/c1_LinearScan.cpp
index 907e584d59d..0888bef3c01 100644
--- a/hotspot/src/share/vm/c1/c1_LinearScan.cpp
+++ b/hotspot/src/share/vm/c1/c1_LinearScan.cpp
@@ -84,10 +84,6 @@ LinearScan::LinearScan(IR* ir, LIRGenerator* gen, FrameMap* frame_map)
, _fpu_stack_allocator(NULL)
#endif
{
- // note: to use more than on instance of LinearScan at a time this function call has to
- // be moved somewhere outside of this constructor:
- Interval::initialize();
-
assert(this->ir() != NULL, "check if valid");
assert(this->compilation() != NULL, "check if valid");
assert(this->gen() != NULL, "check if valid");
@@ -3929,8 +3925,8 @@ Range::Range(int from, int to, Range* next) :
// initialize sentinel
Range* Range::_end = NULL;
-void Range::initialize() {
- _end = new Range(max_jint, max_jint, NULL);
+void Range::initialize(Arena* arena) {
+ _end = new (arena) Range(max_jint, max_jint, NULL);
}
int Range::intersects_at(Range* r2) const {
@@ -3976,9 +3972,9 @@ void Range::print(outputStream* out) const {
// initialize sentinel
Interval* Interval::_end = NULL;
-void Interval::initialize() {
- Range::initialize();
- _end = new Interval(-1);
+void Interval::initialize(Arena* arena) {
+ Range::initialize(arena);
+ _end = new (arena) Interval(-1);
}
Interval::Interval(int reg_num) :
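
The Range and Interval sentinels above are now built once, in an arena handed to initialize(Arena*) from Compiler::initialize_all, instead of being re-created from the LinearScan constructor. A compact sketch of the initialize-once sentinel pattern, without the arena plumbing already shown earlier; the class is a toy, not the HotSpot Range.

    #include <cassert>
    #include <climits>

    // A sentinel terminator created once at startup and shared afterwards,
    // rather than being rebuilt by every register-allocator instance.
    class Range {
      int _from, _to;
      Range* _next;
      static Range* _end;                      // shared sentinel
     public:
      Range(int from, int to, Range* next) : _from(from), _to(to), _next(next) {}
      static void initialize() {               // called once, e.g. from initialize_all()
        assert(_end == nullptr && "initialize only once");
        _end = new Range(INT_MAX, INT_MAX, nullptr);
      }
      static Range* end() { return _end; }
    };
    Range* Range::_end = nullptr;

    int main() {
      Range::initialize();
      assert(Range::end() != nullptr);
    }
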
diff --git a/hotspot/src/share/vm/c1/c1_LinearScan.hpp b/hotspot/src/share/vm/c1/c1_LinearScan.hpp
index 49bce80dc0a..9d5b2171e52 100644
--- a/hotspot/src/share/vm/c1/c1_LinearScan.hpp
+++ b/hotspot/src/share/vm/c1/c1_LinearScan.hpp
@@ -462,7 +462,7 @@ class Range : public CompilationResourceObj {
public:
Range(int from, int to, Range* next);
- static void initialize();
+ static void initialize(Arena* arena);
static Range* end() { return _end; }
int from() const { return _from; }
@@ -529,7 +529,7 @@ class Interval : public CompilationResourceObj {
public:
Interval(int reg_num);
- static void initialize();
+ static void initialize(Arena* arena);
static Interval* end() { return _end; }
// accessors
diff --git a/hotspot/src/share/vm/c1/c1_Optimizer.cpp b/hotspot/src/share/vm/c1/c1_Optimizer.cpp
index f432e4de39c..fd5ddd53eab 100644
--- a/hotspot/src/share/vm/c1/c1_Optimizer.cpp
+++ b/hotspot/src/share/vm/c1/c1_Optimizer.cpp
@@ -437,11 +437,8 @@ public:
// Because of a static contained within (for the purpose of iteration
// over instructions), it is only valid to have one of these active at
// a time
-class NullCheckEliminator {
+class NullCheckEliminator: public ValueVisitor {
private:
- static NullCheckEliminator* _static_nce;
- static void do_value(Value* vp);
-
Optimizer* _opt;
ValueSet* _visitable_instructions; // Visit each instruction only once per basic block
@@ -504,6 +501,8 @@ class NullCheckEliminator {
// Process a graph
void iterate(BlockBegin* root);
+ void visit(Value* f);
+
// In some situations (like NullCheck(x); getfield(x)) the debug
// information from the explicit NullCheck can be used to populate
// the getfield, even if the two instructions are in different
@@ -602,14 +601,11 @@ void NullCheckVisitor::do_ProfileCall (ProfileCall* x) { nce()->clear_las
void NullCheckVisitor::do_ProfileCounter (ProfileCounter* x) {}
-NullCheckEliminator* NullCheckEliminator::_static_nce = NULL;
-
-
-void NullCheckEliminator::do_value(Value* p) {
+void NullCheckEliminator::visit(Value* p) {
assert(*p != NULL, "should not find NULL instructions");
- if (_static_nce->visitable(*p)) {
- _static_nce->mark_visited(*p);
- (*p)->visit(&_static_nce->_visitor);
+ if (visitable(*p)) {
+ mark_visited(*p);
+ (*p)->visit(&_visitor);
}
}
@@ -637,7 +633,6 @@ void NullCheckEliminator::iterate_all() {
void NullCheckEliminator::iterate_one(BlockBegin* block) {
- _static_nce = this;
clear_visitable_state();
// clear out an old explicit null checks
set_last_explicit_null_check(NULL);
@@ -712,7 +707,7 @@ void NullCheckEliminator::iterate_one(BlockBegin* block) {
mark_visitable(instr);
if (instr->is_root() || instr->can_trap() || (instr->as_NullCheck() != NULL)) {
mark_visited(instr);
- instr->input_values_do(&NullCheckEliminator::do_value);
+ instr->input_values_do(this);
instr->visit(&_visitor);
}
}
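
The c1_Optimizer.cpp change above replaces a static callback (do_value plus the _static_nce pointer) with a ValueVisitor object, so traversal state travels with the visitor instead of a file-scope static and two eliminators could in principle be active at once. The following standalone sketch is not HotSpot code; the names (Node, NodeVisitor, Eliminator) are invented and it only illustrates the shape of that refactoring.

#include <cstdio>
#include <vector>

struct Node { int id; };

// Callback interface: the traversal hands every operand slot to a visitor.
class NodeVisitor {
 public:
  virtual ~NodeVisitor() {}
  virtual void visit(Node** slot) = 0;
};

// Generic traversal; per-pass state lives in the visitor that is passed in,
// so no "current pass" global is needed.
static void input_values_do(std::vector<Node*>& inputs, NodeVisitor* v) {
  for (size_t i = 0; i < inputs.size(); i++) {
    v->visit(&inputs[i]);
  }
}

// The pass itself is the visitor, so two instances can run independently.
class Eliminator : public NodeVisitor {
 public:
  int visited;
  Eliminator() : visited(0) {}
  virtual void visit(Node** slot) {
    visited++;                                  // per-instance, not static
    std::printf("visiting node %d\n", (*slot)->id);
  }
};

int main() {
  Node a = {1};
  Node b = {2};
  std::vector<Node*> inputs;
  inputs.push_back(&a);
  inputs.push_back(&b);

  Eliminator pass;
  input_values_do(inputs, &pass);               // was: a static do_value callback
  std::printf("visited %d nodes\n", pass.visited);
  return 0;
}
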
diff --git a/hotspot/src/share/vm/c1/c1_Runtime1.cpp b/hotspot/src/share/vm/c1/c1_Runtime1.cpp
index 7db7841c64c..b157987dada 100644
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp
@@ -60,7 +60,6 @@ void StubAssembler::set_num_rt_args(int args) {
// Implementation of Runtime1
-bool Runtime1::_is_initialized = false;
CodeBlob* Runtime1::_blobs[Runtime1::number_of_ids];
const char *Runtime1::_blob_names[] = {
RUNTIME1_STUBS(STUB_NAME, LAST_STUB_NAME)
@@ -89,8 +88,6 @@ int Runtime1::_throw_array_store_exception_count = 0;
int Runtime1::_throw_count = 0;
#endif
-BufferBlob* Runtime1::_buffer_blob = NULL;
-
// Simple helper to see if the caller of a runtime stub which
// entered the VM has been deoptimized
@@ -117,43 +114,14 @@ static void deopt_caller() {
}
-BufferBlob* Runtime1::get_buffer_blob() {
- // Allocate code buffer space only once
- BufferBlob* blob = _buffer_blob;
- if (blob == NULL) {
- // setup CodeBuffer. Preallocate a BufferBlob of size
- // NMethodSizeLimit plus some extra space for constants.
- int code_buffer_size = desired_max_code_buffer_size() + desired_max_constant_size();
- blob = BufferBlob::create("Compiler1 temporary CodeBuffer",
- code_buffer_size);
- guarantee(blob != NULL, "must create initial code buffer");
- _buffer_blob = blob;
- }
- return _buffer_blob;
-}
-
-void Runtime1::setup_code_buffer(CodeBuffer* code, int call_stub_estimate) {
- // Preinitialize the consts section to some large size:
- int locs_buffer_size = 20 * (relocInfo::length_limit + sizeof(relocInfo));
- char* locs_buffer = NEW_RESOURCE_ARRAY(char, locs_buffer_size);
- code->insts()->initialize_shared_locs((relocInfo*)locs_buffer,
- locs_buffer_size / sizeof(relocInfo));
- code->initialize_consts_size(desired_max_constant_size());
- // Call stubs + deopt/exception handler
- code->initialize_stubs_size((call_stub_estimate * LIR_Assembler::call_stub_size) +
- LIR_Assembler::exception_handler_size +
- LIR_Assembler::deopt_handler_size);
-}
-
-
-void Runtime1::generate_blob_for(StubID id) {
+void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
assert(0 <= id && id < number_of_ids, "illegal stub id");
ResourceMark rm;
// create code buffer for code storage
- CodeBuffer code(get_buffer_blob()->instructions_begin(),
- get_buffer_blob()->instructions_size());
+ CodeBuffer code(buffer_blob->instructions_begin(),
+ buffer_blob->instructions_size());
- setup_code_buffer(&code, 0);
+ Compilation::setup_code_buffer(&code, 0);
// create assembler for code generation
StubAssembler* sasm = new StubAssembler(&code, name_for(id), id);
@@ -204,35 +172,28 @@ void Runtime1::generate_blob_for(StubID id) {
}
-void Runtime1::initialize() {
- // Warning: If we have more than one compilation running in parallel, we
- // need a lock here with the current setup (lazy initialization).
- if (!is_initialized()) {
- _is_initialized = true;
-
- // platform-dependent initialization
- initialize_pd();
- // generate stubs
- for (int id = 0; id < number_of_ids; id++) generate_blob_for((StubID)id);
- // printing
+void Runtime1::initialize(BufferBlob* blob) {
+ // platform-dependent initialization
+ initialize_pd();
+ // generate stubs
+ for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id);
+ // printing
#ifndef PRODUCT
- if (PrintSimpleStubs) {
- ResourceMark rm;
- for (int id = 0; id < number_of_ids; id++) {
- _blobs[id]->print();
- if (_blobs[id]->oop_maps() != NULL) {
- _blobs[id]->oop_maps()->print();
- }
+ if (PrintSimpleStubs) {
+ ResourceMark rm;
+ for (int id = 0; id < number_of_ids; id++) {
+ _blobs[id]->print();
+ if (_blobs[id]->oop_maps() != NULL) {
+ _blobs[id]->oop_maps()->print();
}
}
-#endif
}
+#endif
}
CodeBlob* Runtime1::blob_for(StubID id) {
assert(0 <= id && id < number_of_ids, "illegal stub id");
- if (!is_initialized()) initialize();
return _blobs[id];
}
diff --git a/hotspot/src/share/vm/c1/c1_Runtime1.hpp b/hotspot/src/share/vm/c1/c1_Runtime1.hpp
index aed47055a11..c2c589cc791 100644
--- a/hotspot/src/share/vm/c1/c1_Runtime1.hpp
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.hpp
@@ -70,18 +70,6 @@ class StubAssembler;
class Runtime1: public AllStatic {
friend class VMStructs;
friend class ArrayCopyStub;
- private:
- static int desired_max_code_buffer_size() {
- return (int) NMethodSizeLimit; // default 256K or 512K
- }
- static int desired_max_constant_size() {
- return (int) NMethodSizeLimit / 10; // about 25K
- }
-
- // Note: This buffers is allocated once at startup since allocation
- // for each compilation seems to be too expensive (at least on Intel
- // win32).
- static BufferBlob* _buffer_blob;
public:
enum StubID {
@@ -115,12 +103,11 @@ class Runtime1: public AllStatic {
#endif
private:
- static bool _is_initialized;
static CodeBlob* _blobs[number_of_ids];
static const char* _blob_names[];
// stub generation
- static void generate_blob_for(StubID id);
+ static void generate_blob_for(BufferBlob* blob, StubID id);
static OopMapSet* generate_code_for(StubID id, StubAssembler* masm);
static OopMapSet* generate_exception_throw(StubAssembler* sasm, address target, bool has_argument);
static void generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_maps, OopMap* oop_map, bool ignore_fpu_registers = false);
@@ -162,12 +149,8 @@ class Runtime1: public AllStatic {
static void patch_code(JavaThread* thread, StubID stub_id);
public:
- static BufferBlob* get_buffer_blob();
- static void setup_code_buffer(CodeBuffer* cb, int call_stub_estimate);
-
// initialization
- static bool is_initialized() { return _is_initialized; }
- static void initialize();
+ static void initialize(BufferBlob* blob);
static void initialize_pd();
// stubs
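
Taken together, the c1_Runtime1 hunks drop the _is_initialized flag, the lazily created _buffer_blob, and the implicit initialize() call in blob_for(), in favour of a single explicit initialize(BufferBlob*) whose caller supplies the scratch buffer. Below is a minimal standalone sketch of that shift; the types and numbers are invented and it makes no claim about the real call sites.

#include <cassert>
#include <cstdio>

struct Buffer { int size; };

class Stubs {
 public:
  enum { kCount = 3 };

  // Explicit initialization: the embedder decides when to call this and
  // which scratch buffer to hand in; no lazy path, no hidden flag.
  static void initialize(Buffer* scratch) {
    assert(scratch != 0 && scratch->size > 0);
    for (int id = 0; id < kCount; id++) {
      generate_stub(scratch, id);
    }
  }

  static int stub(int id) {
    assert(0 <= id && id < kCount);             // callers must initialize() first
    return _stubs[id];
  }

 private:
  static int _stubs[kCount];

  // Stand-in for emitting stub code into the caller-provided buffer.
  static void generate_stub(Buffer* scratch, int id) {
    _stubs[id] = scratch->size + id;
  }
};

int Stubs::_stubs[Stubs::kCount];

int main() {
  Buffer scratch = { 4096 };
  Stubs::initialize(&scratch);                  // done once, explicitly, at startup
  std::printf("stub 2 = %d\n", Stubs::stub(2));
  return 0;
}
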
diff --git a/hotspot/src/share/vm/c1/c1_ValueStack.cpp b/hotspot/src/share/vm/c1/c1_ValueStack.cpp
index 1e08e04f43d..261176507b3 100644
--- a/hotspot/src/share/vm/c1/c1_ValueStack.cpp
+++ b/hotspot/src/share/vm/c1/c1_ValueStack.cpp
@@ -119,14 +119,14 @@ void ValueStack::pin_stack_for_linear_scan() {
// apply function to all values of a list; factored out from values_do(f)
-void ValueStack::apply(Values list, void f(Value*)) {
+void ValueStack::apply(Values list, ValueVisitor* f) {
for (int i = 0; i < list.length(); i++) {
Value* va = list.adr_at(i);
Value v0 = *va;
if (v0 != NULL) {
if (!v0->type()->is_illegal()) {
assert(v0->as_HiWord() == NULL, "should never see HiWord during traversal");
- f(va);
+ f->visit(va);
#ifdef ASSERT
Value v1 = *va;
if (v0 != v1) {
@@ -143,7 +143,7 @@ void ValueStack::apply(Values list, void f(Value*)) {
}
-void ValueStack::values_do(void f(Value*)) {
+void ValueStack::values_do(ValueVisitor* f) {
apply(_stack, f);
apply(_locks, f);
diff --git a/hotspot/src/share/vm/c1/c1_ValueStack.hpp b/hotspot/src/share/vm/c1/c1_ValueStack.hpp
index 3aaca56b4a8..9e254bf0dcd 100644
--- a/hotspot/src/share/vm/c1/c1_ValueStack.hpp
+++ b/hotspot/src/share/vm/c1/c1_ValueStack.hpp
@@ -41,7 +41,7 @@ class ValueStack: public CompilationResourceObj {
}
// helper routine
- static void apply(Values list, void f(Value*));
+ static void apply(Values list, ValueVisitor* f);
public:
// creation
@@ -143,7 +143,7 @@ class ValueStack: public CompilationResourceObj {
void pin_stack_for_linear_scan();
// iteration
- void values_do(void f(Value*));
+ void values_do(ValueVisitor* f);
// untyped manipulation (for dup_x1, etc.)
void clear_stack() { _stack.clear(); }
diff --git a/hotspot/src/share/vm/c1/c1_ValueType.cpp b/hotspot/src/share/vm/c1/c1_ValueType.cpp
index 94fb7d2b028..24a2083d07c 100644
--- a/hotspot/src/share/vm/c1/c1_ValueType.cpp
+++ b/hotspot/src/share/vm/c1/c1_ValueType.cpp
@@ -46,27 +46,26 @@ IntConstant* intOne = NULL;
ObjectConstant* objectNull = NULL;
-void ValueType::initialize() {
+void ValueType::initialize(Arena* arena) {
// Note: Must initialize all types for each compilation
// as they are allocated within a ResourceMark!
// types
- voidType = new VoidType();
- intType = new IntType();
- longType = new LongType();
- floatType = new FloatType();
- doubleType = new DoubleType();
- objectType = new ObjectType();
- arrayType = new ArrayType();
- instanceType = new InstanceType();
- classType = new ClassType();
- addressType = new AddressType();
- illegalType = new IllegalType();
+ voidType = new (arena) VoidType();
+ intType = new (arena) IntType();
+ longType = new (arena) LongType();
+ floatType = new (arena) FloatType();
+ doubleType = new (arena) DoubleType();
+ objectType = new (arena) ObjectType();
+ arrayType = new (arena) ArrayType();
+ instanceType = new (arena) InstanceType();
+ classType = new (arena) ClassType();
+ addressType = new (arena) AddressType();
+ illegalType = new (arena) IllegalType();
- // constants
- intZero = new IntConstant(0);
- intOne = new IntConstant(1);
- objectNull = new ObjectConstant(ciNullObject::make());
+ intZero = new (arena) IntConstant(0);
+ intOne = new (arena) IntConstant(1);
+ objectNull = new (arena) ObjectConstant(ciNullObject::make());
};
diff --git a/hotspot/src/share/vm/c1/c1_ValueType.hpp b/hotspot/src/share/vm/c1/c1_ValueType.hpp
index 001df60be59..3098114cf95 100644
--- a/hotspot/src/share/vm/c1/c1_ValueType.hpp
+++ b/hotspot/src/share/vm/c1/c1_ValueType.hpp
@@ -94,7 +94,7 @@ class ValueType: public CompilationResourceObj {
public:
// initialization
- static void initialize();
+ static void initialize(Arena* arena);
// accessors
virtual ValueType* base() const = 0; // the 'canonical' type (e.g., intType for an IntConstant)
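
The c1_ValueType hunks switch the per-compilation type and constant singletons from plain new to placement new into an Arena, so they are bump-allocated and released together with the compilation's arena rather than one by one. The sketch below shows the mechanics of an arena placement operator new with a made-up Arena class; the real code goes through CompilationResourceObj's allocation operators, which this sketch does not reproduce.

#include <cstddef>
#include <cstdio>

// Minimal bump-pointer arena; everything allocated from it is released at once
// when the arena is destroyed.
class Arena {
 public:
  explicit Arena(std::size_t capacity)
      : _buf(new char[capacity]), _top(0), _cap(capacity) {}
  ~Arena() { delete[] _buf; }

  void* alloc(std::size_t bytes) {
    std::size_t aligned = (bytes + 7) & ~static_cast<std::size_t>(7);
    if (_top + aligned > _cap) return 0;        // sketch: no growth, no error path
    void* p = _buf + _top;
    _top += aligned;
    return p;
  }

 private:
  char*       _buf;
  std::size_t _top;
  std::size_t _cap;
};

// The overload that makes "new (arena) T(...)" allocate from the arena.
inline void* operator new(std::size_t bytes, Arena* arena) { return arena->alloc(bytes); }

struct IntConstant {
  int value;
  explicit IntConstant(int v) : value(v) {}
};

int main() {
  Arena arena(1024);
  IntConstant* zero = new (&arena) IntConstant(0);  // arena-allocated
  IntConstant* one  = new (&arena) IntConstant(1);
  std::printf("%d %d\n", zero->value, one->value);
  return 0;                                         // arena frees both at scope exit
}
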
diff --git a/hotspot/src/share/vm/ci/ciMethod.cpp b/hotspot/src/share/vm/ci/ciMethod.cpp
index f445a70822b..189b5510468 100644
--- a/hotspot/src/share/vm/ci/ciMethod.cpp
+++ b/hotspot/src/share/vm/ci/ciMethod.cpp
@@ -690,20 +690,32 @@ int ciMethod::scale_count(int count, float prof_factor) {
// ------------------------------------------------------------------
// invokedynamic support
+
+// ------------------------------------------------------------------
+// ciMethod::is_method_handle_invoke
//
+// Return true if the method is a MethodHandle target.
bool ciMethod::is_method_handle_invoke() const {
- check_is_loaded();
- bool flag = ((flags().as_int() & JVM_MH_INVOKE_BITS) == JVM_MH_INVOKE_BITS);
+ bool flag = (holder()->name() == ciSymbol::java_dyn_MethodHandle() &&
+ methodOopDesc::is_method_handle_invoke_name(name()->sid()));
#ifdef ASSERT
- {
- VM_ENTRY_MARK;
- bool flag2 = get_methodOop()->is_method_handle_invoke();
- assert(flag == flag2, "consistent");
+ if (is_loaded()) {
+ bool flag2 = ((flags().as_int() & JVM_MH_INVOKE_BITS) == JVM_MH_INVOKE_BITS);
+ {
+ VM_ENTRY_MARK;
+ bool flag3 = get_methodOop()->is_method_handle_invoke();
+ assert(flag2 == flag3, "consistent");
+ assert(flag == flag3, "consistent");
+ }
}
#endif //ASSERT
return flag;
}
+// ------------------------------------------------------------------
+// ciMethod::is_method_handle_adapter
+//
+// Return true if the method is a generated MethodHandle adapter.
bool ciMethod::is_method_handle_adapter() const {
check_is_loaded();
VM_ENTRY_MARK;
diff --git a/hotspot/src/share/vm/ci/ciStreams.cpp b/hotspot/src/share/vm/ci/ciStreams.cpp
index 6ebcde1a44d..2dfd8c7a535 100644
--- a/hotspot/src/share/vm/ci/ciStreams.cpp
+++ b/hotspot/src/share/vm/ci/ciStreams.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -81,27 +81,21 @@ int ciExceptionHandlerStream::count_remaining() {
// providing accessors for constant pool items.
// ------------------------------------------------------------------
-// ciBytecodeStream::wide
-//
-// Special handling for the wide bytcode
-Bytecodes::Code ciBytecodeStream::wide()
-{
- // Get following bytecode; do not return wide
- Bytecodes::Code bc = (Bytecodes::Code)_pc[1];
- _pc += 2; // Skip both bytecodes
- _pc += 2; // Skip index always
- if( bc == Bytecodes::_iinc )
- _pc += 2; // Skip optional constant
- _was_wide = _pc; // Flag last wide bytecode found
- return bc;
-}
-
-// ------------------------------------------------------------------
-// ciBytecodeStream::table
+// ciBytecodeStream::next_wide_or_table
//
// Special handling for switch ops
-Bytecodes::Code ciBytecodeStream::table( Bytecodes::Code bc ) {
- switch( bc ) { // Check for special bytecode handling
+Bytecodes::Code ciBytecodeStream::next_wide_or_table(Bytecodes::Code bc) {
+ switch (bc) { // Check for special bytecode handling
+ case Bytecodes::_wide:
+    // Special handling for the wide bytecode
+ // Get following bytecode; do not return wide
+ assert(Bytecodes::Code(_pc[0]) == Bytecodes::_wide, "");
+ bc = Bytecodes::java_code(_raw_bc = (Bytecodes::Code)_pc[1]);
+ assert(Bytecodes::wide_length_for(bc) > 2, "must make progress");
+ _pc += Bytecodes::wide_length_for(bc);
+ _was_wide = _pc; // Flag last wide bytecode found
+ assert(is_wide(), "accessor works right");
+ break;
case Bytecodes::_lookupswitch:
_pc++; // Skip wide bytecode
@@ -164,7 +158,7 @@ void ciBytecodeStream::force_bci(int bci) {
int ciBytecodeStream::get_klass_index() const {
switch(cur_bc()) {
case Bytecodes::_ldc:
- return get_index();
+ return get_index_u1();
case Bytecodes::_ldc_w:
case Bytecodes::_ldc2_w:
case Bytecodes::_checkcast:
@@ -173,7 +167,7 @@ int ciBytecodeStream::get_klass_index() const {
case Bytecodes::_multianewarray:
case Bytecodes::_new:
case Bytecodes::_newarray:
- return get_index_big();
+ return get_index_u2();
default:
ShouldNotReachHere();
return 0;
@@ -199,10 +193,10 @@ ciKlass* ciBytecodeStream::get_klass(bool& will_link) {
int ciBytecodeStream::get_constant_index() const {
switch(cur_bc()) {
case Bytecodes::_ldc:
- return get_index();
+ return get_index_u1();
case Bytecodes::_ldc_w:
case Bytecodes::_ldc2_w:
- return get_index_big();
+ return get_index_u2();
default:
ShouldNotReachHere();
return 0;
@@ -239,7 +233,7 @@ int ciBytecodeStream::get_field_index() {
cur_bc() == Bytecodes::_putfield ||
cur_bc() == Bytecodes::_getstatic ||
cur_bc() == Bytecodes::_putstatic, "wrong bc");
- return get_index_big();
+ return get_index_u2_cpcache();
}
@@ -319,7 +313,9 @@ int ciBytecodeStream::get_method_index() {
ShouldNotReachHere();
}
#endif
- return get_index_int();
+ if (has_index_u4())
+ return get_index_u4(); // invokedynamic
+ return get_index_u2_cpcache();
}
// ------------------------------------------------------------------
diff --git a/hotspot/src/share/vm/ci/ciStreams.hpp b/hotspot/src/share/vm/ci/ciStreams.hpp
index 77a15f2b3ba..c0f74544428 100644
--- a/hotspot/src/share/vm/ci/ciStreams.hpp
+++ b/hotspot/src/share/vm/ci/ciStreams.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,15 +31,19 @@
// their original form during iteration.
class ciBytecodeStream : StackObj {
private:
- // Handling for the weird bytecodes
- Bytecodes::Code wide(); // Handle wide bytecode
- Bytecodes::Code table(Bytecodes::Code); // Handle complicated inline table
+ // Handling for the weird bytecodes
+ Bytecodes::Code next_wide_or_table(Bytecodes::Code); // Handle _wide & complicated inline table
static Bytecodes::Code check_java(Bytecodes::Code c) {
assert(Bytecodes::is_java_code(c), "should not return _fast bytecodes");
return c;
}
+ static Bytecodes::Code check_defined(Bytecodes::Code c) {
+ assert(Bytecodes::is_defined(c), "");
+ return c;
+ }
+
ciMethod* _method; // the method
ciInstanceKlass* _holder;
address _bc_start; // Start of current bytecode for table
@@ -50,11 +54,21 @@ private:
address _end; // Past end of bytecodes
address _pc; // Current PC
Bytecodes::Code _bc; // Current bytecode
+ Bytecodes::Code _raw_bc; // Current bytecode, raw form
void reset( address base, unsigned int size ) {
_bc_start =_was_wide = 0;
_start = _pc = base; _end = base + size; }
+ void assert_wide(bool require_wide) const {
+ if (require_wide)
+ { assert(is_wide(), "must be a wide instruction"); }
+ else { assert(!is_wide(), "must not be a wide instruction"); }
+ }
+
+ Bytecode* bytecode() const { return Bytecode_at(_bc_start); }
+ Bytecode* next_bytecode() const { return Bytecode_at(_pc); }
+
public:
// End-Of-Bytecodes
static Bytecodes::Code EOBC() {
@@ -92,11 +106,12 @@ public:
}
address cur_bcp() const { return _bc_start; } // Returns bcp to current instruction
- int next_bci() const { return _pc -_start; }
+ int next_bci() const { return _pc - _start; }
int cur_bci() const { return _bc_start - _start; }
int instruction_size() const { return _pc - _bc_start; }
Bytecodes::Code cur_bc() const{ return check_java(_bc); }
+ Bytecodes::Code cur_bc_raw() const { return check_defined(_raw_bc); }
Bytecodes::Code next_bc() { return Bytecodes::java_code((Bytecodes::Code)* _pc); }
// Return current ByteCode and increment PC to next bytecode, skipping all
@@ -109,85 +124,76 @@ public:
// Fetch Java bytecode
// All rewritten bytecodes maintain the size of original bytecode.
- _bc = Bytecodes::java_code((Bytecodes::Code)*_pc);
+ _bc = Bytecodes::java_code(_raw_bc = (Bytecodes::Code)*_pc);
int csize = Bytecodes::length_for(_bc); // Expected size
-
- if( _bc == Bytecodes::_wide ) {
- _bc=wide(); // Handle wide bytecode
- } else if( csize == 0 ) {
- _bc=table(_bc); // Handle inline tables
- } else {
- _pc += csize; // Bump PC past bytecode
+ _pc += csize; // Bump PC past bytecode
+ if (csize == 0) {
+ _bc = next_wide_or_table(_bc);
}
return check_java(_bc);
}
bool is_wide() const { return ( _pc == _was_wide ); }
+  // Does this instruction contain an index which refers into the CP cache?
+ bool uses_cp_cache() const { return Bytecodes::uses_cp_cache(cur_bc_raw()); }
+
+ int get_index_u1() const {
+ return bytecode()->get_index_u1(cur_bc_raw());
+ }
+
// Get a byte index following this bytecode.
// If prefixed with a wide bytecode, get a wide index.
int get_index() const {
- assert_index_size(is_wide() ? 2 : 1);
return (_pc == _was_wide) // was widened?
- ? Bytes::get_Java_u2(_bc_start+2) // yes, return wide index
- : _bc_start[1]; // no, return narrow index
+ ? get_index_u2(true) // yes, return wide index
+ : get_index_u1(); // no, return narrow index
}
- // Get 2-byte index (getfield/putstatic/etc)
- int get_index_big() const {
- assert_index_size(2);
- return Bytes::get_Java_u2(_bc_start+1);
+ // Get 2-byte index (byte swapping depending on which bytecode)
+ int get_index_u2(bool is_wide = false) const {
+ return bytecode()->get_index_u2(cur_bc_raw(), is_wide);
}
- // Get 2-byte index (or 4-byte, for invokedynamic)
- int get_index_int() const {
- return has_giant_index() ? get_index_giant() : get_index_big();
+ // Get 2-byte index in native byte order. (Rewriter::rewrite makes these.)
+ int get_index_u2_cpcache() const {
+ return bytecode()->get_index_u2_cpcache(cur_bc_raw());
}
// Get 4-byte index, for invokedynamic.
- int get_index_giant() const {
- assert_index_size(4);
- return Bytes::get_native_u4(_bc_start+1);
+ int get_index_u4() const {
+ return bytecode()->get_index_u4(cur_bc_raw());
}
- bool has_giant_index() const { return (cur_bc() == Bytecodes::_invokedynamic); }
+ bool has_index_u4() const {
+ return bytecode()->has_index_u4(cur_bc_raw());
+ }
// Get dimensions byte (multinewarray)
int get_dimensions() const { return *(unsigned char*)(_pc-1); }
// Sign-extended index byte/short, no widening
- int get_byte() const { return (int8_t)(_pc[-1]); }
- int get_short() const { return (int16_t)Bytes::get_Java_u2(_pc-2); }
- int get_long() const { return (int32_t)Bytes::get_Java_u4(_pc-4); }
+ int get_constant_u1() const { return bytecode()->get_constant_u1(instruction_size()-1, cur_bc_raw()); }
+ int get_constant_u2(bool is_wide = false) const { return bytecode()->get_constant_u2(instruction_size()-2, cur_bc_raw(), is_wide); }
// Get a byte signed constant for "iinc". Invalid for other bytecodes.
// If prefixed with a wide bytecode, get a wide constant
- int get_iinc_con() const {return (_pc==_was_wide) ? get_short() :get_byte();}
+ int get_iinc_con() const {return (_pc==_was_wide) ? (jshort) get_constant_u2(true) : (jbyte) get_constant_u1();}
// 2-byte branch offset from current pc
- int get_dest( ) const {
- assert( Bytecodes::length_at(_bc_start) == sizeof(jshort)+1, "get_dest called with bad bytecode" );
- return _bc_start-_start + (short)Bytes::get_Java_u2(_pc-2);
+ int get_dest() const {
+ return cur_bci() + bytecode()->get_offset_s2(cur_bc_raw());
}
// 2-byte branch offset from next pc
- int next_get_dest( ) const {
- address next_bc_start = _pc;
- assert( _pc < _end, "" );
- Bytecodes::Code next_bc = (Bytecodes::Code)*_pc;
- assert( next_bc != Bytecodes::_wide, "");
- int next_csize = Bytecodes::length_for(next_bc);
- assert( next_csize != 0, "" );
- assert( next_bc <= Bytecodes::_jsr_w, "");
- address next_pc = _pc + next_csize;
- assert( Bytecodes::length_at(next_bc_start) == sizeof(jshort)+1, "next_get_dest called with bad bytecode" );
- return next_bc_start-_start + (short)Bytes::get_Java_u2(next_pc-2);
+ int next_get_dest() const {
+ assert(_pc < _end, "");
+ return next_bci() + next_bytecode()->get_offset_s2(Bytecodes::_ifeq);
}
// 4-byte branch offset from current pc
- int get_far_dest( ) const {
- assert( Bytecodes::length_at(_bc_start) == sizeof(jint)+1, "dest4 called with bad bytecode" );
- return _bc_start-_start + (int)Bytes::get_Java_u4(_pc-4);
+ int get_far_dest() const {
+ return cur_bci() + bytecode()->get_offset_s4(cur_bc_raw());
}
// For a lookup or switch table, return target destination
@@ -234,22 +240,6 @@ public:
ciCPCache* get_cpcache();
ciCallSite* get_call_site();
-
- private:
- void assert_index_size(int required_size) const {
-#ifdef ASSERT
- int isize = instruction_size() - (is_wide() ? 1 : 0) - 1;
- if (isize == 2 && cur_bc() == Bytecodes::_iinc)
- isize = 1;
- else if (isize <= 2)
- ; // no change
- else if (has_giant_index())
- isize = 4;
- else
- isize = 2;
- assert(isize = required_size, "wrong index size");
-#endif
- }
};
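
The ciStreams changes replace get_index/get_index_big/get_index_giant with width-named accessors (get_index_u1, get_index_u2, get_index_u2_cpcache, get_index_u4) selected from the raw bytecode. The fragment below is only a simplified illustration of picking an operand width from the opcode; it is not the ciBytecodeStream API, it ignores the constant-pool cache, and it fixes the invokedynamic byte order arbitrarily.

#include <cstdio>

typedef unsigned char u1;

enum Opcode { op_ldc = 0x12, op_ldc_w = 0x13, op_invokedynamic = 0xba };

// 1-byte index (e.g. ldc).
static int index_u1(const u1* bcp) { return bcp[1]; }

// 2-byte big-endian ("Java order") index (e.g. ldc_w, new, checkcast).
static int index_u2(const u1* bcp) { return (bcp[1] << 8) | bcp[2]; }

// 4-byte index; invokedynamic carries a wider operand (byte order simplified here).
static int index_u4(const u1* bcp) {
  return (bcp[1] << 24) | (bcp[2] << 16) | (bcp[3] << 8) | bcp[4];
}

// Pick the accessor from the opcode instead of guessing from instruction size.
static int get_index(const u1* bcp) {
  switch (bcp[0]) {
    case op_ldc:           return index_u1(bcp);
    case op_ldc_w:         return index_u2(bcp);
    case op_invokedynamic: return index_u4(bcp);
    default:               return -1;            // not covered by this sketch
  }
}

int main() {
  const u1 ldc[]   = { op_ldc, 0x07 };
  const u1 ldc_w[] = { op_ldc_w, 0x01, 0x02 };
  std::printf("ldc index   = %d\n", get_index(ldc));    // 7
  std::printf("ldc_w index = %d\n", get_index(ldc_w));  // 258
  return 0;
}
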
diff --git a/hotspot/src/share/vm/ci/ciTypeFlow.cpp b/hotspot/src/share/vm/ci/ciTypeFlow.cpp
index 74dae3e454d..c90f51fc42d 100644
--- a/hotspot/src/share/vm/ci/ciTypeFlow.cpp
+++ b/hotspot/src/share/vm/ci/ciTypeFlow.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2132,6 +2132,7 @@ bool ciTypeFlow::can_trap(ciBytecodeStream& str) {
if (!Bytecodes::can_trap(str.cur_bc())) return false;
switch (str.cur_bc()) {
+ // %%% FIXME: ldc of Class can generate an exception
case Bytecodes::_ldc:
case Bytecodes::_ldc_w:
case Bytecodes::_ldc2_w:
diff --git a/hotspot/src/share/vm/classfile/classFileParser.cpp b/hotspot/src/share/vm/classfile/classFileParser.cpp
index 87135d24422..9e751177f2d 100644
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp
@@ -25,10 +25,10 @@
#include "incls/_precompiled.incl"
#include "incls/_classFileParser.cpp.incl"
-// We generally try to create the oops directly when parsing, rather than allocating
-// temporary data structures and copying the bytes twice. A temporary area is only
-// needed when parsing utf8 entries in the constant pool and when parsing line number
-// tables.
+// We generally try to create the oops directly when parsing, rather than
+// allocating temporary data structures and copying the bytes twice. A
+// temporary area is only needed when parsing utf8 entries in the constant
+// pool and when parsing line number tables.
// We add assert in debug mode when class format is not checked.
@@ -47,6 +47,10 @@
// - also used as the max version when running in jdk6
#define JAVA_6_VERSION 50
+// Used for backward compatibility reasons:
+// - to check NameAndType_info signatures more aggressively
+#define JAVA_7_VERSION 51
+
void ClassFileParser::parse_constant_pool_entries(constantPoolHandle cp, int length, TRAPS) {
// Use a local copy of ClassFileStream. It helps the C++ compiler to optimize
@@ -384,6 +388,20 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
verify_legal_class_name(class_name, CHECK_(nullHandle));
break;
}
+ case JVM_CONSTANT_NameAndType: {
+ if (_need_verify && _major_version >= JAVA_7_VERSION) {
+ int sig_index = cp->signature_ref_index_at(index);
+ int name_index = cp->name_ref_index_at(index);
+ symbolHandle name(THREAD, cp->symbol_at(name_index));
+ symbolHandle sig(THREAD, cp->symbol_at(sig_index));
+ if (sig->byte_at(0) == JVM_SIGNATURE_FUNC) {
+ verify_legal_method_signature(name, sig, CHECK_(nullHandle));
+ } else {
+ verify_legal_field_signature(name, sig, CHECK_(nullHandle));
+ }
+ }
+ break;
+ }
case JVM_CONSTANT_Fieldref:
case JVM_CONSTANT_Methodref:
case JVM_CONSTANT_InterfaceMethodref: {
@@ -396,10 +414,28 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
symbolHandle signature(THREAD, cp->symbol_at(signature_ref_index));
if (tag == JVM_CONSTANT_Fieldref) {
verify_legal_field_name(name, CHECK_(nullHandle));
- verify_legal_field_signature(name, signature, CHECK_(nullHandle));
+ if (_need_verify && _major_version >= JAVA_7_VERSION) {
+ // Signature is verified above, when iterating NameAndType_info.
+ // Need only to be sure it's the right type.
+ if (signature->byte_at(0) == JVM_SIGNATURE_FUNC) {
+ throwIllegalSignature(
+ "Field", name, signature, CHECK_(nullHandle));
+ }
+ } else {
+ verify_legal_field_signature(name, signature, CHECK_(nullHandle));
+ }
} else {
verify_legal_method_name(name, CHECK_(nullHandle));
- verify_legal_method_signature(name, signature, CHECK_(nullHandle));
+ if (_need_verify && _major_version >= JAVA_7_VERSION) {
+ // Signature is verified above, when iterating NameAndType_info.
+ // Need only to be sure it's the right type.
+ if (signature->byte_at(0) != JVM_SIGNATURE_FUNC) {
+ throwIllegalSignature(
+ "Method", name, signature, CHECK_(nullHandle));
+ }
+ } else {
+ verify_legal_method_signature(name, signature, CHECK_(nullHandle));
+ }
if (tag == JVM_CONSTANT_Methodref) {
// 4509014: If a class method name begins with '<', it must be "".
assert(!name.is_null(), "method name in constant pool is null");
@@ -1313,6 +1349,14 @@ u2* ClassFileParser::parse_checked_exceptions(u2* checked_exceptions_length,
return checked_exceptions_start;
}
+void ClassFileParser::throwIllegalSignature(
+ const char* type, symbolHandle name, symbolHandle sig, TRAPS) {
+ ResourceMark rm(THREAD);
+ Exceptions::fthrow(THREAD_AND_LOCATION,
+ vmSymbols::java_lang_ClassFormatError(),
+ "%s \"%s\" in class %s has illegal signature \"%s\"", type,
+ name->as_C_string(), _class_name->as_C_string(), sig->as_C_string());
+}
#define MAX_ARGS_SIZE 255
#define MAX_CODE_SIZE 65535
@@ -4058,14 +4102,7 @@ void ClassFileParser::verify_legal_field_signature(symbolHandle name, symbolHand
char* p = skip_over_field_signature(bytes, false, length, CHECK);
if (p == NULL || (p - bytes) != (int)length) {
- ResourceMark rm(THREAD);
- Exceptions::fthrow(
- THREAD_AND_LOCATION,
- vmSymbolHandles::java_lang_ClassFormatError(),
- "Field \"%s\" in class %s has illegal signature \"%s\"",
- name->as_C_string(), _class_name->as_C_string(), bytes
- );
- return;
+ throwIllegalSignature("Field", name, signature, CHECK);
}
}
@@ -4116,13 +4153,7 @@ int ClassFileParser::verify_legal_method_signature(symbolHandle name, symbolHand
}
}
// Report error
- ResourceMark rm(THREAD);
- Exceptions::fthrow(
- THREAD_AND_LOCATION,
- vmSymbolHandles::java_lang_ClassFormatError(),
- "Method \"%s\" in class %s has illegal signature \"%s\"",
- name->as_C_string(), _class_name->as_C_string(), p
- );
+ throwIllegalSignature("Method", name, signature, CHECK_0);
return 0;
}
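
For class files of major version 51 and up, the classFileParser hunks verify NameAndType_info descriptors eagerly and then, at Fieldref/Methodref entries, only re-check the descriptor kind: a descriptor whose first byte is '(' (JVM_SIGNATURE_FUNC) is a method signature, anything else is treated as a field signature, and a mismatch raises ClassFormatError through the new throwIllegalSignature helper. A toy stand-in for that kind check, with the error reduced to a boolean:

#include <cstdio>
#include <string>

// '(' is JVM_SIGNATURE_FUNC in the real headers; method descriptors always
// begin with their parameter list.
static bool is_method_descriptor(const std::string& sig) {
  return !sig.empty() && sig[0] == '(';
}

// Re-check only the descriptor kind at a Fieldref/Methodref entry; the full
// grammar was already verified when the NameAndType entry was parsed.
// Returns false where the real parser would raise ClassFormatError.
static bool check_ref_kind(bool is_field_ref, const std::string& sig) {
  if (is_field_ref) {
    return !is_method_descriptor(sig);          // a field must not look like "(...)..."
  }
  return is_method_descriptor(sig);             // a method must look like "(...)..."
}

int main() {
  std::printf("field ok:  %d\n", check_ref_kind(true,  "Ljava/lang/Object;"));
  std::printf("method ok: %d\n", check_ref_kind(false, "(I)V"));
  std::printf("mismatch:  %d\n", check_ref_kind(true,  "(I)V"));
  return 0;
}
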
diff --git a/hotspot/src/share/vm/classfile/classFileParser.hpp b/hotspot/src/share/vm/classfile/classFileParser.hpp
index 77d9c8cc77f..6bb45809303 100644
--- a/hotspot/src/share/vm/classfile/classFileParser.hpp
+++ b/hotspot/src/share/vm/classfile/classFileParser.hpp
@@ -195,6 +195,9 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
if (!b) { classfile_parse_error(msg, index, name, CHECK); }
}
+ void throwIllegalSignature(
+ const char* type, symbolHandle name, symbolHandle sig, TRAPS);
+
bool is_supported_version(u2 major, u2 minor);
bool has_illegal_visibility(jint flags);
diff --git a/hotspot/src/share/vm/classfile/verifier.cpp b/hotspot/src/share/vm/classfile/verifier.cpp
index efa5220dd6f..ca76e7148cd 100644
--- a/hotspot/src/share/vm/classfile/verifier.cpp
+++ b/hotspot/src/share/vm/classfile/verifier.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,8 @@
# include "incls/_precompiled.incl"
# include "incls/_verifier.cpp.incl"
+#define NOFAILOVER_MAJOR_VERSION 51
+
// Access to external entry for VerifyClassCodes - old byte code verifier
extern "C" {
@@ -91,7 +93,8 @@ bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool shoul
klass, message_buffer, message_buffer_len, THREAD);
split_verifier.verify_class(THREAD);
exception_name = split_verifier.result();
- if (FailOverToOldVerifier && !HAS_PENDING_EXCEPTION &&
+ if (klass->major_version() < NOFAILOVER_MAJOR_VERSION &&
+ FailOverToOldVerifier && !HAS_PENDING_EXCEPTION &&
(exception_name == vmSymbols::java_lang_VerifyError() ||
exception_name == vmSymbols::java_lang_ClassFormatError())) {
if (TraceClassInitialization) {
@@ -254,6 +257,9 @@ void ClassVerifier::verify_class(TRAPS) {
int num_methods = methods->length();
for (int index = 0; index < num_methods; index++) {
+ // Check for recursive re-verification before each method.
+ if (was_recursively_verified()) return;
+
methodOop m = (methodOop)methods->obj_at(index);
if (m->is_native() || m->is_abstract()) {
// If m is native or abstract, skip it. It is checked in class file
@@ -262,6 +268,12 @@ void ClassVerifier::verify_class(TRAPS) {
}
verify_method(methodHandle(THREAD, m), CHECK_VERIFY(this));
}
+
+ if (_verify_verbose || TraceClassInitialization) {
+ if (was_recursively_verified())
+ tty->print_cr("Recursive verification detected for: %s",
+ _klass->external_name());
+ }
}
void ClassVerifier::verify_method(methodHandle m, TRAPS) {
@@ -326,6 +338,9 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
// instruction in sequence
Bytecodes::Code opcode;
while (!bcs.is_last_bytecode()) {
+ // Check for recursive re-verification before each bytecode.
+ if (was_recursively_verified()) return;
+
opcode = bcs.raw_next();
u2 bci = bcs.bci();
@@ -410,13 +425,13 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
no_control_flow = false; break;
case Bytecodes::_ldc :
verify_ldc(
-            opcode, bcs.get_index(), &current_frame,
+            opcode, bcs.get_index_u1(), &current_frame,
cp, bci, CHECK_VERIFY(this));
no_control_flow = false; break;
case Bytecodes::_ldc_w :
case Bytecodes::_ldc2_w :
verify_ldc(
-            opcode, bcs.get_index_big(), &current_frame,
+            opcode, bcs.get_index_u2(), &current_frame,
cp, bci, CHECK_VERIFY(this));
no_control_flow = false; break;
case Bytecodes::_iload :
@@ -1182,7 +1197,7 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
no_control_flow = false; break;
case Bytecodes::_new :
{
- index = bcs.get_index_big();
+ index = bcs.get_index_u2();
verify_cp_class_type(index, cp, CHECK_VERIFY(this));
VerificationType new_class_type =
cp_index_to_type(index, cp, CHECK_VERIFY(this));
@@ -1202,7 +1217,7 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
no_control_flow = false; break;
case Bytecodes::_anewarray :
verify_anewarray(
-            bcs.get_index_big(), cp, &current_frame, CHECK_VERIFY(this));
+            bcs.get_index_u2(), cp, &current_frame, CHECK_VERIFY(this));
no_control_flow = false; break;
case Bytecodes::_arraylength :
type = current_frame.pop_stack(
@@ -1215,7 +1230,7 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
no_control_flow = false; break;
case Bytecodes::_checkcast :
{
- index = bcs.get_index_big();
+ index = bcs.get_index_u2();
verify_cp_class_type(index, cp, CHECK_VERIFY(this));
current_frame.pop_stack(
VerificationType::reference_check(), CHECK_VERIFY(this));
@@ -1225,7 +1240,7 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
no_control_flow = false; break;
}
case Bytecodes::_instanceof : {
- index = bcs.get_index_big();
+ index = bcs.get_index_u2();
verify_cp_class_type(index, cp, CHECK_VERIFY(this));
current_frame.pop_stack(
VerificationType::reference_check(), CHECK_VERIFY(this));
@@ -1240,7 +1255,7 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
no_control_flow = false; break;
case Bytecodes::_multianewarray :
{
- index = bcs.get_index_big();
+ index = bcs.get_index_u2();
u2 dim = *(bcs.bcp()+3);
verify_cp_class_type(index, cp, CHECK_VERIFY(this));
VerificationType new_array_type =
@@ -1299,7 +1314,7 @@ char* ClassVerifier::generate_code_data(methodHandle m, u4 code_length, TRAPS) {
while (!bcs.is_last_bytecode()) {
if (bcs.raw_next() != Bytecodes::_illegal) {
int bci = bcs.bci();
- if (bcs.code() == Bytecodes::_new) {
+ if (bcs.raw_code() == Bytecodes::_new) {
code_data[bci] = NEW_OFFSET;
} else {
code_data[bci] = BYTECODE_OFFSET;
@@ -1470,20 +1485,9 @@ void ClassVerifier::verify_cp_type(
// In some situations, bytecode rewriting may occur while we're verifying.
// In this case, a constant pool cache exists and some indices refer to that
- // instead. Get the original index for the tag check
- constantPoolCacheOop cache = cp->cache();
- if (cache != NULL &&
- ((types == (1 << JVM_CONSTANT_InterfaceMethodref)) ||
- (types == (1 << JVM_CONSTANT_Methodref)) ||
- (types == (1 << JVM_CONSTANT_Fieldref)))) {
- int native_index = index;
- if (Bytes::is_Java_byte_ordering_different()) {
- native_index = Bytes::swap_u2(index);
- }
- assert((native_index >= 0) && (native_index < cache->length()),
- "Must be a legal index into the cp cache");
- index = cache->entry_at(native_index)->constant_pool_index();
- }
+ // instead. Be sure we don't pick up such indices by accident.
+ // We must check was_recursively_verified() before we get here.
+ guarantee(cp->cache() == NULL, "not rewritten yet");
verify_cp_index(cp, index, CHECK_VERIFY(this));
unsigned int tag = cp->tag_at(index).value();
@@ -1654,7 +1658,7 @@ void ClassVerifier::verify_switch(
int keys, delta;
current_frame->pop_stack(
VerificationType::integer_type(), CHECK_VERIFY(this));
- if (bcs->code() == Bytecodes::_tableswitch) {
+ if (bcs->raw_code() == Bytecodes::_tableswitch) {
jint low = (jint)Bytes::get_Java_u4(aligned_bcp + jintSize);
jint high = (jint)Bytes::get_Java_u4(aligned_bcp + 2*jintSize);
if (low > high) {
@@ -1710,7 +1714,7 @@ void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,
StackMapFrame* current_frame,
constantPoolHandle cp,
TRAPS) {
- u2 index = bcs->get_index_big();
+ u2 index = bcs->get_index_u2();
verify_cp_type(index, cp, 1 << JVM_CONSTANT_Fieldref, CHECK_VERIFY(this));
// Get field name and signature
@@ -1750,7 +1754,7 @@ void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,
&sig_stream, field_type, CHECK_VERIFY(this));
u2 bci = bcs->bci();
bool is_assignable;
- switch (bcs->code()) {
+ switch (bcs->raw_code()) {
case Bytecodes::_getstatic: {
for (int i = 0; i < n; i++) {
current_frame->push_stack(field_type[i], CHECK_VERIFY(this));
@@ -1870,7 +1874,7 @@ void ClassVerifier::verify_invoke_init(
ref_class_type.name(), CHECK_VERIFY(this));
methodOop m = instanceKlass::cast(ref_klass)->uncached_lookup_method(
vmSymbols::object_initializer_name(),
- cp->signature_ref_at(bcs->get_index_big()));
+ cp->signature_ref_at(bcs->get_index_u2()));
instanceKlassHandle mh(THREAD, m->method_holder());
if (m->is_protected() && !mh->is_same_class_package(_klass())) {
bool assignable = current_type().is_assignable_from(
@@ -1893,8 +1897,8 @@ void ClassVerifier::verify_invoke_instructions(
bool *this_uninit, VerificationType return_type,
constantPoolHandle cp, TRAPS) {
// Make sure the constant pool item is the right type
- u2 index = bcs->get_index_big();
- Bytecodes::Code opcode = bcs->code();
+ u2 index = bcs->get_index_u2();
+ Bytecodes::Code opcode = bcs->raw_code();
unsigned int types = (opcode == Bytecodes::_invokeinterface
? 1 << JVM_CONSTANT_InterfaceMethodref
: opcode == Bytecodes::_invokedynamic
diff --git a/hotspot/src/share/vm/classfile/verifier.hpp b/hotspot/src/share/vm/classfile/verifier.hpp
index f4a6ea4eee2..59cd08559eb 100644
--- a/hotspot/src/share/vm/classfile/verifier.hpp
+++ b/hotspot/src/share/vm/classfile/verifier.hpp
@@ -158,6 +158,16 @@ class ClassVerifier : public StackObj {
methodHandle _method; // current method being verified
VerificationType _this_type; // the verification type of the current class
+ // Some recursive calls from the verifier to the name resolver
+ // can cause the current class to be re-verified and rewritten.
+ // If this happens, the original verification should not continue,
+ // because constant pool indexes will have changed.
+ // The rewriter is preceded by the verifier. If the verifier throws
+ // an error, rewriting is prevented. Also, rewriting always precedes
+ // bytecode execution or compilation. Thus, is_rewritten implies
+ // that a class has been verified and prepared for execution.
+ bool was_recursively_verified() { return _klass->is_rewritten(); }
+
public:
enum {
BYTECODE_OFFSET = 1,
diff --git a/hotspot/src/share/vm/code/codeBlob.cpp b/hotspot/src/share/vm/code/codeBlob.cpp
index f9da9ea5591..2b8410105ce 100644
--- a/hotspot/src/share/vm/code/codeBlob.cpp
+++ b/hotspot/src/share/vm/code/codeBlob.cpp
@@ -66,8 +66,6 @@ CodeBlob::CodeBlob(const char* name, int header_size, int size, int frame_comple
_relocation_size = locs_size;
_instructions_offset = align_code_offset(header_size + locs_size);
_data_offset = size;
- _oops_offset = size;
- _oops_length = 0;
_frame_size = 0;
set_oop_maps(NULL);
}
@@ -94,9 +92,6 @@ CodeBlob::CodeBlob(
_relocation_size = round_to(cb->total_relocation_size(), oopSize);
_instructions_offset = align_code_offset(header_size + _relocation_size);
_data_offset = _instructions_offset + round_to(cb->total_code_size(), oopSize);
- _oops_offset = _size - round_to(cb->total_oop_size(), oopSize);
- _oops_length = 0; // temporary, until the copy_oops handshake
- assert(_oops_offset >= _data_offset, "codeBlob is too small");
assert(_data_offset <= size, "codeBlob is too small");
cb->copy_code_and_locs_to(this);
@@ -131,99 +126,6 @@ void CodeBlob::flush() {
}
-// Promote one word from an assembly-time handle to a live embedded oop.
-inline void CodeBlob::initialize_immediate_oop(oop* dest, jobject handle) {
- if (handle == NULL ||
- // As a special case, IC oops are initialized to 1 or -1.
- handle == (jobject) Universe::non_oop_word()) {
- (*dest) = (oop)handle;
- } else {
- (*dest) = JNIHandles::resolve_non_null(handle);
- }
-}
-
-
-void CodeBlob::copy_oops(GrowableArray<jobject>* array) {
- assert(_oops_length == 0, "do this handshake just once, please");
- int length = array->length();
- assert((address)(oops_begin() + length) <= data_end(), "oops big enough");
- oop* dest = oops_begin();
- for (int index = 0 ; index < length; index++) {
- initialize_immediate_oop(&dest[index], array->at(index));
- }
- _oops_length = length;
-
- // Now we can fix up all the oops in the code.
- // We need to do this in the code because
- // the assembler uses jobjects as placeholders.
- // The code and relocations have already been
- // initialized by the CodeBlob constructor,
- // so it is valid even at this early point to
- // iterate over relocations and patch the code.
- fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
-}
-
-
-relocInfo::relocType CodeBlob::reloc_type_for_address(address pc) {
- RelocIterator iter(this, pc, pc+1);
- while (iter.next()) {
- return (relocInfo::relocType) iter.type();
- }
- // No relocation info found for pc
- ShouldNotReachHere();
- return relocInfo::none; // dummy return value
-}
-
-
-bool CodeBlob::is_at_poll_return(address pc) {
- RelocIterator iter(this, pc, pc+1);
- while (iter.next()) {
- if (iter.type() == relocInfo::poll_return_type)
- return true;
- }
- return false;
-}
-
-
-bool CodeBlob::is_at_poll_or_poll_return(address pc) {
- RelocIterator iter(this, pc, pc+1);
- while (iter.next()) {
- relocInfo::relocType t = iter.type();
- if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
- return true;
- }
- return false;
-}
-
-
-void CodeBlob::fix_oop_relocations(address begin, address end,
- bool initialize_immediates) {
- // re-patch all oop-bearing instructions, just in case some oops moved
- RelocIterator iter(this, begin, end);
- while (iter.next()) {
- if (iter.type() == relocInfo::oop_type) {
- oop_Relocation* reloc = iter.oop_reloc();
- if (initialize_immediates && reloc->oop_is_immediate()) {
- oop* dest = reloc->oop_addr();
- initialize_immediate_oop(dest, (jobject) *dest);
- }
- // Refresh the oop-related bits of this instruction.
- reloc->fix_oop_relocation();
- }
-
- // There must not be any interfering patches or breakpoints.
- assert(!(iter.type() == relocInfo::breakpoint_type
- && iter.breakpoint_reloc()->active()),
- "no active breakpoint");
- }
-}
-
-void CodeBlob::do_unloading(BoolObjectClosure* is_alive,
- OopClosure* keep_alive,
- bool unloading_occurred) {
- ShouldNotReachHere();
-}
-
OopMap* CodeBlob::oop_map_for_return_address(address return_address) {
address pc = return_address ;
assert (oop_maps() != NULL, "nope");
diff --git a/hotspot/src/share/vm/code/codeBlob.hpp b/hotspot/src/share/vm/code/codeBlob.hpp
index 5f00a5e3fd4..1fc0c2f0c1e 100644
--- a/hotspot/src/share/vm/code/codeBlob.hpp
+++ b/hotspot/src/share/vm/code/codeBlob.hpp
@@ -54,17 +54,12 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
// that range. There is a similar range(s) on returns
// which we don't detect.
int _data_offset; // offset to where data region begins
- int _oops_offset; // offset to where embedded oop table begins (inside data)
- int _oops_length; // number of embedded oops
int _frame_size; // size of stack frame
OopMapSet* _oop_maps; // OopMap for this CodeBlob
CodeComments _comments;
friend class OopRecorder;
- void fix_oop_relocations(address begin, address end, bool initialize_immediates);
- inline void initialize_immediate_oop(oop* dest, jobject handle);
-
public:
// Returns the space needed for CodeBlob
static unsigned int allocation_size(CodeBuffer* cb, int header_size);
@@ -115,14 +110,11 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
address instructions_end() const { return (address) header_begin() + _data_offset; }
address data_begin() const { return (address) header_begin() + _data_offset; }
address data_end() const { return (address) header_begin() + _size; }
- oop* oops_begin() const { return (oop*) (header_begin() + _oops_offset); }
- oop* oops_end() const { return oops_begin() + _oops_length; }
// Offsets
int relocation_offset() const { return _header_size; }
int instructions_offset() const { return _instructions_offset; }
int data_offset() const { return _data_offset; }
- int oops_offset() const { return _oops_offset; }
// Sizes
int size() const { return _size; }
@@ -130,40 +122,16 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
int relocation_size() const { return (address) relocation_end() - (address) relocation_begin(); }
int instructions_size() const { return instructions_end() - instructions_begin(); }
int data_size() const { return data_end() - data_begin(); }
- int oops_size() const { return (address) oops_end() - (address) oops_begin(); }
// Containment
bool blob_contains(address addr) const { return header_begin() <= addr && addr < data_end(); }
bool relocation_contains(relocInfo* addr) const{ return relocation_begin() <= addr && addr < relocation_end(); }
bool instructions_contains(address addr) const { return instructions_begin() <= addr && addr < instructions_end(); }
bool data_contains(address addr) const { return data_begin() <= addr && addr < data_end(); }
- bool oops_contains(oop* addr) const { return oops_begin() <= addr && addr < oops_end(); }
bool contains(address addr) const { return instructions_contains(addr); }
bool is_frame_complete_at(address addr) const { return instructions_contains(addr) &&
addr >= instructions_begin() + _frame_complete_offset; }
- // Relocation support
- void fix_oop_relocations(address begin, address end) {
- fix_oop_relocations(begin, end, false);
- }
- void fix_oop_relocations() {
- fix_oop_relocations(NULL, NULL, false);
- }
- relocInfo::relocType reloc_type_for_address(address pc);
- bool is_at_poll_return(address pc);
- bool is_at_poll_or_poll_return(address pc);
-
- // Support for oops in scopes and relocs:
- // Note: index 0 is reserved for null.
- oop oop_at(int index) const { return index == 0? (oop)NULL: *oop_addr_at(index); }
- oop* oop_addr_at(int index) const{ // for GC
- // relocation indexes are biased by 1 (because 0 is reserved)
- assert(index > 0 && index <= _oops_length, "must be a valid non-zero index");
- return &oops_begin()[index-1];
- }
-
-  void copy_oops(GrowableArray<jobject>* oops);
-
// CodeCache support: really only used by the nmethods, but in order to get
// asserts and certain bookkeeping to work in the CodeCache they are defined
// virtual here.
@@ -175,12 +143,6 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
// GC support
virtual bool is_alive() const = 0;
- virtual void do_unloading(BoolObjectClosure* is_alive,
- OopClosure* keep_alive,
- bool unloading_occurred);
- virtual void oops_do(OopClosure* f) = 0;
- // (All CodeBlob subtypes other than NMethod currently have
- // an empty oops_do() method.
// OopMap for frame
OopMapSet* oop_maps() const { return _oop_maps; }
@@ -245,11 +207,6 @@ class BufferBlob: public CodeBlob {
// GC/Verification support
void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { /* nothing to do */ }
bool is_alive() const { return true; }
- void do_unloading(BoolObjectClosure* is_alive,
- OopClosure* keep_alive,
- bool unloading_occurred) { /* do nothing */ }
-
- void oops_do(OopClosure* f) { /* do nothing*/ }
void verify();
void print() const PRODUCT_RETURN;
@@ -334,10 +291,6 @@ class RuntimeStub: public CodeBlob {
// GC/Verification support
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* nothing to do */ }
bool is_alive() const { return true; }
- void do_unloading(BoolObjectClosure* is_alive,
- OopClosure* keep_alive,
- bool unloading_occurred) { /* do nothing */ }
- void oops_do(OopClosure* f) { /* do-nothing*/ }
void verify();
void print() const PRODUCT_RETURN;
@@ -363,9 +316,6 @@ class SingletonBlob: public CodeBlob {
{};
bool is_alive() const { return true; }
- void do_unloading(BoolObjectClosure* is_alive,
- OopClosure* keep_alive,
- bool unloading_occurred) { /* do-nothing*/ }
void verify(); // does nothing
void print() const PRODUCT_RETURN;
@@ -423,9 +373,6 @@ class DeoptimizationBlob: public SingletonBlob {
// GC for args
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* Nothing to do */ }
- // Iteration
- void oops_do(OopClosure* f) {}
-
// Printing
void print_value_on(outputStream* st) const PRODUCT_RETURN;
@@ -477,9 +424,6 @@ class UncommonTrapBlob: public SingletonBlob {
// Typing
bool is_uncommon_trap_stub() const { return true; }
-
- // Iteration
- void oops_do(OopClosure* f) {}
};
@@ -512,9 +456,6 @@ class ExceptionBlob: public SingletonBlob {
// Typing
bool is_exception_stub() const { return true; }
-
- // Iteration
- void oops_do(OopClosure* f) {}
};
#endif // COMPILER2
@@ -548,7 +489,4 @@ class SafepointBlob: public SingletonBlob {
// Typing
bool is_safepoint_stub() const { return true; }
-
- // Iteration
- void oops_do(OopClosure* f) {}
};
diff --git a/hotspot/src/share/vm/code/codeCache.cpp b/hotspot/src/share/vm/code/codeCache.cpp
index 7633058295a..54767b36929 100644
--- a/hotspot/src/share/vm/code/codeCache.cpp
+++ b/hotspot/src/share/vm/code/codeCache.cpp
@@ -74,12 +74,12 @@ class CodeBlob_sizes {
total_size += cb->size();
header_size += cb->header_size();
relocation_size += cb->relocation_size();
- scopes_oop_size += cb->oops_size();
if (cb->is_nmethod()) {
- nmethod *nm = (nmethod*)cb;
+ nmethod* nm = cb->as_nmethod_or_null();
code_size += nm->code_size();
stub_size += nm->stub_size();
+ scopes_oop_size += nm->oops_size();
scopes_data_size += nm->scopes_data_size();
scopes_pcs_size += nm->scopes_pcs_size();
} else {
@@ -262,14 +262,14 @@ int CodeCache::alignment_offset() {
}
-// Mark code blobs for unloading if they contain otherwise
-// unreachable oops.
+// Mark nmethods for unloading if they contain otherwise unreachable
+// oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
bool unloading_occurred) {
assert_locked_or_safepoint(CodeCache_lock);
- FOR_ALL_ALIVE_BLOBS(cb) {
- cb->do_unloading(is_alive, keep_alive, unloading_occurred);
+ FOR_ALL_ALIVE_NMETHODS(nm) {
+ nm->do_unloading(is_alive, keep_alive, unloading_occurred);
}
}
@@ -509,9 +509,9 @@ void CodeCache::gc_epilogue() {
if (needs_cache_clean()) {
nm->cleanup_inline_caches();
}
- debug_only(nm->verify();)
+ DEBUG_ONLY(nm->verify());
+ nm->fix_oop_relocations();
}
- cb->fix_oop_relocations();
}
set_needs_cache_clean(false);
prune_scavenge_root_nmethods();
diff --git a/hotspot/src/share/vm/code/compiledIC.cpp b/hotspot/src/share/vm/code/compiledIC.cpp
index 9abc5f8e3ae..d2c8e2752ef 100644
--- a/hotspot/src/share/vm/code/compiledIC.cpp
+++ b/hotspot/src/share/vm/code/compiledIC.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -441,11 +441,11 @@ void CompiledIC::compute_monomorphic_entry(methodHandle method,
}
-inline static RelocIterator parse_ic(CodeBlob* code, address ic_call, oop* &_oop_addr, bool *is_optimized) {
+inline static RelocIterator parse_ic(nmethod* nm, address ic_call, oop* &_oop_addr, bool *is_optimized) {
address first_oop = NULL;
// Mergers please note: Sun SC5.x CC insists on an lvalue for a reference parameter.
- CodeBlob *code1 = code;
- return virtual_call_Relocation::parse_ic(code1, ic_call, first_oop, _oop_addr, is_optimized);
+ nmethod* tmp_nm = nm;
+ return virtual_call_Relocation::parse_ic(tmp_nm, ic_call, first_oop, _oop_addr, is_optimized);
}
CompiledIC::CompiledIC(NativeCall* ic_call)
diff --git a/hotspot/src/share/vm/code/nmethod.cpp b/hotspot/src/share/vm/code/nmethod.cpp
index 50d4d01fc6c..106ff2f032e 100644
--- a/hotspot/src/share/vm/code/nmethod.cpp
+++ b/hotspot/src/share/vm/code/nmethod.cpp
@@ -99,12 +99,12 @@ struct nmethod_stats_struct {
code_size += nm->code_size();
stub_size += nm->stub_size();
consts_size += nm->consts_size();
+ oops_size += nm->oops_size();
scopes_data_size += nm->scopes_data_size();
scopes_pcs_size += nm->scopes_pcs_size();
dependencies_size += nm->dependencies_size();
handler_table_size += nm->handler_table_size();
nul_chk_table_size += nm->nul_chk_table_size();
- oops_size += nm->oops_size();
}
void print_nmethod_stats() {
if (nmethod_count == 0) return;
@@ -114,12 +114,12 @@ struct nmethod_stats_struct {
if (code_size != 0) tty->print_cr(" main code = %d", code_size);
if (stub_size != 0) tty->print_cr(" stub code = %d", stub_size);
if (consts_size != 0) tty->print_cr(" constants = %d", consts_size);
+ if (oops_size != 0) tty->print_cr(" oops = %d", oops_size);
if (scopes_data_size != 0) tty->print_cr(" scopes data = %d", scopes_data_size);
if (scopes_pcs_size != 0) tty->print_cr(" scopes pcs = %d", scopes_pcs_size);
if (dependencies_size != 0) tty->print_cr(" dependencies = %d", dependencies_size);
if (handler_table_size != 0) tty->print_cr(" handler table = %d", handler_table_size);
if (nul_chk_table_size != 0) tty->print_cr(" nul chk table = %d", nul_chk_table_size);
- if (oops_size != 0) tty->print_cr(" oops = %d", oops_size);
}
int native_nmethod_count;
@@ -600,7 +600,8 @@ nmethod::nmethod(
#endif // def HAVE_DTRACE_H
_stub_offset = data_offset();
_consts_offset = data_offset();
- _scopes_data_offset = data_offset();
+ _oops_offset = data_offset();
+ _scopes_data_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize);
_scopes_pcs_offset = _scopes_data_offset;
_dependencies_offset = _scopes_pcs_offset;
_handler_table_offset = _dependencies_offset;
@@ -690,7 +691,8 @@ nmethod::nmethod(
_orig_pc_offset = 0;
_stub_offset = data_offset();
_consts_offset = data_offset();
- _scopes_data_offset = data_offset();
+ _oops_offset = data_offset();
+ _scopes_data_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize);
_scopes_pcs_offset = _scopes_data_offset;
_dependencies_offset = _scopes_pcs_offset;
_handler_table_offset = _dependencies_offset;
@@ -805,8 +807,9 @@ nmethod::nmethod(
_unwind_handler_offset = -1;
}
_consts_offset = instructions_offset() + code_buffer->total_offset_of(code_buffer->consts()->start());
- _scopes_data_offset = data_offset();
- _scopes_pcs_offset = _scopes_data_offset + round_to(debug_info->data_size (), oopSize);
+ _oops_offset = data_offset();
+ _scopes_data_offset = _oops_offset + round_to(code_buffer->total_oop_size (), oopSize);
+ _scopes_pcs_offset = _scopes_data_offset + round_to(debug_info->data_size (), oopSize);
_dependencies_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size());
_handler_table_offset = _dependencies_offset + round_to(dependencies->size_in_bytes (), oopSize);
_nul_chk_table_offset = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
@@ -990,6 +993,79 @@ void nmethod::set_version(int v) {
}
+// Promote one word from an assembly-time handle to a live embedded oop.
+inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
+ if (handle == NULL ||
+ // As a special case, IC oops are initialized to 1 or -1.
+ handle == (jobject) Universe::non_oop_word()) {
+ (*dest) = (oop) handle;
+ } else {
+ (*dest) = JNIHandles::resolve_non_null(handle);
+ }
+}
+
+
+void nmethod::copy_oops(GrowableArray<jobject>* array) {
+ //assert(oops_size() == 0, "do this handshake just once, please");
+ int length = array->length();
+ assert((address)(oops_begin() + length) <= data_end(), "oops big enough");
+ oop* dest = oops_begin();
+ for (int index = 0 ; index < length; index++) {
+ initialize_immediate_oop(&dest[index], array->at(index));
+ }
+
+ // Now we can fix up all the oops in the code. We need to do this
+ // in the code because the assembler uses jobjects as placeholders.
+ // The code and relocations have already been initialized by the
+ // CodeBlob constructor, so it is valid even at this early point to
+ // iterate over relocations and patch the code.
+ fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
+}
+
+
+bool nmethod::is_at_poll_return(address pc) {
+ RelocIterator iter(this, pc, pc+1);
+ while (iter.next()) {
+ if (iter.type() == relocInfo::poll_return_type)
+ return true;
+ }
+ return false;
+}
+
+
+bool nmethod::is_at_poll_or_poll_return(address pc) {
+ RelocIterator iter(this, pc, pc+1);
+ while (iter.next()) {
+ relocInfo::relocType t = iter.type();
+ if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
+ return true;
+ }
+ return false;
+}
+
+
+void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
+ // re-patch all oop-bearing instructions, just in case some oops moved
+ RelocIterator iter(this, begin, end);
+ while (iter.next()) {
+ if (iter.type() == relocInfo::oop_type) {
+ oop_Relocation* reloc = iter.oop_reloc();
+ if (initialize_immediates && reloc->oop_is_immediate()) {
+ oop* dest = reloc->oop_addr();
+ initialize_immediate_oop(dest, (jobject) *dest);
+ }
+ // Refresh the oop-related bits of this instruction.
+ reloc->fix_oop_relocation();
+ }
+
+ // There must not be any interfering patches or breakpoints.
+ assert(!(iter.type() == relocInfo::breakpoint_type
+ && iter.breakpoint_reloc()->active()),
+ "no active breakpoint");
+ }
+}
+
+
ScopeDesc* nmethod::scope_desc_at(address pc) {
PcDesc* pd = pc_desc_at(pc);
guarantee(pd != NULL, "scope must be present");
@@ -1266,19 +1342,7 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
// and it hasn't already been reported for this nmethod then report it now.
// (the event may have been reported earlier if the GC marked it for unloading).
if (state == zombie) {
-
- DTRACE_METHOD_UNLOAD_PROBE(method());
-
- if (JvmtiExport::should_post_compiled_method_unload() &&
- !unload_reported()) {
- assert(method() != NULL, "checking");
- {
- HandleMark hm;
- JvmtiExport::post_compiled_method_unload_at_safepoint(
- method()->jmethod_id(), code_begin());
- }
- set_unload_reported();
- }
+ post_compiled_method_unload();
}
@@ -1430,6 +1494,12 @@ void nmethod::post_compiled_method_load_event() {
}
void nmethod::post_compiled_method_unload() {
+ if (unload_reported()) {
+ // During unloading we transition to unloaded and then to zombie
+ // and the unloading is reported during the first transition.
+ return;
+ }
+
assert(_method != NULL && !is_unloaded(), "just checking");
DTRACE_METHOD_UNLOAD_PROBE(method());
@@ -1439,8 +1509,7 @@ void nmethod::post_compiled_method_unload() {
if (JvmtiExport::should_post_compiled_method_unload()) {
assert(!unload_reported(), "already unloaded");
HandleMark hm;
- JvmtiExport::post_compiled_method_unload_at_safepoint(
- method()->jmethod_id(), code_begin());
+ JvmtiExport::post_compiled_method_unload(method()->jmethod_id(), code_begin());
}
// The JVMTI CompiledMethodUnload event can be enabled or disabled at
@@ -2282,6 +2351,10 @@ void nmethod::print() const {
consts_begin(),
consts_end(),
consts_size());
+ if (oops_size () > 0) tty->print_cr(" oops [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+ oops_begin(),
+ oops_end(),
+ oops_size());
if (scopes_data_size () > 0) tty->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
scopes_data_begin(),
scopes_data_end(),
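
Taken together, the nmethod.cpp changes route the compiler's recorded oops into a table embedded
in the nmethod and patch the code against that table at install time. A minimal sketch of the
hand-off, assuming HotSpot's usual headers and that nm and code_buffer are the freshly created
nmethod and its CodeBuffer (the oop_recorder() accessor and this exact call site are illustrative,
not part of this changeset):

    OopRecorder* recorder = code_buffer->oop_recorder();  // handles accumulated during assembly
    recorder->copy_to(nm);       // => nm->copy_oops(handles): each jobject is resolved once
                                 //    via JNIHandles::resolve_non_null into oops_begin()[i]
    nm->fix_oop_relocations();   // walk oop_type relocations and point the code at the table;
                                 //    GC reuses the same entry point after it moves objects
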
diff --git a/hotspot/src/share/vm/code/nmethod.hpp b/hotspot/src/share/vm/code/nmethod.hpp
index b74541d2abd..28f60296f55 100644
--- a/hotspot/src/share/vm/code/nmethod.hpp
+++ b/hotspot/src/share/vm/code/nmethod.hpp
@@ -105,6 +105,7 @@ struct nmFlags {
// [Relocation]
// - relocation information
// - constant part (doubles, longs and floats used in nmethod)
+// - oop table
// [Code]
// - code body
// - exception handler
@@ -161,6 +162,7 @@ class nmethod : public CodeBlob {
#endif // def HAVE_DTRACE_H
int _stub_offset;
int _consts_offset;
+ int _oops_offset; // offset to where embedded oop table begins (inside data)
int _scopes_data_offset;
int _scopes_pcs_offset;
int _dependencies_offset;
@@ -347,7 +349,10 @@ class nmethod : public CodeBlob {
address stub_begin () const { return header_begin() + _stub_offset ; }
address stub_end () const { return header_begin() + _consts_offset ; }
address consts_begin () const { return header_begin() + _consts_offset ; }
- address consts_end () const { return header_begin() + _scopes_data_offset ; }
+ address consts_end () const { return header_begin() + _oops_offset ; }
+ oop* oops_begin () const { return (oop*) (header_begin() + _oops_offset) ; }
+ oop* oops_end () const { return (oop*) (header_begin() + _scopes_data_offset) ; }
+
address scopes_data_begin () const { return header_begin() + _scopes_data_offset ; }
address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; }
PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset ); }
@@ -359,20 +364,24 @@ class nmethod : public CodeBlob {
address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset ; }
address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; }
- int code_size () const { return code_end () - code_begin (); }
- int stub_size () const { return stub_end () - stub_begin (); }
- int consts_size () const { return consts_end () - consts_begin (); }
- int scopes_data_size () const { return scopes_data_end () - scopes_data_begin (); }
- int scopes_pcs_size () const { return (intptr_t)scopes_pcs_end () - (intptr_t)scopes_pcs_begin (); }
- int dependencies_size () const { return dependencies_end () - dependencies_begin (); }
- int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
- int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }
+ // Sizes
+ int code_size () const { return code_end () - code_begin (); }
+ int stub_size () const { return stub_end () - stub_begin (); }
+ int consts_size () const { return consts_end () - consts_begin (); }
+ int oops_size () const { return (address) oops_end () - (address) oops_begin (); }
+ int scopes_data_size () const { return scopes_data_end () - scopes_data_begin (); }
+ int scopes_pcs_size () const { return (intptr_t) scopes_pcs_end () - (intptr_t) scopes_pcs_begin (); }
+ int dependencies_size () const { return dependencies_end () - dependencies_begin (); }
+ int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
+ int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }
int total_size () const;
+ // Containment
bool code_contains (address addr) const { return code_begin () <= addr && addr < code_end (); }
bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
+ bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
@@ -431,6 +440,29 @@ class nmethod : public CodeBlob {
int version() const { return flags.version; }
void set_version(int v);
+ // Support for oops in scopes and relocs:
+ // Note: index 0 is reserved for null.
+ oop oop_at(int index) const { return index == 0 ? (oop) NULL: *oop_addr_at(index); }
+ oop* oop_addr_at(int index) const { // for GC
+ // relocation indexes are biased by 1 (because 0 is reserved)
+ assert(index > 0 && index <= oops_size(), "must be a valid non-zero index");
+ return &oops_begin()[index - 1];
+ }
+
+ void copy_oops(GrowableArray<jobject>* oops);
+
+ // Relocation support
+private:
+ void fix_oop_relocations(address begin, address end, bool initialize_immediates);
+ inline void initialize_immediate_oop(oop* dest, jobject handle);
+
+public:
+ void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
+ void fix_oop_relocations() { fix_oop_relocations(NULL, NULL, false); }
+
+ bool is_at_poll_return(address pc);
+ bool is_at_poll_or_poll_return(address pc);
+
// Non-perm oop support
bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; }
protected:
@@ -511,8 +543,8 @@ class nmethod : public CodeBlob {
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
OopClosure* f);
- virtual void oops_do(OopClosure* f) { oops_do(f, false); }
- void oops_do(OopClosure* f, bool do_strong_roots_only);
+ void oops_do(OopClosure* f) { oops_do(f, false); }
+ void oops_do(OopClosure* f, bool do_strong_roots_only);
bool detect_scavenge_root_oops();
void verify_scavenge_root_oops() PRODUCT_RETURN;
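
The oop table sits between the constants and the scopes data, and relocation indexes into it are
biased by one because index 0 is reserved for NULL. A small illustration of the accessors above,
assuming an installed nmethod* nm:

    oop  none  = nm->oop_at(0);                       // always NULL: index 0 is reserved
    oop  first = nm->oop_at(1);                       // reads oops_begin()[0]
    oop* slot  = nm->oop_addr_at(1);                  // same slot, as an address the GC can update
    int  count = nm->oops_size() / (int) sizeof(oop); // oops_size() is in bytes, so divide for slots
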
diff --git a/hotspot/src/share/vm/code/oopRecorder.cpp b/hotspot/src/share/vm/code/oopRecorder.cpp
index 4a368c4eeb9..7688079b1f4 100644
--- a/hotspot/src/share/vm/code/oopRecorder.cpp
+++ b/hotspot/src/share/vm/code/oopRecorder.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -50,10 +50,10 @@ int OopRecorder::oop_size() {
return _handles->length() * sizeof(oop);
}
-void OopRecorder::copy_to(CodeBlob* code) {
+void OopRecorder::copy_to(nmethod* nm) {
assert(_complete, "must be frozen");
maybe_initialize(); // get non-null handles, even if we have no oops
- code->copy_oops(_handles);
+ nm->copy_oops(_handles);
}
void OopRecorder::maybe_initialize() {
diff --git a/hotspot/src/share/vm/code/oopRecorder.hpp b/hotspot/src/share/vm/code/oopRecorder.hpp
index da686476df8..fda4d290514 100644
--- a/hotspot/src/share/vm/code/oopRecorder.hpp
+++ b/hotspot/src/share/vm/code/oopRecorder.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -70,8 +70,8 @@ class OopRecorder : public ResourceObj {
return _handles->length() + first_index;
}
- // copy the generated oop table to CodeBlob
- void copy_to(CodeBlob* code); // => code->copy_oops(_handles)
+ // copy the generated oop table to nmethod
+ void copy_to(nmethod* nm); // => nm->copy_oops(_handles)
bool is_unused() { return _handles == NULL && !_complete; }
#ifdef ASSERT
diff --git a/hotspot/src/share/vm/code/relocInfo.cpp b/hotspot/src/share/vm/code/relocInfo.cpp
index ef59554985c..6afb3c54d75 100644
--- a/hotspot/src/share/vm/code/relocInfo.cpp
+++ b/hotspot/src/share/vm/code/relocInfo.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -115,24 +115,25 @@ void relocInfo::remove_reloc_info_for_address(RelocIterator *itr, address pc, re
// ----------------------------------------------------------------------------------------------------
// Implementation of RelocIterator
-void RelocIterator::initialize(CodeBlob* cb, address begin, address limit) {
+void RelocIterator::initialize(nmethod* nm, address begin, address limit) {
initialize_misc();
- if (cb == NULL && begin != NULL) {
- // allow CodeBlob to be deduced from beginning address
- cb = CodeCache::find_blob(begin);
+ if (nm == NULL && begin != NULL) {
+ // allow nmethod to be deduced from beginning address
+ CodeBlob* cb = CodeCache::find_blob(begin);
+ nm = cb->as_nmethod_or_null();
}
- assert(cb != NULL, "must be able to deduce nmethod from other arguments");
+ assert(nm != NULL, "must be able to deduce nmethod from other arguments");
- _code = cb;
- _current = cb->relocation_begin()-1;
- _end = cb->relocation_end();
- _addr = (address) cb->instructions_begin();
+ _code = nm;
+ _current = nm->relocation_begin() - 1;
+ _end = nm->relocation_end();
+ _addr = (address) nm->instructions_begin();
assert(!has_current(), "just checking");
- address code_end = cb->instructions_end();
+ address code_end = nm->instructions_end();
- assert(begin == NULL || begin >= cb->instructions_begin(), "in bounds");
+ assert(begin == NULL || begin >= nm->instructions_begin(), "in bounds");
// FIX THIS assert(limit == NULL || limit <= code_end, "in bounds");
set_limits(begin, limit);
}
@@ -754,7 +755,7 @@ oop* oop_Relocation::oop_addr() {
// oop is stored in the code stream
return (oop*) pd_address_in_code();
} else {
- // oop is stored in table at CodeBlob::oops_begin
+ // oop is stored in table at nmethod::oops_begin
return code()->oop_addr_at(n);
}
}
@@ -776,26 +777,28 @@ void oop_Relocation::fix_oop_relocation() {
}
-RelocIterator virtual_call_Relocation::parse_ic(CodeBlob* &code, address &ic_call, address &first_oop,
+RelocIterator virtual_call_Relocation::parse_ic(nmethod* &nm, address &ic_call, address &first_oop,
oop* &oop_addr, bool *is_optimized) {
assert(ic_call != NULL, "ic_call address must be set");
assert(ic_call != NULL || first_oop != NULL, "must supply a non-null input");
- if (code == NULL) {
+ if (nm == NULL) {
+ CodeBlob* code;
if (ic_call != NULL) {
code = CodeCache::find_blob(ic_call);
} else if (first_oop != NULL) {
code = CodeCache::find_blob(first_oop);
}
- assert(code != NULL, "address to parse must be in CodeBlob");
+ nm = code->as_nmethod_or_null();
+ assert(nm != NULL, "address to parse must be in nmethod");
}
- assert(ic_call == NULL || code->contains(ic_call), "must be in CodeBlob");
- assert(first_oop == NULL || code->contains(first_oop), "must be in CodeBlob");
+ assert(ic_call == NULL || nm->contains(ic_call), "must be in nmethod");
+ assert(first_oop == NULL || nm->contains(first_oop), "must be in nmethod");
address oop_limit = NULL;
if (ic_call != NULL) {
// search for the ic_call at the given address
- RelocIterator iter(code, ic_call, ic_call+1);
+ RelocIterator iter(nm, ic_call, ic_call+1);
bool ret = iter.next();
assert(ret == true, "relocInfo must exist at this address");
assert(iter.addr() == ic_call, "must find ic_call");
@@ -814,7 +817,7 @@ RelocIterator virtual_call_Relocation::parse_ic(CodeBlob* &code, address &ic_cal
}
// search for the first_oop, to get its oop_addr
- RelocIterator all_oops(code, first_oop);
+ RelocIterator all_oops(nm, first_oop);
RelocIterator iter = all_oops;
iter.set_limit(first_oop+1);
bool found_oop = false;
@@ -842,7 +845,7 @@ RelocIterator virtual_call_Relocation::parse_ic(CodeBlob* &code, address &ic_cal
}
}
guarantee(!did_reset, "cannot find ic_call");
- iter = RelocIterator(code); // search the whole CodeBlob
+ iter = RelocIterator(nm); // search the whole nmethod
did_reset = true;
}
@@ -1175,9 +1178,9 @@ void RelocIterator::print() {
// For the debugger:
extern "C"
-void print_blob_locs(CodeBlob* cb) {
- cb->print();
- RelocIterator iter(cb);
+void print_blob_locs(nmethod* nm) {
+ nm->print();
+ RelocIterator iter(nm);
iter.print();
}
extern "C"
diff --git a/hotspot/src/share/vm/code/relocInfo.hpp b/hotspot/src/share/vm/code/relocInfo.hpp
index 8c4723ef464..7bb1887376b 100644
--- a/hotspot/src/share/vm/code/relocInfo.hpp
+++ b/hotspot/src/share/vm/code/relocInfo.hpp
@@ -512,7 +512,7 @@ class RelocIterator : public StackObj {
address _limit; // stop producing relocations after this _addr
relocInfo* _current; // the current relocation information
relocInfo* _end; // end marker; we're done iterating when _current == _end
- CodeBlob* _code; // compiled method containing _addr
+ nmethod* _code; // compiled method containing _addr
address _addr; // instruction to which the relocation applies
short _databuf; // spare buffer for compressed data
short* _data; // pointer to the relocation's data
@@ -549,7 +549,7 @@ class RelocIterator : public StackObj {
address compute_section_start(int n) const; // out-of-line helper
- void initialize(CodeBlob* nm, address begin, address limit);
+ void initialize(nmethod* nm, address begin, address limit);
friend class PatchingRelocIterator;
// make an uninitialized one, for PatchingRelocIterator:
@@ -557,7 +557,7 @@ class RelocIterator : public StackObj {
public:
// constructor
- RelocIterator(CodeBlob* cb, address begin = NULL, address limit = NULL);
+ RelocIterator(nmethod* nm, address begin = NULL, address limit = NULL);
RelocIterator(CodeSection* cb, address begin = NULL, address limit = NULL);
// get next reloc info, return !eos
@@ -592,7 +592,7 @@ class RelocIterator : public StackObj {
relocType type() const { return current()->type(); }
int format() const { return (relocInfo::have_format) ? current()->format() : 0; }
address addr() const { return _addr; }
- CodeBlob* code() const { return _code; }
+ nmethod* code() const { return _code; }
short* data() const { return _data; }
int datalen() const { return _datalen; }
bool has_current() const { return _datalen >= 0; }
@@ -790,9 +790,9 @@ class Relocation VALUE_OBJ_CLASS_SPEC {
public:
// accessors which only make sense for a bound Relocation
- address addr() const { return binding()->addr(); }
- CodeBlob* code() const { return binding()->code(); }
- bool addr_in_const() const { return binding()->addr_in_const(); }
+ address addr() const { return binding()->addr(); }
+ nmethod* code() const { return binding()->code(); }
+ bool addr_in_const() const { return binding()->addr_in_const(); }
protected:
short* data() const { return binding()->data(); }
int datalen() const { return binding()->datalen(); }
@@ -982,12 +982,12 @@ class virtual_call_Relocation : public CallRelocation {
// Figure out where an ic_call is hiding, given a set-oop or call.
// Either ic_call or first_oop must be non-null; the other is deduced.
- // Code if non-NULL must be the CodeBlob, else it is deduced.
+ // Code if non-NULL must be the nmethod, else it is deduced.
// The address of the patchable oop is also deduced.
// The returned iterator will enumerate over the oops and the ic_call,
// as well as any other relocations that happen to be in that span of code.
// Recognize relevant set_oops with: oop_reloc()->oop_addr() == oop_addr.
- static RelocIterator parse_ic(CodeBlob* &code, address &ic_call, address &first_oop, oop* &oop_addr, bool *is_optimized);
+ static RelocIterator parse_ic(nmethod* &nm, address &ic_call, address &first_oop, oop* &oop_addr, bool *is_optimized);
};
@@ -1304,8 +1304,8 @@ inline name##_Relocation* RelocIterator::name##_reloc() { \
APPLY_TO_RELOCATIONS(EACH_CASE);
#undef EACH_CASE
-inline RelocIterator::RelocIterator(CodeBlob* cb, address begin, address limit) {
- initialize(cb, begin, limit);
+inline RelocIterator::RelocIterator(nmethod* nm, address begin, address limit) {
+ initialize(nm, begin, limit);
}
// if you are going to patch code, you should use this subclass of
@@ -1323,8 +1323,8 @@ class PatchingRelocIterator : public RelocIterator {
void operator=(const RelocIterator&);
public:
- PatchingRelocIterator(CodeBlob* cb, address begin =NULL, address limit =NULL)
- : RelocIterator(cb, begin, limit) { prepass(); }
+ PatchingRelocIterator(nmethod* nm, address begin = NULL, address limit = NULL)
+ : RelocIterator(nm, begin, limit) { prepass(); }
~PatchingRelocIterator() { postpass(); }
};
diff --git a/hotspot/src/share/vm/compiler/compileBroker.cpp b/hotspot/src/share/vm/compiler/compileBroker.cpp
index e5603b46fcd..9ae477e3e15 100644
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp
@@ -1651,14 +1651,15 @@ void CompileBroker::handle_full_code_cache() {
log->stamp();
log->end_elem();
}
- #ifndef PRODUCT
- warning("CodeCache is full. Compiler has been disabled");
+ warning("CodeCache is full. Compiler has been disabled.");
+ warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
+#ifndef PRODUCT
if (CompileTheWorld || ExitOnFullCodeCache) {
before_exit(JavaThread::current());
exit_globals(); // will delete tty
vm_direct_exit(CompileTheWorld ? 0 : 1);
}
- #endif
+#endif
if (UseCodeCacheFlushing) {
NMethodSweeper::handle_full_code_cache(true);
} else {
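
The second warning points users at the code cache sizing flag. For example (the value shown is
only illustrative; pick one appropriate for the application):

    java -XX:ReservedCodeCacheSize=128m ...
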
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
index fb4454bd928..28f6ef4b0a3 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
@@ -32,6 +32,23 @@
// highest ranked free list lock rank
int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
+// Defaults are 0 so things will break badly if incorrectly initialized.
+int CompactibleFreeListSpace::IndexSetStart = 0;
+int CompactibleFreeListSpace::IndexSetStride = 0;
+
+size_t MinChunkSize = 0;
+
+void CompactibleFreeListSpace::set_cms_values() {
+ // Set CMS global values
+ assert(MinChunkSize == 0, "already set");
+ #define numQuanta(x,y) ((x+y-1)/y)
+ MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;
+
+ assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
+ IndexSetStart = MinObjAlignment;
+ IndexSetStride = MinObjAlignment;
+}
+
// Constructor
CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
MemRegion mr, bool use_adaptive_freelists,
@@ -302,7 +319,7 @@ size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
size_t count = 0;
- for (int i = MinChunkSize; i < IndexSetSize; i++) {
+ for (int i = (int)MinChunkSize; i < IndexSetSize; i++) {
debug_only(
ssize_t total_list_count = 0;
for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
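
set_cms_values() now derives these constants at startup so they can follow the runtime object
alignment rather than the old compile-time enum. A worked example with hypothetical numbers
(sizeof(FreeChunk) and the alignment vary by platform and flags):

    // assume sizeof(FreeChunk) == 24 bytes, HeapWordSize == 8,
    // MinObjAlignment == 2 heap words, MinObjAlignmentInBytes == 16
    numQuanta(24, 16) == (24 + 16 - 1) / 16 == 2
    MinChunkSize      == 2 * MinObjAlignment == 4 heap words (32 bytes)
    IndexSetStart == IndexSetStride == MinObjAlignment == 2

In other words, the smallest free chunk is sizeof(FreeChunk) rounded up to the object alignment,
expressed in heap words, and the indexed free lists only hold aligned sizes.
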
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
index 69670ef1be3..8bca5df52f5 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
@@ -91,10 +91,10 @@ class CompactibleFreeListSpace: public CompactibleSpace {
enum SomeConstants {
SmallForLinearAlloc = 16, // size < this then use _sLAB
SmallForDictionary = 257, // size < this then use _indexedFreeList
- IndexSetSize = SmallForDictionary, // keep this odd-sized
- IndexSetStart = MinObjAlignment,
- IndexSetStride = MinObjAlignment
+ IndexSetSize = SmallForDictionary // keep this odd-sized
};
+ static int IndexSetStart;
+ static int IndexSetStride;
private:
enum FitStrategyOptions {
@@ -278,6 +278,9 @@ class CompactibleFreeListSpace: public CompactibleSpace {
HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }
+ // Set CMS global values
+ static void set_cms_values();
+
// Return the free chunk at the end of the space. If no such
// chunk exists, return NULL.
FreeChunk* find_chunk_at_end();
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
index a1f40e21e91..0aa08ec3018 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
@@ -159,7 +159,7 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
CardTableRS* ct, bool use_adaptive_freelists,
FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
CardGeneration(rs, initial_byte_size, level, ct),
- _dilatation_factor(((double)MinChunkSize)/((double)(oopDesc::header_size()))),
+ _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
_debug_collection_type(Concurrent_collection_type)
{
HeapWord* bottom = (HeapWord*) _virtual_space.low();
@@ -222,7 +222,7 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
// promoting generation, we'll instead just use the minimum
// object size (which today is a header's worth of space);
// note that all arithmetic is in units of HeapWords.
- assert(MinChunkSize >= oopDesc::header_size(), "just checking");
+ assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
assert(_dilatation_factor >= 1.0, "from previous assert");
}
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp
index 6d083e1052f..19e3c1c0dd2 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp
@@ -133,9 +133,5 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
void print_on(outputStream* st);
};
-// Alignment helpers etc.
-#define numQuanta(x,y) ((x+y-1)/y)
-enum AlignmentConstants {
- MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment
-};
+extern size_t MinChunkSize;
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index 3e638d16640..33ff94c8e1b 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -3644,7 +3644,7 @@ void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) {
do {
free_words = r->free()/HeapWordSize;
// If there's too little space, no one can allocate, so we're done.
- if (free_words < (size_t)oopDesc::header_size()) return;
+ if (free_words < CollectedHeap::min_fill_size()) return;
// Otherwise, try to claim it.
block = r->par_allocate(free_words);
} while (block == NULL);
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
index a534ba26c46..ad09d214935 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
@@ -2523,14 +2523,14 @@ record_concurrent_mark_cleanup_end(size_t freed_bytes,
}
if (ParallelGCThreads > 0) {
const size_t OverpartitionFactor = 4;
- const size_t MinChunkSize = 8;
- const size_t ChunkSize =
+ const size_t MinWorkUnit = 8;
+ const size_t WorkUnit =
MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
- MinChunkSize);
+ MinWorkUnit);
_collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
- ChunkSize);
+ WorkUnit);
ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
- (int) ChunkSize);
+ (int) WorkUnit);
_g1->workers()->run_task(&parKnownGarbageTask);
assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp
index 4cf222f811c..a83d9c750c7 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp
@@ -81,20 +81,24 @@ void G1MMUTrackerQueue::add_pause(double start, double end, bool gc_thread) {
remove_expired_entries(end);
if (_no_entries == QueueLength) {
- // OK, right now when we fill up we bomb out
- // there are a few ways of dealing with this "gracefully"
+ // OK, we've filled up the queue. There are a few ways
+ // of dealing with this "gracefully"
// increase the array size (:-)
// remove the oldest entry (this might allow more GC time for
- // the time slice than what's allowed)
+ // the time slice than what's allowed) - this is what we
+ // currently do
// consolidate the two entries with the minimum gap between them
// (this might allow less GC time than what's allowed)
- guarantee(NOT_PRODUCT(ScavengeALot ||) G1UseFixedWindowMMUTracker,
- "array full, currently we can't recover unless +G1UseFixedWindowMMUTracker");
+
// In the case where ScavengeALot is true, such overflow is not
// uncommon; in such cases, we can, without much loss of precision
// or performance (we are GC'ing most of the time anyway!),
- // simply overwrite the oldest entry in the tracker: this
- // is also the behaviour when G1UseFixedWindowMMUTracker is enabled.
+ // simply overwrite the oldest entry in the tracker.
+
+ if (G1PolicyVerbose > 1) {
+ warning("MMU Tracker Queue overflow. Replacing earliest entry.");
+ }
+
_head_index = trim_index(_head_index + 1);
assert(_head_index == _tail_index, "Because we have a full circular buffer");
_tail_index = trim_index(_tail_index + 1);
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp
index 6a39ebe4e6e..0abac8a1855 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp
@@ -254,9 +254,6 @@
"If non-0 is the size of the G1 survivor space, " \
"otherwise SurvivorRatio is used to determine the size") \
\
- product(bool, G1UseFixedWindowMMUTracker, false, \
- "If the MMU tracker's memory is full, forget the oldest entry") \
- \
product(uintx, G1HeapRegionSize, 0, \
"Size of the G1 regions.") \
\
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
index d0cb46a3ae1..83f442aa6d7 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
@@ -711,6 +711,7 @@ HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
// object in the region.
if (region_ptr->data_size() == RegionSize) {
result += pointer_delta(addr, region_addr);
+ DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result);)
return result;
}
@@ -1487,13 +1488,14 @@ PSParallelCompact::provoke_split_fill_survivor(SpaceId id)
space->set_top_for_allocations();
}
- size_t obj_len = 8;
+ size_t min_size = CollectedHeap::min_fill_size();
+ size_t obj_len = min_size;
while (b + obj_len <= t) {
CollectedHeap::fill_with_object(b, obj_len);
mark_bitmap()->mark_obj(b, obj_len);
summary_data().add_obj(b, obj_len);
b += obj_len;
- obj_len = (obj_len & 0x18) + 8; // 8 16 24 32 8 16 24 32 ...
+ obj_len = (obj_len & (min_size*3)) + min_size; // 8 16 24 32 8 16 24 32 ...
}
if (b < t) {
// The loop didn't completely fill to t (top); adjust top downward.
@@ -1680,11 +1682,13 @@ void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
// +-------+
// Initially assume case a, c or e will apply.
- size_t obj_len = (size_t)oopDesc::header_size();
+ size_t obj_len = CollectedHeap::min_fill_size();
HeapWord* obj_beg = dense_prefix_end - obj_len;
#ifdef _LP64
- if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
+ if (MinObjAlignment > 1) { // object alignment > heap word size
+ // Cases a, c or e.
+ } else if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
// Case b above.
obj_beg = dense_prefix_end - 1;
} else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) &&
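
The fill loop in provoke_split_fill_survivor used to hard-code 8-word objects; it now starts from
min_fill_size() and cycles lengths with the same masking trick. Worked out for a hypothetical
min_size of 8 words:

    min_size * 3 == 24 (0x18), so obj_len follows
      8  -> (8  & 0x18) + 8 == 16
         -> (16 & 0x18) + 8 == 24
         -> (24 & 0x18) + 8 == 32
         -> (32 & 0x18) + 8 ==  8   // and the cycle repeats
    with min_size == 16 the same expression yields 16, 32, 48, 64, 16, ...
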
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
index f7021be6851..9f1fbf3ee39 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
@@ -1414,6 +1414,8 @@ PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord* new_addr)
{
assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr),
"must move left or to a different space");
+ assert(is_object_aligned((intptr_t)old_addr) && is_object_aligned((intptr_t)new_addr),
+ "checking alignment");
}
#endif // ASSERT
diff --git a/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp b/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp
index a5baf1f957c..22cace7a2ab 100644
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp
+++ b/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp
@@ -761,7 +761,7 @@ HeapWord* MutableNUMASpace::allocate(size_t size) {
if (p != NULL) {
size_t remainder = s->free_in_words();
- if (remainder < (size_t)oopDesc::header_size() && remainder > 0) {
+ if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
s->set_top(s->top() - size);
p = NULL;
}
@@ -803,7 +803,7 @@ HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
HeapWord *p = s->cas_allocate(size);
if (p != NULL) {
size_t remainder = pointer_delta(s->end(), p + size);
- if (remainder < (size_t)oopDesc::header_size() && remainder > 0) {
+ if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
if (s->cas_deallocate(p, size)) {
// We were the last to allocate and created a fragment less than
// a minimal object.
diff --git a/hotspot/src/share/vm/gc_interface/collectedHeap.cpp b/hotspot/src/share/vm/gc_interface/collectedHeap.cpp
index d082a072d9a..6fd9f0228c8 100644
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.cpp
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.cpp
@@ -239,11 +239,11 @@ oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
}
size_t CollectedHeap::filler_array_hdr_size() {
- return size_t(arrayOopDesc::header_size(T_INT));
+ return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
}
size_t CollectedHeap::filler_array_min_size() {
- return align_object_size(filler_array_hdr_size());
+ return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}
size_t CollectedHeap::filler_array_max_size() {
diff --git a/hotspot/src/share/vm/includeDB_compiler1 b/hotspot/src/share/vm/includeDB_compiler1
index 3e3b0e9c6ff..302bcc59600 100644
--- a/hotspot/src/share/vm/includeDB_compiler1
+++ b/hotspot/src/share/vm/includeDB_compiler1
@@ -71,8 +71,8 @@ c1_Compilation.cpp c1_LinearScan.hpp
c1_Compilation.cpp c1_MacroAssembler.hpp
c1_Compilation.cpp c1_ValueMap.hpp
c1_Compilation.cpp c1_ValueStack.hpp
-c1_Compilation.cpp ciEnv.hpp
c1_Compilation.cpp debugInfoRec.hpp
+c1_Compilation.hpp ciEnv.hpp
c1_Compilation.hpp exceptionHandlerTable.hpp
c1_Compilation.hpp resourceArea.hpp
@@ -82,6 +82,8 @@ c1_Compiler.cpp arguments.hpp
c1_Compiler.cpp c1_Compilation.hpp
c1_Compiler.cpp c1_Compiler.hpp
c1_Compiler.cpp c1_FrameMap.hpp
+c1_Compiler.cpp c1_GraphBuilder.hpp
+c1_Compiler.cpp c1_LinearScan.hpp
c1_Compiler.cpp c1_MacroAssembler.hpp
c1_Compiler.cpp c1_Runtime1.hpp
c1_Compiler.cpp c1_ValueType.hpp
diff --git a/hotspot/src/share/vm/includeDB_core b/hotspot/src/share/vm/includeDB_core
index 8906395939b..709b88ef582 100644
--- a/hotspot/src/share/vm/includeDB_core
+++ b/hotspot/src/share/vm/includeDB_core
@@ -827,6 +827,7 @@ ciStreams.cpp ciField.hpp
ciStreams.cpp ciStreams.hpp
ciStreams.cpp ciUtilities.hpp
+ciStreams.hpp bytecode.hpp
ciStreams.hpp ciClassList.hpp
ciStreams.hpp ciExceptionHandler.hpp
ciStreams.hpp ciInstanceKlass.hpp
@@ -3635,6 +3636,7 @@ rewriter.cpp bytecodes.hpp
rewriter.cpp gcLocker.hpp
rewriter.cpp generateOopMap.hpp
rewriter.cpp interpreter.hpp
+rewriter.cpp methodComparator.hpp
rewriter.cpp objArrayOop.hpp
rewriter.cpp oop.inline.hpp
rewriter.cpp oopFactory.hpp
diff --git a/hotspot/src/share/vm/interpreter/bytecode.cpp b/hotspot/src/share/vm/interpreter/bytecode.cpp
index 5be30c6afc9..212c80950ba 100644
--- a/hotspot/src/share/vm/interpreter/bytecode.cpp
+++ b/hotspot/src/share/vm/interpreter/bytecode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,19 +26,12 @@
#include "incls/_bytecode.cpp.incl"
// Implementation of Bytecode
-// Should eventually get rid of these functions and use ThisRelativeObj methods instead
-void Bytecode::set_code(Bytecodes::Code code) {
- Bytecodes::check(code);
- *addr_at(0) = u_char(code);
-}
-
-
-bool Bytecode::check_must_rewrite() const {
- assert(Bytecodes::can_rewrite(code()), "post-check only");
+bool Bytecode::check_must_rewrite(Bytecodes::Code code) const {
+ assert(Bytecodes::can_rewrite(code), "post-check only");
// Some codes are conditionally rewriting. Look closely at them.
- switch (code()) {
+ switch (code) {
case Bytecodes::_aload_0:
// Even if RewriteFrequentPairs is turned on,
// the _aload_0 code might delay its rewrite until
@@ -58,14 +51,85 @@ bool Bytecode::check_must_rewrite() const {
}
+#ifdef ASSERT
+
+void Bytecode::assert_same_format_as(Bytecodes::Code testbc, bool is_wide) const {
+ Bytecodes::Code thisbc = Bytecodes::cast(byte_at(0));
+ if (thisbc == Bytecodes::_breakpoint) return; // let the assertion fail silently
+ if (is_wide) {
+ assert(thisbc == Bytecodes::_wide, "expected a wide instruction");
+ thisbc = Bytecodes::cast(byte_at(1));
+ if (thisbc == Bytecodes::_breakpoint) return;
+ }
+ int thisflags = Bytecodes::flags(testbc, is_wide) & Bytecodes::_all_fmt_bits;
+ int testflags = Bytecodes::flags(thisbc, is_wide) & Bytecodes::_all_fmt_bits;
+ if (thisflags != testflags)
+ tty->print_cr("assert_same_format_as(%d) failed on bc=%d%s; %d != %d",
+ (int)testbc, (int)thisbc, (is_wide?"/wide":""), testflags, thisflags);
+ assert(thisflags == testflags, "expected format");
+}
+
+void Bytecode::assert_index_size(int size, Bytecodes::Code bc, bool is_wide) {
+ int have_fmt = (Bytecodes::flags(bc, is_wide)
+ & (Bytecodes::_fmt_has_u2 | Bytecodes::_fmt_has_u4 |
+ Bytecodes::_fmt_not_simple |
+ // Not an offset field:
+ Bytecodes::_fmt_has_o));
+ int need_fmt = -1;
+ switch (size) {
+ case 1: need_fmt = 0; break;
+ case 2: need_fmt = Bytecodes::_fmt_has_u2; break;
+ case 4: need_fmt = Bytecodes::_fmt_has_u4; break;
+ }
+ if (is_wide) need_fmt |= Bytecodes::_fmt_not_simple;
+ if (have_fmt != need_fmt) {
+ tty->print_cr("assert_index_size %d: bc=%d%s %d != %d", size, bc, (is_wide?"/wide":""), have_fmt, need_fmt);
+ assert(have_fmt == need_fmt, "assert_index_size");
+ }
+}
+
+void Bytecode::assert_offset_size(int size, Bytecodes::Code bc, bool is_wide) {
+ int have_fmt = Bytecodes::flags(bc, is_wide) & Bytecodes::_all_fmt_bits;
+ int need_fmt = -1;
+ switch (size) {
+ case 2: need_fmt = Bytecodes::_fmt_bo2; break;
+ case 4: need_fmt = Bytecodes::_fmt_bo4; break;
+ }
+ if (is_wide) need_fmt |= Bytecodes::_fmt_not_simple;
+ if (have_fmt != need_fmt) {
+ tty->print_cr("assert_offset_size %d: bc=%d%s %d != %d", size, bc, (is_wide?"/wide":""), have_fmt, need_fmt);
+ assert(have_fmt == need_fmt, "assert_offset_size");
+ }
+}
+
+void Bytecode::assert_constant_size(int size, int where, Bytecodes::Code bc, bool is_wide) {
+ int have_fmt = Bytecodes::flags(bc, is_wide) & (Bytecodes::_all_fmt_bits
+ // Ignore any 'i' field (for iinc):
+ & ~Bytecodes::_fmt_has_i);
+ int need_fmt = -1;
+ switch (size) {
+ case 1: need_fmt = Bytecodes::_fmt_bc; break;
+ case 2: need_fmt = Bytecodes::_fmt_bc | Bytecodes::_fmt_has_u2; break;
+ }
+ if (is_wide) need_fmt |= Bytecodes::_fmt_not_simple;
+ int length = is_wide ? Bytecodes::wide_length_for(bc) : Bytecodes::length_for(bc);
+ if (have_fmt != need_fmt || where + size != length) {
+ tty->print_cr("assert_constant_size %d @%d: bc=%d%s %d != %d", size, where, bc, (is_wide?"/wide":""), have_fmt, need_fmt);
+ }
+ assert(have_fmt == need_fmt, "assert_constant_size");
+ assert(where + size == length, "assert_constant_size oob");
+}
+
+void Bytecode::assert_native_index(Bytecodes::Code bc, bool is_wide) {
+ assert((Bytecodes::flags(bc, is_wide) & Bytecodes::_fmt_has_nbo) != 0, "native index");
+}
+
+#endif //ASSERT
// Implementation of Bytecode_tableswitch
int Bytecode_tableswitch::dest_offset_at(int i) const {
- address x = aligned_addr_at(1);
- int x2 = aligned_offset(1 + (3 + i)*jintSize);
- int val = java_signed_word_at(x2);
- return java_signed_word_at(aligned_offset(1 + (3 + i)*jintSize));
+ return get_Java_u4_at(aligned_offset(1 + (3 + i)*jintSize));
}
@@ -74,6 +138,7 @@ int Bytecode_tableswitch::dest_offset_at(int i) const {
void Bytecode_invoke::verify() const {
Bytecodes::Code bc = adjusted_invoke_code();
assert(is_valid(), "check invoke");
+ assert(method()->constants()->cache() != NULL, "do not call this from verifier or rewriter");
}
@@ -116,27 +181,12 @@ methodHandle Bytecode_invoke::static_target(TRAPS) {
int Bytecode_invoke::index() const {
// Note: Rewriter::rewrite changes the Java_u2 of an invokedynamic to a native_u4,
// at the same time it allocates per-call-site CP cache entries.
- if (has_giant_index())
- return Bytes::get_native_u4(bcp() + 1);
+ Bytecodes::Code stdc = Bytecodes::java_code(code());
+ Bytecode* invoke = Bytecode_at(bcp());
+ if (invoke->has_index_u4(stdc))
+ return invoke->get_index_u4(stdc);
else
- return Bytes::get_Java_u2(bcp() + 1);
-}
-
-
-// Implementation of Bytecode_static
-
-void Bytecode_static::verify() const {
- assert(Bytecodes::java_code(code()) == Bytecodes::_putstatic
- || Bytecodes::java_code(code()) == Bytecodes::_getstatic, "check static");
-}
-
-
-BasicType Bytecode_static::result_type(methodOop method) const {
- int index = java_hwrd_at(1);
- constantPoolOop constants = method->constants();
- symbolOop field_type = constants->signature_ref_at(index);
- BasicType basic_type = FieldType::basic_type(field_type);
- return basic_type;
+ return invoke->get_index_u2_cpcache(stdc);
}
@@ -156,7 +206,8 @@ bool Bytecode_field::is_static() const {
int Bytecode_field::index() const {
- return java_hwrd_at(1);
+ Bytecode* invoke = Bytecode_at(bcp());
+ return invoke->get_index_u2_cpcache(Bytecodes::_getfield);
}
@@ -164,7 +215,14 @@ int Bytecode_field::index() const {
int Bytecode_loadconstant::index() const {
Bytecodes::Code stdc = Bytecodes::java_code(code());
- return stdc == Bytecodes::_ldc ? java_byte_at(1) : java_hwrd_at(1);
+ if (stdc != Bytecodes::_wide) {
+ if (Bytecodes::java_code(stdc) == Bytecodes::_ldc)
+ return get_index_u1(stdc);
+ else
+ return get_index_u2(stdc, false);
+ }
+ stdc = Bytecodes::code_at(addr_at(1));
+ return get_index_u2(stdc, true);
}
//------------------------------------------------------------------------------
diff --git a/hotspot/src/share/vm/interpreter/bytecode.hpp b/hotspot/src/share/vm/interpreter/bytecode.hpp
index 2eeee5d2502..dd0068926e2 100644
--- a/hotspot/src/share/vm/interpreter/bytecode.hpp
+++ b/hotspot/src/share/vm/interpreter/bytecode.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,92 +26,100 @@
// relative to an object's 'this' pointer.
class ThisRelativeObj VALUE_OBJ_CLASS_SPEC {
- private:
- int sign_extend (int x, int size) const { const int s = (BytesPerInt - size)*BitsPerByte; return (x << s) >> s; }
-
public:
// Address computation
address addr_at (int offset) const { return (address)this + offset; }
+ int byte_at (int offset) const { return *(addr_at(offset)); }
address aligned_addr_at (int offset) const { return (address)round_to((intptr_t)addr_at(offset), jintSize); }
int aligned_offset (int offset) const { return aligned_addr_at(offset) - addr_at(0); }
- // Java unsigned accessors (using Java spec byte ordering)
- int java_byte_at (int offset) const { return *(jubyte*)addr_at(offset); }
- int java_hwrd_at (int offset) const { return java_byte_at(offset) << (1 * BitsPerByte) | java_byte_at(offset + 1); }
- int java_word_at (int offset) const { return java_hwrd_at(offset) << (2 * BitsPerByte) | java_hwrd_at(offset + 2); }
-
- // Java signed accessors (using Java spec byte ordering)
- int java_signed_byte_at(int offset) const { return sign_extend(java_byte_at(offset), 1); }
- int java_signed_hwrd_at(int offset) const { return sign_extend(java_hwrd_at(offset), 2); }
- int java_signed_word_at(int offset) const { return java_word_at(offset) ; }
-
- // Fast accessors (using the machine's natural byte ordering)
- int fast_byte_at (int offset) const { return *(jubyte *)addr_at(offset); }
- int fast_hwrd_at (int offset) const { return *(jushort*)addr_at(offset); }
- int fast_word_at (int offset) const { return *(juint *)addr_at(offset); }
-
- // Fast signed accessors (using the machine's natural byte ordering)
- int fast_signed_byte_at(int offset) const { return *(jbyte *)addr_at(offset); }
- int fast_signed_hwrd_at(int offset) const { return *(jshort*)addr_at(offset); }
- int fast_signed_word_at(int offset) const { return *(jint *)addr_at(offset); }
-
- // Fast manipulators (using the machine's natural byte ordering)
- void set_fast_byte_at (int offset, int x) const { *(jbyte *)addr_at(offset) = (jbyte )x; }
- void set_fast_hwrd_at (int offset, int x) const { *(jshort*)addr_at(offset) = (jshort)x; }
- void set_fast_word_at (int offset, int x) const { *(jint *)addr_at(offset) = (jint )x; }
+ // Word access:
+ int get_Java_u2_at (int offset) const { return Bytes::get_Java_u2(addr_at(offset)); }
+ int get_Java_u4_at (int offset) const { return Bytes::get_Java_u4(addr_at(offset)); }
+ int get_native_u2_at (int offset) const { return Bytes::get_native_u2(addr_at(offset)); }
+ int get_native_u4_at (int offset) const { return Bytes::get_native_u4(addr_at(offset)); }
};
// The base class for different kinds of bytecode abstractions.
// Provides the primitive operations to manipulate code relative
// to an object's 'this' pointer.
+// FIXME: Make this a ResourceObj, include the enclosing methodOop, and cache the opcode.
class Bytecode: public ThisRelativeObj {
protected:
u_char byte_at(int offset) const { return *addr_at(offset); }
- bool check_must_rewrite() const;
+ bool check_must_rewrite(Bytecodes::Code bc) const;
public:
// Attributes
address bcp() const { return addr_at(0); }
- address next_bcp() const { return addr_at(0) + Bytecodes::length_at(bcp()); }
int instruction_size() const { return Bytecodes::length_at(bcp()); }
+ // Warning: Use code() with caution on live bytecode streams. 4926272
Bytecodes::Code code() const { return Bytecodes::code_at(addr_at(0)); }
Bytecodes::Code java_code() const { return Bytecodes::java_code(code()); }
- bool must_rewrite() const { return Bytecodes::can_rewrite(code()) && check_must_rewrite(); }
- bool is_active_breakpoint() const { return Bytecodes::is_active_breakpoint_at(bcp()); }
-
- int one_byte_index() const { assert_index_size(1); return byte_at(1); }
- int two_byte_index() const { assert_index_size(2); return (byte_at(1) << 8) + byte_at(2); }
-
- int offset() const { return (two_byte_index() << 16) >> 16; }
- address destination() const { return bcp() + offset(); }
-
- // Attribute modification
- void set_code(Bytecodes::Code code);
+ bool must_rewrite(Bytecodes::Code code) const { return Bytecodes::can_rewrite(code) && check_must_rewrite(code); }
// Creation
inline friend Bytecode* Bytecode_at(address bcp);
- private:
- void assert_index_size(int required_size) const {
-#ifdef ASSERT
- int isize = instruction_size() - 1;
- if (isize == 2 && code() == Bytecodes::_iinc)
- isize = 1;
- else if (isize <= 2)
- ; // no change
- else if (code() == Bytecodes::_invokedynamic)
- isize = 4;
- else
- isize = 2;
- assert(isize = required_size, "wrong index size");
-#endif
+ // Static functions for parsing bytecodes in place.
+ int get_index_u1(Bytecodes::Code bc) const {
+ assert_same_format_as(bc); assert_index_size(1, bc);
+ return *(jubyte*)addr_at(1);
+ }
+ int get_index_u2(Bytecodes::Code bc, bool is_wide = false) const {
+ assert_same_format_as(bc, is_wide); assert_index_size(2, bc, is_wide);
+ address p = addr_at(is_wide ? 2 : 1);
+ if (can_use_native_byte_order(bc, is_wide))
+ return Bytes::get_native_u2(p);
+ else return Bytes::get_Java_u2(p);
+ }
+ int get_index_u2_cpcache(Bytecodes::Code bc) const {
+ assert_same_format_as(bc); assert_index_size(2, bc); assert_native_index(bc);
+ return Bytes::get_native_u2(addr_at(1)) DEBUG_ONLY(+ constantPoolOopDesc::CPCACHE_INDEX_TAG);
+ }
+ int get_index_u4(Bytecodes::Code bc) const {
+ assert_same_format_as(bc); assert_index_size(4, bc);
+ assert(can_use_native_byte_order(bc), "");
+ return Bytes::get_native_u4(addr_at(1));
+ }
+ bool has_index_u4(Bytecodes::Code bc) const {
+ return bc == Bytecodes::_invokedynamic;
+ }
+
+ int get_offset_s2(Bytecodes::Code bc) const {
+ assert_same_format_as(bc); assert_offset_size(2, bc);
+ return (jshort) Bytes::get_Java_u2(addr_at(1));
+ }
+ int get_offset_s4(Bytecodes::Code bc) const {
+ assert_same_format_as(bc); assert_offset_size(4, bc);
+ return (jint) Bytes::get_Java_u4(addr_at(1));
+ }
+
+ int get_constant_u1(int offset, Bytecodes::Code bc) const {
+ assert_same_format_as(bc); assert_constant_size(1, offset, bc);
+ return *(jbyte*)addr_at(offset);
+ }
+ int get_constant_u2(int offset, Bytecodes::Code bc, bool is_wide = false) const {
+ assert_same_format_as(bc, is_wide); assert_constant_size(2, offset, bc, is_wide);
+ return (jshort) Bytes::get_Java_u2(addr_at(offset));
+ }
+
+ // These are used locally and also from bytecode streams.
+ void assert_same_format_as(Bytecodes::Code testbc, bool is_wide = false) const NOT_DEBUG_RETURN;
+ static void assert_index_size(int required_size, Bytecodes::Code bc, bool is_wide = false) NOT_DEBUG_RETURN;
+ static void assert_offset_size(int required_size, Bytecodes::Code bc, bool is_wide = false) NOT_DEBUG_RETURN;
+ static void assert_constant_size(int required_size, int where, Bytecodes::Code bc, bool is_wide = false) NOT_DEBUG_RETURN;
+ static void assert_native_index(Bytecodes::Code bc, bool is_wide = false) NOT_DEBUG_RETURN;
+ static bool can_use_native_byte_order(Bytecodes::Code bc, bool is_wide = false) {
+ return (!Bytes::is_Java_byte_ordering_different() || Bytecodes::native_byte_order(bc /*, is_wide*/));
}
};
inline Bytecode* Bytecode_at(address bcp) {
+ // Warning: Use with caution on live bytecode streams. 4926272
return (Bytecode*)bcp;
}
@@ -124,8 +132,8 @@ class LookupswitchPair: ThisRelativeObj {
int _offset;
public:
- int match() const { return java_signed_word_at(0 * jintSize); }
- int offset() const { return java_signed_word_at(1 * jintSize); }
+ int match() const { return get_Java_u4_at(0 * jintSize); }
+ int offset() const { return get_Java_u4_at(1 * jintSize); }
};
@@ -134,8 +142,8 @@ class Bytecode_lookupswitch: public Bytecode {
void verify() const PRODUCT_RETURN;
// Attributes
- int default_offset() const { return java_signed_word_at(aligned_offset(1 + 0*jintSize)); }
- int number_of_pairs() const { return java_signed_word_at(aligned_offset(1 + 1*jintSize)); }
+ int default_offset() const { return get_Java_u4_at(aligned_offset(1 + 0*jintSize)); }
+ int number_of_pairs() const { return get_Java_u4_at(aligned_offset(1 + 1*jintSize)); }
LookupswitchPair* pair_at(int i) const { assert(0 <= i && i < number_of_pairs(), "pair index out of bounds");
return (LookupswitchPair*)aligned_addr_at(1 + (1 + i)*2*jintSize); }
// Creation
@@ -154,9 +162,9 @@ class Bytecode_tableswitch: public Bytecode {
void verify() const PRODUCT_RETURN;
// Attributes
- int default_offset() const { return java_signed_word_at(aligned_offset(1 + 0*jintSize)); }
- int low_key() const { return java_signed_word_at(aligned_offset(1 + 1*jintSize)); }
- int high_key() const { return java_signed_word_at(aligned_offset(1 + 2*jintSize)); }
+ int default_offset() const { return get_Java_u4_at(aligned_offset(1 + 0*jintSize)); }
+ int low_key() const { return get_Java_u4_at(aligned_offset(1 + 1*jintSize)); }
+ int high_key() const { return get_Java_u4_at(aligned_offset(1 + 2*jintSize)); }
int dest_offset_at(int i) const;
int length() { return high_key()-low_key()+1; }
@@ -206,7 +214,6 @@ class Bytecode_invoke: public ResourceObj {
bool is_invokedynamic() const { return adjusted_invoke_code() == Bytecodes::_invokedynamic; }
bool has_receiver() const { return !is_invokestatic() && !is_invokedynamic(); }
- bool has_giant_index() const { return is_invokedynamic(); }
bool is_valid() const { return is_invokeinterface() ||
is_invokevirtual() ||
@@ -252,26 +259,6 @@ inline Bytecode_field* Bytecode_field_at(const methodOop method, address bcp) {
}
-// Abstraction for {get,put}static
-
-class Bytecode_static: public Bytecode {
- public:
- void verify() const;
-
- // Returns the result type of the send by inspecting the field ref
- BasicType result_type(methodOop method) const;
-
- // Creation
- inline friend Bytecode_static* Bytecode_static_at(const methodOop method, address bcp);
-};
-
-inline Bytecode_static* Bytecode_static_at(const methodOop method, address bcp) {
- Bytecode_static* b = (Bytecode_static*)bcp;
- debug_only(b->verify());
- return b;
-}
-
-
// Abstraction for checkcast
class Bytecode_checkcast: public Bytecode {
@@ -279,7 +266,7 @@ class Bytecode_checkcast: public Bytecode {
void verify() const { assert(Bytecodes::java_code(code()) == Bytecodes::_checkcast, "check checkcast"); }
// Returns index
- long index() const { return java_hwrd_at(1); };
+ long index() const { return get_index_u2(Bytecodes::_checkcast); };
// Creation
inline friend Bytecode_checkcast* Bytecode_checkcast_at(address bcp);
@@ -299,7 +286,7 @@ class Bytecode_instanceof: public Bytecode {
void verify() const { assert(code() == Bytecodes::_instanceof, "check instanceof"); }
// Returns index
- long index() const { return java_hwrd_at(1); };
+ long index() const { return get_index_u2(Bytecodes::_instanceof); };
// Creation
inline friend Bytecode_instanceof* Bytecode_instanceof_at(address bcp);
@@ -317,7 +304,7 @@ class Bytecode_new: public Bytecode {
void verify() const { assert(java_code() == Bytecodes::_new, "check new"); }
// Returns index
- long index() const { return java_hwrd_at(1); };
+ long index() const { return get_index_u2(Bytecodes::_new); };
// Creation
inline friend Bytecode_new* Bytecode_new_at(address bcp);
@@ -335,7 +322,7 @@ class Bytecode_multianewarray: public Bytecode {
void verify() const { assert(java_code() == Bytecodes::_multianewarray, "check new"); }
// Returns index
- long index() const { return java_hwrd_at(1); };
+ long index() const { return get_index_u2(Bytecodes::_multianewarray); };
// Creation
inline friend Bytecode_multianewarray* Bytecode_multianewarray_at(address bcp);
@@ -353,7 +340,7 @@ class Bytecode_anewarray: public Bytecode {
void verify() const { assert(java_code() == Bytecodes::_anewarray, "check anewarray"); }
// Returns index
- long index() const { return java_hwrd_at(1); };
+ long index() const { return get_index_u2(Bytecodes::_anewarray); };
// Creation
inline friend Bytecode_anewarray* Bytecode_anewarray_at(address bcp);
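
The old java_hwrd_at(1)-style readers are gone; callers now name the opcode they expect, which
lets debug builds assert the operand format and pick native versus Java byte order automatically.
A short sketch, assuming bcp is a valid pointer into a rewritten method (the switch and the -1
fallback are only for illustration):

    Bytecode* b = Bytecode_at(bcp);
    switch (b->java_code()) {
      case Bytecodes::_new:
        return b->get_index_u2(Bytecodes::_new);                // Java-ordered u2 constant pool index
      case Bytecodes::_getfield:
        return b->get_index_u2_cpcache(Bytecodes::_getfield);   // native-ordered u2 into the CP cache
      case Bytecodes::_invokedynamic:
        return b->get_index_u4(Bytecodes::_invokedynamic);      // native u4; has_index_u4() is true here
      default:
        return -1;                                              // hypothetical fallback for this sketch
    }
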
diff --git a/hotspot/src/share/vm/interpreter/bytecodeStream.cpp b/hotspot/src/share/vm/interpreter/bytecodeStream.cpp
index a6ceb472930..b5414d5b951 100644
--- a/hotspot/src/share/vm/interpreter/bytecodeStream.cpp
+++ b/hotspot/src/share/vm/interpreter/bytecodeStream.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -48,6 +48,25 @@ Bytecodes::Code RawBytecodeStream::raw_next_special(Bytecodes::Code code) {
}
}
}
- _code = code;
+ _raw_code = code;
return code;
}
+
+#ifdef ASSERT
+void BaseBytecodeStream::assert_raw_index_size(int size) const {
+ if (raw_code() == Bytecodes::_invokedynamic && is_raw()) {
+ // in raw mode, pretend indy is "bJJ__"
+ assert(size == 2, "raw invokedynamic instruction has 2-byte index only");
+ } else {
+ bytecode()->assert_index_size(size, raw_code(), is_wide());
+ }
+}
+
+void BaseBytecodeStream::assert_raw_stream(bool want_raw) const {
+ if (want_raw) {
+ assert( is_raw(), "this function only works on raw streams");
+ } else {
+ assert(!is_raw(), "this function only works on non-raw streams");
+ }
+}
+#endif //ASSERT
diff --git a/hotspot/src/share/vm/interpreter/bytecodeStream.hpp b/hotspot/src/share/vm/interpreter/bytecodeStream.hpp
index 6561c225c34..204a3c56a53 100644
--- a/hotspot/src/share/vm/interpreter/bytecodeStream.hpp
+++ b/hotspot/src/share/vm/interpreter/bytecodeStream.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,13 +32,13 @@
// while ((c = s.next()) >= 0) {
// ...
// }
-//
+
// A RawBytecodeStream is a simple version of BytecodeStream.
// It is used ONLY when we know the bytecodes haven't been rewritten
-// yet, such as in the rewriter or the verifier. Currently only the
-// verifier uses this class.
+// yet, such as in the rewriter or the verifier.
-class RawBytecodeStream: StackObj {
+// Here is the common base class for both RawBytecodeStream and BytecodeStream:
+class BaseBytecodeStream: StackObj {
protected:
// stream buffer
methodHandle _method; // read from method directly
@@ -49,15 +49,17 @@ class RawBytecodeStream: StackObj {
int _end_bci; // bci after the current iteration interval
// last bytecode read
- Bytecodes::Code _code;
+ Bytecodes::Code _raw_code;
bool _is_wide;
+ bool _is_raw; // false in 'cooked' BytecodeStream
- public:
// Construction
- RawBytecodeStream(methodHandle method) : _method(method) {
+ BaseBytecodeStream(methodHandle method) : _method(method) {
set_interval(0, _method->code_size());
+ _is_raw = false;
}
+ public:
// Iteration control
void set_interval(int beg_bci, int end_bci) {
// iterate over the interval [beg_bci, end_bci)
@@ -72,6 +74,46 @@ class RawBytecodeStream: StackObj {
set_interval(beg_bci, _method->code_size());
}
+ bool is_raw() const { return _is_raw; }
+
+ // Stream attributes
+ methodHandle method() const { return _method; }
+
+ int bci() const { return _bci; }
+ int next_bci() const { return _next_bci; }
+ int end_bci() const { return _end_bci; }
+
+ Bytecodes::Code raw_code() const { return _raw_code; }
+ bool is_wide() const { return _is_wide; }
+ int instruction_size() const { return (_next_bci - _bci); }
+ bool is_last_bytecode() const { return _next_bci >= _end_bci; }
+
+ address bcp() const { return method()->code_base() + _bci; }
+ Bytecode* bytecode() const { return Bytecode_at(bcp()); }
+
+ // State changes
+ void set_next_bci(int bci) { assert(0 <= bci && bci <= method()->code_size(), "illegal bci"); _next_bci = bci; }
+
+ // Bytecode-specific attributes
+ int dest() const { return bci() + bytecode()->get_offset_s2(raw_code()); }
+ int dest_w() const { return bci() + bytecode()->get_offset_s4(raw_code()); }
+
+ // One-byte indices.
+ int get_index_u1() const { assert_raw_index_size(1); return *(jubyte*)(bcp()+1); }
+
+ protected:
+ void assert_raw_index_size(int size) const NOT_DEBUG_RETURN;
+ void assert_raw_stream(bool want_raw) const NOT_DEBUG_RETURN;
+};
+
+class RawBytecodeStream: public BaseBytecodeStream {
+ public:
+ // Construction
+ RawBytecodeStream(methodHandle method) : BaseBytecodeStream(method) {
+ _is_raw = true;
+ }
+
+ public:
// Iteration
// Use raw_next() rather than next() for faster method reference
Bytecodes::Code raw_next() {
@@ -80,7 +122,7 @@ class RawBytecodeStream: StackObj {
_bci = _next_bci;
assert(!is_last_bytecode(), "caller should check is_last_bytecode()");
- address bcp = RawBytecodeStream::bcp();
+ address bcp = this->bcp();
code = Bytecodes::code_or_bp_at(bcp);
// set next bytecode position
@@ -90,84 +132,49 @@ class RawBytecodeStream: StackObj {
&& code != Bytecodes::_lookupswitch, "can't be special bytecode");
_is_wide = false;
_next_bci += l;
- _code = code;
+ _raw_code = code;
return code;
- } else if (code == Bytecodes::_wide && _bci + 1 >= _end_bci) {
- return Bytecodes::_illegal;
} else {
return raw_next_special(code);
}
}
Bytecodes::Code raw_next_special(Bytecodes::Code code);
- // Stream attributes
- methodHandle method() const { return _method; }
-
- int bci() const { return _bci; }
- int next_bci() const { return _next_bci; }
- int end_bci() const { return _end_bci; }
-
- Bytecodes::Code code() const { return _code; }
- bool is_wide() const { return _is_wide; }
- int instruction_size() const { return (_next_bci - _bci); }
- bool is_last_bytecode() const { return _next_bci >= _end_bci; }
-
- address bcp() const { return method()->code_base() + _bci; }
- address next_bcp() { return method()->code_base() + _next_bci; }
-
- // State changes
- void set_next_bci(int bci) { assert(0 <= bci && bci <= method()->code_size(), "illegal bci"); _next_bci = bci; }
-
- // Bytecode-specific attributes
- int dest() const { return bci() + (short)Bytes::get_Java_u2(bcp() + 1); }
- int dest_w() const { return bci() + (int )Bytes::get_Java_u4(bcp() + 1); }
-
- // Unsigned indices, widening
- int get_index() const { assert_index_size(is_wide() ? 2 : 1);
- return (is_wide()) ? Bytes::get_Java_u2(bcp() + 2) : bcp()[1]; }
- int get_index_big() const { assert_index_size(2);
- return (int)Bytes::get_Java_u2(bcp() + 1); }
- int get_index_int() const { return has_giant_index() ? get_index_giant() : get_index_big(); }
- int get_index_giant() const { assert_index_size(4); return Bytes::get_native_u4(bcp() + 1); }
- int has_giant_index() const { return (code() == Bytecodes::_invokedynamic); }
+ // Unsigned indices, widening, with no swapping of bytes
+ int get_index() const { return (is_wide()) ? get_index_u2_raw(bcp() + 2) : get_index_u1(); }
+ // Get an unsigned 2-byte index, with no swapping of bytes.
+ int get_index_u2() const { assert(!is_wide(), ""); return get_index_u2_raw(bcp() + 1); }
private:
- void assert_index_size(int required_size) const {
-#ifdef ASSERT
- int isize = instruction_size() - (int)_is_wide - 1;
- if (isize == 2 && code() == Bytecodes::_iinc)
- isize = 1;
- else if (isize <= 2)
- ; // no change
- else if (has_giant_index())
- isize = 4;
- else
- isize = 2;
- assert(isize == required_size, "wrong index size");
-#endif
+ int get_index_u2_raw(address p) const {
+ assert_raw_index_size(2); assert_raw_stream(true);
+ return Bytes::get_Java_u2(p);
}
};
// In BytecodeStream, non-java bytecodes will be translated into the
// corresponding java bytecodes.
-class BytecodeStream: public RawBytecodeStream {
+class BytecodeStream: public BaseBytecodeStream {
+ Bytecodes::Code _code;
+
public:
// Construction
- BytecodeStream(methodHandle method) : RawBytecodeStream(method) { }
+ BytecodeStream(methodHandle method) : BaseBytecodeStream(method) { }
// Iteration
Bytecodes::Code next() {
- Bytecodes::Code code;
+ Bytecodes::Code raw_code, code;
// set reading position
_bci = _next_bci;
if (is_last_bytecode()) {
// indicate end of bytecode stream
- code = Bytecodes::_illegal;
+ raw_code = code = Bytecodes::_illegal;
} else {
// get bytecode
- address bcp = BytecodeStream::bcp();
- code = Bytecodes::java_code_at(bcp);
+ address bcp = this->bcp();
+ raw_code = Bytecodes::code_at(bcp);
+ code = Bytecodes::java_code(raw_code);
// set next bytecode position
//
// note that we cannot advance before having the
@@ -181,14 +188,29 @@ class BytecodeStream: public RawBytecodeStream {
_is_wide = false;
// check for special (uncommon) cases
if (code == Bytecodes::_wide) {
- code = (Bytecodes::Code)bcp[1];
+ raw_code = (Bytecodes::Code)bcp[1];
+ code = raw_code; // wide BCs are always Java-normal
_is_wide = true;
}
assert(Bytecodes::is_java_code(code), "sanity check");
}
+ _raw_code = raw_code;
_code = code;
return _code;
}
bool is_active_breakpoint() const { return Bytecodes::is_active_breakpoint_at(bcp()); }
+ Bytecodes::Code code() const { return _code; }
+
+ // Unsigned indices, widening
+ int get_index() const { return is_wide() ? bytecode()->get_index_u2(raw_code(), true) : get_index_u1(); }
+ // Get an unsigned 2-byte index, swapping the bytes if necessary.
+ int get_index_u2() const { assert_raw_stream(false);
+ return bytecode()->get_index_u2(raw_code(), false); }
+ // Get an unsigned 2-byte index in native order.
+ int get_index_u2_cpcache() const { assert_raw_stream(false);
+ return bytecode()->get_index_u2_cpcache(raw_code()); }
+ int get_index_u4() const { assert_raw_stream(false);
+ return bytecode()->get_index_u4(raw_code()); }
+ bool has_index_u4() const { return bytecode()->has_index_u4(raw_code()); }
};
diff --git a/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp b/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp
index dbf617bc2f6..5dea0d746ac 100644
--- a/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp
+++ b/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,6 +39,7 @@ class BytecodePrinter: public BytecodeClosure {
// (Also, ensure that occasional false positives are benign.)
methodOop _current_method;
bool _is_wide;
+ Bytecodes::Code _code;
address _next_pc; // current decoding position
void align() { _next_pc = (address)round_to((intptr_t)_next_pc, sizeof(jint)); }
@@ -46,23 +47,26 @@ class BytecodePrinter: public BytecodeClosure {
short get_short() { short i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; }
int get_int() { int i=Bytes::get_Java_u4(_next_pc); _next_pc+=4; return i; }
- int get_index() { return *(address)_next_pc++; }
- int get_big_index() { int i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; }
- int get_giant_index() { int i=Bytes::get_native_u4(_next_pc); _next_pc+=4; return i; }
- int get_index_special() { return (is_wide()) ? get_big_index() : get_index(); }
+ int get_index_u1() { return *(address)_next_pc++; }
+ int get_index_u2() { int i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; }
+ int get_index_u2_cpcache() { int i=Bytes::get_native_u2(_next_pc); _next_pc+=2; return i + constantPoolOopDesc::CPCACHE_INDEX_TAG; }
+ int get_index_u4() { int i=Bytes::get_native_u4(_next_pc); _next_pc+=4; return i; }
+ int get_index_special() { return (is_wide()) ? get_index_u2() : get_index_u1(); }
methodOop method() { return _current_method; }
bool is_wide() { return _is_wide; }
+ Bytecodes::Code raw_code() { return Bytecodes::Code(_code); }
- bool check_index(int i, bool in_cp_cache, int& cp_index, outputStream* st = tty);
+ bool check_index(int i, int& cp_index, outputStream* st = tty);
void print_constant(int i, outputStream* st = tty);
void print_field_or_method(int i, outputStream* st = tty);
- void print_attributes(Bytecodes::Code code, int bci, outputStream* st = tty);
+ void print_attributes(int bci, outputStream* st = tty);
void bytecode_epilog(int bci, outputStream* st = tty);
public:
BytecodePrinter() {
_is_wide = false;
+ _code = Bytecodes::_illegal;
}
// This method is called while executing the raw bytecodes, so none of
@@ -89,7 +93,8 @@ class BytecodePrinter: public BytecodeClosure {
} else {
code = Bytecodes::code_at(bcp);
}
- int bci = bcp - method->code_base();
+ _code = code;
+ int bci = bcp - method->code_base();
st->print("[%d] ", (int) Thread::current()->osthread()->thread_id());
if (Verbose) {
st->print("%8d %4d " INTPTR_FORMAT " " INTPTR_FORMAT " %s",
@@ -99,10 +104,11 @@ class BytecodePrinter: public BytecodeClosure {
BytecodeCounter::counter_value(), bci, Bytecodes::name(code));
}
_next_pc = is_wide() ? bcp+2 : bcp+1;
- print_attributes(code, bci);
+ print_attributes(bci);
// Set is_wide for the next one, since the caller of this doesn't skip
// the next bytecode.
_is_wide = (code == Bytecodes::_wide);
+ _code = Bytecodes::_illegal;
}
// Used for methodOop::print_codes(). The input bcp comes from
@@ -116,6 +122,7 @@ class BytecodePrinter: public BytecodeClosure {
if (is_wide()) {
code = Bytecodes::code_at(bcp+1);
}
+ _code = code;
int bci = bcp - method->code_base();
// Print bytecode index and name
if (is_wide()) {
@@ -124,7 +131,7 @@ class BytecodePrinter: public BytecodeClosure {
st->print("%d %s", bci, Bytecodes::name(code));
}
_next_pc = is_wide() ? bcp+2 : bcp+1;
- print_attributes(code, bci, st);
+ print_attributes(bci, st);
bytecode_epilog(bci, st);
}
};
@@ -185,12 +192,13 @@ void print_oop(oop value, outputStream* st) {
}
}
-bool BytecodePrinter::check_index(int i, bool in_cp_cache, int& cp_index, outputStream* st) {
+bool BytecodePrinter::check_index(int i, int& cp_index, outputStream* st) {
constantPoolOop constants = method()->constants();
int ilimit = constants->length(), climit = 0;
+ Bytecodes::Code code = raw_code();
constantPoolCacheOop cache = NULL;
- if (in_cp_cache) {
+ if (Bytecodes::uses_cp_cache(code)) {
cache = constants->cache();
if (cache != NULL) {
//climit = cache->length(); // %%% private!
@@ -201,7 +209,7 @@ bool BytecodePrinter::check_index(int i, bool in_cp_cache, int& cp_index, output
}
}
- if (in_cp_cache && constantPoolCacheOopDesc::is_secondary_index(i)) {
+ if (cache != NULL && constantPoolCacheOopDesc::is_secondary_index(i)) {
i = constantPoolCacheOopDesc::decode_secondary_index(i);
st->print(" secondary cache[%d] of", i);
if (i >= 0 && i < climit) {
@@ -218,8 +226,6 @@ bool BytecodePrinter::check_index(int i, bool in_cp_cache, int& cp_index, output
}
if (cache != NULL) {
- i = Bytes::swap_u2(i);
- if (WizardMode) st->print(" (swap=%d)", i);
goto check_cache_index;
}
@@ -234,6 +240,17 @@ bool BytecodePrinter::check_index(int i, bool in_cp_cache, int& cp_index, output
return false;
check_cache_index:
+#ifdef ASSERT
+ {
+ const int CPCACHE_INDEX_TAG = constantPoolOopDesc::CPCACHE_INDEX_TAG;
+ if (i >= CPCACHE_INDEX_TAG && i < climit + CPCACHE_INDEX_TAG) {
+ i -= CPCACHE_INDEX_TAG;
+ } else {
+ st->print_cr(" CP[%d] missing bias?", i);
+ return false;
+ }
+ }
+#endif //ASSERT
if (i >= 0 && i < climit) {
if (cache->entry_at(i)->is_secondary_entry()) {
st->print_cr(" secondary entry?");
@@ -248,7 +265,7 @@ bool BytecodePrinter::check_index(int i, bool in_cp_cache, int& cp_index, output
void BytecodePrinter::print_constant(int i, outputStream* st) {
int orig_i = i;
- if (!check_index(orig_i, false, i, st)) return;
+ if (!check_index(orig_i, i, st)) return;
constantPoolOop constants = method()->constants();
constantTag tag = constants->tag_at(i);
@@ -279,7 +296,7 @@ void BytecodePrinter::print_constant(int i, outputStream* st) {
void BytecodePrinter::print_field_or_method(int i, outputStream* st) {
int orig_i = i;
- if (!check_index(orig_i, true, i, st)) return;
+ if (!check_index(orig_i, i, st)) return;
constantPoolOop constants = method()->constants();
constantTag tag = constants->tag_at(i);
@@ -303,9 +320,9 @@ void BytecodePrinter::print_field_or_method(int i, outputStream* st) {
}
-void BytecodePrinter::print_attributes(Bytecodes::Code code, int bci, outputStream* st) {
+void BytecodePrinter::print_attributes(int bci, outputStream* st) {
// Show attributes of pre-rewritten codes
- code = Bytecodes::java_code(code);
+ Bytecodes::Code code = Bytecodes::java_code(raw_code());
// If the code doesn't have any fields there's nothing to print.
// note this is ==1 because the tableswitch and lookupswitch are
// zero size (for some reason) and we want to print stuff out for them.
@@ -323,12 +340,12 @@ void BytecodePrinter::print_attributes(Bytecodes::Code code, int bci, outputStre
st->print_cr(" " INT32_FORMAT, get_short());
break;
case Bytecodes::_ldc:
- print_constant(get_index(), st);
+ print_constant(get_index_u1(), st);
break;
case Bytecodes::_ldc_w:
case Bytecodes::_ldc2_w:
- print_constant(get_big_index(), st);
+ print_constant(get_index_u2(), st);
break;
case Bytecodes::_iload:
@@ -352,7 +369,7 @@ void BytecodePrinter::print_attributes(Bytecodes::Code code, int bci, outputStre
break;
case Bytecodes::_newarray: {
- BasicType atype = (BasicType)get_index();
+ BasicType atype = (BasicType)get_index_u1();
const char* str = type2name(atype);
if (str == NULL || atype == T_OBJECT || atype == T_ARRAY) {
assert(false, "Unidentified basic type");
@@ -361,15 +378,15 @@ void BytecodePrinter::print_attributes(Bytecodes::Code code, int bci, outputStre
}
break;
case Bytecodes::_anewarray: {
- int klass_index = get_big_index();
+ int klass_index = get_index_u2();
constantPoolOop constants = method()->constants();
symbolOop name = constants->klass_name_at(klass_index);
st->print_cr(" %s ", name->as_C_string());
}
break;
case Bytecodes::_multianewarray: {
- int klass_index = get_big_index();
- int nof_dims = get_index();
+ int klass_index = get_index_u2();
+ int nof_dims = get_index_u1();
constantPoolOop constants = method()->constants();
symbolOop name = constants->klass_name_at(klass_index);
st->print_cr(" %s %d", name->as_C_string(), nof_dims);
@@ -451,31 +468,31 @@ void BytecodePrinter::print_attributes(Bytecodes::Code code, int bci, outputStre
case Bytecodes::_getstatic:
case Bytecodes::_putfield:
case Bytecodes::_getfield:
- print_field_or_method(get_big_index(), st);
+ print_field_or_method(get_index_u2_cpcache(), st);
break;
case Bytecodes::_invokevirtual:
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
- print_field_or_method(get_big_index(), st);
+ print_field_or_method(get_index_u2_cpcache(), st);
break;
case Bytecodes::_invokeinterface:
- { int i = get_big_index();
- int n = get_index();
- get_index(); // ignore zero byte
+ { int i = get_index_u2_cpcache();
+ int n = get_index_u1();
+ get_byte(); // ignore zero byte
print_field_or_method(i, st);
}
break;
case Bytecodes::_invokedynamic:
- print_field_or_method(get_giant_index(), st);
+ print_field_or_method(get_index_u4(), st);
break;
case Bytecodes::_new:
case Bytecodes::_checkcast:
case Bytecodes::_instanceof:
- { int i = get_big_index();
+ { int i = get_index_u2();
constantPoolOop constants = method()->constants();
symbolOop name = constants->klass_name_at(i);
st->print_cr(" %d <%s>", i, name->as_C_string());
diff --git a/hotspot/src/share/vm/interpreter/bytecodes.cpp b/hotspot/src/share/vm/interpreter/bytecodes.cpp
index 1c8887170f7..59d9b0e80d9 100644
--- a/hotspot/src/share/vm/interpreter/bytecodes.cpp
+++ b/hotspot/src/share/vm/interpreter/bytecodes.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,14 +37,11 @@
bool Bytecodes::_is_initialized = false;
const char* Bytecodes::_name [Bytecodes::number_of_codes];
-const char* Bytecodes::_format [Bytecodes::number_of_codes];
-const char* Bytecodes::_wide_format [Bytecodes::number_of_codes];
BasicType Bytecodes::_result_type [Bytecodes::number_of_codes];
s_char Bytecodes::_depth [Bytecodes::number_of_codes];
-u_char Bytecodes::_length [Bytecodes::number_of_codes];
-bool Bytecodes::_can_trap [Bytecodes::number_of_codes];
+u_char Bytecodes::_lengths [Bytecodes::number_of_codes];
Bytecodes::Code Bytecodes::_java_code [Bytecodes::number_of_codes];
-bool Bytecodes::_can_rewrite [Bytecodes::number_of_codes];
+u_short Bytecodes::_flags [(1<<BitsPerByte)*2];
return (len > 0 && len == (int)len) ? len : -1;
}
}
+ // Note: Length functions must return <=0 for invalid bytecodes.
return 0;
}
@@ -124,15 +122,22 @@ void Bytecodes::def(Code code, const char* name, const char* format, const char*
void Bytecodes::def(Code code, const char* name, const char* format, const char* wide_format, BasicType result_type, int depth, bool can_trap, Code java_code) {
assert(wide_format == NULL || format != NULL, "short form must exist if there's a wide form");
+ int len = (format != NULL ? (int) strlen(format) : 0);
+ int wlen = (wide_format != NULL ? (int) strlen(wide_format) : 0);
_name [code] = name;
- _format [code] = format;
- _wide_format [code] = wide_format;
_result_type [code] = result_type;
_depth [code] = depth;
- _can_trap [code] = can_trap;
- _length [code] = format != NULL ? (u_char)strlen(format) : 0;
+ _lengths [code] = (wlen << 4) | (len & 0xF);
_java_code [code] = java_code;
- if (java_code != code) _can_rewrite[java_code] = true;
+ int bc_flags = 0;
+ if (can_trap) bc_flags |= _bc_can_trap;
+ if (java_code != code) bc_flags |= _bc_can_rewrite;
+ _flags[(u1)code+0*(1<<BitsPerByte)] = compute_flags(format, bc_flags);
+ static int wide_length_for(Code code) { return _lengths[code] >> 4; }
+ static bool can_trap (Code code) { check(code); return has_all_flags(code, _bc_can_trap, false); }
static Code java_code (Code code) { check(code); return _java_code [code]; }
- static bool can_rewrite (Code code) { check(code); return _can_rewrite [code]; }
- static int wide_length_for(Code code) {
- if (!is_defined(code)) {
- return 0;
- }
- const char* wf = wide_format(code);
- return (wf == NULL) ? 0 : (int)strlen(wf);
- }
+ static bool can_rewrite (Code code) { check(code); return has_all_flags(code, _bc_can_rewrite, false); }
+ static bool native_byte_order(Code code) { check(code); return has_all_flags(code, _fmt_has_nbo, false); }
+ static bool uses_cp_cache (Code code) { check(code); return has_all_flags(code, _fmt_has_j, false); }
// if 'end' is provided, it indicates the end of the code buffer which
// should not be read past when parsing.
static int special_length_at(address bcp, address end = NULL);
@@ -355,6 +377,16 @@ class Bytecodes: AllStatic {
static bool is_zero_const (Code code) { return (code == _aconst_null || code == _iconst_0
|| code == _fconst_0 || code == _dconst_0); }
+ static int compute_flags (const char* format, int more_flags = 0); // compute the flags
+ static int flags (int code, bool is_wide) {
+ assert(code == (u_char)code, "must be a byte");
+ return _flags[code + (is_wide ? (1<<BitsPerByte) : 0)];
address bcp = method->bcp_from(bci);
+ Bytecodes::Code code = Bytecodes::code_at(bcp, method());
- if (!Bytecode_at(bcp)->must_rewrite()) {
+ if (!Bytecode_at(bcp)->must_rewrite(code)) {
// might have been reached
return false;
}
diff --git a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp
index ace796404e4..3c7009e7ea3 100644
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp
@@ -63,7 +63,7 @@ void InterpreterRuntime::set_bcp_and_mdp(address bcp, JavaThread *thread) {
IRT_ENTRY(void, InterpreterRuntime::ldc(JavaThread* thread, bool wide))
// access constant pool
constantPoolOop pool = method(thread)->constants();
- int index = wide ? two_byte_index(thread) : one_byte_index(thread);
+ int index = wide ? get_index_u2(thread, Bytecodes::_ldc_w) : get_index_u1(thread, Bytecodes::_ldc);
constantTag tag = pool->tag_at(index);
if (tag.is_unresolved_klass() || tag.is_klass()) {
@@ -135,7 +135,7 @@ IRT_END
IRT_ENTRY(void, InterpreterRuntime::multianewarray(JavaThread* thread, jint* first_size_address))
// We may want to pass in more arguments - could make this slightly faster
constantPoolOop constants = method(thread)->constants();
- int i = two_byte_index(thread);
+ int i = get_index_u2(thread, Bytecodes::_multianewarray);
klassOop klass = constants->klass_at(i, CHECK);
int nof_dims = number_of_dimensions(thread);
assert(oop(klass)->is_klass(), "not a class");
@@ -169,7 +169,7 @@ IRT_END
// Quicken instance-of and check-cast bytecodes
IRT_ENTRY(void, InterpreterRuntime::quicken_io_cc(JavaThread* thread))
// Force resolving; quicken the bytecode
- int which = two_byte_index(thread);
+ int which = get_index_u2(thread, Bytecodes::_checkcast);
constantPoolOop cpool = method(thread)->constants();
// We'd expect to assert that we're only here to quicken bytecodes, but in a multithreaded
// program we might have seen an unquick'd bytecode in the interpreter but have another
@@ -463,7 +463,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecode
{
JvmtiHideSingleStepping jhss(thread);
- LinkResolver::resolve_field(info, pool, two_byte_index(thread),
+ LinkResolver::resolve_field(info, pool, get_index_u2_cpcache(thread, bytecode),
bytecode, false, CHECK);
} // end JvmtiHideSingleStepping
@@ -634,7 +634,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes
{
JvmtiHideSingleStepping jhss(thread);
LinkResolver::resolve_invoke(info, receiver, pool,
- two_byte_index(thread), bytecode, CHECK);
+ get_index_u2_cpcache(thread, bytecode), bytecode, CHECK);
if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
int retry_count = 0;
while (info.resolved_method()->is_old()) {
@@ -645,7 +645,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes
"Could not resolve to latest version of redefined method");
// method is redefined in the middle of resolve so re-try.
LinkResolver::resolve_invoke(info, receiver, pool,
- two_byte_index(thread), bytecode, CHECK);
+ get_index_u2_cpcache(thread, bytecode), bytecode, CHECK);
}
}
} // end JvmtiHideSingleStepping
@@ -704,7 +704,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) {
caller_bci = caller_method->bci_from(caller_bcp);
site_index = Bytes::get_native_u4(caller_bcp+1);
}
- assert(site_index == four_byte_index(thread), "");
+ assert(site_index == InterpreterRuntime::bytecode(thread)->get_index_u4(bytecode), "");
assert(constantPoolCacheOopDesc::is_secondary_index(site_index), "proper format");
// there is a second CPC entries that is of interest; it caches signature info:
int main_index = pool->cache()->secondary_entry_at(site_index)->main_entry_index();
diff --git a/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp b/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp
index 7bd284472ac..3d36a4524b8 100644
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,9 +40,13 @@ class InterpreterRuntime: AllStatic {
return Bytecodes::code_at(bcp(thread), method(thread));
}
static bool already_resolved(JavaThread *thread) { return cache_entry(thread)->is_resolved(code(thread)); }
- static int one_byte_index(JavaThread *thread) { return bcp(thread)[1]; }
- static int two_byte_index(JavaThread *thread) { return Bytes::get_Java_u2(bcp(thread) + 1); }
- static int four_byte_index(JavaThread *thread) { return Bytes::get_native_u4(bcp(thread) + 1); }
+ static Bytecode* bytecode(JavaThread *thread) { return Bytecode_at(bcp(thread)); }
+ static int get_index_u1(JavaThread *thread, Bytecodes::Code bc)
+ { return bytecode(thread)->get_index_u1(bc); }
+ static int get_index_u2(JavaThread *thread, Bytecodes::Code bc)
+ { return bytecode(thread)->get_index_u2(bc); }
+ static int get_index_u2_cpcache(JavaThread *thread, Bytecodes::Code bc)
+ { return bytecode(thread)->get_index_u2_cpcache(bc); }
static int number_of_dimensions(JavaThread *thread) { return bcp(thread)[3]; }
static ConstantPoolCacheEntry* cache_entry_at(JavaThread *thread, int i) { return method(thread)->constants()->cache()->entry_at(i); }
diff --git a/hotspot/src/share/vm/interpreter/rewriter.cpp b/hotspot/src/share/vm/interpreter/rewriter.cpp
index 5cca8eb8d5a..815bb22af5f 100644
--- a/hotspot/src/share/vm/interpreter/rewriter.cpp
+++ b/hotspot/src/share/vm/interpreter/rewriter.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -103,16 +103,15 @@ void Rewriter::rewrite_Object_init(methodHandle method, TRAPS) {
// Rewrite a classfile-order CP index into a native-order CPC index.
-int Rewriter::rewrite_member_reference(address bcp, int offset) {
+void Rewriter::rewrite_member_reference(address bcp, int offset) {
address p = bcp + offset;
int cp_index = Bytes::get_Java_u2(p);
int cache_index = cp_entry_to_cp_cache(cp_index);
Bytes::put_native_u2(p, cache_index);
- return cp_index;
}
-void Rewriter::rewrite_invokedynamic(address bcp, int offset, int delete_me) {
+void Rewriter::rewrite_invokedynamic(address bcp, int offset) {
address p = bcp + offset;
assert(p[-1] == Bytecodes::_invokedynamic, "");
int cp_index = Bytes::get_Java_u2(p);
@@ -178,7 +177,7 @@ void Rewriter::scan_method(methodOop method) {
case Bytecodes::_lookupswitch : {
#ifndef CC_INTERP
Bytecode_lookupswitch* bc = Bytecode_lookupswitch_at(bcp);
- bc->set_code(
+ (*bcp) = (
bc->number_of_pairs() < BinarySwitchThreshold
? Bytecodes::_fast_linearswitch
: Bytecodes::_fast_binaryswitch
@@ -197,7 +196,7 @@ void Rewriter::scan_method(methodOop method) {
rewrite_member_reference(bcp, prefix_length+1);
break;
case Bytecodes::_invokedynamic:
- rewrite_invokedynamic(bcp, prefix_length+1, int(sizeof"@@@@DELETE ME"));
+ rewrite_invokedynamic(bcp, prefix_length+1);
break;
case Bytecodes::_jsr : // fall through
case Bytecodes::_jsr_w : nof_jsrs++; break;
@@ -308,5 +307,19 @@ Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, objArray
// Set up method entry points for compiler and interpreter.
m->link_method(m, CHECK);
+
+#ifdef ASSERT
+ if (StressMethodComparator) {
+ static int nmc = 0;
+ for (int j = i; j >= 0 && j >= i-4; j--) {
+ if ((++nmc % 1000) == 0) tty->print_cr("Have run MethodComparator %d times...", nmc);
+ bool z = MethodComparator::methods_EMCP(m(), (methodOop)_methods->obj_at(j));
+ if (j == i && !z) {
+ tty->print("MethodComparator FAIL: "); m->print(); m->print_codes();
+ assert(z, "method must compare equal to itself");
+ }
+ }
+ }
+#endif //ASSERT
}
}
diff --git a/hotspot/src/share/vm/interpreter/rewriter.hpp b/hotspot/src/share/vm/interpreter/rewriter.hpp
index 68cc9d76c64..0135f76361c 100644
--- a/hotspot/src/share/vm/interpreter/rewriter.hpp
+++ b/hotspot/src/share/vm/interpreter/rewriter.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -64,8 +64,8 @@ class Rewriter: public StackObj {
void scan_method(methodOop m);
methodHandle rewrite_jsrs(methodHandle m, TRAPS);
void rewrite_Object_init(methodHandle m, TRAPS);
- int rewrite_member_reference(address bcp, int offset);
- void rewrite_invokedynamic(address bcp, int offset, int cp_index);
+ void rewrite_member_reference(address bcp, int offset);
+ void rewrite_invokedynamic(address bcp, int offset);
public:
// Driver routine:
diff --git a/hotspot/src/share/vm/interpreter/templateTable.cpp b/hotspot/src/share/vm/interpreter/templateTable.cpp
index c392eda779d..6775897952d 100644
--- a/hotspot/src/share/vm/interpreter/templateTable.cpp
+++ b/hotspot/src/share/vm/interpreter/templateTable.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -434,15 +434,15 @@ void TemplateTable::initialize() {
def(Bytecodes::_dreturn , ____|disp|clvm|____, dtos, dtos, _return , dtos );
def(Bytecodes::_areturn , ____|disp|clvm|____, atos, atos, _return , atos );
def(Bytecodes::_return , ____|disp|clvm|____, vtos, vtos, _return , vtos );
- def(Bytecodes::_getstatic , ubcp|____|clvm|____, vtos, vtos, getstatic , 1 );
- def(Bytecodes::_putstatic , ubcp|____|clvm|____, vtos, vtos, putstatic , 2 );
- def(Bytecodes::_getfield , ubcp|____|clvm|____, vtos, vtos, getfield , 1 );
- def(Bytecodes::_putfield , ubcp|____|clvm|____, vtos, vtos, putfield , 2 );
- def(Bytecodes::_invokevirtual , ubcp|disp|clvm|____, vtos, vtos, invokevirtual , 2 );
- def(Bytecodes::_invokespecial , ubcp|disp|clvm|____, vtos, vtos, invokespecial , 1 );
- def(Bytecodes::_invokestatic , ubcp|disp|clvm|____, vtos, vtos, invokestatic , 1 );
- def(Bytecodes::_invokeinterface , ubcp|disp|clvm|____, vtos, vtos, invokeinterface , 1 );
- def(Bytecodes::_invokedynamic , ubcp|disp|clvm|____, vtos, vtos, invokedynamic , 1 );
+ def(Bytecodes::_getstatic , ubcp|____|clvm|____, vtos, vtos, getstatic , f1_byte );
+ def(Bytecodes::_putstatic , ubcp|____|clvm|____, vtos, vtos, putstatic , f2_byte );
+ def(Bytecodes::_getfield , ubcp|____|clvm|____, vtos, vtos, getfield , f1_byte );
+ def(Bytecodes::_putfield , ubcp|____|clvm|____, vtos, vtos, putfield , f2_byte );
+ def(Bytecodes::_invokevirtual , ubcp|disp|clvm|____, vtos, vtos, invokevirtual , f2_byte );
+ def(Bytecodes::_invokespecial , ubcp|disp|clvm|____, vtos, vtos, invokespecial , f1_byte );
+ def(Bytecodes::_invokestatic , ubcp|disp|clvm|____, vtos, vtos, invokestatic , f1_byte );
+ def(Bytecodes::_invokeinterface , ubcp|disp|clvm|____, vtos, vtos, invokeinterface , f1_byte );
+ def(Bytecodes::_invokedynamic , ubcp|disp|clvm|____, vtos, vtos, invokedynamic , f1_oop );
def(Bytecodes::_new , ubcp|____|clvm|____, vtos, atos, _new , _ );
def(Bytecodes::_newarray , ubcp|____|clvm|____, itos, atos, newarray , _ );
def(Bytecodes::_anewarray , ubcp|____|clvm|____, itos, atos, anewarray , _ );
@@ -502,7 +502,7 @@ void TemplateTable::initialize() {
def(Bytecodes::_fast_iload2 , ubcp|____|____|____, vtos, itos, fast_iload2 , _ );
def(Bytecodes::_fast_icaload , ubcp|____|____|____, vtos, itos, fast_icaload , _ );
- def(Bytecodes::_fast_invokevfinal , ubcp|disp|clvm|____, vtos, vtos, fast_invokevfinal , 2 );
+ def(Bytecodes::_fast_invokevfinal , ubcp|disp|clvm|____, vtos, vtos, fast_invokevfinal , f2_byte );
def(Bytecodes::_fast_linearswitch , ubcp|disp|____|____, itos, vtos, fast_linearswitch , _ );
def(Bytecodes::_fast_binaryswitch , ubcp|disp|____|____, itos, vtos, fast_binaryswitch , _ );
diff --git a/hotspot/src/share/vm/interpreter/templateTable.hpp b/hotspot/src/share/vm/interpreter/templateTable.hpp
index 9a455c076c6..16145d63321 100644
--- a/hotspot/src/share/vm/interpreter/templateTable.hpp
+++ b/hotspot/src/share/vm/interpreter/templateTable.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -73,6 +73,7 @@ class TemplateTable: AllStatic {
public:
enum Operation { add, sub, mul, div, rem, _and, _or, _xor, shl, shr, ushr };
enum Condition { equal, not_equal, less, less_equal, greater, greater_equal };
+ enum CacheByte { f1_byte = 1, f2_byte = 2, f1_oop = 0x11 }; // byte_no codes
private:
static bool _is_initialized; // true if TemplateTable has been initialized
@@ -244,13 +245,18 @@ class TemplateTable: AllStatic {
static void _return(TosState state);
- static void resolve_cache_and_index(int byte_no, Register cache, Register index);
+ static void resolve_cache_and_index(int byte_no, // one of 1,2,11
+ Register result , // either noreg or output for f1/f2
+ Register cache, // output for CP cache
+ Register index, // output for CP index
+ size_t index_size); // one of 1,2,4
static void load_invoke_cp_cache_entry(int byte_no,
Register method,
Register itable_index,
Register flags,
- bool is_invokevirtual = false,
- bool is_virtual_final = false);
+ bool is_invokevirtual,
+ bool is_virtual_final,
+ bool is_invokedynamic);
static void load_field_cp_cache_entry(Register obj,
Register cache,
Register index,
diff --git a/hotspot/src/share/vm/memory/iterator.cpp b/hotspot/src/share/vm/memory/iterator.cpp
index 52ff5a393b9..683a1e3ab1f 100644
--- a/hotspot/src/share/vm/memory/iterator.cpp
+++ b/hotspot/src/share/vm/memory/iterator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -58,8 +58,8 @@ MarkingCodeBlobClosure::MarkScope::~MarkScope() {
}
void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) {
- if (!cb->is_nmethod()) return;
- nmethod* nm = (nmethod*) cb;
+ nmethod* nm = cb->as_nmethod_or_null();
+ if (nm == NULL) return;
if (!nm->test_set_oops_do_mark()) {
NOT_PRODUCT(if (TraceScavenge) nm->print_on(tty, "oops_do, 1st visit\n"));
do_newly_marked_nmethod(nm);
@@ -74,11 +74,14 @@ void CodeBlobToOopClosure::do_newly_marked_nmethod(nmethod* nm) {
void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
if (!_do_marking) {
- NOT_PRODUCT(if (TraceScavenge && Verbose && cb->is_nmethod()) ((nmethod*)cb)->print_on(tty, "oops_do, unmarked visit\n"));
+ nmethod* nm = cb->as_nmethod_or_null();
+ NOT_PRODUCT(if (TraceScavenge && Verbose && nm != NULL) nm->print_on(tty, "oops_do, unmarked visit\n"));
// This assert won't work, since there are lots of mini-passes
// (mostly in debug mode) that co-exist with marking phases.
//assert(!(cb->is_nmethod() && ((nmethod*)cb)->test_oops_do_mark()), "found marked nmethod during mark-free phase");
- cb->oops_do(_cl);
+ if (nm != NULL) {
+ nm->oops_do(_cl);
+ }
} else {
MarkingCodeBlobClosure::do_code_blob(cb);
}
diff --git a/hotspot/src/share/vm/memory/space.cpp b/hotspot/src/share/vm/memory/space.cpp
index 3dc2361998b..f20d82c9b8a 100644
--- a/hotspot/src/share/vm/memory/space.cpp
+++ b/hotspot/src/share/vm/memory/space.cpp
@@ -861,9 +861,9 @@ void ContiguousSpace::allocate_temporary_filler(int factor) {
}
size = align_object_size(size);
- const size_t min_int_array_size = typeArrayOopDesc::header_size(T_INT);
- if (size >= min_int_array_size) {
- size_t length = (size - min_int_array_size) * (HeapWordSize / sizeof(jint));
+ const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
+ if (size >= (size_t)align_object_size(array_header_size)) {
+ size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
// allocate uninitialized int array
typeArrayOop t = (typeArrayOop) allocate(size);
assert(t != NULL, "allocation should succeed");
@@ -871,7 +871,7 @@ void ContiguousSpace::allocate_temporary_filler(int factor) {
t->set_klass(Universe::intArrayKlassObj());
t->set_length((int)length);
} else {
- assert((int) size == instanceOopDesc::header_size(),
+ assert(size == CollectedHeap::min_fill_size(),
"size for smallest fake object doesn't match");
instanceOop obj = (instanceOop) allocate(size);
obj->set_mark(markOopDesc::prototype());
diff --git a/hotspot/src/share/vm/memory/threadLocalAllocBuffer.inline.hpp b/hotspot/src/share/vm/memory/threadLocalAllocBuffer.inline.hpp
index 1a6a4d1be9b..712abe43729 100644
--- a/hotspot/src/share/vm/memory/threadLocalAllocBuffer.inline.hpp
+++ b/hotspot/src/share/vm/memory/threadLocalAllocBuffer.inline.hpp
@@ -31,7 +31,7 @@ inline HeapWord* ThreadLocalAllocBuffer::allocate(size_t size) {
// Skip mangling the space corresponding to the object header to
// ensure that the returned space is not considered parsable by
// any concurrent GC thread.
- size_t hdr_size = CollectedHeap::min_fill_size();
+ size_t hdr_size = oopDesc::header_size();
Copy::fill_to_words(obj + hdr_size, size - hdr_size, badHeapWordVal);
#endif // ASSERT
// This addition is safe because we know that top is
diff --git a/hotspot/src/share/vm/memory/universe.cpp b/hotspot/src/share/vm/memory/universe.cpp
index a5762c0232e..f0595761a8b 100644
--- a/hotspot/src/share/vm/memory/universe.cpp
+++ b/hotspot/src/share/vm/memory/universe.cpp
@@ -748,7 +748,7 @@ jint universe_init() {
// 4Gb
static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1);
// 32Gb
-static const uint64_t OopEncodingHeapMax = NarrowOopHeapMax << LogMinObjAlignmentInBytes;
+// OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes;
char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
size_t base = 0;
@@ -1261,7 +1261,7 @@ static void calculate_verify_data(uintptr_t verify_data[2],
// decide which low-order bits we require to be clear:
size_t alignSize = MinObjAlignmentInBytes;
- size_t min_object_size = oopDesc::header_size();
+ size_t min_object_size = CollectedHeap::min_fill_size();
// make an inclusive limit:
uintptr_t max = (uintptr_t)high_boundary - min_object_size*wordSize;
diff --git a/hotspot/src/share/vm/oops/arrayOop.hpp b/hotspot/src/share/vm/oops/arrayOop.hpp
index b0a8530ec29..c1ad4147494 100644
--- a/hotspot/src/share/vm/oops/arrayOop.hpp
+++ b/hotspot/src/share/vm/oops/arrayOop.hpp
@@ -92,7 +92,7 @@ class arrayOopDesc : public oopDesc {
static int header_size(BasicType type) {
size_t typesize_in_bytes = header_size_in_bytes();
return (int)(Universe::element_type_should_be_aligned(type)
- ? align_object_size(typesize_in_bytes/HeapWordSize)
+ ? align_object_offset(typesize_in_bytes/HeapWordSize)
: typesize_in_bytes/HeapWordSize);
}
diff --git a/hotspot/src/share/vm/oops/constantPoolKlass.cpp b/hotspot/src/share/vm/oops/constantPoolKlass.cpp
index c13d076921a..87c5ed3fe3a 100644
--- a/hotspot/src/share/vm/oops/constantPoolKlass.cpp
+++ b/hotspot/src/share/vm/oops/constantPoolKlass.cpp
@@ -310,15 +310,12 @@ void constantPoolKlass::oop_print_on(oop obj, outputStream* st) {
Klass::oop_print_on(obj, st);
constantPoolOop cp = constantPoolOop(obj);
if (cp->flags() != 0) {
- st->print(" - flags : 0x%x", cp->flags());
+ st->print(" - flags: 0x%x", cp->flags());
if (cp->has_pseudo_string()) st->print(" has_pseudo_string");
if (cp->has_invokedynamic()) st->print(" has_invokedynamic");
st->cr();
}
-
- // Temp. remove cache so we can do lookups with original indicies.
- constantPoolCacheHandle cache (THREAD, cp->cache());
- cp->set_cache(NULL);
+ st->print_cr(" - cache: " INTPTR_FORMAT, cp->cache());
for (int index = 1; index < cp->length(); index++) { // Index 0 is unused
st->print(" - %3d : ", index);
@@ -334,8 +331,8 @@ void constantPoolKlass::oop_print_on(oop obj, outputStream* st) {
case JVM_CONSTANT_Fieldref :
case JVM_CONSTANT_Methodref :
case JVM_CONSTANT_InterfaceMethodref :
- st->print("klass_index=%d", cp->klass_ref_index_at(index));
- st->print(" name_and_type_index=%d", cp->name_and_type_ref_index_at(index));
+ st->print("klass_index=%d", cp->uncached_klass_ref_index_at(index));
+ st->print(" name_and_type_index=%d", cp->uncached_name_and_type_ref_index_at(index));
break;
case JVM_CONSTANT_UnresolvedString :
case JVM_CONSTANT_String :
@@ -382,9 +379,6 @@ void constantPoolKlass::oop_print_on(oop obj, outputStream* st) {
st->cr();
}
st->cr();
-
- // Restore cache
- cp->set_cache(cache());
}
#endif
@@ -398,6 +392,9 @@ void constantPoolKlass::oop_print_value_on(oop obj, outputStream* st) {
cp->print_address_on(st);
st->print(" for ");
cp->pool_holder()->print_value_on(st);
+ if (cp->cache() != NULL) {
+ st->print(" cache=" PTR_FORMAT, cp->cache());
+ }
}
const char* constantPoolKlass::internal_name() const {
diff --git a/hotspot/src/share/vm/oops/constantPoolOop.cpp b/hotspot/src/share/vm/oops/constantPoolOop.cpp
index ddd97f4b6c7..11b10e3d797 100644
--- a/hotspot/src/share/vm/oops/constantPoolOop.cpp
+++ b/hotspot/src/share/vm/oops/constantPoolOop.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -297,11 +297,9 @@ int constantPoolOopDesc::impl_klass_ref_index_at(int which, bool uncached) {
int constantPoolOopDesc::remap_instruction_operand_from_cache(int operand) {
- // Operand was fetched by a stream using get_Java_u2, yet was stored
- // by Rewriter::rewrite_member_reference in native order.
- // So now we have to fix the damage by swapping back to native order.
- assert((int)(u2)operand == operand, "clean u2");
- int cpc_index = Bytes::swap_u2(operand);
+ int cpc_index = operand;
+ DEBUG_ONLY(cpc_index -= CPCACHE_INDEX_TAG);
+ assert((int)(u2)cpc_index == cpc_index, "clean u2");
int member_index = cache()->entry_at(cpc_index)->constant_pool_index();
return member_index;
}
diff --git a/hotspot/src/share/vm/oops/constantPoolOop.hpp b/hotspot/src/share/vm/oops/constantPoolOop.hpp
index 53bf61e1dbc..8609d7ace7f 100644
--- a/hotspot/src/share/vm/oops/constantPoolOop.hpp
+++ b/hotspot/src/share/vm/oops/constantPoolOop.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -434,6 +434,10 @@ class constantPoolOopDesc : public oopDesc {
// Debugging
const char* printable_name_at(int which) PRODUCT_RETURN0;
+#ifdef ASSERT
+ enum { CPCACHE_INDEX_TAG = 0x10000 }; // helps keep CP cache indices distinct from CP indices
+#endif //ASSERT
+
private:
symbolOop impl_name_ref_at(int which, bool uncached);
@@ -441,7 +445,7 @@ class constantPoolOopDesc : public oopDesc {
int impl_klass_ref_index_at(int which, bool uncached);
int impl_name_and_type_ref_index_at(int which, bool uncached);
- int remap_instruction_operand_from_cache(int operand);
+ int remap_instruction_operand_from_cache(int operand); // operand must be biased by CPCACHE_INDEX_TAG
// Used while constructing constant pool (only by ClassFileParser)
jint klass_index_at(int which) {
diff --git a/hotspot/src/share/vm/oops/generateOopMap.cpp b/hotspot/src/share/vm/oops/generateOopMap.cpp
index fea2a0a4c9a..952e2661ef9 100644
--- a/hotspot/src/share/vm/oops/generateOopMap.cpp
+++ b/hotspot/src/share/vm/oops/generateOopMap.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1254,7 +1254,7 @@ void GenerateOopMap::print_current_state(outputStream *os,
case Bytecodes::_invokestatic:
case Bytecodes::_invokedynamic:
case Bytecodes::_invokeinterface:
- int idx = currentBC->get_index_int();
+ int idx = currentBC->has_index_u4() ? currentBC->get_index_u4() : currentBC->get_index_u2();
constantPoolOop cp = method()->constants();
int nameAndTypeIdx = cp->name_and_type_ref_index_at(idx);
int signatureIdx = cp->signature_ref_index_at(nameAndTypeIdx);
@@ -1286,7 +1286,7 @@ void GenerateOopMap::print_current_state(outputStream *os,
case Bytecodes::_invokestatic:
case Bytecodes::_invokedynamic:
case Bytecodes::_invokeinterface:
- int idx = currentBC->get_index_int();
+ int idx = currentBC->has_index_u4() ? currentBC->get_index_u4() : currentBC->get_index_u2();
constantPoolOop cp = method()->constants();
int nameAndTypeIdx = cp->name_and_type_ref_index_at(idx);
int signatureIdx = cp->signature_ref_index_at(nameAndTypeIdx);
@@ -1356,8 +1356,8 @@ void GenerateOopMap::interp1(BytecodeStream *itr) {
case Bytecodes::_ldc2_w: ppush(vvCTS); break;
- case Bytecodes::_ldc: do_ldc(itr->get_index(), itr->bci()); break;
- case Bytecodes::_ldc_w: do_ldc(itr->get_index_big(), itr->bci());break;
+ case Bytecodes::_ldc: do_ldc(itr->get_index(), itr->bci()); break;
+ case Bytecodes::_ldc_w: do_ldc(itr->get_index_u2(), itr->bci()); break;
case Bytecodes::_iload:
case Bytecodes::_fload: ppload(vCTS, itr->get_index()); break;
@@ -1550,17 +1550,17 @@ void GenerateOopMap::interp1(BytecodeStream *itr) {
case Bytecodes::_jsr_w: do_jsr(itr->dest_w()); break;
case Bytecodes::_getstatic: do_field(true, true,
- itr->get_index_big(),
+ itr->get_index_u2_cpcache(),
itr->bci()); break;
- case Bytecodes::_putstatic: do_field(false, true, itr->get_index_big(), itr->bci()); break;
- case Bytecodes::_getfield: do_field(true, false, itr->get_index_big(), itr->bci()); break;
- case Bytecodes::_putfield: do_field(false, false, itr->get_index_big(), itr->bci()); break;
+ case Bytecodes::_putstatic: do_field(false, true, itr->get_index_u2_cpcache(), itr->bci()); break;
+ case Bytecodes::_getfield: do_field(true, false, itr->get_index_u2_cpcache(), itr->bci()); break;
+ case Bytecodes::_putfield: do_field(false, false, itr->get_index_u2_cpcache(), itr->bci()); break;
case Bytecodes::_invokevirtual:
- case Bytecodes::_invokespecial: do_method(false, false, itr->get_index_big(), itr->bci()); break;
- case Bytecodes::_invokestatic: do_method(true, false, itr->get_index_big(), itr->bci()); break;
- case Bytecodes::_invokedynamic: do_method(true, false, itr->get_index_int(), itr->bci()); break;
- case Bytecodes::_invokeinterface: do_method(false, true, itr->get_index_big(), itr->bci()); break;
+ case Bytecodes::_invokespecial: do_method(false, false, itr->get_index_u2_cpcache(), itr->bci()); break;
+ case Bytecodes::_invokestatic: do_method(true, false, itr->get_index_u2_cpcache(), itr->bci()); break;
+ case Bytecodes::_invokedynamic: do_method(true, false, itr->get_index_u4(), itr->bci()); break;
+ case Bytecodes::_invokeinterface: do_method(false, true, itr->get_index_u2_cpcache(), itr->bci()); break;
case Bytecodes::_newarray:
case Bytecodes::_anewarray: pp_new_ref(vCTS, itr->bci()); break;
case Bytecodes::_checkcast: do_checkcast(); break;
diff --git a/hotspot/src/share/vm/oops/methodKlass.cpp b/hotspot/src/share/vm/oops/methodKlass.cpp
index a664c4e8fab..f0ba4ad8413 100644
--- a/hotspot/src/share/vm/oops/methodKlass.cpp
+++ b/hotspot/src/share/vm/oops/methodKlass.cpp
@@ -237,7 +237,7 @@ void methodKlass::oop_print_on(oop obj, outputStream* st) {
Klass::oop_print_on(obj, st);
methodOop m = methodOop(obj);
// get the effect of PrintOopAddress, always, for methods:
- st->print (" - this oop: "INTPTR_FORMAT, (intptr_t)m);
+ st->print_cr(" - this oop: "INTPTR_FORMAT, (intptr_t)m);
st->print (" - method holder: "); m->method_holder()->print_value_on(st); st->cr();
st->print (" - constants: "INTPTR_FORMAT" ", (address)m->constants());
m->constants()->print_value_on(st); st->cr();
diff --git a/hotspot/src/share/vm/oops/oop.hpp b/hotspot/src/share/vm/oops/oop.hpp
index 63a03d32333..952802c7801 100644
--- a/hotspot/src/share/vm/oops/oop.hpp
+++ b/hotspot/src/share/vm/oops/oop.hpp
@@ -149,10 +149,6 @@ class oopDesc {
// Need this as public for garbage collection.
template <class T> T* obj_field_addr(int offset) const;
- // Oop encoding heap max
- static const uint64_t OopEncodingHeapMax =
- (uint64_t(max_juint) + 1) << LogMinObjAlignmentInBytes;
-
static bool is_null(oop obj);
static bool is_null(narrowOop obj);
diff --git a/hotspot/src/share/vm/oops/oop.inline.hpp b/hotspot/src/share/vm/oops/oop.inline.hpp
index 3336fb20a1e..c840f46f90b 100644
--- a/hotspot/src/share/vm/oops/oop.inline.hpp
+++ b/hotspot/src/share/vm/oops/oop.inline.hpp
@@ -146,8 +146,13 @@ inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }
// offset from the heap base. Saving the check for null can save instructions
// in inner GC loops so these are separated.
+inline bool check_obj_alignment(oop obj) {
+ return (intptr_t)obj % MinObjAlignmentInBytes == 0;
+}
+
inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
assert(!is_null(v), "oop value can never be zero");
+ assert(check_obj_alignment(v), "Address not aligned");
assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
address base = Universe::narrow_oop_base();
int shift = Universe::narrow_oop_shift();
@@ -167,7 +172,9 @@ inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
assert(!is_null(v), "narrow oop value can never be zero");
address base = Universe::narrow_oop_base();
int shift = Universe::narrow_oop_shift();
- return (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
+ oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
+ assert(check_obj_alignment(result), "Address not aligned");
+ return result;
}
inline oop oopDesc::decode_heap_oop(narrowOop v) {
@@ -522,10 +529,6 @@ inline bool oopDesc::has_bias_pattern() const {
return mark()->has_bias_pattern();
}
-inline bool check_obj_alignment(oop obj) {
- return (intptr_t)obj % MinObjAlignmentInBytes == 0;
-}
-
// used only for asserts
inline bool oopDesc::is_oop(bool ignore_mark_word) const {
@@ -600,6 +603,8 @@ inline bool oopDesc::is_forwarded() const {
// Used by scavengers
inline void oopDesc::forward_to(oop p) {
+ assert(check_obj_alignment(p),
+ "forwarding to something not aligned");
assert(Universe::heap()->is_in_reserved(p),
"forwarding to something not in heap");
markOop m = markOopDesc::encode_pointer_as_mark(p);
@@ -609,6 +614,8 @@ inline void oopDesc::forward_to(oop p) {
// Used by parallel scavengers
inline bool oopDesc::cas_forward_to(oop p, markOop compare) {
+ assert(check_obj_alignment(p),
+ "forwarding to something not aligned");
assert(Universe::heap()->is_in_reserved(p),
"forwarding to something not in heap");
markOop m = markOopDesc::encode_pointer_as_mark(p);
diff --git a/hotspot/src/share/vm/opto/bytecodeInfo.cpp b/hotspot/src/share/vm/opto/bytecodeInfo.cpp
index cba1aa45311..b4178384b60 100644
--- a/hotspot/src/share/vm/opto/bytecodeInfo.cpp
+++ b/hotspot/src/share/vm/opto/bytecodeInfo.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -188,8 +188,8 @@ const char* InlineTree::shouldNotInline(ciMethod *callee_method, ciMethod* calle
return NULL;
}
- // Always inline MethodHandle methods.
- if (callee_method->is_method_handle_invoke())
+ // Always inline MethodHandle methods and generated MethodHandle adapters.
+ if (callee_method->is_method_handle_invoke() || callee_method->is_method_handle_adapter())
return NULL;
// First check all inlining restrictions which are required for correctness
@@ -340,7 +340,7 @@ bool pass_initial_checks(ciMethod* caller_method, int caller_bci, ciMethod* call
Bytecodes::Code call_bc = iter.cur_bc();
// An invokedynamic instruction does not have a klass.
if (call_bc != Bytecodes::_invokedynamic) {
- int index = iter.get_index_int();
+ int index = iter.get_index_u2_cpcache();
if (!caller_method->is_klass_loaded(index, true)) {
return false;
}
diff --git a/hotspot/src/share/vm/opto/compile.cpp b/hotspot/src/share/vm/opto/compile.cpp
index c8623700618..3152ef2e963 100644
--- a/hotspot/src/share/vm/opto/compile.cpp
+++ b/hotspot/src/share/vm/opto/compile.cpp
@@ -2176,14 +2176,14 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) {
#ifdef _LP64
case Op_CastPP:
- if (n->in(1)->is_DecodeN() && Universe::narrow_oop_use_implicit_null_checks()) {
+ if (n->in(1)->is_DecodeN() && Matcher::gen_narrow_oop_implicit_null_checks()) {
Compile* C = Compile::current();
Node* in1 = n->in(1);
const Type* t = n->bottom_type();
Node* new_in1 = in1->clone();
new_in1->as_DecodeN()->set_type(t);
- if (!Matcher::clone_shift_expressions) {
+ if (!Matcher::narrow_oop_use_complex_address()) {
//
// x86, ARM and friends can handle 2 adds in addressing mode
// and Matcher can fold a DecodeN node into address by using
@@ -2231,8 +2231,12 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) {
new_in2 = in2->in(1);
} else if (in2->Opcode() == Op_ConP) {
const Type* t = in2->bottom_type();
- if (t == TypePtr::NULL_PTR && Universe::narrow_oop_use_implicit_null_checks()) {
- new_in2 = ConNode::make(C, TypeNarrowOop::NULL_PTR);
+ if (t == TypePtr::NULL_PTR) {
+ // Don't convert CmpP null check into CmpN if compressed
+ // oops implicit null check is not generated.
+ // This will allow to generate normal oop implicit null check.
+ if (Matcher::gen_narrow_oop_implicit_null_checks())
+ new_in2 = ConNode::make(C, TypeNarrowOop::NULL_PTR);
//
// This transformation together with CastPP transformation above
// will generated code for implicit NULL checks for compressed oops.
@@ -2289,9 +2293,9 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) {
case Op_DecodeN:
assert(!n->in(1)->is_EncodeP(), "should be optimized out");
- // DecodeN could be pinned on Sparc where it can't be fold into
+ // DecodeN could be pinned when it can't be fold into
// an address expression, see the code for Op_CastPP above.
- assert(n->in(0) == NULL || !Matcher::clone_shift_expressions, "no control except on sparc");
+ assert(n->in(0) == NULL || !Matcher::narrow_oop_use_complex_address(), "no control");
break;
case Op_EncodeP: {
@@ -2496,6 +2500,10 @@ static void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Re
}
}
+ // Skip next transformation if compressed oops are not used.
+ if (!UseCompressedOops || !Matcher::gen_narrow_oop_implicit_null_checks())
+ return;
+
// Go over safepoints nodes to skip DecodeN nodes for debug edges.
// It could be done for an uncommon traps or any safepoints/calls
// if the DecodeN node is referenced only in a debug info.
diff --git a/hotspot/src/share/vm/opto/connode.cpp b/hotspot/src/share/vm/opto/connode.cpp
index 996d2fc57cb..3fd660a9fa8 100644
--- a/hotspot/src/share/vm/opto/connode.cpp
+++ b/hotspot/src/share/vm/opto/connode.cpp
@@ -437,7 +437,7 @@ Node *ConstraintCastNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
// If not converting int->oop, throw away cast after constant propagation
Node *CastPPNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
const Type *t = ccp->type(in(1));
- if (!t->isa_oop_ptr() || (in(1)->is_DecodeN() && Universe::narrow_oop_use_implicit_null_checks())) {
+ if (!t->isa_oop_ptr() || (in(1)->is_DecodeN() && Matcher::gen_narrow_oop_implicit_null_checks())) {
return NULL; // do not transform raw pointers or narrow oops
}
return ConstraintCastNode::Ideal_DU_postCCP(ccp);
diff --git a/hotspot/src/share/vm/opto/graphKit.cpp b/hotspot/src/share/vm/opto/graphKit.cpp
index 550e6f174d6..ce2fb4bb759 100644
--- a/hotspot/src/share/vm/opto/graphKit.cpp
+++ b/hotspot/src/share/vm/opto/graphKit.cpp
@@ -3487,7 +3487,6 @@ void GraphKit::g1_write_barrier_post(Node* oop_store,
Node* tls = __ thread(); // ThreadLocalStorage
- Node* no_ctrl = NULL;
Node* no_base = __ top();
float likely = PROB_LIKELY(0.999);
float unlikely = PROB_UNLIKELY(0.999);
@@ -3511,10 +3510,10 @@ void GraphKit::g1_write_barrier_post(Node* oop_store,
Node* index_adr = __ AddP(no_base, tls, __ ConX(index_offset));
// Now some values
-
- Node* index = __ load(no_ctrl, index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
- Node* buffer = __ load(no_ctrl, buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
-
+ // Use ctrl to avoid hoisting these values past a safepoint, which could
+ // potentially reset these fields in the JavaThread.
+ Node* index = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
+ Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
// Convert the store obj pointer to an int prior to doing math on it
// Must use ctrl to prevent "integerized oop" existing across safepoint
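The point of loading index and buffer off __ ctrl() rather than with no control input is that an uncontrolled load can be scheduled before a safepoint that may rewrite those JavaThread fields. A loosely analogous standalone sketch of the stale-read hazard (plain C++, hypothetical names, not compiler IR):

    // Standalone illustration (not compiler IR): a value must not be read "too
    // early" relative to an operation that can change it. The hypothetical
    // safepoint_like_operation() stands in for a safepoint resetting the fields.
    #include <cstdio>

    struct SATBQueue { int index; };   // hypothetical stand-in for the JavaThread fields

    static void safepoint_like_operation(SATBQueue& q) {
        q.index = 0;                    // the runtime may reset the queue here
    }

    int main() {
        SATBQueue q{16};
        int hoisted = q.index;          // like a load floated above the safepoint
        safepoint_like_operation(q);
        int ordered = q.index;          // like a load ordered after control
        printf("hoisted=%d ordered=%d\n", hoisted, ordered);   // hoisted is stale
        return 0;
    }

Pinning the loads to the current control is the IR-level equivalent of taking the "ordered" read.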
diff --git a/hotspot/src/share/vm/opto/lcm.cpp b/hotspot/src/share/vm/opto/lcm.cpp
index 6199c9b4ef1..0afde90166d 100644
--- a/hotspot/src/share/vm/opto/lcm.cpp
+++ b/hotspot/src/share/vm/opto/lcm.cpp
@@ -32,7 +32,8 @@
// with suitable memory ops nearby. Use the memory op to do the NULL check.
// I can generate a memory op if there is not one nearby.
// The proj is the control projection for the not-null case.
-// The val is the pointer being checked for nullness.
+// The val is the pointer being checked for nullness, or the
+// decodeHeapOop_not_null node if it did not fold into the address expression.
void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons) {
// Assume if null check need for 0 offset then always needed
// Intel solaris doesn't support any null checks yet and no
@@ -96,6 +97,13 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
}
}
+ // Check for decodeHeapOop_not_null node which did not fold into address
+ bool is_decoden = ((intptr_t)val) & 1;
+ val = (Node*)(((intptr_t)val) & ~1);
+
+ assert(!is_decoden || (val->in(0) == NULL) && val->is_Mach() &&
+ (val->as_Mach()->ideal_Opcode() == Op_DecodeN), "sanity");
+
// Search the successor block for a load or store who's base value is also
// the tested value. There may be several.
Node_List *out = new Node_List(Thread::current()->resource_area());
@@ -148,7 +156,8 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
if( !mach->needs_anti_dependence_check() )
continue; // Not an memory op; skip it
{
- // Check that value is used in memory address.
+ // Check that value is used in memory address in
+ // instructions with embedded load (CmpP val1,(val2+off)).
Node* base;
Node* index;
const MachOper* oper = mach->memory_inputs(base, index);
@@ -213,7 +222,11 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
uint vidx = 0; // Capture index of value into memop
uint j;
for( j = mach->req()-1; j > 0; j-- ) {
- if( mach->in(j) == val ) vidx = j;
+ if( mach->in(j) == val ) {
+ vidx = j;
+ // Ignore DecodeN val which could be hoisted to where needed.
+ if( is_decoden ) continue;
+ }
// Block of memory-op input
Block *inb = cfg->_bbs[mach->in(j)->_idx];
Block *b = this; // Start from nul check
@@ -270,6 +283,26 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
extern int implicit_null_checks;
implicit_null_checks++;
+ if( is_decoden ) {
+ // Check if we need to hoist decodeHeapOop_not_null first.
+ Block *valb = cfg->_bbs[val->_idx];
+ if( this != valb && this->_dom_depth < valb->_dom_depth ) {
+ // Hoist it up to the end of the test block.
+ valb->find_remove(val);
+ this->add_inst(val);
+ cfg->_bbs.map(val->_idx,this);
+ // DecodeN on x86 may kill flags. Check for flag-killing projections
+ // that also need to be hoisted.
+ for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
+ Node* n = val->fast_out(j);
+ if( n->Opcode() == Op_MachProj ) {
+ cfg->_bbs[n->_idx]->find_remove(n);
+ this->add_inst(n);
+ cfg->_bbs.map(n->_idx,this);
+ }
+ }
+ }
+ }
// Hoist the memory candidate up to the end of the test block.
Block *old_block = cfg->_bbs[best->_idx];
old_block->find_remove(best);
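implicit_null_check() now receives val with its low bit possibly set: Matcher::collect_null_checks() tags the pointer to flag the DecodeN special case, and the code above recovers the flag with & 1 and the real pointer with & ~1. This works because node pointers are word aligned, so bit 0 is never part of a real address. A minimal standalone sketch of the tagging idiom (illustrative names, not HotSpot code):

    // Standalone sketch of low-bit pointer tagging (not HotSpot code): aligned
    // pointers leave bit 0 free to carry a boolean flag alongside the pointer.
    #include <cstdint>
    #include <cstdio>

    struct Node { int id; };

    static Node* tag(Node* p)       { return (Node*)(((intptr_t)p) | 1); }
    static bool  is_tagged(Node* p) { return (((intptr_t)p) & 1) != 0; }
    static Node* untag(Node* p)     { return (Node*)(((intptr_t)p) & ~1); }

    int main() {
        Node n{42};
        Node* v = tag(&n);            // mark "this val is really a DecodeN"
        printf("tagged=%d id=%d\n", is_tagged(v), untag(v)->id);
        return 0;
    }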
diff --git a/hotspot/src/share/vm/opto/matcher.cpp b/hotspot/src/share/vm/opto/matcher.cpp
index eb59720ff5e..78931baa42a 100644
--- a/hotspot/src/share/vm/opto/matcher.cpp
+++ b/hotspot/src/share/vm/opto/matcher.cpp
@@ -1334,7 +1334,7 @@ static bool match_into_reg( const Node *n, Node *m, Node *control, int i, bool s
if( j == max_scan ) // No post-domination before scan end?
return true; // Then break the match tree up
}
- if (m->is_DecodeN() && Matcher::clone_shift_expressions) {
+ if (m->is_DecodeN() && Matcher::narrow_oop_use_complex_address()) {
// These are commonly used in address expressions and can
// efficiently fold into them on X64 in some cases.
return false;
@@ -2110,8 +2110,8 @@ void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
_null_check_tests.push(proj);
Node* val = cmp->in(1);
#ifdef _LP64
- if (UseCompressedOops && !Matcher::clone_shift_expressions &&
- val->bottom_type()->isa_narrowoop()) {
+ if (val->bottom_type()->isa_narrowoop() &&
+ !Matcher::narrow_oop_use_complex_address()) {
//
// Look for DecodeN node which should be pinned to orig_proj.
// On platforms (Sparc) which can not handle 2 adds
@@ -2127,6 +2127,9 @@ void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
if (d->is_DecodeN() && d->in(1) == val) {
val = d;
val->set_req(0, NULL); // Unpin now.
+          // Mark this as a special case to distinguish it from
+          // the regular case: CmpP(DecodeN, NULL).
+ val = (Node*)(((intptr_t)val) | 1);
break;
}
}
@@ -2146,9 +2149,21 @@ void Matcher::validate_null_checks( ) {
for( uint i=0; i < cnt; i+=2 ) {
Node *test = _null_check_tests[i];
Node *val = _null_check_tests[i+1];
+ bool is_decoden = ((intptr_t)val) & 1;
+ val = (Node*)(((intptr_t)val) & ~1);
if (has_new_node(val)) {
+ Node* new_val = new_node(val);
+ if (is_decoden) {
+ assert(val->is_DecodeN() && val->in(0) == NULL, "sanity");
+ // Note: new_val may have a control edge if
+ // the original ideal node DecodeN was matched before
+ // it was unpinned in Matcher::collect_null_checks().
+ // Unpin the mach node and mark it.
+ new_val->set_req(0, NULL);
+ new_val = (Node*)(((intptr_t)new_val) | 1);
+ }
// Is a match-tree root, so replace with the matched value
- _null_check_tests.map(i+1, new_node(val));
+ _null_check_tests.map(i+1, new_val);
} else {
// Yank from candidate list
_null_check_tests.map(i+1,_null_check_tests[--cnt]);
diff --git a/hotspot/src/share/vm/opto/matcher.hpp b/hotspot/src/share/vm/opto/matcher.hpp
index e4bcf567d0c..0badb1366b5 100644
--- a/hotspot/src/share/vm/opto/matcher.hpp
+++ b/hotspot/src/share/vm/opto/matcher.hpp
@@ -352,6 +352,38 @@ public:
// registers? True for Intel but false for most RISCs
static const bool clone_shift_expressions;
+ static bool narrow_oop_use_complex_address();
+
+  // Generate an implicit null check for narrow oops if it can fold
+  // into the address expression (x64).
+ //
+ // [R12 + narrow_oop_reg<<3 + offset] // fold into address expression
+ // NullCheck narrow_oop_reg
+ //
+  // When narrow oops can't fold into the address expression (Sparc) and
+  // the base is not null, use decode_not_null and a normal implicit null check.
+  // Note: the decode_not_null node can be used here since it is referenced
+  // only on the non-null path, but it requires special handling; see
+  // collect_null_checks():
+ //
+ // decode_not_null narrow_oop_reg, oop_reg // 'shift' and 'add base'
+ // [oop_reg + offset]
+ // NullCheck oop_reg
+ //
+  // With a zero base, and when narrow oops cannot fold into the address
+  // expression, use a normal implicit null check since only a shift
+  // is needed to decode the narrow oop.
+ //
+ // decode narrow_oop_reg, oop_reg // only 'shift'
+ // [oop_reg + offset]
+ // NullCheck oop_reg
+ //
+ inline static bool gen_narrow_oop_implicit_null_checks() {
+ return Universe::narrow_oop_use_implicit_null_checks() &&
+ (narrow_oop_use_complex_address() ||
+ Universe::narrow_oop_base() != NULL);
+ }
+
// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers. Most RISCs will have to materialize an address into a
diff --git a/hotspot/src/share/vm/opto/parse2.cpp b/hotspot/src/share/vm/opto/parse2.cpp
index 7ed0ae39a1c..e212e0d7ec2 100644
--- a/hotspot/src/share/vm/opto/parse2.cpp
+++ b/hotspot/src/share/vm/opto/parse2.cpp
@@ -1317,8 +1317,8 @@ void Parse::do_one_bytecode() {
case Bytecodes::_iconst_3: push(intcon( 3)); break;
case Bytecodes::_iconst_4: push(intcon( 4)); break;
case Bytecodes::_iconst_5: push(intcon( 5)); break;
- case Bytecodes::_bipush: push(intcon( iter().get_byte())); break;
- case Bytecodes::_sipush: push(intcon( iter().get_short())); break;
+ case Bytecodes::_bipush: push(intcon(iter().get_constant_u1())); break;
+ case Bytecodes::_sipush: push(intcon(iter().get_constant_u2())); break;
case Bytecodes::_aconst_null: push(null()); break;
case Bytecodes::_ldc:
case Bytecodes::_ldc_w:
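The switch to get_constant_u1()/get_constant_u2() makes the signedness of the bipush/sipush operands explicit: they are signed constants, not unsigned constant-pool indices. A standalone illustration of the distinction (not HotSpot code):

    // Standalone sketch (not HotSpot code): the same operand byte means -1 when
    // read as a signed constant and 255 when read as an unsigned index.
    #include <cstdint>
    #include <cstdio>

    int main() {
        uint8_t raw = 0xFF;                    // operand byte as it appears in bytecode
        int as_unsigned_index  = raw;          // index-style read
        int as_signed_constant = (int8_t)raw;  // constant-style read, sign-extended
        printf("unsigned=%d signed=%d\n", as_unsigned_index, as_signed_constant);
        return 0;
    }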
diff --git a/hotspot/src/share/vm/opto/superword.cpp b/hotspot/src/share/vm/opto/superword.cpp
index 248319382f6..be93c15ea8b 100644
--- a/hotspot/src/share/vm/opto/superword.cpp
+++ b/hotspot/src/share/vm/opto/superword.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -514,6 +514,13 @@ bool SuperWord::exists_at(Node* s, uint pos) {
bool SuperWord::are_adjacent_refs(Node* s1, Node* s2) {
if (!s1->is_Mem() || !s2->is_Mem()) return false;
if (!in_bb(s1) || !in_bb(s2)) return false;
+
+ // Do not use superword for non-primitives
+ if (!is_java_primitive(s1->as_Mem()->memory_type()) ||
+ !is_java_primitive(s2->as_Mem()->memory_type())) {
+ return false;
+ }
+
// FIXME - co_locate_pack fails on Stores in different mem-slices, so
// only pack memops that are in the same alias set until that's fixed.
if (_phase->C->get_alias_index(s1->as_Mem()->adr_type()) !=
diff --git a/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp b/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp
index b2d69373d39..e8ac72d4a4d 100644
--- a/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp
+++ b/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -638,7 +638,7 @@ void JvmtiClassFileReconstituter::copy_bytecodes(methodHandle mh,
// length of bytecode (mnemonic + operands)
address bcp = bs.bcp();
- int len = bs.next_bcp() - bcp;
+ int len = bs.instruction_size();
assert(len > 0, "length must be > 0");
// copy the bytecodes
diff --git a/hotspot/src/share/vm/prims/jvmtiExport.cpp b/hotspot/src/share/vm/prims/jvmtiExport.cpp
index 07d4bf01a4e..a4c2828dacb 100644
--- a/hotspot/src/share/vm/prims/jvmtiExport.cpp
+++ b/hotspot/src/share/vm/prims/jvmtiExport.cpp
@@ -726,6 +726,32 @@ GrowableArray* JvmtiExport::_pending_compiled_method_unload_method_id
GrowableArray<const void*>* JvmtiExport::_pending_compiled_method_unload_code_begins;
JavaThread* JvmtiExport::_current_poster;
+void JvmtiExport::post_compiled_method_unload_internal(JavaThread* self, jmethodID method, const void *code_begin) {
+ EVT_TRIG_TRACE(JVMTI_EVENT_COMPILED_METHOD_UNLOAD,
+ ("JVMTI [%s] method compile unload event triggered",
+ JvmtiTrace::safe_get_thread_name(self)));
+
+ // post the event for each environment that has this event enabled.
+ JvmtiEnvIterator it;
+ for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
+ if (env->is_enabled(JVMTI_EVENT_COMPILED_METHOD_UNLOAD)) {
+
+ EVT_TRACE(JVMTI_EVENT_COMPILED_METHOD_UNLOAD,
+ ("JVMTI [%s] class compile method unload event sent jmethodID " PTR_FORMAT,
+ JvmtiTrace::safe_get_thread_name(self), method));
+
+ ResourceMark rm(self);
+
+ JvmtiEventMark jem(self);
+ JvmtiJavaThreadEventTransition jet(self);
+ jvmtiEventCompiledMethodUnload callback = env->callbacks()->CompiledMethodUnload;
+ if (callback != NULL) {
+ (*callback)(env->jvmti_external(), method, code_begin);
+ }
+ }
+ }
+}
+
// post any pending CompiledMethodUnload events
void JvmtiExport::post_pending_compiled_method_unload_events() {
@@ -788,26 +814,7 @@ void JvmtiExport::post_pending_compiled_method_unload_events() {
// flag, cleanup _current_poster to indicate that no thread is now servicing the
// pending events list, and finally notify any thread that might be waiting.
for (;;) {
- EVT_TRIG_TRACE(JVMTI_EVENT_COMPILED_METHOD_UNLOAD,
- ("JVMTI [%s] method compile unload event triggered",
- JvmtiTrace::safe_get_thread_name(self)));
-
- // post the event for each environment that has this event enabled.
- JvmtiEnvIterator it;
- for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
- if (env->is_enabled(JVMTI_EVENT_COMPILED_METHOD_UNLOAD)) {
- EVT_TRACE(JVMTI_EVENT_COMPILED_METHOD_UNLOAD,
- ("JVMTI [%s] class compile method unload event sent jmethodID " PTR_FORMAT,
- JvmtiTrace::safe_get_thread_name(self), method));
-
- JvmtiEventMark jem(self);
- JvmtiJavaThreadEventTransition jet(self);
- jvmtiEventCompiledMethodUnload callback = env->callbacks()->CompiledMethodUnload;
- if (callback != NULL) {
- (*callback)(env->jvmti_external(), method, code_begin);
- }
- }
- }
+ post_compiled_method_unload_internal(self, method, code_begin);
// event posted, now re-grab monitor and get the next event
// If there's no next event then we are done. If this is the first
@@ -1864,17 +1871,25 @@ void JvmtiExport::post_compiled_method_load(JvmtiEnv* env, const jmethodID metho
}
// used at a safepoint to post a CompiledMethodUnload event
-void JvmtiExport::post_compiled_method_unload_at_safepoint(jmethodID mid, const void *code_begin) {
- assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
-
- // create list lazily
- if (_pending_compiled_method_unload_method_ids == NULL) {
-    _pending_compiled_method_unload_method_ids = new (ResourceObj::C_HEAP) GrowableArray<jmethodID>(10,true);
-    _pending_compiled_method_unload_code_begins = new (ResourceObj::C_HEAP) GrowableArray<const void*>(10,true);
+void JvmtiExport::post_compiled_method_unload(jmethodID mid, const void *code_begin) {
+ if (SafepointSynchronize::is_at_safepoint()) {
+ // Class unloading can cause nmethod unloading which is reported
+ // by the VMThread. These must be batched to be processed later.
+ if (_pending_compiled_method_unload_method_ids == NULL) {
+ // create list lazily
+      _pending_compiled_method_unload_method_ids = new (ResourceObj::C_HEAP) GrowableArray<jmethodID>(10,true);
+      _pending_compiled_method_unload_code_begins = new (ResourceObj::C_HEAP) GrowableArray<const void*>(10,true);
+ }
+ _pending_compiled_method_unload_method_ids->append(mid);
+ _pending_compiled_method_unload_code_begins->append(code_begin);
+ _have_pending_compiled_method_unload_events = true;
+ } else {
+ // Unloading caused by the sweeper can be reported synchronously.
+ if (have_pending_compiled_method_unload_events()) {
+ post_pending_compiled_method_unload_events();
+ }
+ post_compiled_method_unload_internal(JavaThread::current(), mid, code_begin);
}
- _pending_compiled_method_unload_method_ids->append(mid);
- _pending_compiled_method_unload_code_begins->append(code_begin);
- _have_pending_compiled_method_unload_events = true;
}
void JvmtiExport::post_dynamic_code_generated_internal(const char *name, const void *code_begin, const void *code_end) {
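The unload-event path is now split: post_compiled_method_unload_internal() does the actual delivery, and post_compiled_method_unload() either batches the event (when called at a safepoint by the VMThread) or flushes any pending events and delivers synchronously (when called by the sweeper). A standalone sketch of that dispatch pattern (not JVMTI code; all names are illustrative):

    // Standalone sketch (not JVMTI code): events raised in a restricted context
    // are queued; otherwise queued events are flushed first and the new event is
    // delivered immediately, so overall ordering is preserved.
    #include <cstdio>
    #include <vector>

    struct Event { int id; };

    static std::vector<Event> pending;

    static void deliver(const Event& e) { printf("deliver %d\n", e.id); }

    static void post(const Event& e, bool in_restricted_context) {
        if (in_restricted_context) {
            pending.push_back(e);                          // batched, delivered later
        } else {
            for (const Event& p : pending) deliver(p);     // flush older events first
            pending.clear();
            deliver(e);                                    // then deliver synchronously
        }
    }

    int main() {
        post({1}, true);    // e.g. raised at a safepoint by the VMThread
        post({2}, false);   // e.g. raised by the sweeper: flushes 1, then posts 2
        return 0;
    }

Flushing the pending queue before the synchronous post keeps events in the order they were generated, which is what the call to post_pending_compiled_method_unload_events() above achieves.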
diff --git a/hotspot/src/share/vm/prims/jvmtiExport.hpp b/hotspot/src/share/vm/prims/jvmtiExport.hpp
index 31c5571ee92..57d1fb53005 100644
--- a/hotspot/src/share/vm/prims/jvmtiExport.hpp
+++ b/hotspot/src/share/vm/prims/jvmtiExport.hpp
@@ -144,6 +144,9 @@ class JvmtiExport : public AllStatic {
// posts any pending CompiledMethodUnload events.
static void post_pending_compiled_method_unload_events();
+ // Perform the actual notification to interested JvmtiEnvs.
+ static void post_compiled_method_unload_internal(JavaThread* self, jmethodID mid, const void* code_begin);
+
// posts a DynamicCodeGenerated event (internal/private implementation).
// The public post_dynamic_code_generated* functions make use of the
// internal implementation.
@@ -299,8 +302,8 @@ class JvmtiExport : public AllStatic {
static void post_compiled_method_load(nmethod *nm) KERNEL_RETURN;
static void post_dynamic_code_generated(const char *name, const void *code_begin, const void *code_end) KERNEL_RETURN;
- // used at a safepoint to post a CompiledMethodUnload event
- static void post_compiled_method_unload_at_safepoint(jmethodID mid, const void *code_begin) KERNEL_RETURN;
+ // used to post a CompiledMethodUnload event
+ static void post_compiled_method_unload(jmethodID mid, const void *code_begin) KERNEL_RETURN;
// similiar to post_dynamic_code_generated except that it can be used to
// post a DynamicCodeGenerated event while holding locks in the VM. Any event
diff --git a/hotspot/src/share/vm/prims/methodComparator.cpp b/hotspot/src/share/vm/prims/methodComparator.cpp
index 4b198f95be6..9190d5a839c 100644
--- a/hotspot/src/share/vm/prims/methodComparator.cpp
+++ b/hotspot/src/share/vm/prims/methodComparator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -130,8 +130,8 @@ bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {
case Bytecodes::_multianewarray : // fall through
case Bytecodes::_checkcast : // fall through
case Bytecodes::_instanceof : {
- u2 cpi_old = _s_old->get_index_big();
- u2 cpi_new = _s_new->get_index_big();
+ u2 cpi_old = _s_old->get_index_u2();
+ u2 cpi_new = _s_new->get_index_u2();
if ((_old_cp->klass_at_noresolve(cpi_old) != _new_cp->klass_at_noresolve(cpi_new)))
return false;
if (c_old == Bytecodes::_multianewarray &&
@@ -147,9 +147,10 @@ bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {
case Bytecodes::_invokevirtual : // fall through
case Bytecodes::_invokespecial : // fall through
case Bytecodes::_invokestatic : // fall through
+ case Bytecodes::_invokedynamic : // fall through
case Bytecodes::_invokeinterface : {
- u2 cpci_old = _s_old->get_index_int();
- u2 cpci_new = _s_new->get_index_int();
+ int cpci_old = _s_old->has_index_u4() ? _s_old->get_index_u4() : _s_old->get_index_u2_cpcache();
+ int cpci_new = _s_new->has_index_u4() ? _s_new->get_index_u4() : _s_new->get_index_u2_cpcache();
// Check if the names of classes, field/method names and signatures at these indexes
// are the same. Indices which are really into constantpool cache (rather than constant
// pool itself) are accepted by the constantpool query routines below.
@@ -162,14 +163,10 @@ bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {
case Bytecodes::_ldc : // fall through
case Bytecodes::_ldc_w : {
- u2 cpi_old, cpi_new;
- if (c_old == Bytecodes::_ldc) {
- cpi_old = _s_old->bcp()[1];
- cpi_new = _s_new->bcp()[1];
- } else {
- cpi_old = _s_old->get_index_big();
- cpi_new = _s_new->get_index_big();
- }
+ Bytecode_loadconstant* ldc_old = Bytecode_loadconstant_at(_s_old->method()(), _s_old->bcp());
+ Bytecode_loadconstant* ldc_new = Bytecode_loadconstant_at(_s_new->method()(), _s_new->bcp());
+ int cpi_old = ldc_old->index();
+ int cpi_new = ldc_new->index();
constantTag tag_old = _old_cp->tag_at(cpi_old);
constantTag tag_new = _new_cp->tag_at(cpi_new);
if (tag_old.is_int() || tag_old.is_float()) {
@@ -179,7 +176,9 @@ bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {
if (_old_cp->int_at(cpi_old) != _new_cp->int_at(cpi_new))
return false;
} else {
- if (_old_cp->float_at(cpi_old) != _new_cp->float_at(cpi_new))
+ // Use jint_cast to compare the bits rather than numerical values.
+ // This makes a difference for NaN constants.
+ if (jint_cast(_old_cp->float_at(cpi_old)) != jint_cast(_new_cp->float_at(cpi_new)))
return false;
}
} else if (tag_old.is_string() || tag_old.is_unresolved_string()) {
@@ -199,8 +198,8 @@ bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {
}
case Bytecodes::_ldc2_w : {
- u2 cpi_old = _s_old->get_index_big();
- u2 cpi_new = _s_new->get_index_big();
+ u2 cpi_old = _s_old->get_index_u2();
+ u2 cpi_new = _s_new->get_index_u2();
constantTag tag_old = _old_cp->tag_at(cpi_old);
constantTag tag_new = _new_cp->tag_at(cpi_new);
if (tag_old.value() != tag_new.value())
@@ -209,7 +208,9 @@ bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {
if (_old_cp->long_at(cpi_old) != _new_cp->long_at(cpi_new))
return false;
} else {
- if (_old_cp->double_at(cpi_old) != _new_cp->double_at(cpi_new))
+ // Use jlong_cast to compare the bits rather than numerical values.
+ // This makes a difference for NaN constants.
+ if (jlong_cast(_old_cp->double_at(cpi_old)) != jlong_cast(_new_cp->double_at(cpi_new)))
return false;
}
break;
@@ -221,7 +222,7 @@ bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {
break;
case Bytecodes::_sipush :
- if (_s_old->get_index_big() != _s_new->get_index_big())
+ if (_s_old->get_index_u2() != _s_new->get_index_u2())
return false;
break;
@@ -260,8 +261,8 @@ bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {
case Bytecodes::_ifnonnull : // fall through
case Bytecodes::_ifnull : // fall through
case Bytecodes::_jsr : {
- short old_ofs = (short) _s_old->get_index_big();
- short new_ofs = (short) _s_new->get_index_big();
+ int old_ofs = _s_old->bytecode()->get_offset_s2(c_old);
+ int new_ofs = _s_new->bytecode()->get_offset_s2(c_new);
if (_switchable_test) {
int old_dest = _s_old->bci() + old_ofs;
int new_dest = _s_new->bci() + new_ofs;
@@ -285,9 +286,11 @@ bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {
if (_s_old->is_wide() != _s_new->is_wide())
return false;
if (! _s_old->is_wide()) {
- if (_s_old->get_index_big() != _s_new->get_index_big())
+ // We could use get_index_u1 and get_constant_u1, but it's simpler to grab both bytes at once:
+ if (Bytes::get_Java_u2(_s_old->bcp() + 1) != Bytes::get_Java_u2(_s_new->bcp() + 1))
return false;
} else {
+ // We could use get_index_u2 and get_constant_u2, but it's simpler to grab all four bytes at once:
if (Bytes::get_Java_u4(_s_old->bcp() + 1) != Bytes::get_Java_u4(_s_new->bcp() + 1))
return false;
}
@@ -295,8 +298,8 @@ bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {
case Bytecodes::_goto_w : // fall through
case Bytecodes::_jsr_w : {
- int old_ofs = (int) Bytes::get_Java_u4(_s_old->bcp() + 1);
- int new_ofs = (int) Bytes::get_Java_u4(_s_new->bcp() + 1);
+ int old_ofs = _s_old->bytecode()->get_offset_s4(c_old);
+ int new_ofs = _s_new->bytecode()->get_offset_s4(c_new);
if (_switchable_test) {
int old_dest = _s_old->bci() + old_ofs;
int new_dest = _s_new->bci() + new_ofs;
@@ -357,8 +360,8 @@ bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {
}
}
} else { // !_switchable_test, can use fast rough compare
- int len_old = _s_old->next_bcp() - _s_old->bcp();
- int len_new = _s_new->next_bcp() - _s_new->bcp();
+ int len_old = _s_old->instruction_size();
+ int len_new = _s_new->instruction_size();
if (len_old != len_new)
return false;
if (memcmp(_s_old->bcp(), _s_new->bcp(), len_old) != 0)
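The jint_cast/jlong_cast comparisons above exist because a NaN constant never compares equal to itself numerically, so two identical NaN constant-pool entries would be reported as different. Comparing the raw bit patterns avoids that. A standalone sketch (not HotSpot code):

    // Standalone sketch (not HotSpot code): NaN != NaN by value, but the bit
    // patterns of two copies of the same NaN constant compare equal.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <cmath>

    static uint32_t bits_of(float f) {
        uint32_t b;
        memcpy(&b, &f, sizeof b);
        return b;
    }

    int main() {
        float a = nanf("");
        float b = a;                                             // same constant
        printf("value equal: %d\n", a == b);                     // 0
        printf("bits  equal: %d\n", bits_of(a) == bits_of(b));   // 1
        return 0;
    }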
diff --git a/hotspot/src/share/vm/prims/methodHandleWalk.cpp b/hotspot/src/share/vm/prims/methodHandleWalk.cpp
index d4f9ab3b0f0..f41f63a16a0 100644
--- a/hotspot/src/share/vm/prims/methodHandleWalk.cpp
+++ b/hotspot/src/share/vm/prims/methodHandleWalk.cpp
@@ -732,7 +732,7 @@ void MethodHandleCompiler::emit_bc(Bytecodes::Code op, int index) {
case Bytecodes::_dreturn:
case Bytecodes::_areturn:
case Bytecodes::_return:
- assert(strcmp(Bytecodes::format(op), "b") == 0, "wrong bytecode format");
+ assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_b, "wrong bytecode format");
_bytecode.push(op);
break;
@@ -748,7 +748,7 @@ void MethodHandleCompiler::emit_bc(Bytecodes::Code op, int index) {
case Bytecodes::_fstore:
case Bytecodes::_dstore:
case Bytecodes::_astore:
- assert(strcmp(Bytecodes::format(op), "bi") == 0, "wrong bytecode format");
+ assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bi, "wrong bytecode format");
assert((char) index == index, "index does not fit in 8-bit");
_bytecode.push(op);
_bytecode.push(index);
@@ -757,18 +757,18 @@ void MethodHandleCompiler::emit_bc(Bytecodes::Code op, int index) {
// bii
case Bytecodes::_ldc2_w:
case Bytecodes::_checkcast:
- assert(strcmp(Bytecodes::format(op), "bii") == 0, "wrong bytecode format");
+ assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bkk, "wrong bytecode format");
assert((short) index == index, "index does not fit in 16-bit");
_bytecode.push(op);
_bytecode.push(index >> 8);
_bytecode.push(index);
break;
- // bjj
+ // bJJ
case Bytecodes::_invokestatic:
case Bytecodes::_invokespecial:
case Bytecodes::_invokevirtual:
- assert(strcmp(Bytecodes::format(op), "bjj") == 0, "wrong bytecode format");
+ assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bJJ, "wrong bytecode format");
assert((short) index == index, "index does not fit in 16-bit");
_bytecode.push(op);
_bytecode.push(index >> 8);
diff --git a/hotspot/src/share/vm/runtime/arguments.cpp b/hotspot/src/share/vm/runtime/arguments.cpp
index 6f42fb8ef69..4d2f6762eb8 100644
--- a/hotspot/src/share/vm/runtime/arguments.cpp
+++ b/hotspot/src/share/vm/runtime/arguments.cpp
@@ -1211,8 +1211,44 @@ void Arguments::set_cms_and_parnew_gc_flags() {
}
#endif // KERNEL
+void set_object_alignment() {
+ // Object alignment.
+ assert(is_power_of_2(ObjectAlignmentInBytes), "ObjectAlignmentInBytes must be power of 2");
+ MinObjAlignmentInBytes = ObjectAlignmentInBytes;
+ assert(MinObjAlignmentInBytes >= HeapWordsPerLong * HeapWordSize, "ObjectAlignmentInBytes value is too small");
+ MinObjAlignment = MinObjAlignmentInBytes / HeapWordSize;
+ assert(MinObjAlignmentInBytes == MinObjAlignment * HeapWordSize, "ObjectAlignmentInBytes value is incorrect");
+ MinObjAlignmentInBytesMask = MinObjAlignmentInBytes - 1;
+
+ LogMinObjAlignmentInBytes = exact_log2(ObjectAlignmentInBytes);
+ LogMinObjAlignment = LogMinObjAlignmentInBytes - LogHeapWordSize;
+
+ // Oop encoding heap max
+ OopEncodingHeapMax = (uint64_t(max_juint) + 1) << LogMinObjAlignmentInBytes;
+
+#ifndef KERNEL
+ // Set CMS global values
+ CompactibleFreeListSpace::set_cms_values();
+#endif // KERNEL
+}
+
+bool verify_object_alignment() {
+ // Object alignment.
+ if (!is_power_of_2(ObjectAlignmentInBytes)) {
+ jio_fprintf(defaultStream::error_stream(),
+ "error: ObjectAlignmentInBytes=%d must be power of 2", (int)ObjectAlignmentInBytes);
+ return false;
+ }
+ if ((int)ObjectAlignmentInBytes < BytesPerLong) {
+ jio_fprintf(defaultStream::error_stream(),
+ "error: ObjectAlignmentInBytes=%d must be greater or equal %d", (int)ObjectAlignmentInBytes, BytesPerLong);
+ return false;
+ }
+ return true;
+}
+
inline uintx max_heap_for_compressed_oops() {
- LP64_ONLY(return oopDesc::OopEncodingHeapMax - MaxPermSize - os::vm_page_size());
+ LP64_ONLY(return OopEncodingHeapMax - MaxPermSize - os::vm_page_size());
NOT_LP64(ShouldNotReachHere(); return 0);
}
@@ -1776,6 +1812,8 @@ bool Arguments::check_vm_args_consistency() {
status = status && verify_interval(TLABWasteTargetPercent,
1, 100, "TLABWasteTargetPercent");
+ status = status && verify_object_alignment();
+
return status;
}
@@ -2848,6 +2886,9 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
UseCompressedOops = false;
#endif
+ // Set object alignment values.
+ set_object_alignment();
+
#ifdef SERIALGC
force_serial_gc();
#endif // SERIALGC
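set_object_alignment() turns the new ObjectAlignmentInBytes flag into the derived constants, in particular the shift used for narrow-oop encoding and therefore OopEncodingHeapMax. A standalone sketch of the arithmetic (not HotSpot code; the candidate alignment values are just examples):

    // Standalone sketch (not HotSpot code) of the set_object_alignment() math:
    // the encodable heap max is (max_juint + 1) << log2(ObjectAlignmentInBytes).
    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint64_t max_juint = 0xFFFFFFFFULL;
        const int alignments[] = {8, 16};          // example ObjectAlignmentInBytes values
        for (int alignment : alignments) {
            int log_alignment = 0;                 // exact_log2 for a power of two
            while ((1 << log_alignment) < alignment) log_alignment++;
            uint64_t oop_encoding_heap_max = (max_juint + 1) << log_alignment;
            printf("ObjectAlignmentInBytes=%d  shift=%d  encodable heap=%llu GB\n",
                   alignment, log_alignment,
                   (unsigned long long)(oop_encoding_heap_max >> 30));
        }
        return 0;
    }

Doubling the alignment from 8 to 16 bytes adds one bit to the shift and doubles the heap size that compressed oops can address, at the cost of more padding per object.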
diff --git a/hotspot/src/share/vm/runtime/globals.hpp b/hotspot/src/share/vm/runtime/globals.hpp
index f6726a92948..16eb0eee6b0 100644
--- a/hotspot/src/share/vm/runtime/globals.hpp
+++ b/hotspot/src/share/vm/runtime/globals.hpp
@@ -321,6 +321,9 @@ class CommandLineFlags {
diagnostic(bool, PrintCompressedOopsMode, false, \
"Print compressed oops base address and encoding mode") \
\
+ lp64_product(intx, ObjectAlignmentInBytes, 8, \
+ "Default object alignment in bytes, 8 is minimum") \
+ \
/* UseMembar is theoretically a temp flag used for memory barrier \
* removal testing. It was supposed to be removed before FCS but has \
* been re-added (see 6401008) */ \
@@ -920,6 +923,10 @@ class CommandLineFlags {
\
product(intx, AlwaysInflate, 0, "(Unstable) Force inflation") \
\
+ product(intx, MonitorBound, 0, "Bound Monitor population") \
+ \
+ product(bool, MonitorInUseLists, false, "Track Monitors for Deflation") \
+ \
product(intx, Atomics, 0, \
"(Unsafe,Unstable) Diagnostic - Controls emission of atomics") \
\
@@ -1117,6 +1124,9 @@ class CommandLineFlags {
product(intx, TraceRedefineClasses, 0, \
"Trace level for JVMTI RedefineClasses") \
\
+ develop(bool, StressMethodComparator, false, \
+ "run the MethodComparator on all loaded methods") \
+ \
/* change to false by default sometime after Mustang */ \
product(bool, VerifyMergedCPBytecodes, true, \
"Verify bytecodes after RedefineClasses constant pool merging") \
diff --git a/hotspot/src/share/vm/runtime/mutexLocker.cpp b/hotspot/src/share/vm/runtime/mutexLocker.cpp
index 0941aedf61f..71cbc771856 100644
--- a/hotspot/src/share/vm/runtime/mutexLocker.cpp
+++ b/hotspot/src/share/vm/runtime/mutexLocker.cpp
@@ -82,9 +82,6 @@ Mutex* EvacFailureStack_lock = NULL;
Mutex* DerivedPointerTableGC_lock = NULL;
Mutex* Compile_lock = NULL;
Monitor* MethodCompileQueue_lock = NULL;
-#ifdef TIERED
-Monitor* C1_lock = NULL;
-#endif // TIERED
Monitor* CompileThread_lock = NULL;
Mutex* CompileTaskAlloc_lock = NULL;
Mutex* CompileStatistics_lock = NULL;
@@ -255,11 +252,6 @@ void mutex_init() {
def(Debug3_lock , Mutex , nonleaf+4, true );
def(ProfileVM_lock , Monitor, nonleaf+4, false); // used for profiling of the VMThread
def(CompileThread_lock , Monitor, nonleaf+5, false );
-#ifdef TIERED
- def(C1_lock , Monitor, nonleaf+5, false );
-#endif // TIERED
-
-
}
GCMutexLocker::GCMutexLocker(Monitor * mutex) {
diff --git a/hotspot/src/share/vm/runtime/mutexLocker.hpp b/hotspot/src/share/vm/runtime/mutexLocker.hpp
index 2d29d1f4dd5..08083f5a19e 100644
--- a/hotspot/src/share/vm/runtime/mutexLocker.hpp
+++ b/hotspot/src/share/vm/runtime/mutexLocker.hpp
@@ -84,9 +84,6 @@ extern Mutex* ParGCRareEvent_lock; // Synchronizes various (rare)
extern Mutex* EvacFailureStack_lock; // guards the evac failure scan stack
extern Mutex* Compile_lock; // a lock held when Compilation is updating code (used to block CodeCache traversal, CHA updates, etc)
extern Monitor* MethodCompileQueue_lock; // a lock held when method compilations are enqueued, dequeued
-#ifdef TIERED
-extern Monitor* C1_lock; // a lock to ensure on single c1 compile is ever active
-#endif // TIERED
extern Monitor* CompileThread_lock; // a lock held by compile threads during compilation system initialization
extern Mutex* CompileTaskAlloc_lock; // a lock held when CompileTasks are allocated
extern Mutex* CompileStatistics_lock; // a lock held when updating compilation statistics
diff --git a/hotspot/src/share/vm/runtime/sharedRuntime.cpp b/hotspot/src/share/vm/runtime/sharedRuntime.cpp
index 676ad86149d..489063a4b19 100644
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp
@@ -1435,7 +1435,7 @@ IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(methodOopDesc* method, addr
// for the rest of its life! Just another racing bug in the life of
// fixup_callers_callsite ...
//
- RelocIterator iter(cb, call->instruction_address(), call->next_instruction_address());
+ RelocIterator iter(nm, call->instruction_address(), call->next_instruction_address());
iter.next();
assert(iter.has_current(), "must have a reloc at java call site");
relocInfo::relocType typ = iter.reloc()->type();
@@ -2055,11 +2055,11 @@ class AdapterHandlerTableIterator : public StackObj {
void scan() {
while (_index < _table->table_size()) {
AdapterHandlerEntry* a = _table->bucket(_index);
+ _index++;
if (a != NULL) {
_current = a;
return;
}
- _index++;
}
}
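The one-line move of _index++ in AdapterHandlerTableIterator::scan() matters because the iterator must advance past a bucket as soon as it fetches it; with the increment only on the empty-bucket path, a scan that returned a non-empty bucket would restart at the same bucket on the next call. A standalone sketch of the corrected pattern (illustrative data, not HotSpot code):

    // Standalone sketch (not HotSpot code): advance the bucket index as soon as
    // the bucket is fetched so every call to scan() makes progress.
    #include <cstdio>

    static const char* buckets[] = { nullptr, "a", nullptr, "b" };
    static const int table_size = 4;

    struct Iterator {
        int index = 0;
        const char* current = nullptr;
        void scan() {
            current = nullptr;
            while (index < table_size) {
                const char* b = buckets[index];
                index++;                        // advance first, as in the fix
                if (b != nullptr) { current = b; return; }
            }
        }
    };

    int main() {
        Iterator it;
        for (it.scan(); it.current != nullptr; it.scan())
            printf("%s\n", it.current);         // prints "a" then "b", then stops
        return 0;
    }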
diff --git a/hotspot/src/share/vm/runtime/synchronizer.cpp b/hotspot/src/share/vm/runtime/synchronizer.cpp
index 5c55445dc6d..75674987efb 100644
--- a/hotspot/src/share/vm/runtime/synchronizer.cpp
+++ b/hotspot/src/share/vm/runtime/synchronizer.cpp
@@ -185,6 +185,8 @@ struct SharedGlobals {
} ;
static SharedGlobals GVars ;
+static int MonitorScavengeThreshold = 1000000 ;
+static volatile int ForceMonitorScavenge = 0 ; // Scavenge required and pending
// Tunables ...
@@ -746,8 +748,85 @@ void Thread::muxRelease (volatile intptr_t * Lock) {
ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ;
ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL ;
static volatile intptr_t ListLock = 0 ; // protects global monitor free-list cache
+static volatile int MonitorFreeCount = 0 ; // # on gFreeList
+static volatile int MonitorPopulation = 0 ; // # Extant -- in circulation
#define CHAINMARKER ((oop)-1)
+// Constraining monitor pool growth via MonitorBound ...
+//
+// The monitor pool is grow-only. We scavenge at STW safepoint-time, but the
+// rate of scavenging is driven primarily by GC. As such, we can find
+// an inordinate number of monitors in circulation.
+// To avoid that scenario we can artificially induce a STW safepoint
+// if the pool appears to be growing past some reasonable bound.
+// Generally we favor time in space-time tradeoffs, but as there's no
+// natural back-pressure on the # of extant monitors we need to impose some
+// type of limit. Beware that if MonitorBound is set to too low a value
+// we could just loop. In addition, if MonitorBound is set to a low value
+// we'll incur more safepoints, which are harmful to performance.
+// See also: GuaranteedSafepointInterval
+//
+// As noted elsewhere, the correct long-term solution is to deflate at
+// monitorexit-time, in which case the number of inflated objects is bounded
+// by the number of threads. That policy obviates the need for scavenging at
+// STW safepoint time. As an aside, scavenging can be time-consuming when the
+// # of extant monitors is large. Unfortunately there's a day-1 assumption baked
+// into much HotSpot code that the object::monitor relationship, once established
+// or observed, will remain stable except over potential safepoints.
+//
+// We can use either a blocking synchronous VM operation or an async VM operation.
+// -- If we use a blocking VM operation :
+// Calls to ScavengeCheck() should be inserted only into 'safe' locations in paths
+// that lead to ::inflate() or ::omAlloc().
+// Even though the safepoint will not directly induce GC, a GC might
+// piggyback on the safepoint operation, so the caller should hold no naked oops.
+// Furthermore, monitor::object relationships are NOT necessarily stable over this call
+// unless the caller has made provisions to "pin" the object to the monitor, say
+// by incrementing the monitor's _count field.
+// -- If we use a non-blocking asynchronous VM operation :
+// the constraints above don't apply. The safepoint will fire in the future
+// at a more convenient time. On the other hand the latency between posting and
+// running the safepoint introduces or admits "slop" or laxity during which the
+// monitor population can climb further above the threshold. The monitor population,
+// however, tends to converge asymptotically over time to a count that's slightly
+// above the target value specified by MonitorBound. That is, we avoid unbounded
+// growth, albeit with some imprecision.
+//
+// The current implementation uses asynchronous VM operations.
+//
+// Ideally we'd check if (MonitorPopulation > MonitorBound) in omAlloc()
+// immediately before trying to grow the global list via allocation.
+// If the predicate was true then we'd induce a synchronous safepoint, wait
+// for the safepoint to complete, and then try again to allocate from the global
+// free list. That approach would be simpler and more precise, admitting no "slop".
+// Unfortunately we can't safely safepoint in the midst of omAlloc(), so
+// instead we use asynchronous safepoints.
+
+static void InduceScavenge (Thread * Self, const char * Whence) {
+ // Induce STW safepoint to trim monitors
+ // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
+ // More precisely, trigger an asynchronous STW safepoint as the number
+ // of active monitors passes the specified threshold.
+ // TODO: assert thread state is reasonable
+
+ if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
+ if (Knob_Verbose) {
+ ::printf ("Monitor scavenge - Induced STW @%s (%d)\n", Whence, ForceMonitorScavenge) ;
+ ::fflush(stdout) ;
+ }
+ // Induce a 'null' safepoint to scavenge monitors
+  // The VM_Operation instance must be heap allocated as the op will be enqueued and posted
+ // to the VMthread and have a lifespan longer than that of this activation record.
+ // The VMThread will delete the op when completed.
+ VMThread::execute (new VM_ForceAsyncSafepoint()) ;
+
+ if (Knob_Verbose) {
+ ::printf ("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge) ;
+ ::fflush(stdout) ;
+ }
+ }
+}
+
ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
// A large MAXPRIVATE value reduces both list lock contention
// and list coherency traffic, but also tends to increase the
@@ -770,6 +849,11 @@ ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
Self->omFreeCount -- ;
// CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
guarantee (m->object() == NULL, "invariant") ;
+ if (MonitorInUseLists) {
+ m->FreeNext = Self->omInUseList;
+ Self->omInUseList = m;
+ Self->omInUseCount ++;
+ }
return m ;
}
@@ -784,6 +868,7 @@ ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
// on various locks.
Thread::muxAcquire (&ListLock, "omAlloc") ;
for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL; ) {
+ MonitorFreeCount --;
ObjectMonitor * take = gFreeList ;
gFreeList = take->FreeNext ;
guarantee (take->object() == NULL, "invariant") ;
@@ -796,6 +881,15 @@ ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
if (Self->omFreeProvision > MAXPRIVATE ) Self->omFreeProvision = MAXPRIVATE ;
TEVENT (omFirst - reprovision) ;
continue ;
+
+ const int mx = MonitorBound ;
+ if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) {
+ // We can't safely induce a STW safepoint from omAlloc() as our thread
+ // state may not be appropriate for such activities and callers may hold
+ // naked oops, so instead we defer the action.
+ InduceScavenge (Self, "omAlloc") ;
+ }
+ continue;
}
// 3: allocate a block of new ObjectMonitors
@@ -836,6 +930,8 @@ ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
// Acquire the ListLock to manipulate BlockList and FreeList.
// An Oyama-Taura-Yonezawa scheme might be more efficient.
Thread::muxAcquire (&ListLock, "omAlloc [2]") ;
+ MonitorPopulation += _BLOCKSIZE-1;
+ MonitorFreeCount += _BLOCKSIZE-1;
// Add the new block to the list of extant blocks (gBlockList).
// The very first objectMonitor in a block is reserved and dedicated.
@@ -894,7 +990,9 @@ void ObjectSynchronizer::omFlush (Thread * Self) {
if (List == NULL) return ;
ObjectMonitor * Tail = NULL ;
ObjectMonitor * s ;
+ int Tally = 0;
for (s = List ; s != NULL ; s = s->FreeNext) {
+ Tally ++ ;
Tail = s ;
guarantee (s->object() == NULL, "invariant") ;
guarantee (!s->is_busy(), "invariant") ;
@@ -906,6 +1004,7 @@ void ObjectSynchronizer::omFlush (Thread * Self) {
Thread::muxAcquire (&ListLock, "omFlush") ;
Tail->FreeNext = gFreeList ;
gFreeList = List ;
+ MonitorFreeCount += Tally;
Thread::muxRelease (&ListLock) ;
TEVENT (omFlush) ;
}
@@ -1747,16 +1846,15 @@ void ObjectSynchronizer::oops_do(OopClosure* f) {
// Having a large number of monitors in-circulation negatively
// impacts the performance of some applications (e.g., PointBase).
// Broadly, we want to minimize the # of monitors in circulation.
-// Alternately, we could partition the active monitors into sub-lists
-// of those that need scanning and those that do not.
-// Specifically, we would add a new sub-list of objectmonitors
-// that are in-circulation and potentially active. deflate_idle_monitors()
-// would scan only that list. Other monitors could reside on a quiescent
-// list. Such sequestered monitors wouldn't need to be scanned by
-// deflate_idle_monitors(). omAlloc() would first check the global free list,
-// then the quiescent list, and, failing those, would allocate a new block.
-// Deflate_idle_monitors() would scavenge and move monitors to the
-// quiescent list.
+//
+// We have added a flag, MonitorInUseLists, which creates a list
+// of active monitors for each thread. deflate_idle_monitors()
+// only scans the per-thread inuse lists. omAlloc() puts all
+// assigned monitors on the per-thread list. deflate_idle_monitors()
+// returns the non-busy monitors to the global free list.
+// An alternative could have used a single global inuse list. The
+// downside would have been the additional cost of acquiring the global list lock
+// for every omAlloc().
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate. Large heaps can mean infrequent GC,
@@ -1769,18 +1867,100 @@ void ObjectSynchronizer::oops_do(OopClosure* f) {
// An even better solution would be to deflate on-the-fly, aggressively,
// at monitorexit-time as is done in EVM's metalock or Relaxed Locks.
+
+// Deflate a single monitor if not in use
+// Return true if deflated, false if in use
+bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
+ ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
+ bool deflated;
+ // Normal case ... The monitor is associated with obj.
+ guarantee (obj->mark() == markOopDesc::encode(mid), "invariant") ;
+ guarantee (mid == obj->mark()->monitor(), "invariant");
+ guarantee (mid->header()->is_neutral(), "invariant");
+
+ if (mid->is_busy()) {
+ if (ClearResponsibleAtSTW) mid->_Responsible = NULL ;
+ deflated = false;
+ } else {
+ // Deflate the monitor if it is no longer being used
+ // It's idle - scavenge and return to the global free list
+ // plain old deflation ...
+ TEVENT (deflate_idle_monitors - scavenge1) ;
+ if (TraceMonitorInflation) {
+ if (obj->is_instance()) {
+ ResourceMark rm;
+ tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
+ (intptr_t) obj, (intptr_t) obj->mark(), Klass::cast(obj->klass())->external_name());
+ }
+ }
+
+ // Restore the header back to obj
+ obj->release_set_mark(mid->header());
+ mid->clear();
+
+ assert (mid->object() == NULL, "invariant") ;
+
+ // Move the object to the working free list defined by FreeHead,FreeTail.
+ if (*FreeHeadp == NULL) *FreeHeadp = mid;
+ if (*FreeTailp != NULL) {
+ ObjectMonitor * prevtail = *FreeTailp;
+ prevtail->FreeNext = mid;
+ }
+ *FreeTailp = mid;
+ deflated = true;
+ }
+ return deflated;
+}
+
void ObjectSynchronizer::deflate_idle_monitors() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
int nInuse = 0 ; // currently associated with objects
int nInCirculation = 0 ; // extant
int nScavenged = 0 ; // reclaimed
+ bool deflated = false;
ObjectMonitor * FreeHead = NULL ; // Local SLL of scavenged monitors
ObjectMonitor * FreeTail = NULL ;
- // Iterate over all extant monitors - Scavenge all idle monitors.
TEVENT (deflate_idle_monitors) ;
- for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
+ // Prevent omFlush from changing mids in Thread dtor's during deflation
+ // And in case the vm thread is acquiring a lock during a safepoint
+ // See e.g. 6320749
+ Thread::muxAcquire (&ListLock, "scavenge - return") ;
+
+ if (MonitorInUseLists) {
+ ObjectMonitor* mid;
+ ObjectMonitor* next;
+ ObjectMonitor* curmidinuse;
+ for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
+ curmidinuse = NULL;
+ for (mid = cur->omInUseList; mid != NULL; ) {
+ oop obj = (oop) mid->object();
+ deflated = false;
+ if (obj != NULL) {
+ deflated = deflate_monitor(mid, obj, &FreeHead, &FreeTail);
+ }
+ if (deflated) {
+ // extract from per-thread in-use-list
+ if (mid == cur->omInUseList) {
+ cur->omInUseList = mid->FreeNext;
+ } else if (curmidinuse != NULL) {
+ curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
+ }
+ next = mid->FreeNext;
+ mid->FreeNext = NULL; // This mid is current tail in the FreeHead list
+ mid = next;
+ cur->omInUseCount--;
+ nScavenged ++ ;
+ } else {
+ curmidinuse = mid;
+ mid = mid->FreeNext;
+ nInuse ++;
+ }
+ }
+ }
+ } else for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
+ // Iterate over all extant monitors - Scavenge all idle monitors.
assert(block->object() == CHAINMARKER, "must be a block header");
nInCirculation += _BLOCKSIZE ;
for (int i = 1 ; i < _BLOCKSIZE; i++) {
@@ -1795,61 +1975,39 @@ void ObjectSynchronizer::deflate_idle_monitors() {
guarantee (!mid->is_busy(), "invariant") ;
continue ;
}
+ deflated = deflate_monitor(mid, obj, &FreeHead, &FreeTail);
- // Normal case ... The monitor is associated with obj.
- guarantee (obj->mark() == markOopDesc::encode(mid), "invariant") ;
- guarantee (mid == obj->mark()->monitor(), "invariant");
- guarantee (mid->header()->is_neutral(), "invariant");
-
- if (mid->is_busy()) {
- if (ClearResponsibleAtSTW) mid->_Responsible = NULL ;
- nInuse ++ ;
+ if (deflated) {
+ mid->FreeNext = NULL ;
+ nScavenged ++ ;
} else {
- // Deflate the monitor if it is no longer being used
- // It's idle - scavenge and return to the global free list
- // plain old deflation ...
- TEVENT (deflate_idle_monitors - scavenge1) ;
- if (TraceMonitorInflation) {
- if (obj->is_instance()) {
- ResourceMark rm;
- tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
- (intptr_t) obj, (intptr_t) obj->mark(), Klass::cast(obj->klass())->external_name());
- }
- }
-
- // Restore the header back to obj
- obj->release_set_mark(mid->header());
- mid->clear();
-
- assert (mid->object() == NULL, "invariant") ;
-
- // Move the object to the working free list defined by FreeHead,FreeTail.
- mid->FreeNext = NULL ;
- if (FreeHead == NULL) FreeHead = mid ;
- if (FreeTail != NULL) FreeTail->FreeNext = mid ;
- FreeTail = mid ;
- nScavenged ++ ;
+ nInuse ++;
}
}
}
+ MonitorFreeCount += nScavenged;
+
+ // Consider: audit gFreeList to ensure that MonitorFreeCount and list agree.
+
+ if (Knob_Verbose) {
+ ::printf ("Deflate: InCirc=%d InUse=%d Scavenged=%d ForceMonitorScavenge=%d : pop=%d free=%d\n",
+ nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
+ MonitorPopulation, MonitorFreeCount) ;
+ ::fflush(stdout) ;
+ }
+
+ ForceMonitorScavenge = 0; // Reset
+
// Move the scavenged monitors back to the global free list.
- // In theory we don't need the freelist lock as we're at a STW safepoint.
- // omAlloc() and omFree() can only be called while a thread is _not in safepoint state.
- // But it's remotely possible that omFlush() or release_monitors_owned_by_thread()
- // might be called while not at a global STW safepoint. In the interest of
- // safety we protect the following access with ListLock.
- // An even more conservative and prudent approach would be to guard
- // the main loop in scavenge_idle_monitors() with ListLock.
if (FreeHead != NULL) {
guarantee (FreeTail != NULL && nScavenged > 0, "invariant") ;
assert (FreeTail->FreeNext == NULL, "invariant") ;
// constant-time list splice - prepend scavenged segment to gFreeList
- Thread::muxAcquire (&ListLock, "scavenge - return") ;
FreeTail->FreeNext = gFreeList ;
gFreeList = FreeHead ;
- Thread::muxRelease (&ListLock) ;
}
+ Thread::muxRelease (&ListLock) ;
if (_sync_Deflations != NULL) _sync_Deflations->inc(nScavenged) ;
if (_sync_MonExtant != NULL) _sync_MonExtant ->set_value(nInCirculation);
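With MonitorInUseLists enabled, deflate_idle_monitors() walks each thread's singly linked omInUseList, unlinks the monitors that deflate_monitor() scavenges, and leaves the busy ones in place, using curmidinuse as the trailing "previous kept node" pointer. A standalone sketch of that list surgery (not HotSpot code; names are illustrative):

    // Standalone sketch (not HotSpot code): walk a singly linked list, unlink the
    // entries that can be scavenged, keep the rest, tracking the previous kept node.
    #include <cstdio>

    struct Monitor { int id; bool busy; Monitor* next; };

    // Unlinks non-busy monitors from *head and returns how many were removed.
    static int scavenge(Monitor** head) {
        int removed = 0;
        Monitor* prev = nullptr;
        for (Monitor* m = *head; m != nullptr; ) {
            Monitor* next = m->next;
            if (!m->busy) {
                if (m == *head) *head = next; else prev->next = next;
                m->next = nullptr;        // would be spliced onto a free list here
                removed++;
            } else {
                prev = m;                 // plays the role of curmidinuse
            }
            m = next;
        }
        return removed;
    }

    int main() {
        Monitor c{3, false, nullptr}, b{2, true, &c}, a{1, false, &b};
        Monitor* head = &a;
        printf("scavenged %d\n", scavenge(&head));                       // 2
        for (Monitor* m = head; m; m = m->next) printf("kept %d\n", m->id); // kept 2
        return 0;
    }

The scavenged nodes would then be spliced onto the global free list in one constant-time operation, as the code above does under ListLock.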
diff --git a/hotspot/src/share/vm/runtime/synchronizer.hpp b/hotspot/src/share/vm/runtime/synchronizer.hpp
index f3e81162312..4944cacaea3 100644
--- a/hotspot/src/share/vm/runtime/synchronizer.hpp
+++ b/hotspot/src/share/vm/runtime/synchronizer.hpp
@@ -150,6 +150,8 @@ class ObjectSynchronizer : AllStatic {
// Basically we deflate all monitors that are not busy.
// An adaptive profile-based deflation policy could be used if needed
static void deflate_idle_monitors();
+ static bool deflate_monitor(ObjectMonitor* mid, oop obj, ObjectMonitor** FreeHeadp,
+ ObjectMonitor** FreeTailp);
static void oops_do(OopClosure* f);
// debugging
diff --git a/hotspot/src/share/vm/runtime/thread.cpp b/hotspot/src/share/vm/runtime/thread.cpp
index e3112ad394b..5527173edf9 100644
--- a/hotspot/src/share/vm/runtime/thread.cpp
+++ b/hotspot/src/share/vm/runtime/thread.cpp
@@ -139,6 +139,8 @@ Thread::Thread() {
omFreeList = NULL ;
omFreeCount = 0 ;
omFreeProvision = 32 ;
+ omInUseList = NULL ;
+ omInUseCount = 0 ;
_SR_lock = new Monitor(Mutex::suspend_resume, "SR_lock", true);
_suspend_flags = 0;
@@ -2797,6 +2799,7 @@ CompilerThread::CompilerThread(CompileQueue* queue, CompilerCounters* counters)
_task = NULL;
_queue = queue;
_counters = counters;
+ _buffer_blob = NULL;
#ifndef PRODUCT
_ideal_graph_printer = NULL;
diff --git a/hotspot/src/share/vm/runtime/thread.hpp b/hotspot/src/share/vm/runtime/thread.hpp
index 74f3d67c3c8..901c7f7ff67 100644
--- a/hotspot/src/share/vm/runtime/thread.hpp
+++ b/hotspot/src/share/vm/runtime/thread.hpp
@@ -225,6 +225,8 @@ class Thread: public ThreadShadow {
ObjectMonitor * omFreeList ;
int omFreeCount ; // length of omFreeList
int omFreeProvision ; // reload chunk size
+ ObjectMonitor * omInUseList; // SLL to track monitors in circulation
+ int omInUseCount; // length of omInUseList
public:
enum {
@@ -493,7 +495,6 @@ public:
static ByteSize stack_base_offset() { return byte_offset_of(Thread, _stack_base ); }
static ByteSize stack_size_offset() { return byte_offset_of(Thread, _stack_size ); }
- static ByteSize omFreeList_offset() { return byte_offset_of(Thread, omFreeList); }
#define TLAB_FIELD_OFFSET(name) \
static ByteSize tlab_##name##_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }
@@ -1576,6 +1577,7 @@ class CompilerThread : public JavaThread {
CompileLog* _log;
CompileTask* _task;
CompileQueue* _queue;
+ BufferBlob* _buffer_blob;
public:
@@ -1594,6 +1596,9 @@ class CompilerThread : public JavaThread {
ciEnv* env() { return _env; }
void set_env(ciEnv* env) { _env = env; }
+ BufferBlob* get_buffer_blob() { return _buffer_blob; }
+ void set_buffer_blob(BufferBlob* b) { _buffer_blob = b; };
+
// Get/set the thread's logging information
CompileLog* log() { return _log; }
void init_log(CompileLog* log) {
diff --git a/hotspot/src/share/vm/runtime/vmStructs.cpp b/hotspot/src/share/vm/runtime/vmStructs.cpp
index cae43b31912..05ccdb9918a 100644
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -607,8 +607,6 @@ static inline uint64_t cast_uint64_t(size_t x)
nonstatic_field(CodeBlob, _instructions_offset, int) \
nonstatic_field(CodeBlob, _frame_complete_offset, int) \
nonstatic_field(CodeBlob, _data_offset, int) \
- nonstatic_field(CodeBlob, _oops_offset, int) \
- nonstatic_field(CodeBlob, _oops_length, int) \
nonstatic_field(CodeBlob, _frame_size, int) \
nonstatic_field(CodeBlob, _oop_maps, OopMapSet*) \
\
@@ -626,6 +624,8 @@ static inline uint64_t cast_uint64_t(size_t x)
nonstatic_field(nmethod, _deoptimize_offset, int) \
nonstatic_field(nmethod, _orig_pc_offset, int) \
nonstatic_field(nmethod, _stub_offset, int) \
+ nonstatic_field(nmethod, _consts_offset, int) \
+ nonstatic_field(nmethod, _oops_offset, int) \
nonstatic_field(nmethod, _scopes_data_offset, int) \
nonstatic_field(nmethod, _scopes_pcs_offset, int) \
nonstatic_field(nmethod, _dependencies_offset, int) \
@@ -1328,14 +1328,6 @@ static inline uint64_t cast_uint64_t(size_t x)
declare_constant(LogBytesPerWord) \
declare_constant(BytesPerLong) \
\
- /********************/ \
- /* Object alignment */ \
- /********************/ \
- \
- declare_constant(MinObjAlignment) \
- declare_constant(MinObjAlignmentInBytes) \
- declare_constant(LogMinObjAlignmentInBytes) \
- \
/********************************************/ \
/* Generation and Space Hierarchy Constants */ \
/********************************************/ \
diff --git a/hotspot/src/share/vm/utilities/copy.hpp b/hotspot/src/share/vm/utilities/copy.hpp
index ce26c86bfc8..99966163f14 100644
--- a/hotspot/src/share/vm/utilities/copy.hpp
+++ b/hotspot/src/share/vm/utilities/copy.hpp
@@ -51,7 +51,7 @@ extern "C" {
class Copy : AllStatic {
public:
// Block copy methods have four attributes. We don't define all possibilities.
- // alignment: aligned according to minimum Java object alignment (MinObjAlignment)
+ // alignment: aligned to BytesPerLong
// arrayof: arraycopy operation with both operands aligned on the same
// boundary as the first element of an array of the copy unit.
// This is currently a HeapWord boundary on all platforms, except
@@ -70,7 +70,7 @@ class Copy : AllStatic {
// [ '_atomic' ]
//
// Except in the arrayof case, whatever the alignment is, we assume we can copy
- // whole alignment units. E.g., if MinObjAlignment is 2x word alignment, an odd
+ // whole alignment units. E.g., if BytesPerLong is 2x word alignment, an odd
// count may copy an extra word. In the arrayof case, we are allowed to copy
// only the number of copy units specified.
@@ -305,17 +305,17 @@ class Copy : AllStatic {
}
static void assert_params_aligned(HeapWord* from, HeapWord* to) {
#ifdef ASSERT
- if (mask_bits((uintptr_t)from, MinObjAlignmentInBytes-1) != 0)
- basic_fatal("not object aligned");
- if (mask_bits((uintptr_t)to, MinObjAlignmentInBytes-1) != 0)
- basic_fatal("not object aligned");
+ if (mask_bits((uintptr_t)from, BytesPerLong-1) != 0)
+ basic_fatal("not long aligned");
+ if (mask_bits((uintptr_t)to, BytesPerLong-1) != 0)
+ basic_fatal("not long aligned");
#endif
}
static void assert_params_aligned(HeapWord* to) {
#ifdef ASSERT
- if (mask_bits((uintptr_t)to, MinObjAlignmentInBytes-1) != 0)
- basic_fatal("not object aligned");
+ if (mask_bits((uintptr_t)to, BytesPerLong-1) != 0)
+ basic_fatal("not long aligned");
#endif
}
diff --git a/hotspot/src/share/vm/utilities/globalDefinitions.cpp b/hotspot/src/share/vm/utilities/globalDefinitions.cpp
index d25f56613ba..1f03a88fded 100644
--- a/hotspot/src/share/vm/utilities/globalDefinitions.cpp
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.cpp
@@ -34,6 +34,18 @@ int LogBitsPerHeapOop = 0;
int BytesPerHeapOop = 0;
int BitsPerHeapOop = 0;
+// Object alignment, in units of HeapWords.
+// Defaults are -1 so things will break badly if incorrectly initialized.
+int MinObjAlignment = -1;
+int MinObjAlignmentInBytes = -1;
+int MinObjAlignmentInBytesMask = 0;
+
+int LogMinObjAlignment = -1;
+int LogMinObjAlignmentInBytes = -1;
+
+// Oop encoding heap max
+uint64_t OopEncodingHeapMax = 0;
+
void basic_fatal(const char* msg) {
fatal(msg);
}
diff --git a/hotspot/src/share/vm/utilities/globalDefinitions.hpp b/hotspot/src/share/vm/utilities/globalDefinitions.hpp
index 291c42dd55d..a5a8ae403d2 100644
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp
@@ -73,6 +73,9 @@ extern int LogBitsPerHeapOop;
extern int BytesPerHeapOop;
extern int BitsPerHeapOop;
+// Oop encoding heap max
+extern uint64_t OopEncodingHeapMax;
+
const int BitsPerJavaInteger = 32;
const int BitsPerJavaLong = 64;
const int BitsPerSize_t = size_tSize * BitsPerByte;
@@ -292,12 +295,12 @@ const int max_method_code_size = 64*K - 1; // JVM spec, 2nd ed. section 4.8.1 (
// Minimum is max(BytesPerLong, BytesPerDouble, BytesPerOop) / HeapWordSize, so jlong, jdouble and
// reference fields can be naturally aligned.
-const int MinObjAlignment = HeapWordsPerLong;
-const int MinObjAlignmentInBytes = MinObjAlignment * HeapWordSize;
-const int MinObjAlignmentInBytesMask = MinObjAlignmentInBytes - 1;
+extern int MinObjAlignment;
+extern int MinObjAlignmentInBytes;
+extern int MinObjAlignmentInBytesMask;
-const int LogMinObjAlignment = LogHeapWordsPerLong;
-const int LogMinObjAlignmentInBytes = LogMinObjAlignment + LogHeapWordSize;
+extern int LogMinObjAlignment;
+extern int LogMinObjAlignmentInBytes;
// Machine dependent stuff
@@ -332,18 +335,16 @@ inline intptr_t align_object_size(intptr_t size) {
return align_size_up(size, MinObjAlignment);
}
-// Pad out certain offsets to jlong alignment, in HeapWord units.
+inline bool is_object_aligned(intptr_t addr) {
+ return addr == align_object_size(addr);
+}
-#define align_object_offset_(offset) align_size_up_(offset, HeapWordsPerLong)
+// Pad out certain offsets to jlong alignment, in HeapWord units.
inline intptr_t align_object_offset(intptr_t offset) {
return align_size_up(offset, HeapWordsPerLong);
}
-inline bool is_object_aligned(intptr_t offset) {
- return offset == align_object_offset(offset);
-}
-
//----------------------------------------------------------------------------------------------------
// Utility macros for compilers
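
The hunk above turns the object-alignment constants into runtime variables and rebases is_object_aligned on align_object_size. A minimal Java sketch of the underlying round-up arithmetic, assuming the alignment is a power of two (the names here are hypothetical, not from the patch):

    // ObjectAlignment.java -- illustrative only
    public class ObjectAlignment {
        // Round size up to the next multiple of alignment (alignment must be a power of two).
        static long alignSizeUp(long size, long alignment) {
            return (size + alignment - 1) & ~(alignment - 1);
        }

        // An object size is aligned when rounding it up changes nothing.
        static boolean isObjectAligned(long size, long minObjAlignment) {
            return size == alignSizeUp(size, minObjAlignment);
        }

        public static void main(String[] args) {
            long minObjAlignment = 2;  // e.g. 2 HeapWords = 16 bytes on a 64-bit VM
            System.out.println(alignSizeUp(5, minObjAlignment));      // 6
            System.out.println(isObjectAligned(6, minObjAlignment));  // true
        }
    }
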
diff --git a/hotspot/test/compiler/6958485/Test.java b/hotspot/test/compiler/6958485/Test.java
new file mode 100644
index 00000000000..0bbc1ec6285
--- /dev/null
+++ b/hotspot/test/compiler/6958485/Test.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 6958485
+ * @summary fix for 6879921 was insufficient
+ *
+ * @run main/othervm -Xbatch -XX:CompileOnly=Test.init Test
+ */
+
+public class Test {
+
+ public static void init(Object src[], boolean[] dst) {
+ // initialize the arrays
+ for (int i =0; i < src.length; i++) {
-#if _MSC_VER > 1400
+#if _MSC_VER > 1400 && _MSC_VER < 1600
/*
* When building for Microsoft Windows, your program has a dependency
@@ -68,7 +68,7 @@
"publicKeyToken='" _VC_ASSEMBLY_PUBLICKEYTOKEN "'\"")
#endif /* _M_AMD64 */
-#endif /* _MSC_VER > 1400 */
+#endif /* _MSC_VER > 1400 && _MSC_VER < 1600 */
#endif /* _MSC_VER */
#define QUOTEMACRO(x) QUOTEME(x)
diff --git a/jdk/src/share/classes/java/dyn/CallSite.java b/jdk/src/share/classes/java/dyn/CallSite.java
index 94e58f6a815..b8335774851 100644
--- a/jdk/src/share/classes/java/dyn/CallSite.java
+++ b/jdk/src/share/classes/java/dyn/CallSite.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,25 +25,32 @@
package java.dyn;
-import sun.dyn.util.BytecodeName;
import sun.dyn.Access;
+import sun.dyn.MemberName;
import sun.dyn.CallSiteImpl;
-import sun.dyn.MethodHandleImpl;
/**
- * An {@code invokedynamic} call site, as reified by the
- * containing class's bootstrap method.
- * Every call site object corresponds to a distinct instance
- * of the invokedynamic instruction, and vice versa.
- * Every call site has one state variable, called the {@code target}.
- * It is typed as a {@link MethodHandle}. This state is never null, and
- * it is the responsibility of the bootstrap method to produce call sites
- * which have been pre-linked to an initial target method.
+ * A {@code CallSite} reifies an {@code invokedynamic} instruction from bytecode,
+ * and controls its linkage.
+ * Every linked {@code CallSite} object corresponds to a distinct instance
+ * of the {@code invokedynamic} instruction, and vice versa.
*
- * (Note: The bootstrap method may elect to produce call sites of a
+ * Every linked {@code CallSite} object has one state variable,
+ * a {@link MethodHandle} reference called the {@code target}.
+ * This reference is never null. Though it can change its value,
+ * successive values must always have exactly the {@link MethodType method type}
+ * called for by the bytecodes of the associated {@code invokedynamic} instruction.
+ *
+ * It is the responsibility of each class's
+ * {@link Linkage#registerBootstrapMethod(Class, MethodHandle) bootstrap method}
+ * to produce call sites which have been pre-linked to an initial target method.
+ * The required {@link MethodType type} for the target method is a parameter
+ * to each bootstrap method call.
+ *
+ * The bootstrap method may elect to produce call sites of a
* language-specific subclass of {@code CallSite}. In such a case,
* the subclass may claim responsibility for initializing its target to
- * a non-null value, by overriding {@link #initialTarget}.)
+ * a non-null value, by overriding {@link #initialTarget}.
*
* An {@code invokedynamic} instruction which has not yet been executed
* is said to be unlinked. When an unlinked call site is executed,
@@ -52,54 +59,139 @@ import sun.dyn.MethodHandleImpl;
* value to the new call site's target variable, the method {@link #initialTarget}
* is called to produce the new call site's first target method.
*
+ * A freshly-created {@code CallSite} object is not yet in a linked state.
+ * An unlinked {@code CallSite} object reports null for its {@code callerClass}.
+ * When the JVM receives a {@code CallSite} object from a bootstrap method,
+ * it first ensures that its target is non-null and of the correct type.
+ * The JVM then links the {@code CallSite} object to the call site instruction,
+ * enabling the {@code callerClass} to return the class in which the instruction occurs.
+ *
+ * Next, the JVM links the instruction to the {@code CallSite}, at which point
+ * any further execution of the {@code invokedynamic} instruction implicitly
+ * invokes the current target of the {@code CallSite} object.
+ * After this two-way linkage, both the instruction and the {@code CallSite}
+ * object are said to be linked.
+ *
+ * This state of linkage continues until the method containing the
+ * dynamic call site is garbage collected, or the dynamic call site
+ * is invalidated by an explicit request.
+ *
+ * Linkage happens once in the lifetime of any given {@code CallSite} object.
+ * Because of call site invalidation, this linkage can be repeated for
+ * a single {@code invokedynamic} instruction, with multiple {@code CallSite} objects.
+ * When a {@code CallSite} is unlinked from an {@code invokedynamic} instruction,
+ * the instruction is reset so that it is no longer associated with
+ * the {@code CallSite} object, but the {@code CallSite} does not change
+ * state.
+ *
+ * Here is a sample use of call sites and bootstrap methods which links every
+ * dynamic call site to print its arguments:
+
+private static void printArgs(Object... args) {
+ System.out.println(java.util.Arrays.deepToString(args));
+}
+private static final MethodHandle printArgs;
+static {
+ MethodHandles.Lookup lookup = MethodHandles.lookup();
+ Class thisClass = lookup.lookupClass(); // (who am I?)
+ printArgs = lookup.findStatic(thisClass,
+ "printArgs", MethodType.methodType(void.class, Object[].class));
+ Linkage.registerBootstrapMethod("bootstrapDynamic");
+}
+private static CallSite bootstrapDynamic(Class caller, String name, MethodType type) {
+ // ignore caller and name, but match the type:
+ return new CallSite(MethodHandles.collectArguments(printArgs, type));
+}
+
* @see Linkage#registerBootstrapMethod(java.lang.Class, java.dyn.MethodHandle)
* @author John Rose, JSR 292 EG
*/
public class CallSite
- // Note: This is an implementation inheritance hack, and will be removed
- // with a JVM change which moves the required hidden state onto this class.
- extends CallSiteImpl
{
private static final Access IMPL_TOKEN = Access.getToken();
- /*
-
// Fields used only by the JVM. Do not use or change.
- private Object vmmethod;
- int callerMID, callerBCI; // supplied by the JVM
+ private MemberName vmmethod; // supplied by the JVM (ref. to calling method)
+ private int vmindex; // supplied by the JVM (BCI within calling method)
+ // The actual payload of this call site:
private MethodHandle target;
- final Object caller; // usually a class
- final String name;
- final MethodType type;
- */
+ // Remove this field for PFD and delete deprecated methods:
+ private MemberName calleeNameRemoveForPFD;
/**
- * Make a call site given the parameters from a call to the bootstrap method.
- * The resulting call site is in an unlinked state, which means that before
- * it is returned from a bootstrap method call it must be provided with
- * a target method via a call to {@link CallSite#setTarget}.
- * @param caller the class in which the relevant {@code invokedynamic} instruction occurs
- * @param name the name specified by the {@code invokedynamic} instruction
- * @param type the method handle type derived from descriptor of the {@code invokedynamic} instruction
+ * Make a blank call site object.
+ * Before it is returned from a bootstrap method, this {@code CallSite} object
+ * must be provided with
+ * a target method via a call to {@link CallSite#setTarget(MethodHandle) setTarget},
+ * or by a subclass override of {@link CallSite#initialTarget(Class,String,MethodType) initialTarget}.
*/
- public CallSite(Object caller, String name, MethodType type) {
- super(IMPL_TOKEN, caller, name, type);
+ public CallSite() {
}
- private static void privateInitializeCallSite(CallSite site, int callerMID, int callerBCI) {
- site.callerMID = callerMID;
- site.callerBCI = callerBCI;
- site.ensureTarget();
+ /**
+ * Make a blank call site object, possibly equipped with an initial target method handle.
+ * The initial target reference may be null, in which case the {@code CallSite} object
+ * must be provided with a target method via a call to {@link CallSite#setTarget},
+ * or by a subclass override of {@link CallSite#initialTarget}.
+ * @param target the method handle which will be the initial target of the call site, or null if there is none yet
+ */
+ public CallSite(MethodHandle target) {
+ this.target = target;
}
- private void ensureTarget() {
- // Note use of super, which accesses the field directly,
- // without deferring to possible subclass overrides.
- if (super.getTarget() == null) {
- super.setTarget(this.initialTarget());
- super.getTarget().type(); // provoke NPE if still null
+
+ /** @deprecated transitional form defined in EDR but removed in PFD */
+ public CallSite(Class<?> caller, String name, MethodType type) {
+ this.calleeNameRemoveForPFD = new MemberName(caller, name, type);
+ }
+ /** @deprecated transitional form defined in EDR but removed in PFD */
+ public Class<?> callerClass() {
+ MemberName callee = this.calleeNameRemoveForPFD;
+ return callee == null ? null : callee.getDeclaringClass();
+ }
+ /** @deprecated transitional form defined in EDR but removed in PFD */
+ public String name() {
+ MemberName callee = this.calleeNameRemoveForPFD;
+ return callee == null ? null : callee.getName();
+ }
+ /** @deprecated transitional form defined in EDR but removed in PFD */
+ public MethodType type() {
+ MemberName callee = this.calleeNameRemoveForPFD;
+ return callee == null ? (target == null ? null : target.type()) : callee.getMethodType();
+ }
+ /** @deprecated transitional form defined in EDR but removed in PFD */
+ protected MethodHandle initialTarget() {
+ return initialTarget(callerClass(), name(), type());
+ }
+
+ /** Report if the JVM has linked this {@code CallSite} object to a dynamic call site instruction.
+ * Once it is linked, it is never unlinked.
+ */
+ private boolean isLinked() {
+ return vmmethod != null;
+ }
+
+ /** Called from JVM (or low-level Java code) after the BSM returns the newly created CallSite.
+ * The parameters are JVM-specific.
+ */
+ void initializeFromJVM(String name,
+ MethodType type,
+ MemberName callerMethod,
+ int callerBCI) {
+ if (this.isLinked()) {
+ throw new InvokeDynamicBootstrapError("call site has already been linked to an invokedynamic instruction");
}
+ MethodHandle target = this.target;
+ if (target == null) {
+ this.target = target = this.initialTarget(callerMethod.getDeclaringClass(), name, type);
+ }
+ if (!target.type().equals(type)) {
+ throw wrongTargetType(target, type);
+ }
+ this.vmindex = callerBCI;
+ this.vmmethod = callerMethod;
+ assert(this.isLinked());
}
/**
@@ -108,14 +200,18 @@ public class CallSite
* the method {@code initialTarget} is called to produce an initial
* non-null target. (Live call sites must never have null targets.)
*
+ * The arguments are the same as those passed to the bootstrap method.
+ * Thus, a bootstrap method is free to ignore the arguments and simply
+ * create a "blank" {@code CallSite} object of an appropriate subclass.
+ *
* If the bootstrap method itself does not initialize the call site,
* this method must be overridden, because it just raises an
* {@code InvokeDynamicBootstrapError}, which in turn causes the
* linkage of the {@code invokedynamic} instruction to terminate
* abnormally.
*/
- protected MethodHandle initialTarget() {
- throw new InvokeDynamicBootstrapError("target must be initialized before call site is linked: "+this);
+ protected MethodHandle initialTarget(Class<?> callerClass, String name, MethodType type) {
+ throw new InvokeDynamicBootstrapError("target must be initialized before call site is linked: "+name+type);
}
/**
@@ -137,11 +233,11 @@ public class CallSite
* @see #setTarget
*/
public MethodHandle getTarget() {
- return super.getTarget();
+ return target;
}
/**
- * Link or relink the call site, by setting its target method.
+ * Set the target method of this call site.
*
* The interactions of {@code setTarget} with memory are the same
* as of a write to an ordinary variable, such as an array element or a
@@ -152,96 +248,46 @@ public class CallSite
* Stronger guarantees can be created by putting appropriate operations
* into the bootstrap method and/or the target methods used
* at any given call site.
- * @param target the new target, or null if it is to be unlinked
+ * @param newTarget the new target
* @throws NullPointerException if the proposed new target is null
- * @throws WrongMethodTypeException if the proposed new target
- * has a method type that differs from the call site's {@link #type()}
+ * @throws WrongMethodTypeException if the call site is linked and the proposed new target
+ * has a method type that differs from the previous target
*/
- public void setTarget(MethodHandle target) {
- checkTarget(target);
- super.setTarget(target);
+ public void setTarget(MethodHandle newTarget) {
+ MethodType newType = newTarget.type(); // null check!
+ MethodHandle oldTarget = this.target;
+ if (oldTarget == null) {
+ // CallSite is not yet linked.
+ assert(!isLinked());
+ this.target = newTarget; // might be null!
+ return;
+ }
+ MethodType oldType = oldTarget.type();
+ if (!newTarget.type().equals(oldType))
+ throw wrongTargetType(newTarget, oldType);
+ if (oldTarget != newTarget)
+ CallSiteImpl.setCallSiteTarget(IMPL_TOKEN, this, newTarget);
}
- protected void checkTarget(MethodHandle target) {
- target.type(); // provoke NPE
- if (!canSetTarget(target))
- throw new WrongMethodTypeException(String.valueOf(target)+target.type()+" should be of type "+type());
+ private static WrongMethodTypeException wrongTargetType(MethodHandle target, MethodType type) {
+ return new WrongMethodTypeException(String.valueOf(target)+target.type()+" should be of type "+type);
}
- protected boolean canSetTarget(MethodHandle target) {
- return (target != null && target.type() == type());
- }
-
- /**
- * Report the class containing the call site.
- * This is an immutable property of the call site, set from the first argument to the constructor.
- * @return class containing the call site
+ /** Produce a printed representation that displays information about this call site
+ * that may be useful to the human reader.
*/
- public Class> callerClass() {
- return (Class) caller;
- }
-
- /**
- * Report the method name specified in the {@code invokedynamic} instruction.
- * This is an immutable property of the call site, set from the second argument to the constructor.
- *
- * Note that the name is a JVM bytecode name, and as such can be any
- * non-empty string, as long as it does not contain certain "dangerous"
- * characters such as slash {@code '/'} and dot {@code '.'}.
- * See the Java Virtual Machine specification for more details.
- *
- * Application such as a language runtimes may need to encode
- * arbitrary program element names and other configuration information
- * into the name. A standard convention for doing this is
- * specified here.
- * @return method name specified by the call site
- */
- public String name() {
- return name;
- }
-
- /**
- * Report the method name specified in the {@code invokedynamic} instruction,
- * as a series of components, individually demangled according to
- * the standard convention
- * specified here.
- *
- * Non-empty runs of characters between dangerous characters are demangled.
- * Each component is either a completely arbitrary demangled string,
- * or else a character constant for a punctuation character, typically ':'.
- * (In principle, the character can be any dangerous character that the
- * JVM lets through in a method name, such as '$' or ']'.
- * Runtime implementors are encouraged to use colon ':' for building
- * structured names.)
- *
- * In the common case where the name contains no dangerous characters,
- * the result is an array whose only element array is the demangled
- * name at the call site. Such a demangled name can be any sequence
- * of any number of any unicode characters.
- * @return method name components specified by the call site
- */
- public Object[] nameComponents() {
- return BytecodeName.parseBytecodeName(name);
- }
-
- /**
- * Report the resolved result and parameter types of this call site,
- * which are derived from its bytecode-level invocation descriptor.
- * The types are packaged into a {@link MethodType}.
- * Any linked target of this call site must be exactly this method type.
- * This is an immutable property of the call site, set from the third argument to the constructor.
- * @return method type specified by the call site
- */
- public MethodType type() {
- return type;
- }
-
@Override
public String toString() {
- return "CallSite#"+hashCode()+"["+name+type+" => "+getTarget()+"]";
+ StringBuilder buf = new StringBuilder("CallSite#");
+ buf.append(hashCode());
+ if (!isLinked())
+ buf.append("[unlinked]");
+ else
+ buf.append("[")
+ .append("from ").append(vmmethod.getDeclaringClass().getName())
+ .append(" : ").append(getTarget().type())
+ .append(" => ").append(getTarget())
+ .append("]");
+ return buf.toString();
}
-
- // Package-local constant:
- static final MethodHandle GET_TARGET = MethodHandleImpl.getLookup(IMPL_TOKEN).
- findVirtual(CallSite.class, "getTarget", MethodType.methodType(MethodHandle.class));
}
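
To make the new constructor and setTarget semantics concrete, here is a sketch of a bootstrap method written against the provisional API in this patch (JSR 292 EDR era; the final API differs). The class and method names are illustrative, and the adapter call mirrors the printArgs sample in the javadoc above.

    import java.dyn.CallSite;
    import java.dyn.Linkage;
    import java.dyn.MethodHandle;
    import java.dyn.MethodHandles;
    import java.dyn.MethodType;

    public class RelinkingRuntime {
        static {
            // register this class's own bootstrap method before any call site is linked
            Linkage.registerBootstrapMethod("bootstrapDynamic");
        }

        private static CallSite lastSite;

        // Fallback target; its type is adapted to the call site's type below.
        private static Object slowPath(Object... args) {
            return null; // resolve the call here, then optionally relink
        }

        private static CallSite bootstrapDynamic(Class caller, String name, MethodType type) {
            MethodHandles.Lookup lookup = MethodHandles.lookup();
            MethodHandle fallback = lookup.findStatic(RelinkingRuntime.class, "slowPath",
                    MethodType.methodType(Object.class, Object[].class));
            // The installed target must have exactly the call site's method type.
            CallSite site = new CallSite(MethodHandles.collectArguments(fallback, type));
            lastSite = site;
            return site;
        }

        // Relinking: setTarget accepts a new target of the same method type.
        static void relink(MethodHandle newTarget) {
            lastSite.setTarget(newTarget);
        }
    }
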
diff --git a/jdk/src/share/classes/java/dyn/InvokeDynamic.java b/jdk/src/share/classes/java/dyn/InvokeDynamic.java
index 7f9d4b0363f..021e75a1ea7 100644
--- a/jdk/src/share/classes/java/dyn/InvokeDynamic.java
+++ b/jdk/src/share/classes/java/dyn/InvokeDynamic.java
@@ -26,27 +26,25 @@
package java.dyn;
/**
- * Syntactic marker to request javac to emit an {@code invokedynamic} instruction.
- * An {@code invokedynamic} instruction is a 5-byte bytecoded instruction
- * which begins with an opcode byte of value 186 ({@code 0xBA}),
- * and is followed by a two-byte index of a {@code NameAndType} constant
- * pool entry, then by two zero bytes. The constant pool reference gives
- * the method name and argument and return types of the call site; there
- * is no other information provided at the call site.
+ * {@code InvokeDynamic} is a class with neither methods nor instances,
+ * which serves only as a syntactic marker in Java source code for
+ * an {@code invokedynamic} instruction.
+ * (See the package information for specifics on this instruction.)
*
* The {@code invokedynamic} instruction is incomplete without a target method.
- * The target method is a property of the reified call site object
- * (of type {@link CallSite}) which is in a one-to-one association with each
- * corresponding {@code invokedynamic} instruction. The call site object
- * is initially produced by a bootstrap method associated with
- * the call site, via the various overloadings of {@link Linkage#registerBootstrapMethod}.
+ * The target method is a property of the reified {@linkplain CallSite call site object}
+ * which is linked to each active {@code invokedynamic} instruction.
+ * The call site object is initially produced by a
+ * {@linkplain java.dyn.Linkage#registerBootstrapMethod(Class, MethodHandle) bootstrap method}
+ * associated with the class whose bytecodes include the dynamic call site.
*
* The type {@code InvokeDynamic} has no particular meaning as a
* class or interface supertype, or an object type; it can never be instantiated.
* Logically, it denotes a source of all dynamically typed methods.
- * It may be viewed as a pure syntactic marker (an importable one) of static calls.
+ * It may be viewed as a pure syntactic marker of static calls.
+ * It may be imported for ease of use.
*
- * Here are some examples of usage:
+ * Here are some examples:
*
* Object x; String s; int i;
* x = InvokeDynamic.greet("world"); // greet(Ljava/lang/String;)Ljava/lang/Object;
@@ -65,6 +63,7 @@ package java.dyn;
* which must be registered by the static initializer of the enclosing class.
* @author John Rose, JSR 292 EG
*/
+@MethodHandle.PolymorphicSignature
public final class InvokeDynamic {
private InvokeDynamic() { throw new InternalError(); } // do not instantiate
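
For contrast with the bootstrap-side sketch above, here is roughly what the caller side looks like with the InvokeDynamic marker described in this file; the greet name and descriptor come from the javadoc example, everything else is illustrative and requires the experimental javac support for the marker.

    import java.dyn.InvokeDynamic;
    import java.dyn.Linkage;

    public class DynamicCaller {
        static {
            // bootstrapDynamic must be a static method of this class returning a CallSite
            Linkage.registerBootstrapMethod("bootstrapDynamic");
        }

        static Object demo() throws Throwable {
            // javac emits an invokedynamic instruction with the descriptor
            // greet(Ljava/lang/String;)Ljava/lang/Object;
            return InvokeDynamic.greet("world");
        }
        // The bootstrapDynamic method itself is omitted here; see the CallSite sketch above.
    }
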
diff --git a/jdk/src/share/classes/java/dyn/InvokeDynamicBootstrapError.java b/jdk/src/share/classes/java/dyn/InvokeDynamicBootstrapError.java
index e0517361ecf..83ebcd464b4 100644
--- a/jdk/src/share/classes/java/dyn/InvokeDynamicBootstrapError.java
+++ b/jdk/src/share/classes/java/dyn/InvokeDynamicBootstrapError.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,24 +27,29 @@ package java.dyn;
/**
* Thrown to indicate that an {@code invokedynamic} instruction has
- * failed to find its bootstrap method, or the bootstrap method has
- * failed to provide a call site with a non-null target.
+ * failed to find its
+ * {@linkplain Linkage#registerBootstrapMethod(Class, MethodHandle) bootstrap method},
+ * or the bootstrap method has
+ * failed to provide a
+ * {@linkplain CallSite} call site with a non-null {@linkplain MethodHandle target}
+ * of the correct {@linkplain MethodType method type}.
*
- * The boostrap method must have been declared during a class's initialization
- * by a call to {@link Linkage#registerBootstrapMethod}.
+ * The bootstrap method must have been declared during a class's initialization
+ * by a call to one of the overloadings of
+ * {@link Linkage#registerBootstrapMethod registerBootstrapMethod}.
*
* @author John Rose, JSR 292 EG
*/
public class InvokeDynamicBootstrapError extends LinkageError {
/**
- * Constructs a {@code InvokeDynamicBootstrapError} with no detail message.
+ * Constructs an {@code InvokeDynamicBootstrapError} with no detail message.
*/
public InvokeDynamicBootstrapError() {
super();
}
/**
- * Constructs a {@code InvokeDynamicBootstrapError} with the specified
+ * Constructs an {@code InvokeDynamicBootstrapError} with the specified
* detail message.
*
* @param s the detail message.
diff --git a/jdk/src/share/classes/java/dyn/JavaMethodHandle.java b/jdk/src/share/classes/java/dyn/JavaMethodHandle.java
index 0507eecb237..d8cd87a5ed5 100644
--- a/jdk/src/share/classes/java/dyn/JavaMethodHandle.java
+++ b/jdk/src/share/classes/java/dyn/JavaMethodHandle.java
@@ -28,7 +28,8 @@ package java.dyn;
import sun.dyn.Access;
/**
- * A Java method handle extends the basic method handle type with additional
+ * A Java method handle is a deprecated proposal for extending
+ * the basic method handle type with additional
* programmer defined methods and fields.
* Its behavior as a method handle is determined at instance creation time,
* by providing the new instance with an "entry point" method handle
@@ -62,11 +63,11 @@ import sun.dyn.Access;
* greeter.run(); // prints "hello, world"
* // Statically typed method handle invocation (most direct):
* MethodHandle mh = greeter;
- * mh.<void>invoke(); // also prints "hello, world"
+ * mh.<void>invokeExact(); // also prints "hello, world"
* // Dynamically typed method handle invocation:
- * MethodHandles.invoke(greeter); // also prints "hello, world"
+ * MethodHandles.invokeExact(greeter); // also prints "hello, world"
* greeter.setGreeting("howdy");
- * mh.invoke(); // prints "howdy, world" (object-like mutable behavior)
+ * mh.invokeExact(); // prints "howdy, world" (object-like mutable behavior)
*
*
* In the example of {@code Greeter}, the method {@code run} provides the entry point.
@@ -81,7 +82,7 @@ import sun.dyn.Access;
* inner class:
*
* // We can also do this with symbolic names and/or inner classes:
- * MethodHandles.invoke(new JavaMethodHandle("yow") {
+ * MethodHandles.invokeExact(new JavaMethodHandle("yow") {
* void yow() { System.out.println("yow, world"); }
* });
*
@@ -101,7 +102,7 @@ import sun.dyn.Access;
* Greeter greeter = new Greeter("world");
* greeter.run(); // prints "hello, world"
* MethodHandle mh = MethodHanndles.insertArgument(Greeter.RUN, 0, greeter);
- * mh.invoke(); // also prints "hello, world"
+ * mh.invokeExact(); // also prints "hello, world"
*
* Note that the method handle must be separately created as a view on the base object.
* This increases footprint, complexity, and dynamic indirections.
@@ -113,7 +114,7 @@ import sun.dyn.Access;
* MethodHandle greeter = new JavaMethodHandle("run") {
* private void run() { System.out.println("hello, "+greetee); }
* }
- * greeter.invoke(); // prints "hello, world"
+ * greeter.invokeExact(); // prints "hello, world"
*
*
* Here is an abstract parameterized lvalue, efficiently expressed as a subtype of MethodHandle,
@@ -137,10 +138,12 @@ import sun.dyn.Access;
* public Number get(long i) { return stuff[(int)i]; }
* public void set(long i, Object x) { stuff[(int)i] = x; }
* }
- * int x = (Integer) stuffPtr.<Number>invoke(1L); // 456
- * stuffPtr.setter().<void>invoke(0L, (Number) 789); // replaces 123 with 789
+ * int x = (Integer) stuffPtr.<Number>invokeExact(1L); // 456
+ * stuffPtr.setter().<void>invokeExact(0L, (Number) 789); // replaces 123 with 789
*
* @see MethodHandle
+ * @deprecated The JSR 292 EG intends to replace {@code JavaMethodHandle} with
+ * an interface-based API for mixing method handle behavior with other classes.
* @author John Rose, JSR 292 EG
*/
public abstract class JavaMethodHandle
diff --git a/jdk/src/share/classes/java/dyn/Linkage.java b/jdk/src/share/classes/java/dyn/Linkage.java
index 9b6bd60ea96..d65ae41c31c 100644
--- a/jdk/src/share/classes/java/dyn/Linkage.java
+++ b/jdk/src/share/classes/java/dyn/Linkage.java
@@ -25,14 +25,19 @@
package java.dyn;
+import java.lang.annotation.Annotation;
import java.dyn.MethodHandles.Lookup;
import java.util.WeakHashMap;
import sun.dyn.Access;
+import sun.dyn.MethodHandleImpl;
import sun.reflect.Reflection;
import static sun.dyn.util.VerifyAccess.checkBootstrapPrivilege;
+import static sun.dyn.MemberName.newIllegalArgumentException;
/**
- * Static methods which control the linkage of invokedynamic call sites.
+ * This class consists exclusively of static methods that control
+ * the linkage of {@code invokedynamic} instructions, and specifically
+ * their reification as {@link CallSite} objects.
* @author John Rose, JSR 292 EG
*/
public class Linkage {
@@ -42,102 +47,137 @@ public class Linkage {
/**
* PROVISIONAL API, WORK IN PROGRESS:
- * Register a bootstrap method to use when linking a given caller class.
- * It must be a method handle of a type equivalent to {@link CallSite#CallSite}.
- * In other words, it must act as a factory method which accepts the arguments
- * to {@code CallSite}'s constructor (a class, a string, and a method type),
+ * Register a bootstrap method to use when linking dynamic call sites within
+ * a given caller class.
+ *
+ * A bootstrap method must be a method handle with a return type of {@link CallSite}
+ * and the following arguments:
+ * <ul>
+ * <li>the class containing the {@code invokedynamic} instruction, for which the bootstrap method was registered
+ * <li>the name of the method being invoked (a {@link String})
+ * <li>the type of the method being invoked (a {@link MethodType})
+ * <li>TBD optionally, an unordered array of {@link Annotation}s attached to the call site
+ * (Until this feature is implemented, this will always receive an empty array.)
+ * </ul>
+ * (TBD: The final argument type may be missing from the method handle's type.
+ * Additional arguments may be added in the future.)
+ * The bootstrap method acts as a factory method which accepts the given arguments
* and returns a {@code CallSite} object (possibly of a subclass of {@code CallSite}).
*
- * The registration will fail with an {@code IllegalStateException} if any of the following conditions hold:
+ * The registration must take place exactly once, either before the class has begun
+ * being initialized, or from within the class's static initializer.
+ * Registration will fail with an exception if any of the following conditions hold:
* <ul>
- * <li>The caller of this method is in a different package than the {@code callerClass},
+ * <li>The immediate caller of this method is in a different package than the given caller class,
* and there is a security manager, and its {@code checkPermission} call throws
* when passed {@link LinkagePermission}("registerBootstrapMethod",callerClass).
- * <li>The given class already has a bootstrap method from a previous
- * call to this method.
- * <li>The given class is already fully initialized.
- * <li>The given class is in the process of initialization, in another thread.
- * <li>The same {@code CallSite} object has already been returned from
- * a bootstrap method call to another {@code invokedynamic} call site.
+ * <li>The given caller class already has a bootstrap method registered.
+ * <li>The given caller class is already fully initialized.
+ * <li>The given caller class is in the process of initialization, in another thread.
* </ul>
* Because of these rules, a class may install its own bootstrap method in
* a static initializer.
* @param callerClass a class that may have {@code invokedynamic} sites
* @param bootstrapMethod the method to use to bootstrap all such sites
+ * @exception IllegalArgumentException if the class argument is null or
+ * a primitive class, or if the bootstrap method is the wrong type
+ * @exception IllegalStateException if the class already has a bootstrap
+ * method, or if its static initializer has already run
+ * or is already running in another thread
+ * @exception SecurityException if there is a security manager installed,
+ * and a {@link LinkagePermission} check fails for "registerBootstrapMethod"
*/
public static
void registerBootstrapMethod(Class callerClass, MethodHandle bootstrapMethod) {
Class callc = Reflection.getCallerClass(2);
checkBootstrapPrivilege(callc, callerClass, "registerBootstrapMethod");
checkBSM(bootstrapMethod);
- synchronized (bootstrapMethods) {
- if (bootstrapMethods.containsKey(callerClass))
- throw new IllegalStateException("bootstrap method already declared in "+callerClass);
- bootstrapMethods.put(callerClass, bootstrapMethod);
- }
+ MethodHandleImpl.registerBootstrap(IMPL_TOKEN, callerClass, bootstrapMethod);
}
- static void checkBSM(MethodHandle mh) {
- if (mh == null) throw new IllegalArgumentException("null bootstrap method");
- if (mh.type() == OLD_BOOTSTRAP_METHOD_TYPE) // FIXME: delete at EDR/PFD
- throw new WrongMethodTypeException("bootstrap method must be a CallSite factory");
- if (mh.type() != BOOTSTRAP_METHOD_TYPE)
- throw new WrongMethodTypeException(mh.toString());
+ static private void checkBSM(MethodHandle mh) {
+ if (mh == null) throw newIllegalArgumentException("null bootstrap method");
+ if (mh.type() == BOOTSTRAP_METHOD_TYPE_2)
+ // For now, always pass an empty array for the Annotations argument
+ mh = MethodHandles.insertArguments(mh, BOOTSTRAP_METHOD_TYPE_2.parameterCount()-1,
+ (Object)NO_ANNOTATIONS);
+ if (mh.type() == BOOTSTRAP_METHOD_TYPE) return;
+ throw new WrongMethodTypeException(mh.toString());
}
+ static private final Annotation[] NO_ANNOTATIONS = { };
/**
* PROVISIONAL API, WORK IN PROGRESS:
- * Simplified version of registerBootstrapMethod for self-registration,
+ * Simplified version of {@code registerBootstrapMethod} for self-registration,
* to be called from a static initializer.
* Finds a static method of the required type in the
- * given class, and installs it on the caller.
- * @throws IllegalArgumentException if there is no such method
+ * given runtime class, and installs it on the caller class.
+ * @throws NoSuchMethodException if there is no such method
+ * @throws IllegalStateException if the caller class's static initializer
+ * has already run, or is already running in another thread
*/
public static
void registerBootstrapMethod(Class<?> runtime, String name) {
- Class callc = Reflection.getCallerClass(2);
- Lookup lookup = new Lookup(IMPL_TOKEN, callc);
- MethodHandle bootstrapMethod =
- lookup.findStatic(runtime, name, BOOTSTRAP_METHOD_TYPE);
- // FIXME: exception processing wrong here
- checkBSM(bootstrapMethod);
- Linkage.registerBootstrapMethod(callc, bootstrapMethod);
+ Class callerClass = Reflection.getCallerClass(2);
+ registerBootstrapMethodLookup(callerClass, runtime, name);
}
/**
* PROVISIONAL API, WORK IN PROGRESS:
- * Simplified version of registerBootstrapMethod for self-registration,
+ * Simplified version of {@code registerBootstrapMethod} for self-registration,
* to be called from a static initializer.
* Finds a static method of the required type in the
- * caller's class, and installs it on the caller.
+ * caller class itself, and installs it on the caller class.
* @throws IllegalArgumentException if there is no such method
+ * @throws IllegalStateException if the caller class's static initializer
+ * has already run, or is already running in another thread
*/
public static
void registerBootstrapMethod(String name) {
- Class callc = Reflection.getCallerClass(2);
- Lookup lookup = new Lookup(IMPL_TOKEN, callc);
- MethodHandle bootstrapMethod =
- lookup.findStatic(callc, name, BOOTSTRAP_METHOD_TYPE);
- // FIXME: exception processing wrong here
+ Class callerClass = Reflection.getCallerClass(2);
+ registerBootstrapMethodLookup(callerClass, callerClass, name);
+ }
+
+ private static
+ void registerBootstrapMethodLookup(Class<?> callerClass, Class<?> runtime, String name) {
+ Lookup lookup = new Lookup(IMPL_TOKEN, callerClass);
+ MethodHandle bootstrapMethod;
+ // Try both types. TBD
+ try {
+ bootstrapMethod = lookup.findStatic(runtime, name, BOOTSTRAP_METHOD_TYPE_2);
+ } catch (NoAccessException ex) {
+ bootstrapMethod = null;
+ }
+ if (bootstrapMethod == null) {
+ try {
+ bootstrapMethod = lookup.findStatic(runtime, name, BOOTSTRAP_METHOD_TYPE);
+ } catch (NoAccessException ex) {
+ throw new IllegalArgumentException("no such bootstrap method in "+runtime+": "+name, ex);
+ }
+ }
checkBSM(bootstrapMethod);
- Linkage.registerBootstrapMethod(callc, bootstrapMethod);
+ MethodHandleImpl.registerBootstrap(IMPL_TOKEN, callerClass, bootstrapMethod);
}
/**
* PROVISIONAL API, WORK IN PROGRESS:
- * Report the bootstrap method registered for a given class.
+ * Report the bootstrap method registered for a given caller class.
* Returns null if the class has never yet registered a bootstrap method.
* Only callers privileged to set the bootstrap method may inquire
* about it, because a bootstrap method is potentially a back-door entry
* point into its class.
+ * @exception IllegalArgumentException if the argument is null or
+ * a primitive class
+ * @exception SecurityException if there is a security manager installed,
+ * and the immediate caller of this method is not in the same
+ * package as the caller class
+ * and a {@link LinkagePermission} check fails for "getBootstrapMethod"
*/
public static
MethodHandle getBootstrapMethod(Class callerClass) {
Class callc = Reflection.getCallerClass(2);
- checkBootstrapPrivilege(callc, callerClass, "registerBootstrapMethod");
- synchronized (bootstrapMethods) {
- return bootstrapMethods.get(callerClass);
- }
+ checkBootstrapPrivilege(callc, callerClass, "getBootstrapMethod");
+ return MethodHandleImpl.getBootstrap(IMPL_TOKEN, callerClass);
}
/**
@@ -148,13 +188,10 @@ public class Linkage {
public static final MethodType BOOTSTRAP_METHOD_TYPE
= MethodType.methodType(CallSite.class,
Class.class, String.class, MethodType.class);
-
- private static final MethodType OLD_BOOTSTRAP_METHOD_TYPE
- = MethodType.methodType(Object.class,
- CallSite.class, Object[].class);
-
- private static final WeakHashMap bootstrapMethods =
- new WeakHashMap();
+ static final MethodType BOOTSTRAP_METHOD_TYPE_2
+ = MethodType.methodType(CallSite.class,
+ Class.class, String.class, MethodType.class,
+ Annotation[].class);
/**
* PROVISIONAL API, WORK IN PROGRESS:
@@ -182,10 +219,8 @@ public class Linkage {
/**
* PROVISIONAL API, WORK IN PROGRESS:
- * Invalidate all invokedynamic call sites in the bytecodes
+ * Invalidate all {@code invokedynamic} call sites in the bytecodes
* of any methods of the given class.
- * (These are exactly those sites which report the given class
- * via the {@link CallSite#callerClass()} method.)
*
* When this method returns, every matching invokedynamic
* instruction will invoke its bootstrap method on next call.
@@ -201,18 +236,4 @@ public class Linkage {
}
throw new UnsupportedOperationException("NYI");
}
-
- private static Object doNotBootstrap(CallSite site, Object... arguments) {
- throw new UnsupportedOperationException("call site must not have null target: "+site);
- }
-
- private static final MethodHandle DO_NOT_BOOTSTRAP =
- MethodHandles.Lookup.IMPL_LOOKUP.findStatic(Linkage.class, "doNotBootstrap",
- OLD_BOOTSTRAP_METHOD_TYPE);
-
- // Up-call from the JVM. Obsolete. FIXME: Delete from VM then from here.
- static
- MethodHandle findBootstrapMethod(Class callerClass, Class searchBootstrapClass) {
- return DO_NOT_BOOTSTRAP;
- }
}
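
The second accepted bootstrap-method type above (BOOTSTRAP_METHOD_TYPE_2) adds a trailing Annotation[] parameter, which checkBSM currently binds to an empty array. A hedged sketch of a bootstrap method written against that form (illustrative names; provisional EDR-era API):

    import java.lang.annotation.Annotation;
    import java.dyn.CallSite;
    import java.dyn.Linkage;
    import java.dyn.MethodHandle;
    import java.dyn.MethodHandles;
    import java.dyn.MethodType;

    public class AnnotatedBootstrap {
        static {
            Linkage.registerBootstrapMethod("bootstrapDynamic");
        }

        private static void trace(Object... args) {
            System.out.println(java.util.Arrays.deepToString(args));
        }

        // Matches BOOTSTRAP_METHOD_TYPE_2: (Class, String, MethodType, Annotation[]) -> CallSite
        private static CallSite bootstrapDynamic(Class caller, String name,
                                                 MethodType type, Annotation[] anns) {
            // anns is always empty until call-site annotations are implemented (see checkBSM above)
            MethodHandle target = MethodHandles.lookup().findStatic(AnnotatedBootstrap.class,
                    "trace", MethodType.methodType(void.class, Object[].class));
            return new CallSite(MethodHandles.collectArguments(target, type));
        }
    }
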
diff --git a/jdk/src/share/classes/java/dyn/LinkagePermission.java b/jdk/src/share/classes/java/dyn/LinkagePermission.java
index ab4ce04dd2b..4478d959853 100644
--- a/jdk/src/share/classes/java/dyn/LinkagePermission.java
+++ b/jdk/src/share/classes/java/dyn/LinkagePermission.java
@@ -31,23 +31,17 @@ import java.util.Hashtable;
import java.util.StringTokenizer;
/**
- * This class is for runtime permissions. A RuntimePermission
- * contains a name (also referred to as a "target name") but
+ * This class is for managing runtime permission checking for
+ * operations performed by methods in the {@link Linkage} class.
+ * Like a {@link RuntimePermission}, on which it is modeled,
+ * a {@code LinkagePermission} contains a target name but
* no actions list; you either have the named permission
* or you don't.
- *
- *
- * The target name is the name of the runtime permission (see below). The
- * naming convention follows the hierarchical property naming convention.
- * Also, an asterisk
- * may appear at the end of the name, following a ".", or by itself, to
- * signify a wildcard match. For example: "loadLibrary.*" or "*" is valid,
- * "*loadLibrary" or "a*b" is not valid.
- *
- * The following table lists all the possible RuntimePermission target names,
+ *
+ * The following table lists all the possible {@code LinkagePermission} target names,
* and for each provides a description of what the permission allows
* and a discussion of the risks of granting code the permission.
- * Specifying a bootstrap method for invokedynamic, within a class of the given name
+ * Specifying a bootstrap method for {@code invokedynamic} instructions within a class of the given name
* An attacker could attempt to attach a bootstrap method to a class which
- * has just been loaded, thus gaining control of its invokedynamic calls.
+ * has just been loaded, thus gaining control of its {@code invokedynamic} calls.
*
*
*
*
invalidateAll
*
Force the relinking of invokedynamic call sites everywhere.
- * This could allow an attacker to slow down the system, or perhaps surface timing bugs in a dynamic language implementations, by forcing redundant relinking operations.
+ * This could allow an attacker to slow down the system,
+ * or perhaps expose timing bugs in a dynamic language implementation,
+ * by forcing redundant relinking operations.
*
- * @see java.security.BasicPermission
+ * @see java.security.RuntimePermission
* @see java.lang.SecurityManager
*
* @author John Rose, JSR 292 EG
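
As a rough illustration of how these target names are meant to be consulted (not code from the patch), a security-manager check for the invalidateAll target might look like the following; it assumes LinkagePermission keeps the single-string constructor of the RuntimePermission style it is modeled on.

    import java.dyn.LinkagePermission;

    public class PermissionDemo {
        public static void main(String[] args) {
            SecurityManager sm = System.getSecurityManager();
            if (sm != null) {
                // Throws SecurityException unless the caller has been granted
                // LinkagePermission("invalidateAll") in the security policy.
                sm.checkPermission(new LinkagePermission("invalidateAll"));
            }
        }
    }
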
diff --git a/jdk/src/share/classes/java/dyn/MethodHandle.java b/jdk/src/share/classes/java/dyn/MethodHandle.java
index 25d575b9f25..20387ca6859 100644
--- a/jdk/src/share/classes/java/dyn/MethodHandle.java
+++ b/jdk/src/share/classes/java/dyn/MethodHandle.java
@@ -34,32 +34,34 @@ import static java.dyn.MethodHandles.invokers; // package-private API
import static sun.dyn.MemberName.newIllegalArgumentException; // utility
/**
- * A method handle is a typed reference to the entry point of a method.
+ * A method handle is a typed, directly executable reference to a method,
+ * constructor, field, or similar low-level operation, with optional
+ * conversion or substitution of arguments or return values.
*
* Method handles are strongly typed according to signature.
* They are not distinguished by method name or enclosing class.
* A method handle must be invoked under a signature which exactly matches
- * the method handle's own type.
+ * the method handle's own {@link MethodType method type}.
*
- * Every method handle confesses its type via the type accessor.
+ * Every method handle confesses its type via the {@code type} accessor.
* The structure of this type is a series of classes, one of which is
- * the return type of the method (or void.class if none).
+ * the return type of the method (or {@code void.class} if none).
*
* Every method handle appears as an object containing a method named
- * invoke, whose signature exactly matches
+ * {@code invoke}, whose signature exactly matches
* the method handle's type.
* A Java method call expression, which compiles to an
- * invokevirtual instruction,
+ * {@code invokevirtual} instruction,
* can invoke this method from Java source code.
*
* Every call to a method handle specifies an intended method type,
* which must exactly match the type of the method handle.
- * (The type is specified in the invokevirtual instruction,
+ * (The type is specified in the {@code invokevirtual} instruction,
* via a {@code CONSTANT_NameAndType} constant pool entry.)
* The call looks within the receiver object for a method
- * named invoke of the intended method type.
+ * named {@code invoke} of the intended method type.
* The call fails with a {@link WrongMethodTypeException}
- * if the method does not exist, even if there is an invoke
+ * if the method does not exist, even if there is an {@code invoke}
* method of a closely similar signature.
* As with other kinds
* of methods in the JVM, signature matching during method linkage
@@ -76,13 +78,13 @@ import static sun.dyn.MemberName.newIllegalArgumentException; // utility
* They should not be passed to untrusted code.
*
* Bytecode in an extended JVM can directly call a method handle's
- * invoke from an invokevirtual instruction.
- * The receiver class type must be MethodHandle and the method name
- * must be invoke. The signature of the invocation
+ * {@code invoke} from an {@code invokevirtual} instruction.
+ * The receiver class type must be {@code MethodHandle} and the method name
+ * must be {@code invoke}. The signature of the invocation
* (after resolving symbolic type names) must exactly match the method type
* of the target method.
*
- * Every invoke method always throws {@link Exception},
+ * Every {@code invoke} method always throws {@link Exception},
* which is to say that there is no static restriction on what a method handle
* can throw. Since the JVM does not distinguish between checked
* and unchecked exceptions (other than by their class, of course),
@@ -92,11 +94,11 @@ import static sun.dyn.MemberName.newIllegalArgumentException; // utility
* throw {@code Exception}, or else must catch all checked exceptions locally.
*
* Bytecode in an extended JVM can directly obtain a method handle
- * for any accessible method from a ldc instruction
- * which refers to a CONSTANT_Methodref or
- * CONSTANT_InterfaceMethodref constant pool entry.
+ * for any accessible method from a {@code ldc} instruction
+ * which refers to a {@code CONSTANT_Methodref} or
+ * {@code CONSTANT_InterfaceMethodref} constant pool entry.
*
- * All JVMs can also use a reflective API called MethodHandles
+ * All JVMs can also use a reflective API called {@code MethodHandles}
* for creating and calling method handles.
*
* A method reference may refer either to a static or non-static method.
@@ -104,7 +106,7 @@ import static sun.dyn.MemberName.newIllegalArgumentException; // utility
* receiver argument, prepended before any other arguments.
* In the method handle's type, the initial receiver argument is typed
* according to the class under which the method was initially requested.
- * (E.g., if a non-static method handle is obtained via ldc,
+ * (E.g., if a non-static method handle is obtained via {@code ldc},
* the type of the receiver is the class named in the constant pool entry.)
*
* When a method handle to a virtual method is invoked, the method is
@@ -113,38 +115,38 @@ import static sun.dyn.MemberName.newIllegalArgumentException; // utility
* A non-virtual method handles to a specific virtual method implementation
* can also be created. These do not perform virtual lookup based on
* receiver type. Such a method handle simulates the effect of
- * an invokespecial instruction to the same method.
+ * an {@code invokespecial} instruction to the same method.
*
* Each of the above calls generates a single invokevirtual instruction
* with the name {@code invoke} and the type descriptors indicated in the comments.
@@ -167,6 +169,14 @@ import static sun.dyn.MemberName.newIllegalArgumentException; // utility
* those of multiple arities. It is impossible to represent such
* genericity with a Java type parameter.
*
+ * Signature polymorphic methods in this class appear to be documented
+ * as having type parameters for return types and a parameter, but that is
+ * merely a documentation convention. These type parameters do
+ * not play a role in type-checking method handle invocations.
+ *
+ * Note: Like classes and strings, method handles that correspond directly
+ * to fields and methods can be represented directly as constants to be
+ * loaded by {@code ldc} bytecodes.
*
* @see MethodType
* @see MethodHandles
@@ -180,7 +190,15 @@ public abstract class MethodHandle
private static Access IMPL_TOKEN = Access.getToken();
// interface MethodHandle
- // { MethodType type(); public R invoke(A...) throws X; }
+ // { MethodType type(); public R invokeExact(A...) throws X; }
+
+ /**
+ * Internal marker interface which distinguishes (to the Java compiler)
+ * those methods which are signature polymorphic.
+ */
+ @java.lang.annotation.Target({java.lang.annotation.ElementType.METHOD,java.lang.annotation.ElementType.TYPE})
+ @java.lang.annotation.Retention(java.lang.annotation.RetentionPolicy.CLASS)
+ @interface PolymorphicSignature { }
private MethodType type;
@@ -232,85 +250,38 @@ public abstract class MethodHandle
return MethodHandleImpl.getNameString(IMPL_TOKEN, this);
}
- //// First draft of the "Method Handle Kernel API" discussed at the JVM Language Summit, 9/2009.
+ //// This is the "Method Handle Kernel API" discussed at the JVM Language Summit, 9/2009.
//// Implementations here currently delegate to statics in MethodHandles. Some of those statics
//// will be deprecated. Others will be kept as "algorithms" to supply degrees of freedom
//// not present in the Kernel API.
/**
* PROVISIONAL API, WORK IN PROGRESS:
- * Perform an exact invocation. The signature at the call site of {@code invokeExact} must
+ * Invoke the method handle, allowing any caller signature, but requiring an exact signature match.
+ * The signature at the call site of {@code invokeExact} must
* exactly match this method handle's {@code type}.
* No conversions are allowed on arguments or return values.
- * This is not yet implemented, pending required compiler and JVM support.
*/
- public final T invokeExact(Object... arguments) throws Throwable {
- // This is an approximate implementation, which discards the caller's signature and refuses the call.
- throw new InternalError("not yet implemented");
- }
+ public final native @PolymorphicSignature <R, A> R invokeExact(A... args) throws Throwable;
+
+ // FIXME: remove this transitional form
+ /** @deprecated transitional form defined in EDR but removed in PFD */
+ public final native @PolymorphicSignature <R, A> R invoke(A... args) throws Throwable;
/**
* PROVISIONAL API, WORK IN PROGRESS:
- * Perform a generic invocation. The signature at the call site of {@code invokeExact} must
+ * Invoke the method handle, allowing any caller signature,
+ * and performing simple conversions for arguments and return types.
+ * The signature at the call site of {@code invokeGeneric} must
* have the same arity as this method handle's {@code type}.
* The same conversions are allowed on arguments or return values as are supported by
* by {@link MethodHandles#convertArguments}.
* If the call site signature exactly matches this method handle's {@code type},
* the call proceeds as if by {@link #invokeExact}.
- * This is not fully implemented, pending required compiler and JVM support.
*/
- // This is an approximate implementation, which discards the caller's signature.
- // When it is made signature polymorphic, the overloadings will disappear.
- public final T invokeGeneric() throws Throwable {
- MethodHandle invoker = invokers(this.type()).genericInvoker();
- return invoker.invoke(this);
- }
- public final T invokeGeneric(Object a0) throws Throwable {
- MethodHandle invoker = invokers(this.type()).genericInvoker();
- return invoker.invoke(this, a0);
- }
- public final T invokeGeneric(Object a0, Object a1) throws Throwable {
- MethodHandle invoker = invokers(this.type()).genericInvoker();
- return invoker.invoke(this, a0, a1);
- }
- public final T invokeGeneric(Object a0, Object a1, Object a2) throws Throwable {
- MethodHandle invoker = invokers(this.type()).genericInvoker();
- return invoker.invoke(this, a0, a1, a2);
- }
- public final T invokeGeneric(Object a0, Object a1, Object a2, Object a3) throws Throwable {
- MethodHandle invoker = invokers(this.type()).genericInvoker();
- return invoker.invoke(this, a0, a1, a2, a3);
- }
- public final T invokeGeneric(Object a0, Object a1, Object a2, Object a3,
- Object a4) throws Throwable {
- MethodHandle invoker = invokers(this.type()).genericInvoker();
- return invoker.invoke(this, a0, a1, a2, a3, a4);
- }
- public final T invokeGeneric(Object a0, Object a1, Object a2, Object a3,
- Object a4, Object a5) throws Throwable {
- MethodHandle invoker = invokers(this.type()).genericInvoker();
- return invoker.invoke(this, a0, a1, a2, a3, a4, a5);
- }
- public final T invokeGeneric(Object a0, Object a1, Object a2, Object a3,
- Object a4, Object a5, Object a6) throws Throwable {
- MethodHandle invoker = invokers(this.type()).genericInvoker();
- return invoker.invoke(this, a0, a1, a2, a3, a4, a5, a6);
- }
- public final T invokeGeneric(Object a0, Object a1, Object a2, Object a3,
- Object a4, Object a5, Object a6, Object a7) throws Throwable {
- MethodHandle invoker = invokers(this.type()).genericInvoker();
- return invoker.invoke(this, a0, a1, a2, a3, a4, a5, a6, a7);
- }
- public final T invokeGeneric(Object a0, Object a1, Object a2, Object a3,
- Object a4, Object a5, Object a6, Object a7, Object a8) throws Throwable {
- MethodHandle invoker = invokers(this.type()).genericInvoker();
- return invoker.invoke(this, a0, a1, a2, a3, a4, a5, a6, a7, a8);
- }
- public final T invokeGeneric(Object a0, Object a1, Object a2, Object a3,
- Object a4, Object a5, Object a6, Object a7, Object a8, Object a9) throws Throwable {
- MethodHandle invoker = invokers(this.type()).genericInvoker();
- return invoker.invoke(this, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9);
- }
+ public final native @PolymorphicSignature <R, A> R invokeGeneric(A... args) throws Throwable;
+
+ // ?? public final native @PolymorphicSignature <R, A, V> R invokeVarargs(A args, V[] varargs) throws Throwable;
/**
* PROVISIONAL API, WORK IN PROGRESS:
@@ -341,47 +312,47 @@ public abstract class MethodHandle
* This call is equivalent to the following code:
*
* @param arguments the arguments to pass to the target
* @return the result returned by the target
* @see MethodHandles#genericInvoker
*/
- public final Object invokeVarargs(Object[] arguments) throws Throwable {
+ public final Object invokeVarargs(Object... arguments) throws Throwable {
int argc = arguments == null ? 0 : arguments.length;
MethodType type = type();
if (argc <= 10) {
MethodHandle invoker = MethodHandles.invokers(type).genericInvoker();
switch (argc) {
- case 0: return invoker.invoke(this);
- case 1: return invoker.invoke(this,
+ case 0: return invoker.invokeExact(this);
+ case 1: return invoker.invokeExact(this,
arguments[0]);
- case 2: return invoker.invoke(this,
+ case 2: return invoker.invokeExact(this,
arguments[0], arguments[1]);
- case 3: return invoker.invoke(this,
+ case 3: return invoker.invokeExact(this,
arguments[0], arguments[1], arguments[2]);
- case 4: return invoker.invoke(this,
+ case 4: return invoker.invokeExact(this,
arguments[0], arguments[1], arguments[2],
arguments[3]);
- case 5: return invoker.invoke(this,
+ case 5: return invoker.invokeExact(this,
arguments[0], arguments[1], arguments[2],
arguments[3], arguments[4]);
- case 6: return invoker.invoke(this,
+ case 6: return invoker.invokeExact(this,
arguments[0], arguments[1], arguments[2],
arguments[3], arguments[4], arguments[5]);
- case 7: return invoker.invoke(this,
+ case 7: return invoker.invokeExact(this,
arguments[0], arguments[1], arguments[2],
arguments[3], arguments[4], arguments[5],
arguments[6]);
- case 8: return invoker.invoke(this,
+ case 8: return invoker.invokeExact(this,
arguments[0], arguments[1], arguments[2],
arguments[3], arguments[4], arguments[5],
arguments[6], arguments[7]);
- case 9: return invoker.invoke(this,
+ case 9: return invoker.invokeExact(this,
arguments[0], arguments[1], arguments[2],
arguments[3], arguments[4], arguments[5],
arguments[6], arguments[7], arguments[8]);
- case 10: return invoker.invoke(this,
+ case 10: return invoker.invokeExact(this,
arguments[0], arguments[1], arguments[2],
arguments[3], arguments[4], arguments[5],
arguments[6], arguments[7], arguments[8],
@@ -391,7 +362,7 @@ public abstract class MethodHandle
// more than ten arguments get boxed in a varargs list:
MethodHandle invoker = MethodHandles.invokers(type).varargsInvoker(0);
- return invoker.invoke(this, arguments);
+ return invoker.invokeExact(this, arguments);
}
/** Equivalent to {@code invokeVarargs(arguments.toArray())}. */
public final Object invokeVarargs(java.util.List<?> arguments) throws Throwable {
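
Before moving on to MethodHandles, a quick hedged sketch of how the retained invokeVarargs entry point is used from ordinary Java code (illustrative names; EDR-era API as shown in this hunk):

    import java.dyn.MethodHandle;
    import java.dyn.MethodHandles;
    import java.dyn.MethodType;

    public class InvokeDemo {
        static int add(int a, int b) { return a + b; }

        public static void main(String[] args) throws Throwable {
            MethodHandle mh = MethodHandles.lookup().findStatic(InvokeDemo.class, "add",
                    MethodType.methodType(int.class, int.class, int.class));
            // invokeVarargs boxes the arguments and dispatches through the generic
            // invoker, applying the same conversions as invokeGeneric.
            Object result = mh.invokeVarargs(1, 2);
            System.out.println(result); // 3
        }
    }
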
diff --git a/jdk/src/share/classes/java/dyn/MethodHandles.java b/jdk/src/share/classes/java/dyn/MethodHandles.java
index 3f53e4a19e9..ac35f88e143 100644
--- a/jdk/src/share/classes/java/dyn/MethodHandles.java
+++ b/jdk/src/share/classes/java/dyn/MethodHandles.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,13 +44,13 @@ import static sun.dyn.MemberName.newIllegalArgumentException;
import static sun.dyn.MemberName.newNoAccessException;
/**
- * Fundamental operations and utilities for MethodHandle.
- * They fall into several categories:
+ * This class consists exclusively of static methods that operate on or return
+ * method handles. They fall into several categories:
*
 - * <li>Reifying methods and fields. This is subject to access checks.
 - * <li>Combining or transforming pre-existing method handles into new ones.
 - * <li>Miscellaneous emulation of common JVM operations or control flow patterns.
 + * <li>Factory methods which create method handles for methods and fields.
 + * <li>Invoker methods which can invoke method handles on dynamically typed arguments and/or varargs arrays.
 + * <li>Combinator methods, which combine or transform pre-existing method handles into new ones.
 + * <li>Factory methods which create method handles that emulate other common JVM operations or control flow patterns.
*
*
* @author John Rose, JSR 292 EG
@@ -66,36 +66,44 @@ public class MethodHandles {
//// Method handle creation from ordinary methods.
- /** Create a {@link Lookup} lookup object on the caller.
- *
+ /**
+ * Return a {@link Lookup lookup object} on the caller,
+ * which has the capability to access any method handle that the caller has access to,
+ * including direct method handles to private fields and methods.
+ * This lookup object is a capability which may be delegated to trusted agents.
+ * Do not store it in a place where untrusted code can access it.
*/
public static Lookup lookup() {
return new Lookup();
}
- /** Version of lookup which is trusted minimally.
- * It can only be used to create method handles to
- * publicly accessible members.
+ /**
+ * Return a {@link Lookup lookup object} which is trusted minimally.
+ * It can only be used to create method handles to
+ * publicly accessible fields and methods.
*/
public static Lookup publicLookup() {
return Lookup.PUBLIC_LOOKUP;
}
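As a rough illustration of the capability difference between the two factories above (a sketch against this draft API; the class and its private method are hypothetical):

    import java.dyn.MethodHandle;
    import java.dyn.MethodHandles;
    import java.dyn.MethodType;

    class LookupSketch {
        private static void secret() {}

        static void demo() {
            MethodHandles.Lookup full = MethodHandles.lookup();       // full access to LookupSketch
            MethodHandles.Lookup pub  = MethodHandles.publicLookup(); // public members only
            // The caller's own lookup can reach its private method:
            MethodHandle mh = full.findStatic(LookupSketch.class, "secret",
                    MethodType.methodType(void.class));
            // The same request through the public lookup would throw NoAccessException.
        }
    }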
/**
- * A factory object for creating method handles, when the creation
- * requires access checking. Method handles do not perform
+ * A lookup object is a factory for creating method handles,
+ * when the creation requires access checking.
+ * Method handles do not perform
* access checks when they are called; this is a major difference
* from reflective {@link Method}, which performs access checking
- * against every caller, on every call. Method handle access
- * restrictions are enforced when a method handle is created.
+ * against every caller, on every call.
+ * Therefore, method handle access
+ * restrictions must be enforced when a method handle is created.
* The caller class against which those restrictions are enforced
- * is known as the "lookup class". {@link Lookup} embodies an
+ * is known as the {@linkplain #lookupClass lookup class}.
+ * A lookup object embodies an
* authenticated lookup class, and can be used to create any number
* of access-checked method handles, all checked against a single
* lookup class.
*
* A class which needs to create method handles will call
- * {@code MethodHandles.lookup()} to create a factory for itself.
+ * {@link MethodHandles#lookup MethodHandles.lookup} to create a factory for itself.
* It may then use this factory to create method handles on
* all of its methods, including private ones.
* It may also delegate the lookup (e.g., to a metaobject protocol)
@@ -104,12 +112,13 @@ public class MethodHandles {
* checked against the original lookup class, and not with any higher
* privileges.
*
- * Note that access checks only apply to named and reflected methods.
- * Other method handle creation methods, such as {@link #convertArguments},
+ * Access checks only apply to named and reflected methods.
+ * Other method handle creation methods, such as
+ * {@link #convertArguments MethodHandles.convertArguments},
* do not require any access checks, and can be done independently
* of any lookup class.
- *
- * A note about error conditions: A lookup can fail, because
+ * <h3>How access errors are handled</h3>
+ * A lookup can fail, because
* the containing class is not accessible to the lookup class, or
* because the desired class member is missing, or because the
* desired class member is not accessible to the lookup class.
@@ -124,8 +133,25 @@ public class MethodHandles {
*/
public static final
class Lookup {
+ /** The class on behalf of whom the lookup is being performed. */
private final Class<?> lookupClass;
+ /** The allowed sorts of members which may be looked up (public, etc.), with STRICT for package. */
+ private final int allowedModes;
+
+ private static final int
+ PUBLIC = Modifier.PUBLIC,
+ PACKAGE = Modifier.STRICT,
+ PROTECTED = Modifier.PROTECTED,
+ PRIVATE = Modifier.PRIVATE,
+ ALL_MODES = (PUBLIC | PACKAGE | PROTECTED | PRIVATE),
+ TRUSTED = -1;
+
+ private static int fixmods(int mods) {
+ mods &= (ALL_MODES - PACKAGE);
+ return (mods != 0) ? mods : PACKAGE;
+ }
+
/** Which class is performing the lookup? It is this class against
* which checks are performed for visibility and access permissions.
*
@@ -136,57 +162,90 @@ public class MethodHandles {
return lookupClass;
}
+ // This is just for calling out to MethodHandleImpl.
+ private Class<?> lookupClassOrNull() {
+ return (allowedModes == TRUSTED) ? null : lookupClass;
+ }
+
+ /** Which types of members can this lookup object produce?
+ * The result is a bit-mask of the modifier bits PUBLIC, PROTECTED, PRIVATE, and STRICT.
+ * The modifier bit STRICT stands in for the (non-existent) package protection mode.
+ */
+ int lookupModes() {
+ return allowedModes & ALL_MODES;
+ }
+
/** Embody the current class (the lookupClass) as a lookup class
* for method handle creation.
* Must be called from a method in this package,
* which in turn is called by a method not in this package.
+ *
* Also, don't make it private, lest javac interpose
* an access$N method.
*/
Lookup() {
- this(IMPL_TOKEN, getCallerClassAtEntryPoint());
+ this(getCallerClassAtEntryPoint(), ALL_MODES);
+ // make sure we haven't accidentally picked up a privileged class:
+ checkUnprivilegedlookupClass(lookupClass);
}
Lookup(Access token, Class<?> lookupClass) {
- // make sure we haven't accidentally picked up a privileged class:
- checkUnprivilegedlookupClass(lookupClass);
+ this(lookupClass, ALL_MODES);
+ Access.check(token);
+ }
+
+ private Lookup(Class<?> lookupClass, int allowedModes) {
this.lookupClass = lookupClass;
+ this.allowedModes = allowedModes;
}
/**
- * Create a lookup on the specified class.
- * The result is guaranteed to have no more access privileges
- * than the original.
+ * Create a lookup on the specified new lookup class.
+ * The resulting object will report the specified
+ * class as its own {@link #lookupClass}.
+ *
+ * However, the resulting {@code Lookup} object is guaranteed
+ * to have no more access capabilities than the original.
+ * In particular:
+ * <ul>
+ * <li>If the new lookup class differs from the old one,
+ * protected members will not be accessible by virtue of inheritance.
+ * <li>If the new lookup class is in a different package
+ * than the old one, protected and default (package) members will not be accessible.
+ * <li>If the new lookup class is not within the same package member
+ * as the old one, private members will not be accessible.
+ * <li>In all cases, public members will continue to be accessible.
+ * </ul>
*/
- public Lookup in(Class<?> newLookupClass) {
- if (this == PUBLIC_LOOKUP) return PUBLIC_LOOKUP;
- if (newLookupClass == null) return PUBLIC_LOOKUP;
- if (newLookupClass == lookupClass) return this;
- if (this != IMPL_LOOKUP) {
- if (!VerifyAccess.isSamePackage(lookupClass, newLookupClass))
- throw newNoAccessException(new MemberName(newLookupClass), this);
- checkUnprivilegedlookupClass(newLookupClass);
+ public Lookup in(Class<?> requestedLookupClass) {
+ requestedLookupClass.getClass(); // null check
+ if (allowedModes == TRUSTED) // IMPL_LOOKUP can make any lookup at all
+ return new Lookup(requestedLookupClass, ALL_MODES);
+ if (requestedLookupClass == this.lookupClass)
+ return this; // keep same capabilities
+ int newModes = (allowedModes & (ALL_MODES & ~PROTECTED));
+ if ((newModes & PACKAGE) != 0
+ && !VerifyAccess.isSamePackage(this.lookupClass, requestedLookupClass)) {
+ newModes &= ~(PACKAGE|PRIVATE);
}
- return new Lookup(newLookupClass);
- }
-
- private Lookup(Class<?> lookupClass) {
- this.lookupClass = lookupClass;
+ if ((newModes & PRIVATE) != 0
+ && !VerifyAccess.isSamePackageMember(this.lookupClass, requestedLookupClass)) {
+ newModes &= ~PRIVATE;
+ }
+ checkUnprivilegedlookupClass(requestedLookupClass);
+ return new Lookup(requestedLookupClass, newModes);
}
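A small sketch of how the mode narrowing above plays out in practice, assuming this draft API and the java.dyn imports, inside some method of the calling class (java.util.ArrayList is just an example of a class in another package):

    MethodHandles.Lookup self  = MethodHandles.lookup();             // PUBLIC|PACKAGE|PROTECTED|PRIVATE for this class
    MethodHandles.Lookup other = self.in(java.util.ArrayList.class);
    // other is in a different package and package member, so its modes drop to PUBLIC only;
    // public members remain reachable through it:
    MethodHandle size = other.findVirtual(java.util.ArrayList.class, "size",
            MethodType.methodType(int.class));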
// Make sure outer class is initialized first.
static { IMPL_TOKEN.getClass(); }
- private static final Class<?> PUBLIC_ONLY = sun.dyn.empty.Empty.class;
-
/** Version of lookup which is trusted minimally.
* It can only be used to create method handles to
* publicly accessible members.
*/
- static final Lookup PUBLIC_LOOKUP = new Lookup(PUBLIC_ONLY);
+ static final Lookup PUBLIC_LOOKUP = new Lookup(Object.class, PUBLIC);
/** Package-private version of lookup which is trusted. */
- static final Lookup IMPL_LOOKUP = new Lookup(null);
+ static final Lookup IMPL_LOOKUP = new Lookup(Object.class, TRUSTED);
static { MethodHandleImpl.initLookup(IMPL_TOKEN, IMPL_LOOKUP); }
private static void checkUnprivilegedlookupClass(Class<?> lookupClass) {
@@ -195,13 +254,35 @@ public class MethodHandles {
throw newIllegalArgumentException("illegal lookupClass: "+lookupClass);
}
+ /** Display the name of the class.
+ * If there are restrictions on the access permitted to this lookup,
+ * display those also.
+ */
@Override
public String toString() {
- if (lookupClass == PUBLIC_ONLY)
- return "public";
- if (lookupClass == null)
- return "privileged";
- return lookupClass.getName();
+ String modestr;
+ String cname = lookupClass.getName();
+ switch (allowedModes) {
+ case TRUSTED:
+ return "/trusted";
+ case PUBLIC:
+ modestr = "/public";
+ if (lookupClass == Object.class)
+ return modestr;
+ break;
+ case PUBLIC|PACKAGE:
+ return cname + "/package";
+ case 0: // should not happen
+ return cname + "/empty";
+ case ALL_MODES:
+ return cname;
+ }
+ StringBuilder buf = new StringBuilder(cname);
+ if ((allowedModes & PUBLIC) != 0) buf.append("/public");
+ if ((allowedModes & PACKAGE) != 0) buf.append("/package");
+ if ((allowedModes & PROTECTED) != 0) buf.append("/protected");
+ if ((allowedModes & PRIVATE) != 0) buf.append("/private");
+ return buf.toString();
}
// call this from an entry point method in Lookup with extraFrames=0.
@@ -219,11 +300,11 @@ public class MethodHandles {
* The type of the method handle will be that of the method.
* (Since static methods do not take receivers, there is no
* additional receiver argument inserted into the method handle type,
- * as there would be with {@linkplain #findVirtual} or {@linkplain #findSpecial}.)
+ * as there would be with {@link #findVirtual} or {@link #findSpecial}.)
* The method and all its argument types must be accessible to the lookup class.
* If the method's class has not yet been initialized, that is done
* immediately, before the method handle is returned.
- * @param defc the class from which the method is accessed
+ * @param refc the class from which the method is accessed
* @param name the name of the method
* @param type the type of the method
* @return the desired method handle
@@ -231,18 +312,16 @@ public class MethodHandles {
* @exception NoAccessException if the method does not exist or access checking fails
*/
public
- MethodHandle findStatic(Class<?> defc, String name, MethodType type) throws NoAccessException {
- MemberName method = IMPL_NAMES.resolveOrFail(new MemberName(defc, name, type, Modifier.STATIC), true, lookupClass());
- VerifyAccess.checkName(method, this);
- checkStatic(true, method, this);
- //throw NoSuchMethodException
- return MethodHandleImpl.findMethod(IMPL_TOKEN, method, false, lookupClass());
+ MethodHandle findStatic(Class<?> refc, String name, MethodType type) throws NoAccessException {
+ MemberName method = resolveOrFail(refc, name, type, true);
+ checkMethod(refc, method, true);
+ return MethodHandleImpl.findMethod(IMPL_TOKEN, method, false, lookupClassOrNull());
}
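A minimal usage sketch for the factory above (draft API; assumes the java.dyn imports and a surrounding method that may throw Throwable; Math.max is used only as a convenient public static method):

    MethodHandles.Lookup lookup = MethodHandles.lookup();
    // Handle of type (int,int)int for the static method Math.max(int,int).
    MethodHandle max = lookup.findStatic(Math.class, "max",
            MethodType.methodType(int.class, int.class, int.class));
    int m = (int) max.invokeExact(3, 4);   // 4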
/**
* Produce a method handle for a virtual method.
* The type of the method handle will be that of the method,
- * with the receiver type ({@code defc}) prepended.
+ * with the receiver type (usually {@code refc}) prepended.
* The method and all its argument types must be accessible to the lookup class.
*
* (BUG NOTE: The type {@code Object} may be prepended instead
@@ -257,18 +336,44 @@ public class MethodHandles {
* implementation to enter.
* (The dispatching action is identical with that performed by an
* {@code invokevirtual} or {@code invokeinterface} instruction.)
- * @param defc the class or interface from which the method is accessed
+ * @param refc the class or interface from which the method is accessed
* @param name the name of the method
* @param type the type of the method, with the receiver argument omitted
* @return the desired method handle
* @exception SecurityException TBD
* @exception NoAccessException if the method does not exist or access checking fails
*/
- public MethodHandle findVirtual(Class<?> defc, String name, MethodType type) throws NoAccessException {
- MemberName method = IMPL_NAMES.resolveOrFail(new MemberName(defc, name, type), true, lookupClass());
- VerifyAccess.checkName(method, this);
- checkStatic(false, method, this);
- return MethodHandleImpl.findMethod(IMPL_TOKEN, method, true, lookupClass());
+ public MethodHandle findVirtual(Class<?> refc, String name, MethodType type) throws NoAccessException {
+ MemberName method = resolveOrFail(refc, name, type, false);
+ checkMethod(refc, method, false);
+ MethodHandle mh = MethodHandleImpl.findMethod(IMPL_TOKEN, method, true, lookupClassOrNull());
+ return restrictProtectedReceiver(method, mh);
+ }
+
+ /**
+ * Produce a method handle which creates an object and initializes it, using
+ * the constructor of the specified type.
+ * The parameter types of the method handle will be those of the constructor,
+ * while the return type will be a reference to the constructor's class.
+ * The constructor and all its argument types must be accessible to the lookup class.
+ * If the constructor's class has not yet been initialized, that is done
+ * immediately, before the method handle is returned.
+ *
+ * Note: The requested type must have a return type of {@code void}.
+ * This is consistent with the JVM's treatment of constructor signatures.
+ * @param refc the class or interface from which the method is accessed
+ * @param type the type of the method, with the receiver argument omitted, and a void return type
+ * @return the desired method handle
+ * @exception SecurityException TBD
+ * @exception NoAccessException if the method does not exist or access checking fails
+ */
+ public MethodHandle findConstructor(Class<?> refc, MethodType type) throws NoAccessException {
+ String name = "";
+ MemberName ctor = resolveOrFail(refc, name, type, false, false, lookupClassOrNull());
+ assert(ctor.isConstructor());
+ checkAccess(refc, ctor);
+ MethodHandle rawMH = MethodHandleImpl.findMethod(IMPL_TOKEN, ctor, false, lookupClassOrNull());
+ return MethodHandleImpl.makeAllocator(IMPL_TOKEN, rawMH);
}
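For example, under this draft API a constructor handle might be obtained and used as follows (a sketch, assuming the java.dyn imports; note the void-returning requested type):

    MethodHandles.Lookup lookup = MethodHandles.lookup();
    // Requested type (char[])void selects String(char[]); the handle's own type becomes (char[])String.
    MethodHandle newString = lookup.findConstructor(String.class,
            MethodType.methodType(void.class, char[].class));
    String s = (String) newString.invokeExact(new char[] { 'h', 'i' });   // "hi"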
/**
@@ -287,27 +392,87 @@ public class MethodHandles {
*
* If the explicitly specified caller class is not identical with the
* lookup class, a security check TBD is performed.
- * @param defc the class or interface from which the method is accessed
- * @param name the name of the method, or "<init>" for a constructor
+ * @param refc the class or interface from which the method is accessed
+ * @param name the name of the method (which must not be "<init>")
* @param type the type of the method, with the receiver argument omitted
* @param specialCaller the proposed calling class to perform the {@code invokespecial}
* @return the desired method handle
* @exception SecurityException TBD
* @exception NoAccessException if the method does not exist or access checking fails
*/
- public MethodHandle findSpecial(Class<?> defc, String name, MethodType type,
+ public MethodHandle findSpecial(Class<?> refc, String name, MethodType type,
Class<?> specialCaller) throws NoAccessException {
- checkSpecialCaller(specialCaller, this);
- Lookup slookup = this.in(specialCaller);
- MemberName method = IMPL_NAMES.resolveOrFail(new MemberName(defc, name, type), false, slookup.lookupClass());
- VerifyAccess.checkName(method, this);
- checkStatic(false, method, this);
- if (name.equals("<init>")) {
- throw newNoAccessException("cannot directly invoke a constructor", method, null);
- } else if (defc.isInterface() || !defc.isAssignableFrom(specialCaller)) {
- throw newNoAccessException("method must be in a superclass of lookup class", method, slookup.lookupClass());
- }
- return MethodHandleImpl.findMethod(IMPL_TOKEN, method, false, slookup.lookupClass());
+ checkSpecialCaller(specialCaller);
+ MemberName method = resolveOrFail(refc, name, type, false, false, specialCaller);
+ checkMethod(refc, method, false);
+ MethodHandle mh = MethodHandleImpl.findMethod(IMPL_TOKEN, method, false, specialCaller);
+ return restrictReceiver(method, mh, specialCaller);
+ }
+
+ /**
+ * PROVISIONAL API, WORK IN PROGRESS:
+ * Produce a method handle giving read access to a non-static field.
+ * The type of the method handle will have a return type of the field's
+ * value type.
+ * The method handle's single argument will be the instance containing
+ * the field.
+ * Access checking is performed immediately on behalf of the lookup class.
+ * @param name the field's name
+ * @param type the field's type
+ * @return a method handle which can load values from the field
+ * @exception NoAccessException if access checking fails
+ */
+ public MethodHandle findGetter(Class<?> refc, String name, Class<?> type) throws NoAccessException {
+ return makeAccessor(refc, name, type, false, false);
+ }
+
+ /**
+ * PROVISIONAL API, WORK IN PROGRESS:
+ * Produce a method handle giving write access to a non-static field.
+ * The type of the method handle will have a void return type.
+ * The method handle will take two arguments, the instance containing
+ * the field, and the value to be stored.
+ * The second argument will be of the field's value type.
+ * Access checking is performed immediately on behalf of the lookup class.
+ * @param name the field's name
+ * @param type the field's type
+ * @return a method handle which can store values into the field
+ * @exception NoAccessException if access checking fails
+ */
+ public MethodHandle findSetter(Class<?> refc, String name, Class<?> type) throws NoAccessException {
+ return makeAccessor(refc, name, type, false, true);
+ }
+
+ /**
+ * PROVISIONAL API, WORK IN PROGRESS:
+ * Produce a method handle giving read access to a static field.
+ * The type of the method handle will have a return type of the field's
+ * value type.
+ * The method handle will take no arguments.
+ * Access checking is performed immediately on behalf of the lookup class.
+ * @param name the field's name
+ * @param type the field's type
+ * @return a method handle which can load values from the field
+ * @exception NoAccessException if access checking fails
+ */
+ public MethodHandle findStaticGetter(Class<?> refc, String name, Class<?> type) throws NoAccessException {
+ return makeAccessor(refc, name, type, true, false);
+ }
+
+ /**
+ * PROVISIONAL API, WORK IN PROGRESS:
+ * Produce a method handle giving write access to a static field.
+ * The type of the method handle will have a void return type.
+ * The method handle will take a single
+ * argument, of the field's value type, the value to be stored.
+ * Access checking is performed immediately on behalf of the lookup class.
+ * @param name the field's name
+ * @param type the field's type
+ * @return a method handle which can store values into the field
+ * @exception NoAccessException if access checking fails
+ */
+ public MethodHandle findStaticSetter(Class<?> refc, String name, Class<?> type) throws NoAccessException {
+ return makeAccessor(refc, name, type, true, true);
}
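A sketch of the four field-accessor factories in use (draft API; the Counter class and its fields are hypothetical):

    import java.dyn.MethodHandle;
    import java.dyn.MethodHandles;

    class Counter {
        int value;          // instance field
        static int total;   // static field

        static void demo() throws Throwable {
            MethodHandles.Lookup lookup = MethodHandles.lookup();
            MethodHandle getValue = lookup.findGetter(Counter.class, "value", int.class);        // (Counter)int
            MethodHandle setValue = lookup.findSetter(Counter.class, "value", int.class);        // (Counter,int)void
            MethodHandle getTotal = lookup.findStaticGetter(Counter.class, "total", int.class);  // ()int
            MethodHandle setTotal = lookup.findStaticSetter(Counter.class, "total", int.class);  // (int)void

            Counter c = new Counter();
            setValue.invokeExact(c, 42);
            int v = (int) getValue.invokeExact(c);   // 42
        }
    }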
/**
@@ -323,7 +488,7 @@ public class MethodHandles {
*
* This is equivalent to the following expression:
*
- * {@link #insertArguments}({@link #findVirtual}(defc, name, type), receiver)
+ * {@link #insertArguments insertArguments}({@link #findVirtual findVirtual}(defc, name, type), receiver)
*
* where {@code defc} is either {@code receiver.getClass()} or a super
* type of that class, in which the requested method is accessible
@@ -336,15 +501,13 @@ public class MethodHandles {
* @exception NoAccessException if the method does not exist or access checking fails
*/
public MethodHandle bind(Object receiver, String name, MethodType type) throws NoAccessException {
- Class<? extends Object> rcvc = receiver.getClass(); // may get NPE
- MemberName reference = new MemberName(rcvc, name, type);
- MemberName method = IMPL_NAMES.resolveOrFail(reference, true, lookupClass());
- VerifyAccess.checkName(method, this);
- checkStatic(false, method, this);
- MethodHandle dmh = MethodHandleImpl.findMethod(IMPL_TOKEN, method, true, lookupClass());
+ Class<? extends Object> refc = receiver.getClass(); // may get NPE
+ MemberName method = resolveOrFail(refc, name, type, false);
+ checkMethod(refc, method, false);
+ MethodHandle dmh = MethodHandleImpl.findMethod(IMPL_TOKEN, method, true, lookupClassOrNull());
MethodHandle bmh = MethodHandleImpl.bindReceiver(IMPL_TOKEN, dmh, receiver);
if (bmh == null)
- throw newNoAccessException(method, this);
+ throw newNoAccessException(method, lookupClass());
return bmh;
}
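A short sketch of bind under this draft API (assuming the java.dyn imports and a surrounding method that may throw Throwable): the receiver is fixed up front, so it disappears from the handle's type.

    MethodHandles.Lookup lookup = MethodHandles.lookup();
    // Equivalent to insertArguments(findVirtual(String.class, "length", ...), "hello"):
    MethodHandle len = lookup.bind("hello", "length", MethodType.methodType(int.class));
    int n = (int) len.invokeExact();   // 5; the type of len is ()int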
@@ -364,29 +527,37 @@ public class MethodHandles {
* @exception NoAccessException if access checking fails
*/
public MethodHandle unreflect(Method m) throws NoAccessException {
- return unreflectImpl(new MemberName(m), m.isAccessible(), true, false, this);
+ MemberName method = new MemberName(m);
+ assert(method.isMethod());
+ if (!m.isAccessible()) checkMethod(method.getDeclaringClass(), method, method.isStatic());
+ MethodHandle mh = MethodHandleImpl.findMethod(IMPL_TOKEN, method, true, lookupClassOrNull());
+ if (!m.isAccessible()) mh = restrictProtectedReceiver(method, mh);
+ return mh;
}
/**
* PROVISIONAL API, WORK IN PROGRESS:
* Produce a method handle for a reflected method.
* It will bypass checks for overriding methods on the receiver,
- * as if by the {@code invokespecial} instruction.
+ * as if by an {@code invokespecial} instruction from within the {@code specialCaller}.
* The type of the method handle will be that of the method,
- * with the receiver type prepended.
+ * with the special caller type prepended (and not the receiver of the method).
* If the method's {@code accessible} flag is not set,
* access checking is performed immediately on behalf of the lookup class,
* as if {@code invokespecial} instruction were being linked.
* @param m the reflected method
+ * @param specialCaller the class nominally calling the method
* @return a method handle which can invoke the reflected method
* @exception NoAccessException if access checking fails
*/
public MethodHandle unreflectSpecial(Method m, Class<?> specialCaller) throws NoAccessException {
- checkSpecialCaller(specialCaller, this);
- Lookup slookup = this.in(specialCaller);
- MemberName mname = new MemberName(m);
- checkStatic(false, mname, this);
- return unreflectImpl(mname, m.isAccessible(), false, false, slookup);
+ checkSpecialCaller(specialCaller);
+ MemberName method = new MemberName(m);
+ assert(method.isMethod());
+ // ignore m.isAccessible: this is a new kind of access
+ checkMethod(m.getDeclaringClass(), method, false);
+ MethodHandle mh = MethodHandleImpl.findMethod(IMPL_TOKEN, method, false, lookupClassOrNull());
+ return restrictReceiver(method, mh, specialCaller);
}
/**
@@ -400,13 +571,16 @@ public class MethodHandles {
*
* If the constructor's {@code accessible} flag is not set,
* access checking is performed immediately on behalf of the lookup class.
- * @param ctor the reflected constructor
+ * @param c the reflected constructor
* @return a method handle which can invoke the reflected constructor
* @exception NoAccessException if access checking fails
*/
- public MethodHandle unreflectConstructor(Constructor ctor) throws NoAccessException {
- MemberName m = new MemberName(ctor);
- return unreflectImpl(m, ctor.isAccessible(), false, false, this);
+ public MethodHandle unreflectConstructor(Constructor c) throws NoAccessException {
+ MemberName ctor = new MemberName(c);
+ assert(ctor.isConstructor());
+ if (!c.isAccessible()) checkAccess(c.getDeclaringClass(), ctor);
+ MethodHandle rawCtor = MethodHandleImpl.findMethod(IMPL_TOKEN, ctor, false, lookupClassOrNull());
+ return MethodHandleImpl.makeAllocator(IMPL_TOKEN, rawCtor);
}
/**
@@ -424,8 +598,7 @@ public class MethodHandles {
* @exception NoAccessException if access checking fails
*/
public MethodHandle unreflectGetter(Field f) throws NoAccessException {
- MemberName m = new MemberName(f);
- return unreflectImpl(m, f.isAccessible(), false, false, this);
+ return makeAccessor(f.getDeclaringClass(), new MemberName(f), f.isAccessible(), false);
}
/**
@@ -443,75 +616,134 @@ public class MethodHandles {
* @exception NoAccessException if access checking fails
*/
public MethodHandle unreflectSetter(Field f) throws NoAccessException {
- MemberName m = new MemberName(f);
- return unreflectImpl(m, f.isAccessible(), false, true, this);
+ return makeAccessor(f.getDeclaringClass(), new MemberName(f), f.isAccessible(), true);
}
- }
+ /// Helper methods, all package-private.
- static /*must not be public*/
- MethodHandle findStaticFrom(Lookup lookup,
- Class<?> defc, String name, MethodType type) throws NoAccessException {
- MemberName method = IMPL_NAMES.resolveOrFail(new MemberName(defc, name, type, Modifier.STATIC), true, lookup.lookupClass());
- VerifyAccess.checkName(method, lookup);
- checkStatic(true, method, lookup);
- return MethodHandleImpl.findMethod(IMPL_TOKEN, method, false, lookup.lookupClass());
- }
-
- static void checkStatic(boolean wantStatic, MemberName m, Lookup lookup) {
- if (wantStatic != m.isStatic()) {
- String message = wantStatic ? "expected a static method" : "expected a non-static method";
- throw newNoAccessException(message, m, lookup.lookupClass());
+ MemberName resolveOrFail(Class<?> refc, String name, Class<?> type, boolean isStatic) {
+ checkSymbolicClass(refc); // do this before attempting to resolve
+ int mods = (isStatic ? Modifier.STATIC : 0);
+ return IMPL_NAMES.resolveOrFail(new MemberName(refc, name, type, mods), true, lookupClassOrNull());
}
- }
- static void checkSpecialCaller(Class> specialCaller, Lookup lookup) {
- if (lookup == Lookup.IMPL_LOOKUP)
- return; // privileged action
- assert(lookup.lookupClass() != null);
- if (!VerifyAccess.isSamePackageMember(specialCaller, lookup.lookupClass()))
- throw newNoAccessException("no private access", new MemberName(specialCaller), lookup.lookupClass());
- }
-
- // Helper for creating handles on reflected methods and constructors.
- static MethodHandle unreflectImpl(MemberName m, boolean isAccessible,
- boolean doDispatch, boolean isSetter, Lookup lookup) {
- MethodType narrowMethodType = null;
- Class<?> defc = m.getDeclaringClass();
- boolean isSpecialInvoke = m.isInvocable() && !doDispatch;
- int mods = m.getModifiers();
- if (m.isStatic()) {
- if (!isAccessible &&
- VerifyAccess.isAccessible(defc, mods, lookup.lookupClass(), false) == null)
- throw newNoAccessException(m, lookup);
- } else {
- Class<?> constraint;
- if (isAccessible) {
- // abbreviated access check for "unlocked" method
- constraint = doDispatch ? defc : lookup.lookupClass();
- } else {
- constraint = VerifyAccess.isAccessible(defc, mods, lookup.lookupClass(), isSpecialInvoke);
- }
- if (constraint == null) {
- throw newNoAccessException(m, lookup);
- }
- if (constraint != defc && !constraint.isAssignableFrom(defc)) {
- if (!defc.isAssignableFrom(constraint))
- throw newNoAccessException("receiver must be in caller class", m, lookup.lookupClass());
- if (m.isInvocable())
- narrowMethodType = m.getInvocationType().changeParameterType(0, constraint);
- else if (m.isField())
- narrowMethodType = (!isSetter
- ? MethodType.methodType(m.getFieldType(), constraint)
- : MethodType.methodType(void.class, constraint, m.getFieldType()));
- }
+ MemberName resolveOrFail(Class<?> refc, String name, MethodType type, boolean isStatic) {
+ checkSymbolicClass(refc); // do this before attempting to resolve
+ int mods = (isStatic ? Modifier.STATIC : 0);
+ return IMPL_NAMES.resolveOrFail(new MemberName(refc, name, type, mods), true, lookupClassOrNull());
+ }
+
+ MemberName resolveOrFail(Class<?> refc, String name, MethodType type, boolean isStatic,
+ boolean searchSupers, Class<?> specialCaller) {
+ checkSymbolicClass(refc); // do this before attempting to resolve
+ int mods = (isStatic ? Modifier.STATIC : 0);
+ return IMPL_NAMES.resolveOrFail(new MemberName(refc, name, type, mods), searchSupers, specialCaller);
+ }
+
+ void checkSymbolicClass(Class<?> refc) {
+ Class<?> caller = lookupClassOrNull();
+ if (caller != null && !VerifyAccess.isClassAccessible(refc, caller))
+ throw newNoAccessException("symbolic reference class is not public", new MemberName(refc), caller);
+ }
+
+ void checkMethod(Class<?> refc, MemberName m, boolean wantStatic) {
+ String message;
+ if (m.isConstructor())
+ message = "expected a method, not a constructor";
+ else if (!m.isMethod())
+ message = "expected a method";
+ else if (wantStatic != m.isStatic())
+ message = wantStatic ? "expected a static method" : "expected a non-static method";
+ else
+ { checkAccess(refc, m); return; }
+ throw newNoAccessException(message, m, lookupClass());
+ }
+
+ void checkAccess(Class<?> refc, MemberName m) {
+ int allowedModes = this.allowedModes;
+ if (allowedModes == TRUSTED) return;
+ int mods = m.getModifiers();
+ if (Modifier.isPublic(mods) && Modifier.isPublic(refc.getModifiers()))
+ return; // common case
+ int requestedModes = fixmods(mods); // adjust 0 => PACKAGE
+ if ((requestedModes & allowedModes) != 0
+ && VerifyAccess.isMemberAccessible(refc, m.getDeclaringClass(),
+ mods, lookupClass()))
+ return;
+ if (((requestedModes & ~allowedModes) & PROTECTED) != 0
+ && VerifyAccess.isSamePackage(m.getDeclaringClass(), lookupClass()))
+ // Protected members can also be checked as if they were package-private.
+ return;
+ throw newNoAccessException(accessFailedMessage(refc, m), m, lookupClass());
+ }
+
+ String accessFailedMessage(Class<?> refc, MemberName m) {
+ Class<?> defc = m.getDeclaringClass();
+ int mods = m.getModifiers();
+ if (!VerifyAccess.isClassAccessible(defc, lookupClass()))
+ return "class is not public";
+ if (refc != defc && !VerifyAccess.isClassAccessible(refc, lookupClass()))
+ return "symbolic reference "+refc.getName()+" is not public";
+ if (Modifier.isPublic(mods))
+ return "access to public member failed"; // (how?)
+ else if (allowedModes == PUBLIC)
+ return "member is not public";
+ if (Modifier.isPrivate(mods))
+ return "member is private";
+ if (Modifier.isProtected(mods))
+ return "member is protected";
+ return "member is private to package";
+ }
+
+ void checkSpecialCaller(Class<?> specialCaller) {
+ if (allowedModes == TRUSTED) return;
+ if (!VerifyAccess.isSamePackageMember(specialCaller, lookupClass()))
+ throw newNoAccessException("no private access for invokespecial",
+ new MemberName(specialCaller), lookupClass());
+ }
+
+ MethodHandle restrictProtectedReceiver(MemberName method, MethodHandle mh) {
+ // The accessing class only has the right to use a protected member
+ // on itself or a subclass. Enforce that restriction, from JVMS 5.4.4, etc.
+ if (!method.isProtected() || method.isStatic()
+ || allowedModes == TRUSTED
+ || VerifyAccess.isSamePackageMember(method.getDeclaringClass(), lookupClass()))
+ return mh;
+ else
+ return restrictReceiver(method, mh, lookupClass());
+ }
+ MethodHandle restrictReceiver(MemberName method, MethodHandle mh, Class<?> caller) {
+ assert(!method.isStatic());
+ Class<?> defc = method.getDeclaringClass(); // receiver type of mh is too wide
+ if (defc.isInterface() || !defc.isAssignableFrom(caller)) {
+ throw newNoAccessException("caller class must be a subclass below the method", method, caller);
+ }
+ MethodType rawType = mh.type();
+ if (rawType.parameterType(0) == caller) return mh;
+ MethodType narrowType = rawType.changeParameterType(0, caller);
+ return MethodHandleImpl.convertArguments(IMPL_TOKEN, mh, narrowType, rawType, null);
+ }
+
+ MethodHandle makeAccessor(Class<?> refc, String name, Class<?> type,
+ boolean isStatic, boolean isSetter) throws NoAccessException {
+ MemberName field = resolveOrFail(refc, name, type, isStatic);
+ if (isStatic != field.isStatic())
+ throw newNoAccessException(isStatic
+ ? "expected a static field"
+ : "expected a non-static field",
+ field, lookupClass());
+ return makeAccessor(refc, field, false, isSetter);
+ }
+
+ MethodHandle makeAccessor(Class<?> refc, MemberName field,
+ boolean trusted, boolean isSetter) throws NoAccessException {
+ assert(field.isField());
+ if (trusted)
+ return MethodHandleImpl.accessField(IMPL_TOKEN, field, isSetter, lookupClassOrNull());
+ checkAccess(refc, field);
+ MethodHandle mh = MethodHandleImpl.accessField(IMPL_TOKEN, field, isSetter, lookupClassOrNull());
+ return restrictProtectedReceiver(field, mh);
}
- if (m.isInvocable())
- return MethodHandleImpl.findMethod(IMPL_TOKEN, m, doDispatch, lookup.lookupClass());
- else if (m.isField())
- return MethodHandleImpl.accessField(IMPL_TOKEN, m, isSetter, lookup.lookupClass());
- else
- throw new InternalError();
}
/**
@@ -667,10 +899,15 @@ public class MethodHandles {
*/
public static
MethodHandle dynamicInvoker(CallSite site) {
- MethodHandle getTarget = MethodHandleImpl.bindReceiver(IMPL_TOKEN, CallSite.GET_TARGET, site);
+ MethodHandle getCSTarget = GET_TARGET;
+ if (getCSTarget == null)
+ GET_TARGET = getCSTarget = Lookup.IMPL_LOOKUP.
+ findVirtual(CallSite.class, "getTarget", MethodType.methodType(MethodHandle.class));
+ MethodHandle getTarget = MethodHandleImpl.bindReceiver(IMPL_TOKEN, getCSTarget, site);
MethodHandle invoker = exactInvoker(site.type());
return foldArguments(invoker, getTarget);
}
+ private static MethodHandle GET_TARGET = null; // link this lazily, not eagerly
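To make the intent concrete, a sketch of how an invoker produced this way might be used (draft API; how the CallSite itself is obtained, for instance from a bootstrap method, is out of scope here):

    static Object callThroughSite(java.dyn.CallSite site, Object arg) throws Throwable {
        // The invoker has the same type as the call site and always runs its *current* target.
        java.dyn.MethodHandle invoker = java.dyn.MethodHandles.dynamicInvoker(site);
        return invoker.invokeVarargs(new Object[] { arg });
    }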
static Invokers invokers(MethodType type) {
return MethodTypeImpl.invokers(IMPL_TOKEN, type);
@@ -1025,15 +1262,15 @@ public class MethodHandles {
*
* @param target the method handle to invoke after the argument is dropped
* @param valueTypes the type(s) of the argument to drop
@@ -1254,7 +1491,7 @@ public class MethodHandles {
MethodHandle dispatch = compose(choose, test);
// dispatch = \(a...).(test(a...) ? target : fallback)
return combineArguments(invoke, dispatch, 0);
- // return \(a...).((test(a...) ? target : fallback).invoke(a...))
+ // return \(a...).((test(a...) ? target : fallback).invokeExact(a...))
} */
return MethodHandleImpl.makeGuardWithTest(IMPL_TOKEN, test, target, fallback);
}
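A sketch of the combinator in action (draft API; the wrapper class and its helper methods are hypothetical):

    import java.dyn.MethodHandle;
    import java.dyn.MethodHandles;
    import java.dyn.MethodType;

    class GuardSketch {
        static boolean nonNegative(int x) { return x >= 0; }
        static int keep(int x)   { return x; }
        static int negate(int x) { return -x; }

        static void demo() throws Throwable {
            MethodHandles.Lookup lookup = MethodHandles.lookup();
            MethodType intToInt = MethodType.methodType(int.class, int.class);
            MethodHandle test     = lookup.findStatic(GuardSketch.class, "nonNegative",
                    MethodType.methodType(boolean.class, int.class));
            MethodHandle target   = lookup.findStatic(GuardSketch.class, "keep", intToInt);
            MethodHandle fallback = lookup.findStatic(GuardSketch.class, "negate", intToInt);
            // abs(x) = test(x) ? keep(x) : negate(x)
            MethodHandle abs = MethodHandles.guardWithTest(test, target, fallback);
            int five = (int) abs.invokeExact(-5);   // 5
        }
    }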
@@ -1325,22 +1562,4 @@ public class MethodHandles {
MethodHandle throwException(Class<?> returnType, Class<? extends Throwable> exType) {
return MethodHandleImpl.throwException(IMPL_TOKEN, MethodType.methodType(returnType, exType));
}
-
- /** Alias for {@link MethodType#methodType}. */
- @Deprecated // "use MethodType.methodType instead"
- public static MethodType methodType(Class<?> rtype) {
- return MethodType.methodType(rtype);
- }
-
- /** Alias for {@link MethodType#methodType}. */
- @Deprecated // "use MethodType.methodType instead"
- public static MethodType methodType(Class<?> rtype, Class<?> ptype) {
- return MethodType.methodType(rtype, ptype);
- }
-
- /** Alias for {@link MethodType#methodType}. */
- @Deprecated // "use MethodType.methodType instead"
- public static MethodType methodType(Class<?> rtype, Class<?> ptype0, Class<?>... ptypes) {
- return MethodType.methodType(rtype, ptype0, ptypes);
- }
}
diff --git a/jdk/src/share/classes/java/dyn/MethodType.java b/jdk/src/share/classes/java/dyn/MethodType.java
index f50ea446199..c9001bd25a6 100644
--- a/jdk/src/share/classes/java/dyn/MethodType.java
+++ b/jdk/src/share/classes/java/dyn/MethodType.java
@@ -36,15 +36,24 @@ import sun.dyn.util.BytecodeDescriptor;
import static sun.dyn.MemberName.newIllegalArgumentException;
/**
- * Run-time token used to match call sites with method handles.
+ * A method type represents the arguments and return type accepted and
+ * returned by a method handle, or the arguments and return type passed
+ * and expected by a method handle caller. Method types must be properly
+ * matched between a method handle and all its callers,
+ * and the JVM's operations enforce this matching at all times.
+ *
* The structure is a return type accompanied by any number of parameter types.
* The types (primitive, void, and reference) are represented by Class objects.
+ *
* All instances of MethodType are immutable.
* Two instances are completely interchangeable if they compare equal.
- * Equality depends exactly on the return and parameter types.
+ * Equality depends on pairwise correspondence of the return and parameter types and on nothing else.
*
- * This type can be created only by factory methods, which manage interning.
- *
+ * This type can be created only by factory methods.
+ * All factory methods may cache values, though caching is not guaranteed.
+ *
+ * Note: Like classes and strings, method types can be represented directly
+ * as constants to be loaded by {@code ldc} bytecodes.
* @author John Rose, JSR 292 EG
*/
public final
@@ -109,7 +118,7 @@ class MethodType {
/** Find or create an instance of the given method type.
* @param rtype the return type
* @param ptypes the parameter types
- * @return the interned method type with the given parts
+ * @return a method type with the given parts
* @throws NullPointerException if rtype or any ptype is null
* @throws IllegalArgumentException if any of the ptypes is void
*/
@@ -626,7 +635,7 @@ class MethodType {
}
/** Convenience method for {@link #methodType(java.lang.Class, java.lang.Class[])}.
- * Find or create an instance (interned) of the given method type.
+ * Find or create an instance of the given method type.
* Any class or interface name embedded in the signature string
* will be resolved by calling {@link ClassLoader#loadClass(java.lang.String)}
* on the given loader (or if it is null, on the system class loader).
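For example (a sketch against the draft factories documented above; changeParameterType is used elsewhere in this patch):

    MethodType mt1 = MethodType.methodType(int.class);                             // ()int
    MethodType mt2 = MethodType.methodType(String.class, char.class, char.class);  // (char,char)String
    // Instances are immutable; derived types come from further factory calls:
    MethodType mt3 = mt2.changeParameterType(0, int.class);                        // (int,char)String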
diff --git a/jdk/src/share/classes/java/dyn/NoAccessException.java b/jdk/src/share/classes/java/dyn/NoAccessException.java
index 8547a5d777f..5e76f6a4aae 100644
--- a/jdk/src/share/classes/java/dyn/NoAccessException.java
+++ b/jdk/src/share/classes/java/dyn/NoAccessException.java
@@ -27,11 +27,12 @@ package java.dyn;
/**
* Thrown to indicate that a caller has attempted to create a method handle
- * which calls a method to which the caller does not have access.
+ * which accesses a field, method, or class to which the caller does not have access.
* This unchecked exception is analogous to {@link IllegalAccessException},
* which is a checked exception thrown when reflective invocation fails
* because of an access check. With method handles, this same access
- * checking is performed on behalf of the method handle creator,
+ * checking is performed by the {@link MethodHandles.Lookup lookup object}
+ * on behalf of the method handle creator,
* at the time of creation.
* @author John Rose, JSR 292 EG
*/
diff --git a/jdk/src/share/classes/java/dyn/package-info.java b/jdk/src/share/classes/java/dyn/package-info.java
index a1ed722ff9d..7a285b75d22 100644
--- a/jdk/src/share/classes/java/dyn/package-info.java
+++ b/jdk/src/share/classes/java/dyn/package-info.java
@@ -27,6 +27,105 @@
* PROVISIONAL API, WORK IN PROGRESS:
* This package contains dynamic language support provided directly by
* the Java core class libraries and virtual machine.
+ *
+ * Certain types in this package have special relations to dynamic
+ * language support in the virtual machine:
+ * <ul>
+ * <li>In source code, a call to
+ * {@link java.dyn.MethodHandle#invokeExact MethodHandle.invokeExact} or
+ * {@link java.dyn.MethodHandle#invokeGeneric MethodHandle.invokeGeneric}
+ * will compile and link, regardless of the requested type signature.
+ * As usual, the Java compiler emits an {@code invokevirtual}
+ * instruction with the given signature against the named method.
+ * The JVM links any such call (regardless of signature) to a dynamically
+ * typed method handle invocation. In the case of {@code invokeGeneric},
+ * argument and return value conversions are applied.
+ *
+ * <li>In source code, the class {@link java.dyn.InvokeDynamic} appears to accept
+ * any static method invocation, of any name and any signature.
+ * But instead of emitting
+ * an {@code invokestatic} instruction for such a call, the Java compiler emits
+ * an {@code invokedynamic} instruction with the given name and signature.
+ *
+ * <li>When the JVM links an {@code invokedynamic} instruction, it calls the
+ * {@linkplain java.dyn.Linkage#registerBootstrapMethod(Class, MethodHandle) bootstrap method}
+ * of the containing class to obtain a {@linkplain java.dyn.CallSite call site} object through which
+ * the call site will link its target {@linkplain java.dyn.MethodHandle method handle}.
+ *
+ * <li>The JVM bytecode format supports immediate constants of
+ * the classes {@link java.dyn.MethodHandle} and {@link java.dyn.MethodType}.
+ *
+ * </ul>
+ * <h2>Corresponding JVM bytecode format changes</h2>
+ * The following low-level information is presented here as a preview of
+ * changes being made to the Java Virtual Machine specification for JSR 292.
+ *
+ * <h3>{@code invokedynamic} instruction format</h3>
+ * In bytecode, an {@code invokedynamic} instruction is formatted as five bytes.
+ * The first byte is the opcode 186 (hexadecimal {@code BA}).
+ * The next two bytes are a constant pool index (in the same format as for the other {@code invoke} instructions).
+ * The final two bytes are reserved for future use and required to be zero.
+ * The constant pool reference is to an entry with tag {@code CONSTANT_NameAndType}
+ * (decimal 12). It is thus not a method reference of any sort, but merely
+ * the method name, argument types, and return type of the dynamic call site.
+ * (TBD: The EG is discussing the possibility of a special constant pool entry type,
+ * so that other information may be added, such as a per-instruction bootstrap
+ * method and/or annotations.)
+ *
+ * <h3>constant pool entries for {@code MethodType}s</h3>
+ * If a constant pool entry has the tag {@code CONSTANT_MethodType} (decimal 16),
+ * it must contain exactly two more bytes, which are an index to a {@code CONSTANT_Utf8}
+ * entry which represents a method type signature. The JVM will ensure that on first
+ * execution of an {@code ldc} instruction for this entry, a {@link java.dyn.MethodType}
+ * will be created which represents the signature.
+ * Any classes mentioned in the {@code MethodType} will be loaded if necessary,
+ * but not initialized.
+ * Access checking and error reporting are performed exactly as they are for
+ * references by {@code ldc} instructions to {@code CONSTANT_Class} constants.
+ *
+ * <h3>constant pool entries for {@code MethodHandle}s</h3>
+ * If a constant pool entry has the tag {@code CONSTANT_MethodHandle} (decimal 15),
+ * it must contain exactly three more bytes. The first byte after the tag is a subtag
+ * value in the range 1 through 9, and the last two are an index to a
+ * {@code CONSTANT_Fieldref}, {@code CONSTANT_Methodref}, or
+ * {@code CONSTANT_InterfaceMethodref} entry which represents a field or method
+ * for which a method handle is to be created.
+ * The JVM will ensure that on first execution of an {@code ldc} instruction
+ * for this entry, a {@link java.dyn.MethodHandle} will be created which represents
+ * the field or method reference, according to the specific mode implied by the subtag.
+ *
+ * As with {@code CONSTANT_Class} and {@code CONSTANT_MethodType} constants,
+ * the {@code Class} or {@code MethodType} object which reifies the field or method's
+ * type is created. Any classes mentioned in this reification will be loaded if necessary,
+ * but not initialized, and access checking and error reporting performed as usual.
+ *
+ * The method handle itself will have a type and behavior determined by the subtag as follows:
+ *
+ * <table border=1>
+ * <tr><th>N</th><th>subtag name</th><th>member</th><th>MH type</th><th>MH behavior</th></tr>
+ * <tr><td>1</td><td>REF_getField</td><td>C.f:T</td><td>(C)T</td><td>getfield C.f:T</td></tr>
+ * <tr><td>2</td><td>REF_getStatic</td><td>C.f:T</td><td>( )T</td><td>getstatic C.f:T</td></tr>
+ * <tr><td>3</td><td>REF_putField</td><td>C.f:T</td><td>(C,T)void</td><td>putfield C.f:T</td></tr>
+ * <tr><td>4</td><td>REF_putStatic</td><td>C.f:T</td><td>(T)void</td><td>putstatic C.f:T</td></tr>
+ * <tr><td>5</td><td>REF_invokeVirtual</td><td>C.m(A*)T</td><td>(C,A*)T</td><td>invokevirtual C.m(A*)T</td></tr>
+ * <tr><td>6</td><td>REF_invokeStatic</td><td>C.m(A*)T</td><td>(A*)T</td><td>invokestatic C.m(A*)T</td></tr>
+ * <tr><td>7</td><td>REF_invokeSpecial</td><td>C.m(A*)T</td><td>(C,A*)T</td><td>invokespecial C.m(A*)T</td></tr>
+ * <tr><td>8</td><td>REF_newInvokeSpecial</td><td>C.<init>(A*)void</td><td>(A*)C</td><td>new C; dup; invokespecial C.<init>(A*)void</td></tr>
+ * <tr><td>9</td><td>REF_invokeInterface</td><td>C.m(A*)T</td><td>(C,A*)T</td><td>invokeinterface C.m(A*)T</td></tr>
+ * </table>
+ *
+ * The special names {@code <init>} and {@code <clinit>} are not allowed except for subtag 8 as shown.
+ *
+ * The verifier applies the same access checks and restrictions for these references as for the hypothetical
+ * bytecode instructions specified in the last column of the table. In particular, method handles to
+ * private and protected members can be created in exactly those classes for which the corresponding
+ * normal accesses are legal.
+ *
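As a rough API-level analogy (a sketch only, not part of the specification; assumes the java.dyn imports inside a method of the calling class), the handles such constants load correspond to what a suitably privileged Lookup can already produce:

    MethodHandles.Lookup lookup = MethodHandles.lookup();
    MethodHandle getX = lookup.findGetter(java.awt.Point.class, "x", int.class);   // like subtag 1, REF_getField
    MethodHandle putX = lookup.findSetter(java.awt.Point.class, "x", int.class);   // like subtag 3, REF_putField
    MethodHandle len  = lookup.findVirtual(String.class, "length",
            MethodType.methodType(int.class));                                     // like subtag 5, REF_invokeVirtual
    MethodHandle val  = lookup.findStatic(String.class, "valueOf",
            MethodType.methodType(String.class, int.class));                       // like subtag 6, REF_invokeStatic
    MethodHandle newP = lookup.findConstructor(java.awt.Point.class,
            MethodType.methodType(void.class, int.class, int.class));              // like subtag 8, REF_newInvokeSpecial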