Lana Steuck 2010-06-29 10:48:02 -07:00
commit d7a336ae22
222 changed files with 5866 additions and 3845 deletions

View File

@ -71,3 +71,5 @@ b5dab6a313fdff4c043250e4d9c8f66fd624d27e jdk7-b93
8bb281f0f91582104d65d032be22522bfd2d8110 jdk7-b94
654298d26561b76dfe3cfcffbbd7078080837300 jdk7-b95
d260f892491e040ae385a8e6df59557a7d721abf jdk7-b96
7e406ebed9a5968b584f3c3e6b60893b5d6d9741 jdk7-b97
db6e660120446c407e2d908d52ec046592b21726 jdk7-b98

View File

@ -72,3 +72,5 @@ d7f35c61afa092b6357c2c4bce3f298f16620f71 jdk7-b94
fd3663286e77b9f13c39eee124db2beb079b3ca6 jdk7-b95
cf71cb5151166f35433afebaf67dbf34a704a170 jdk7-b96
5e197c942c6ebd8b92f324a31049c5f1d26d40ef jdk7-b97
6cea9984d73d74de0cd01f30d07ac0a1ed196117 jdk7-b98
e7f18db469a3e947b7096bfd12e87380e5a042cd jdk7-b99

View File

@ -65,8 +65,9 @@
<li><a href="#cacerts">Certificate Authority File (cacert)</a> </li>
<li><a href="#compilers">Compilers</a>
<ul>
<li><a href="#msvc">Microsoft Visual Studio</a> </li>
<li><a href="#mssdk">Microsoft Platform SDK</a> </li>
<li><a href="#msvc32">Microsoft Visual Studio Professional/Express for 32 bit</a> </li>
<li><a href="#msvc64">Microsoft Visual Studio Professional for 64 bit</a> </li>
<li><a href="#mssdk64">Microsoft Windows SDK for 64 bit</a> </li>
<li><a href="#gcc">Linux gcc/binutils</a> </li>
<li><a href="#studio">Sun Studio</a> </li>
</ul>
@ -789,11 +790,11 @@
</li>
<li>
Install the
<a href="#msvc">Microsoft Visual Studio Compilers</a>).
<a href="#msvc32">Microsoft Visual Studio Compilers</a>).
</li>
<li>
Setup all environment variables for compilers
(see <a href="#msvc">compilers</a>).
(see <a href="#msvc32">compilers</a>).
</li>
<li>
Install
@ -958,7 +959,7 @@
are also an option, although these compilers have not
been extensively used yet.
</blockquote>
<strong><a name="msvc">Windows i586: Microsoft Visual Studio Compilers</a></strong>
<strong><a name="msvc32">Windows i586: Microsoft Visual Studio 2010 Compilers</a></strong>
<blockquote>
<p>
<b>BEGIN WARNING</b>: At this time (Spring/Summer 2010) JDK 7 is starting a transition to
@ -971,14 +972,13 @@ So for now you should be able to build with either VS2003 or VS2010.
We do not guarantee that VS2008 will work, although there is sufficient
makefile support to make at least basic JDK builds plausible.
Visual Studio 2010 Express compilers are now able to build all the
open source repositories, but this is 32 bit only, since
we have not yet seen the 7.1 Windows SDK with the 64 bit
compilers. <b>END WARNING.</b>
open source repositories, but this is 32 bit only. To build 64 bit
Windows binaries use the 7.1 Windows SDK. <b>END WARNING.</b>
<p>
The 32-bit OpenJDK Windows build
requires
Microsoft Visual Studio C++ 2010 (VS2010) Professional
Edition compiler.
Edition or Express compiler.
The compiler and other tools are expected to reside
in the location defined by the variable
<tt>VS100COMNTOOLS</tt> which
@ -1001,14 +1001,33 @@ compilers. <b>END WARNING.</b>
The path <tt>/usr/bin</tt> must be after the path to the
Visual Studio product.
</blockquote>
<strong><a name="mssdk">Windows x64: Microsoft Visual Studio Compilers</a></strong>
<strong><a name="msvc64">Windows x64: Microsoft Visual Studio 2010 Professional Compiler</a></strong>
<blockquote>
On <b>X64</b>, the set up is much the same in VS2010
For <b>X64</b> builds, when using the VS2010 Professional
compiler, the 64 bit setup is much the same as for 32 bit
except that you run <tt>amd64\VCVARS64.BAT</tt>
to set the compiler environment variables.
Previously 64 builds had used the 64 bit compiler in
an unbundled Windows SDK but this is no longer necessary.
Previously, 64 bit builds used the 64 bit compiler from
an unbundled Windows SDK, but this is no longer necessary if
you have VS2010 Professional.
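<p>
For example (a sketch only, assuming the default VS2010 Professional
install location reachable via the <tt>VS100COMNTOOLS</tt> variable;
adjust the path if your installation differs):
<pre>
call "%VS100COMNTOOLS%..\..\VC\bin\amd64\vcvars64.bat"
</pre>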
</blockquote>
<strong><a name="mssdk64">Windows x64: Microsoft Windows 7.1 SDK 64 bit compilers.</a></strong>
For a free alternative for 64 bit builds, use the 7.1 SDK.
Microsoft say that to set up your paths for this run
<pre>
c:\Program Files\Microsoft SDKs\Windows\v7.1\bin\setenv.cmd /x64
</pre>
What was actually tested was setting LIB, INCLUDE and PATH
directly, based on the installation directories and using the
DOS short names appropriate for the system (you will need to
adjust these for your own installation, not just copy them
blindly), e.g.:
<pre>
set VSINSTALLDIR=c:\PROGRA~2\MICROS~1.0
set WindowsSdkDir=c:\PROGRA~1\MICROS~1\Windows\v7.1
set PATH=%VSINSTALLDIR%\vc\bin\amd64;%VSINSTALLDIR%\Common7\IDE;%WindowsSdkDir%\bin;%PATH%
set INCLUDE=%VSINSTALLDIR%\vc\include;%WindowsSdkDir%\include
set LIB=%VSINSTALLDIR%\vc\lib\amd64;%WindowsSdkDir%\lib\x64
</pre>
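<p>
As a quick sanity check (a suggestion only, not part of the official
SDK instructions), run the compiler with no arguments once the
variables above are set:
<pre>
cl
</pre>
The version banner should end with "for x64"; if it reports
"for 80x86" instead, the 32 bit compiler is still first on the PATH.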
</blockquote>
<!-- ------------------------------------------------------ -->
<h4><a name="zip">Zip and Unzip</a></h4>

View File

@ -71,3 +71,5 @@ bcd2fc089227559ac5be927923609fac29f067fa jdk7-b91
533c11186b44e3a02d6c5fe69a73260505fcfe5e jdk7-b94
06dbf406818c789bb586c1de4c002024cd26ecd2 jdk7-b95
edc2a2659c77dabc55cb55bb617bad89e3a05bb3 jdk7-b96
4ec9d59374caa1e5d72fa802291b4d66955a4936 jdk7-b97
3b99409057e4c255da946f9f540d051a5ef4ab23 jdk7-b98

View File

@ -100,3 +100,5 @@ d38f45079fe98792a7381dbb4b64f5b589ec8c58 jdk7-b94
91d861ba858daca645993a1ab6ba2fa06a8f4a5b jdk7-b95
573e8ea5fd68e8e51eb6308d283ac3b3889d15e0 jdk7-b96
573e8ea5fd68e8e51eb6308d283ac3b3889d15e0 hs19-b02
5f42499e57adc16380780f40541e1a66cd601891 jdk7-b97
8a045b3f5c13eaad92ff4baf15ca671845fcad1a jdk7-b98

View File

@ -42,8 +42,6 @@ public class CodeBlob extends VMObject {
private static CIntegerField instructionsOffsetField;
private static CIntegerField frameCompleteOffsetField;
private static CIntegerField dataOffsetField;
private static CIntegerField oopsOffsetField;
private static CIntegerField oopsLengthField;
private static CIntegerField frameSizeField;
private static AddressField oopMapsField;
@ -72,8 +70,6 @@ public class CodeBlob extends VMObject {
frameCompleteOffsetField = type.getCIntegerField("_frame_complete_offset");
instructionsOffsetField = type.getCIntegerField("_instructions_offset");
dataOffsetField = type.getCIntegerField("_data_offset");
oopsOffsetField = type.getCIntegerField("_oops_offset");
oopsLengthField = type.getCIntegerField("_oops_length");
frameSizeField = type.getCIntegerField("_frame_size");
oopMapsField = type.getAddressField("_oop_maps");
@ -131,19 +127,10 @@ public class CodeBlob extends VMObject {
return headerBegin().addOffsetTo(sizeField.getValue(addr));
}
public Address oopsBegin() {
return headerBegin().addOffsetTo(oopsOffsetField.getValue(addr));
}
public Address oopsEnd() {
return oopsBegin().addOffsetTo(getOopsLength());
}
// Offsets
public int getRelocationOffset() { return (int) headerSizeField.getValue(addr); }
public int getInstructionsOffset() { return (int) instructionsOffsetField.getValue(addr); }
public int getDataOffset() { return (int) dataOffsetField.getValue(addr); }
public int getOopsOffset() { return (int) oopsOffsetField.getValue(addr); }
// Sizes
public int getSize() { return (int) sizeField.getValue(addr); }
@ -157,19 +144,9 @@ public class CodeBlob extends VMObject {
// FIXME: add relocationContains
public boolean instructionsContains(Address addr) { return instructionsBegin().lessThanOrEqual(addr) && instructionsEnd().greaterThan(addr); }
public boolean dataContains(Address addr) { return dataBegin().lessThanOrEqual(addr) && dataEnd().greaterThan(addr); }
public boolean oopsContains(Address addr) { return oopsBegin().lessThanOrEqual(addr) && oopsEnd().greaterThan(addr); }
public boolean contains(Address addr) { return instructionsContains(addr); }
public boolean isFrameCompleteAt(Address a) { return instructionsContains(a) && a.minus(instructionsBegin()) >= frameCompleteOffsetField.getValue(addr); }
/** Support for oops in scopes and relocs. Note: index 0 is reserved for null. */
public OopHandle getOopAt(int index) {
if (index == 0) return null;
if (Assert.ASSERTS_ENABLED) {
Assert.that(index > 0 && index <= getOopsLength(), "must be a valid non-zero index");
}
return oopsBegin().getOopHandleAt((index - 1) * VM.getVM().getOopSize());
}
// Reclamation support (really only used by the nmethods, but in order to get asserts to work
// in the CodeCache they are defined virtual here)
public boolean isZombie() { return false; }
@ -223,18 +200,8 @@ public class CodeBlob extends VMObject {
}
protected void printComponentsOn(PrintStream tty) {
// FIXME: add relocation information
tty.println(" instructions: [" + instructionsBegin() + ", " + instructionsEnd() + "), " +
" data: [" + dataBegin() + ", " + dataEnd() + "), " +
" oops: [" + oopsBegin() + ", " + oopsEnd() + "), " +
" frame size: " + getFrameSize());
}
//--------------------------------------------------------------------------------
// Internals only below this point
//
private int getOopsLength() {
return (int) oopsLengthField.getValue(addr);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -49,6 +49,7 @@ public class NMethod extends CodeBlob {
private static CIntegerField deoptOffsetField;
private static CIntegerField origPCOffsetField;
private static CIntegerField stubOffsetField;
private static CIntegerField oopsOffsetField;
private static CIntegerField scopesDataOffsetField;
private static CIntegerField scopesPCsOffsetField;
private static CIntegerField dependenciesOffsetField;
@ -98,6 +99,7 @@ public class NMethod extends CodeBlob {
deoptOffsetField = type.getCIntegerField("_deoptimize_offset");
origPCOffsetField = type.getCIntegerField("_orig_pc_offset");
stubOffsetField = type.getCIntegerField("_stub_offset");
oopsOffsetField = type.getCIntegerField("_oops_offset");
scopesDataOffsetField = type.getCIntegerField("_scopes_data_offset");
scopesPCsOffsetField = type.getCIntegerField("_scopes_pcs_offset");
dependenciesOffsetField = type.getCIntegerField("_dependencies_offset");
@ -141,7 +143,9 @@ public class NMethod extends CodeBlob {
public Address exceptionBegin() { return headerBegin().addOffsetTo(getExceptionOffset()); }
public Address deoptBegin() { return headerBegin().addOffsetTo(getDeoptOffset()); }
public Address stubBegin() { return headerBegin().addOffsetTo(getStubOffset()); }
public Address stubEnd() { return headerBegin().addOffsetTo(getScopesDataOffset()); }
public Address stubEnd() { return headerBegin().addOffsetTo(getOopsOffset()); }
public Address oopsBegin() { return headerBegin().addOffsetTo(getOopsOffset()); }
public Address oopsEnd() { return headerBegin().addOffsetTo(getScopesDataOffset()); }
public Address scopesDataBegin() { return headerBegin().addOffsetTo(getScopesDataOffset()); }
public Address scopesDataEnd() { return headerBegin().addOffsetTo(getScopesPCsOffset()); }
public Address scopesPCsBegin() { return headerBegin().addOffsetTo(getScopesPCsOffset()); }
@ -156,6 +160,7 @@ public class NMethod extends CodeBlob {
public int constantsSize() { return (int) constantsEnd() .minus(constantsBegin()); }
public int codeSize() { return (int) codeEnd() .minus(codeBegin()); }
public int stubSize() { return (int) stubEnd() .minus(stubBegin()); }
public int oopsSize() { return (int) oopsEnd() .minus(oopsBegin()); }
public int scopesDataSize() { return (int) scopesDataEnd() .minus(scopesDataBegin()); }
public int scopesPCsSize() { return (int) scopesPCsEnd() .minus(scopesPCsBegin()); }
public int dependenciesSize() { return (int) dependenciesEnd().minus(dependenciesBegin()); }
@ -178,6 +183,7 @@ public class NMethod extends CodeBlob {
public boolean constantsContains (Address addr) { return constantsBegin() .lessThanOrEqual(addr) && constantsEnd() .greaterThan(addr); }
public boolean codeContains (Address addr) { return codeBegin() .lessThanOrEqual(addr) && codeEnd() .greaterThan(addr); }
public boolean stubContains (Address addr) { return stubBegin() .lessThanOrEqual(addr) && stubEnd() .greaterThan(addr); }
public boolean oopsContains (Address addr) { return oopsBegin() .lessThanOrEqual(addr) && oopsEnd() .greaterThan(addr); }
public boolean scopesDataContains (Address addr) { return scopesDataBegin() .lessThanOrEqual(addr) && scopesDataEnd() .greaterThan(addr); }
public boolean scopesPCsContains (Address addr) { return scopesPCsBegin() .lessThanOrEqual(addr) && scopesPCsEnd() .greaterThan(addr); }
public boolean handlerTableContains(Address addr) { return handlerTableBegin().lessThanOrEqual(addr) && handlerTableEnd().greaterThan(addr); }
@ -187,6 +193,15 @@ public class NMethod extends CodeBlob {
public Address getEntryPoint() { return entryPointField.getValue(addr); }
public Address getVerifiedEntryPoint() { return verifiedEntryPointField.getValue(addr); }
/** Support for oops in scopes and relocs. Note: index 0 is reserved for null. */
public OopHandle getOopAt(int index) {
if (index == 0) return null;
if (Assert.ASSERTS_ENABLED) {
Assert.that(index > 0 && index <= oopsSize(), "must be a valid non-zero index");
}
return oopsBegin().getOopHandleAt((index - 1) * VM.getVM().getOopSize());
}
// FIXME: add interpreter_entry_point()
// FIXME: add lazy_interpreter_entry_point() for C2
@ -338,6 +353,14 @@ public class NMethod extends CodeBlob {
printOn(System.out);
}
protected void printComponentsOn(PrintStream tty) {
// FIXME: add relocation information
tty.println(" instructions: [" + instructionsBegin() + ", " + instructionsEnd() + "), " +
" data: [" + dataBegin() + ", " + dataEnd() + "), " +
" oops: [" + oopsBegin() + ", " + oopsEnd() + "), " +
" frame size: " + getFrameSize());
}
public String toString() {
Method method = getMethod();
return "NMethod for " +
@ -367,6 +390,7 @@ public class NMethod extends CodeBlob {
private int getExceptionOffset() { return (int) exceptionOffsetField .getValue(addr); }
private int getDeoptOffset() { return (int) deoptOffsetField .getValue(addr); }
private int getStubOffset() { return (int) stubOffsetField .getValue(addr); }
private int getOopsOffset() { return (int) oopsOffsetField .getValue(addr); }
private int getScopesDataOffset() { return (int) scopesDataOffsetField .getValue(addr); }
private int getScopesPCsOffset() { return (int) scopesPCsOffsetField .getValue(addr); }
private int getDependenciesOffset() { return (int) dependenciesOffsetField.getValue(addr); }

View File

@ -73,18 +73,11 @@ public class CompactibleFreeListSpace extends CompactibleSpace {
public CompactibleFreeListSpace(Address addr) {
super(addr);
if ( VM.getVM().isLP64() ) {
heapWordSize = 8;
IndexSetStart = 1;
IndexSetStride = 1;
}
else {
heapWordSize = 4;
IndexSetStart = 2;
IndexSetStride = 2;
}
IndexSetSize = 257;
VM vm = VM.getVM();
heapWordSize = vm.getHeapWordSize();
IndexSetStart = vm.getMinObjAlignmentInBytes() / heapWordSize;
IndexSetStride = IndexSetStart;
IndexSetSize = 257;
}
// Accessing block offset table

View File

@ -128,7 +128,7 @@ public class Oop {
// Align the object size.
public static long alignObjectSize(long size) {
return VM.getVM().alignUp(size, VM.getVM().getMinObjAlignment());
return VM.getVM().alignUp(size, VM.getVM().getMinObjAlignmentInBytes());
}
// All vm's align longs, so pad out certain offsets.

View File

@ -93,6 +93,7 @@ public class VM {
/** alignment constants */
private boolean isLP64;
private int bytesPerLong;
private int objectAlignmentInBytes;
private int minObjAlignmentInBytes;
private int logMinObjAlignmentInBytes;
private int heapWordSize;
@ -313,9 +314,6 @@ public class VM {
isLP64 = debugger.getMachineDescription().isLP64();
}
bytesPerLong = db.lookupIntConstant("BytesPerLong").intValue();
minObjAlignmentInBytes = db.lookupIntConstant("MinObjAlignmentInBytes").intValue();
// minObjAlignment = db.lookupIntConstant("MinObjAlignment").intValue();
logMinObjAlignmentInBytes = db.lookupIntConstant("LogMinObjAlignmentInBytes").intValue();
heapWordSize = db.lookupIntConstant("HeapWordSize").intValue();
oopSize = db.lookupIntConstant("oopSize").intValue();
@ -323,6 +321,15 @@ public class VM {
uintxType = db.lookupType("uintx");
boolType = (CIntegerType) db.lookupType("bool");
minObjAlignmentInBytes = getObjectAlignmentInBytes();
if (minObjAlignmentInBytes == 8) {
logMinObjAlignmentInBytes = 3;
} else if (minObjAlignmentInBytes == 16) {
logMinObjAlignmentInBytes = 4;
} else {
throw new RuntimeException("Object alignment " + minObjAlignmentInBytes + " not yet supported");
}
if (isCompressedOopsEnabled()) {
// Size info for oops within java objects is fixed
heapOopSize = (int)getIntSize();
@ -492,10 +499,6 @@ public class VM {
}
/** Get minimum object alignment in bytes. */
public int getMinObjAlignment() {
return minObjAlignmentInBytes;
}
public int getMinObjAlignmentInBytes() {
return minObjAlignmentInBytes;
}
@ -754,6 +757,14 @@ public class VM {
return compressedOopsEnabled.booleanValue();
}
public int getObjectAlignmentInBytes() {
if (objectAlignmentInBytes == 0) {
Flag flag = getCommandLineFlag("ObjectAlignmentInBytes");
objectAlignmentInBytes = (flag == null) ? 8 : (int)flag.getIntx();
}
return objectAlignmentInBytes;
}
// returns null, if not available.
public Flag[] getCommandLineFlags() {
if (commandLineFlags == null) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2004, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -98,7 +98,12 @@ public class PointerFinder {
}
loc.inBlobInstructions = loc.blob.instructionsContains(a);
loc.inBlobData = loc.blob.dataContains(a);
loc.inBlobOops = loc.blob.oopsContains(a);
if (loc.blob.isNMethod()) {
NMethod nm = (NMethod) loc.blob;
loc.inBlobOops = nm.oopsContains(a);
}
loc.inBlobUnknownLocation = (!(loc.inBlobInstructions ||
loc.inBlobData ||
loc.inBlobOops));

View File

@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2010
HS_MAJOR_VER=19
HS_MINOR_VER=0
HS_BUILD_NUMBER=02
HS_BUILD_NUMBER=03
JDK_MAJOR_VER=1
JDK_MINOR_VER=7

View File

@ -80,12 +80,10 @@ ifeq ($(ARCH_DATA_MODEL), 32)
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.so
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_db.so
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_dtrace.so
ifeq ($(ARCH),sparc)
EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_db.so
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_db.so
EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_dtrace.so
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_dtrace.so
endif
EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_db.so
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_db.so
EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_dtrace.so
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_dtrace.so
endif
EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.so

View File

@ -69,8 +69,20 @@ ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) AMD64),)
MAKE_ARGS += Platform_arch_model=x86_64
endif
ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) EM64T),)
ARCH_DATA_MODEL=64
PLATFORM=windows-amd64
VM_PLATFORM=windows_amd64
HS_ARCH=x86
MAKE_ARGS += LP64=1
MAKE_ARGS += ARCH=x86
MAKE_ARGS += BUILDARCH=amd64
MAKE_ARGS += Platform_arch=x86
MAKE_ARGS += Platform_arch_model=x86_64
endif
# NB OS versions later than 2003 may report "Intel64"
ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) "EM64T\|Intel64"),)
ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) Intel64),)
ARCH_DATA_MODEL=64
PLATFORM=windows-amd64
VM_PLATFORM=windows_amd64

View File

@ -87,6 +87,7 @@ REGISTER_DECLARATION(Register, Gtemp , G5);
// JSR 292 fixed register usages:
REGISTER_DECLARATION(Register, G5_method_type , G5);
REGISTER_DECLARATION(Register, G3_method_handle , G3);
REGISTER_DECLARATION(Register, L7_mh_SP_save , L7);
// The compiler requires that G5_megamorphic_method is G5_inline_cache_klass,
// because a single patchable "set" instruction (NativeMovConstReg,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -181,8 +181,8 @@ bool FrameMap::is_caller_save_register (Register r) {
}
void FrameMap::init () {
if (_init_done) return;
void FrameMap::initialize() {
assert(!_init_done, "once");
int i=0;
// Register usage:
@ -345,6 +345,13 @@ LIR_Opr FrameMap::stack_pointer() {
}
// JSR 292
LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
assert(L7 == L7_mh_SP_save, "must be same register");
return L7_opr;
}
bool FrameMap::validate_frame() {
int max_offset = in_bytes(framesize_in_bytes());
int java_index = 0;

View File

@ -143,6 +143,3 @@
static bool is_caller_save_register (LIR_Opr reg);
static bool is_caller_save_register (Register r);
// JSR 292
static LIR_Opr& method_handle_invoke_SP_save_opr() { return L7_opr; }

View File

@ -736,7 +736,8 @@ void LIR_Assembler::align_call(LIR_Code) {
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
__ call(op->addr(), rtype);
// the peephole pass fills the delay slot
// The peephole pass fills the delay slot, add_call_info is done in
// LIR_Assembler::emit_delay.
}
@ -745,7 +746,8 @@ void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
__ set_oop((jobject)Universe::non_oop_word(), G5_inline_cache_reg);
__ relocate(rspec);
__ call(op->addr(), relocInfo::none);
// the peephole pass fills the delay slot
// The peephole pass fills the delay slot, add_call_info is done in
// LIR_Assembler::emit_delay.
}
@ -766,16 +768,6 @@ void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
}
void LIR_Assembler::preserve_SP(LIR_OpJavaCall* op) {
Unimplemented();
}
void LIR_Assembler::restore_SP(LIR_OpJavaCall* op) {
Unimplemented();
}
// load with 32-bit displacement
int LIR_Assembler::load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo *info) {
int load_offset = code_offset();
@ -2934,7 +2926,7 @@ void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
// we may also be emitting the call info for the instruction
// which we are the delay slot of.
CodeEmitInfo * call_info = op->call_info();
CodeEmitInfo* call_info = op->call_info();
if (call_info) {
add_call_info(code_offset(), call_info);
}
@ -3159,6 +3151,7 @@ void LIR_Assembler::peephole(LIR_List* lir) {
tty->print_cr("delayed");
inst->at(i - 1)->print();
inst->at(i)->print();
tty->cr();
}
#endif
continue;
@ -3174,8 +3167,8 @@ void LIR_Assembler::peephole(LIR_List* lir) {
case lir_static_call:
case lir_virtual_call:
case lir_icvirtual_call:
case lir_optvirtual_call: {
LIR_Op* delay_op = NULL;
case lir_optvirtual_call:
case lir_dynamic_call: {
LIR_Op* prev = inst->at(i - 1);
if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL &&
(op->code() != lir_virtual_call ||
@ -3192,15 +3185,14 @@ void LIR_Assembler::peephole(LIR_List* lir) {
tty->print_cr("delayed");
inst->at(i - 1)->print();
inst->at(i)->print();
tty->cr();
}
#endif
continue;
}
if (!delay_op) {
delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
inst->insert_before(i + 1, delay_op);
}
LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
inst->insert_before(i + 1, delay_op);
break;
}
}

View File

@ -221,7 +221,7 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
if (needs_card_mark) {
LIR_Opr ptr = new_pointer_register();
__ add(base_opr, LIR_OprFact::intptrConst(offset), ptr);
return new LIR_Address(ptr, 0, type);
return new LIR_Address(ptr, type);
} else {
return new LIR_Address(base_opr, offset, type);
}
@ -231,7 +231,7 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
void LIRGenerator::increment_counter(address counter, int step) {
LIR_Opr pointer = new_pointer_register();
__ move(LIR_OprFact::intptrConst(counter), pointer);
LIR_Address* addr = new LIR_Address(pointer, 0, T_INT);
LIR_Address* addr = new LIR_Address(pointer, T_INT);
increment_counter(addr, step);
}
@ -1159,7 +1159,7 @@ void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
if (type == T_ARRAY || type == T_OBJECT) {
LIR_Opr tmp = new_pointer_register();
__ add(base_op, index_op, tmp);
addr = new LIR_Address(tmp, 0, type);
addr = new LIR_Address(tmp, type);
} else {
addr = new LIR_Address(base_op, index_op, type);
}

View File

@ -679,8 +679,15 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
G2_thread, Oissuing_pc->after_save());
__ verify_not_null_oop(Oexception->after_save());
__ jmp(O0, 0);
__ delayed()->restore();
// Restore SP from L7 if the exception PC is a MethodHandle call site.
__ mov(O0, G5); // Save the target address.
__ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0);
__ tst(L0); // Condition codes are preserved over the restore.
__ restore();
__ jmp(G5, 0);
__ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP); // Restore SP if required.
}
break;

View File

@ -154,7 +154,7 @@ static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
}
static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value) {
assert(MinObjAlignmentInBytes == BytesPerLong, "need alternate implementation");
assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
julong* to = (julong*)tohw;
julong v = ((julong)value << 32) | value;
@ -162,7 +162,7 @@ static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value)
// and be equal to 0 on 64-bit platform.
size_t odd = count % (BytesPerLong / HeapWordSize) ;
size_t aligned_count = align_object_size(count - odd) / HeapWordsPerLong;
size_t aligned_count = align_object_offset(count - odd) / HeapWordsPerLong;
julong* end = ((julong*)tohw) + aligned_count - 1;
while (to <= end) {
DEBUG_ONLY(count -= BytesPerLong / HeapWordSize ;)

View File

@ -336,9 +336,11 @@ frame::frame(intptr_t* sp, unpatchable_t, address pc, CodeBlob* cb) {
#endif // ASSERT
}
frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_adjusted_stack) {
_sp = sp;
_younger_sp = younger_sp;
frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_is_interpreted) :
_sp(sp),
_younger_sp(younger_sp),
_deopt_state(unknown),
_sp_adjustment_by_callee(0) {
if (younger_sp == NULL) {
// make a deficient frame which doesn't know where its PC is
_pc = NULL;
@ -352,20 +354,32 @@ frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_adjusted_sta
// wrong. (the _last_native_pc will have the right value)
// So do not put add any asserts on the _pc here.
}
if (younger_frame_adjusted_stack) {
// compute adjustment to this frame's SP made by its interpreted callee
_sp_adjustment_by_callee = (intptr_t*)((intptr_t)younger_sp[I5_savedSP->sp_offset_in_saved_window()] +
STACK_BIAS) - sp;
} else {
_sp_adjustment_by_callee = 0;
if (_pc != NULL)
_cb = CodeCache::find_blob(_pc);
// Check for MethodHandle call sites.
if (_cb != NULL) {
nmethod* nm = _cb->as_nmethod_or_null();
if (nm != NULL) {
if (nm->is_deopt_mh_entry(_pc) || nm->is_method_handle_return(_pc)) {
_sp_adjustment_by_callee = (intptr_t*) ((intptr_t) sp[L7_mh_SP_save->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
// The SP is already adjusted by this MH call site, don't
// overwrite this value with the wrong interpreter value.
younger_frame_is_interpreted = false;
}
}
}
_deopt_state = unknown;
if (younger_frame_is_interpreted) {
// compute adjustment to this frame's SP made by its interpreted callee
_sp_adjustment_by_callee = (intptr_t*) ((intptr_t) younger_sp[I5_savedSP->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
}
// It is important that frame be fully construct when we do this lookup
// as get_original_pc() needs correct value for unextended_sp()
// It is important that the frame is fully constructed when we do
// this lookup as get_deopt_original_pc() needs a correct value for
// unextended_sp() which uses _sp_adjustment_by_callee.
if (_pc != NULL) {
_cb = CodeCache::find_blob(_pc);
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
_pc = original_pc;
@ -462,9 +476,8 @@ frame frame::sender(RegisterMap* map) const {
if (is_entry_frame()) return sender_for_entry_frame(map);
intptr_t* younger_sp = sp();
intptr_t* sp = sender_sp();
bool adjusted_stack = false;
intptr_t* younger_sp = sp();
intptr_t* sp = sender_sp();
// Note: The version of this operation on any platform with callee-save
// registers must update the register map (if not null).
@ -483,8 +496,8 @@ frame frame::sender(RegisterMap* map) const {
// interpreted but its pc is in the code cache (for c1 -> osr_frame_return_id stub), so it must be
// explicitly recognized.
adjusted_stack = is_interpreted_frame();
if (adjusted_stack) {
bool frame_is_interpreted = is_interpreted_frame();
if (frame_is_interpreted) {
map->make_integer_regs_unsaved();
map->shift_window(sp, younger_sp);
} else if (_cb != NULL) {
@ -503,7 +516,7 @@ frame frame::sender(RegisterMap* map) const {
}
}
}
return frame(sp, younger_sp, adjusted_stack);
return frame(sp, younger_sp, frame_is_interpreted);
}

View File

@ -720,25 +720,30 @@ void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(
void InterpreterMacroAssembler::get_cache_index_at_bcp(Register cache, Register tmp,
int bcp_offset, bool giant_index) {
int bcp_offset, size_t index_size) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
if (!giant_index) {
if (index_size == sizeof(u2)) {
get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
} else {
} else if (index_size == sizeof(u4)) {
assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
get_4_byte_integer_at_bcp(bcp_offset, cache, tmp);
assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
xor3(tmp, -1, tmp); // convert to plain index
} else if (index_size == sizeof(u1)) {
assert(EnableMethodHandles, "tiny index used only for EnableMethodHandles");
ldub(Lbcp, bcp_offset, tmp);
} else {
ShouldNotReachHere();
}
}
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp,
int bcp_offset, bool giant_index) {
int bcp_offset, size_t index_size) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
assert_different_registers(cache, tmp);
assert_not_delayed();
get_cache_index_at_bcp(cache, tmp, bcp_offset, giant_index);
get_cache_index_at_bcp(cache, tmp, bcp_offset, index_size);
// convert from field index to ConstantPoolCacheEntry index and from
// word index to byte offset
sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
@ -747,12 +752,15 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Regis
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
int bcp_offset, bool giant_index) {
int bcp_offset, size_t index_size) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
assert_different_registers(cache, tmp);
assert_not_delayed();
assert(!giant_index,"NYI");
get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
if (index_size == sizeof(u2)) {
get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
} else {
ShouldNotReachHere(); // other sizes not supported here
}
// convert from field index to ConstantPoolCacheEntry index
// and from word index to byte offset
sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);

View File

@ -182,9 +182,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
Register Rdst,
setCCOrNot should_set_CC = dont_set_CC );
void get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
void get_cache_index_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
void get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_index_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
// common code

View File

@ -375,10 +375,10 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
Register O0_scratch = O0_argslot;
int stackElementSize = Interpreter::stackElementSize;
// Make space on the stack for the arguments.
__ sub(SP, 4*stackElementSize, SP);
__ sub(Gargs, 3*stackElementSize, Gargs);
//__ sub(Lesp, 3*stackElementSize, Lesp);
// Make space on the stack for the arguments and set Gargs
// correctly.
__ sub(SP, 4*stackElementSize, SP); // Keep stack aligned.
__ add(SP, (frame::varargs_offset)*wordSize - 1*Interpreter::stackElementSize + STACK_BIAS + BytesPerWord, Gargs);
// void raiseException(int code, Object actual, Object required)
__ st( O1_scratch, Address(Gargs, 2*stackElementSize)); // code

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -321,7 +321,8 @@ void NativeMovConstReg::set_data(intptr_t x) {
set_long_at(add_offset, set_data32_simm13( long_at(add_offset), x));
// also store the value into an oop_Relocation cell, if any
CodeBlob* nm = CodeCache::find_blob(instruction_address());
CodeBlob* cb = CodeCache::find_blob(instruction_address());
nmethod* nm = cb ? cb->as_nmethod_or_null() : NULL;
if (nm != NULL) {
RelocIterator iter(nm, instruction_address(), next_instruction_address());
oop* oop_addr = NULL;
@ -430,7 +431,8 @@ void NativeMovConstRegPatching::set_data(int x) {
set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));
// also store the value into an oop_Relocation cell, if any
CodeBlob* nm = CodeCache::find_blob(instruction_address());
CodeBlob* cb = CodeCache::find_blob(instruction_address());
nmethod* nm = cb ? cb->as_nmethod_or_null() : NULL;
if (nm != NULL) {
RelocIterator iter(nm, instruction_address(), next_instruction_address());
oop* oop_addr = NULL;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -142,9 +142,12 @@ REGISTER_DEFINITION(Register, G1_scratch);
REGISTER_DEFINITION(Register, G3_scratch);
REGISTER_DEFINITION(Register, G4_scratch);
REGISTER_DEFINITION(Register, Gtemp);
REGISTER_DEFINITION(Register, Lentry_args);
// JSR 292
REGISTER_DEFINITION(Register, G5_method_type);
REGISTER_DEFINITION(Register, G3_method_handle);
REGISTER_DEFINITION(Register, Lentry_args);
REGISTER_DEFINITION(Register, L7_mh_SP_save);
#ifdef CC_INTERP
REGISTER_DEFINITION(Register, Lstate);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -116,6 +116,11 @@ void OptoRuntime::generate_exception_blob() {
__ mov(O0, G3_scratch); // Move handler address to temp
__ restore();
// Restore SP from L7 if the exception PC is a MethodHandle call site.
__ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), O7);
__ tst(O7);
__ movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP);
// G3_scratch contains handler address
// Since this may be the deopt blob we must set O7 to look like we returned
// from the original pc that threw the exception

View File

@ -908,26 +908,13 @@ void AdapterGenerator::gen_i2c_adapter(
// O0-O5 - Outgoing args in compiled layout
// O6 - Adjusted or restored SP
// O7 - Valid return address
// L0-L7, I0-I7 - Caller's temps (no frame pushed yet)
// L0-L7, I0-I7 - Caller's temps (no frame pushed yet)
// F0-F7 - more outgoing args
// Gargs is the incoming argument base, and also an outgoing argument.
__ sub(Gargs, BytesPerWord, Gargs);
#ifdef ASSERT
{
// on entry OsavedSP and SP should be equal
Label ok;
__ cmp(O5_savedSP, SP);
__ br(Assembler::equal, false, Assembler::pt, ok);
__ delayed()->nop();
__ stop("I5_savedSP not set");
__ should_not_reach_here();
__ bind(ok);
}
#endif
// ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
// WITH O7 HOLDING A VALID RETURN PC
//

View File

@ -534,7 +534,10 @@ bool can_branch_register( Node *bol, Node *cmp ) {
// The "return address" is the address of the call instruction, plus 8.
int MachCallStaticJavaNode::ret_addr_offset() {
return NativeCall::instruction_size; // call; delay slot
int offset = NativeCall::instruction_size; // call; delay slot
if (_method_handle_invoke)
offset += 4; // restore SP
return offset;
}
int MachCallDynamicJavaNode::ret_addr_offset() {
@ -818,6 +821,10 @@ void emit_form3_mem_reg(CodeBuffer &cbuf, const MachNode* n, int primary, int te
!(n->ideal_Opcode()==Op_ConvI2D && ld_op==Op_LoadF) &&
!(n->ideal_Opcode()==Op_PrefetchRead && ld_op==Op_LoadI) &&
!(n->ideal_Opcode()==Op_PrefetchWrite && ld_op==Op_LoadI) &&
!(n->ideal_Opcode()==Op_Load2I && ld_op==Op_LoadD) &&
!(n->ideal_Opcode()==Op_Load4C && ld_op==Op_LoadD) &&
!(n->ideal_Opcode()==Op_Load4S && ld_op==Op_LoadD) &&
!(n->ideal_Opcode()==Op_Load8B && ld_op==Op_LoadD) &&
!(n->rule() == loadUB_rule)) {
verify_oops_warning(n, n->ideal_Opcode(), ld_op);
}
@ -829,6 +836,9 @@ void emit_form3_mem_reg(CodeBuffer &cbuf, const MachNode* n, int primary, int te
!(n->ideal_Opcode()==Op_StoreI && st_op==Op_StoreF) &&
!(n->ideal_Opcode()==Op_StoreF && st_op==Op_StoreI) &&
!(n->ideal_Opcode()==Op_StoreL && st_op==Op_StoreI) &&
!(n->ideal_Opcode()==Op_Store2I && st_op==Op_StoreD) &&
!(n->ideal_Opcode()==Op_Store4C && st_op==Op_StoreD) &&
!(n->ideal_Opcode()==Op_Store8B && st_op==Op_StoreD) &&
!(n->ideal_Opcode()==Op_StoreD && st_op==Op_StoreI && n->rule() == storeD0_rule)) {
verify_oops_warning(n, n->ideal_Opcode(), st_op);
}
@ -1750,6 +1760,12 @@ const int Matcher::init_array_short_size = 8 * BytesPerLong;
// registers? True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;
bool Matcher::narrow_oop_use_complex_address() {
NOT_LP64(ShouldNotCallThis());
assert(UseCompressedOops, "only for compressed oops code");
return false;
}
// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers. Most RISCs will have to materialize an address into a
@ -1858,7 +1874,7 @@ RegMask Matcher::modL_proj_mask() {
}
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return RegMask();
return L7_REGP_mask;
}
%}
@ -2441,6 +2457,16 @@ encode %{
/*preserve_g2=*/true, /*force far call*/true);
%}
enc_class preserve_SP %{
MacroAssembler _masm(&cbuf);
__ mov(SP, L7_mh_SP_save);
%}
enc_class restore_SP %{
MacroAssembler _masm(&cbuf);
__ mov(L7_mh_SP_save, SP);
%}
enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
// CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
// who we intended to call.
@ -9213,6 +9239,7 @@ instruct safePoint_poll(iRegP poll) %{
// Call Java Static Instruction
instruct CallStaticJavaDirect( method meth ) %{
match(CallStaticJava);
predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
effect(USE meth);
size(8);
@ -9223,6 +9250,20 @@ instruct CallStaticJavaDirect( method meth ) %{
ins_pipe(simple_call);
%}
// Call Java Static Instruction (method handle version)
instruct CallStaticJavaHandle(method meth, l7RegP l7_mh_SP_save) %{
match(CallStaticJava);
predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
effect(USE meth, KILL l7_mh_SP_save);
size(8);
ins_cost(CALL_COST);
format %{ "CALL,static/MethodHandle" %}
ins_encode(preserve_SP, Java_Static_Call(meth), restore_SP, call_epilog);
ins_pc_relative(1);
ins_pipe(simple_call);
%}
// Call Java Dynamic Instruction
instruct CallDynamicJavaDirect( method meth ) %{
match(CallDynamicJava);

View File

@ -2911,16 +2911,6 @@ class StubGenerator: public StubCodeGenerator {
// arraycopy stubs used by compilers
generate_arraycopy_stubs();
// generic method handle stubs
if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) {
for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
ek < MethodHandles::_EK_LIMIT;
ek = MethodHandles::EntryKind(1 + (int)ek)) {
StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
MethodHandles::generate_method_handle_stub(_masm, ek);
}
}
// Don't initialize the platform math functions since sparc
// doesn't have intrinsics for these operations.
}

View File

@ -43,7 +43,7 @@ enum /* platform_dependent_constants */ {
// MethodHandles adapters
enum method_handles_platform_dependent_constants {
method_handles_adapters_code_size = 5000
method_handles_adapters_code_size = 6000
};
class Sparc {

View File

@ -204,7 +204,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
// out of the main line of code...
if (EnableInvokeDynamic) {
__ bind(L_giant_index);
__ get_cache_and_index_at_bcp(cache, G1_scratch, 1, true);
__ get_cache_and_index_at_bcp(cache, G1_scratch, 1, sizeof(u4));
__ ba(false, L_got_cache);
__ delayed()->nop();
}

View File

@ -1949,23 +1949,30 @@ void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constrain
}
// ----------------------------------------------------------------------------
void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);
void TemplateTable::resolve_cache_and_index(int byte_no,
Register result,
Register Rcache,
Register index,
size_t index_size) {
// Depends on cpCacheOop layout!
const int shift_count = (1 + byte_no)*BitsPerByte;
Label resolved;
__ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
if (is_invokedynamic) {
// We are resolved if the f1 field contains a non-null CallSite object.
__ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
if (byte_no == f1_oop) {
// We are resolved if the f1 field contains a non-null object (CallSite, etc.)
// This kind of CP cache entry does not need to match the flags byte, because
// there is a 1-1 relation between bytecode type and CP entry type.
assert_different_registers(result, Rcache);
__ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::f1_offset(), Lbyte_code);
__ tst(Lbyte_code);
ConstantPoolCacheEntry::f1_offset(), result);
__ tst(result);
__ br(Assembler::notEqual, false, Assembler::pt, resolved);
__ delayed()->set((int)bytecode(), O1);
} else {
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
assert(result == noreg, ""); //else change code for setting result
const int shift_count = (1 + byte_no)*BitsPerByte;
__ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::indices_offset(), Lbyte_code);
@ -1992,7 +1999,10 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
// first time invocation - must resolve first
__ call_VM(noreg, entry, O1);
// Update registers with resolved info
__ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
__ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
if (result != noreg)
__ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::f1_offset(), result);
__ bind(resolved);
}
@ -2001,7 +2011,8 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
Register Ritable_index,
Register Rflags,
bool is_invokevirtual,
bool is_invokevfinal) {
bool is_invokevfinal,
bool is_invokedynamic) {
// Uses both G3_scratch and G4_scratch
Register Rcache = G3_scratch;
Register Rscratch = G4_scratch;
@ -2025,11 +2036,15 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
if (is_invokevfinal) {
__ get_cache_and_index_at_bcp(Rcache, Rscratch, 1);
__ ld_ptr(Rcache, method_offset, Rmethod);
} else if (byte_no == f1_oop) {
// Resolved f1_oop goes directly into 'method' register.
resolve_cache_and_index(byte_no, Rmethod, Rcache, Rscratch, sizeof(u4));
} else {
resolve_cache_and_index(byte_no, Rcache, Rscratch);
resolve_cache_and_index(byte_no, noreg, Rcache, Rscratch, sizeof(u2));
__ ld_ptr(Rcache, method_offset, Rmethod);
}
__ ld_ptr(Rcache, method_offset, Rmethod);
if (Ritable_index != noreg) {
__ ld_ptr(Rcache, index_offset, Ritable_index);
}
@ -2110,7 +2125,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
Register Rflags = G1_scratch;
ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
resolve_cache_and_index(byte_no, Rcache, index);
resolve_cache_and_index(byte_no, noreg, Rcache, index, sizeof(u2));
jvmti_post_field_access(Rcache, index, is_static, false);
load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
@ -2475,7 +2490,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
Register Rflags = G1_scratch;
ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
resolve_cache_and_index(byte_no, Rcache, index);
resolve_cache_and_index(byte_no, noreg, Rcache, index, sizeof(u2));
jvmti_post_field_mod(Rcache, index, is_static);
load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
@ -2816,6 +2831,7 @@ void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Regist
void TemplateTable::invokevirtual(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f2_byte, "use this argument");
Register Rscratch = G3_scratch;
Register Rtemp = G4_scratch;
@ -2823,7 +2839,7 @@ void TemplateTable::invokevirtual(int byte_no) {
Register Rrecv = G5_method;
Label notFinal;
load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true);
load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false);
__ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
// Check for vfinal
@ -2864,9 +2880,10 @@ void TemplateTable::invokevirtual(int byte_no) {
void TemplateTable::fast_invokevfinal(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f2_byte, "use this argument");
load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true,
/*is_invokevfinal*/true);
/*is_invokevfinal*/true, false);
__ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
invokevfinal_helper(G3_scratch, Lscratch);
}
@ -2901,12 +2918,13 @@ void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
void TemplateTable::invokespecial(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
Register Rscratch = G3_scratch;
Register Rtemp = G4_scratch;
Register Rret = Lscratch;
load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, false);
load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, /*virtual*/ false, false, false);
__ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
__ verify_oop(G5_method);
@ -2934,12 +2952,13 @@ void TemplateTable::invokespecial(int byte_no) {
void TemplateTable::invokestatic(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
Register Rscratch = G3_scratch;
Register Rtemp = G4_scratch;
Register Rret = Lscratch;
load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, false);
load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, /*virtual*/ false, false, false);
__ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
__ verify_oop(G5_method);
@ -2992,6 +3011,7 @@ void TemplateTable::invokeinterface_object_method(Register RklassOop,
void TemplateTable::invokeinterface(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
Register Rscratch = G4_scratch;
Register Rret = G3_scratch;
@ -3001,7 +3021,7 @@ void TemplateTable::invokeinterface(int byte_no) {
Register Rflags = O1;
assert_different_registers(Rscratch, G5_method);
load_invoke_cp_cache_entry(byte_no, Rinterface, Rindex, Rflags, false);
load_invoke_cp_cache_entry(byte_no, Rinterface, Rindex, Rflags, /*virtual*/ false, false, false);
__ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
// get receiver
@ -3118,6 +3138,7 @@ void TemplateTable::invokeinterface(int byte_no) {
void TemplateTable::invokedynamic(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_oop, "use this argument");
if (!EnableInvokeDynamic) {
// We should not encounter this bytecode if !EnableInvokeDynamic.
@ -3132,7 +3153,6 @@ void TemplateTable::invokedynamic(int byte_no) {
// G5: CallSite object (f1)
// XX: unused (f2)
// G3: receiver address
// XX: flags (unused)
Register G5_callsite = G5_method;
@ -3140,7 +3160,8 @@ void TemplateTable::invokedynamic(int byte_no) {
Register Rtemp = G1_scratch;
Register Rret = Lscratch;
load_invoke_cp_cache_entry(byte_no, G5_callsite, noreg, Rret, false);
load_invoke_cp_cache_entry(byte_no, G5_callsite, noreg, Rret,
/*virtual*/ false, /*vfinal*/ false, /*indy*/ true);
__ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
__ verify_oop(G5_callsite);

View File

@ -65,13 +65,6 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseInlineCaches, false);
}
#ifdef _LP64
// Single issue niagara1 is slower for CompressedOops
// but niagaras after that it's fine.
if (!is_niagara1_plus()) {
if (FLAG_IS_DEFAULT(UseCompressedOops)) {
FLAG_SET_ERGO(bool, UseCompressedOops, false);
}
}
// 32-bit oops don't make sense for the 64-bit VM on sparc
// since the 32-bit VM has the same registers and smaller objects.
Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);

View File

@ -7643,6 +7643,9 @@ void MacroAssembler::verify_oop(Register reg, const char* s) {
// Pass register number to verify_oop_subroutine
char* b = new char[strlen(s) + 50];
sprintf(b, "verify_oop: %s: %s", reg->name(), s);
#ifdef _LP64
push(rscratch1); // save r10, trashed by movptr()
#endif
push(rax); // save rax,
push(reg); // pass register argument
ExternalAddress buffer((address) b);
@ -7653,6 +7656,7 @@ void MacroAssembler::verify_oop(Register reg, const char* s) {
// call indirectly to solve generation ordering problem
movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
call(rax);
// Caller pops the arguments (oop, message) and restores rax, r10
}
@ -7767,6 +7771,9 @@ void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
char* b = new char[strlen(s) + 50];
sprintf(b, "verify_oop_addr: %s", s);
#ifdef _LP64
push(rscratch1); // save r10, trashed by movptr()
#endif
push(rax); // save rax,
// addr may contain rsp so we will have to adjust it based on the push
// we just did
@ -7789,7 +7796,7 @@ void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
// call indirectly to solve generation ordering problem
movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
call(rax);
// Caller pops the arguments and restores rax, from the stack
// Caller pops the arguments (addr, message) and restores rax, r10.
}
void MacroAssembler::verify_tlab() {
@ -8185,9 +8192,14 @@ void MacroAssembler::load_prototype_header(Register dst, Register src) {
assert (Universe::heap() != NULL, "java heap should be initialized");
movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
if (Universe::narrow_oop_shift() != 0) {
assert(Address::times_8 == LogMinObjAlignmentInBytes &&
Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
if (LogMinObjAlignmentInBytes == Address::times_8) {
movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
} else {
// OK to use shift since we don't need to preserve flags.
shlq(dst, LogMinObjAlignmentInBytes);
movq(dst, Address(r12_heapbase, dst, Address::times_1, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
}
} else {
movq(dst, Address(dst, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
}
@ -8361,31 +8373,43 @@ void MacroAssembler::decode_heap_oop(Register r) {
}
void MacroAssembler::decode_heap_oop_not_null(Register r) {
// Note: it will change flags
assert (UseCompressedOops, "should only be used for compressed headers");
assert (Universe::heap() != NULL, "java heap should be initialized");
// Cannot assert, unverified entry point counts instructions (see .ad file)
// vtableStubs also counts instructions in pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop.
if (Universe::narrow_oop_shift() != 0) {
assert (Address::times_8 == LogMinObjAlignmentInBytes &&
Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
// Don't use Shift since it modifies flags.
leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
shlq(r, LogMinObjAlignmentInBytes);
if (Universe::narrow_oop_base() != NULL) {
addq(r, r12_heapbase);
}
} else {
assert (Universe::narrow_oop_base() == NULL, "sanity");
}
}
void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
// Note: it will change flags
assert (UseCompressedOops, "should only be used for compressed headers");
assert (Universe::heap() != NULL, "java heap should be initialized");
// Cannot assert, unverified entry point counts instructions (see .ad file)
// vtableStubs also counts instructions in pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop.
if (Universe::narrow_oop_shift() != 0) {
assert (Address::times_8 == LogMinObjAlignmentInBytes &&
Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
if (LogMinObjAlignmentInBytes == Address::times_8) {
leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
} else {
if (dst != src) {
movq(dst, src);
}
shlq(dst, LogMinObjAlignmentInBytes);
if (Universe::narrow_oop_base() != NULL) {
addq(dst, r12_heapbase);
}
}
} else if (dst != src) {
assert (Universe::narrow_oop_base() == NULL, "sanity");
movq(dst, src);

View File

@ -135,6 +135,9 @@ REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved
#endif // _LP64
// JSR 292 fixed register usages:
REGISTER_DECLARATION(Register, rbp_mh_SP_save, rbp);
// Address is an abstraction used to represent a memory location
// using any of the amd64 addressing modes with one object.
//

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -136,8 +136,8 @@ XMMRegister FrameMap::nr2xmmreg(int rnr) {
// FrameMap
//--------------------------------------------------------
void FrameMap::init() {
if (_init_done) return;
void FrameMap::initialize() {
assert(!_init_done, "once");
assert(nof_cpu_regs == LP64_ONLY(16) NOT_LP64(8), "wrong number of CPU registers");
map_register(0, rsi); rsi_opr = LIR_OprFact::single_cpu(0);
@ -309,6 +309,13 @@ LIR_Opr FrameMap::stack_pointer() {
}
// JSR 292
LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
assert(rbp == rbp_mh_SP_save, "must be same register");
return rbp_opr;
}
bool FrameMap::validate_frame() {
return true;
}

View File

@ -126,6 +126,3 @@
assert(i >= 0 && i < nof_caller_save_xmm_regs, "out of bounds");
return _caller_save_xmm_regs[i];
}
// JSR 292
static LIR_Opr& method_handle_invoke_SP_save_opr() { return rbp_opr; }

View File

@ -2462,9 +2462,18 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
}
#endif // _LP64
} else {
#ifdef _LP64
Register r_lo;
if (right->type() == T_OBJECT || right->type() == T_ARRAY) {
r_lo = right->as_register();
} else {
r_lo = right->as_register_lo();
}
#else
Register r_lo = right->as_register_lo();
Register r_hi = right->as_register_hi();
assert(l_lo != r_hi, "overwriting registers");
#endif
switch (code) {
case lir_logic_and:
__ andptr(l_lo, r_lo);
@ -2784,7 +2793,7 @@ void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
"must be aligned");
__ call(AddressLiteral(op->addr(), rtype));
add_call_info(code_offset(), op->info(), op->is_method_handle_invoke());
add_call_info(code_offset(), op->info());
}
@ -2795,7 +2804,7 @@ void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
(__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
"must be aligned");
__ call(AddressLiteral(op->addr(), rh));
add_call_info(code_offset(), op->info(), op->is_method_handle_invoke());
add_call_info(code_offset(), op->info());
}
@ -2805,16 +2814,6 @@ void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
}
void LIR_Assembler::preserve_SP(LIR_OpJavaCall* op) {
__ movptr(FrameMap::method_handle_invoke_SP_save_opr()->as_register(), rsp);
}
void LIR_Assembler::restore_SP(LIR_OpJavaCall* op) {
__ movptr(rsp, FrameMap::method_handle_invoke_SP_save_opr()->as_register());
}
void LIR_Assembler::emit_static_call_stub() {
address call_pc = __ pc();
address stub = __ start_a_stub(call_stub_size);

@ -175,7 +175,7 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
// store and again for the card mark.
LIR_Opr tmp = new_pointer_register();
__ leal(LIR_OprFact::address(addr), tmp);
return new LIR_Address(tmp, 0, type);
return new LIR_Address(tmp, type);
} else {
return addr;
}
@ -185,7 +185,7 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
void LIRGenerator::increment_counter(address counter, int step) {
LIR_Opr pointer = new_pointer_register();
__ move(LIR_OprFact::intptrConst(counter), pointer);
LIR_Address* addr = new LIR_Address(pointer, 0, T_INT);
LIR_Address* addr = new LIR_Address(pointer, T_INT);
increment_counter(addr, step);
}

@ -782,7 +782,7 @@ void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
// Restore SP from BP if the exception PC is a MethodHandle call site.
NOT_LP64(__ get_thread(thread);)
__ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
__ cmovptr(Assembler::notEqual, rsp, rbp);
__ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
// continue at exception handler (return address removed)
// note: do *not* remove arguments when unwinding the
@ -1581,7 +1581,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ should_not_reach_here();
break;
}
__ push(rax);
__ push(rdx);
@ -1605,8 +1604,8 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
// Can we store original value in the thread's buffer?
LP64_ONLY(__ movslq(tmp, queue_index);)
#ifdef _LP64
__ movslq(tmp, queue_index);
__ cmpq(tmp, 0);
#else
__ cmpl(queue_index, 0);
@ -1628,13 +1627,33 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ jmp(done);
__ bind(runtime);
// load the pre-value
__ push(rcx);
#ifdef _LP64
__ push(r8);
__ push(r9);
__ push(r10);
__ push(r11);
# ifndef _WIN64
__ push(rdi);
__ push(rsi);
# endif
#endif
// load the pre-value
f.load_argument(0, rcx);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
#ifdef _LP64
# ifndef _WIN64
__ pop(rsi);
__ pop(rdi);
# endif
__ pop(r11);
__ pop(r10);
__ pop(r9);
__ pop(r8);
#endif
__ pop(rcx);
__ bind(done);
__ pop(rdx);
__ pop(rax);
}
@ -1664,13 +1683,13 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
PtrQueue::byte_offset_of_buf()));
__ push(rax);
__ push(rdx);
__ push(rcx);
NOT_LP64(__ get_thread(thread);)
ExternalAddress cardtable((address)ct->byte_map_base);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
const Register card_addr = rdx;
const Register card_addr = rcx;
#ifdef _LP64
const Register tmp = rscratch1;
f.load_argument(0, card_addr);
@ -1679,7 +1698,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
// get the address of the card
__ addq(card_addr, tmp);
#else
const Register card_index = rdx;
const Register card_index = rcx;
f.load_argument(0, card_index);
__ shrl(card_index, CardTableModRefBS::card_shift);
@ -1716,12 +1735,32 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ jmp(done);
__ bind(runtime);
NOT_LP64(__ push(rcx);)
__ push(rdx);
#ifdef _LP64
__ push(r8);
__ push(r9);
__ push(r10);
__ push(r11);
# ifndef _WIN64
__ push(rdi);
__ push(rsi);
# endif
#endif
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
NOT_LP64(__ pop(rcx);)
__ bind(done);
#ifdef _LP64
# ifndef _WIN64
__ pop(rsi);
__ pop(rdi);
# endif
__ pop(r11);
__ pop(r10);
__ pop(r9);
__ pop(r8);
#endif
__ pop(rdx);
__ bind(done);
__ pop(rcx);
__ pop(rax);
}

@ -189,11 +189,11 @@ void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, i
}
void InterpreterMacroAssembler::get_cache_index_at_bcp(Register reg, int bcp_offset, bool giant_index) {
void InterpreterMacroAssembler::get_cache_index_at_bcp(Register reg, int bcp_offset, size_t index_size) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
if (!giant_index) {
if (index_size == sizeof(u2)) {
load_unsigned_short(reg, Address(rsi, bcp_offset));
} else {
} else if (index_size == sizeof(u4)) {
assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
movl(reg, Address(rsi, bcp_offset));
// Check if the secondary index definition is still ~x, otherwise
@ -201,14 +201,19 @@ void InterpreterMacroAssembler::get_cache_index_at_bcp(Register reg, int bcp_off
// plain index.
assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
notl(reg); // convert to plain index
} else if (index_size == sizeof(u1)) {
assert(EnableMethodHandles, "tiny index used only for EnableMethodHandles");
load_unsigned_byte(reg, Address(rsi, bcp_offset));
} else {
ShouldNotReachHere();
}
}
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register index,
int bcp_offset, bool giant_index) {
int bcp_offset, size_t index_size) {
assert(cache != index, "must use different registers");
get_cache_index_at_bcp(index, bcp_offset, giant_index);
get_cache_index_at_bcp(index, bcp_offset, index_size);
movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
shlptr(index, 2); // convert from field index to ConstantPoolCacheEntry index
@ -216,9 +221,9 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Regis
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
int bcp_offset, bool giant_index) {
int bcp_offset, size_t index_size) {
assert(cache != tmp, "must use different register");
get_cache_index_at_bcp(tmp, bcp_offset, giant_index);
get_cache_index_at_bcp(tmp, bcp_offset, index_size);
assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
// convert from field index to ConstantPoolCacheEntry index
// and from word offset to byte offset

@ -76,9 +76,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
void get_cpool_and_tags(Register cpool, Register tags) { get_constant_pool(cpool); movptr(tags, Address(cpool, constantPoolOopDesc::tags_offset_in_bytes()));
}
void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, bool giant_index = false);
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
void get_cache_index_at_bcp(Register index, int bcp_offset, bool giant_index = false);
void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
// Expression stack
void f2ieee(); // truncate ftos to 32bits

@ -187,11 +187,11 @@ void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(
void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
int bcp_offset,
bool giant_index) {
size_t index_size) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
if (!giant_index) {
if (index_size == sizeof(u2)) {
load_unsigned_short(index, Address(r13, bcp_offset));
} else {
} else if (index_size == sizeof(u4)) {
assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
movl(index, Address(r13, bcp_offset));
// Check if the secondary index definition is still ~x, otherwise
@ -199,6 +199,11 @@ void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
// plain index.
assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
notl(index); // convert to plain index
} else if (index_size == sizeof(u1)) {
assert(EnableMethodHandles, "tiny index used only for EnableMethodHandles");
load_unsigned_byte(index, Address(r13, bcp_offset));
} else {
ShouldNotReachHere();
}
}
@ -206,9 +211,9 @@ void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
Register index,
int bcp_offset,
bool giant_index) {
size_t index_size) {
assert(cache != index, "must use different registers");
get_cache_index_at_bcp(index, bcp_offset, giant_index);
get_cache_index_at_bcp(index, bcp_offset, index_size);
movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
// convert from field index to ConstantPoolCacheEntry index
@ -219,9 +224,9 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
Register tmp,
int bcp_offset,
bool giant_index) {
size_t index_size) {
assert(cache != tmp, "must use different register");
get_cache_index_at_bcp(tmp, bcp_offset, giant_index);
get_cache_index_at_bcp(tmp, bcp_offset, index_size);
assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
// convert from field index to ConstantPoolCacheEntry index
// and from word offset to byte offset
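For illustration only (not part of this changeset): a hedged C++ sketch of the operand-width dispatch that the new index_size parameter expresses above; names and the byte-order handling are simplified stand-ins.

#include <cstddef>
#include <cstdint>
#include <cstring>

// Illustrative read of a constant-pool-cache index of 1, 2 or 4 bytes from
// the bytecode pointer, mirroring the u1/u2/u4 cases added above. The u4
// (invokedynamic) case stores the secondary index as ~x, hence the flip.
static uint32_t toy_read_cp_cache_index(const uint8_t* bcp, size_t index_size) {
    if (index_size == sizeof(uint16_t)) {          // u2: ordinary bytecodes
        uint16_t v; std::memcpy(&v, bcp, sizeof v); return v;
    } else if (index_size == sizeof(uint32_t)) {   // u4: invokedynamic
        uint32_t v; std::memcpy(&v, bcp, sizeof v); return ~v;
    } else {                                       // u1: method handle "tiny" index
        return *bcp;
    }
}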

@ -95,10 +95,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
void get_cache_and_index_at_bcp(Register cache, Register index,
int bcp_offset, bool giant_index = false);
int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
int bcp_offset, bool giant_index = false);
void get_cache_index_at_bcp(Register index, int bcp_offset, bool giant_index = false);
int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
void pop_ptr(Register r = rax);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -115,3 +115,6 @@ REGISTER_DEFINITION(MMXRegister, mmx4 );
REGISTER_DEFINITION(MMXRegister, mmx5 );
REGISTER_DEFINITION(MMXRegister, mmx6 );
REGISTER_DEFINITION(MMXRegister, mmx7 );
// JSR 292
REGISTER_DEFINITION(Register, rbp_mh_SP_save);

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -117,7 +117,7 @@ void OptoRuntime::generate_exception_blob() {
// Restore SP from BP if the exception PC is a MethodHandle call site.
__ cmpl(Address(rcx, JavaThread::is_method_handle_return_offset()), 0);
__ cmovptr(Assembler::notEqual, rsp, rbp);
__ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
// We have a handler in rax, (could be deopt blob)
// rdx - throwing pc, deopt blob will need it.

@ -3305,7 +3305,7 @@ void OptoRuntime::generate_exception_blob() {
// Restore SP from BP if the exception PC is a MethodHandle call site.
__ cmpl(Address(r15_thread, JavaThread::is_method_handle_return_offset()), 0);
__ cmovptr(Assembler::notEqual, rsp, rbp);
__ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
// We have a handler in rax (could be deopt blob).
__ mov(r8, rax);

@ -914,6 +914,7 @@ class StubGenerator: public StubCodeGenerator {
// * [tos + 5]: error message (char*)
// * [tos + 6]: object to verify (oop)
// * [tos + 7]: saved rax - saved by caller and bashed
// * [tos + 8]: saved r10 (rscratch1) - saved by caller
// * = popped on exit
address generate_verify_oop() {
StubCodeMark mark(this, "StubRoutines", "verify_oop");
@ -934,6 +935,7 @@ class StubGenerator: public StubCodeGenerator {
// After previous pushes.
oop_to_verify = 6 * wordSize,
saved_rax = 7 * wordSize,
saved_r10 = 8 * wordSize,
// Before the call to MacroAssembler::debug(), see below.
return_addr = 16 * wordSize,
@ -983,15 +985,17 @@ class StubGenerator: public StubCodeGenerator {
// return if everything seems ok
__ bind(exit);
__ movptr(rax, Address(rsp, saved_rax)); // get saved rax back
__ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
__ pop(c_rarg3); // restore c_rarg3
__ pop(c_rarg2); // restore c_rarg2
__ pop(r12); // restore r12
__ popf(); // restore flags
__ ret(3 * wordSize); // pop caller saved stuff
__ ret(4 * wordSize); // pop caller saved stuff
// handle errors
__ bind(error);
__ movptr(rax, Address(rsp, saved_rax)); // get saved rax back
__ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
__ pop(c_rarg3); // get saved c_rarg3 back
__ pop(c_rarg2); // get saved c_rarg2 back
__ pop(r12); // get saved r12 back
@ -1009,6 +1013,7 @@ class StubGenerator: public StubCodeGenerator {
// * [tos + 17] error message (char*)
// * [tos + 18] object to verify (oop)
// * [tos + 19] saved rax - saved by caller and bashed
// * [tos + 20] saved r10 (rscratch1) - saved by caller
// * = popped on exit
__ movptr(c_rarg0, Address(rsp, error_msg)); // pass address of error message
@ -1021,7 +1026,7 @@ class StubGenerator: public StubCodeGenerator {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
__ mov(rsp, r12); // restore rsp
__ popa(); // pop registers (includes r12)
__ ret(3 * wordSize); // pop caller saved stuff
__ ret(4 * wordSize); // pop caller saved stuff
return start;
}

@ -214,7 +214,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
__ jcc(Assembler::equal, L_giant_index);
}
__ get_cache_and_index_at_bcp(rbx, rcx, 1, false);
__ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
__ bind(L_got_cache);
__ movl(rbx, Address(rbx, rcx,
Address::times_ptr, constantPoolCacheOopDesc::base_offset() +
@ -226,7 +226,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
// out of the main line of code...
if (EnableInvokeDynamic) {
__ bind(L_giant_index);
__ get_cache_and_index_at_bcp(rbx, rcx, 1, true);
__ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
__ jmp(L_got_cache);
}

@ -192,7 +192,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
__ jcc(Assembler::equal, L_giant_index);
}
__ get_cache_and_index_at_bcp(rbx, rcx, 1, false);
__ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
__ bind(L_got_cache);
__ movl(rbx, Address(rbx, rcx,
Address::times_ptr,
@ -205,7 +205,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
// out of the main line of code...
if (EnableInvokeDynamic) {
__ bind(L_giant_index);
__ get_cache_and_index_at_bcp(rbx, rcx, 1, true);
__ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
__ jmp(L_got_cache);
}

@ -2012,22 +2012,29 @@ void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constrain
__ membar(order_constraint);
}
void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);
void TemplateTable::resolve_cache_and_index(int byte_no,
Register result,
Register Rcache,
Register index,
size_t index_size) {
Register temp = rbx;
assert_different_registers(Rcache, index, temp);
assert_different_registers(result, Rcache, index, temp);
const int shift_count = (1 + byte_no)*BitsPerByte;
Label resolved;
__ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
if (is_invokedynamic) {
// we are resolved if the f1 field contains a non-null CallSite object
__ cmpptr(Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()), (int32_t) NULL_WORD);
__ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
if (byte_no == f1_oop) {
// We are resolved if the f1 field contains a non-null object (CallSite, etc.)
// This kind of CP cache entry does not need to match the flags byte, because
// there is a 1-1 relation between bytecode type and CP entry type.
assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
__ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
__ testptr(result, result);
__ jcc(Assembler::notEqual, resolved);
} else {
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
assert(result == noreg, ""); //else change code for setting result
const int shift_count = (1 + byte_no)*BitsPerByte;
__ movl(temp, Address(Rcache, index, Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
__ shrl(temp, shift_count);
// have we resolved this bytecode?
@ -2053,7 +2060,9 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
__ movl(temp, (int)bytecode());
__ call_VM(noreg, entry, temp);
// Update registers with resolved info
__ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
__ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
if (result != noreg)
__ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
__ bind(resolved);
}
@ -2087,7 +2096,8 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
Register itable_index,
Register flags,
bool is_invokevirtual,
bool is_invokevfinal /*unused*/) {
bool is_invokevfinal /*unused*/,
bool is_invokedynamic) {
// setup registers
const Register cache = rcx;
const Register index = rdx;
@ -2109,13 +2119,18 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::f2_offset());
resolve_cache_and_index(byte_no, cache, index);
__ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
if (byte_no == f1_oop) {
// Resolved f1_oop goes directly into 'method' register.
assert(is_invokedynamic, "");
resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
} else {
resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
__ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
}
if (itable_index != noreg) {
__ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
}
__ movl(flags , Address(cache, index, Address::times_ptr, flags_offset ));
__ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
}
@ -2169,7 +2184,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
const Register off = rbx;
const Register flags = rax;
resolve_cache_and_index(byte_no, cache, index);
resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
jvmti_post_field_access(cache, index, is_static, false);
load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
@ -2378,7 +2393,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
const Register off = rbx;
const Register flags = rax;
resolve_cache_and_index(byte_no, cache, index);
resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
jvmti_post_field_mod(cache, index, is_static);
load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
@ -2815,10 +2830,11 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
// save 'interpreter return address'
__ save_bcp();
load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual);
load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
// load receiver if needed (note: no return address pushed yet)
if (load_receiver) {
assert(!is_invokedynamic, "");
__ movl(recv, flags);
__ andl(recv, 0xFF);
// recv count is 0 based?
@ -2910,6 +2926,7 @@ void TemplateTable::invokevirtual_helper(Register index, Register recv,
void TemplateTable::invokevirtual(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f2_byte, "use this argument");
prepare_invoke(rbx, noreg, byte_no);
// rbx,: index
@ -2922,6 +2939,7 @@ void TemplateTable::invokevirtual(int byte_no) {
void TemplateTable::invokespecial(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(rbx, noreg, byte_no);
// do the call
__ verify_oop(rbx);
@ -2932,6 +2950,7 @@ void TemplateTable::invokespecial(int byte_no) {
void TemplateTable::invokestatic(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(rbx, noreg, byte_no);
// do the call
__ verify_oop(rbx);
@ -2942,12 +2961,14 @@ void TemplateTable::invokestatic(int byte_no) {
void TemplateTable::fast_invokevfinal(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f2_byte, "use this argument");
__ stop("fast_invokevfinal not used on x86");
}
void TemplateTable::invokeinterface(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(rax, rbx, byte_no);
// rax,: Interface
@ -3036,11 +3057,11 @@ void TemplateTable::invokedynamic(int byte_no) {
return;
}
assert(byte_no == f1_oop, "use this argument");
prepare_invoke(rax, rbx, byte_no);
// rax: CallSite object (f1)
// rbx: unused (f2)
// rcx: receiver address
// rdx: flags (unused)
if (ProfileInterpreter) {

@ -2015,21 +2015,28 @@ void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits
}
}
void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);
void TemplateTable::resolve_cache_and_index(int byte_no,
Register result,
Register Rcache,
Register index,
size_t index_size) {
const Register temp = rbx;
assert_different_registers(Rcache, index, temp);
assert_different_registers(result, Rcache, index, temp);
const int shift_count = (1 + byte_no) * BitsPerByte;
Label resolved;
__ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
if (is_invokedynamic) {
// we are resolved if the f1 field contains a non-null CallSite object
__ cmpptr(Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()), (int32_t) NULL_WORD);
__ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
if (byte_no == f1_oop) {
// We are resolved if the f1 field contains a non-null object (CallSite, etc.)
// This kind of CP cache entry does not need to match the flags byte, because
// there is a 1-1 relation between bytecode type and CP entry type.
assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
__ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
__ testptr(result, result);
__ jcc(Assembler::notEqual, resolved);
} else {
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
assert(result == noreg, ""); //else change code for setting result
const int shift_count = (1 + byte_no) * BitsPerByte;
__ movl(temp, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
__ shrl(temp, shift_count);
// have we resolved this bytecode?
@ -2064,7 +2071,9 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
__ call_VM(noreg, entry, temp);
// Update registers with resolved info
__ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
__ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
if (result != noreg)
__ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
__ bind(resolved);
}
@ -2100,7 +2109,8 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
Register itable_index,
Register flags,
bool is_invokevirtual,
bool is_invokevfinal /*unused*/) {
bool is_invokevfinal, /*unused*/
bool is_invokedynamic) {
// setup registers
const Register cache = rcx;
const Register index = rdx;
@ -2120,15 +2130,18 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::f2_offset());
resolve_cache_and_index(byte_no, cache, index);
assert(wordSize == 8, "adjust code below");
__ movptr(method, Address(cache, index, Address::times_8, method_offset));
if (itable_index != noreg) {
__ movptr(itable_index,
Address(cache, index, Address::times_8, index_offset));
if (byte_no == f1_oop) {
// Resolved f1_oop goes directly into 'method' register.
assert(is_invokedynamic, "");
resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
} else {
resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
__ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
}
__ movl(flags , Address(cache, index, Address::times_8, flags_offset));
if (itable_index != noreg) {
__ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
}
__ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
}
@ -2187,7 +2200,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
const Register flags = rax;
const Register bc = c_rarg3; // uses same reg as obj, so don't mix them
resolve_cache_and_index(byte_no, cache, index);
resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
jvmti_post_field_access(cache, index, is_static, false);
load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
@ -2390,7 +2403,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
const Register flags = rax;
const Register bc = c_rarg3;
resolve_cache_and_index(byte_no, cache, index);
resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
jvmti_post_field_mod(cache, index, is_static);
load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
@ -2815,10 +2828,11 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
// save 'interpreter return address'
__ save_bcp();
load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual);
load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
// load receiver if needed (note: no return address pushed yet)
if (load_receiver) {
assert(!is_invokedynamic, "");
__ movl(recv, flags);
__ andl(recv, 0xFF);
Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1));
@ -2914,6 +2928,7 @@ void TemplateTable::invokevirtual_helper(Register index,
void TemplateTable::invokevirtual(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f2_byte, "use this argument");
prepare_invoke(rbx, noreg, byte_no);
// rbx: index
@ -2926,6 +2941,7 @@ void TemplateTable::invokevirtual(int byte_no) {
void TemplateTable::invokespecial(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(rbx, noreg, byte_no);
// do the call
__ verify_oop(rbx);
@ -2936,6 +2952,7 @@ void TemplateTable::invokespecial(int byte_no) {
void TemplateTable::invokestatic(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(rbx, noreg, byte_no);
// do the call
__ verify_oop(rbx);
@ -2945,11 +2962,13 @@ void TemplateTable::invokestatic(int byte_no) {
void TemplateTable::fast_invokevfinal(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f2_byte, "use this argument");
__ stop("fast_invokevfinal not used on amd64");
}
void TemplateTable::invokeinterface(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(rax, rbx, byte_no);
// rax: Interface
@ -3027,6 +3046,7 @@ void TemplateTable::invokeinterface(int byte_no) {
void TemplateTable::invokedynamic(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_oop, "use this argument");
if (!EnableInvokeDynamic) {
// We should not encounter this bytecode if !EnableInvokeDynamic.
@ -3039,6 +3059,7 @@ void TemplateTable::invokedynamic(int byte_no) {
return;
}
assert(byte_no == f1_oop, "use this argument");
prepare_invoke(rax, rbx, byte_no);
// rax: CallSite object (f1)

@ -1377,6 +1377,12 @@ const int Matcher::init_array_short_size = 8 * BytesPerLong;
// registers? True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = true;
bool Matcher::narrow_oop_use_complex_address() {
ShouldNotCallThis();
return true;
}
// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers. Most RISCs will have to materialize an address into a
@ -1841,14 +1847,14 @@ encode %{
MacroAssembler _masm(&cbuf);
// RBP is preserved across all calls, even compiled calls.
// Use it to preserve RSP in places where the callee might change the SP.
__ movptr(rbp, rsp);
__ movptr(rbp_mh_SP_save, rsp);
debug_only(int off1 = cbuf.code_size());
assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
%}
enc_class restore_SP %{
MacroAssembler _masm(&cbuf);
__ movptr(rsp, rbp);
__ movptr(rsp, rbp_mh_SP_save);
%}
enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
@ -13570,7 +13576,7 @@ instruct CallStaticJavaDirect(method meth) %{
// Call Java Static Instruction (method handle version)
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
instruct CallStaticJavaHandle(method meth, eBPRegP ebp) %{
instruct CallStaticJavaHandle(method meth, eBPRegP ebp_mh_SP_save) %{
match(CallStaticJava);
predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
effect(USE meth);

@ -1851,29 +1851,24 @@ uint reloc_java_to_interp()
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
if (UseCompressedOops) {
st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t", oopDesc::klass_offset_in_bytes());
st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
if (Universe::narrow_oop_shift() != 0) {
st->print_cr("leaq rscratch1, [r12_heapbase, r, Address::times_8, 0]");
st->print_cr("\tdecode_heap_oop_not_null rscratch1, rscratch1");
}
st->print_cr("cmpq rax, rscratch1\t # Inline cache check");
st->print_cr("\tcmpq rax, rscratch1\t # Inline cache check");
} else {
st->print_cr("cmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t"
"# Inline cache check", oopDesc::klass_offset_in_bytes());
st->print_cr("\tcmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t"
"# Inline cache check");
}
st->print_cr("\tjne SharedRuntime::_ic_miss_stub");
st->print_cr("\tnop");
if (!OptoBreakpoint) {
st->print_cr("\tnop");
}
st->print_cr("\tnop\t# nops to align entry point");
}
#endif
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
MacroAssembler masm(&cbuf);
#ifdef ASSERT
uint code_size = cbuf.code_size();
#endif
if (UseCompressedOops) {
masm.load_klass(rscratch1, j_rarg0);
masm.cmpptr(rax, rscratch1);
@ -1884,33 +1879,21 @@ void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
/* WARNING these NOPs are critical so that verified entry point is properly
aligned for patching by NativeJump::patch_verified_entry() */
int nops_cnt = 1;
if (!OptoBreakpoint) {
4 bytes aligned for patching by NativeJump::patch_verified_entry() */
int nops_cnt = 4 - ((cbuf.code_size() - code_size) & 0x3);
if (OptoBreakpoint) {
// Leave space for int3
nops_cnt += 1;
nops_cnt -= 1;
}
if (UseCompressedOops) {
// ??? divisible by 4 is aligned?
nops_cnt += 1;
}
masm.nop(nops_cnt);
assert(cbuf.code_size() - code_size == size(ra_),
"checking code size of inline cache node");
nops_cnt &= 0x3; // Do not add nops if code is aligned.
if (nops_cnt > 0)
masm.nop(nops_cnt);
}
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
if (UseCompressedOops) {
if (Universe::narrow_oop_shift() == 0) {
return OptoBreakpoint ? 15 : 16;
} else {
return OptoBreakpoint ? 19 : 20;
}
} else {
return OptoBreakpoint ? 11 : 12;
}
return MachNode::size(ra_); // too many variables; just compute it
// the hard way
}
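For illustration only (not part of this changeset): the nop-count expression above pads the inline-cache check to a 4-byte boundary; a standalone sketch of that arithmetic:

#include <cstddef>

// Number of filler bytes needed to round `emitted` up to a 4-byte boundary,
// as in nops_cnt = 4 - (emitted & 0x3) followed by nops_cnt &= 0x3 so that
// already-aligned code gets no padding.
static inline size_t nops_to_align4(size_t emitted) {
    return (4 - (emitted & 0x3)) & 0x3;
}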
@ -2054,6 +2037,11 @@ const int Matcher::init_array_short_size = 8 * BytesPerLong;
// into registers? True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = true;
bool Matcher::narrow_oop_use_complex_address() {
assert(UseCompressedOops, "only for compressed oops code");
return (LogMinObjAlignmentInBytes <= 3);
}
// Is it better to copy float constants, or load them directly from
// memory? Intel can load a float constant from a direct address,
// requiring no extra registers. Most RISCs will have to materialize
@ -2635,14 +2623,14 @@ encode %{
MacroAssembler _masm(&cbuf);
// RBP is preserved across all calls, even compiled calls.
// Use it to preserve RSP in places where the callee might change the SP.
__ movptr(rbp, rsp);
__ movptr(rbp_mh_SP_save, rsp);
debug_only(int off1 = cbuf.code_size());
assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
%}
enc_class restore_SP %{
MacroAssembler _masm(&cbuf);
__ movptr(rsp, rbp);
__ movptr(rsp, rbp_mh_SP_save);
%}
enc_class Java_Static_Call(method meth)
@ -5127,7 +5115,7 @@ operand indPosIndexScaleOffset(any_RegP reg, immL32 off, rRegI idx, immI2 scale)
// Note: x86 architecture doesn't support "scale * index + offset" without a base
// we can't free r12 even with Universe::narrow_oop_base() == NULL.
operand indCompressedOopOffset(rRegN reg, immL32 off) %{
predicate(UseCompressedOops && (Universe::narrow_oop_shift() != 0));
predicate(UseCompressedOops && (Universe::narrow_oop_shift() == Address::times_8));
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP (DecodeN reg) off);
@ -7742,10 +7730,11 @@ instruct decodeHeapOop(rRegP dst, rRegN src, rFlagsReg cr) %{
ins_pipe(ialu_reg_long);
%}
instruct decodeHeapOop_not_null(rRegP dst, rRegN src) %{
instruct decodeHeapOop_not_null(rRegP dst, rRegN src, rFlagsReg cr) %{
predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant);
match(Set dst (DecodeN src));
effect(KILL cr);
format %{ "decode_heap_oop_not_null $dst,$src" %}
ins_encode %{
Register s = $src$$Register;
@ -12604,7 +12593,7 @@ instruct CallStaticJavaDirect(method meth) %{
// Call Java Static Instruction (method handle version)
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
instruct CallStaticJavaHandle(method meth, rbp_RegP rbp) %{
instruct CallStaticJavaHandle(method meth, rbp_RegP rbp_mh_SP_save) %{
match(CallStaticJava);
predicate(((CallStaticJavaNode*) n)->is_method_handle_invoke());
effect(USE meth);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -232,12 +232,11 @@ int generateJvmOffsets(GEN_variant gen_variant) {
GEN_OFFS(CodeBlob, _header_size);
GEN_OFFS(CodeBlob, _instructions_offset);
GEN_OFFS(CodeBlob, _data_offset);
GEN_OFFS(CodeBlob, _oops_offset);
GEN_OFFS(CodeBlob, _oops_length);
GEN_OFFS(CodeBlob, _frame_size);
printf("\n");
GEN_OFFS(nmethod, _method);
GEN_OFFS(nmethod, _oops_offset);
GEN_OFFS(nmethod, _scopes_data_offset);
GEN_OFFS(nmethod, _scopes_pcs_offset);
GEN_OFFS(nmethod, _handler_table_offset);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -130,7 +130,7 @@ typedef struct Nmethod_t {
int32_t scopes_data_beg; /* _scopes_data_offset */
int32_t scopes_data_end;
int32_t oops_beg; /* _oops_offset */
int32_t oops_len; /* _oops_length */
int32_t oops_end;
int32_t scopes_pcs_beg; /* _scopes_pcs_offset */
int32_t scopes_pcs_end;
@ -597,9 +597,9 @@ static int nmethod_info(Nmethod_t *N)
CHECK_FAIL(err);
/* Oops */
err = ps_pread(J->P, nm + OFFSET_CodeBlob_oops_offset, &N->oops_beg, SZ32);
err = ps_pread(J->P, nm + OFFSET_nmethod_oops_offset, &N->oops_beg, SZ32);
CHECK_FAIL(err);
err = ps_pread(J->P, nm + OFFSET_CodeBlob_oops_length, &N->oops_len, SZ32);
err = ps_pread(J->P, nm + OFFSET_nmethod_scopes_data_offset, &N->oops_end, SZ32);
CHECK_FAIL(err);
/* scopes_pcs */
@ -624,8 +624,8 @@ static int nmethod_info(Nmethod_t *N)
fprintf(stderr, "\t nmethod_info: orig_pc_offset: %#x \n",
N->orig_pc_offset);
fprintf(stderr, "\t nmethod_info: oops_beg: %#x, oops_len: %#x\n",
N->oops_beg, N->oops_len);
fprintf(stderr, "\t nmethod_info: oops_beg: %#x, oops_end: %#x\n",
N->oops_beg, N->oops_end);
fprintf(stderr, "\t nmethod_info: scopes_data_beg: %#x, scopes_data_end: %#x\n",
N->scopes_data_beg, N->scopes_data_end);
@ -959,8 +959,8 @@ static int scopeDesc_chain(Nmethod_t *N) {
err = scope_desc_at(N, decode_offset, vf);
CHECK_FAIL(err);
if (vf->methodIdx > N->oops_len) {
fprintf(stderr, "\t scopeDesc_chain: (methodIdx > oops_len) !\n");
if (vf->methodIdx > ((N->oops_end - N->oops_beg) / POINTER_SIZE)) {
fprintf(stderr, "\t scopeDesc_chain: (methodIdx > oops length) !\n");
return -1;
}
err = read_pointer(N->J, N->nm + N->oops_beg + (vf->methodIdx-1)*POINTER_SIZE,

@ -510,9 +510,9 @@ class CodeBuffer: public StackObj {
copy_relocations_to(blob);
copy_code_to(blob);
}
void copy_oops_to(CodeBlob* blob) {
void copy_oops_to(nmethod* nm) {
if (!oop_recorder()->is_unused()) {
oop_recorder()->copy_to(blob);
oop_recorder()->copy_to(nm);
}
}

@ -26,9 +26,11 @@
#include "incls/_c1_Canonicalizer.cpp.incl"
static void do_print_value(Value* vp) {
(*vp)->print_line();
}
class PrintValueVisitor: public ValueVisitor {
void visit(Value* vp) {
(*vp)->print_line();
}
};
void Canonicalizer::set_canonical(Value x) {
assert(x != NULL, "value must exist");
@ -37,10 +39,11 @@ void Canonicalizer::set_canonical(Value x) {
// in the instructions).
if (canonical() != x) {
if (PrintCanonicalization) {
canonical()->input_values_do(do_print_value);
PrintValueVisitor do_print_value;
canonical()->input_values_do(&do_print_value);
canonical()->print_line();
tty->print_cr("canonicalized to:");
x->input_values_do(do_print_value);
x->input_values_do(&do_print_value);
x->print_line();
tty->cr();
}
@ -202,7 +205,7 @@ void Canonicalizer::do_StoreField (StoreField* x) {
// limit this optimization to current block
if (value != NULL && in_current_block(conv)) {
set_canonical(new StoreField(x->obj(), x->offset(), x->field(), value, x->is_static(),
x->lock_stack(), x->state_before(), x->is_loaded(), x->is_initialized()));
x->lock_stack(), x->state_before(), x->is_loaded(), x->is_initialized()));
return;
}
}

@ -66,9 +66,6 @@ class PhaseTraceTime: public TraceTime {
}
};
Arena* Compilation::_arena = NULL;
Compilation* Compilation::_compilation = NULL;
// Implementation of Compilation
@ -238,9 +235,23 @@ void Compilation::emit_code_epilog(LIR_Assembler* assembler) {
}
void Compilation::setup_code_buffer(CodeBuffer* code, int call_stub_estimate) {
// Preinitialize the consts section to some large size:
int locs_buffer_size = 20 * (relocInfo::length_limit + sizeof(relocInfo));
char* locs_buffer = NEW_RESOURCE_ARRAY(char, locs_buffer_size);
code->insts()->initialize_shared_locs((relocInfo*)locs_buffer,
locs_buffer_size / sizeof(relocInfo));
code->initialize_consts_size(Compilation::desired_max_constant_size());
// Call stubs + deopt/exception handler
code->initialize_stubs_size((call_stub_estimate * LIR_Assembler::call_stub_size) +
LIR_Assembler::exception_handler_size +
LIR_Assembler::deopt_handler_size);
}
int Compilation::emit_code_body() {
// emit code
Runtime1::setup_code_buffer(code(), allocator()->num_calls());
setup_code_buffer(code(), allocator()->num_calls());
code()->initialize_oop_recorder(env()->oop_recorder());
_masm = new C1_MacroAssembler(code());
@ -422,7 +433,8 @@ void Compilation::generate_exception_handler_table() {
}
Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* method, int osr_bci)
Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* method,
int osr_bci, BufferBlob* buffer_blob)
: _compiler(compiler)
, _env(env)
, _method(method)
@ -437,8 +449,10 @@ Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* metho
, _bailout_msg(NULL)
, _exception_info_list(NULL)
, _allocator(NULL)
, _code(Runtime1::get_buffer_blob()->instructions_begin(),
Runtime1::get_buffer_blob()->instructions_size())
, _next_id(0)
, _next_block_id(0)
, _code(buffer_blob->instructions_begin(),
buffer_blob->instructions_size())
, _current_instruction(NULL)
#ifndef PRODUCT
, _last_instruction_printed(NULL)
@ -446,17 +460,15 @@ Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* metho
{
PhaseTraceTime timeit(_t_compile);
assert(_arena == NULL, "shouldn't only one instance of Compilation in existence at a time");
_arena = Thread::current()->resource_area();
_compilation = this;
_env->set_compiler_data(this);
_exception_info_list = new ExceptionInfoList();
_implicit_exception_table.set_size(0);
compile_method();
}
Compilation::~Compilation() {
_arena = NULL;
_compilation = NULL;
_env->set_compiler_data(NULL);
}

@ -53,15 +53,11 @@ define_stack(ExceptionInfoList, ExceptionInfoArray)
class Compilation: public StackObj {
friend class CompilationResourceObj;
private:
static Arena* _arena;
static Arena* arena() { return _arena; }
static Compilation* _compilation;
private:
// compilation specifics
Arena* _arena;
int _next_id;
int _next_block_id;
AbstractCompiler* _compiler;
ciEnv* _env;
ciMethod* _method;
@ -108,10 +104,14 @@ class Compilation: public StackObj {
public:
// creation
Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* method, int osr_bci);
Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* method,
int osr_bci, BufferBlob* buffer_blob);
~Compilation();
static Compilation* current_compilation() { return _compilation; }
static Compilation* current() {
return (Compilation*) ciEnv::current()->compiler_data();
}
// accessors
ciEnv* env() const { return _env; }
@ -128,6 +128,15 @@ class Compilation: public StackObj {
CodeBuffer* code() { return &_code; }
C1_MacroAssembler* masm() const { return _masm; }
CodeOffsets* offsets() { return &_offsets; }
Arena* arena() { return _arena; }
// Instruction ids
int get_next_id() { return _next_id++; }
int number_of_instructions() const { return _next_id; }
// BlockBegin ids
int get_next_block_id() { return _next_block_id++; }
int number_of_blocks() const { return _next_block_id; }
// setters
void set_has_exception_handlers(bool f) { _has_exception_handlers = f; }
@ -158,6 +167,15 @@ class Compilation: public StackObj {
bool bailed_out() const { return _bailout_msg != NULL; }
const char* bailout_msg() const { return _bailout_msg; }
static int desired_max_code_buffer_size() {
return (int) NMethodSizeLimit; // default 256K or 512K
}
static int desired_max_constant_size() {
return (int) NMethodSizeLimit / 10; // about 25K
}
static void setup_code_buffer(CodeBuffer* cb, int call_stub_estimate);
// timers
static void print_timers();
@ -203,7 +221,10 @@ class InstructionMark: public StackObj {
// Base class for objects allocated by the compiler in the compilation arena
class CompilationResourceObj ALLOCATION_SUPER_CLASS_SPEC {
public:
void* operator new(size_t size) { return Compilation::arena()->Amalloc(size); }
void* operator new(size_t size) { return Compilation::current()->arena()->Amalloc(size); }
void* operator new(size_t size, Arena* arena) {
return arena->Amalloc(size);
}
void operator delete(void* p) {} // nothing to do
};
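For illustration only (not part of this changeset): a toy, self-contained version of the arena-placement operator new pattern that the hunk above adds to CompilationResourceObj; ToyArena and ToyNode are invented stand-ins, not the HotSpot Arena API.

#include <cstddef>
#include <new>

// Minimal fixed-buffer bump allocator standing in for HotSpot's Arena.
class ToyArena {
    alignas(std::max_align_t) char _buf[4096];
    size_t _used = 0;
public:
    void* alloc(size_t n) {
        const size_t a = alignof(std::max_align_t);
        const size_t rounded = (n + a - 1) & ~(a - 1);
        if (_used + rounded > sizeof(_buf)) throw std::bad_alloc();
        void* p = _buf + _used;
        _used += rounded;
        return p;
    }
};

// A compiler-resource-like object: the placement form of operator new puts
// the object into the arena, as in the second overload added above.
struct ToyNode {
    int value;
    explicit ToyNode(int v) : value(v) {}
    void* operator new(size_t size, ToyArena* arena) { return arena->alloc(size); }
    void  operator delete(void*, ToyArena*) {}  // matching placement delete
};

// Usage: ToyArena arena; ToyNode* n = new (&arena) ToyNode(42);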

@ -27,9 +27,6 @@
volatile int Compiler::_runtimes = uninitialized;
volatile bool Compiler::_compiling = false;
Compiler::Compiler() {
}
@ -39,47 +36,62 @@ Compiler::~Compiler() {
}
void Compiler::initialize_all() {
BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
Arena* arena = new Arena();
Runtime1::initialize(buffer_blob);
FrameMap::initialize();
// initialize data structures
ValueType::initialize(arena);
// Instruction::initialize();
// BlockBegin::initialize();
GraphBuilder::initialize();
// note: to use more than one instance of LinearScan at a time this function call has to
// be moved somewhere outside of this constructor:
Interval::initialize(arena);
}
void Compiler::initialize() {
if (_runtimes != initialized) {
initialize_runtimes( Runtime1::initialize, &_runtimes);
initialize_runtimes( initialize_all, &_runtimes);
}
mark_initialized();
}
BufferBlob* Compiler::build_buffer_blob() {
// setup CodeBuffer. Preallocate a BufferBlob of size
// NMethodSizeLimit plus some extra space for constants.
int code_buffer_size = Compilation::desired_max_code_buffer_size() +
Compilation::desired_max_constant_size();
BufferBlob* blob = BufferBlob::create("Compiler1 temporary CodeBuffer",
code_buffer_size);
guarantee(blob != NULL, "must create initial code buffer");
return blob;
}
void Compiler::compile_method(ciEnv* env, ciMethod* method, int entry_bci) {
// Allocate buffer blob once at startup since allocation for each
// compilation seems to be too expensive (at least on Intel win32).
BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
if (buffer_blob == NULL) {
buffer_blob = build_buffer_blob();
CompilerThread::current()->set_buffer_blob(buffer_blob);
}
if (!is_initialized()) {
initialize();
}
// invoke compilation
#ifdef TIERED
// We are thread in native here...
CompilerThread* thread = CompilerThread::current();
{
ThreadInVMfromNative tv(thread);
MutexLocker only_one (C1_lock, thread);
while ( _compiling) {
C1_lock->wait();
}
_compiling = true;
}
#endif // TIERED
{
// We are nested here because we need for the destructor
// of Compilation to occur before we release the any
// competing compiler thread
ResourceMark rm;
Compilation c(this, env, method, entry_bci);
Compilation c(this, env, method, entry_bci, buffer_blob);
}
#ifdef TIERED
{
ThreadInVMfromNative tv(thread);
MutexLocker only_one (C1_lock, thread);
_compiling = false;
C1_lock->notify();
}
#endif // TIERED
}

@ -31,10 +31,6 @@ class Compiler: public AbstractCompiler {
// Tracks whether runtime has been initialized
static volatile int _runtimes;
// In tiered it is possible for multiple threads to want to do compilation
// only one can enter c1 at a time
static volatile bool _compiling;
public:
// Creation
Compiler();
@ -47,6 +43,7 @@ class Compiler: public AbstractCompiler {
virtual bool is_c1() { return true; };
#endif // TIERED
BufferBlob* build_buffer_blob();
// Missing feature tests
virtual bool supports_native() { return true; }
@ -58,6 +55,7 @@ class Compiler: public AbstractCompiler {
// Initialization
virtual void initialize();
static void initialize_all();
// Compilation entry point for methods
virtual void compile_method(ciEnv* env, ciMethod* target, int entry_bci);

@ -153,7 +153,7 @@ int FrameMap::_cpu_reg2rnr [FrameMap::nof_cpu_regs];
FrameMap::FrameMap(ciMethod* method, int monitors, int reserved_argument_area_size) {
if (!_init_done) init();
assert(_init_done, "should already be completed");
_framesize = -1;
_num_spills = -1;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2006, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -150,6 +150,9 @@ class FrameMap : public CompilationResourceObj {
// Opr representing the stack_pointer on this platform
static LIR_Opr stack_pointer();
// JSR 292
static LIR_Opr method_handle_invoke_SP_save_opr();
static BasicTypeArray* signature_type_array_for(const ciMethod* method);
static BasicTypeArray* signature_type_array_for(const char * signature);
@ -232,7 +235,7 @@ class FrameMap : public CompilationResourceObj {
return _caller_save_fpu_regs[i];
}
static void init();
static void initialize();
};
// CallingConvention

@ -2438,13 +2438,13 @@ BlockEnd* GraphBuilder::iterate_bytecodes_for_block(int bci) {
case Bytecodes::_invokestatic : // fall through
case Bytecodes::_invokedynamic : // fall through
case Bytecodes::_invokeinterface: invoke(code); break;
case Bytecodes::_new : new_instance(s.get_index_big()); break;
case Bytecodes::_new : new_instance(s.get_index_u2()); break;
case Bytecodes::_newarray : new_type_array(); break;
case Bytecodes::_anewarray : new_object_array(); break;
case Bytecodes::_arraylength : ipush(append(new ArrayLength(apop(), lock_stack()))); break;
case Bytecodes::_athrow : throw_op(s.cur_bci()); break;
case Bytecodes::_checkcast : check_cast(s.get_index_big()); break;
case Bytecodes::_instanceof : instance_of(s.get_index_big()); break;
case Bytecodes::_checkcast : check_cast(s.get_index_u2()); break;
case Bytecodes::_instanceof : instance_of(s.get_index_u2()); break;
// Note: we do not have special handling for the monitorenter bytecode if DeoptC1 && DeoptOnAsyncException
case Bytecodes::_monitorenter : monitorenter(apop(), s.cur_bci()); break;
case Bytecodes::_monitorexit : monitorexit (apop(), s.cur_bci()); break;
@ -2530,16 +2530,10 @@ void GraphBuilder::iterate_all_blocks(bool start_in_current_block_for_inlining)
}
bool GraphBuilder::_is_initialized = false;
bool GraphBuilder::_can_trap [Bytecodes::number_of_java_codes];
bool GraphBuilder::_is_async[Bytecodes::number_of_java_codes];
void GraphBuilder::initialize() {
// make sure initialization happens only once (need a
// lock here, if we allow the compiler to be re-entrant)
if (is_initialized()) return;
_is_initialized = true;
// the following bytecodes are assumed to potentially
// throw exceptions in compiled code - note that e.g.
// monitorexit & the return bytecodes do not throw
@ -2855,7 +2849,6 @@ GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
BlockList* bci2block = blm.bci2block();
BlockBegin* start_block = bci2block->at(0);
assert(is_initialized(), "GraphBuilder must have been initialized");
push_root_scope(scope, bci2block, start_block);
// setup state for std entry

@ -162,7 +162,6 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
};
// for all GraphBuilders
static bool _is_initialized; // true if trap tables were initialized, false otherwise
static bool _can_trap[Bytecodes::number_of_java_codes];
static bool _is_async[Bytecodes::number_of_java_codes];
@ -268,7 +267,6 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
Instruction* append_split(StateSplit* instr);
// other helpers
static bool is_initialized() { return _is_initialized; }
static bool is_async(Bytecodes::Code code) {
assert(0 <= code && code < Bytecodes::number_of_java_codes, "illegal bytecode");
return _is_async[code];

@ -230,7 +230,8 @@ CodeEmitInfo::CodeEmitInfo(int bci, ValueStack* stack, XHandlers* exception_hand
, _stack(stack)
, _exception_handlers(exception_handlers)
, _next(NULL)
, _id(-1) {
, _id(-1)
, _is_method_handle_invoke(false) {
assert(_stack != NULL, "must be non null");
assert(_bci == SynchronizationEntryBCI || Bytecodes::is_defined(scope()->method()->java_code_at_bci(_bci)), "make sure bci points at a real bytecode");
}
@ -241,7 +242,8 @@ CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, bool lock_stack_only)
, _exception_handlers(NULL)
, _bci(info->_bci)
, _scope_debug_info(NULL)
, _oop_map(NULL) {
, _oop_map(NULL)
, _is_method_handle_invoke(info->_is_method_handle_invoke) {
if (lock_stack_only) {
if (info->_stack != NULL) {
_stack = info->_stack->copy_locks();
@ -259,10 +261,10 @@ CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, bool lock_stack_only)
}
void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool is_method_handle_invoke) {
void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset) {
// record the safepoint before recording the debug info for enclosing scopes
recorder->add_safepoint(pc_offset, _oop_map->deep_copy());
_scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/, is_method_handle_invoke);
_scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/, _is_method_handle_invoke);
recorder->end_safepoint(pc_offset);
}
@ -285,11 +287,6 @@ void CodeEmitInfo::add_register_oop(LIR_Opr opr) {
IR::IR(Compilation* compilation, ciMethod* method, int osr_bci) :
_locals_size(in_WordSize(-1))
, _num_loops(0) {
// initialize data structures
ValueType::initialize();
Instruction::initialize();
BlockBegin::initialize();
GraphBuilder::initialize();
// setup IR fields
_compilation = compilation;
_top_scope = new IRScope(compilation, NULL, -1, method, osr_bci, true);
@ -379,15 +376,15 @@ void IR::split_critical_edges() {
}
class UseCountComputer: public AllStatic {
class UseCountComputer: public ValueVisitor, BlockClosure {
private:
static void update_use_count(Value* n) {
void visit(Value* n) {
// Local instructions and Phis for expression stack values at the
// start of basic blocks are not added to the instruction list
if ((*n)->bci() == -99 && (*n)->as_Local() == NULL &&
(*n)->as_Phi() == NULL) {
assert(false, "a node was not appended to the graph");
Compilation::current_compilation()->bailout("a node was not appended to the graph");
Compilation::current()->bailout("a node was not appended to the graph");
}
// use n's input if not visited before
if (!(*n)->is_pinned() && !(*n)->has_uses()) {
@ -400,31 +397,31 @@ class UseCountComputer: public AllStatic {
(*n)->_use_count++;
}
static Values* worklist;
static int depth;
Values* worklist;
int depth;
enum {
max_recurse_depth = 20
};
static void uses_do(Value* n) {
void uses_do(Value* n) {
depth++;
if (depth > max_recurse_depth) {
// don't allow the traversal to recurse too deeply
worklist->push(*n);
} else {
(*n)->input_values_do(update_use_count);
(*n)->input_values_do(this);
// special handling for some instructions
if ((*n)->as_BlockEnd() != NULL) {
// note on BlockEnd:
// must 'use' the stack only if the method doesn't
// terminate, however, in those cases stack is empty
(*n)->state_values_do(update_use_count);
(*n)->state_values_do(this);
}
}
depth--;
}
static void basic_compute_use_count(BlockBegin* b) {
void block_do(BlockBegin* b) {
depth = 0;
// process all pinned nodes as the roots of expression trees
for (Instruction* n = b; n != NULL; n = n->next()) {
@ -447,18 +444,19 @@ class UseCountComputer: public AllStatic {
assert(depth == 0, "should have counted back down");
}
UseCountComputer() {
worklist = new Values();
depth = 0;
}
public:
static void compute(BlockList* blocks) {
worklist = new Values();
blocks->blocks_do(basic_compute_use_count);
worklist = NULL;
UseCountComputer ucc;
blocks->iterate_backward(&ucc);
}
};
Values* UseCountComputer::worklist = NULL;
int UseCountComputer::depth = 0;
// helper macro for short definition of trace-output inside code
#ifndef PRODUCT
#define TRACE_LINEAR_SCAN(level, code) \
@ -1300,7 +1298,7 @@ void IR::verify() {
#endif // PRODUCT
void SubstitutionResolver::substitute(Value* v) {
void SubstitutionResolver::visit(Value* v) {
Value v0 = *v;
if (v0) {
Value vs = v0->subst();
@ -1311,20 +1309,22 @@ void SubstitutionResolver::substitute(Value* v) {
}
#ifdef ASSERT
void check_substitute(Value* v) {
Value v0 = *v;
if (v0) {
Value vs = v0->subst();
assert(vs == v0, "missed substitution");
class SubstitutionChecker: public ValueVisitor {
void visit(Value* v) {
Value v0 = *v;
if (v0) {
Value vs = v0->subst();
assert(vs == v0, "missed substitution");
}
}
}
};
#endif
void SubstitutionResolver::block_do(BlockBegin* block) {
Instruction* last = NULL;
for (Instruction* n = block; n != NULL;) {
n->values_do(substitute);
n->values_do(this);
// need to remove this instruction from the instruction stream
if (n->subst() != n) {
assert(last != NULL, "must have last");
@ -1336,8 +1336,9 @@ void SubstitutionResolver::block_do(BlockBegin* block) {
}
#ifdef ASSERT
if (block->state()) block->state()->values_do(check_substitute);
block->block_values_do(check_substitute);
if (block->end() && block->end()->state()) block->end()->state()->values_do(check_substitute);
SubstitutionChecker check_substitute;
if (block->state()) block->state()->values_do(&check_substitute);
block->block_values_do(&check_substitute);
if (block->end() && block->end()->state()) block->end()->state()->values_do(&check_substitute);
#endif
}
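
The hunks above replace the old file-scope statics and "void f(Value*)" function pointers with small stack-allocated ValueVisitor closures, so each traversal carries its own state. A minimal standalone sketch of that pattern; the types below are simplified stand-ins, not the HotSpot classes:

// visitor_sketch.cpp -- illustration only, not HotSpot code
#include <cstdio>
#include <vector>

struct Value { int use_count = 0; };

// The closure interface: per-instance state replaces the old statics.
struct ValueVisitor {
  virtual void visit(Value** v) = 0;
  virtual ~ValueVisitor() {}
};

struct UseCounter : public ValueVisitor {
  int visited = 0;                              // was a static in the old scheme
  void visit(Value** v) { (*v)->use_count++; visited++; }
};

static void values_do(std::vector<Value*>& vals, ValueVisitor* f) {
  for (size_t i = 0; i < vals.size(); i++) f->visit(&vals[i]);
}

int main() {
  Value a, b;
  std::vector<Value*> vals = { &a, &b, &a };
  UseCounter uc;                                // one instance per traversal
  values_do(vals, &uc);
  std::printf("a=%d b=%d visited=%d\n", a.use_count, b.use_count, uc.visited);
  return 0;
}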

View File

@ -269,6 +269,7 @@ class CodeEmitInfo: public CompilationResourceObj {
int _bci;
CodeEmitInfo* _next;
int _id;
bool _is_method_handle_invoke; // true if the associated call site is a MethodHandle call site.
FrameMap* frame_map() const { return scope()->compilation()->frame_map(); }
Compilation* compilation() const { return scope()->compilation(); }
@ -287,7 +288,8 @@ class CodeEmitInfo: public CompilationResourceObj {
, _stack(NULL)
, _exception_handlers(NULL)
, _next(NULL)
, _id(-1) {
, _id(-1)
, _is_method_handle_invoke(false) {
}
// make a copy
@ -302,13 +304,16 @@ class CodeEmitInfo: public CompilationResourceObj {
int bci() const { return _bci; }
void add_register_oop(LIR_Opr opr);
void record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool is_method_handle_invoke = false);
void record_debug_info(DebugInformationRecorder* recorder, int pc_offset);
CodeEmitInfo* next() const { return _next; }
void set_next(CodeEmitInfo* next) { _next = next; }
int id() const { return _id; }
void set_id(int id) { _id = id; }
bool is_method_handle_invoke() const { return _is_method_handle_invoke; }
void set_is_method_handle_invoke(bool x) { _is_method_handle_invoke = x; }
};
@ -366,8 +371,8 @@ class IR: public CompilationResourceObj {
// instructions from the instruction list.
//
class SubstitutionResolver: public BlockClosure {
static void substitute(Value* v);
class SubstitutionResolver: public BlockClosure, ValueVisitor {
virtual void visit(Value* v);
public:
SubstitutionResolver(IR* hir) {
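
In the same spirit, the CodeEmitInfo hunks above make the MethodHandle flag part of the info object rather than an extra parameter on record_debug_info(). A rough stand-in sketch of that shape (simplified types, not the HotSpot code):

#include <cstdio>

struct Recorder {
  void add_safepoint(int pc_offset, bool is_method_handle_invoke) {
    std::printf("safepoint at %d, mh=%d\n", pc_offset, (int)is_method_handle_invoke);
  }
};

struct EmitInfo {
  bool is_method_handle_invoke = false;                 // set once when the call site is generated
  void record_debug_info(Recorder* r, int pc_offset) {  // no extra flag parameter any more
    r->add_safepoint(pc_offset, is_method_handle_invoke);
  }
};

int main() {
  Recorder rec;
  EmitInfo info;
  info.is_method_handle_invoke = true;   // analogous to set_is_method_handle_invoke(true)
  info.record_debug_info(&rec, 42);
  return 0;
}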

View File

@ -29,8 +29,6 @@
// Implementation of Instruction
int Instruction::_next_id = 0;
#ifdef ASSERT
void Instruction::create_hi_word() {
assert(type()->is_double_word() && _hi_word == NULL, "only double word has high word");
@ -193,22 +191,22 @@ ciType* CheckCast::exact_type() const {
}
void ArithmeticOp::other_values_do(void f(Value*)) {
void ArithmeticOp::other_values_do(ValueVisitor* f) {
if (lock_stack() != NULL) lock_stack()->values_do(f);
}
void NullCheck::other_values_do(void f(Value*)) {
void NullCheck::other_values_do(ValueVisitor* f) {
lock_stack()->values_do(f);
}
void AccessArray::other_values_do(void f(Value*)) {
void AccessArray::other_values_do(ValueVisitor* f) {
if (lock_stack() != NULL) lock_stack()->values_do(f);
}
// Implementation of AccessField
void AccessField::other_values_do(void f(Value*)) {
void AccessField::other_values_do(ValueVisitor* f) {
if (state_before() != NULL) state_before()->values_do(f);
if (lock_stack() != NULL) lock_stack()->values_do(f);
}
@ -270,7 +268,7 @@ bool LogicOp::is_commutative() const {
// Implementation of CompareOp
void CompareOp::other_values_do(void f(Value*)) {
void CompareOp::other_values_do(ValueVisitor* f) {
if (state_before() != NULL) state_before()->values_do(f);
}
@ -302,12 +300,12 @@ IRScope* StateSplit::scope() const {
}
void StateSplit::state_values_do(void f(Value*)) {
void StateSplit::state_values_do(ValueVisitor* f) {
if (state() != NULL) state()->values_do(f);
}
void BlockBegin::state_values_do(void f(Value*)) {
void BlockBegin::state_values_do(ValueVisitor* f) {
StateSplit::state_values_do(f);
if (is_set(BlockBegin::exception_entry_flag)) {
@ -318,13 +316,13 @@ void BlockBegin::state_values_do(void f(Value*)) {
}
void MonitorEnter::state_values_do(void f(Value*)) {
void MonitorEnter::state_values_do(ValueVisitor* f) {
StateSplit::state_values_do(f);
_lock_stack_before->values_do(f);
}
void Intrinsic::state_values_do(void f(Value*)) {
void Intrinsic::state_values_do(ValueVisitor* f) {
StateSplit::state_values_do(f);
if (lock_stack() != NULL) lock_stack()->values_do(f);
}
@ -349,8 +347,9 @@ Invoke::Invoke(Bytecodes::Code code, ValueType* result_type, Value recv, Values*
assert(args != NULL, "args must exist");
#ifdef ASSERT
values_do(assert_value);
#endif // ASSERT
AssertValues assert_value;
values_do(&assert_value);
#endif
// provide an initial guess of signature size.
_signature = new BasicTypeList(number_of_arguments() + (has_receiver() ? 1 : 0));
@ -368,7 +367,7 @@ Invoke::Invoke(Bytecodes::Code code, ValueType* result_type, Value recv, Values*
}
void Invoke::state_values_do(void f(Value*)) {
void Invoke::state_values_do(ValueVisitor* f) {
StateSplit::state_values_do(f);
if (state_before() != NULL) state_before()->values_do(f);
if (state() != NULL) state()->values_do(f);
@ -500,30 +499,27 @@ BlockBegin* Constant::compare(Instruction::Condition cond, Value right,
}
void Constant::other_values_do(void f(Value*)) {
void Constant::other_values_do(ValueVisitor* f) {
if (state() != NULL) state()->values_do(f);
}
// Implementation of NewArray
void NewArray::other_values_do(void f(Value*)) {
void NewArray::other_values_do(ValueVisitor* f) {
if (state_before() != NULL) state_before()->values_do(f);
}
// Implementation of TypeCheck
void TypeCheck::other_values_do(void f(Value*)) {
void TypeCheck::other_values_do(ValueVisitor* f) {
if (state_before() != NULL) state_before()->values_do(f);
}
// Implementation of BlockBegin
int BlockBegin::_next_block_id = 0;
void BlockBegin::set_end(BlockEnd* end) {
assert(end != NULL, "should not reset block end to NULL");
BlockEnd* old_end = _end;
@ -738,7 +734,7 @@ void BlockBegin::iterate_postorder(BlockClosure* closure) {
}
void BlockBegin::block_values_do(void f(Value*)) {
void BlockBegin::block_values_do(ValueVisitor* f) {
for (Instruction* n = this; n != NULL; n = n->next()) n->values_do(f);
}
@ -930,7 +926,7 @@ void BlockList::blocks_do(void f(BlockBegin*)) {
}
void BlockList::values_do(void f(Value*)) {
void BlockList::values_do(ValueVisitor* f) {
for (int i = length() - 1; i >= 0; i--) at(i)->block_values_do(f);
}
@ -973,7 +969,7 @@ void BlockEnd::substitute_sux(BlockBegin* old_sux, BlockBegin* new_sux) {
}
void BlockEnd::other_values_do(void f(Value*)) {
void BlockEnd::other_values_do(ValueVisitor* f) {
if (state_before() != NULL) state_before()->values_do(f);
}
@ -1012,6 +1008,6 @@ int Phi::operand_count() const {
// Implementation of Throw
void Throw::state_values_do(void f(Value*)) {
void Throw::state_values_do(ValueVisitor* f) {
BlockEnd::state_values_do(f);
}

View File

@ -116,6 +116,13 @@ class BlockClosure: public CompilationResourceObj {
};
// A simple closure class for visiting the values of an Instruction
class ValueVisitor: public StackObj {
public:
virtual void visit(Value* v) = 0;
};
// Some array and list classes
define_array(BlockBeginArray, BlockBegin*)
define_stack(_BlockList, BlockBeginArray)
@ -129,7 +136,7 @@ class BlockList: public _BlockList {
void iterate_forward(BlockClosure* closure);
void iterate_backward(BlockClosure* closure);
void blocks_do(void f(BlockBegin*));
void values_do(void f(Value*));
void values_do(ValueVisitor* f);
void print(bool cfg_only = false, bool live_only = false) PRODUCT_RETURN;
};
@ -264,8 +271,6 @@ class InstructionVisitor: public StackObj {
class Instruction: public CompilationResourceObj {
private:
static int _next_id; // the node counter
int _id; // the unique instruction id
int _bci; // the instruction bci
int _use_count; // the number of instructions referring to this value (w/o prev/next); only roots can have use count = 0 or > 1
@ -283,6 +288,7 @@ class Instruction: public CompilationResourceObj {
#endif
friend class UseCountComputer;
friend class BlockBegin;
protected:
void set_bci(int bci) { assert(bci == SynchronizationEntryBCI || bci >= 0, "illegal bci"); _bci = bci; }
@ -292,6 +298,13 @@ class Instruction: public CompilationResourceObj {
}
public:
void* operator new(size_t size) {
Compilation* c = Compilation::current();
void* res = c->arena()->Amalloc(size);
((Instruction*)res)->_id = c->get_next_id();
return res;
}
enum InstructionFlag {
NeedsNullCheckFlag = 0,
CanTrapFlag,
@ -338,13 +351,13 @@ class Instruction: public CompilationResourceObj {
static Condition negate(Condition cond);
// initialization
static void initialize() { _next_id = 0; }
static int number_of_instructions() { return _next_id; }
static int number_of_instructions() {
return Compilation::current()->number_of_instructions();
}
// creation
Instruction(ValueType* type, bool type_is_constant = false, bool create_hi = true)
: _id(_next_id++)
, _bci(-99)
: _bci(-99)
, _use_count(0)
, _pin_state(0)
, _type(type)
@ -479,10 +492,10 @@ class Instruction: public CompilationResourceObj {
virtual bool can_trap() const { return false; }
virtual void input_values_do(void f(Value*)) = 0;
virtual void state_values_do(void f(Value*)) { /* usually no state - override on demand */ }
virtual void other_values_do(void f(Value*)) { /* usually no other - override on demand */ }
void values_do(void f(Value*)) { input_values_do(f); state_values_do(f); other_values_do(f); }
virtual void input_values_do(ValueVisitor* f) = 0;
virtual void state_values_do(ValueVisitor* f) { /* usually no state - override on demand */ }
virtual void other_values_do(ValueVisitor* f) { /* usually no other - override on demand */ }
void values_do(ValueVisitor* f) { input_values_do(f); state_values_do(f); other_values_do(f); }
virtual ciType* exact_type() const { return NULL; }
virtual ciType* declared_type() const { return NULL; }
@ -517,9 +530,12 @@ class Instruction: public CompilationResourceObj {
// Debugging support
#ifdef ASSERT
static void assert_value(Value* x) { assert((*x) != NULL, "value must exist"); }
#define ASSERT_VALUES values_do(assert_value);
class AssertValues: public ValueVisitor {
void visit(Value* x) { assert((*x) != NULL, "value must exist"); }
};
#define ASSERT_VALUES { AssertValues assert_value; values_do(&assert_value); }
#else
#define ASSERT_VALUES
#endif // ASSERT
@ -555,7 +571,7 @@ LEAF(HiWord, Instruction)
void make_illegal() { set_type(illegalType); }
// generic
virtual void input_values_do(void f(Value*)) { ShouldNotReachHere(); }
virtual void input_values_do(ValueVisitor* f) { ShouldNotReachHere(); }
};
@ -615,7 +631,7 @@ LEAF(Phi, Instruction)
}
// generic
virtual void input_values_do(void f(Value*)) {
virtual void input_values_do(ValueVisitor* f) {
}
};
@ -635,7 +651,7 @@ LEAF(Local, Instruction)
int java_index() const { return _java_index; }
// generic
virtual void input_values_do(void f(Value*)) { /* no values */ }
virtual void input_values_do(ValueVisitor* f) { /* no values */ }
};
@ -663,8 +679,8 @@ LEAF(Constant, Instruction)
// generic
virtual bool can_trap() const { return state() != NULL; }
virtual void input_values_do(void f(Value*)) { /* no values */ }
virtual void other_values_do(void f(Value*));
virtual void input_values_do(ValueVisitor* f) { /* no values */ }
virtual void other_values_do(ValueVisitor* f);
virtual intx hash() const;
virtual bool is_equal(Value v) const;
@ -734,8 +750,8 @@ BASE(AccessField, Instruction)
// generic
virtual bool can_trap() const { return needs_null_check() || needs_patching(); }
virtual void input_values_do(void f(Value*)) { f(&_obj); }
virtual void other_values_do(void f(Value*));
virtual void input_values_do(ValueVisitor* f) { f->visit(&_obj); }
virtual void other_values_do(ValueVisitor* f);
};
@ -776,7 +792,7 @@ LEAF(StoreField, AccessField)
bool needs_write_barrier() const { return check_flag(NeedsWriteBarrierFlag); }
// generic
virtual void input_values_do(void f(Value*)) { AccessField::input_values_do(f); f(&_value); }
virtual void input_values_do(ValueVisitor* f) { AccessField::input_values_do(f); f->visit(&_value); }
};
@ -804,8 +820,8 @@ BASE(AccessArray, Instruction)
// generic
virtual bool can_trap() const { return needs_null_check(); }
virtual void input_values_do(void f(Value*)) { f(&_array); }
virtual void other_values_do(void f(Value*));
virtual void input_values_do(ValueVisitor* f) { f->visit(&_array); }
virtual void other_values_do(ValueVisitor* f);
};
@ -857,7 +873,7 @@ BASE(AccessIndexed, AccessArray)
bool compute_needs_range_check();
// generic
virtual void input_values_do(void f(Value*)) { AccessArray::input_values_do(f); f(&_index); if (_length != NULL) f(&_length); }
virtual void input_values_do(ValueVisitor* f) { AccessArray::input_values_do(f); f->visit(&_index); if (_length != NULL) f->visit(&_length); }
};
@ -909,7 +925,7 @@ LEAF(StoreIndexed, AccessIndexed)
bool needs_store_check() const { return check_flag(NeedsStoreCheckFlag); }
// generic
virtual void input_values_do(void f(Value*)) { AccessIndexed::input_values_do(f); f(&_value); }
virtual void input_values_do(ValueVisitor* f) { AccessIndexed::input_values_do(f); f->visit(&_value); }
};
@ -927,7 +943,7 @@ LEAF(NegateOp, Instruction)
Value x() const { return _x; }
// generic
virtual void input_values_do(void f(Value*)) { f(&_x); }
virtual void input_values_do(ValueVisitor* f) { f->visit(&_x); }
};
@ -956,7 +972,7 @@ BASE(Op2, Instruction)
// generic
virtual bool is_commutative() const { return false; }
virtual void input_values_do(void f(Value*)) { f(&_x); f(&_y); }
virtual void input_values_do(ValueVisitor* f) { f->visit(&_x); f->visit(&_y); }
};
@ -982,7 +998,7 @@ LEAF(ArithmeticOp, Op2)
// generic
virtual bool is_commutative() const;
virtual bool can_trap() const;
virtual void other_values_do(void f(Value*));
virtual void other_values_do(ValueVisitor* f);
HASHING3(Op2, true, op(), x()->subst(), y()->subst())
};
@ -1023,7 +1039,7 @@ LEAF(CompareOp, Op2)
// generic
HASHING3(Op2, true, op(), x()->subst(), y()->subst())
virtual void other_values_do(void f(Value*));
virtual void other_values_do(ValueVisitor* f);
};
@ -1051,7 +1067,7 @@ LEAF(IfOp, Op2)
Value fval() const { return _fval; }
// generic
virtual void input_values_do(void f(Value*)) { Op2::input_values_do(f); f(&_tval); f(&_fval); }
virtual void input_values_do(ValueVisitor* f) { Op2::input_values_do(f); f->visit(&_tval); f->visit(&_fval); }
};
@ -1071,7 +1087,7 @@ LEAF(Convert, Instruction)
Value value() const { return _value; }
// generic
virtual void input_values_do(void f(Value*)) { f(&_value); }
virtual void input_values_do(ValueVisitor* f) { f->visit(&_value); }
HASHING2(Convert, true, op(), value()->subst())
};
@ -1100,8 +1116,8 @@ LEAF(NullCheck, Instruction)
// generic
virtual bool can_trap() const { return check_flag(CanTrapFlag); /* null-check elimination sets to false */ }
virtual void input_values_do(void f(Value*)) { f(&_obj); }
virtual void other_values_do(void f(Value*));
virtual void input_values_do(ValueVisitor* f) { f->visit(&_obj); }
virtual void other_values_do(ValueVisitor* f);
HASHING1(NullCheck, true, obj()->subst())
};
@ -1127,8 +1143,8 @@ BASE(StateSplit, Instruction)
void set_state(ValueStack* state) { _state = state; }
// generic
virtual void input_values_do(void f(Value*)) { /* no values */ }
virtual void state_values_do(void f(Value*));
virtual void input_values_do(ValueVisitor* f) { /* no values */ }
virtual void state_values_do(ValueVisitor* f);
};
@ -1169,12 +1185,12 @@ LEAF(Invoke, StateSplit)
// generic
virtual bool can_trap() const { return true; }
virtual void input_values_do(void f(Value*)) {
virtual void input_values_do(ValueVisitor* f) {
StateSplit::input_values_do(f);
if (has_receiver()) f(&_recv);
for (int i = 0; i < _args->length(); i++) f(_args->adr_at(i));
if (has_receiver()) f->visit(&_recv);
for (int i = 0; i < _args->length(); i++) f->visit(_args->adr_at(i));
}
virtual void state_values_do(void f(Value*));
virtual void state_values_do(ValueVisitor *f);
};
@ -1212,8 +1228,8 @@ BASE(NewArray, StateSplit)
// generic
virtual bool can_trap() const { return true; }
virtual void input_values_do(void f(Value*)) { StateSplit::input_values_do(f); f(&_length); }
virtual void other_values_do(void f(Value*));
virtual void input_values_do(ValueVisitor* f) { StateSplit::input_values_do(f); f->visit(&_length); }
virtual void other_values_do(ValueVisitor* f);
};
@ -1262,7 +1278,7 @@ LEAF(NewMultiArray, NewArray)
int rank() const { return dims()->length(); }
// generic
virtual void input_values_do(void f(Value*)) {
virtual void input_values_do(ValueVisitor* f) {
// NOTE: we do not call NewArray::input_values_do since "length"
// is meaningless for a multi-dimensional array; passing the
// zeroth element down to NewArray as its length is a bad idea
@ -1270,7 +1286,7 @@ LEAF(NewMultiArray, NewArray)
// get updated, and the value must not be traversed twice. Was bug
// - kbr 4/10/2001
StateSplit::input_values_do(f);
for (int i = 0; i < _dims->length(); i++) f(_dims->adr_at(i));
for (int i = 0; i < _dims->length(); i++) f->visit(_dims->adr_at(i));
}
};
@ -1300,8 +1316,8 @@ BASE(TypeCheck, StateSplit)
// generic
virtual bool can_trap() const { return true; }
virtual void input_values_do(void f(Value*)) { StateSplit::input_values_do(f); f(&_obj); }
virtual void other_values_do(void f(Value*));
virtual void input_values_do(ValueVisitor* f) { StateSplit::input_values_do(f); f->visit(&_obj); }
virtual void other_values_do(ValueVisitor* f);
};
@ -1366,7 +1382,7 @@ BASE(AccessMonitor, StateSplit)
int monitor_no() const { return _monitor_no; }
// generic
virtual void input_values_do(void f(Value*)) { StateSplit::input_values_do(f); f(&_obj); }
virtual void input_values_do(ValueVisitor* f) { StateSplit::input_values_do(f); f->visit(&_obj); }
};
@ -1385,7 +1401,7 @@ LEAF(MonitorEnter, AccessMonitor)
// accessors
ValueStack* lock_stack_before() const { return _lock_stack_before; }
virtual void state_values_do(void f(Value*));
virtual void state_values_do(ValueVisitor* f);
// generic
virtual bool can_trap() const { return true; }
@ -1454,11 +1470,11 @@ LEAF(Intrinsic, StateSplit)
// generic
virtual bool can_trap() const { return check_flag(CanTrapFlag); }
virtual void input_values_do(void f(Value*)) {
virtual void input_values_do(ValueVisitor* f) {
StateSplit::input_values_do(f);
for (int i = 0; i < _args->length(); i++) f(_args->adr_at(i));
for (int i = 0; i < _args->length(); i++) f->visit(_args->adr_at(i));
}
virtual void state_values_do(void f(Value*));
virtual void state_values_do(ValueVisitor* f);
};
@ -1467,8 +1483,6 @@ class LIR_List;
LEAF(BlockBegin, StateSplit)
private:
static int _next_block_id; // the block counter
int _block_id; // the unique block id
int _depth_first_number; // number of this block in a depth-first ordering
int _linear_scan_number; // number of this block in linear-scan ordering
@ -1510,14 +1524,22 @@ LEAF(BlockBegin, StateSplit)
friend class SuxAndWeightAdjuster;
public:
void* operator new(size_t size) {
Compilation* c = Compilation::current();
void* res = c->arena()->Amalloc(size);
((BlockBegin*)res)->_id = c->get_next_id();
((BlockBegin*)res)->_block_id = c->get_next_block_id();
return res;
}
// initialization/counting
static void initialize() { _next_block_id = 0; }
static int number_of_blocks() { return _next_block_id; }
static int number_of_blocks() {
return Compilation::current()->number_of_blocks();
}
// creation
BlockBegin(int bci)
: StateSplit(illegalType)
, _block_id(_next_block_id++)
, _depth_first_number(-1)
, _linear_scan_number(-1)
, _loop_depth(0)
@ -1592,7 +1614,7 @@ LEAF(BlockBegin, StateSplit)
void init_stores_to_locals(int locals_count) { _stores_to_locals = BitMap(locals_count); _stores_to_locals.clear(); }
// generic
virtual void state_values_do(void f(Value*));
virtual void state_values_do(ValueVisitor* f);
// successors and predecessors
int number_of_sux() const;
@ -1646,7 +1668,7 @@ LEAF(BlockBegin, StateSplit)
void iterate_preorder (BlockClosure* closure);
void iterate_postorder (BlockClosure* closure);
void block_values_do(void f(Value*));
void block_values_do(ValueVisitor* f);
// loops
void set_loop_index(int ix) { _loop_index = ix; }
@ -1698,7 +1720,7 @@ BASE(BlockEnd, StateSplit)
void set_begin(BlockBegin* begin);
// generic
virtual void other_values_do(void f(Value*));
virtual void other_values_do(ValueVisitor* f);
// successors
int number_of_sux() const { return _sux != NULL ? _sux->length() : 0; }
@ -1787,7 +1809,7 @@ LEAF(If, BlockEnd)
void set_profiled_bci(int bci) { _profiled_bci = bci; }
// generic
virtual void input_values_do(void f(Value*)) { BlockEnd::input_values_do(f); f(&_x); f(&_y); }
virtual void input_values_do(ValueVisitor* f) { BlockEnd::input_values_do(f); f->visit(&_x); f->visit(&_y); }
};
@ -1841,7 +1863,7 @@ LEAF(IfInstanceOf, BlockEnd)
}
// generic
virtual void input_values_do(void f(Value*)) { BlockEnd::input_values_do(f); f(&_obj); }
virtual void input_values_do(ValueVisitor* f) { BlockEnd::input_values_do(f); f->visit(&_obj); }
};
@ -1863,7 +1885,7 @@ BASE(Switch, BlockEnd)
int length() const { return number_of_sux() - 1; }
// generic
virtual void input_values_do(void f(Value*)) { BlockEnd::input_values_do(f); f(&_tag); }
virtual void input_values_do(ValueVisitor* f) { BlockEnd::input_values_do(f); f->visit(&_tag); }
};
@ -1916,9 +1938,9 @@ LEAF(Return, BlockEnd)
bool has_result() const { return result() != NULL; }
// generic
virtual void input_values_do(void f(Value*)) {
virtual void input_values_do(ValueVisitor* f) {
BlockEnd::input_values_do(f);
if (has_result()) f(&_result);
if (has_result()) f->visit(&_result);
}
};
@ -1938,8 +1960,8 @@ LEAF(Throw, BlockEnd)
// generic
virtual bool can_trap() const { return true; }
virtual void input_values_do(void f(Value*)) { BlockEnd::input_values_do(f); f(&_exception); }
virtual void state_values_do(void f(Value*));
virtual void input_values_do(ValueVisitor* f) { BlockEnd::input_values_do(f); f->visit(&_exception); }
virtual void state_values_do(ValueVisitor* f);
};
@ -1971,7 +1993,7 @@ LEAF(OsrEntry, Instruction)
#endif
// generic
virtual void input_values_do(void f(Value*)) { }
virtual void input_values_do(ValueVisitor* f) { }
};
@ -1984,7 +2006,7 @@ LEAF(ExceptionObject, Instruction)
}
// generic
virtual void input_values_do(void f(Value*)) { }
virtual void input_values_do(ValueVisitor* f) { }
};
@ -2008,7 +2030,7 @@ LEAF(RoundFP, Instruction)
Value input() const { return _input; }
// generic
virtual void input_values_do(void f(Value*)) { f(&_input); }
virtual void input_values_do(ValueVisitor* f) { f->visit(&_input); }
};
@ -2033,8 +2055,8 @@ BASE(UnsafeOp, Instruction)
BasicType basic_type() { return _basic_type; }
// generic
virtual void input_values_do(void f(Value*)) { }
virtual void other_values_do(void f(Value*)) { }
virtual void input_values_do(ValueVisitor* f) { }
virtual void other_values_do(ValueVisitor* f) { }
};
@ -2078,9 +2100,9 @@ BASE(UnsafeRawOp, UnsafeOp)
void set_log2_scale(int log2_scale) { _log2_scale = log2_scale; }
// generic
virtual void input_values_do(void f(Value*)) { UnsafeOp::input_values_do(f);
f(&_base);
if (has_index()) f(&_index); }
virtual void input_values_do(ValueVisitor* f) { UnsafeOp::input_values_do(f);
f->visit(&_base);
if (has_index()) f->visit(&_index); }
};
@ -2128,8 +2150,8 @@ LEAF(UnsafePutRaw, UnsafeRawOp)
Value value() { return _value; }
// generic
virtual void input_values_do(void f(Value*)) { UnsafeRawOp::input_values_do(f);
f(&_value); }
virtual void input_values_do(ValueVisitor* f) { UnsafeRawOp::input_values_do(f);
f->visit(&_value); }
};
@ -2149,9 +2171,9 @@ BASE(UnsafeObjectOp, UnsafeOp)
Value offset() { return _offset; }
bool is_volatile() { return _is_volatile; }
// generic
virtual void input_values_do(void f(Value*)) { UnsafeOp::input_values_do(f);
f(&_object);
f(&_offset); }
virtual void input_values_do(ValueVisitor* f) { UnsafeOp::input_values_do(f);
f->visit(&_object);
f->visit(&_offset); }
};
@ -2180,8 +2202,8 @@ LEAF(UnsafePutObject, UnsafeObjectOp)
Value value() { return _value; }
// generic
virtual void input_values_do(void f(Value*)) { UnsafeObjectOp::input_values_do(f);
f(&_value); }
virtual void input_values_do(ValueVisitor* f) { UnsafeObjectOp::input_values_do(f);
f->visit(&_value); }
};
@ -2238,7 +2260,7 @@ LEAF(ProfileCall, Instruction)
Value recv() { return _recv; }
ciKlass* known_holder() { return _known_holder; }
virtual void input_values_do(void f(Value*)) { if (_recv != NULL) f(&_recv); }
virtual void input_values_do(ValueVisitor* f) { if (_recv != NULL) f->visit(&_recv); }
};
@ -2266,7 +2288,7 @@ LEAF(ProfileCounter, Instruction)
int offset() { return _offset; }
int increment() { return _increment; }
virtual void input_values_do(void f(Value*)) { f(&_mdo); }
virtual void input_values_do(ValueVisitor* f) { f->visit(&_mdo); }
};
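
The Instruction and BlockBegin hunks above move id assignment from static counters into an overloaded operator new that allocates out of the current compilation's arena. A self-contained sketch of the same trick, with simplified stand-in types (the real code goes through Compilation::current()->arena()->Amalloc):

#include <cstdlib>
#include <cstdio>

struct Compilation {
  int next_id = 0;
  static Compilation* current_;                 // assumed per-thread in HotSpot
  static Compilation* current() { return current_; }
  int get_next_id() { return next_id++; }
  void* alloc(size_t size) { return std::malloc(size); }   // stands in for arena allocation
};
Compilation* Compilation::current_ = nullptr;

struct Node {
  int id;
  void* operator new(size_t size) {
    Compilation* c = Compilation::current();
    void* res = c->alloc(size);
    static_cast<Node*>(res)->id = c->get_next_id();   // stamp the id before the ctor runs
    return res;
  }
  void operator delete(void* p) { std::free(p); }      // arenas would skip this entirely
};

int main() {
  Compilation comp;
  Compilation::current_ = &comp;
  Node* a = new Node;      // no parentheses: default-init keeps the id written above
  Node* b = new Node;
  std::printf("a->id=%d b->id=%d\n", a->id, b->id);
  delete a; delete b;
  return 0;
}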

View File

@ -715,7 +715,10 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
}
if (opJavaCall->_info) do_info(opJavaCall->_info);
if (opJavaCall->is_method_handle_invoke()) do_temp(FrameMap::method_handle_invoke_SP_save_opr());
if (opJavaCall->is_method_handle_invoke()) {
opJavaCall->_method_handle_invoke_SP_save_opr = FrameMap::method_handle_invoke_SP_save_opr();
do_temp(opJavaCall->_method_handle_invoke_SP_save_opr);
}
do_call();
if (opJavaCall->_result->is_valid()) do_output(opJavaCall->_result);

View File

@ -505,15 +505,22 @@ class LIR_Address: public LIR_OprPtr {
, _type(type)
, _disp(0) { verify(); }
LIR_Address(LIR_Opr base, int disp, BasicType type):
LIR_Address(LIR_Opr base, intx disp, BasicType type):
_base(base)
, _index(LIR_OprDesc::illegalOpr())
, _scale(times_1)
, _type(type)
, _disp(disp) { verify(); }
LIR_Address(LIR_Opr base, BasicType type):
_base(base)
, _index(LIR_OprDesc::illegalOpr())
, _scale(times_1)
, _type(type)
, _disp(0) { verify(); }
#ifdef X86
LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, int disp, BasicType type):
LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, intx disp, BasicType type):
_base(base)
, _index(index)
, _scale(scale)
@ -1033,8 +1040,9 @@ class LIR_OpJavaCall: public LIR_OpCall {
friend class LIR_OpVisitState;
private:
ciMethod* _method;
LIR_Opr _receiver;
ciMethod* _method;
LIR_Opr _receiver;
LIR_Opr _method_handle_invoke_SP_save_opr; // Used in LIR_OpVisitState::visit to store the reference to FrameMap::method_handle_invoke_SP_save_opr.
public:
LIR_OpJavaCall(LIR_Code code, ciMethod* method,
@ -1043,14 +1051,18 @@ class LIR_OpJavaCall: public LIR_OpCall {
CodeEmitInfo* info)
: LIR_OpCall(code, addr, result, arguments, info)
, _receiver(receiver)
, _method(method) { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
, _method(method)
, _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
{ assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
LIR_OpJavaCall(LIR_Code code, ciMethod* method,
LIR_Opr receiver, LIR_Opr result, intptr_t vtable_offset,
LIR_OprList* arguments, CodeEmitInfo* info)
: LIR_OpCall(code, (address)vtable_offset, result, arguments, info)
, _receiver(receiver)
, _method(method) { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
, _method(method)
, _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
{ assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
LIR_Opr receiver() const { return _receiver; }
ciMethod* method() const { return _method; }

View File

@ -301,9 +301,9 @@ void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
}
void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool is_method_handle_invoke) {
void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
flush_debug_info(pc_offset);
cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset, is_method_handle_invoke);
cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
if (cinfo->exception_handlers() != NULL) {
compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
}
@ -413,12 +413,6 @@ void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
verify_oop_map(op->info());
// JSR 292
// Preserve the SP over MethodHandle call sites.
if (op->is_method_handle_invoke()) {
preserve_SP(op);
}
if (os::is_MP()) {
// must align calls sites, otherwise they can't be updated atomically on MP hardware
align_call(op->code());
@ -444,10 +438,6 @@ void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
default: ShouldNotReachHere();
}
if (op->is_method_handle_invoke()) {
restore_SP(op);
}
#if defined(X86) && defined(TIERED)
// C2 leave fpu stack dirty clean it
if (UseSSE < 2) {

View File

@ -84,7 +84,7 @@ class LIR_Assembler: public CompilationResourceObj {
Address as_Address_hi(LIR_Address* addr);
// debug information
void add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool is_method_handle_invoke = false);
void add_call_info(int pc_offset, CodeEmitInfo* cinfo);
void add_debug_info_for_branch(CodeEmitInfo* info);
void add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo);
void add_debug_info_for_div0_here(CodeEmitInfo* info);
@ -212,10 +212,6 @@ class LIR_Assembler: public CompilationResourceObj {
void ic_call( LIR_OpJavaCall* op);
void vtable_call( LIR_OpJavaCall* op);
// JSR 292
void preserve_SP(LIR_OpJavaCall* op);
void restore_SP( LIR_OpJavaCall* op);
void osr_entry();
void build_frame();

View File

@ -304,7 +304,7 @@ void LIRGenerator::block_do_prolog(BlockBegin* block) {
__ branch_destination(block->label());
if (LIRTraceExecution &&
Compilation::current_compilation()->hir()->start()->block_id() != block->block_id() &&
Compilation::current()->hir()->start()->block_id() != block->block_id() &&
!block->is_set(BlockBegin::exception_entry_flag)) {
assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
trace_block_entry(block);
@ -1309,7 +1309,7 @@ void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patc
__ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
if (!addr_opr->is_address()) {
assert(addr_opr->is_register(), "must be");
addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, 0, T_OBJECT));
addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
}
CodeStub* slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code,
info);
@ -1325,7 +1325,7 @@ void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_Opr
new_val->as_constant_ptr()->as_jobject() == NULL) return;
if (!new_val->is_register()) {
LIR_Opr new_val_reg = new_pointer_register();
LIR_Opr new_val_reg = new_register(T_OBJECT);
if (new_val->is_constant()) {
__ move(new_val, new_val_reg);
} else {
@ -1337,7 +1337,7 @@ void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_Opr
if (addr->is_address()) {
LIR_Address* address = addr->as_address_ptr();
LIR_Opr ptr = new_pointer_register();
LIR_Opr ptr = new_register(T_OBJECT);
if (!address->index()->is_valid() && address->disp() == 0) {
__ move(address->base(), ptr);
} else {
@ -1350,7 +1350,6 @@ void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_Opr
LIR_Opr xor_res = new_pointer_register();
LIR_Opr xor_shift_res = new_pointer_register();
if (TwoOperandLIRForm ) {
__ move(addr, xor_res);
__ logical_xor(xor_res, new_val, xor_res);
@ -1368,7 +1367,7 @@ void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_Opr
}
if (!new_val->is_register()) {
LIR_Opr new_val_reg = new_pointer_register();
LIR_Opr new_val_reg = new_register(T_OBJECT);
__ leal(new_val, new_val_reg);
new_val = new_val_reg;
}
@ -1377,7 +1376,7 @@ void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_Opr
__ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));
CodeStub* slow = new G1PostBarrierStub(addr, new_val);
__ branch(lir_cond_notEqual, T_INT, slow);
__ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
__ branch_destination(slow->continuation());
}
@ -2371,9 +2370,17 @@ void LIRGenerator::do_Invoke(Invoke* x) {
bool optimized = x->target_is_loaded() && x->target_is_final();
assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
// JSR 292
// Preserve the SP over MethodHandle call sites.
ciMethod* target = x->target();
if (target->is_method_handle_invoke()) {
info->set_is_method_handle_invoke(true);
__ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
}
switch (x->code()) {
case Bytecodes::_invokestatic:
__ call_static(x->target(), result_register,
__ call_static(target, result_register,
SharedRuntime::get_resolve_static_call_stub(),
arg_list, info);
break;
@ -2383,17 +2390,17 @@ void LIRGenerator::do_Invoke(Invoke* x) {
// for final target we still produce an inline cache, in order
// to be able to call mixed mode
if (x->code() == Bytecodes::_invokespecial || optimized) {
__ call_opt_virtual(x->target(), receiver, result_register,
__ call_opt_virtual(target, receiver, result_register,
SharedRuntime::get_resolve_opt_virtual_call_stub(),
arg_list, info);
} else if (x->vtable_index() < 0) {
__ call_icvirtual(x->target(), receiver, result_register,
__ call_icvirtual(target, receiver, result_register,
SharedRuntime::get_resolve_virtual_call_stub(),
arg_list, info);
} else {
int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
__ call_virtual(x->target(), receiver, result_register, vtable_offset, arg_list, info);
__ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
}
break;
case Bytecodes::_invokedynamic: {
@ -2432,7 +2439,7 @@ void LIRGenerator::do_Invoke(Invoke* x) {
// Load target MethodHandle from CallSite object.
__ load(new LIR_Address(tmp, java_dyn_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
__ call_dynamic(x->target(), receiver, result_register,
__ call_dynamic(target, receiver, result_register,
SharedRuntime::get_resolve_opt_virtual_call_stub(),
arg_list, info);
break;
@ -2442,6 +2449,12 @@ void LIRGenerator::do_Invoke(Invoke* x) {
break;
}
// JSR 292
// Restore the SP after MethodHandle call sites.
if (target->is_method_handle_invoke()) {
__ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
}
if (x->type()->is_float() || x->type()->is_double()) {
// Force rounding of results from non-strictfp when in strictfp
// scope (or when we don't know the strictness of the callee, to
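
The do_Invoke hunk above now brackets MethodHandle call sites with explicit SP save/restore moves in the LIR stream, instead of the old preserve_SP/restore_SP hooks in the assembler. A toy model of that bracketing, with plain structs standing in for LIR (illustrative only):

#include <cstdio>
#include <string>
#include <vector>

struct Op { std::string text; };

struct LIRList {
  std::vector<Op> ops;
  void move(const std::string& src, const std::string& dst) { ops.push_back({"move " + src + " -> " + dst}); }
  void call(const std::string& target)                      { ops.push_back({"call " + target}); }
};

static void emit_invoke(LIRList& lir, const std::string& target, bool is_method_handle_invoke) {
  if (is_method_handle_invoke) lir.move("SP", "MH_SP_save");   // preserve SP before the call
  lir.call(target);
  if (is_method_handle_invoke) lir.move("MH_SP_save", "SP");   // restore SP afterwards
}

int main() {
  LIRList lir;
  emit_invoke(lir, "java.dyn.MethodHandle::invokeExact", true);
  for (size_t i = 0; i < lir.ops.size(); i++) std::printf("%s\n", lir.ops[i].text.c_str());
  return 0;
}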

View File

@ -84,10 +84,6 @@ LinearScan::LinearScan(IR* ir, LIRGenerator* gen, FrameMap* frame_map)
, _fpu_stack_allocator(NULL)
#endif
{
// note: to use more than on instance of LinearScan at a time this function call has to
// be moved somewhere outside of this constructor:
Interval::initialize();
assert(this->ir() != NULL, "check if valid");
assert(this->compilation() != NULL, "check if valid");
assert(this->gen() != NULL, "check if valid");
@ -3929,8 +3925,8 @@ Range::Range(int from, int to, Range* next) :
// initialize sentinel
Range* Range::_end = NULL;
void Range::initialize() {
_end = new Range(max_jint, max_jint, NULL);
void Range::initialize(Arena* arena) {
_end = new (arena) Range(max_jint, max_jint, NULL);
}
int Range::intersects_at(Range* r2) const {
@ -3976,9 +3972,9 @@ void Range::print(outputStream* out) const {
// initialize sentinel
Interval* Interval::_end = NULL;
void Interval::initialize() {
Range::initialize();
_end = new Interval(-1);
void Interval::initialize(Arena* arena) {
Range::initialize(arena);
_end = new (arena) Interval(-1);
}
Interval::Interval(int reg_num) :

View File

@ -462,7 +462,7 @@ class Range : public CompilationResourceObj {
public:
Range(int from, int to, Range* next);
static void initialize();
static void initialize(Arena* arena);
static Range* end() { return _end; }
int from() const { return _from; }
@ -529,7 +529,7 @@ class Interval : public CompilationResourceObj {
public:
Interval(int reg_num);
static void initialize();
static void initialize(Arena* arena);
static Interval* end() { return _end; }
// accessors

View File

@ -437,11 +437,8 @@ public:
// Because of a static contained within (for the purpose of iteration
// over instructions), it is only valid to have one of these active at
// a time
class NullCheckEliminator {
class NullCheckEliminator: public ValueVisitor {
private:
static NullCheckEliminator* _static_nce;
static void do_value(Value* vp);
Optimizer* _opt;
ValueSet* _visitable_instructions; // Visit each instruction only once per basic block
@ -504,6 +501,8 @@ class NullCheckEliminator {
// Process a graph
void iterate(BlockBegin* root);
void visit(Value* f);
// In some situations (like NullCheck(x); getfield(x)) the debug
// information from the explicit NullCheck can be used to populate
// the getfield, even if the two instructions are in different
@ -602,14 +601,11 @@ void NullCheckVisitor::do_ProfileCall (ProfileCall* x) { nce()->clear_las
void NullCheckVisitor::do_ProfileCounter (ProfileCounter* x) {}
NullCheckEliminator* NullCheckEliminator::_static_nce = NULL;
void NullCheckEliminator::do_value(Value* p) {
void NullCheckEliminator::visit(Value* p) {
assert(*p != NULL, "should not find NULL instructions");
if (_static_nce->visitable(*p)) {
_static_nce->mark_visited(*p);
(*p)->visit(&_static_nce->_visitor);
if (visitable(*p)) {
mark_visited(*p);
(*p)->visit(&_visitor);
}
}
@ -637,7 +633,6 @@ void NullCheckEliminator::iterate_all() {
void NullCheckEliminator::iterate_one(BlockBegin* block) {
_static_nce = this;
clear_visitable_state();
// clear out any old explicit null checks
set_last_explicit_null_check(NULL);
@ -712,7 +707,7 @@ void NullCheckEliminator::iterate_one(BlockBegin* block) {
mark_visitable(instr);
if (instr->is_root() || instr->can_trap() || (instr->as_NullCheck() != NULL)) {
mark_visited(instr);
instr->input_values_do(&NullCheckEliminator::do_value);
instr->input_values_do(this);
instr->visit(&_visitor);
}
}

View File

@ -60,7 +60,6 @@ void StubAssembler::set_num_rt_args(int args) {
// Implementation of Runtime1
bool Runtime1::_is_initialized = false;
CodeBlob* Runtime1::_blobs[Runtime1::number_of_ids];
const char *Runtime1::_blob_names[] = {
RUNTIME1_STUBS(STUB_NAME, LAST_STUB_NAME)
@ -89,8 +88,6 @@ int Runtime1::_throw_array_store_exception_count = 0;
int Runtime1::_throw_count = 0;
#endif
BufferBlob* Runtime1::_buffer_blob = NULL;
// Simple helper to see if the caller of a runtime stub which
// entered the VM has been deoptimized
@ -117,43 +114,14 @@ static void deopt_caller() {
}
BufferBlob* Runtime1::get_buffer_blob() {
// Allocate code buffer space only once
BufferBlob* blob = _buffer_blob;
if (blob == NULL) {
// setup CodeBuffer. Preallocate a BufferBlob of size
// NMethodSizeLimit plus some extra space for constants.
int code_buffer_size = desired_max_code_buffer_size() + desired_max_constant_size();
blob = BufferBlob::create("Compiler1 temporary CodeBuffer",
code_buffer_size);
guarantee(blob != NULL, "must create initial code buffer");
_buffer_blob = blob;
}
return _buffer_blob;
}
void Runtime1::setup_code_buffer(CodeBuffer* code, int call_stub_estimate) {
// Preinitialize the consts section to some large size:
int locs_buffer_size = 20 * (relocInfo::length_limit + sizeof(relocInfo));
char* locs_buffer = NEW_RESOURCE_ARRAY(char, locs_buffer_size);
code->insts()->initialize_shared_locs((relocInfo*)locs_buffer,
locs_buffer_size / sizeof(relocInfo));
code->initialize_consts_size(desired_max_constant_size());
// Call stubs + deopt/exception handler
code->initialize_stubs_size((call_stub_estimate * LIR_Assembler::call_stub_size) +
LIR_Assembler::exception_handler_size +
LIR_Assembler::deopt_handler_size);
}
void Runtime1::generate_blob_for(StubID id) {
void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
assert(0 <= id && id < number_of_ids, "illegal stub id");
ResourceMark rm;
// create code buffer for code storage
CodeBuffer code(get_buffer_blob()->instructions_begin(),
get_buffer_blob()->instructions_size());
CodeBuffer code(buffer_blob->instructions_begin(),
buffer_blob->instructions_size());
setup_code_buffer(&code, 0);
Compilation::setup_code_buffer(&code, 0);
// create assembler for code generation
StubAssembler* sasm = new StubAssembler(&code, name_for(id), id);
@ -204,35 +172,28 @@ void Runtime1::generate_blob_for(StubID id) {
}
void Runtime1::initialize() {
// Warning: If we have more than one compilation running in parallel, we
// need a lock here with the current setup (lazy initialization).
if (!is_initialized()) {
_is_initialized = true;
// platform-dependent initialization
initialize_pd();
// generate stubs
for (int id = 0; id < number_of_ids; id++) generate_blob_for((StubID)id);
// printing
void Runtime1::initialize(BufferBlob* blob) {
// platform-dependent initialization
initialize_pd();
// generate stubs
for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id);
// printing
#ifndef PRODUCT
if (PrintSimpleStubs) {
ResourceMark rm;
for (int id = 0; id < number_of_ids; id++) {
_blobs[id]->print();
if (_blobs[id]->oop_maps() != NULL) {
_blobs[id]->oop_maps()->print();
}
if (PrintSimpleStubs) {
ResourceMark rm;
for (int id = 0; id < number_of_ids; id++) {
_blobs[id]->print();
if (_blobs[id]->oop_maps() != NULL) {
_blobs[id]->oop_maps()->print();
}
}
#endif
}
#endif
}
CodeBlob* Runtime1::blob_for(StubID id) {
assert(0 <= id && id < number_of_ids, "illegal stub id");
if (!is_initialized()) initialize();
return _blobs[id];
}

View File

@ -70,18 +70,6 @@ class StubAssembler;
class Runtime1: public AllStatic {
friend class VMStructs;
friend class ArrayCopyStub;
private:
static int desired_max_code_buffer_size() {
return (int) NMethodSizeLimit; // default 256K or 512K
}
static int desired_max_constant_size() {
return (int) NMethodSizeLimit / 10; // about 25K
}
// Note: This buffers is allocated once at startup since allocation
// for each compilation seems to be too expensive (at least on Intel
// win32).
static BufferBlob* _buffer_blob;
public:
enum StubID {
@ -115,12 +103,11 @@ class Runtime1: public AllStatic {
#endif
private:
static bool _is_initialized;
static CodeBlob* _blobs[number_of_ids];
static const char* _blob_names[];
// stub generation
static void generate_blob_for(StubID id);
static void generate_blob_for(BufferBlob* blob, StubID id);
static OopMapSet* generate_code_for(StubID id, StubAssembler* masm);
static OopMapSet* generate_exception_throw(StubAssembler* sasm, address target, bool has_argument);
static void generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_maps, OopMap* oop_map, bool ignore_fpu_registers = false);
@ -162,12 +149,8 @@ class Runtime1: public AllStatic {
static void patch_code(JavaThread* thread, StubID stub_id);
public:
static BufferBlob* get_buffer_blob();
static void setup_code_buffer(CodeBuffer* cb, int call_stub_estimate);
// initialization
static bool is_initialized() { return _is_initialized; }
static void initialize();
static void initialize(BufferBlob* blob);
static void initialize_pd();
// stubs

View File

@ -119,14 +119,14 @@ void ValueStack::pin_stack_for_linear_scan() {
// apply function to all values of a list; factored out from values_do(f)
void ValueStack::apply(Values list, void f(Value*)) {
void ValueStack::apply(Values list, ValueVisitor* f) {
for (int i = 0; i < list.length(); i++) {
Value* va = list.adr_at(i);
Value v0 = *va;
if (v0 != NULL) {
if (!v0->type()->is_illegal()) {
assert(v0->as_HiWord() == NULL, "should never see HiWord during traversal");
f(va);
f->visit(va);
#ifdef ASSERT
Value v1 = *va;
if (v0 != v1) {
@ -143,7 +143,7 @@ void ValueStack::apply(Values list, void f(Value*)) {
}
void ValueStack::values_do(void f(Value*)) {
void ValueStack::values_do(ValueVisitor* f) {
apply(_stack, f);
apply(_locks, f);

View File

@ -41,7 +41,7 @@ class ValueStack: public CompilationResourceObj {
}
// helper routine
static void apply(Values list, void f(Value*));
static void apply(Values list, ValueVisitor* f);
public:
// creation
@ -143,7 +143,7 @@ class ValueStack: public CompilationResourceObj {
void pin_stack_for_linear_scan();
// iteration
void values_do(void f(Value*));
void values_do(ValueVisitor* f);
// untyped manipulation (for dup_x1, etc.)
void clear_stack() { _stack.clear(); }

View File

@ -46,27 +46,26 @@ IntConstant* intOne = NULL;
ObjectConstant* objectNull = NULL;
void ValueType::initialize() {
void ValueType::initialize(Arena* arena) {
// Note: Must initialize all types for each compilation
// as they are allocated within a ResourceMark!
// types
voidType = new VoidType();
intType = new IntType();
longType = new LongType();
floatType = new FloatType();
doubleType = new DoubleType();
objectType = new ObjectType();
arrayType = new ArrayType();
instanceType = new InstanceType();
classType = new ClassType();
addressType = new AddressType();
illegalType = new IllegalType();
voidType = new (arena) VoidType();
intType = new (arena) IntType();
longType = new (arena) LongType();
floatType = new (arena) FloatType();
doubleType = new (arena) DoubleType();
objectType = new (arena) ObjectType();
arrayType = new (arena) ArrayType();
instanceType = new (arena) InstanceType();
classType = new (arena) ClassType();
addressType = new (arena) AddressType();
illegalType = new (arena) IllegalType();
// constants
intZero = new IntConstant(0);
intOne = new IntConstant(1);
objectNull = new ObjectConstant(ciNullObject::make());
intZero = new (arena) IntConstant(0);
intOne = new (arena) IntConstant(1);
objectNull = new (arena) ObjectConstant(ciNullObject::make());
};
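
The c1_ValueType hunk above switches the per-compilation type singletons to placement allocation in the compilation arena via "new (arena) T()". A minimal sketch of how such a placement operator new works; Arena here is a simplified stand-in:

#include <cstdlib>
#include <new>

struct Arena {
  // A bump allocator would live here; malloc keeps the sketch short.
  void* alloc(size_t size) { return std::malloc(size); }
};

// The overload that makes "new (&arena) IntTypeSketch()" work.
inline void* operator new(size_t size, Arena* arena) { return arena->alloc(size); }
inline void  operator delete(void* p, Arena*)        { std::free(p); }  // only used if a ctor throws

struct ValueTypeSketch { virtual ~ValueTypeSketch() {} };
struct IntTypeSketch : ValueTypeSketch {};

int main() {
  Arena arena;
  ValueTypeSketch* int_type = new (&arena) IntTypeSketch();  // lives as long as the arena
  (void)int_type;   // arena memory is reclaimed wholesale, not per object
  return 0;
}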

View File

@ -94,7 +94,7 @@ class ValueType: public CompilationResourceObj {
public:
// initialization
static void initialize();
static void initialize(Arena* arena);
// accessors
virtual ValueType* base() const = 0; // the 'canonical' type (e.g., intType for an IntConstant)

View File

@ -690,20 +690,32 @@ int ciMethod::scale_count(int count, float prof_factor) {
// ------------------------------------------------------------------
// invokedynamic support
// ------------------------------------------------------------------
// ciMethod::is_method_handle_invoke
//
// Return true if the method is a MethodHandle target.
bool ciMethod::is_method_handle_invoke() const {
check_is_loaded();
bool flag = ((flags().as_int() & JVM_MH_INVOKE_BITS) == JVM_MH_INVOKE_BITS);
bool flag = (holder()->name() == ciSymbol::java_dyn_MethodHandle() &&
methodOopDesc::is_method_handle_invoke_name(name()->sid()));
#ifdef ASSERT
{
VM_ENTRY_MARK;
bool flag2 = get_methodOop()->is_method_handle_invoke();
assert(flag == flag2, "consistent");
if (is_loaded()) {
bool flag2 = ((flags().as_int() & JVM_MH_INVOKE_BITS) == JVM_MH_INVOKE_BITS);
{
VM_ENTRY_MARK;
bool flag3 = get_methodOop()->is_method_handle_invoke();
assert(flag2 == flag3, "consistent");
assert(flag == flag3, "consistent");
}
}
#endif //ASSERT
return flag;
}
// ------------------------------------------------------------------
// ciMethod::is_method_handle_adapter
//
// Return true if the method is a generated MethodHandle adapter.
bool ciMethod::is_method_handle_adapter() const {
check_is_loaded();
VM_ENTRY_MARK;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -81,27 +81,21 @@ int ciExceptionHandlerStream::count_remaining() {
// providing accessors for constant pool items.
// ------------------------------------------------------------------
// ciBytecodeStream::wide
//
// Special handling for the wide bytcode
Bytecodes::Code ciBytecodeStream::wide()
{
// Get following bytecode; do not return wide
Bytecodes::Code bc = (Bytecodes::Code)_pc[1];
_pc += 2; // Skip both bytecodes
_pc += 2; // Skip index always
if( bc == Bytecodes::_iinc )
_pc += 2; // Skip optional constant
_was_wide = _pc; // Flag last wide bytecode found
return bc;
}
// ------------------------------------------------------------------
// ciBytecodeStream::table
// ciBytecodeStream::next_wide_or_table
//
// Special handling for switch ops
Bytecodes::Code ciBytecodeStream::table( Bytecodes::Code bc ) {
switch( bc ) { // Check for special bytecode handling
Bytecodes::Code ciBytecodeStream::next_wide_or_table(Bytecodes::Code bc) {
switch (bc) { // Check for special bytecode handling
case Bytecodes::_wide:
// Special handling for the wide bytecode
// Get following bytecode; do not return wide
assert(Bytecodes::Code(_pc[0]) == Bytecodes::_wide, "");
bc = Bytecodes::java_code(_raw_bc = (Bytecodes::Code)_pc[1]);
assert(Bytecodes::wide_length_for(bc) > 2, "must make progress");
_pc += Bytecodes::wide_length_for(bc);
_was_wide = _pc; // Flag last wide bytecode found
assert(is_wide(), "accessor works right");
break;
case Bytecodes::_lookupswitch:
_pc++; // Skip wide bytecode
@ -164,7 +158,7 @@ void ciBytecodeStream::force_bci(int bci) {
int ciBytecodeStream::get_klass_index() const {
switch(cur_bc()) {
case Bytecodes::_ldc:
return get_index();
return get_index_u1();
case Bytecodes::_ldc_w:
case Bytecodes::_ldc2_w:
case Bytecodes::_checkcast:
@ -173,7 +167,7 @@ int ciBytecodeStream::get_klass_index() const {
case Bytecodes::_multianewarray:
case Bytecodes::_new:
case Bytecodes::_newarray:
return get_index_big();
return get_index_u2();
default:
ShouldNotReachHere();
return 0;
@ -199,10 +193,10 @@ ciKlass* ciBytecodeStream::get_klass(bool& will_link) {
int ciBytecodeStream::get_constant_index() const {
switch(cur_bc()) {
case Bytecodes::_ldc:
return get_index();
return get_index_u1();
case Bytecodes::_ldc_w:
case Bytecodes::_ldc2_w:
return get_index_big();
return get_index_u2();
default:
ShouldNotReachHere();
return 0;
@ -239,7 +233,7 @@ int ciBytecodeStream::get_field_index() {
cur_bc() == Bytecodes::_putfield ||
cur_bc() == Bytecodes::_getstatic ||
cur_bc() == Bytecodes::_putstatic, "wrong bc");
return get_index_big();
return get_index_u2_cpcache();
}
@ -319,7 +313,9 @@ int ciBytecodeStream::get_method_index() {
ShouldNotReachHere();
}
#endif
return get_index_int();
if (has_index_u4())
return get_index_u4(); // invokedynamic
return get_index_u2_cpcache();
}
// ------------------------------------------------------------------
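
The ciBytecodeStream hunks above replace the ad-hoc index readers with get_index_u1/get_index_u2/get_index_u2_cpcache/get_index_u4 accessors keyed on the raw bytecode. A standalone sketch of the underlying operand reads (simplified; the native-order case for rewritten cpcache/invokedynamic indices is assumed little-endian here):

#include <cstdint>
#include <cstdio>

static int get_index_u1(const uint8_t* bcp) {            // e.g. ldc
  return bcp[1];
}
static int get_index_u2(const uint8_t* bcp) {            // e.g. ldc_w, new, checkcast
  return (bcp[1] << 8) | bcp[2];                          // Java (big-endian) order
}
static int get_index_u4(const uint8_t* bcp) {             // e.g. invokedynamic, native order
  return bcp[1] | (bcp[2] << 8) | (bcp[3] << 16) | (bcp[4] << 24);
}

int main() {
  const uint8_t ldc_w[] = { 0x13, 0x00, 0x2A };           // ldc_w #42
  std::printf("ldc_w index = %d\n", get_index_u2(ldc_w));
  return 0;
}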

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,15 +31,19 @@
// their original form during iteration.
class ciBytecodeStream : StackObj {
private:
// Handling for the weird bytecodes
Bytecodes::Code wide(); // Handle wide bytecode
Bytecodes::Code table(Bytecodes::Code); // Handle complicated inline table
// Handling for the weird bytecodes
Bytecodes::Code next_wide_or_table(Bytecodes::Code); // Handle _wide & complicated inline table
static Bytecodes::Code check_java(Bytecodes::Code c) {
assert(Bytecodes::is_java_code(c), "should not return _fast bytecodes");
return c;
}
static Bytecodes::Code check_defined(Bytecodes::Code c) {
assert(Bytecodes::is_defined(c), "");
return c;
}
ciMethod* _method; // the method
ciInstanceKlass* _holder;
address _bc_start; // Start of current bytecode for table
@ -50,11 +54,21 @@ private:
address _end; // Past end of bytecodes
address _pc; // Current PC
Bytecodes::Code _bc; // Current bytecode
Bytecodes::Code _raw_bc; // Current bytecode, raw form
void reset( address base, unsigned int size ) {
_bc_start =_was_wide = 0;
_start = _pc = base; _end = base + size; }
void assert_wide(bool require_wide) const {
if (require_wide)
{ assert(is_wide(), "must be a wide instruction"); }
else { assert(!is_wide(), "must not be a wide instruction"); }
}
Bytecode* bytecode() const { return Bytecode_at(_bc_start); }
Bytecode* next_bytecode() const { return Bytecode_at(_pc); }
public:
// End-Of-Bytecodes
static Bytecodes::Code EOBC() {
@ -92,11 +106,12 @@ public:
}
address cur_bcp() const { return _bc_start; } // Returns bcp to current instruction
int next_bci() const { return _pc -_start; }
int next_bci() const { return _pc - _start; }
int cur_bci() const { return _bc_start - _start; }
int instruction_size() const { return _pc - _bc_start; }
Bytecodes::Code cur_bc() const{ return check_java(_bc); }
Bytecodes::Code cur_bc_raw() const { return check_defined(_raw_bc); }
Bytecodes::Code next_bc() { return Bytecodes::java_code((Bytecodes::Code)* _pc); }
// Return current ByteCode and increment PC to next bytecode, skipping all
@ -109,85 +124,76 @@ public:
// Fetch Java bytecode
// All rewritten bytecodes maintain the size of original bytecode.
_bc = Bytecodes::java_code((Bytecodes::Code)*_pc);
_bc = Bytecodes::java_code(_raw_bc = (Bytecodes::Code)*_pc);
int csize = Bytecodes::length_for(_bc); // Expected size
if( _bc == Bytecodes::_wide ) {
_bc=wide(); // Handle wide bytecode
} else if( csize == 0 ) {
_bc=table(_bc); // Handle inline tables
} else {
_pc += csize; // Bump PC past bytecode
_pc += csize; // Bump PC past bytecode
if (csize == 0) {
_bc = next_wide_or_table(_bc);
}
return check_java(_bc);
}
bool is_wide() const { return ( _pc == _was_wide ); }
// Does this instruction contain an index which refes into the CP cache?
bool uses_cp_cache() const { return Bytecodes::uses_cp_cache(cur_bc_raw()); }
int get_index_u1() const {
return bytecode()->get_index_u1(cur_bc_raw());
}
// Get a byte index following this bytecode.
// If prefixed with a wide bytecode, get a wide index.
int get_index() const {
assert_index_size(is_wide() ? 2 : 1);
return (_pc == _was_wide) // was widened?
? Bytes::get_Java_u2(_bc_start+2) // yes, return wide index
: _bc_start[1]; // no, return narrow index
? get_index_u2(true) // yes, return wide index
: get_index_u1(); // no, return narrow index
}
// Get 2-byte index (getfield/putstatic/etc)
int get_index_big() const {
assert_index_size(2);
return Bytes::get_Java_u2(_bc_start+1);
// Get 2-byte index (byte swapping depending on which bytecode)
int get_index_u2(bool is_wide = false) const {
return bytecode()->get_index_u2(cur_bc_raw(), is_wide);
}
// Get 2-byte index (or 4-byte, for invokedynamic)
int get_index_int() const {
return has_giant_index() ? get_index_giant() : get_index_big();
// Get 2-byte index in native byte order. (Rewriter::rewrite makes these.)
int get_index_u2_cpcache() const {
return bytecode()->get_index_u2_cpcache(cur_bc_raw());
}
// Get 4-byte index, for invokedynamic.
int get_index_giant() const {
assert_index_size(4);
return Bytes::get_native_u4(_bc_start+1);
int get_index_u4() const {
return bytecode()->get_index_u4(cur_bc_raw());
}
bool has_giant_index() const { return (cur_bc() == Bytecodes::_invokedynamic); }
bool has_index_u4() const {
return bytecode()->has_index_u4(cur_bc_raw());
}
// Get dimensions byte (multinewarray)
int get_dimensions() const { return *(unsigned char*)(_pc-1); }
// Sign-extended index byte/short, no widening
int get_byte() const { return (int8_t)(_pc[-1]); }
int get_short() const { return (int16_t)Bytes::get_Java_u2(_pc-2); }
int get_long() const { return (int32_t)Bytes::get_Java_u4(_pc-4); }
int get_constant_u1() const { return bytecode()->get_constant_u1(instruction_size()-1, cur_bc_raw()); }
int get_constant_u2(bool is_wide = false) const { return bytecode()->get_constant_u2(instruction_size()-2, cur_bc_raw(), is_wide); }
// Get a byte signed constant for "iinc". Invalid for other bytecodes.
// If prefixed with a wide bytecode, get a wide constant
int get_iinc_con() const {return (_pc==_was_wide) ? get_short() :get_byte();}
int get_iinc_con() const {return (_pc==_was_wide) ? (jshort) get_constant_u2(true) : (jbyte) get_constant_u1();}
// 2-byte branch offset from current pc
int get_dest( ) const {
assert( Bytecodes::length_at(_bc_start) == sizeof(jshort)+1, "get_dest called with bad bytecode" );
return _bc_start-_start + (short)Bytes::get_Java_u2(_pc-2);
int get_dest() const {
return cur_bci() + bytecode()->get_offset_s2(cur_bc_raw());
}
// 2-byte branch offset from next pc
int next_get_dest( ) const {
address next_bc_start = _pc;
assert( _pc < _end, "" );
Bytecodes::Code next_bc = (Bytecodes::Code)*_pc;
assert( next_bc != Bytecodes::_wide, "");
int next_csize = Bytecodes::length_for(next_bc);
assert( next_csize != 0, "" );
assert( next_bc <= Bytecodes::_jsr_w, "");
address next_pc = _pc + next_csize;
assert( Bytecodes::length_at(next_bc_start) == sizeof(jshort)+1, "next_get_dest called with bad bytecode" );
return next_bc_start-_start + (short)Bytes::get_Java_u2(next_pc-2);
int next_get_dest() const {
assert(_pc < _end, "");
return next_bci() + next_bytecode()->get_offset_s2(Bytecodes::_ifeq);
}
// 4-byte branch offset from current pc
int get_far_dest( ) const {
assert( Bytecodes::length_at(_bc_start) == sizeof(jint)+1, "dest4 called with bad bytecode" );
return _bc_start-_start + (int)Bytes::get_Java_u4(_pc-4);
int get_far_dest() const {
return cur_bci() + bytecode()->get_offset_s4(cur_bc_raw());
}
// For a lookup or switch table, return target destination
@ -234,22 +240,6 @@ public:
ciCPCache* get_cpcache();
ciCallSite* get_call_site();
private:
void assert_index_size(int required_size) const {
#ifdef ASSERT
int isize = instruction_size() - (is_wide() ? 1 : 0) - 1;
if (isize == 2 && cur_bc() == Bytecodes::_iinc)
isize = 1;
else if (isize <= 2)
; // no change
else if (has_giant_index())
isize = 4;
else
isize = 2;
assert(isize == required_size, "wrong index size");
#endif
}
};
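
The accessors above fold the old get_index/get_index_big/get_index_giant trio into get_index_u1, get_index_u2 and get_index_u4, routed through the Bytecode object. A minimal standalone sketch of the three operand encodings they distinguish, in plain C++ with hypothetical helpers (read_u1, read_u2_be, read_u4_native are not the HotSpot API): a one-byte index, a big-endian two-byte index as found in the class file, and a native-order four-byte index as produced by the rewriter for invokedynamic.

#include <cstdint>
#include <cstring>

// operand points at the byte(s) following the opcode.
inline int read_u1(const uint8_t* operand) {
  return operand[0];                              // narrow index (e.g. ldc)
}
inline int read_u2_be(const uint8_t* operand) {
  return (operand[0] << 8) | operand[1];          // Java big-endian (e.g. ldc_w, getfield)
}
inline int read_u4_native(const uint8_t* operand) {
  uint32_t v;
  std::memcpy(&v, operand, sizeof v);             // native byte order, written by the rewriter
  return static_cast<int>(v);
}

With that split, get_index() only has to choose between the narrow and the wide form depending on whether a wide prefix was seen, which is what the new body above does.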

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -2132,6 +2132,7 @@ bool ciTypeFlow::can_trap(ciBytecodeStream& str) {
if (!Bytecodes::can_trap(str.cur_bc())) return false;
switch (str.cur_bc()) {
// %%% FIXME: ldc of Class can generate an exception
case Bytecodes::_ldc:
case Bytecodes::_ldc_w:
case Bytecodes::_ldc2_w:

View File

@ -25,10 +25,10 @@
#include "incls/_precompiled.incl"
#include "incls/_classFileParser.cpp.incl"
// We generally try to create the oops directly when parsing, rather than allocating
// temporary data structures and copying the bytes twice. A temporary area is only
// needed when parsing utf8 entries in the constant pool and when parsing line number
// tables.
// We generally try to create the oops directly when parsing, rather than
// allocating temporary data structures and copying the bytes twice. A
// temporary area is only needed when parsing utf8 entries in the constant
// pool and when parsing line number tables.
// We add assert in debug mode when class format is not checked.
@ -47,6 +47,10 @@
// - also used as the max version when running in jdk6
#define JAVA_6_VERSION 50
// Used for backward compatibility reasons:
// - to check NameAndType_info signatures more aggressively
#define JAVA_7_VERSION 51
void ClassFileParser::parse_constant_pool_entries(constantPoolHandle cp, int length, TRAPS) {
// Use a local copy of ClassFileStream. It helps the C++ compiler to optimize
@ -384,6 +388,20 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
verify_legal_class_name(class_name, CHECK_(nullHandle));
break;
}
case JVM_CONSTANT_NameAndType: {
if (_need_verify && _major_version >= JAVA_7_VERSION) {
int sig_index = cp->signature_ref_index_at(index);
int name_index = cp->name_ref_index_at(index);
symbolHandle name(THREAD, cp->symbol_at(name_index));
symbolHandle sig(THREAD, cp->symbol_at(sig_index));
if (sig->byte_at(0) == JVM_SIGNATURE_FUNC) {
verify_legal_method_signature(name, sig, CHECK_(nullHandle));
} else {
verify_legal_field_signature(name, sig, CHECK_(nullHandle));
}
}
break;
}
case JVM_CONSTANT_Fieldref:
case JVM_CONSTANT_Methodref:
case JVM_CONSTANT_InterfaceMethodref: {
@ -396,10 +414,28 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
symbolHandle signature(THREAD, cp->symbol_at(signature_ref_index));
if (tag == JVM_CONSTANT_Fieldref) {
verify_legal_field_name(name, CHECK_(nullHandle));
verify_legal_field_signature(name, signature, CHECK_(nullHandle));
if (_need_verify && _major_version >= JAVA_7_VERSION) {
// Signature is verified above, when iterating NameAndType_info.
// Need only to be sure it's the right type.
if (signature->byte_at(0) == JVM_SIGNATURE_FUNC) {
throwIllegalSignature(
"Field", name, signature, CHECK_(nullHandle));
}
} else {
verify_legal_field_signature(name, signature, CHECK_(nullHandle));
}
} else {
verify_legal_method_name(name, CHECK_(nullHandle));
verify_legal_method_signature(name, signature, CHECK_(nullHandle));
if (_need_verify && _major_version >= JAVA_7_VERSION) {
// Signature is verified above, when iterating NameAndType_info.
// Need only to be sure it's the right type.
if (signature->byte_at(0) != JVM_SIGNATURE_FUNC) {
throwIllegalSignature(
"Method", name, signature, CHECK_(nullHandle));
}
} else {
verify_legal_method_signature(name, signature, CHECK_(nullHandle));
}
if (tag == JVM_CONSTANT_Methodref) {
// 4509014: If a class method name begins with '<', it must be "<init>".
assert(!name.is_null(), "method name in constant pool is null");
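
The JAVA_7_VERSION path above verifies each signature once, while the NameAndType entries are walked, so the Fieldref/Methodref cases only need to confirm that the descriptor is of the right kind. A small self-contained sketch of that kind check, assuming nothing beyond standard C++ (classify_descriptor is a made-up name; in the real headers JVM_SIGNATURE_FUNC is the '(' character):

#include <cassert>
#include <string>

enum class DescriptorKind { Field, Method };

// A JVM descriptor that starts with '(' is a method signature;
// anything else is treated as a field signature.
inline DescriptorKind classify_descriptor(const std::string& sig) {
  return (!sig.empty() && sig[0] == '(') ? DescriptorKind::Method
                                         : DescriptorKind::Field;
}

int main() {
  assert(classify_descriptor("(I)V") == DescriptorKind::Method);              // acceptable for a Methodref
  assert(classify_descriptor("Ljava/lang/String;") == DescriptorKind::Field); // acceptable for a Fieldref
  return 0;
}

A Fieldref whose descriptor classifies as a method (or the reverse) is exactly what throwIllegalSignature now rejects.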
@ -1313,6 +1349,14 @@ u2* ClassFileParser::parse_checked_exceptions(u2* checked_exceptions_length,
return checked_exceptions_start;
}
void ClassFileParser::throwIllegalSignature(
const char* type, symbolHandle name, symbolHandle sig, TRAPS) {
ResourceMark rm(THREAD);
Exceptions::fthrow(THREAD_AND_LOCATION,
vmSymbols::java_lang_ClassFormatError(),
"%s \"%s\" in class %s has illegal signature \"%s\"", type,
name->as_C_string(), _class_name->as_C_string(), sig->as_C_string());
}
#define MAX_ARGS_SIZE 255
#define MAX_CODE_SIZE 65535
@ -4058,14 +4102,7 @@ void ClassFileParser::verify_legal_field_signature(symbolHandle name, symbolHand
char* p = skip_over_field_signature(bytes, false, length, CHECK);
if (p == NULL || (p - bytes) != (int)length) {
ResourceMark rm(THREAD);
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbolHandles::java_lang_ClassFormatError(),
"Field \"%s\" in class %s has illegal signature \"%s\"",
name->as_C_string(), _class_name->as_C_string(), bytes
);
return;
throwIllegalSignature("Field", name, signature, CHECK);
}
}
@ -4116,13 +4153,7 @@ int ClassFileParser::verify_legal_method_signature(symbolHandle name, symbolHand
}
}
// Report error
ResourceMark rm(THREAD);
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbolHandles::java_lang_ClassFormatError(),
"Method \"%s\" in class %s has illegal signature \"%s\"",
name->as_C_string(), _class_name->as_C_string(), p
);
throwIllegalSignature("Method", name, signature, CHECK_0);
return 0;
}

View File

@ -195,6 +195,9 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
if (!b) { classfile_parse_error(msg, index, name, CHECK); }
}
void throwIllegalSignature(
const char* type, symbolHandle name, symbolHandle sig, TRAPS);
bool is_supported_version(u2 major, u2 minor);
bool has_illegal_visibility(jint flags);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,8 @@
# include "incls/_precompiled.incl"
# include "incls/_verifier.cpp.incl"
#define NOFAILOVER_MAJOR_VERSION 51
// Access to external entry for VerifyClassCodes - old byte code verifier
extern "C" {
@ -91,7 +93,8 @@ bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool shoul
klass, message_buffer, message_buffer_len, THREAD);
split_verifier.verify_class(THREAD);
exception_name = split_verifier.result();
if (FailOverToOldVerifier && !HAS_PENDING_EXCEPTION &&
if (klass->major_version() < NOFAILOVER_MAJOR_VERSION &&
FailOverToOldVerifier && !HAS_PENDING_EXCEPTION &&
(exception_name == vmSymbols::java_lang_VerifyError() ||
exception_name == vmSymbols::java_lang_ClassFormatError())) {
if (TraceClassInitialization) {
@ -254,6 +257,9 @@ void ClassVerifier::verify_class(TRAPS) {
int num_methods = methods->length();
for (int index = 0; index < num_methods; index++) {
// Check for recursive re-verification before each method.
if (was_recursively_verified()) return;
methodOop m = (methodOop)methods->obj_at(index);
if (m->is_native() || m->is_abstract()) {
// If m is native or abstract, skip it. It is checked in class file
@ -262,6 +268,12 @@ void ClassVerifier::verify_class(TRAPS) {
}
verify_method(methodHandle(THREAD, m), CHECK_VERIFY(this));
}
if (_verify_verbose || TraceClassInitialization) {
if (was_recursively_verified())
tty->print_cr("Recursive verification detected for: %s",
_klass->external_name());
}
}
void ClassVerifier::verify_method(methodHandle m, TRAPS) {
@ -326,6 +338,9 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
// instruction in sequence
Bytecodes::Code opcode;
while (!bcs.is_last_bytecode()) {
// Check for recursive re-verification before each bytecode.
if (was_recursively_verified()) return;
opcode = bcs.raw_next();
u2 bci = bcs.bci();
@ -410,13 +425,13 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
no_control_flow = false; break;
case Bytecodes::_ldc :
verify_ldc(
opcode, bcs.get_index(), &current_frame,
opcode, bcs.get_index_u1(), &current_frame,
cp, bci, CHECK_VERIFY(this));
no_control_flow = false; break;
case Bytecodes::_ldc_w :
case Bytecodes::_ldc2_w :
verify_ldc(
opcode, bcs.get_index_big(), &current_frame,
opcode, bcs.get_index_u2(), &current_frame,
cp, bci, CHECK_VERIFY(this));
no_control_flow = false; break;
case Bytecodes::_iload :
@ -1182,7 +1197,7 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
no_control_flow = false; break;
case Bytecodes::_new :
{
index = bcs.get_index_big();
index = bcs.get_index_u2();
verify_cp_class_type(index, cp, CHECK_VERIFY(this));
VerificationType new_class_type =
cp_index_to_type(index, cp, CHECK_VERIFY(this));
@ -1202,7 +1217,7 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
no_control_flow = false; break;
case Bytecodes::_anewarray :
verify_anewarray(
bcs.get_index_big(), cp, &current_frame, CHECK_VERIFY(this));
bcs.get_index_u2(), cp, &current_frame, CHECK_VERIFY(this));
no_control_flow = false; break;
case Bytecodes::_arraylength :
type = current_frame.pop_stack(
@ -1215,7 +1230,7 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
no_control_flow = false; break;
case Bytecodes::_checkcast :
{
index = bcs.get_index_big();
index = bcs.get_index_u2();
verify_cp_class_type(index, cp, CHECK_VERIFY(this));
current_frame.pop_stack(
VerificationType::reference_check(), CHECK_VERIFY(this));
@ -1225,7 +1240,7 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
no_control_flow = false; break;
}
case Bytecodes::_instanceof : {
index = bcs.get_index_big();
index = bcs.get_index_u2();
verify_cp_class_type(index, cp, CHECK_VERIFY(this));
current_frame.pop_stack(
VerificationType::reference_check(), CHECK_VERIFY(this));
@ -1240,7 +1255,7 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
no_control_flow = false; break;
case Bytecodes::_multianewarray :
{
index = bcs.get_index_big();
index = bcs.get_index_u2();
u2 dim = *(bcs.bcp()+3);
verify_cp_class_type(index, cp, CHECK_VERIFY(this));
VerificationType new_array_type =
@ -1299,7 +1314,7 @@ char* ClassVerifier::generate_code_data(methodHandle m, u4 code_length, TRAPS) {
while (!bcs.is_last_bytecode()) {
if (bcs.raw_next() != Bytecodes::_illegal) {
int bci = bcs.bci();
if (bcs.code() == Bytecodes::_new) {
if (bcs.raw_code() == Bytecodes::_new) {
code_data[bci] = NEW_OFFSET;
} else {
code_data[bci] = BYTECODE_OFFSET;
@ -1470,20 +1485,9 @@ void ClassVerifier::verify_cp_type(
// In some situations, bytecode rewriting may occur while we're verifying.
// In this case, a constant pool cache exists and some indices refer to that
// instead. Get the original index for the tag check
constantPoolCacheOop cache = cp->cache();
if (cache != NULL &&
((types == (1 << JVM_CONSTANT_InterfaceMethodref)) ||
(types == (1 << JVM_CONSTANT_Methodref)) ||
(types == (1 << JVM_CONSTANT_Fieldref)))) {
int native_index = index;
if (Bytes::is_Java_byte_ordering_different()) {
native_index = Bytes::swap_u2(index);
}
assert((native_index >= 0) && (native_index < cache->length()),
"Must be a legal index into the cp cache");
index = cache->entry_at(native_index)->constant_pool_index();
}
// instead. Be sure we don't pick up such indices by accident.
// We must check was_recursively_verified() before we get here.
guarantee(cp->cache() == NULL, "not rewritten yet");
verify_cp_index(cp, index, CHECK_VERIFY(this));
unsigned int tag = cp->tag_at(index).value();
@ -1654,7 +1658,7 @@ void ClassVerifier::verify_switch(
int keys, delta;
current_frame->pop_stack(
VerificationType::integer_type(), CHECK_VERIFY(this));
if (bcs->code() == Bytecodes::_tableswitch) {
if (bcs->raw_code() == Bytecodes::_tableswitch) {
jint low = (jint)Bytes::get_Java_u4(aligned_bcp + jintSize);
jint high = (jint)Bytes::get_Java_u4(aligned_bcp + 2*jintSize);
if (low > high) {
@ -1710,7 +1714,7 @@ void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,
StackMapFrame* current_frame,
constantPoolHandle cp,
TRAPS) {
u2 index = bcs->get_index_big();
u2 index = bcs->get_index_u2();
verify_cp_type(index, cp, 1 << JVM_CONSTANT_Fieldref, CHECK_VERIFY(this));
// Get field name and signature
@ -1750,7 +1754,7 @@ void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,
&sig_stream, field_type, CHECK_VERIFY(this));
u2 bci = bcs->bci();
bool is_assignable;
switch (bcs->code()) {
switch (bcs->raw_code()) {
case Bytecodes::_getstatic: {
for (int i = 0; i < n; i++) {
current_frame->push_stack(field_type[i], CHECK_VERIFY(this));
@ -1870,7 +1874,7 @@ void ClassVerifier::verify_invoke_init(
ref_class_type.name(), CHECK_VERIFY(this));
methodOop m = instanceKlass::cast(ref_klass)->uncached_lookup_method(
vmSymbols::object_initializer_name(),
cp->signature_ref_at(bcs->get_index_big()));
cp->signature_ref_at(bcs->get_index_u2()));
instanceKlassHandle mh(THREAD, m->method_holder());
if (m->is_protected() && !mh->is_same_class_package(_klass())) {
bool assignable = current_type().is_assignable_from(
@ -1893,8 +1897,8 @@ void ClassVerifier::verify_invoke_instructions(
bool *this_uninit, VerificationType return_type,
constantPoolHandle cp, TRAPS) {
// Make sure the constant pool item is the right type
u2 index = bcs->get_index_big();
Bytecodes::Code opcode = bcs->code();
u2 index = bcs->get_index_u2();
Bytecodes::Code opcode = bcs->raw_code();
unsigned int types = (opcode == Bytecodes::_invokeinterface
? 1 << JVM_CONSTANT_InterfaceMethodref
: opcode == Bytecodes::_invokedynamic

View File

@ -158,6 +158,16 @@ class ClassVerifier : public StackObj {
methodHandle _method; // current method being verified
VerificationType _this_type; // the verification type of the current class
// Some recursive calls from the verifier to the name resolver
// can cause the current class to be re-verified and rewritten.
// If this happens, the original verification should not continue,
// because constant pool indexes will have changed.
// The rewriter is preceded by the verifier. If the verifier throws
// an error, rewriting is prevented. Also, rewriting always precedes
// bytecode execution or compilation. Thus, is_rewritten implies
// that a class has been verified and prepared for execution.
bool was_recursively_verified() { return _klass->is_rewritten(); }
public:
enum {
BYTECODE_OFFSET = 1,
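
The was_recursively_verified() guard above is consulted before each method and before each bytecode, so an in-progress verification pass abandons its work once the class has been rewritten underneath it. A toy model of that bail-out pattern, in plain C++ with invented names (ClassState, verify_all_methods), not the verifier itself:

#include <functional>
#include <vector>

struct ClassState {
  bool rewritten = false;   // set once the rewriter has run
};

inline bool was_recursively_verified(const ClassState& k) { return k.rewritten; }

void verify_all_methods(const ClassState& k,
                        const std::vector<std::function<void()>>& methods) {
  for (const auto& verify_one : methods) {
    if (was_recursively_verified(k)) return;  // constant pool indices may have changed
    verify_one();
  }
}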

View File

@ -66,8 +66,6 @@ CodeBlob::CodeBlob(const char* name, int header_size, int size, int frame_comple
_relocation_size = locs_size;
_instructions_offset = align_code_offset(header_size + locs_size);
_data_offset = size;
_oops_offset = size;
_oops_length = 0;
_frame_size = 0;
set_oop_maps(NULL);
}
@ -94,9 +92,6 @@ CodeBlob::CodeBlob(
_relocation_size = round_to(cb->total_relocation_size(), oopSize);
_instructions_offset = align_code_offset(header_size + _relocation_size);
_data_offset = _instructions_offset + round_to(cb->total_code_size(), oopSize);
_oops_offset = _size - round_to(cb->total_oop_size(), oopSize);
_oops_length = 0; // temporary, until the copy_oops handshake
assert(_oops_offset >= _data_offset, "codeBlob is too small");
assert(_data_offset <= size, "codeBlob is too small");
cb->copy_code_and_locs_to(this);
@ -131,99 +126,6 @@ void CodeBlob::flush() {
}
// Promote one word from an assembly-time handle to a live embedded oop.
inline void CodeBlob::initialize_immediate_oop(oop* dest, jobject handle) {
if (handle == NULL ||
// As a special case, IC oops are initialized to 1 or -1.
handle == (jobject) Universe::non_oop_word()) {
(*dest) = (oop)handle;
} else {
(*dest) = JNIHandles::resolve_non_null(handle);
}
}
void CodeBlob::copy_oops(GrowableArray<jobject>* array) {
assert(_oops_length == 0, "do this handshake just once, please");
int length = array->length();
assert((address)(oops_begin() + length) <= data_end(), "oops big enough");
oop* dest = oops_begin();
for (int index = 0 ; index < length; index++) {
initialize_immediate_oop(&dest[index], array->at(index));
}
_oops_length = length;
// Now we can fix up all the oops in the code.
// We need to do this in the code because
// the assembler uses jobjects as placeholders.
// The code and relocations have already been
// initialized by the CodeBlob constructor,
// so it is valid even at this early point to
// iterate over relocations and patch the code.
fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
}
relocInfo::relocType CodeBlob::reloc_type_for_address(address pc) {
RelocIterator iter(this, pc, pc+1);
while (iter.next()) {
return (relocInfo::relocType) iter.type();
}
// No relocation info found for pc
ShouldNotReachHere();
return relocInfo::none; // dummy return value
}
bool CodeBlob::is_at_poll_return(address pc) {
RelocIterator iter(this, pc, pc+1);
while (iter.next()) {
if (iter.type() == relocInfo::poll_return_type)
return true;
}
return false;
}
bool CodeBlob::is_at_poll_or_poll_return(address pc) {
RelocIterator iter(this, pc, pc+1);
while (iter.next()) {
relocInfo::relocType t = iter.type();
if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
return true;
}
return false;
}
void CodeBlob::fix_oop_relocations(address begin, address end,
bool initialize_immediates) {
// re-patch all oop-bearing instructions, just in case some oops moved
RelocIterator iter(this, begin, end);
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
oop_Relocation* reloc = iter.oop_reloc();
if (initialize_immediates && reloc->oop_is_immediate()) {
oop* dest = reloc->oop_addr();
initialize_immediate_oop(dest, (jobject) *dest);
}
// Refresh the oop-related bits of this instruction.
reloc->fix_oop_relocation();
}
// There must not be any interfering patches or breakpoints.
assert(!(iter.type() == relocInfo::breakpoint_type
&& iter.breakpoint_reloc()->active()),
"no active breakpoint");
}
}
void CodeBlob::do_unloading(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
bool unloading_occurred) {
ShouldNotReachHere();
}
OopMap* CodeBlob::oop_map_for_return_address(address return_address) {
address pc = return_address ;
assert (oop_maps() != NULL, "nope");

View File

@ -54,17 +54,12 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
// that range. There is a similar range(s) on returns
// which we don't detect.
int _data_offset; // offset to where data region begins
int _oops_offset; // offset to where embedded oop table begins (inside data)
int _oops_length; // number of embedded oops
int _frame_size; // size of stack frame
OopMapSet* _oop_maps; // OopMap for this CodeBlob
CodeComments _comments;
friend class OopRecorder;
void fix_oop_relocations(address begin, address end, bool initialize_immediates);
inline void initialize_immediate_oop(oop* dest, jobject handle);
public:
// Returns the space needed for CodeBlob
static unsigned int allocation_size(CodeBuffer* cb, int header_size);
@ -115,14 +110,11 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
address instructions_end() const { return (address) header_begin() + _data_offset; }
address data_begin() const { return (address) header_begin() + _data_offset; }
address data_end() const { return (address) header_begin() + _size; }
oop* oops_begin() const { return (oop*) (header_begin() + _oops_offset); }
oop* oops_end() const { return oops_begin() + _oops_length; }
// Offsets
int relocation_offset() const { return _header_size; }
int instructions_offset() const { return _instructions_offset; }
int data_offset() const { return _data_offset; }
int oops_offset() const { return _oops_offset; }
// Sizes
int size() const { return _size; }
@ -130,40 +122,16 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
int relocation_size() const { return (address) relocation_end() - (address) relocation_begin(); }
int instructions_size() const { return instructions_end() - instructions_begin(); }
int data_size() const { return data_end() - data_begin(); }
int oops_size() const { return (address) oops_end() - (address) oops_begin(); }
// Containment
bool blob_contains(address addr) const { return header_begin() <= addr && addr < data_end(); }
bool relocation_contains(relocInfo* addr) const{ return relocation_begin() <= addr && addr < relocation_end(); }
bool instructions_contains(address addr) const { return instructions_begin() <= addr && addr < instructions_end(); }
bool data_contains(address addr) const { return data_begin() <= addr && addr < data_end(); }
bool oops_contains(oop* addr) const { return oops_begin() <= addr && addr < oops_end(); }
bool contains(address addr) const { return instructions_contains(addr); }
bool is_frame_complete_at(address addr) const { return instructions_contains(addr) &&
addr >= instructions_begin() + _frame_complete_offset; }
// Relocation support
void fix_oop_relocations(address begin, address end) {
fix_oop_relocations(begin, end, false);
}
void fix_oop_relocations() {
fix_oop_relocations(NULL, NULL, false);
}
relocInfo::relocType reloc_type_for_address(address pc);
bool is_at_poll_return(address pc);
bool is_at_poll_or_poll_return(address pc);
// Support for oops in scopes and relocs:
// Note: index 0 is reserved for null.
oop oop_at(int index) const { return index == 0? (oop)NULL: *oop_addr_at(index); }
oop* oop_addr_at(int index) const{ // for GC
// relocation indexes are biased by 1 (because 0 is reserved)
assert(index > 0 && index <= _oops_length, "must be a valid non-zero index");
return &oops_begin()[index-1];
}
void copy_oops(GrowableArray<jobject>* oops);
// CodeCache support: really only used by the nmethods, but in order to get
// asserts and certain bookkeeping to work in the CodeCache they are defined
// virtual here.
@ -175,12 +143,6 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
// GC support
virtual bool is_alive() const = 0;
virtual void do_unloading(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
bool unloading_occurred);
virtual void oops_do(OopClosure* f) = 0;
// (All CodeBlob subtypes other than NMethod currently have
// an empty oops_do() method.
// OopMap for frame
OopMapSet* oop_maps() const { return _oop_maps; }
@ -245,11 +207,6 @@ class BufferBlob: public CodeBlob {
// GC/Verification support
void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { /* nothing to do */ }
bool is_alive() const { return true; }
void do_unloading(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
bool unloading_occurred) { /* do nothing */ }
void oops_do(OopClosure* f) { /* do nothing*/ }
void verify();
void print() const PRODUCT_RETURN;
@ -334,10 +291,6 @@ class RuntimeStub: public CodeBlob {
// GC/Verification support
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* nothing to do */ }
bool is_alive() const { return true; }
void do_unloading(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
bool unloading_occurred) { /* do nothing */ }
void oops_do(OopClosure* f) { /* do-nothing*/ }
void verify();
void print() const PRODUCT_RETURN;
@ -363,9 +316,6 @@ class SingletonBlob: public CodeBlob {
{};
bool is_alive() const { return true; }
void do_unloading(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
bool unloading_occurred) { /* do-nothing*/ }
void verify(); // does nothing
void print() const PRODUCT_RETURN;
@ -423,9 +373,6 @@ class DeoptimizationBlob: public SingletonBlob {
// GC for args
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* Nothing to do */ }
// Iteration
void oops_do(OopClosure* f) {}
// Printing
void print_value_on(outputStream* st) const PRODUCT_RETURN;
@ -477,9 +424,6 @@ class UncommonTrapBlob: public SingletonBlob {
// Typing
bool is_uncommon_trap_stub() const { return true; }
// Iteration
void oops_do(OopClosure* f) {}
};
@ -512,9 +456,6 @@ class ExceptionBlob: public SingletonBlob {
// Typing
bool is_exception_stub() const { return true; }
// Iteration
void oops_do(OopClosure* f) {}
};
#endif // COMPILER2
@ -548,7 +489,4 @@ class SafepointBlob: public SingletonBlob {
// Typing
bool is_safepoint_stub() const { return true; }
// Iteration
void oops_do(OopClosure* f) {}
};

View File

@ -74,12 +74,12 @@ class CodeBlob_sizes {
total_size += cb->size();
header_size += cb->header_size();
relocation_size += cb->relocation_size();
scopes_oop_size += cb->oops_size();
if (cb->is_nmethod()) {
nmethod *nm = (nmethod*)cb;
nmethod* nm = cb->as_nmethod_or_null();
code_size += nm->code_size();
stub_size += nm->stub_size();
scopes_oop_size += nm->oops_size();
scopes_data_size += nm->scopes_data_size();
scopes_pcs_size += nm->scopes_pcs_size();
} else {
@ -262,14 +262,14 @@ int CodeCache::alignment_offset() {
}
// Mark code blobs for unloading if they contain otherwise
// unreachable oops.
// Mark nmethods for unloading if they contain otherwise unreachable
// oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
bool unloading_occurred) {
assert_locked_or_safepoint(CodeCache_lock);
FOR_ALL_ALIVE_BLOBS(cb) {
cb->do_unloading(is_alive, keep_alive, unloading_occurred);
FOR_ALL_ALIVE_NMETHODS(nm) {
nm->do_unloading(is_alive, keep_alive, unloading_occurred);
}
}
@ -509,9 +509,9 @@ void CodeCache::gc_epilogue() {
if (needs_cache_clean()) {
nm->cleanup_inline_caches();
}
debug_only(nm->verify();)
DEBUG_ONLY(nm->verify());
nm->fix_oop_relocations();
}
cb->fix_oop_relocations();
}
set_needs_cache_clean(false);
prune_scavenge_root_nmethods();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2006, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -441,11 +441,11 @@ void CompiledIC::compute_monomorphic_entry(methodHandle method,
}
inline static RelocIterator parse_ic(CodeBlob* code, address ic_call, oop* &_oop_addr, bool *is_optimized) {
inline static RelocIterator parse_ic(nmethod* nm, address ic_call, oop* &_oop_addr, bool *is_optimized) {
address first_oop = NULL;
// Mergers please note: Sun SC5.x CC insists on an lvalue for a reference parameter.
CodeBlob *code1 = code;
return virtual_call_Relocation::parse_ic(code1, ic_call, first_oop, _oop_addr, is_optimized);
nmethod* tmp_nm = nm;
return virtual_call_Relocation::parse_ic(tmp_nm, ic_call, first_oop, _oop_addr, is_optimized);
}
CompiledIC::CompiledIC(NativeCall* ic_call)

View File

@ -99,12 +99,12 @@ struct nmethod_stats_struct {
code_size += nm->code_size();
stub_size += nm->stub_size();
consts_size += nm->consts_size();
oops_size += nm->oops_size();
scopes_data_size += nm->scopes_data_size();
scopes_pcs_size += nm->scopes_pcs_size();
dependencies_size += nm->dependencies_size();
handler_table_size += nm->handler_table_size();
nul_chk_table_size += nm->nul_chk_table_size();
oops_size += nm->oops_size();
}
void print_nmethod_stats() {
if (nmethod_count == 0) return;
@ -114,12 +114,12 @@ struct nmethod_stats_struct {
if (code_size != 0) tty->print_cr(" main code = %d", code_size);
if (stub_size != 0) tty->print_cr(" stub code = %d", stub_size);
if (consts_size != 0) tty->print_cr(" constants = %d", consts_size);
if (oops_size != 0) tty->print_cr(" oops = %d", oops_size);
if (scopes_data_size != 0) tty->print_cr(" scopes data = %d", scopes_data_size);
if (scopes_pcs_size != 0) tty->print_cr(" scopes pcs = %d", scopes_pcs_size);
if (dependencies_size != 0) tty->print_cr(" dependencies = %d", dependencies_size);
if (handler_table_size != 0) tty->print_cr(" handler table = %d", handler_table_size);
if (nul_chk_table_size != 0) tty->print_cr(" nul chk table = %d", nul_chk_table_size);
if (oops_size != 0) tty->print_cr(" oops = %d", oops_size);
}
int native_nmethod_count;
@ -600,7 +600,8 @@ nmethod::nmethod(
#endif // def HAVE_DTRACE_H
_stub_offset = data_offset();
_consts_offset = data_offset();
_scopes_data_offset = data_offset();
_oops_offset = data_offset();
_scopes_data_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize);
_scopes_pcs_offset = _scopes_data_offset;
_dependencies_offset = _scopes_pcs_offset;
_handler_table_offset = _dependencies_offset;
@ -690,7 +691,8 @@ nmethod::nmethod(
_orig_pc_offset = 0;
_stub_offset = data_offset();
_consts_offset = data_offset();
_scopes_data_offset = data_offset();
_oops_offset = data_offset();
_scopes_data_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize);
_scopes_pcs_offset = _scopes_data_offset;
_dependencies_offset = _scopes_pcs_offset;
_handler_table_offset = _dependencies_offset;
@ -805,8 +807,9 @@ nmethod::nmethod(
_unwind_handler_offset = -1;
}
_consts_offset = instructions_offset() + code_buffer->total_offset_of(code_buffer->consts()->start());
_scopes_data_offset = data_offset();
_scopes_pcs_offset = _scopes_data_offset + round_to(debug_info->data_size (), oopSize);
_oops_offset = data_offset();
_scopes_data_offset = _oops_offset + round_to(code_buffer->total_oop_size (), oopSize);
_scopes_pcs_offset = _scopes_data_offset + round_to(debug_info->data_size (), oopSize);
_dependencies_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size());
_handler_table_offset = _dependencies_offset + round_to(dependencies->size_in_bytes (), oopSize);
_nul_chk_table_offset = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
@ -990,6 +993,79 @@ void nmethod::set_version(int v) {
}
// Promote one word from an assembly-time handle to a live embedded oop.
inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
if (handle == NULL ||
// As a special case, IC oops are initialized to 1 or -1.
handle == (jobject) Universe::non_oop_word()) {
(*dest) = (oop) handle;
} else {
(*dest) = JNIHandles::resolve_non_null(handle);
}
}
void nmethod::copy_oops(GrowableArray<jobject>* array) {
//assert(oops_size() == 0, "do this handshake just once, please");
int length = array->length();
assert((address)(oops_begin() + length) <= data_end(), "oops big enough");
oop* dest = oops_begin();
for (int index = 0 ; index < length; index++) {
initialize_immediate_oop(&dest[index], array->at(index));
}
// Now we can fix up all the oops in the code. We need to do this
// in the code because the assembler uses jobjects as placeholders.
// The code and relocations have already been initialized by the
// CodeBlob constructor, so it is valid even at this early point to
// iterate over relocations and patch the code.
fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
}
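
copy_oops() above is the handshake that turns assembly-time jobject placeholders into live oops and then re-patches every oop-bearing instruction. A miniature model of the same two-step idea, with plain integers and strings standing in for handles and oops (Handle, Oop and build_oop_table are invented names, not the HotSpot API):

#include <unordered_map>
#include <vector>

using Handle = int;          // assembly-time placeholder
using Oop    = const char*;  // stand-in for a live object pointer

// Step 1: resolve each placeholder into the table; entries that do not
// resolve stay null, standing in for the null/sentinel cases that
// initialize_immediate_oop() special-cases.
std::vector<Oop> build_oop_table(const std::vector<Handle>& handles,
                                 const std::unordered_map<Handle, Oop>& resolver) {
  std::vector<Oop> table;
  table.reserve(handles.size());
  for (Handle h : handles) {
    auto it = resolver.find(h);
    table.push_back(it == resolver.end() ? nullptr : it->second);
  }
  // Step 2 (not modelled here): walk the relocations and patch each
  // oop-bearing instruction to point at the resolved entries.
  return table;
}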
bool nmethod::is_at_poll_return(address pc) {
RelocIterator iter(this, pc, pc+1);
while (iter.next()) {
if (iter.type() == relocInfo::poll_return_type)
return true;
}
return false;
}
bool nmethod::is_at_poll_or_poll_return(address pc) {
RelocIterator iter(this, pc, pc+1);
while (iter.next()) {
relocInfo::relocType t = iter.type();
if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
return true;
}
return false;
}
void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
// re-patch all oop-bearing instructions, just in case some oops moved
RelocIterator iter(this, begin, end);
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
oop_Relocation* reloc = iter.oop_reloc();
if (initialize_immediates && reloc->oop_is_immediate()) {
oop* dest = reloc->oop_addr();
initialize_immediate_oop(dest, (jobject) *dest);
}
// Refresh the oop-related bits of this instruction.
reloc->fix_oop_relocation();
}
// There must not be any interfering patches or breakpoints.
assert(!(iter.type() == relocInfo::breakpoint_type
&& iter.breakpoint_reloc()->active()),
"no active breakpoint");
}
}

ScopeDesc* nmethod::scope_desc_at(address pc) {
PcDesc* pd = pc_desc_at(pc);
guarantee(pd != NULL, "scope must be present");
@ -1266,19 +1342,7 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
// and it hasn't already been reported for this nmethod then report it now.
// (the event may have been reported earlier if the GC marked it for unloading).
if (state == zombie) {
DTRACE_METHOD_UNLOAD_PROBE(method());
if (JvmtiExport::should_post_compiled_method_unload() &&
!unload_reported()) {
assert(method() != NULL, "checking");
{
HandleMark hm;
JvmtiExport::post_compiled_method_unload_at_safepoint(
method()->jmethod_id(), code_begin());
}
set_unload_reported();
}
post_compiled_method_unload();
}
@ -1430,6 +1494,12 @@ void nmethod::post_compiled_method_load_event() {
}
void nmethod::post_compiled_method_unload() {
if (unload_reported()) {
// During unloading we transition to unloaded and then to zombie
// and the unloading is reported during the first transition.
return;
}
assert(_method != NULL && !is_unloaded(), "just checking");
DTRACE_METHOD_UNLOAD_PROBE(method());
@ -1439,8 +1509,7 @@ void nmethod::post_compiled_method_unload() {
if (JvmtiExport::should_post_compiled_method_unload()) {
assert(!unload_reported(), "already unloaded");
HandleMark hm;
JvmtiExport::post_compiled_method_unload_at_safepoint(
method()->jmethod_id(), code_begin());
JvmtiExport::post_compiled_method_unload(method()->jmethod_id(), code_begin());
}
// The JVMTI CompiledMethodUnload event can be enabled or disabled at
@ -2282,6 +2351,10 @@ void nmethod::print() const {
consts_begin(),
consts_end(),
consts_size());
if (oops_size () > 0) tty->print_cr(" oops [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
oops_begin(),
oops_end(),
oops_size());
if (scopes_data_size () > 0) tty->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
scopes_data_begin(),
scopes_data_end(),

View File

@ -105,6 +105,7 @@ struct nmFlags {
// [Relocation]
// - relocation information
// - constant part (doubles, longs and floats used in nmethod)
// - oop table
// [Code]
// - code body
// - exception handler
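
With the oop table now embedded in the nmethod, the data region runs constants, oops, scopes data, scopes pcs and so on, each section start rounded up to the word size. A toy offset calculation under assumed sizes, mirroring the _oops_offset / _scopes_data_offset arithmetic in the constructors (the numbers and the round_to helper here are illustrative only):

#include <cstdio>

constexpr int round_to(int x, int unit) { return ((x + unit - 1) / unit) * unit; }

int main() {
  const int oopSize         = 8;     // assumed 64-bit word
  const int data_offset     = 1024;  // assumed end of the instructions section
  const int total_oop_size  = 44;    // assumed bytes of embedded oops
  const int debug_data_size = 200;   // assumed scopes-data bytes

  const int oops_offset        = data_offset;
  const int scopes_data_offset = oops_offset + round_to(total_oop_size, oopSize);
  const int scopes_pcs_offset  = scopes_data_offset + round_to(debug_data_size, oopSize);

  std::printf("oops at %d, scopes data at %d, scopes pcs at %d\n",
              oops_offset, scopes_data_offset, scopes_pcs_offset);
  return 0;
}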
@ -161,6 +162,7 @@ class nmethod : public CodeBlob {
#endif // def HAVE_DTRACE_H
int _stub_offset;
int _consts_offset;
int _oops_offset; // offset to where embedded oop table begins (inside data)
int _scopes_data_offset;
int _scopes_pcs_offset;
int _dependencies_offset;
@ -347,7 +349,10 @@ class nmethod : public CodeBlob {
address stub_begin () const { return header_begin() + _stub_offset ; }
address stub_end () const { return header_begin() + _consts_offset ; }
address consts_begin () const { return header_begin() + _consts_offset ; }
address consts_end () const { return header_begin() + _scopes_data_offset ; }
address consts_end () const { return header_begin() + _oops_offset ; }
oop* oops_begin () const { return (oop*) (header_begin() + _oops_offset) ; }
oop* oops_end () const { return (oop*) (header_begin() + _scopes_data_offset) ; }
address scopes_data_begin () const { return header_begin() + _scopes_data_offset ; }
address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; }
PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset ); }
@ -359,20 +364,24 @@ class nmethod : public CodeBlob {
address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset ; }
address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; }
int code_size () const { return code_end () - code_begin (); }
int stub_size () const { return stub_end () - stub_begin (); }
int consts_size () const { return consts_end () - consts_begin (); }
int scopes_data_size () const { return scopes_data_end () - scopes_data_begin (); }
int scopes_pcs_size () const { return (intptr_t)scopes_pcs_end () - (intptr_t)scopes_pcs_begin (); }
int dependencies_size () const { return dependencies_end () - dependencies_begin (); }
int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }
// Sizes
int code_size () const { return code_end () - code_begin (); }
int stub_size () const { return stub_end () - stub_begin (); }
int consts_size () const { return consts_end () - consts_begin (); }
int oops_size () const { return (address) oops_end () - (address) oops_begin (); }
int scopes_data_size () const { return scopes_data_end () - scopes_data_begin (); }
int scopes_pcs_size () const { return (intptr_t) scopes_pcs_end () - (intptr_t) scopes_pcs_begin (); }
int dependencies_size () const { return dependencies_end () - dependencies_begin (); }
int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }
int total_size () const;
// Containment
bool code_contains (address addr) const { return code_begin () <= addr && addr < code_end (); }
bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
@ -431,6 +440,29 @@ class nmethod : public CodeBlob {
int version() const { return flags.version; }
void set_version(int v);
// Support for oops in scopes and relocs:
// Note: index 0 is reserved for null.
oop oop_at(int index) const { return index == 0 ? (oop) NULL: *oop_addr_at(index); }
oop* oop_addr_at(int index) const { // for GC
// relocation indexes are biased by 1 (because 0 is reserved)
assert(index > 0 && index <= oops_size(), "must be a valid non-zero index");
return &oops_begin()[index - 1];
}
void copy_oops(GrowableArray<jobject>* oops);
// Relocation support
private:
void fix_oop_relocations(address begin, address end, bool initialize_immediates);
inline void initialize_immediate_oop(oop* dest, jobject handle);
public:
void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
void fix_oop_relocations() { fix_oop_relocations(NULL, NULL, false); }
bool is_at_poll_return(address pc);
bool is_at_poll_or_poll_return(address pc);
// Non-perm oop support
bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; }
protected:
@ -511,8 +543,8 @@ class nmethod : public CodeBlob {
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
OopClosure* f);
virtual void oops_do(OopClosure* f) { oops_do(f, false); }
void oops_do(OopClosure* f, bool do_strong_roots_only);
void oops_do(OopClosure* f) { oops_do(f, false); }
void oops_do(OopClosure* f, bool do_strong_roots_only);
bool detect_scavenge_root_oops();
void verify_scavenge_root_oops() PRODUCT_RETURN;

Some files were not shown because too many files have changed in this diff.