J. Duke 2017-07-05 17:39:01 +02:00
commit fb15471e58
113 changed files with 2053 additions and 1691 deletions

View File

@ -109,3 +109,4 @@ cc58c11af15411042719e9c82707fdbef60a9e0f jdk7-b130
0f62a65fb666b337caa585015ab6ea2e60e709ca jdk7-b132
c6f380693342feadccc5fe2c5adf500e861361aa jdk7-b133
ddc2fcb3682ffd27f44354db666128827be7e3c3 jdk7-b134
783bd02b4ab4596059c74b10a1793d7bd2f1c157 jdk7-b135

View File

@ -1763,6 +1763,12 @@
Where each of these directories contains the import JDK image
for that platform.
</dd>
<dt><a name="ALT_OPENWIN_HOME"><tt>ALT_OPENWIN_HOME</tt></a></dt>
<dd>
The top-level directory of the libraries and include files for the platform's
graphical programming environment. The default location is platform-specific.
For example, on Linux it defaults to <tt>/usr/X11R6/</tt>.
</dd>
<dt><strong>Windows specific:</strong></dt>
<dd>
<dl>
@ -1791,6 +1797,81 @@
</dd>
</dl>
</dd>
<dt><strong>Cross-Compilation Support:</strong></dt>
<dd>
<dl>
<dt><a name="CROSS_COMPILE_ARCH"><tt>CROSS_COMPILE_ARCH</tt></a> </dt>
<dd>
Set to the target architecture of a cross-compilation build. If set, this
variable is used to signify that we are cross-compiling. The expectation
is that <a href="#ALT_COMPILER_PATH"><tt>ALT_COMPILER_PATH</tt></a> is set
to point to the cross-compiler and that any cross-compilation specific flags
are passed using <a href="#EXTRA_CFLAGS"><tt>EXTRA_CFLAGS</tt></a>.
The <a href="#ALT_OPENWIN_HOME"><tt>ALT_OPENWIN_HOME</tt></a> variable should
also be set to point to the graphical header files (e.g. X11) provided with
the cross-compiler.
When cross-compiling, execution of any demos or similar artifacts that may be
built is skipped, as is binary-file verification. A sketch of a combined
cross-compilation invocation appears after this list.
</dd>
<dt><tt><a name="EXTRA_CFLAGS">EXTRA_CFLAGS</a></tt> </dt>
<dd>
Used to pass cross-compilation options to the cross-compiler.
These are added to the <tt>CFLAGS</tt> and <tt>CXXFLAGS</tt> variables.
</dd>
<dt><tt><a name="USE_ONLY_BOOTDIR_TOOLS">USE_ONLY_BOOTDIR_TOOLS</a></tt> </dt>
<dd>
Used primarily for cross-compilation builds (and always set in that case),
this variable indicates that tools from the boot JDK should be used during
the build process rather than the just-built tools (<tt>javac</tt>,
<tt>javah</tt>, <tt>jar</tt>), which cannot execute on the build host.
</dd>
<dt><tt><a name="HOST_CC">HOST_CC</a></tt> </dt>
<dd>
The location of the C compiler used to generate programs that run on the build host.
Some parts of the build generate programs that are then compiled and executed
to produce other parts of the build. Normally the primary C compiler is used
to do this, but when cross-compiling that would be the cross-compiler and the
resulting program could not be executed.
On Linux this defaults to <tt>/usr/bin/gcc</tt>; on other platforms it must be
set explicitly.
</dd>
</dl>
</dd>
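<dd>
As a minimal sketch of how these variables might combine on a <tt>make</tt>
command line (the tool paths and target architecture shown are illustrative
assumptions, not prescribed values):
<blockquote>
<pre>
make CROSS_COMPILE_ARCH=arm \
     ALT_COMPILER_PATH=/opt/cross/arm/bin \
     ALT_OPENWIN_HOME=/opt/cross/arm/X11 \
     EXTRA_CFLAGS="-march=armv5t" \
     USE_ONLY_BOOTDIR_TOOLS=true \
     HOST_CC=/usr/bin/gcc
</pre>
</blockquote>
</dd>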
<dt><strong>Specialized Build Options:</strong></dt>
<dd>
Some build variables exist to support specialized build environments and/or
specialized build products; their use is only supported in those contexts
(a combined example follows this list):
<dl>
<dt><tt><a name="BUILD_CLIENT_ONLY">BUILD_CLIENT_ONLY</a></tt> </dt>
<dd>
Indicates this build will only contain the Hotspot client VM. In addition to
controlling the Hotspot build target, it ensures that we don't try to copy
any server VM files/directories, and defines a default <tt>jvm.cfg</tt> file
suitable for a client-only environment. Using this in a 64-bit build will
generate a sanity warning, as 64-bit client builds are not directly supported.
</dd>
<dt><tt><a name="BUILD_HEADLESS_ONLY"></a>BUILD_HEADLESS_ONLY</tt> </dt>
<dd>
Used when the build environment has no graphical capabilities at all. This
excludes building anything that requires graphical libraries to be available.
</dd>
<dt><tt><a name="JAVASE_EMBEDDED"></a>JAVASE_EMBEDDED</tt> </dt>
<dd>
Used to indicate this is a build of the Oracle Java SE Embedded product.
This will enable the directives included in the SE-Embedded specific build
files.
</dd>
<dt><tt><a name="LIBZIP_CAN_USE_MMAP">LIBZIP_CAN_USE_MMAP</a></tt> </dt>
<dd>
If set to false, disables the use of mmap by the zip utility. Otherwise,
mmap will be used.
</dd>
<dt><tt><a name="COMPRESS_JARS"></a>COMPRESS_JARS</tt> </dt>
<dd>
If set to true, causes certain jar files that would otherwise be built
without compression to be compressed.
</dd>
</dl>
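As an illustrative sketch (the option values chosen here are assumptions for
demonstration, not recommended defaults), a headless client-only build might
be configured as:
<blockquote>
<pre>
make BUILD_CLIENT_ONLY=true \
     BUILD_HEADLESS_ONLY=true \
     LIBZIP_CAN_USE_MMAP=false \
     COMPRESS_JARS=true
</pre>
</blockquote>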
</dd>
</dl>
</blockquote>
<!-- ------------------------------------------------------ -->

View File

@ -156,3 +156,5 @@ a8d643a4db47c7b58e0bcb49c77b5c3610de86a8 hs21-b03
1b3a350709e4325d759bb453ff3fb6a463270488 jdk7-b133
447e6faab4a8755d4860c2366630729dbaec111c jdk7-b134
3c76374706ea8a77e15aec8310e831e5734f8775 hs21-b04
b898f0fc3cedc972d884d31a751afd75969531cf jdk7-b135
b898f0fc3cedc972d884d31a751afd75969531cf hs21-b05

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -62,7 +62,7 @@ public class FieldImpl extends TypeComponentImpl implements Field {
// get the value of static field
ValueImpl getValue() {
return getValue(saField.getFieldHolder());
return getValue(saField.getFieldHolder().getJavaMirror());
}
// get the value of this Field from a specific Oop

View File

@ -44,12 +44,10 @@ public class StringTable extends sun.jvm.hotspot.utilities.Hashtable {
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("StringTable");
theTableField = type.getAddressField("_the_table");
stringTableSize = db.lookupIntConstant("StringTable::string_table_size").intValue();
}
// Fields
private static AddressField theTableField;
private static int stringTableSize;
// Accessors
public static StringTable getTheTable() {
@ -57,10 +55,6 @@ public class StringTable extends sun.jvm.hotspot.utilities.Hashtable {
return (StringTable) VMObjectFactory.newObject(StringTable.class, tmp);
}
public static int getStringTableSize() {
return stringTableSize;
}
public StringTable(Address addr) {
super(addr);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -87,7 +87,7 @@ public class InstanceKlass extends Klass {
innerClasses = new OopField(type.getOopField("_inner_classes"), Oop.getHeaderSize());
nonstaticFieldSize = new CIntField(type.getCIntegerField("_nonstatic_field_size"), Oop.getHeaderSize());
staticFieldSize = new CIntField(type.getCIntegerField("_static_field_size"), Oop.getHeaderSize());
staticOopFieldSize = new CIntField(type.getCIntegerField("_static_oop_field_size"), Oop.getHeaderSize());
staticOopFieldCount = new CIntField(type.getCIntegerField("_static_oop_field_count"), Oop.getHeaderSize());
nonstaticOopMapSize = new CIntField(type.getCIntegerField("_nonstatic_oop_map_size"), Oop.getHeaderSize());
isMarkedDependent = new CIntField(type.getCIntegerField("_is_marked_dependent"), Oop.getHeaderSize());
initState = new CIntField(type.getCIntegerField("_init_state"), Oop.getHeaderSize());
@ -140,7 +140,7 @@ public class InstanceKlass extends Klass {
private static OopField innerClasses;
private static CIntField nonstaticFieldSize;
private static CIntField staticFieldSize;
private static CIntField staticOopFieldSize;
private static CIntField staticOopFieldCount;
private static CIntField nonstaticOopMapSize;
private static CIntField isMarkedDependent;
private static CIntField initState;
@ -261,8 +261,7 @@ public class InstanceKlass extends Klass {
public Symbol getSourceDebugExtension(){ return getSymbol(sourceDebugExtension); }
public TypeArray getInnerClasses() { return (TypeArray) innerClasses.getValue(this); }
public long getNonstaticFieldSize() { return nonstaticFieldSize.getValue(this); }
public long getStaticFieldSize() { return staticFieldSize.getValue(this); }
public long getStaticOopFieldSize() { return staticOopFieldSize.getValue(this); }
public long getStaticOopFieldCount() { return staticOopFieldCount.getValue(this); }
public long getNonstaticOopMapSize() { return nonstaticOopMapSize.getValue(this); }
public boolean getIsMarkedDependent() { return isMarkedDependent.getValue(this) != 0; }
public long getVtableLen() { return vtableLen.getValue(this); }
@ -453,7 +452,7 @@ public class InstanceKlass extends Klass {
visitor.doOop(innerClasses, true);
visitor.doCInt(nonstaticFieldSize, true);
visitor.doCInt(staticFieldSize, true);
visitor.doCInt(staticOopFieldSize, true);
visitor.doCInt(staticOopFieldCount, true);
visitor.doCInt(nonstaticOopMapSize, true);
visitor.doCInt(isMarkedDependent, true);
visitor.doCInt(initState, true);
@ -692,7 +691,7 @@ public class InstanceKlass extends Klass {
public long getObjectSize() {
long bodySize = alignObjectOffset(getVtableLen() * getHeap().getOopSize())
+ alignObjectOffset(getItableLen() * getHeap().getOopSize())
+ (getStaticFieldSize() + getNonstaticOopMapSize()) * getHeap().getOopSize();
+ (getNonstaticOopMapSize()) * getHeap().getOopSize();
return alignObjectSize(headerSize + bodySize);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2001, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,7 +40,12 @@ public class IntField extends Field {
super(holder, fieldArrayIndex);
}
public int getValue(Oop obj) { return obj.getHandle().getJIntAt(getOffset()); }
public int getValue(Oop obj) {
if (!isVMField() && !obj.isInstance() && !obj.isArray()) {
throw new InternalError(obj.toString());
}
return obj.getHandle().getJIntAt(getOffset());
}
public void setValue(Oop obj, int value) throws MutationException {
// Fix this: setJIntAt is missing in Address
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2002, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,11 +41,17 @@ public class OopField extends Field {
}
public Oop getValue(Oop obj) {
if (!isVMField() && !obj.isInstance() && !obj.isArray()) {
throw new InternalError();
}
return obj.getHeap().newOop(getValueAsOopHandle(obj));
}
/** Debugging support */
public OopHandle getValueAsOopHandle(Oop obj) {
if (!isVMField() && !obj.isInstance() && !obj.isArray()) {
throw new InternalError(obj.toString());
}
return obj.getHandle().getOopHandleAt(getOffset());
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -274,13 +274,7 @@ public class OopUtilities implements /* imports */ JVMTIThreadState {
// hc_klass is a HotSpot magic field and hence we can't
// find it from InstanceKlass for java.lang.Class.
TypeDataBase db = VM.getVM().getTypeDataBase();
int hcKlassOffset = (int) Instance.getHeaderSize();
try {
hcKlassOffset += (db.lookupIntConstant("java_lang_Class::hc_klass_offset").intValue() *
VM.getVM().getHeapOopSize());
} catch (RuntimeException re) {
// ignore, currently java_lang_Class::hc_klass_offset is zero
}
int hcKlassOffset = (int) db.lookupType("java_lang_Class").getCIntegerField("klass_offset").getValue();
if (VM.getVM().isCompressedOopsEnabled()) {
hcKlassField = new NarrowOopField(new NamedFieldIdentifier("hc_klass"), hcKlassOffset, true);
} else {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -839,13 +839,13 @@ public class VM {
}
private void readSystemProperties() {
InstanceKlass systemKls = getSystemDictionary().getSystemKlass();
final InstanceKlass systemKls = getSystemDictionary().getSystemKlass();
systemKls.iterate(new DefaultOopVisitor() {
ObjectReader objReader = new ObjectReader();
public void doOop(sun.jvm.hotspot.oops.OopField field, boolean isVMField) {
if (field.getID().getName().equals("props")) {
try {
sysProps = (Properties) objReader.readObject(field.getValue(getObj()));
sysProps = (Properties) objReader.readObject(field.getValue(systemKls.getJavaMirror()));
} catch (Exception e) {
if (Assert.ASSERTS_ENABLED) {
e.printStackTrace();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2004, 2008, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -746,7 +746,7 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
out.writeByte((byte)kind);
if (ik != null) {
// static field
writeField(field, ik);
writeField(field, ik.getJavaMirror());
}
}
}

View File

@ -43,6 +43,7 @@ if [ "$1" == "-help" ]; then
fi
jdk=$1
shift
OS=`uname`
if [ "$OS" != "Linux" ]; then
@ -68,7 +69,7 @@ fi
tmp=/tmp/sagsetup
rm -f $tmp
$jdk/bin/java sagtarg > $tmp &
$jdk/bin/java $* sagtarg > $tmp &
pid=$!
while [ ! -s $tmp ] ; do
# Kludge alert!

View File

@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2011
HS_MAJOR_VER=21
HS_MINOR_VER=0
HS_BUILD_NUMBER=05
HS_BUILD_NUMBER=06
JDK_MAJOR_VER=1
JDK_MINOR_VER=7

View File

@ -301,7 +301,8 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
// thread.
assert(_obj != noreg, "must be a valid register");
assert(_oop_index >= 0, "must have oop index");
__ ld_ptr(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc), G3);
__ load_heap_oop(_obj, java_lang_Class::klass_offset_in_bytes(), G3);
__ ld_ptr(G3, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc), G3);
__ cmp(G2_thread, G3);
__ br(Assembler::notEqual, false, Assembler::pn, call_patch);
__ delayed()->nop();

View File

@ -80,13 +80,19 @@ void CompactingPermGenGen::generate_vtable_methods(void** vtbl_list,
for (int j = 0; j < num_virtuals; ++j) {
dummy_vtable[num_virtuals * i + j] = (void*)masm->pc();
__ save(SP, -256, SP);
int offset = (i << 8) + j;
Register src = G0;
if (!Assembler::is_simm13(offset)) {
__ sethi(offset, L0);
src = L0;
offset = offset & ((1 << 10) - 1);
}
__ brx(Assembler::always, false, Assembler::pt, common_code);
// Load L0 with a value indicating vtable/offset pair.
// -- bits[ 7..0] (8 bits) which virtual method in table?
// -- bits[12..8] (5 bits) which virtual method table?
// -- must fit in 13-bit instruction immediate field.
__ delayed()->set((i << 8) + j, L0);
// -- bits[13..8] (6 bits) which virtual method table?
__ delayed()->or3(src, offset, L0);
}
}

View File

@ -775,9 +775,13 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
switch (ek) {
case _adapter_opt_i2l:
{
__ ldsw(arg_lsw, O2_scratch); // Load LSW
NOT_LP64(__ srlx(O2_scratch, BitsPerInt, O3_scratch)); // Move high bits to lower bits for std
__ st_long(O2_scratch, arg_msw); // Uses O2/O3 on !_LP64
#ifdef _LP64
__ ldsw(arg_lsw, O2_scratch); // Load LSW sign-extended
#else
__ ldsw(arg_lsw, O3_scratch); // Load LSW sign-extended
__ srlx(O3_scratch, BitsPerInt, O2_scratch); // Move MSW value to lower 32-bits for std
#endif
__ st_long(O2_scratch, arg_msw); // Uses O2/O3 on !_LP64
}
break;
case _adapter_opt_unboxl:

View File

@ -52,6 +52,22 @@ void NativeInstruction::set_data64_sethi(address instaddr, intptr_t x) {
ICache::invalidate_range(instaddr, 7 * BytesPerInstWord);
}
void NativeInstruction::verify_data64_sethi(address instaddr, intptr_t x) {
ResourceMark rm;
unsigned char buffer[10 * BytesPerInstWord];
CodeBuffer buf(buffer, 10 * BytesPerInstWord);
MacroAssembler masm(&buf);
Register destreg = inv_rd(*(unsigned int *)instaddr);
// Generate the proper sequence into a temporary buffer and compare
// it with the original sequence.
masm.patchable_sethi(x, destreg);
int len = masm.pc() - buffer;
for (int i = 0; i < len; i++) {
assert(instaddr[i] == buffer[i], "instructions must match");
}
}
void NativeInstruction::verify() {
// make sure code pattern is actually an instruction address
address addr = addr_at(0);

View File

@ -254,6 +254,7 @@ class NativeInstruction VALUE_OBJ_CLASS_SPEC {
// sethi. This only does the sethi. The disp field (bottom 10 bits)
// must be handled separately.
static void set_data64_sethi(address instaddr, intptr_t x);
static void verify_data64_sethi(address instaddr, intptr_t x);
// combine the fields of a sethi/simm13 pair (simm13 = or, add, jmpl, ld/st)
static int data32(int sethi_insn, int arith_insn) {

View File

@ -30,7 +30,7 @@
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
void Relocation::pd_set_data_value(address x, intptr_t o) {
void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
NativeInstruction* ip = nativeInstruction_at(addr());
jint inst = ip->long_at(0);
assert(inst != NativeInstruction::illegal_instruction(), "no breakpoint");
@ -83,7 +83,11 @@ void Relocation::pd_set_data_value(address x, intptr_t o) {
guarantee(Assembler::is_simm13(simm13), "offset can't overflow simm13");
inst &= ~Assembler::simm( -1, 13);
inst |= Assembler::simm(simm13, 13);
ip->set_long_at(0, inst);
if (verify_only) {
assert(ip->long_at(0) == inst, "instructions must match");
} else {
ip->set_long_at(0, inst);
}
}
break;
@ -97,19 +101,36 @@ void Relocation::pd_set_data_value(address x, intptr_t o) {
jint np = oopDesc::encode_heap_oop((oop)x);
inst &= ~Assembler::hi22(-1);
inst |= Assembler::hi22((intptr_t)np);
ip->set_long_at(0, inst);
if (verify_only) {
assert(ip->long_at(0) == inst, "instructions must match");
} else {
ip->set_long_at(0, inst);
}
inst2 = ip->long_at( NativeInstruction::nop_instruction_size );
guarantee(Assembler::inv_op(inst2)==Assembler::arith_op, "arith op");
ip->set_long_at(NativeInstruction::nop_instruction_size, ip->set_data32_simm13( inst2, (intptr_t)np));
if (verify_only) {
assert(ip->long_at(NativeInstruction::nop_instruction_size) == NativeInstruction::set_data32_simm13( inst2, (intptr_t)np),
"instructions must match");
} else {
ip->set_long_at(NativeInstruction::nop_instruction_size, NativeInstruction::set_data32_simm13( inst2, (intptr_t)np));
}
break;
}
ip->set_data64_sethi( ip->addr_at(0), (intptr_t)x );
if (verify_only) {
ip->verify_data64_sethi( ip->addr_at(0), (intptr_t)x );
} else {
ip->set_data64_sethi( ip->addr_at(0), (intptr_t)x );
}
#else
guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
inst &= ~Assembler::hi22( -1);
inst |= Assembler::hi22((intptr_t)x);
// (ignore offset; it doesn't play into the sethi)
ip->set_long_at(0, inst);
if (verify_only) {
assert(ip->long_at(0) == inst, "instructions must match");
} else {
ip->set_long_at(0, inst);
}
#endif
}
break;

View File

@ -313,10 +313,13 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
}
assert(_obj != noreg, "must be a valid register");
Register tmp = rax;
if (_obj == tmp) tmp = rbx;
Register tmp2 = rbx;
__ push(tmp);
__ push(tmp2);
__ load_heap_oop(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
__ get_thread(tmp);
__ cmpptr(tmp, Address(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc)));
__ cmpptr(tmp, Address(tmp2, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc)));
__ pop(tmp2);
__ pop(tmp);
__ jcc(Assembler::notEqual, call_patch);

View File

@ -31,7 +31,7 @@
#include "runtime/safepoint.hpp"
void Relocation::pd_set_data_value(address x, intptr_t o) {
void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
#ifdef AMD64
x += o;
typedef Assembler::WhichOperand WhichOperand;
@ -40,19 +40,35 @@ void Relocation::pd_set_data_value(address x, intptr_t o) {
which == Assembler::narrow_oop_operand ||
which == Assembler::imm_operand, "format unpacks ok");
if (which == Assembler::imm_operand) {
*pd_address_in_code() = x;
if (verify_only) {
assert(*pd_address_in_code() == x, "instructions must match");
} else {
*pd_address_in_code() = x;
}
} else if (which == Assembler::narrow_oop_operand) {
address disp = Assembler::locate_operand(addr(), which);
*(int32_t*) disp = oopDesc::encode_heap_oop((oop)x);
if (verify_only) {
assert(*(uint32_t*) disp == oopDesc::encode_heap_oop((oop)x), "instructions must match");
} else {
*(int32_t*) disp = oopDesc::encode_heap_oop((oop)x);
}
} else {
// Note: Use runtime_call_type relocations for call32_operand.
address ip = addr();
address disp = Assembler::locate_operand(ip, which);
address next_ip = Assembler::locate_next_instruction(ip);
*(int32_t*) disp = x - next_ip;
if (verify_only) {
assert(*(int32_t*) disp == (x - next_ip), "instructions must match");
} else {
*(int32_t*) disp = x - next_ip;
}
}
#else
*pd_address_in_code() = x + o;
if (verify_only) {
assert(*pd_address_in_code() == (x + o), "instructions must match");
} else {
*pd_address_in_code() = x + o;
}
#endif // AMD64
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -281,7 +281,7 @@ int CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
if (method->is_static()) {
istate->set_oop_temp(
method->constants()->pool_holder()->klass_part()->java_mirror());
method->constants()->pool_holder()->java_mirror());
mirror = istate->oop_temp_addr();
*(dst++) = &mirror;
}
@ -667,7 +667,7 @@ InterpreterFrame *InterpreterFrame::build(const methodOop method, TRAPS) {
(BasicObjectLock *) stack->alloc(monitor_words * wordSize);
oop object;
if (method->is_static())
object = method->constants()->pool_holder()->klass_part()->java_mirror();
object = method->constants()->pool_holder()->java_mirror();
else
object = (oop) locals[0];
monitor->set_obj(object);

View File

@ -3297,9 +3297,14 @@ bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
"possibility of dangling Thread pointer");
OSThread* osthread = thread->osthread();
bool interrupted;
interrupted = osthread->interrupted();
if (clear_interrupted == true) {
bool interrupted = osthread->interrupted();
// There is no synchronization between the setting of the interrupt
// and it being cleared here. It is critical - see 6535709 - that
// we only clear the interrupt state, and reset the interrupt event,
// if we are going to report that we were indeed interrupted - else
// an interrupt can be "lost", leading to spurious wakeups or lost wakeups
// depending on the timing.
if (interrupted && clear_interrupted) {
osthread->set_interrupted(false);
ResetEvent(osthread->interrupt_event());
} // Otherwise leave the interrupted state alone

View File

@ -1471,9 +1471,9 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
if (code == Bytecodes::_getstatic || code == Bytecodes::_putstatic) {
if (state_before != NULL) {
// build a patching constant
obj = new Constant(new ClassConstant(holder), state_before);
obj = new Constant(new InstanceConstant(holder->java_mirror()), state_before);
} else {
obj = new Constant(new ClassConstant(holder));
obj = new Constant(new InstanceConstant(holder->java_mirror()));
}
}

View File

@ -808,7 +808,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
{ klassOop klass = resolve_field_return_klass(caller_method, bci, CHECK);
// Save a reference to the class that has to be checked for initialization
init_klass = KlassHandle(THREAD, klass);
k = klass;
k = klass->java_mirror();
}
break;
case Bytecodes::_new:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -46,8 +46,7 @@ size_t ciCPCache::get_f1_offset(int index) {
// ciCPCache::is_f1_null_at
bool ciCPCache::is_f1_null_at(int index) {
VM_ENTRY_MARK;
oop f1 = entry_at(index)->f1();
return (f1 == NULL);
return entry_at(index)->is_f1_null();
}

View File

@ -213,7 +213,7 @@ void ciField::initialize_from(fieldDescriptor* fd) {
// may change. The three examples are java.lang.System.in,
// java.lang.System.out, and java.lang.System.err.
Handle k = _holder->get_klassOop();
KlassHandle k = _holder->get_klassOop();
assert( SystemDictionary::System_klass() != NULL, "Check once per vm");
if( k() == SystemDictionary::System_klass() ) {
// Check offsets for case 2: System.in, System.out, or System.err
@ -225,36 +225,38 @@ void ciField::initialize_from(fieldDescriptor* fd) {
}
}
Handle mirror = k->java_mirror();
_is_constant = true;
switch(type()->basic_type()) {
case T_BYTE:
_constant_value = ciConstant(type()->basic_type(), k->byte_field(_offset));
_constant_value = ciConstant(type()->basic_type(), mirror->byte_field(_offset));
break;
case T_CHAR:
_constant_value = ciConstant(type()->basic_type(), k->char_field(_offset));
_constant_value = ciConstant(type()->basic_type(), mirror->char_field(_offset));
break;
case T_SHORT:
_constant_value = ciConstant(type()->basic_type(), k->short_field(_offset));
_constant_value = ciConstant(type()->basic_type(), mirror->short_field(_offset));
break;
case T_BOOLEAN:
_constant_value = ciConstant(type()->basic_type(), k->bool_field(_offset));
_constant_value = ciConstant(type()->basic_type(), mirror->bool_field(_offset));
break;
case T_INT:
_constant_value = ciConstant(type()->basic_type(), k->int_field(_offset));
_constant_value = ciConstant(type()->basic_type(), mirror->int_field(_offset));
break;
case T_FLOAT:
_constant_value = ciConstant(k->float_field(_offset));
_constant_value = ciConstant(mirror->float_field(_offset));
break;
case T_DOUBLE:
_constant_value = ciConstant(k->double_field(_offset));
_constant_value = ciConstant(mirror->double_field(_offset));
break;
case T_LONG:
_constant_value = ciConstant(k->long_field(_offset));
_constant_value = ciConstant(mirror->long_field(_offset));
break;
case T_OBJECT:
case T_ARRAY:
{
oop o = k->obj_field(_offset);
oop o = mirror->obj_field(_offset);
// A field will be "constant" if it is known always to be
// a non-null reference to an instance of a particular class,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -138,3 +138,9 @@ void ciInstance::print_impl(outputStream* st) {
st->print(" type=");
klass()->print(st);
}
ciKlass* ciInstance::java_lang_Class_klass() {
VM_ENTRY_MARK;
return CURRENT_ENV->get_object(java_lang_Class::as_klassOop(get_oop()))->as_klass();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -64,6 +64,8 @@ public:
// Constant value of a field at the specified offset.
ciConstant field_value_by_offset(int field_offset);
ciKlass* java_lang_Class_klass();
};
#endif // SHARE_VM_CI_CIINSTANCE_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -85,7 +85,6 @@ ciInstanceKlass::ciInstanceKlass(KlassHandle h_k) :
if (h_k() != SystemDictionary::Object_klass()) {
super();
}
java_mirror();
//compute_nonstatic_fields(); // done outside of constructor
}
@ -320,6 +319,9 @@ ciInstanceKlass* ciInstanceKlass::super() {
// Get the instance of java.lang.Class corresponding to this klass.
// Cache it on this->_java_mirror.
ciInstance* ciInstanceKlass::java_mirror() {
if (is_shared()) {
return ciKlass::java_mirror();
}
if (_java_mirror == NULL) {
_java_mirror = ciKlass::java_mirror();
}

View File

@ -663,7 +663,7 @@ ciObjectFactory::NonPermObject* &ciObjectFactory::find_non_perm(oop key) {
if (key->is_perm() && _non_perm_count == 0) {
return emptyBucket;
} else if (key->is_instance()) {
if (key->klass() == SystemDictionary::Class_klass()) {
if (key->klass() == SystemDictionary::Class_klass() && JavaObjectsInPerm) {
// class mirror instances are always perm
return emptyBucket;
}

View File

@ -37,6 +37,7 @@
#include "memory/universe.inline.hpp"
#include "oops/constantPoolOop.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/klassOop.hpp"
#include "oops/klassVtable.hpp"
@ -2606,54 +2607,6 @@ typeArrayHandle ClassFileParser::assemble_annotations(u1* runtime_visible_annota
}
static void initialize_static_field(fieldDescriptor* fd, TRAPS) {
KlassHandle h_k (THREAD, fd->field_holder());
assert(h_k.not_null() && fd->is_static(), "just checking");
if (fd->has_initial_value()) {
BasicType t = fd->field_type();
switch (t) {
case T_BYTE:
h_k()->byte_field_put(fd->offset(), fd->int_initial_value());
break;
case T_BOOLEAN:
h_k()->bool_field_put(fd->offset(), fd->int_initial_value());
break;
case T_CHAR:
h_k()->char_field_put(fd->offset(), fd->int_initial_value());
break;
case T_SHORT:
h_k()->short_field_put(fd->offset(), fd->int_initial_value());
break;
case T_INT:
h_k()->int_field_put(fd->offset(), fd->int_initial_value());
break;
case T_FLOAT:
h_k()->float_field_put(fd->offset(), fd->float_initial_value());
break;
case T_DOUBLE:
h_k()->double_field_put(fd->offset(), fd->double_initial_value());
break;
case T_LONG:
h_k()->long_field_put(fd->offset(), fd->long_initial_value());
break;
case T_OBJECT:
{
#ifdef ASSERT
TempNewSymbol sym = SymbolTable::new_symbol("Ljava/lang/String;", CHECK);
assert(fd->signature() == sym, "just checking");
#endif
oop string = fd->string_initial_value(CHECK);
h_k()->obj_field_put(fd->offset(), string);
}
break;
default:
THROW_MSG(vmSymbols::java_lang_ClassFormatError(),
"Illegal ConstantValue attribute in class file");
}
}
}
void ClassFileParser::java_lang_ref_Reference_fix_pre(typeArrayHandle* fields_ptr,
constantPoolHandle cp, FieldAllocationCount *fac_ptr, TRAPS) {
// This code is for compatibility with earlier jdk's that do not
@ -2769,8 +2722,8 @@ void ClassFileParser::java_lang_ref_Reference_fix_pre(typeArrayHandle* fields_pt
}
void ClassFileParser::java_lang_Class_fix_pre(objArrayHandle* methods_ptr,
FieldAllocationCount *fac_ptr, TRAPS) {
void ClassFileParser::java_lang_Class_fix_pre(int* nonstatic_field_size,
FieldAllocationCount *fac_ptr) {
// Add fake fields for java.lang.Class instances
//
// This is not particularly nice. We should consider adding a
@ -2787,10 +2740,13 @@ void ClassFileParser::java_lang_Class_fix_pre(objArrayHandle* methods_ptr,
// versions because when the offsets are computed at bootstrap
// time we don't know yet which version of the JDK we're running in.
// The values below are fake but will force two non-static oop fields and
// The values below are fake but will force three non-static oop fields and
// a corresponding non-static oop map block to be allocated.
const int extra = java_lang_Class::number_of_fake_oop_fields;
fac_ptr->nonstatic_oop_count += extra;
// Reserve some leading space for fake ints
*nonstatic_field_size += align_size_up(java_lang_Class::hc_number_of_fake_int_fields * BytesPerInt, heapOopSize) / heapOopSize;
}
@ -3205,9 +3161,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
int next_nonstatic_field_offset;
// Calculate the starting byte offsets
next_static_oop_offset = (instanceKlass::header_size() +
align_object_offset(vtable_size) +
align_object_offset(itable_size)) * wordSize;
next_static_oop_offset = instanceMirrorKlass::offset_of_static_fields();
next_static_double_offset = next_static_oop_offset +
(fac.static_oop_count * heapOopSize);
if ( fac.static_double_count &&
@ -3226,15 +3180,16 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
fac.static_byte_count ), wordSize );
static_field_size = (next_static_type_offset -
next_static_oop_offset) / wordSize;
first_nonstatic_field_offset = instanceOopDesc::base_offset_in_bytes() +
nonstatic_field_size * heapOopSize;
next_nonstatic_field_offset = first_nonstatic_field_offset;
// Add fake fields for java.lang.Class instances (also see below)
if (class_name == vmSymbols::java_lang_Class() && class_loader.is_null()) {
java_lang_Class_fix_pre(&methods, &fac, CHECK_(nullHandle));
java_lang_Class_fix_pre(&nonstatic_field_size, &fac);
}
first_nonstatic_field_offset = instanceOopDesc::base_offset_in_bytes() +
nonstatic_field_size * heapOopSize;
next_nonstatic_field_offset = first_nonstatic_field_offset;
// adjust the vmentry field declaration in java.lang.invoke.MethodHandle
if (EnableMethodHandles && class_name == vmSymbols::java_lang_invoke_MethodHandle() && class_loader.is_null()) {
java_lang_invoke_MethodHandle_fix_pre(cp, fields, &fac, CHECK_(nullHandle));
@ -3566,7 +3521,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
}
// We can now create the basic klassOop for this klass
klassOop ik = oopFactory::new_instanceKlass(vtable_size, itable_size,
klassOop ik = oopFactory::new_instanceKlass(name, vtable_size, itable_size,
static_field_size,
total_oop_map_count,
rt, CHECK_(nullHandle));
@ -3588,7 +3543,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
this_klass->set_class_loader(class_loader());
this_klass->set_nonstatic_field_size(nonstatic_field_size);
this_klass->set_has_nonstatic_fields(has_nonstatic_fields);
this_klass->set_static_oop_field_size(fac.static_oop_count);
this_klass->set_static_oop_field_count(fac.static_oop_count);
cp->set_pool_holder(this_klass());
error_handler.set_in_error(false); // turn off error handler for cp
this_klass->set_constants(cp());
@ -3649,9 +3604,6 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
// Make sure this is the end of class file stream
guarantee_property(cfs->at_eos(), "Extra bytes at the end of class file %s", CHECK_(nullHandle));
// Initialize static fields
this_klass->do_local_static_fields(&initialize_static_field, CHECK_(nullHandle));
// VerifyOops believes that once this has been set, the object is completely loaded.
// Compute transitive closure of interfaces this class implements
this_klass->set_transitive_interfaces(transitive_interfaces());
@ -3685,6 +3637,9 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
check_illegal_static_method(this_klass, CHECK_(nullHandle));
}
// Allocate mirror and initialize static fields
java_lang_Class::create_mirror(this_klass, CHECK_(nullHandle));
ClassLoadingService::notify_class_loaded(instanceKlass::cast(this_klass()),
false /* not shared class */);

View File

@ -154,11 +154,12 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
// Add the "discovered" field to java.lang.ref.Reference if
// it does not exist.
void java_lang_ref_Reference_fix_pre(typeArrayHandle* fields_ptr,
constantPoolHandle cp, FieldAllocationCount *fac_ptr, TRAPS);
constantPoolHandle cp,
FieldAllocationCount *fac_ptr, TRAPS);
// Adjust the field allocation counts for java.lang.Class to add
// fake fields.
void java_lang_Class_fix_pre(objArrayHandle* methods_ptr,
FieldAllocationCount *fac_ptr, TRAPS);
void java_lang_Class_fix_pre(int* nonstatic_field_size,
FieldAllocationCount *fac_ptr);
// Adjust the next_nonstatic_oop_offset to place the fake fields
// before any Java fields.
void java_lang_Class_fix_post(int* next_nonstatic_oop_offset);

View File

@ -33,6 +33,7 @@
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/klass.hpp"
#include "oops/klassOop.hpp"
#include "oops/methodOop.hpp"
@ -161,7 +162,7 @@ Handle java_lang_String::create_from_unicode(jchar* unicode, int length, TRAPS)
}
Handle java_lang_String::create_tenured_from_unicode(jchar* unicode, int length, TRAPS) {
return basic_create_from_unicode(unicode, length, true, CHECK_NH);
return basic_create_from_unicode(unicode, length, JavaObjectsInPerm, CHECK_NH);
}
oop java_lang_String::create_oop_from_unicode(jchar* unicode, int length, TRAPS) {
@ -391,6 +392,75 @@ void java_lang_String::print(Handle java_string, outputStream* st) {
}
}
static void initialize_static_field(fieldDescriptor* fd, TRAPS) {
Handle mirror (THREAD, fd->field_holder()->java_mirror());
assert(mirror.not_null() && fd->is_static(), "just checking");
if (fd->has_initial_value()) {
BasicType t = fd->field_type();
switch (t) {
case T_BYTE:
mirror()->byte_field_put(fd->offset(), fd->int_initial_value());
break;
case T_BOOLEAN:
mirror()->bool_field_put(fd->offset(), fd->int_initial_value());
break;
case T_CHAR:
mirror()->char_field_put(fd->offset(), fd->int_initial_value());
break;
case T_SHORT:
mirror()->short_field_put(fd->offset(), fd->int_initial_value());
break;
case T_INT:
mirror()->int_field_put(fd->offset(), fd->int_initial_value());
break;
case T_FLOAT:
mirror()->float_field_put(fd->offset(), fd->float_initial_value());
break;
case T_DOUBLE:
mirror()->double_field_put(fd->offset(), fd->double_initial_value());
break;
case T_LONG:
mirror()->long_field_put(fd->offset(), fd->long_initial_value());
break;
case T_OBJECT:
{
#ifdef ASSERT
TempNewSymbol sym = SymbolTable::new_symbol("Ljava/lang/String;", CHECK);
assert(fd->signature() == sym, "just checking");
#endif
oop string = fd->string_initial_value(CHECK);
mirror()->obj_field_put(fd->offset(), string);
}
break;
default:
THROW_MSG(vmSymbols::java_lang_ClassFormatError(),
"Illegal ConstantValue attribute in class file");
}
}
}
// During bootstrap, java.lang.Class wasn't loaded so static field
// offsets were computed without the size added in. Go back and
// update all the static field offsets to include the size.
static void fixup_static_field(fieldDescriptor* fd, TRAPS) {
if (fd->is_static()) {
int real_offset = fd->offset() + instanceMirrorKlass::offset_of_static_fields();
typeArrayOop fields = instanceKlass::cast(fd->field_holder())->fields();
fields->short_at_put(fd->index() + instanceKlass::low_offset, extract_low_short_from_int(real_offset));
fields->short_at_put(fd->index() + instanceKlass::high_offset, extract_high_short_from_int(real_offset));
}
}
void java_lang_Class::fixup_mirror(KlassHandle k, TRAPS) {
assert(instanceMirrorKlass::offset_of_static_fields() != 0, "must have been computed already");
if (k->oop_is_instance()) {
// Fixup the offsets
instanceKlass::cast(k())->do_local_static_fields(&fixup_static_field, CHECK);
}
create_mirror(k, CHECK);
}
oop java_lang_Class::create_mirror(KlassHandle k, TRAPS) {
assert(k->java_mirror() == NULL, "should only assign mirror once");
@ -400,12 +470,17 @@ oop java_lang_Class::create_mirror(KlassHandle k, TRAPS) {
// class is put into the system dictionary.
int computed_modifiers = k->compute_modifier_flags(CHECK_0);
k->set_modifier_flags(computed_modifiers);
if (SystemDictionary::Class_klass_loaded()) {
if (SystemDictionary::Class_klass_loaded() && (k->oop_is_instance() || k->oop_is_javaArray())) {
// Allocate mirror (java.lang.Class instance)
Handle mirror = instanceKlass::cast(SystemDictionary::Class_klass())->allocate_permanent_instance(CHECK_0);
Handle mirror = instanceMirrorKlass::cast(SystemDictionary::Class_klass())->allocate_instance(k, CHECK_0);
// Setup indirections
mirror->obj_field_put(klass_offset, k());
k->set_java_mirror(mirror());
instanceMirrorKlass* mk = instanceMirrorKlass::cast(mirror->klass());
java_lang_Class::set_oop_size(mirror(), mk->instance_size(k));
java_lang_Class::set_static_oop_field_count(mirror(), mk->compute_static_oop_field_count(mirror()));
// It might also have a component mirror. This mirror must already exist.
if (k->oop_is_javaArray()) {
Handle comp_mirror;
@ -428,6 +503,9 @@ oop java_lang_Class::create_mirror(KlassHandle k, TRAPS) {
arrayKlass::cast(k->as_klassOop())->set_component_mirror(comp_mirror());
set_array_klass(comp_mirror(), k->as_klassOop());
}
} else if (k->oop_is_instance()) {
// Initialize static fields
instanceKlass::cast(k())->do_local_static_fields(&initialize_static_field, CHECK_NULL);
}
return mirror();
} else {
@ -436,21 +514,46 @@ oop java_lang_Class::create_mirror(KlassHandle k, TRAPS) {
}
int java_lang_Class::oop_size(oop java_class) {
assert(oop_size_offset != 0, "must be set");
return java_class->int_field(oop_size_offset);
}
void java_lang_Class::set_oop_size(oop java_class, int size) {
assert(oop_size_offset != 0, "must be set");
java_class->int_field_put(oop_size_offset, size);
}
int java_lang_Class::static_oop_field_count(oop java_class) {
assert(static_oop_field_count_offset != 0, "must be set");
return java_class->int_field(static_oop_field_count_offset);
}
void java_lang_Class::set_static_oop_field_count(oop java_class, int size) {
assert(static_oop_field_count_offset != 0, "must be set");
java_class->int_field_put(static_oop_field_count_offset, size);
}
oop java_lang_Class::create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS) {
// This should be improved by adding a field at the Java level or by
// introducing a new VM klass (see comment in ClassFileParser)
oop java_class = instanceKlass::cast(SystemDictionary::Class_klass())->allocate_permanent_instance(CHECK_0);
oop java_class = instanceMirrorKlass::cast(SystemDictionary::Class_klass())->allocate_instance((oop)NULL, CHECK_0);
if (type != T_VOID) {
klassOop aklass = Universe::typeArrayKlassObj(type);
assert(aklass != NULL, "correct bootstrap");
set_array_klass(java_class, aklass);
}
instanceMirrorKlass* mk = instanceMirrorKlass::cast(SystemDictionary::Class_klass());
java_lang_Class::set_oop_size(java_class, mk->instance_size(oop(NULL)));
java_lang_Class::set_static_oop_field_count(java_class, 0);
return java_class;
}
klassOop java_lang_Class::as_klassOop(oop java_class) {
//%note memory_2
assert(java_lang_Class::is_instance(java_class), "must be a Class object");
klassOop k = klassOop(java_class->obj_field(klass_offset));
assert(k == NULL || k->is_klass(), "type check");
return k;
@ -2152,7 +2255,7 @@ void java_lang_boxing_object::print(BasicType type, jvalue* value, outputStream*
// Support for java_lang_ref_Reference
oop java_lang_ref_Reference::pending_list_lock() {
instanceKlass* ik = instanceKlass::cast(SystemDictionary::Reference_klass());
char *addr = (((char *)ik->start_of_static_fields()) + static_lock_offset);
address addr = ik->static_field_addr(static_lock_offset);
if (UseCompressedOops) {
return oopDesc::load_decode_heap_oop((narrowOop *)addr);
} else {
@ -2162,7 +2265,7 @@ oop java_lang_ref_Reference::pending_list_lock() {
HeapWord *java_lang_ref_Reference::pending_list_addr() {
instanceKlass* ik = instanceKlass::cast(SystemDictionary::Reference_klass());
char *addr = (((char *)ik->start_of_static_fields()) + static_pending_offset);
address addr = ik->static_field_addr(static_pending_offset);
// XXX This might not be HeapWord aligned, almost rather be char *.
return (HeapWord*)addr;
}
@ -2185,16 +2288,14 @@ jlong java_lang_ref_SoftReference::timestamp(oop ref) {
jlong java_lang_ref_SoftReference::clock() {
instanceKlass* ik = instanceKlass::cast(SystemDictionary::SoftReference_klass());
int offset = ik->offset_of_static_fields() + static_clock_offset;
return SystemDictionary::SoftReference_klass()->long_field(offset);
jlong* offset = (jlong*)ik->static_field_addr(static_clock_offset);
return *offset;
}
void java_lang_ref_SoftReference::set_clock(jlong value) {
instanceKlass* ik = instanceKlass::cast(SystemDictionary::SoftReference_klass());
int offset = ik->offset_of_static_fields() + static_clock_offset;
SystemDictionary::SoftReference_klass()->long_field_put(offset, value);
jlong* offset = (jlong*)ik->static_field_addr(static_clock_offset);
*offset = value;
}
@ -2625,26 +2726,18 @@ oop java_lang_ClassLoader::non_reflection_class_loader(oop loader) {
// Support for java_lang_System
void java_lang_System::compute_offsets() {
assert(offset_of_static_fields == 0, "offsets should be initialized only once");
instanceKlass* ik = instanceKlass::cast(SystemDictionary::System_klass());
offset_of_static_fields = ik->offset_of_static_fields();
}
int java_lang_System::in_offset_in_bytes() {
return (offset_of_static_fields + static_in_offset);
return (instanceMirrorKlass::offset_of_static_fields() + static_in_offset);
}
int java_lang_System::out_offset_in_bytes() {
return (offset_of_static_fields + static_out_offset);
return (instanceMirrorKlass::offset_of_static_fields() + static_out_offset);
}
int java_lang_System::err_offset_in_bytes() {
return (offset_of_static_fields + static_err_offset);
return (instanceMirrorKlass::offset_of_static_fields() + static_err_offset);
}
@ -2657,6 +2750,8 @@ int java_lang_Class::klass_offset;
int java_lang_Class::array_klass_offset;
int java_lang_Class::resolved_constructor_offset;
int java_lang_Class::number_of_fake_oop_fields;
int java_lang_Class::oop_size_offset;
int java_lang_Class::static_oop_field_count_offset;
int java_lang_Throwable::backtrace_offset;
int java_lang_Throwable::detailMessage_offset;
int java_lang_Throwable::cause_offset;
@ -2700,7 +2795,6 @@ int java_lang_ref_Reference::number_of_fake_oop_fields;
int java_lang_ref_SoftReference::timestamp_offset;
int java_lang_ref_SoftReference::static_clock_offset;
int java_lang_ClassLoader::parent_offset;
int java_lang_System::offset_of_static_fields;
int java_lang_System::static_in_offset;
int java_lang_System::static_out_offset;
int java_lang_System::static_err_offset;
@ -2817,10 +2911,19 @@ void JavaClasses::compute_hard_coded_offsets() {
java_lang_String::count_offset = java_lang_String::offset_offset + sizeof (jint);
java_lang_String::hash_offset = java_lang_String::count_offset + sizeof (jint);
// Do the Class Class
java_lang_Class::klass_offset = java_lang_Class::hc_klass_offset * x + header;
java_lang_Class::array_klass_offset = java_lang_Class::hc_array_klass_offset * x + header;
java_lang_Class::resolved_constructor_offset = java_lang_Class::hc_resolved_constructor_offset * x + header;
{
// Do the Class Class
int offset = header;
java_lang_Class::oop_size_offset = header;
offset += BytesPerInt;
java_lang_Class::static_oop_field_count_offset = offset;
offset = align_size_up(offset + BytesPerInt, x);
java_lang_Class::klass_offset = offset;
offset += x;
java_lang_Class::array_klass_offset = offset;
offset += x;
java_lang_Class::resolved_constructor_offset = offset;
}
// This is NOT an offset
java_lang_Class::number_of_fake_oop_fields = java_lang_Class::hc_number_of_fake_oop_fields;
@ -2877,7 +2980,6 @@ void JavaClasses::compute_hard_coded_offsets() {
void JavaClasses::compute_offsets() {
java_lang_Class::compute_offsets();
java_lang_System::compute_offsets();
java_lang_Thread::compute_offsets();
java_lang_ThreadGroup::compute_offsets();
if (EnableMethodHandles) {
@ -2961,10 +3063,10 @@ bool JavaClasses::check_static_offset(const char *klass_name, int hardcoded_offs
tty->print_cr("Static field %s.%s appears to be nonstatic", klass_name, field_name);
return false;
}
if (fd.offset() == hardcoded_offset + h_klass->offset_of_static_fields()) {
if (fd.offset() == hardcoded_offset + instanceMirrorKlass::offset_of_static_fields()) {
return true;
} else {
tty->print_cr("Offset of static field %s.%s is hardcoded as %d but should really be %d.", klass_name, field_name, hardcoded_offset, fd.offset() - h_klass->offset_of_static_fields());
tty->print_cr("Offset of static field %s.%s is hardcoded as %d but should really be %d.", klass_name, field_name, hardcoded_offset, fd.offset() - instanceMirrorKlass::offset_of_static_fields());
return false;
}
}

View File

@ -138,10 +138,8 @@ class java_lang_Class : AllStatic {
// The fake offsets are added by the class loader when java.lang.Class is loaded
enum {
hc_klass_offset = 0,
hc_array_klass_offset = 1,
hc_resolved_constructor_offset = 2,
hc_number_of_fake_oop_fields = 3
hc_number_of_fake_oop_fields = 3,
hc_number_of_fake_int_fields = 2
};
static int klass_offset;
@ -149,6 +147,9 @@ class java_lang_Class : AllStatic {
static int array_klass_offset;
static int number_of_fake_oop_fields;
static int oop_size_offset;
static int static_oop_field_count_offset;
static void compute_offsets();
static bool offsets_computed;
static int classRedefinedCount_offset;
@ -157,6 +158,7 @@ class java_lang_Class : AllStatic {
public:
// Instance creation
static oop create_mirror(KlassHandle k, TRAPS);
static void fixup_mirror(KlassHandle k, TRAPS);
static oop create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS);
// Conversion
static klassOop as_klassOop(oop java_class);
@ -191,6 +193,12 @@ class java_lang_Class : AllStatic {
static void set_classRedefinedCount(oop the_class_mirror, int value);
// Support for parallelCapable field
static bool parallelCapable(oop the_class_mirror);
static int oop_size(oop java_class);
static void set_oop_size(oop java_class, int size);
static int static_oop_field_count(oop java_class);
static void set_static_oop_field_count(oop java_class, int size);
// Debugging
friend class JavaClasses;
friend class instanceKlass; // verification code accesses offsets
@ -1165,13 +1173,10 @@ class java_lang_System : AllStatic {
hc_static_err_offset = 2
};
static int offset_of_static_fields;
static int static_in_offset;
static int static_out_offset;
static int static_err_offset;
static void compute_offsets();
public:
static int in_offset_in_bytes();
static int out_offset_in_bytes();

View File

@ -530,7 +530,7 @@ oop StringTable::basic_add(int index, Handle string_or_null, jchar* name,
Handle string;
// try to reuse the string if possible
if (!string_or_null.is_null() && string_or_null()->is_perm()) {
if (!string_or_null.is_null() && (!JavaObjectsInPerm || string_or_null()->is_perm())) {
string = string_or_null;
} else {
string = java_lang_String::create_tenured_from_unicode(name, len, CHECK_NULL);
@ -662,7 +662,7 @@ void StringTable::verify() {
for ( ; p != NULL; p = p->next()) {
oop s = p->literal();
guarantee(s != NULL, "interned string is NULL");
guarantee(s->is_perm(), "interned string not in permspace");
guarantee(s->is_perm() || !JavaObjectsInPerm, "interned string not in permspace");
int length;
jchar* chars = java_lang_String::as_unicode_string(s, length);

View File

@ -216,18 +216,14 @@ private:
oop basic_add(int index, Handle string_or_null, jchar* name, int len,
unsigned int hashValue, TRAPS);
// Table size
enum {
string_table_size = 1009
};
oop lookup(int index, jchar* chars, int length, unsigned int hashValue);
StringTable() : Hashtable<oop>(string_table_size, sizeof (HashtableEntry<oop>)) {}
StringTable() : Hashtable<oop>((int)StringTableSize,
sizeof (HashtableEntry<oop>)) {}
StringTable(HashtableBucket* t, int number_of_entries)
: Hashtable<oop>(string_table_size, sizeof (HashtableEntry<oop>), t,
number_of_entries) {}
: Hashtable<oop>((int)StringTableSize, sizeof (HashtableEntry<oop>), t,
number_of_entries) {}
public:
// The string table
@ -241,7 +237,7 @@ public:
static void create_table(HashtableBucket* t, int length,
int number_of_entries) {
assert(_the_table == NULL, "One string table allowed.");
assert(length == string_table_size * sizeof(HashtableBucket),
assert((size_t)length == StringTableSize * sizeof(HashtableBucket),
"bad shared string size.");
_the_table = new StringTable(t, number_of_entries);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -337,7 +337,6 @@ void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
if (is_live) {
// Perform cur->oops_do(f), maybe just once per nmethod.
f->do_code_blob(cur);
cur->fix_oop_relocations();
}
}
@ -552,6 +551,19 @@ void CodeCache::gc_epilogue() {
}
void CodeCache::verify_oops() {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
VerifyOopClosure voc;
FOR_ALL_ALIVE_BLOBS(cb) {
if (cb->is_nmethod()) {
nmethod *nm = (nmethod*)cb;
nm->oops_do(&voc);
nm->verify_oop_relocations();
}
}
}
address CodeCache::first_address() {
assert_locked_or_safepoint(CodeCache_lock);
return (address)_heap->begin();

View File

@ -122,6 +122,7 @@ class CodeCache : AllStatic {
// GC support
static void gc_epilogue();
static void gc_prologue();
static void verify_oops();
// If "unloading_occurred" is true, then unloads (i.e., breaks root links
// to) any unmarked codeBlobs in the cache. Sets "marked_for_unloading"
// to "true" iff some code got unloaded.

View File

@ -653,6 +653,9 @@ nmethod::nmethod(
_pc_desc_cache.reset_to(NULL);
code_buffer->copy_oops_to(this);
if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
CodeCache::add_scavenge_root_nmethod(this);
}
debug_only(verify_scavenge_root_oops());
CodeCache::commit(this);
}
@ -1105,6 +1108,20 @@ void nmethod::fix_oop_relocations(address begin, address end, bool initialize_im
}
void nmethod::verify_oop_relocations() {
// Ensure that the code matches the current oop values
RelocIterator iter(this, NULL, NULL);
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
oop_Relocation* reloc = iter.oop_reloc();
if (!reloc->oop_is_immediate()) {
reloc->verify_oop_relocation();
}
}
}
}
ScopeDesc* nmethod::scope_desc_at(address pc) {
PcDesc* pd = pc_desc_at(pc);
guarantee(pd != NULL, "scope must be present");
@ -1823,6 +1840,7 @@ void nmethod::oops_do_marking_epilogue() {
assert(cur != NULL, "not NULL-terminated");
nmethod* next = cur->_oops_do_mark_link;
cur->_oops_do_mark_link = NULL;
cur->fix_oop_relocations();
NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark\n"));
cur = next;
}

View File

@ -459,6 +459,7 @@ private:
public:
void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
void fix_oop_relocations() { fix_oop_relocations(NULL, NULL, false); }
void verify_oop_relocations();
bool is_at_poll_return(address pc);
bool is_at_poll_or_poll_return(address pc);

View File

@ -798,6 +798,14 @@ void oop_Relocation::fix_oop_relocation() {
}
void oop_Relocation::verify_oop_relocation() {
if (!oop_is_immediate()) {
// get the oop from the pool, and re-insert it into the instruction:
verify_value(value());
}
}
RelocIterator virtual_call_Relocation::parse_ic(nmethod* &nm, address &ic_call, address &first_oop,
oop* &oop_addr, bool *is_optimized) {
assert(ic_call != NULL, "ic_call address must be set");

View File

@ -765,7 +765,8 @@ class Relocation VALUE_OBJ_CLASS_SPEC {
protected:
// platform-dependent utilities for decoding and patching instructions
void pd_set_data_value (address x, intptr_t off); // a set or mem-ref
void pd_set_data_value (address x, intptr_t off, bool verify_only = false); // a set or mem-ref
void pd_verify_data_value (address x, intptr_t off) { pd_set_data_value(x, off, true); }
address pd_call_destination (address orig_addr = NULL);
void pd_set_call_destination (address x);
void pd_swap_in_breakpoint (address x, short* instrs, int instrlen);
@ -880,6 +881,12 @@ class DataRelocation : public Relocation {
else
pd_set_data_value(x, o);
}
void verify_value(address x) {
if (addr_in_const())
assert(*(address*)addr() == x, "must agree");
else
pd_verify_data_value(x, offset());
}
// The "o" (displacement) argument is relevant only to split relocations
// on RISC machines. In some CPUs (SPARC), the set-hi and set-lo ins'ns
@ -950,6 +957,8 @@ class oop_Relocation : public DataRelocation {
void fix_oop_relocation(); // reasserts oop value
void verify_oop_relocation();
address value() { return (address) *oop_addr(); }
bool oop_is_immediate() { return oop_index() == 0; }

View File

@ -5930,14 +5930,18 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
   }

   {
-    TraceTime t("scrub symbol & string tables", PrintGCDetails, false, gclog_or_tty);
-    // Now clean up stale oops in StringTable
-    StringTable::unlink(&_is_alive_closure);
+    TraceTime t("scrub symbol table", PrintGCDetails, false, gclog_or_tty);
     // Clean up unreferenced symbols in symbol table.
     SymbolTable::unlink();
   }
 }

+  if (should_unload_classes() || !JavaObjectsInPerm) {
+    TraceTime t("scrub string table", PrintGCDetails, false, gclog_or_tty);
+    // Now clean up stale oops in StringTable
+    StringTable::unlink(&_is_alive_closure);
+  }
verify_work_stacks_empty();
// Restore any preserved marks as a result of mark stack or
// work queue overflow

View File

@ -23,6 +23,7 @@
  */

 #include "precompiled.hpp"
+#include "classfile/symbolTable.hpp"
 #include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
 #include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
 #include "gc_implementation/parallelScavenge/generationSizer.hpp"
@ -439,6 +440,14 @@ bool PSScavenge::invoke_no_policy() {
     reference_processor()->enqueue_discovered_references(NULL);
   }

+  if (!JavaObjectsInPerm) {
+    // Unlink any dead interned Strings
+    StringTable::unlink(&_is_alive_closure);
+    // Process the remaining live ones
+    PSScavengeRootsClosure root_closure(promotion_manager);
+    StringTable::oops_do(&root_closure);
+  }
// Finally, flush the promotion_manager's labs, and deallocate its stacks.
PSPromotionManager::post_scavenge();

View File

@ -86,4 +86,21 @@ inline void PSScavenge::copy_and_push_safe_barrier(PSPromotionManager* pm,
   }
 }

+class PSScavengeRootsClosure: public OopClosure {
+ private:
+  PSPromotionManager* _promotion_manager;
+ protected:
+  template <class T> void do_oop_work(T *p) {
+    if (PSScavenge::should_scavenge(p)) {
+      // We never card mark roots, maybe call a func without test?
+      PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
+    }
+  }
+ public:
+  PSScavengeRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { }
+  void do_oop(oop* p)       { PSScavengeRootsClosure::do_oop_work(p); }
+  void do_oop(narrowOop* p) { PSScavengeRootsClosure::do_oop_work(p); }
+};

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSSCAVENGE_INLINE_HPP
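Moving PSScavengeRootsClosure into this header (it previously lived in psTasks.cpp; see the removal below) lets psScavenge.cpp reuse it for the new string-table pass above. A usage sketch under that assumption, with promotion_manager standing in for a live PSPromotionManager*:

    // Treat interned Strings as strong roots when they are not in perm:
    // unlink the dead entries, then push the survivors for scavenging.
    StringTable::unlink(&_is_alive_closure);
    PSScavengeRootsClosure root_closure(promotion_manager);
    StringTable::oops_do(&root_closure);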

View File

@ -30,7 +30,7 @@
 #include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
-#include "gc_implementation/parallelScavenge/psScavenge.hpp"
+#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
 #include "gc_implementation/parallelScavenge/psTasks.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
@ -46,24 +46,6 @@
 // ScavengeRootsTask
 //

-// Define before use
-class PSScavengeRootsClosure: public OopClosure {
- private:
-  PSPromotionManager* _promotion_manager;
- protected:
-  template <class T> void do_oop_work(T *p) {
-    if (PSScavenge::should_scavenge(p)) {
-      // We never card mark roots, maybe call a func without test?
-      PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
-    }
-  }
- public:
-  PSScavengeRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { }
-  void do_oop(oop* p)       { PSScavengeRootsClosure::do_oop_work(p); }
-  void do_oop(narrowOop* p) { PSScavengeRootsClosure::do_oop_work(p); }
-};
void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {
assert(Universe::heap()->is_gc_active(), "called outside gc");

View File

@ -656,7 +656,7 @@ BytecodeInterpreter::run(interpreterState istate) {
     // oop rcvr = locals[0].j.r;
     oop rcvr;
     if (METHOD->is_static()) {
-      rcvr = METHOD->constants()->pool_holder()->klass_part()->java_mirror();
+      rcvr = METHOD->constants()->pool_holder()->java_mirror();
     } else {
       rcvr = LOCALS_OBJECT(0);
       VERIFY_OOP(rcvr);
@ -2111,8 +2111,8 @@ run:
       break;

     case JVM_CONSTANT_Class:
-      VERIFY_OOP(constants->resolved_klass_at(index)->klass_part()->java_mirror());
-      SET_STACK_OBJECT(constants->resolved_klass_at(index)->klass_part()->java_mirror(), 0);
+      VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror());
+      SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0);
       break;

     case JVM_CONSTANT_UnresolvedString:

View File

@ -118,7 +118,7 @@ IRT_ENTRY(void, InterpreterRuntime::ldc(JavaThread* thread, bool wide))
   if (tag.is_unresolved_klass() || tag.is_klass()) {
     klassOop klass = pool->klass_at(index, CHECK);
-    oop java_class = klass->klass_part()->java_mirror();
+    oop java_class = klass->java_mirror();
     thread->set_vm_result(java_class);
   } else {
#ifdef ASSERT
@ -983,7 +983,8 @@ IRT_ENTRY(void, InterpreterRuntime::post_field_access(JavaThread *thread, oopDes
 ConstantPoolCacheEntry *cp_entry))
   // check the access_flags for the field in the klass
-  instanceKlass* ik = instanceKlass::cast((klassOop)cp_entry->f1());
+  instanceKlass* ik = instanceKlass::cast(java_lang_Class::as_klassOop(cp_entry->f1()));
   typeArrayOop fields = ik->fields();
int index = cp_entry->field_index();
assert(index < fields->length(), "holders field index is out of range");
@ -1009,7 +1010,7 @@ ConstantPoolCacheEntry *cp_entry))
     // non-static field accessors have an object, but we need a handle
     h_obj = Handle(thread, obj);
   }
-  instanceKlassHandle h_cp_entry_f1(thread, (klassOop)cp_entry->f1());
+  instanceKlassHandle h_cp_entry_f1(thread, java_lang_Class::as_klassOop(cp_entry->f1()));
   jfieldID fid = jfieldIDWorkaround::to_jfieldID(h_cp_entry_f1, cp_entry->f2(), is_static);
JvmtiExport::post_field_access(thread, method(thread), bcp(thread), h_cp_entry_f1, h_obj, fid);
IRT_END
@ -1017,7 +1018,7 @@ IRT_END
 IRT_ENTRY(void, InterpreterRuntime::post_field_modification(JavaThread *thread,
   oopDesc* obj, ConstantPoolCacheEntry *cp_entry, jvalue *value))

-  klassOop k = (klassOop)cp_entry->f1();
+  klassOop k = java_lang_Class::as_klassOop(cp_entry->f1());
// check the access_flags for the field in the klass
instanceKlass* ik = instanceKlass::cast(k);

View File

@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -105,7 +105,7 @@ private:
 public:
   enum {
-    vtbl_list_size = 16, // number of entries in the shared space vtable list.
+    vtbl_list_size = 17, // number of entries in the shared space vtable list.
     num_virtuals = 200   // number of virtual methods in Klass (or
                          // subclass) objects, or greater.
   };

View File

@ -1561,6 +1561,7 @@ void GenCollectedHeap::preload_and_dump(TRAPS) {
   // thread because it requires object allocation.
   LinkClassesClosure lcc(Thread::current());
   object_iterate(&lcc);
+  ensure_parsability(false); // arg is actually don't care
   tty->print_cr("done. ");

   // Create and dump the shared spaces.

View File

@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -117,12 +117,12 @@ constantPoolCacheOop oopFactory::new_constantPoolCache(int length,
 }

-klassOop oopFactory::new_instanceKlass(int vtable_len, int itable_len,
+klassOop oopFactory::new_instanceKlass(Symbol* name, int vtable_len, int itable_len,
                                        int static_field_size,
                                        unsigned int nonstatic_oop_map_count,
                                        ReferenceType rt, TRAPS) {
   instanceKlassKlass* ikk = instanceKlassKlass::cast(Universe::instanceKlassKlassObj());
-  return ikk->allocate_instance_klass(vtable_len, itable_len, static_field_size, nonstatic_oop_map_count, rt, CHECK_NULL);
+  return ikk->allocate_instance_klass(name, vtable_len, itable_len, static_field_size, nonstatic_oop_map_count, rt, CHECK_NULL);
 }

View File

@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -72,7 +72,8 @@ class oopFactory: AllStatic {
                                     TRAPS);

   // Instance classes
-  static klassOop new_instanceKlass(int vtable_len, int itable_len,
+  static klassOop new_instanceKlass(Symbol* name,
+                                    int vtable_len, int itable_len,
                                     int static_field_size,
                                     unsigned int nonstatic_oop_map_count,
                                     ReferenceType rt, TRAPS);

View File

@ -171,11 +171,13 @@ void SharedHeap::process_strong_roots(bool activate_scope,
   }

   if (!_process_strong_tasks->is_task_claimed(SH_PS_StringTable_oops_do)) {
-    if (so & SO_Strings) {
-      StringTable::oops_do(roots);
-    }
-    // Verify if the string table contents are in the perm gen
-    NOT_PRODUCT(StringTable::oops_do(&assert_is_perm_closure));
+    if (so & SO_Strings || (!collecting_perm_gen && !JavaObjectsInPerm)) {
+      StringTable::oops_do(roots);
+    }
+    if (JavaObjectsInPerm) {
+      // Verify the string table contents are in the perm gen
+      NOT_PRODUCT(StringTable::oops_do(&assert_is_perm_closure));
+    }
   }
if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {

View File

@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -51,6 +51,7 @@
 #include "oops/cpCacheKlass.hpp"
 #include "oops/cpCacheOop.hpp"
 #include "oops/instanceKlass.hpp"
+#include "oops/instanceMirrorKlass.hpp"
 #include "oops/instanceKlassKlass.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/klassKlass.hpp"
@ -521,6 +522,7 @@ void Universe::init_self_patching_vtbl_list(void** list, int count) {
   { objArrayKlassKlass o;  add_vtable(list, &n, &o, count); }
   { instanceKlassKlass o;  add_vtable(list, &n, &o, count); }
   { instanceKlass o;       add_vtable(list, &n, &o, count); }
+  { instanceMirrorKlass o; add_vtable(list, &n, &o, count); }
   { instanceRefKlass o;    add_vtable(list, &n, &o, count); }
   { typeArrayKlassKlass o; add_vtable(list, &n, &o, count); }
   { typeArrayKlass o;      add_vtable(list, &n, &o, count); }
@ -547,7 +549,7 @@ class FixupMirrorClosure: public ObjectClosure {
     KlassHandle k(THREAD, klassOop(obj));
     // We will never reach the CATCH below since Exceptions::_throw will cause
     // the VM to exit if an exception is thrown during initialization
-    java_lang_Class::create_mirror(k, CATCH);
+    java_lang_Class::fixup_mirror(k, CATCH);
// This call unconditionally creates a new mirror for k,
// and links in k's component_mirror field if k is an array.
// If k is an objArray, k's element type must already have
@ -605,6 +607,10 @@ void Universe::fixup_mirrors(TRAPS) {
   // walk over permanent objects created so far (mostly classes) and fixup their mirrors. Note
   // that the number of objects allocated at this point is very small.
   assert(SystemDictionary::Class_klass_loaded(), "java.lang.Class should be loaded");
+
+  // Cache the start of the static fields
+  instanceMirrorKlass::init_offset_of_static_fields();
+
   FixupMirrorClosure blk;
   Universe::heap()->permanent_object_iterate(&blk);
 }
@ -1313,6 +1319,8 @@ void Universe::verify(bool allow_dirty, bool silent, bool option) {
   JNIHandles::verify();
   if (!silent) gclog_or_tty->print("C-heap ");
   os::check_heap();
+  if (!silent) gclog_or_tty->print("code cache ");
+  CodeCache::verify_oops();
   if (!silent) gclog_or_tty->print_cr("]");

   _verify_in_progress = false;

View File

@ -28,6 +28,13 @@
 #include "oops/arrayKlassKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/handles.inline.hpp"
+#ifndef SERIALGC
+#include "gc_implementation/parNew/parOopClosures.inline.hpp"
+#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
+#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
+#include "memory/cardTableRS.hpp"
+#include "oops/oop.pcgc.inline.hpp"
+#endif
klassOop arrayKlassKlass::create_klass(TRAPS) {
@ -104,9 +111,12 @@ int arrayKlassKlass::oop_oop_iterate(oop obj, OopClosure* blk) {
 int arrayKlassKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) {
   assert(obj->is_klass(), "must be klass");
   arrayKlass* ak = arrayKlass::cast(klassOop(obj));
-  blk->do_oop(ak->adr_component_mirror());
-  blk->do_oop(ak->adr_lower_dimension());
-  blk->do_oop(ak->adr_higher_dimension());
+  oop* addr = ak->adr_component_mirror();
+  if (mr.contains(addr)) blk->do_oop(addr);
+  addr = ak->adr_lower_dimension();
+  if (mr.contains(addr)) blk->do_oop(addr);
+  addr = ak->adr_higher_dimension();
+  if (mr.contains(addr)) blk->do_oop(addr);
   ak->vtable()->oop_oop_iterate_m(blk, mr);
   return klassKlass::oop_oop_iterate_m(obj, blk, mr);
 }
@ -114,6 +124,12 @@ int arrayKlassKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) {
 #ifndef SERIALGC
 void arrayKlassKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
   assert(obj->blueprint()->oop_is_arrayKlass(),"must be an array klass");
+  arrayKlass* ak = arrayKlass::cast(klassOop(obj));
+  oop* p = ak->adr_component_mirror();
+  if (PSScavenge::should_scavenge(p)) {
+    pm->claim_or_forward_depth(p);
+  }
+  klassKlass::oop_push_contents(pm, obj);
 }
int arrayKlassKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {

View File

@ -285,10 +285,11 @@ int constantPoolKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
 void constantPoolKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
   assert(obj->is_constantPool(), "should be constant pool");
   constantPoolOop cp = (constantPoolOop) obj;
-  if (AnonymousClasses && cp->has_pseudo_string() && cp->tags() != NULL) {
-    oop* base = (oop*)cp->base();
-    for (int i = 0; i < cp->length(); ++i, ++base) {
+  if (cp->tags() != NULL &&
+      (!JavaObjectsInPerm || (AnonymousClasses && cp->has_pseudo_string()))) {
+    for (int i = 1; i < cp->length(); ++i) {
       if (cp->tag_at(i).is_string()) {
+        oop* base = cp->obj_at_addr_raw(i);
         if (PSScavenge::should_scavenge(base)) {
           pm->claim_or_forward_depth(base);
         }
@ -460,7 +461,8 @@ void constantPoolKlass::oop_verify_on(oop obj, outputStream* st) {
       if (cp->tag_at(i).is_string()) {
         if (!cp->has_pseudo_string()) {
           if (entry.is_oop()) {
-            guarantee(entry.get_oop()->is_perm(), "should be in permspace");
+            guarantee(!JavaObjectsInPerm || entry.get_oop()->is_perm(),
+                      "should be in permspace");
             guarantee(entry.get_oop()->is_instance(), "should be instance");
           }
         } else {

View File

@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -481,7 +481,7 @@ oop constantPoolOopDesc::resolve_constant_at_impl(constantPoolHandle this_oop, i
   {
     klassOop resolved = klass_at_impl(this_oop, index, CHECK_NULL);
     // ldc wants the java mirror.
-    result_oop = resolved->klass_part()->java_mirror();
+    result_oop = resolved->java_mirror();
     break;
   }

View File

@ -168,22 +168,18 @@ int constantPoolCacheKlass::oop_adjust_pointers(oop obj) {
 void constantPoolCacheKlass::oop_push_contents(PSPromotionManager* pm,
                                                oop obj) {
   assert(obj->is_constantPoolCache(), "should be constant pool");
-  if (EnableInvokeDynamic) {
+  if (ScavengeRootsInCode) {
     constantPoolCacheOop cache = (constantPoolCacheOop)obj;
     // during a scavenge, it is safe to inspect my pool, since it is perm
     constantPoolOop pool = cache->constant_pool();
     assert(pool->is_constantPool(), "should be constant pool");
-    if (pool->has_invokedynamic()) {
-      for (int i = 0; i < cache->length(); i++) {
-        ConstantPoolCacheEntry* e = cache->entry_at(i);
-        oop* p = (oop*)&e->_f1;
-        if (e->is_secondary_entry()) {
-          if (PSScavenge::should_scavenge(p))
-            pm->claim_or_forward_depth(p);
-          assert(!(e->is_vfinal() && PSScavenge::should_scavenge((oop*)&e->_f2)),
-                 "no live oops here");
-        }
-      }
+    for (int i = 0; i < cache->length(); i++) {
+      ConstantPoolCacheEntry* e = cache->entry_at(i);
+      oop* p = (oop*)&e->_f1;
+      if (PSScavenge::should_scavenge(p))
+        pm->claim_or_forward_depth(p);
+      assert(!(e->is_vfinal() && PSScavenge::should_scavenge((oop*)&e->_f2)),
+             "no live oops here");
     }
   }
 }

View File

@ -133,7 +133,7 @@ void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
                                        TosState field_type,
                                        bool is_final,
                                        bool is_volatile) {
-  set_f1(field_holder());
+  set_f1(field_holder()->java_mirror());
   set_f2(field_offset);
   // The field index is used by jvm/ti and is the index into fields() array
   // in holder instanceKlass. This is scaled by instanceKlass::next_offset.
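// Note on the f1 encoding (illustrative, not part of this hunk): f1 now
// caches the holder's java.lang.Class mirror instead of its klassOop, so
// readers decode it the way the InterpreterRuntime hunks above do:
//   klassOop k = java_lang_Class::as_klassOop(cp_entry->f1());
// This keeps f1 a heap oop even when class mirrors move out of perm gen.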

View File

@ -37,6 +37,7 @@
 #include "memory/oopFactory.hpp"
 #include "memory/permGen.hpp"
 #include "oops/instanceKlass.hpp"
+#include "oops/instanceMirrorKlass.hpp"
 #include "oops/instanceOop.hpp"
 #include "oops/methodOop.hpp"
 #include "oops/objArrayKlassKlass.hpp"
@ -649,6 +650,7 @@ instanceOop instanceKlass::register_finalizer(instanceOop i, TRAPS) {
 }

 instanceOop instanceKlass::allocate_instance(TRAPS) {
+  assert(!oop_is_instanceMirror(), "wrong allocation path");
   bool has_finalizer_flag = has_finalizer(); // Query before possible GC
   int size = size_helper();  // Query before forming handle.

@ -669,6 +671,7 @@ instanceOop instanceKlass::allocate_permanent_instance(TRAPS) {
   // instances so simply disallow finalizable perm objects. This can
   // be relaxed if a need for it is found.
   assert(!has_finalizer(), "perm objects not allowed to have finalizers");
+  assert(!oop_is_instanceMirror(), "wrong allocation path");
   int size = size_helper();  // Query before forming handle.
   KlassHandle h_k(THREAD, as_klassOop());
   instanceOop i = (instanceOop)
@ -898,6 +901,7 @@ void instanceKlass::methods_do(void f(methodOop method)) {
}
}
void instanceKlass::do_local_static_fields(FieldClosure* cl) {
fieldDescriptor fd;
int length = fields()->length();
@ -1609,36 +1613,6 @@ template <class T> void assert_nothing(T *p) {}
 // The following macros call specialized macros, passing either oop or
 // narrowOop as the specialization type.  These test the UseCompressedOops
 // flag.
-#define InstanceKlass_OOP_ITERATE(start_p, count,            \
-                                  do_oop, assert_fn)         \
-{                                                            \
-  if (UseCompressedOops) {                                   \
-    InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop,         \
-                                          start_p, count,    \
-                                          do_oop, assert_fn) \
-  } else {                                                   \
-    InstanceKlass_SPECIALIZED_OOP_ITERATE(oop,               \
-                                          start_p, count,    \
-                                          do_oop, assert_fn) \
-  }                                                          \
-}
-
-#define InstanceKlass_BOUNDED_OOP_ITERATE(start_p, count, low, high,     \
-                                          do_oop, assert_fn)             \
-{                                                                        \
-  if (UseCompressedOops) {                                               \
-    InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop,             \
-                                                  start_p, count,        \
-                                                  low, high,             \
-                                                  do_oop, assert_fn)     \
-  } else {                                                               \
-    InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop,                   \
-                                                  start_p, count,        \
-                                                  low, high,             \
-                                                  do_oop, assert_fn)     \
-  }                                                                      \
-}
#define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn) \
{ \
/* Compute oopmap block range. The common case \
@ -1711,38 +1685,6 @@ template <class T> void assert_nothing(T *p) {}
   }                             \
 }

-void instanceKlass::follow_static_fields() {
-  InstanceKlass_OOP_ITERATE( \
-    start_of_static_fields(), static_oop_field_size(), \
-    MarkSweep::mark_and_push(p), \
-    assert_is_in_closed_subset)
-}
-
-#ifndef SERIALGC
-void instanceKlass::follow_static_fields(ParCompactionManager* cm) {
-  InstanceKlass_OOP_ITERATE( \
-    start_of_static_fields(), static_oop_field_size(), \
-    PSParallelCompact::mark_and_push(cm, p), \
-    assert_is_in)
-}
-#endif // SERIALGC
-
-void instanceKlass::adjust_static_fields() {
-  InstanceKlass_OOP_ITERATE( \
-    start_of_static_fields(), static_oop_field_size(), \
-    MarkSweep::adjust_pointer(p), \
-    assert_nothing)
-}
-
-#ifndef SERIALGC
-void instanceKlass::update_static_fields() {
-  InstanceKlass_OOP_ITERATE( \
-    start_of_static_fields(), static_oop_field_size(), \
-    PSParallelCompact::adjust_pointer(p), \
-    assert_nothing)
-}
-#endif // SERIALGC
void instanceKlass::oop_follow_contents(oop obj) {
assert(obj != NULL, "can't follow the content of NULL object");
obj->follow_header();
@ -1829,22 +1771,6 @@ ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
 #endif // !SERIALGC

-void instanceKlass::iterate_static_fields(OopClosure* closure) {
-  InstanceKlass_OOP_ITERATE( \
-    start_of_static_fields(), static_oop_field_size(), \
-    closure->do_oop(p), \
-    assert_is_in_reserved)
-}
-
-void instanceKlass::iterate_static_fields(OopClosure* closure,
-                                          MemRegion mr) {
-  InstanceKlass_BOUNDED_OOP_ITERATE( \
-    start_of_static_fields(), static_oop_field_size(), \
-    mr.start(), mr.end(), \
-    (closure)->do_oop_v(p), \
-    assert_is_in_closed_subset)
-}
int instanceKlass::oop_adjust_pointers(oop obj) {
int size = size_helper();
InstanceKlass_OOP_MAP_ITERATE( \
@ -1873,21 +1799,6 @@ int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
   return size_helper();
 }

-void instanceKlass::push_static_fields(PSPromotionManager* pm) {
-  InstanceKlass_OOP_ITERATE( \
-    start_of_static_fields(), static_oop_field_size(), \
-    if (PSScavenge::should_scavenge(p)) { \
-      pm->claim_or_forward_depth(p); \
-    }, \
-    assert_nothing )
-}
-
-void instanceKlass::copy_static_fields(ParCompactionManager* cm) {
-  InstanceKlass_OOP_ITERATE( \
-    start_of_static_fields(), static_oop_field_size(), \
-    PSParallelCompact::adjust_pointer(p), \
-    assert_is_in)
-}
 #endif // SERIALGC
// This klass is alive but the implementor link is not followed/updated.
@ -2002,6 +1913,11 @@ void instanceKlass::set_source_debug_extension(Symbol* n) {
   if (_source_debug_extension != NULL) _source_debug_extension->increment_refcount();
 }

+address instanceKlass::static_field_addr(int offset) {
+  return (address)(offset + instanceMirrorKlass::offset_of_static_fields() + (intptr_t)java_mirror());
+}
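// Worked example (hypothetical numbers, not from this source): with a
// mirror at address 0x1000 and offset_of_static_fields() == 96 bytes,
//   static_field_addr(8) == (address)0x1000 + 96 + 8 == 0x1068
// i.e. statics are now addressed relative to the java.lang.Class
// instance rather than relative to the klassOop.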
const char* instanceKlass::signature_name() const {
const char* src = (const char*) (name()->as_C_string());
const int src_length = (int)strlen(src);
@ -2369,7 +2285,7 @@ nmethod* instanceKlass::lookup_osr_nmethod(const methodOop m, int bci, int comp_
 void FieldPrinter::do_field(fieldDescriptor* fd) {
   _st->print(BULLET);
-  if (fd->is_static() || (_obj == NULL)) {
+  if (_obj == NULL) {
     fd->print_on(_st);
     _st->cr();
   } else {
@ -2399,8 +2315,8 @@ void instanceKlass::oop_print_on(oop obj, outputStream* st) {
   }

   st->print_cr(BULLET"---- fields (total size %d words):", oop_size(obj));
-  FieldPrinter print_nonstatic_field(st, obj);
-  do_nonstatic_fields(&print_nonstatic_field);
+  FieldPrinter print_field(st, obj);
+  do_nonstatic_fields(&print_field);

   if (as_klassOop() == SystemDictionary::Class_klass()) {
     st->print(BULLET"signature: ");
@ -2418,6 +2334,12 @@ void instanceKlass::oop_print_on(oop obj, outputStream* st) {
     st->print(BULLET"fake entry for array: ");
     array_klass->print_value_on(st);
     st->cr();
+    st->print_cr(BULLET"fake entry for oop_size: %d", java_lang_Class::oop_size(obj));
+    st->print_cr(BULLET"fake entry for static_oop_field_count: %d", java_lang_Class::static_oop_field_count(obj));
+    klassOop real_klass = java_lang_Class::as_klassOop(obj);
+    if (real_klass && real_klass->klass_part()->oop_is_instance()) {
+      instanceKlass::cast(real_klass)->do_local_static_fields(&print_field);
+    }
   } else if (as_klassOop() == SystemDictionary::MethodType_klass()) {
st->print(BULLET"signature: ");
java_lang_invoke_MethodType::print_signature(obj, st);
@ -2560,7 +2482,7 @@ void JNIid::deallocate(JNIid* current) {
 void JNIid::verify(klassOop holder) {
-  int first_field_offset = instanceKlass::cast(holder)->offset_of_static_fields();
+  int first_field_offset = instanceMirrorKlass::offset_of_static_fields();
   int end_field_offset;
   end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize);

View File

@ -75,8 +75,6 @@
 //    [Java vtable length         ]
 //    [oop map cache (stack maps) ]
 //    [EMBEDDED Java vtable             ] size in words = vtable_len
-//    [EMBEDDED static oop fields       ] size in words = static_oop_fields_size
-//    [         static non-oop fields   ] size in words = static_field_size - static_oop_fields_size
 //    [EMBEDDED nonstatic oop-map blocks] size in words = nonstatic_oop_map_size
 //
// The embedded nonstatic oop-map blocks are short pairs (offset, length) indicating
@ -230,7 +228,7 @@ class instanceKlass: public Klass {
   // (including inherited fields but after header_size()).
   int _nonstatic_field_size;
   int _static_field_size;     // number words used by static fields (oop and non-oop) in this klass
-  int _static_oop_field_size; // number of static oop fields in this klass
+  int _static_oop_field_count;// number of static oop fields in this klass
   int _nonstatic_oop_map_size;// size in words of nonstatic oop map blocks
   bool _is_marked_dependent;  // used for marking during flushing and deoptimization
   bool _rewritten;            // methods rewritten.
@ -281,8 +279,8 @@ class instanceKlass: public Klass {
   int static_field_size() const             { return _static_field_size; }
   void set_static_field_size(int size)      { _static_field_size = size; }

-  int static_oop_field_size() const         { return _static_oop_field_size; }
-  void set_static_oop_field_size(int size)  { _static_oop_field_size = size; }
+  int static_oop_field_count() const        { return _static_oop_field_count; }
+  void set_static_oop_field_count(int size) { _static_oop_field_count = size; }

   // Java vtable
   int vtable_length() const                 { return _vtable_len; }
@ -660,6 +658,7 @@ class instanceKlass: public Klass {
   // Casting from klassOop
   static instanceKlass* cast(klassOop k) {
+    assert(k->is_klass(), "must be");
     Klass* kp = k->klass_part();
     assert(kp->null_vtbl() || kp->oop_is_instance_slow(), "cast to instanceKlass");
     return (instanceKlass*) kp;
@ -667,7 +666,7 @@ class instanceKlass: public Klass {
   // Sizing (in words)
   static int header_size()          { return align_object_offset(oopDesc::header_size() + sizeof(instanceKlass)/HeapWordSize); }
-  int object_size() const           { return object_size(align_object_offset(vtable_length()) + align_object_offset(itable_length()) + static_field_size() + nonstatic_oop_map_size()); }
+  int object_size() const           { return object_size(align_object_offset(vtable_length()) + align_object_offset(itable_length()) + nonstatic_oop_map_size()); }
   static int vtable_start_offset()  { return header_size(); }
   static int vtable_length_offset() { return oopDesc::header_size() + offset_of(instanceKlass, _vtable_len) / HeapWordSize; }
   static int object_size(int extra) { return align_object_size(header_size() + extra); }
@ -676,20 +675,12 @@ class instanceKlass: public Klass {
   intptr_t* start_of_itable() const   { return start_of_vtable() + align_object_offset(vtable_length()); }
   int itable_offset_in_words() const  { return start_of_itable() - (intptr_t*)as_klassOop(); }

-  // Static field offset is an offset into the Heap, should be converted by
-  // based on UseCompressedOop for traversal
-  HeapWord* start_of_static_fields() const {
-    return (HeapWord*)(start_of_itable() + align_object_offset(itable_length()));
-  }
-
   intptr_t* end_of_itable() const     { return start_of_itable() + itable_length(); }

-  int offset_of_static_fields() const {
-    return (intptr_t)start_of_static_fields() - (intptr_t)as_klassOop();
-  }
+  address static_field_addr(int offset);

   OopMapBlock* start_of_nonstatic_oop_maps() const {
-    return (OopMapBlock*) (start_of_static_fields() + static_field_size());
+    return (OopMapBlock*)(start_of_itable() + align_object_offset(itable_length()));
   }
// Allocation profiling support
@ -719,8 +710,6 @@ class instanceKlass: public Klass {
   // Garbage collection
   void oop_follow_contents(oop obj);
-  void follow_static_fields();
-  void adjust_static_fields();
   int oop_adjust_pointers(oop obj);
   bool object_is_parsable() const { return _init_state != unparsable_by_gc; }
        // Value of _init_state must be zero (unparsable_by_gc) when klass field is set.
@ -732,16 +721,6 @@ class instanceKlass: public Klass {
   // Parallel Scavenge and Parallel Old
   PARALLEL_GC_DECLS

-#ifndef SERIALGC
-  // Parallel Scavenge
-  void push_static_fields(PSPromotionManager* pm);
-
-  // Parallel Old
-  void follow_static_fields(ParCompactionManager* cm);
-  void copy_static_fields(ParCompactionManager* cm);
-  void update_static_fields();
-#endif // SERIALGC
// Naming
const char* signature_name() const;
@ -770,9 +749,6 @@ class instanceKlass: public Klass {
   ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
 #endif // !SERIALGC

-  void iterate_static_fields(OopClosure* closure);
-  void iterate_static_fields(OopClosure* closure, MemRegion mr);
private:
// initialization state
#ifdef ASSERT
@ -926,6 +902,10 @@ class JNIid: public CHeapObj {
   // Identifier lookup
   JNIid* find(int offset);

+  bool find_local_field(fieldDescriptor* fd) {
+    return instanceKlass::cast(holder())->find_local_field_from_offset(offset(), true, fd);
+  }
// Garbage collection support
oop* holder_addr() { return (oop*)&_holder; }
void oops_do(OopClosure* f);

View File

@ -31,6 +31,7 @@
 #include "memory/gcLocker.hpp"
 #include "oops/constantPoolOop.hpp"
 #include "oops/instanceKlass.hpp"
+#include "oops/instanceMirrorKlass.hpp"
 #include "oops/instanceKlassKlass.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/objArrayKlassKlass.hpp"
@ -86,7 +87,6 @@ void instanceKlassKlass::oop_follow_contents(oop obj) {
   assert(klassOop(obj)->klass_part()->oop_is_instance_slow(), "must be instance klass");
   instanceKlass* ik = instanceKlass::cast(klassOop(obj));
-  ik->follow_static_fields();
   {
     HandleMark hm;
     ik->vtable()->oop_follow_contents();

@ -127,7 +127,6 @@ void instanceKlassKlass::oop_follow_contents(ParCompactionManager* cm,
   assert(klassOop(obj)->klass_part()->oop_is_instance_slow(), "must be instance klass");
   instanceKlass* ik = instanceKlass::cast(klassOop(obj));
-  ik->follow_static_fields(cm);
   ik->vtable()->oop_follow_contents(cm);
   ik->itable()->oop_follow_contents(cm);
@ -168,7 +167,6 @@ int instanceKlassKlass::oop_oop_iterate(oop obj, OopClosure* blk) {
   // Don't call size() or oop_size() since that is a virtual call.
   int size = ik->object_size();

-  ik->iterate_static_fields(blk);
   ik->vtable()->oop_oop_iterate(blk);
   ik->itable()->oop_oop_iterate(blk);

@ -209,7 +207,6 @@ int instanceKlassKlass::oop_oop_iterate_m(oop obj, OopClosure* blk,
   // Don't call size() or oop_size() since that is a virtual call.
   int size = ik->object_size();

-  ik->iterate_static_fields(blk, mr);
   ik->vtable()->oop_oop_iterate_m(blk, mr);
   ik->itable()->oop_oop_iterate_m(blk, mr);
@ -266,7 +263,6 @@ int instanceKlassKlass::oop_adjust_pointers(oop obj) {
   assert(klassOop(obj)->klass_part()->oop_is_instance_slow(), "must be instance klass");
   instanceKlass* ik = instanceKlass::cast(klassOop(obj));
-  ik->adjust_static_fields();
   ik->vtable()->oop_adjust_pointers();
   ik->itable()->oop_adjust_pointers();

@ -300,7 +296,6 @@ int instanceKlassKlass::oop_adjust_pointers(oop obj) {
 #ifndef SERIALGC
 void instanceKlassKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
   instanceKlass* ik = instanceKlass::cast(klassOop(obj));
-  ik->push_static_fields(pm);
   oop* loader_addr = ik->adr_class_loader();
   if (PSScavenge::should_scavenge(loader_addr)) {

@ -336,7 +331,6 @@ int instanceKlassKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
          "must be instance klass");
   instanceKlass* ik = instanceKlass::cast(klassOop(obj));
-  ik->update_static_fields();
   ik->vtable()->oop_update_pointers(cm);
   ik->itable()->oop_update_pointers(cm);
@ -356,22 +350,28 @@ int instanceKlassKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
#endif // SERIALGC
 klassOop
-instanceKlassKlass::allocate_instance_klass(int vtable_len, int itable_len,
+instanceKlassKlass::allocate_instance_klass(Symbol* name, int vtable_len, int itable_len,
                                             int static_field_size,
                                             unsigned nonstatic_oop_map_count,
                                             ReferenceType rt, TRAPS) {

   const int nonstatic_oop_map_size =
     instanceKlass::nonstatic_oop_map_size(nonstatic_oop_map_count);
-  int size = instanceKlass::object_size(align_object_offset(vtable_len) + align_object_offset(itable_len) + static_field_size + nonstatic_oop_map_size);
+  int size = instanceKlass::object_size(align_object_offset(vtable_len) + align_object_offset(itable_len) + nonstatic_oop_map_size);

   // Allocation
   KlassHandle h_this_klass(THREAD, as_klassOop());
   KlassHandle k;
   if (rt == REF_NONE) {
-    // regular klass
-    instanceKlass o;
-    k = base_create_klass(h_this_klass, size, o.vtbl_value(), CHECK_NULL);
+    if (name != vmSymbols::java_lang_Class()) {
+      // regular klass
+      instanceKlass o;
+      k = base_create_klass(h_this_klass, size, o.vtbl_value(), CHECK_NULL);
+    } else {
+      // Class
+      instanceMirrorKlass o;
+      k = base_create_klass(h_this_klass, size, o.vtbl_value(), CHECK_NULL);
+    }
} else {
// reference klass
instanceRefKlass o;
@ -408,7 +408,7 @@ instanceKlassKlass::allocate_instance_klass(int vtable_len, int itable_len,
     ik->set_source_debug_extension(NULL);
     ik->set_array_name(NULL);
     ik->set_inner_classes(NULL);
-    ik->set_static_oop_field_size(0);
+    ik->set_static_oop_field_count(0);
ik->set_nonstatic_field_size(0);
ik->set_is_marked_dependent(false);
ik->set_init_state(instanceKlass::allocated);
@ -442,9 +442,6 @@ instanceKlassKlass::allocate_instance_klass(int vtable_len, int itable_len,
     // To get verify to work - must be set to partial loaded before first GC point.
     k()->set_partially_loaded();
   }

-  // GC can happen here
-  java_lang_Class::create_mirror(k, CHECK_NULL); // Allocate mirror
   return k();
 }
@ -566,13 +563,6 @@ void instanceKlassKlass::oop_print_on(oop obj, outputStream* st) {
   FieldPrinter print_nonstatic_field(st);
   ik->do_nonstatic_fields(&print_nonstatic_field);

-  st->print(BULLET"static oop maps: ");
-  if (ik->static_oop_field_size() > 0) {
-    int first_offset = ik->offset_of_static_fields();
-    st->print("%d-%d", first_offset, first_offset + ik->static_oop_field_size() - 1);
-  }
-  st->cr();
-
   st->print(BULLET"non-static oop maps: ");
   OopMapBlock* map = ik->start_of_nonstatic_oop_maps();
   OopMapBlock* end_map = map + ik->nonstatic_oop_map_count();
@ -630,7 +620,6 @@ void instanceKlassKlass::oop_verify_on(oop obj, outputStream* st) {
     // Verify static fields
     VerifyFieldClosure blk;
-    ik->iterate_static_fields(&blk);

     // Verify vtables
     if (ik->is_linked()) {

View File

@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,7 +41,8 @@ class instanceKlassKlass : public klassKlass {
   // Allocation
   DEFINE_ALLOCATE_PERMANENT(instanceKlassKlass);
   static klassOop create_klass(TRAPS);
-  klassOop allocate_instance_klass(int vtable_len,
+  klassOop allocate_instance_klass(Symbol* name,
+                                   int vtable_len,
                                    int itable_len,
                                    int static_field_size,
                                    unsigned int nonstatic_oop_map_count,

View File

@ -0,0 +1,313 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/permGen.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/handles.inline.hpp"
#ifndef SERIALGC
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#endif
int instanceMirrorKlass::_offset_of_static_fields = 0;
#ifdef ASSERT
template <class T> void assert_is_in(T *p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
assert(Universe::heap()->is_in(o), "should be in heap");
}
}
template <class T> void assert_is_in_closed_subset(T *p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
assert(Universe::heap()->is_in_closed_subset(o), "should be in closed");
}
}
template <class T> void assert_is_in_reserved(T *p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
assert(Universe::heap()->is_in_reserved(o), "should be in reserved");
}
}
template <class T> void assert_nothing(T *p) {}
#else
template <class T> void assert_is_in(T *p) {}
template <class T> void assert_is_in_closed_subset(T *p) {}
template <class T> void assert_is_in_reserved(T *p) {}
template <class T> void assert_nothing(T *p) {}
#endif // ASSERT
#define InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE( \
T, start_p, count, do_oop, \
assert_fn) \
{ \
T* p = (T*)(start_p); \
T* const end = p + (count); \
while (p < end) { \
(assert_fn)(p); \
do_oop; \
++p; \
} \
}
#define InstanceMirrorKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \
T, start_p, count, low, high, \
do_oop, assert_fn) \
{ \
T* const l = (T*)(low); \
T* const h = (T*)(high); \
assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \
mask_bits((intptr_t)h, sizeof(T)-1) == 0, \
"bounded region must be properly aligned"); \
T* p = (T*)(start_p); \
T* end = p + (count); \
if (p < l) p = l; \
if (end > h) end = h; \
while (p < end) { \
(assert_fn)(p); \
do_oop; \
++p; \
} \
}
#define InstanceMirrorKlass_OOP_ITERATE(start_p, count, \
do_oop, assert_fn) \
{ \
if (UseCompressedOops) { \
InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
start_p, count, \
do_oop, assert_fn) \
} else { \
InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE(oop, \
start_p, count, \
do_oop, assert_fn) \
} \
}
// The following macros call specialized macros, passing either oop or
// narrowOop as the specialization type. These test the UseCompressedOops
// flag.
#define InstanceMirrorKlass_BOUNDED_OOP_ITERATE(start_p, count, low, high, \
do_oop, assert_fn) \
{ \
if (UseCompressedOops) { \
InstanceMirrorKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
start_p, count, \
low, high, \
do_oop, assert_fn) \
} else { \
InstanceMirrorKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \
start_p, count, \
low, high, \
do_oop, assert_fn) \
} \
}
void instanceMirrorKlass::oop_follow_contents(oop obj) {
instanceKlass::oop_follow_contents(obj);
InstanceMirrorKlass_OOP_ITERATE( \
start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \
MarkSweep::mark_and_push(p), \
assert_is_in_closed_subset)
}
#ifndef SERIALGC
void instanceMirrorKlass::oop_follow_contents(ParCompactionManager* cm,
oop obj) {
instanceKlass::oop_follow_contents(cm, obj);
InstanceMirrorKlass_OOP_ITERATE( \
start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \
PSParallelCompact::mark_and_push(cm, p), \
assert_is_in)
}
#endif // SERIALGC
int instanceMirrorKlass::oop_adjust_pointers(oop obj) {
int size = oop_size(obj);
instanceKlass::oop_adjust_pointers(obj);
InstanceMirrorKlass_OOP_ITERATE( \
start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \
MarkSweep::adjust_pointer(p), \
assert_nothing)
return size;
}
#define InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(T, nv_suffix) \
InstanceMirrorKlass_OOP_ITERATE( \
start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \
(closure)->do_oop##nv_suffix(p), \
assert_is_in_closed_subset) \
return oop_size(obj); \
#define InstanceMirrorKlass_BOUNDED_SPECIALIZED_OOP_ITERATE(T, nv_suffix, mr) \
InstanceMirrorKlass_BOUNDED_OOP_ITERATE( \
start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \
mr.start(), mr.end(), \
(closure)->do_oop##nv_suffix(p), \
assert_is_in_closed_subset) \
return oop_size(obj); \
// Macro to define instanceMirrorKlass::oop_oop_iterate for virtual/nonvirtual for
// all closures. Macros calling macros above for each oop size.
#define InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
\
int instanceMirrorKlass:: \
oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
/* Get size before changing pointers */ \
SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk); \
\
instanceKlass::oop_oop_iterate##nv_suffix(obj, closure); \
\
if (UseCompressedOops) { \
InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(narrowOop, nv_suffix); \
} else { \
InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(oop, nv_suffix); \
} \
}
#ifndef SERIALGC
#define InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
\
int instanceMirrorKlass:: \
oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
/* Get size before changing pointers */ \
SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk); \
\
instanceKlass::oop_oop_iterate_backwards##nv_suffix(obj, closure); \
\
if (UseCompressedOops) { \
InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(narrowOop, nv_suffix); \
} else { \
InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(oop, nv_suffix); \
} \
}
#endif // !SERIALGC
#define InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
\
int instanceMirrorKlass:: \
oop_oop_iterate##nv_suffix##_m(oop obj, \
OopClosureType* closure, \
MemRegion mr) { \
SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk); \
\
instanceKlass::oop_oop_iterate##nv_suffix##_m(obj, closure, mr); \
if (UseCompressedOops) { \
InstanceMirrorKlass_BOUNDED_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, mr); \
} else { \
InstanceMirrorKlass_BOUNDED_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, mr); \
} \
}
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN)
#ifndef SERIALGC
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
#endif // SERIALGC
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m)
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m)
#ifndef SERIALGC
void instanceMirrorKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
instanceKlass::oop_push_contents(pm, obj);
InstanceMirrorKlass_OOP_ITERATE( \
start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj),\
if (PSScavenge::should_scavenge(p)) { \
pm->claim_or_forward_depth(p); \
}, \
assert_nothing )
}
int instanceMirrorKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
instanceKlass::oop_update_pointers(cm, obj);
InstanceMirrorKlass_OOP_ITERATE( \
start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj),\
PSParallelCompact::adjust_pointer(p), \
assert_nothing)
return oop_size(obj);
}
#endif // SERIALGC
int instanceMirrorKlass::instance_size(KlassHandle k) {
if (k() != NULL && k->oop_is_instance()) {
return align_object_size(size_helper() + instanceKlass::cast(k())->static_field_size());
}
return size_helper();
}
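// Worked example (illustrative numbers): if java.lang.Class needs 10
// words for its own fields and the mirrored class has 3 words of static
// fields, the mirror is allocated with
//   instance_size(k) == align_object_size(10 + 3)
// words; for a non-instance klass (or NULL) it is just size_helper().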
instanceOop instanceMirrorKlass::allocate_instance(KlassHandle k, TRAPS) {
// Query before forming handle.
int size = instance_size(k);
KlassHandle h_k(THREAD, as_klassOop());
instanceOop i;
if (JavaObjectsInPerm) {
i = (instanceOop) CollectedHeap::permanent_obj_allocate(h_k, size, CHECK_NULL);
} else {
assert(ScavengeRootsInCode > 0, "must be");
i = (instanceOop) CollectedHeap::obj_allocate(h_k, size, CHECK_NULL);
}
return i;
}
int instanceMirrorKlass::oop_size(oop obj) const {
return java_lang_Class::oop_size(obj);
}
int instanceMirrorKlass::compute_static_oop_field_count(oop obj) {
klassOop k = java_lang_Class::as_klassOop(obj);
if (k != NULL && k->klass_part()->oop_is_instance()) {
return instanceKlass::cast(k)->static_oop_field_count();
}
return 0;
}
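To see through the macro layering above, here is a hand-expanded sketch of what one virtual instantiation boils down to for the plain-oop case (SpecializationStats bookkeeping and the narrowOop arm elided; illustrative, not generated text):

    int instanceMirrorKlass::oop_oop_iterate_v(oop obj, OopClosure* closure) {
      // First the ordinary java.lang.Class instance fields...
      instanceKlass::oop_oop_iterate_v(obj, closure);
      // ...then the static-field oops appended past them.
      oop* p   = (oop*)start_of_static_fields(obj);
      oop* end = p + java_lang_Class::static_oop_field_count(obj);
      while (p < end) {
        closure->do_oop_v(p);
        ++p;
      }
      return oop_size(obj);
    }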

View File

@ -0,0 +1,110 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_OOPS_INSTANCEMIRRORKLASS_HPP
#define SHARE_VM_OOPS_INSTANCEMIRRORKLASS_HPP
#include "oops/instanceKlass.hpp"
// An instanceMirrorKlass is a specialized instanceKlass for
// java.lang.Class instances. These instances are special because
// they contain the static fields of the class in addition to the
// normal fields of Class. This means they are variable sized
// instances and need special logic for computing their size and for
// iteration of their oops.
class instanceMirrorKlass: public instanceKlass {
private:
static int _offset_of_static_fields;
public:
// Type testing
bool oop_is_instanceMirror() const { return true; }
// Casting from klassOop
static instanceMirrorKlass* cast(klassOop k) {
assert(k->klass_part()->oop_is_instanceMirror(), "cast to instanceMirrorKlass");
return (instanceMirrorKlass*) k->klass_part();
}
// Returns the size of the instance including the extra static fields.
virtual int oop_size(oop obj) const;
// Static field offset is an offset into the Heap, should be converted by
// based on UseCompressedOop for traversal
static HeapWord* start_of_static_fields(oop obj) {
return (HeapWord*)((intptr_t)obj + offset_of_static_fields());
}
static void init_offset_of_static_fields() {
// Cache the offset of the static fields in the Class instance
assert(_offset_of_static_fields == 0, "once");
_offset_of_static_fields = instanceMirrorKlass::cast(SystemDictionary::Class_klass())->size_helper() << LogHeapWordSize;
}
static int offset_of_static_fields() {
return _offset_of_static_fields;
}
int compute_static_oop_field_count(oop obj);
// Given a Klass return the size of the instance
int instance_size(KlassHandle k);
// allocation
DEFINE_ALLOCATE_PERMANENT(instanceMirrorKlass);
instanceOop allocate_instance(KlassHandle k, TRAPS);
// Garbage collection
int oop_adjust_pointers(oop obj);
void oop_follow_contents(oop obj);
// Parallel Scavenge and Parallel Old
PARALLEL_GC_DECLS
int oop_oop_iterate(oop obj, OopClosure* blk) {
return oop_oop_iterate_v(obj, blk);
}
int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) {
return oop_oop_iterate_v_m(obj, blk, mr);
}
#define InstanceMirrorKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \
int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, MemRegion mr);
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_DECL)
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_DECL)
#ifndef SERIALGC
#define InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk);
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
#endif // !SERIALGC
};
#endif // SHARE_VM_OOPS_INSTANCEMIRRORKLASS_HPP
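The resulting mirror layout, schematically (widths not to scale; the static oops are kept contiguous so the count-based iteration above works):

    [ mark word                   ]
    [ klass (instanceMirrorKlass) ]
    [ java.lang.Class fields      ]  size_helper() words up to here
    [ static oop fields           ]  <- offset_of_static_fields() bytes in
    [ remaining static fields     ]  static_field_size() words of statics in all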

View File

@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -577,6 +577,7 @@ class Klass : public Klass_vtbl {
 public:
   // type testing operations
   virtual bool oop_is_instance_slow()  const { return false; }
+  virtual bool oop_is_instanceMirror() const { return false; }
   virtual bool oop_is_instanceRef()    const { return false; }
   virtual bool oop_is_array()          const { return false; }
   virtual bool oop_is_objArray_slow()  const { return false; }
@ -811,4 +812,8 @@ class Klass : public Klass_vtbl {
 #endif
 };

+inline oop klassOopDesc::java_mirror() const { return klass_part()->java_mirror(); }
#endif // SHARE_VM_OOPS_KLASS_HPP

View File

@ -41,6 +41,10 @@
 #include "oops/typeArrayKlass.hpp"
 #include "runtime/handles.inline.hpp"
 #ifndef SERIALGC
+#include "gc_implementation/parNew/parOopClosures.inline.hpp"
+#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
+#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
+#include "memory/cardTableRS.hpp"
 #include "oops/oop.pcgc.inline.hpp"
 #endif
@ -175,6 +179,12 @@ int klassKlass::oop_adjust_pointers(oop obj) {
 #ifndef SERIALGC
 void klassKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
+  Klass* k = Klass::cast(klassOop(obj));
+  oop* p = k->adr_java_mirror();
+  if (PSScavenge::should_scavenge(p)) {
+    pm->claim_or_forward_depth(p);
+  }
 }
int klassKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
@ -233,7 +243,7 @@ void klassKlass::oop_verify_on(oop obj, outputStream* st) {
   if (k->java_mirror() != NULL || (k->oop_is_instance() && instanceKlass::cast(klassOop(obj))->is_loaded())) {
     guarantee(k->java_mirror() != NULL,        "should be allocated");
-    guarantee(k->java_mirror()->is_perm(),     "should be in permspace");
+    guarantee(k->java_mirror()->is_perm() || !JavaObjectsInPerm, "should be in permspace");
     guarantee(k->java_mirror()->is_instance(), "should be instance");
}
}

View File

@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -45,7 +45,73 @@ class klassOopDesc : public oopDesc {
static int klass_part_offset_in_bytes() { return sizeof(klassOopDesc); }
// returns the Klass part containing dispatching behavior
-  Klass* klass_part()       { return (Klass*)((address)this + klass_part_offset_in_bytes()); }
+  Klass* klass_part() const { return (Klass*)((address)this + klass_part_offset_in_bytes()); }
+
+  // Convenience wrapper
+  inline oop java_mirror() const;
private:
// These have no implementation since klassOop should never be accessed in this fashion
oop obj_field(int offset) const;
void obj_field_put(int offset, oop value);
void obj_field_raw_put(int offset, oop value);
jbyte byte_field(int offset) const;
void byte_field_put(int offset, jbyte contents);
jchar char_field(int offset) const;
void char_field_put(int offset, jchar contents);
jboolean bool_field(int offset) const;
void bool_field_put(int offset, jboolean contents);
jint int_field(int offset) const;
void int_field_put(int offset, jint contents);
jshort short_field(int offset) const;
void short_field_put(int offset, jshort contents);
jlong long_field(int offset) const;
void long_field_put(int offset, jlong contents);
jfloat float_field(int offset) const;
void float_field_put(int offset, jfloat contents);
jdouble double_field(int offset) const;
void double_field_put(int offset, jdouble contents);
address address_field(int offset) const;
void address_field_put(int offset, address contents);
oop obj_field_acquire(int offset) const;
void release_obj_field_put(int offset, oop value);
jbyte byte_field_acquire(int offset) const;
void release_byte_field_put(int offset, jbyte contents);
jchar char_field_acquire(int offset) const;
void release_char_field_put(int offset, jchar contents);
jboolean bool_field_acquire(int offset) const;
void release_bool_field_put(int offset, jboolean contents);
jint int_field_acquire(int offset) const;
void release_int_field_put(int offset, jint contents);
jshort short_field_acquire(int offset) const;
void release_short_field_put(int offset, jshort contents);
jlong long_field_acquire(int offset) const;
void release_long_field_put(int offset, jlong contents);
jfloat float_field_acquire(int offset) const;
void release_float_field_put(int offset, jfloat contents);
jdouble double_field_acquire(int offset) const;
void release_double_field_put(int offset, jdouble contents);
address address_field_acquire(int offset) const;
void release_address_field_put(int offset, address contents);
};
#endif // SHARE_VM_OOPS_KLASSOOP_HPP

View File

@ -1095,7 +1095,7 @@ void klassItable::setup_itable_offset_table(instanceKlassHandle klass) {
   itableOffsetEntry* ioe = (itableOffsetEntry*)klass->start_of_itable();
   itableMethodEntry* ime = (itableMethodEntry*)(ioe + nof_interfaces);
   intptr_t* end = klass->end_of_itable();
-  assert((oop*)(ime + nof_methods) <= (oop*)klass->start_of_static_fields(), "wrong offset calculation (1)");
+  assert((oop*)(ime + nof_methods) <= (oop*)klass->start_of_nonstatic_oop_maps(), "wrong offset calculation (1)");
   assert((oop*)(end) == (oop*)(ime + nof_methods), "wrong offset calculation (2)");
// Visit all interfaces and initialize itable offset table

View File

@ -31,6 +31,13 @@
 #include "oops/objArrayKlassKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oop.inline2.hpp"
+#ifndef SERIALGC
+#include "gc_implementation/parNew/parOopClosures.inline.hpp"
+#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
+#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
+#include "memory/cardTableRS.hpp"
+#include "oops/oop.pcgc.inline.hpp"
+#endif
klassOop objArrayKlassKlass::create_klass(TRAPS) {
objArrayKlassKlass o;
@ -236,12 +243,23 @@ objArrayKlassKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) {
   addr = oak->bottom_klass_addr();
   if (mr.contains(addr)) blk->do_oop(addr);
-  return arrayKlassKlass::oop_oop_iterate(obj, blk);
+  return arrayKlassKlass::oop_oop_iterate_m(obj, blk, mr);
 }
 #ifndef SERIALGC
+void objArrayKlassKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
+  assert(obj->blueprint()->oop_is_objArrayKlass(),"must be an obj array klass");
+  objArrayKlass* oak = objArrayKlass::cast((klassOop)obj);
+  oop* p = oak->element_klass_addr();
+  if (PSScavenge::should_scavenge(p)) {
+    pm->claim_or_forward_depth(p);
+  }
+  p = oak->bottom_klass_addr();
+  if (PSScavenge::should_scavenge(p)) {
+    pm->claim_or_forward_depth(p);
+  }
+  arrayKlassKlass::oop_push_contents(pm, obj);
+}
int objArrayKlassKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
@ -287,7 +305,7 @@ const char* objArrayKlassKlass::internal_name() const {
 // Verification

 void objArrayKlassKlass::oop_verify_on(oop obj, outputStream* st) {
-  klassKlass::oop_verify_on(obj, st);
+  arrayKlassKlass::oop_verify_on(obj, st);
   objArrayKlass* oak = objArrayKlass::cast((klassOop)obj);
   guarantee(oak->element_klass()->is_perm(),  "should be in permspace");
   guarantee(oak->element_klass()->is_klass(), "should be klass");

View File

@ -129,6 +129,7 @@ class oopDesc {
   // type test operations (inlined in oop.inline.h)
   bool is_instance()       const;
+  bool is_instanceMirror() const;
   bool is_instanceRef()    const;
   bool is_array()          const;
   bool is_objArray()       const;

View File

@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -141,6 +141,7 @@ inline Klass* oopDesc::blueprint()       const { return klass()->klass_part(
 inline bool oopDesc::is_a(klassOop k)    const { return blueprint()->is_subtype_of(k); }

 inline bool oopDesc::is_instance()       const { return blueprint()->oop_is_instance(); }
+inline bool oopDesc::is_instanceMirror() const { return blueprint()->oop_is_instanceMirror(); }
 inline bool oopDesc::is_instanceRef()    const { return blueprint()->oop_is_instanceRef(); }
 inline bool oopDesc::is_array()          const { return blueprint()->oop_is_array(); }
 inline bool oopDesc::is_objArray()       const { return blueprint()->oop_is_objArray(); }
@ -399,7 +400,7 @@ inline void oopDesc::release_address_field_put(int offset, address contents) { O
inline int oopDesc::size_given_klass(Klass* klass) {
int lh = klass->layout_helper();
int s = lh >> LogHeapWordSize; // deliver size scaled by wordSize
int s;
// lh is now a value computed at class initialization that may hint
// at the size. For instances, this is positive and equal to the
@ -412,7 +413,13 @@ inline int oopDesc::size_given_klass(Klass* klass) {
// alive or dead. So the speed here is equal in importance to the
// speed of allocation.
if (lh <= Klass::_lh_neutral_value) {
if (lh > Klass::_lh_neutral_value) {
if (!Klass::layout_helper_needs_slow_path(lh)) {
s = lh >> LogHeapWordSize; // deliver size scaled by wordSize
} else {
s = klass->oop_size(this);
}
} else if (lh <= Klass::_lh_neutral_value) {
// The most common case is instances; fall through if so.
if (lh < Klass::_lh_neutral_value) {
// Second most common case is arrays. We have to fetch the

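The reworked size_given_klass splits positive layout-helper values into a fast path (the size is encoded directly) and a slow path (the klass must compute the size), which matters for java.lang.Class instances whose size now varies with their static-field tail. A toy decoding, with a made-up encoding rather than Klass's real one:

    #include <cassert>

    const int kNeutral = 0;          // stand-in for Klass::_lh_neutral_value
    const int kSlowBit = 1;          // stand-in slow-path tag; real encoding differs
    const int kLogHeapWordSize = 3;  // 64-bit heap words

    int size_in_words(int lh, int slow_size) {
      if (lh > kNeutral)                      // positive: instance klass
        return (lh & kSlowBit) ? slow_size    // e.g. a mirror with statics appended
                               : lh >> kLogHeapWordSize;  // size encoded in lh
      return slow_size;                       // neutral/negative: slow or array path
    }

    int main() {
      assert(size_in_words(16 << kLogHeapWordSize, 99) == 16);               // fast
      assert(size_in_words((16 << kLogHeapWordSize) | kSlowBit, 99) == 99);  // slow
      return 0;
    }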

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -174,6 +174,7 @@ DEF_OOP(compiledICHolder);
class Klass;
class instanceKlass;
class instanceMirrorKlass;
class instanceRefKlass;
class methodKlass;
class constMethodKlass;


@ -180,6 +180,9 @@
develop(bool, TraceLoopPredicate, false, \
"Trace generation of loop predicates") \
\
develop(bool, TraceLoopOpts, false, \
"Trace executed loop optimizations") \
\
product(bool, OptimizeFill, false, \
"convert fill/copy loops into intrinsic") \
\


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1202,11 +1202,15 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
// Oop pointers need some flattening
const TypeInstPtr *to = tj->isa_instptr();
if( to && _AliasLevel >= 2 && to != TypeOopPtr::BOTTOM ) {
ciInstanceKlass *k = to->klass()->as_instance_klass();
if( ptr == TypePtr::Constant ) {
// No constant oop pointers (such as Strings); they alias with
// unknown strings.
assert(!is_known_inst, "not scalarizable allocation");
tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
if (to->klass() != ciEnv::current()->Class_klass() ||
offset < k->size_helper() * wordSize) {
// No constant oop pointers (such as Strings); they alias with
// unknown strings.
assert(!is_known_inst, "not scalarizable allocation");
tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
}
} else if( is_known_inst ) {
tj = to; // Keep NotNull and klass_is_exact for instance type
} else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
@ -1216,7 +1220,6 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
}
// Canonicalize the holder of this field
ciInstanceKlass *k = to->klass()->as_instance_klass();
if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
// First handle header references such as a LoadKlassNode, even if the
// object's klass is unloaded at compile time (4965979).
@ -1224,9 +1227,13 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset);
}
} else if (offset < 0 || offset >= k->size_helper() * wordSize) {
to = NULL;
tj = TypeOopPtr::BOTTOM;
offset = tj->offset();
// Static fields are in the space above the normal instance
// fields in the java.lang.Class instance.
if (to->klass() != ciEnv::current()->Class_klass()) {
to = NULL;
tj = TypeOopPtr::BOTTOM;
offset = tj->offset();
}
} else {
ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset);
if (!k->equals(canonical_holder) || tj->offset() != offset) {
@ -1399,7 +1406,7 @@ void Compile::grow_alias_types() {
//--------------------------------find_alias_type------------------------------
Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create) {
Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field) {
if (_AliasLevel == 0)
return alias_type(AliasIdxBot);
@ -1464,22 +1471,28 @@ Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_cr
// but the base pointer type is not distinctive enough to identify
// references into JavaThread.)
// Check for final instance fields.
// Check for final fields.
const TypeInstPtr* tinst = flat->isa_instptr();
if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
ciInstanceKlass *k = tinst->klass()->as_instance_klass();
ciField* field = k->get_field_by_offset(tinst->offset(), false);
ciField* field;
if (tinst->const_oop() != NULL &&
tinst->klass() == ciEnv::current()->Class_klass() &&
tinst->offset() >= (tinst->klass()->as_instance_klass()->size_helper() * wordSize)) {
// static field
ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
field = k->get_field_by_offset(tinst->offset(), true);
} else {
ciInstanceKlass *k = tinst->klass()->as_instance_klass();
field = k->get_field_by_offset(tinst->offset(), false);
}
assert(field == NULL ||
original_field == NULL ||
(field->holder() == original_field->holder() &&
field->offset() == original_field->offset() &&
field->is_static() == original_field->is_static()), "wrong field?");
// Set field() and is_rewritable() attributes.
if (field != NULL) alias_type(idx)->set_field(field);
}
const TypeKlassPtr* tklass = flat->isa_klassptr();
// Check for final static fields.
if (tklass && tklass->klass()->is_instance_klass()) {
ciInstanceKlass *k = tklass->klass()->as_instance_klass();
ciField* field = k->get_field_by_offset(tklass->offset(), true);
// Set field() and is_rewritable() attributes.
if (field != NULL) alias_type(idx)->set_field(field);
}
}
// Fill the cache for next time.
@ -1502,10 +1515,10 @@ Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_cr
Compile::AliasType* Compile::alias_type(ciField* field) {
const TypeOopPtr* t;
if (field->is_static())
t = TypeKlassPtr::make(field->holder());
t = TypeInstPtr::make(field->holder()->java_mirror());
else
t = TypeOopPtr::make_from_klass_raw(field->holder());
AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()));
AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
assert(field->is_final() == !atp->is_rewritable(), "must get the rewritable bits correct");
return atp;
}
@ -1522,7 +1535,7 @@ bool Compile::have_alias_type(const TypePtr* adr_type) {
if (adr_type == NULL) return true;
if (adr_type == TypePtr::BOTTOM) return true;
return find_alias_type(adr_type, true) != NULL;
return find_alias_type(adr_type, true, NULL) != NULL;
}
//-----------------------------must_alias--------------------------------------

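The compile.cpp changes above teach C2 that a static field is addressed as an offset into its holder's java.lang.Class mirror: offsets below the mirror's own instance size are Class's fields, offsets at or above it are the holder's statics living in the mirror's tail. A hedged sketch of that rule, with illustrative names rather than the real ciInstanceKlass API:

    #include <cassert>

    // Illustrative only; not HotSpot's API.
    bool offset_is_static_field(bool klass_is_java_lang_Class,
                                int offset, int mirror_size_in_bytes) {
      // Inside the mirror's instance layout: a java.lang.Class field.
      // At or past it: one of the holder's static fields in the tail.
      return klass_is_java_lang_Class && offset >= mirror_size_in_bytes;
    }

    int main() {
      assert(!offset_is_static_field(false, 200, 96)); // ordinary instance field
      assert( offset_is_static_field(true,  200, 96)); // static in the mirror tail
      assert(!offset_is_static_field(true,   40, 96)); // Class's own field
      return 0;
    }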

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -596,7 +596,7 @@ class Compile : public Phase {
}
AliasType* alias_type(int idx) { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
AliasType* alias_type(const TypePtr* adr_type) { return find_alias_type(adr_type, false); }
AliasType* alias_type(const TypePtr* adr_type, ciField* field = NULL) { return find_alias_type(adr_type, false, field); }
bool have_alias_type(const TypePtr* adr_type);
AliasType* alias_type(ciField* field);
@ -835,7 +835,7 @@ class Compile : public Phase {
void grow_alias_types();
AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type);
const TypePtr *flatten_alias_type(const TypePtr* adr_type) const;
AliasType* find_alias_type(const TypePtr* adr_type, bool no_create);
AliasType* find_alias_type(const TypePtr* adr_type, bool no_create, ciField* field);
void verify_top(Node*) const PRODUCT_RETURN;


@ -3338,6 +3338,49 @@ InitializeNode* AllocateNode::initialization() {
return NULL;
}
//----------------------------- loop predicates ---------------------------
//------------------------------add_predicate_impl----------------------------
void GraphKit::add_predicate_impl(Deoptimization::DeoptReason reason, int nargs) {
// Too many traps seen?
if (too_many_traps(reason)) {
#ifdef ASSERT
if (TraceLoopPredicate) {
int tc = C->trap_count(reason);
tty->print("too many traps=%s tcount=%d in ",
Deoptimization::trap_reason_name(reason), tc);
method()->print(); // which method has too many predicate traps
tty->cr();
}
#endif
// We cannot afford to take more traps here,
// do not generate a predicate.
return;
}
Node *cont = _gvn.intcon(1);
Node* opq = _gvn.transform(new (C, 2) Opaque1Node(C, cont));
Node *bol = _gvn.transform(new (C, 2) Conv2BNode(opq));
IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN);
Node* iffalse = _gvn.transform(new (C, 1) IfFalseNode(iff));
C->add_predicate_opaq(opq);
{
PreserveJVMState pjvms(this);
set_control(iffalse);
_sp += nargs;
uncommon_trap(reason, Deoptimization::Action_maybe_recompile);
}
Node* iftrue = _gvn.transform(new (C, 1) IfTrueNode(iff));
set_control(iftrue);
}
//------------------------------add_predicate---------------------------------
void GraphKit::add_predicate(int nargs) {
if (UseLoopPredicate) {
add_predicate_impl(Deoptimization::Reason_predicate, nargs);
}
}
//----------------------------- store barriers ----------------------------
#define __ ideal.

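add_predicate_impl above emits an If whose condition is an opaque constant true (an Opaque1 feeding a Conv2B), with an uncommon trap on the false projection. At the source level the emitted shape means roughly the following; a sketch, not real IR:

    // Source-level sketch of the predicate shape, not C2 node code.
    bool opaque_one() { return true; }   // Opaque1 hides the constant from igvn
    void uncommon_trap() { /* Reason_predicate, Action_maybe_recompile */ }

    void guarded_loop_entry() {
      if (!opaque_one()) {   // IfFalse projection of the new predicate If
        uncommon_trap();     // cold until predication installs a real test
        return;
      }
      // IfTrue projection: control falls through into the loop.
    }

    int main() { guarded_loop_entry(); return 0; }

The trap arm stays cold until loop predication later replaces the opaque input with a hoisted loop-invariant or range-check condition; if the method has already trapped here too often, the predicate is not emitted at all.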

@ -793,6 +793,10 @@ class GraphKit : public Phase {
if (!tst->is_Con()) record_for_igvn(iff); // Range-check and Null-check removal is later
return iff;
}
// Insert a loop predicate into the graph
void add_predicate(int nargs = 0);
void add_predicate_impl(Deoptimization::DeoptReason reason, int nargs);
};
// Helper class to support building of control flow branches. Upon


@ -154,8 +154,18 @@ void IdealKit::end_if() {
//
// Pushes the loop top cvstate first, then the else (loop exit) cvstate
// onto the stack.
void IdealKit::loop(IdealVariable& iv, Node* init, BoolTest::mask relop, Node* limit, float prob, float cnt) {
void IdealKit::loop(GraphKit* gkit, int nargs, IdealVariable& iv, Node* init, BoolTest::mask relop, Node* limit, float prob, float cnt) {
assert((state() & (BlockS|LoopS|IfThenS|ElseS)), "bad state for new loop");
// Sync IdealKit and graphKit.
gkit->set_all_memory(this->merged_memory());
gkit->set_control(this->ctrl());
// Add loop predicate.
gkit->add_predicate(nargs);
// Update IdealKit memory.
this->set_all_memory(gkit->merged_memory());
this->set_ctrl(gkit->control());
set(iv, init);
Node* head = make_label(1);
bind(head);


@ -29,6 +29,7 @@
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/divnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
@ -160,7 +161,7 @@ class IdealKit: public StackObj {
bool push_new_state = true);
void else_();
void end_if();
void loop(IdealVariable& iv, Node* init, BoolTest::mask cmp, Node* limit,
void loop(GraphKit* gkit, int nargs, IdealVariable& iv, Node* init, BoolTest::mask cmp, Node* limit,
float prob = PROB_LIKELY(0.9), float cnt = COUNT_UNKNOWN);
void end_loop();
Node* make_label(int goto_ct);


@ -1101,6 +1101,8 @@ Node* LibraryCallKit::string_indexOf(Node* string_object, ciTypeArray* target_ar
float likely = PROB_LIKELY(0.9);
float unlikely = PROB_UNLIKELY(0.9);
const int nargs = 2; // number of arguments to push back for uncommon trap in predicate
const int value_offset = java_lang_String::value_offset_in_bytes();
const int count_offset = java_lang_String::count_offset_in_bytes();
const int offset_offset = java_lang_String::offset_offset_in_bytes();
@ -1116,7 +1118,7 @@ Node* LibraryCallKit::string_indexOf(Node* string_object, ciTypeArray* target_ar
Node* sourcea = basic_plus_adr(string_object, string_object, value_offset);
Node* source = make_load(no_ctrl, sourcea, source_type, T_OBJECT, string_type->add_offset(value_offset));
Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array)) );
Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array, true)) );
jint target_length = target_array->length();
const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin));
const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot);
@ -1138,12 +1140,12 @@ Node* LibraryCallKit::string_indexOf(Node* string_object, ciTypeArray* target_ar
Node* return_ = __ make_label(1);
__ set(rtn,__ ConI(-1));
__ loop(i, sourceOffset, BoolTest::lt, sourceEnd); {
__ loop(this, nargs, i, sourceOffset, BoolTest::lt, sourceEnd); {
Node* i2 = __ AddI(__ value(i), targetCountLess1);
// pin to prohibit loading of "next iteration" value which may SEGV (rare)
Node* src = load_array_element(__ ctrl(), source, i2, TypeAryPtr::CHARS);
__ if_then(src, BoolTest::eq, lastChar, unlikely); {
__ loop(j, zero, BoolTest::lt, targetCountLess1); {
__ loop(this, nargs, j, zero, BoolTest::lt, targetCountLess1); {
Node* tpj = __ AddI(targetOffset, __ value(j));
Node* targ = load_array_element(no_ctrl, target, tpj, target_type);
Node* ipj = __ AddI(__ value(i), __ value(j));


@ -205,6 +205,8 @@ Node* IdealLoopTree::reassociate_add_sub(Node* n1, PhaseIdealLoop *phase) {
}
phase->register_new_node(addx, phase->get_ctrl(x));
phase->_igvn.replace_node(n1, addx);
assert(phase->get_loop(phase->get_ctrl(n1)) == this, "");
_body.yank(n1);
return addx;
}
@ -307,15 +309,21 @@ void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) {
// iterations adjusted. Therefore, we need to declare this loop as
// no longer a 'main' loop; it will need new pre and post loops before
// we can do further RCE.
#ifndef PRODUCT
if (TraceLoopOpts) {
tty->print("Peel ");
loop->dump_head();
}
#endif
Node *h = loop->_head;
if( h->is_CountedLoop() ) {
if (h->is_CountedLoop()) {
CountedLoopNode *cl = h->as_CountedLoop();
assert(cl->trip_count() > 0, "peeling a fully unrolled loop");
cl->set_trip_count(cl->trip_count() - 1);
if( cl->is_main_loop() ) {
if (cl->is_main_loop()) {
cl->set_normal_loop();
#ifndef PRODUCT
if( PrintOpto && VerifyLoopOptimizations ) {
if (PrintOpto && VerifyLoopOptimizations) {
tty->print("Peeling a 'main' loop; resetting to 'normal' ");
loop->dump_head();
}
@ -645,6 +653,15 @@ Node *PhaseIdealLoop::clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ct
// alignment. Useful to unroll loops that do no array accesses.
void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only ) {
#ifndef PRODUCT
if (TraceLoopOpts) {
if (peel_only)
tty->print("PeelMainPost ");
else
tty->print("PreMainPost ");
loop->dump_head();
}
#endif
C->set_major_progress();
// Find common pieces of the loop being guarded with pre & post loops
@ -897,16 +914,19 @@ bool IdealLoopTree::is_invariant(Node* n) const {
//------------------------------do_unroll--------------------------------------
// Unroll the loop body one step - make each trip do 2 iterations.
void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip ) {
assert( LoopUnrollLimit, "" );
assert(LoopUnrollLimit, "");
CountedLoopNode *loop_head = loop->_head->as_CountedLoop();
CountedLoopEndNode *loop_end = loop_head->loopexit();
assert(loop_end, "");
#ifndef PRODUCT
if( PrintOpto && VerifyLoopOptimizations ) {
if (PrintOpto && VerifyLoopOptimizations) {
tty->print("Unrolling ");
loop->dump_head();
} else if (TraceLoopOpts) {
tty->print("Unroll %d ", loop_head->unrolled_count()*2);
loop->dump_head();
}
#endif
CountedLoopNode *loop_head = loop->_head->as_CountedLoop();
CountedLoopEndNode *loop_end = loop_head->loopexit();
assert( loop_end, "" );
// Remember loop node count before unrolling to detect
// if rounds of unroll,optimize are making progress
@ -915,7 +935,7 @@ void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool ad
Node *ctrl = loop_head->in(LoopNode::EntryControl);
Node *limit = loop_head->limit();
Node *init = loop_head->init_trip();
Node *strid = loop_head->stride();
Node *stride = loop_head->stride();
Node *opaq = NULL;
if( adjust_min_trip ) { // If not maximally unrolling, need adjustment
@ -955,13 +975,13 @@ void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool ad
// odd iteration: (trip_cnt & ~1). Then back compute a new limit.
Node *span = new (C, 3) SubINode( limit, init );
register_new_node( span, ctrl );
Node *trip = new (C, 3) DivINode( 0, span, strid );
Node *trip = new (C, 3) DivINode( 0, span, stride );
register_new_node( trip, ctrl );
Node *mtwo = _igvn.intcon(-2);
set_ctrl(mtwo, C->root());
Node *rond = new (C, 3) AndINode( trip, mtwo );
register_new_node( rond, ctrl );
Node *spn2 = new (C, 3) MulINode( rond, strid );
Node *spn2 = new (C, 3) MulINode( rond, stride );
register_new_node( spn2, ctrl );
Node *lim2 = new (C, 3) AddINode( spn2, init );
register_new_node( lim2, ctrl );
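As a quick numeric check of the rounding math in this hunk (span = limit - init, trip = span / stride, trip rounded down to even with an AndI against -2, then lim2 back-computed), under assumed small constants:

    #include <cassert>

    int main() {
      int init = 3, limit = 20, stride = 2;
      int span = limit - init;          // 17
      int trip = span / stride;         // 8 trips
      int rond = trip & -2;             // round down to even: 8
      int lim2 = rond * stride + init;  // 19
      assert(lim2 == 19);               // an even number of +2 trips from 3
      // With stride 3: trip = 5 rounds to 4 and lim2 = 15; the leftover
      // iterations run in the post-loop.
      return 0;
    }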
@ -1040,17 +1060,23 @@ void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool ad
void PhaseIdealLoop::do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new ) {
CountedLoopNode *cl = loop->_head->as_CountedLoop();
assert( cl->trip_count() > 0, "");
assert(cl->trip_count() > 0, "");
#ifndef PRODUCT
if (TraceLoopOpts) {
tty->print("MaxUnroll %d ", cl->trip_count());
loop->dump_head();
}
#endif
// If loop is tripping an odd number of times, peel odd iteration
if( (cl->trip_count() & 1) == 1 ) {
do_peeling( loop, old_new );
if ((cl->trip_count() & 1) == 1) {
do_peeling(loop, old_new);
}
// Now it's tripping an even number of times remaining. Double loop body.
// Do not adjust pre-guards; they are not needed and do not exist.
if( cl->trip_count() > 0 ) {
do_unroll( loop, old_new, false );
if (cl->trip_count() > 0) {
do_unroll(loop, old_new, false);
}
}
@ -1227,35 +1253,55 @@ bool PhaseIdealLoop::is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale,
// Eliminate range-checks and other trip-counter vs loop-invariant tests.
void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
#ifndef PRODUCT
if( PrintOpto && VerifyLoopOptimizations ) {
if (PrintOpto && VerifyLoopOptimizations) {
tty->print("Range Check Elimination ");
loop->dump_head();
} else if (TraceLoopOpts) {
tty->print("RangeCheck ");
loop->dump_head();
}
#endif
assert( RangeCheckElimination, "" );
assert(RangeCheckElimination, "");
CountedLoopNode *cl = loop->_head->as_CountedLoop();
assert( cl->is_main_loop(), "" );
assert(cl->is_main_loop(), "");
// protect against stride not being a constant
if (!cl->stride_is_con())
return;
// Find the trip counter; we are iteration splitting based on it
Node *trip_counter = cl->phi();
// Find the main loop limit; we will trim its iterations
// to not ever trip end tests
Node *main_limit = cl->limit();
// Need to find the main-loop zero-trip guard
Node *ctrl = cl->in(LoopNode::EntryControl);
assert(ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "");
Node *iffm = ctrl->in(0);
assert(iffm->Opcode() == Op_If, "");
Node *bolzm = iffm->in(1);
assert(bolzm->Opcode() == Op_Bool, "");
Node *cmpzm = bolzm->in(1);
assert(cmpzm->is_Cmp(), "");
Node *opqzm = cmpzm->in(2);
// Cannot optimize a loop if the pre-loop Opaque1 node is optimized
// away and then another round of loop opts attempted.
if (opqzm->Opcode() != Op_Opaque1)
return;
assert(opqzm->in(1) == main_limit, "do not understand situation");
// Find the pre-loop limit; we will expand its iterations to
// not ever trip low tests.
Node *ctrl = cl->in(LoopNode::EntryControl);
assert( ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "" );
Node *iffm = ctrl->in(0);
assert( iffm->Opcode() == Op_If, "" );
Node *p_f = iffm->in(0);
assert( p_f->Opcode() == Op_IfFalse, "" );
assert(p_f->Opcode() == Op_IfFalse, "");
CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd();
assert( pre_end->loopnode()->is_pre_loop(), "" );
assert(pre_end->loopnode()->is_pre_loop(), "");
Node *pre_opaq1 = pre_end->limit();
// Occasionally it's possible for a pre-loop Opaque1 node to be
// optimized away and then another round of loop opts attempted.
// We cannot optimize this particular loop in that case.
if( pre_opaq1->Opcode() != Op_Opaque1 )
if (pre_opaq1->Opcode() != Op_Opaque1)
return;
Opaque1Node *pre_opaq = (Opaque1Node*)pre_opaq1;
Node *pre_limit = pre_opaq->in(1);
@ -1266,25 +1312,11 @@ void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
// Ensure the original loop limit is available from the
// pre-loop Opaque1 node.
Node *orig_limit = pre_opaq->original_loop_limit();
if( orig_limit == NULL || _igvn.type(orig_limit) == Type::TOP )
if (orig_limit == NULL || _igvn.type(orig_limit) == Type::TOP)
return;
// Need to find the main-loop zero-trip guard
Node *bolzm = iffm->in(1);
assert( bolzm->Opcode() == Op_Bool, "" );
Node *cmpzm = bolzm->in(1);
assert( cmpzm->is_Cmp(), "" );
Node *opqzm = cmpzm->in(2);
if( opqzm->Opcode() != Op_Opaque1 )
return;
assert( opqzm->in(1) == main_limit, "do not understand situation" );
// Must know if its a count-up or count-down loop
// protect against stride not being a constant
if ( !cl->stride_is_con() ) {
return;
}
int stride_con = cl->stride_con();
Node *zero = _igvn.intcon(0);
Node *one = _igvn.intcon(1);
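The reordering above locates the main loop's zero-trip guard early by walking from the entry control to the guarding If, its Bool, its Cmp, and finally the Opaque1 holding main_limit, bailing out if igvn has already folded the Opaque1 away. The walk in miniature, over toy nodes rather than real C2 ones:

    // Toy node graph, not real C2 nodes:
    // entry -> guarding If -> Bool -> Cmp -> Opaque1(main_limit).
    struct N { int op; N* in[3]; };
    enum { IfTrue = 1, IfFalse, If, Bool, Cmp, Opaque1 };

    N* zero_trip_guard_opaque(N* entry) {
      if (entry->op != IfTrue && entry->op != IfFalse) return nullptr;
      N* iffm = entry->in[0];
      if (iffm->op != If)  return nullptr;
      N* bol = iffm->in[1];
      if (bol->op != Bool) return nullptr;
      N* cmp = bol->in[1];
      if (cmp->op != Cmp)  return nullptr;
      N* opq = cmp->in[2];
      // If igvn already folded the Opaque1, give up on RCE for this loop.
      return opq->op == Opaque1 ? opq : nullptr;
    }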
@ -1566,16 +1598,24 @@ void IdealLoopTree::adjust_loop_exit_prob( PhaseIdealLoop *phase ) {
// have on the last iteration. This will break the loop.
bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) {
// Minimum size must be empty loop
if( _body.size() > 7/*number of nodes in an empty loop*/ ) return false;
if (_body.size() > 7/*number of nodes in an empty loop*/)
return false;
if( !_head->is_CountedLoop() ) return false; // Dead loop
if (!_head->is_CountedLoop())
return false; // Dead loop
CountedLoopNode *cl = _head->as_CountedLoop();
if( !cl->loopexit() ) return false; // Malformed loop
if( !phase->is_member(this,phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue)) ) )
if (!cl->loopexit())
return false; // Malformed loop
if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue))))
return false; // Infinite loop
#ifndef PRODUCT
if( PrintOpto )
tty->print_cr("Removing empty loop");
if (PrintOpto) {
tty->print("Removing empty loop");
this->dump_head();
} else if (TraceLoopOpts) {
tty->print("Empty ");
this->dump_head();
}
#endif
#ifdef ASSERT
// Ensure only one phi which is the iv.
@ -1720,7 +1760,7 @@ bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_
//------------------------------iteration_split--------------------------------
bool IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new ) {
// Recursively iteration split nested loops
if( _child && !_child->iteration_split( phase, old_new ))
if (_child && !_child->iteration_split(phase, old_new))
return false;
// Clean out prior deadwood
@ -1729,21 +1769,20 @@ bool IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new )
// Look for loop-exit tests with my 50/50 guesses from the Parsing stage.
// Replace with a 1-in-10 exit guess.
if( _parent /*not the root loop*/ &&
if (_parent /*not the root loop*/ &&
!_irreducible &&
// Also ignore the occasional dead backedge
!tail()->is_top() ) {
!tail()->is_top()) {
adjust_loop_exit_prob(phase);
}
// Gate unrolling, RCE and peeling efforts.
if( !_child && // If not an inner loop, do not split
if (!_child && // If not an inner loop, do not split
!_irreducible &&
_allow_optimizations &&
!tail()->is_top() ) { // Also ignore the occasional dead backedge
!tail()->is_top()) { // Also ignore the occasional dead backedge
if (!_has_call) {
if (!iteration_split_impl( phase, old_new )) {
if (!iteration_split_impl(phase, old_new)) {
return false;
}
} else if (policy_unswitching(phase)) {
@ -1752,16 +1791,17 @@ bool IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new )
}
// Minor offset re-organization to remove loop-fallout uses of
// trip counter.
if( _head->is_CountedLoop() ) phase->reorg_offsets( this );
if( _next && !_next->iteration_split( phase, old_new ))
// trip counter when there was no major reshaping.
phase->reorg_offsets(this);
if (_next && !_next->iteration_split(phase, old_new))
return false;
return true;
}
//-------------------------------is_uncommon_trap_proj----------------------------
// Return true if proj is the form of "proj->[region->..]call_uct"
bool PhaseIdealLoop::is_uncommon_trap_proj(ProjNode* proj, bool must_reason_predicate) {
bool PhaseIdealLoop::is_uncommon_trap_proj(ProjNode* proj, Deoptimization::DeoptReason reason) {
int path_limit = 10;
assert(proj, "invalid argument");
Node* out = proj;
@ -1772,8 +1812,8 @@ bool PhaseIdealLoop::is_uncommon_trap_proj(ProjNode* proj, bool must_reason_pred
if (out->is_CallStaticJava()) {
int req = out->as_CallStaticJava()->uncommon_trap_request();
if (req != 0) {
Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(req);
if (!must_reason_predicate || reason == Deoptimization::Reason_predicate){
Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req);
if (trap_reason == reason || reason == Deoptimization::Reason_none) {
return true;
}
}
@ -1790,15 +1830,15 @@ bool PhaseIdealLoop::is_uncommon_trap_proj(ProjNode* proj, bool must_reason_pred
// other_proj->[region->..]call_uct"
//
// "must_reason_predicate" means the uct reason must be Reason_predicate
bool PhaseIdealLoop::is_uncommon_trap_if_pattern(ProjNode *proj, bool must_reason_predicate) {
bool PhaseIdealLoop::is_uncommon_trap_if_pattern(ProjNode *proj, Deoptimization::DeoptReason reason) {
Node *in0 = proj->in(0);
if (!in0->is_If()) return false;
// Variation of a dead If node.
if (in0->outcnt() < 2) return false;
IfNode* iff = in0->as_If();
// we need "If(Conv2B(Opaque1(...)))" pattern for must_reason_predicate
if (must_reason_predicate) {
// we need "If(Conv2B(Opaque1(...)))" pattern for reason_predicate
if (reason != Deoptimization::Reason_none) {
if (iff->in(1)->Opcode() != Op_Conv2B ||
iff->in(1)->in(1)->Opcode() != Op_Opaque1) {
return false;
@ -1806,7 +1846,19 @@ bool PhaseIdealLoop::is_uncommon_trap_if_pattern(ProjNode *proj, bool must_reaso
}
ProjNode* other_proj = iff->proj_out(1-proj->_con)->as_Proj();
return is_uncommon_trap_proj(other_proj, must_reason_predicate);
return is_uncommon_trap_proj(other_proj, reason);
}
//-------------------------------register_control-------------------------
void PhaseIdealLoop::register_control(Node* n, IdealLoopTree *loop, Node* pred) {
assert(n->is_CFG(), "must be control node");
_igvn.register_new_node_with_optimizer(n);
loop->_body.push(n);
set_loop(n, loop);
// When called from beautify_loops() idom is not constructed yet.
if (_idom != NULL) {
set_idom(n, pred, dom_depth(pred));
}
}
//------------------------------create_new_if_for_predicate------------------------
@ -1843,8 +1895,10 @@ bool PhaseIdealLoop::is_uncommon_trap_if_pattern(ProjNode *proj, bool must_reaso
//
// We will create a region to guard the uct call if there is not already one there.
// The true projection (if_cont) of the new_iff is returned.
ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj) {
assert(is_uncommon_trap_if_pattern(cont_proj, true), "must be a uct if pattern!");
// This code is also used to clone predicates to cloned loops.
ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
Deoptimization::DeoptReason reason) {
assert(is_uncommon_trap_if_pattern(cont_proj, reason), "must be a uct if pattern!");
IfNode* iff = cont_proj->in(0)->as_If();
ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con);
@ -1854,57 +1908,84 @@ ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj) {
if (!rgn->is_Region()) { // create a region to guard the call
assert(rgn->is_Call(), "must be call uct");
CallNode* call = rgn->as_Call();
IdealLoopTree* loop = get_loop(call);
rgn = new (C, 1) RegionNode(1);
_igvn.set_type(rgn, rgn->bottom_type());
rgn->add_req(uncommon_proj);
set_idom(rgn, idom(uncommon_proj), dom_depth(uncommon_proj)+1);
register_control(rgn, loop, uncommon_proj);
_igvn.hash_delete(call);
call->set_req(0, rgn);
// When called from beautify_loops() idom is not constructed yet.
if (_idom != NULL) {
set_idom(call, rgn, dom_depth(rgn));
}
}
Node* entry = iff->in(0);
if (new_entry != NULL) {
// Cloning the predicate to a new location.
entry = new_entry;
}
// Create new_iff
uint iffdd = dom_depth(iff);
IdealLoopTree* lp = get_loop(iff);
IfNode *new_iff = new (C, 2) IfNode(iff->in(0), NULL, iff->_prob, iff->_fcnt);
register_node(new_iff, lp, idom(iff), iffdd);
IdealLoopTree* lp = get_loop(entry);
IfNode *new_iff = new (C, 2) IfNode(entry, NULL, iff->_prob, iff->_fcnt);
register_control(new_iff, lp, entry);
Node *if_cont = new (C, 1) IfTrueNode(new_iff);
Node *if_uct = new (C, 1) IfFalseNode(new_iff);
if (cont_proj->is_IfFalse()) {
// Swap
Node* tmp = if_uct; if_uct = if_cont; if_cont = tmp;
}
register_node(if_cont, lp, new_iff, iffdd);
register_node(if_uct, get_loop(rgn), new_iff, iffdd);
// if_cont to iff
_igvn.hash_delete(iff);
iff->set_req(0, if_cont);
set_idom(iff, if_cont, dom_depth(iff));
register_control(if_cont, lp, new_iff);
register_control(if_uct, get_loop(rgn), new_iff);
// if_uct to rgn
_igvn.hash_delete(rgn);
rgn->add_req(if_uct);
Node* ridom = idom(rgn);
Node* nrdom = dom_lca(ridom, new_iff);
set_idom(rgn, nrdom, dom_depth(rgn));
// When called from beautify_loops() idom is not constructed yet.
if (_idom != NULL) {
Node* ridom = idom(rgn);
Node* nrdom = dom_lca(ridom, new_iff);
set_idom(rgn, nrdom, dom_depth(rgn));
}
// rgn must have no phis
assert(!rgn->as_Region()->has_phi(), "region must have no phis");
if (new_entry == NULL) {
// Attach if_cont to iff
_igvn.hash_delete(iff);
iff->set_req(0, if_cont);
if (_idom != NULL) {
set_idom(iff, if_cont, dom_depth(iff));
}
}
return if_cont->as_Proj();
}
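A toy model (not C2) of the two modes the rewritten create_new_if_for_predicate supports: with new_entry == NULL it splices the fresh predicate If above the existing one, and with a non-null new_entry (predicate cloning for a cloned loop) it builds the new If off that entry and leaves the original If untouched.

    // Toy control edges only; names are illustrative.
    struct N { N* ctrl; };

    N* new_predicate_if(N* old_iff, N* new_entry) {
      N* entry   = new_entry ? new_entry : old_iff->ctrl;
      N* new_iff = new N{entry};    // cloned test hangs off the chosen entry
      N* if_cont = new N{new_iff};  // surviving projection
      if (new_entry == nullptr)
        old_iff->ctrl = if_cont;    // in-place mode: old If now follows predicate
      return if_cont;
    }

Registering every new control node through register_control (rather than the removed ad-hoc set_idom calls) keeps loop membership consistent and tolerates the beautify_loops() case where dominator info does not exist yet.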
//------------------------------find_predicate_insertion_point--------------------------
//--------------------------find_predicate_insertion_point-------------------
// Find a good location to insert a predicate
ProjNode* PhaseIdealLoop::find_predicate_insertion_point(Node* start_c) {
if (start_c == C->root() || !start_c->is_Proj())
ProjNode* PhaseIdealLoop::find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason) {
if (start_c == NULL || !start_c->is_Proj())
return NULL;
if (is_uncommon_trap_if_pattern(start_c->as_Proj(), true/*Reason_Predicate*/)) {
if (is_uncommon_trap_if_pattern(start_c->as_Proj(), reason)) {
return start_c->as_Proj();
}
return NULL;
}
//--------------------------find_predicate------------------------------------
// Find a predicate
Node* PhaseIdealLoop::find_predicate(Node* entry) {
Node* predicate = NULL;
if (UseLoopPredicate) {
predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
if (predicate != NULL) { // right pattern that can be used by loop predication
assert(entry->in(0)->in(1)->in(1)->Opcode()==Op_Opaque1, "must be");
return entry;
}
}
return NULL;
}
//------------------------------Invariance-----------------------------------
// Helper class for loop_predication_impl to compute invariance on the fly and
// clone invariants.
@ -2151,6 +2232,11 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
return false;
}
if (loop->_head->unique_ctrl_out()->Opcode() == Op_NeverBranch) {
// do nothing for infinite loops
return false;
}
CountedLoopNode *cl = NULL;
if (loop->_head->is_CountedLoop()) {
cl = loop->_head->as_CountedLoop();
@ -2158,40 +2244,22 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
if (!cl->is_normal_loop()) return false;
}
// Too many traps seen?
bool tmt = C->too_many_traps(C->method(), 0, Deoptimization::Reason_predicate);
int tc = C->trap_count(Deoptimization::Reason_predicate);
if (tmt || tc > 0) {
if (TraceLoopPredicate) {
tty->print_cr("too many predicate traps: %d", tc);
C->method()->print(); // which method has too many predicate traps
tty->print_cr("");
}
return false;
}
LoopNode *lpn = loop->_head->as_Loop();
Node* entry = lpn->in(LoopNode::EntryControl);
ProjNode *predicate_proj = find_predicate_insertion_point(entry);
if (!predicate_proj){
ProjNode *predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
if (!predicate_proj) {
#ifndef PRODUCT
if (TraceLoopPredicate) {
tty->print("missing predicate:");
loop->dump_head();
lpn->dump(1);
}
#endif
return false;
}
ConNode* zero = _igvn.intcon(0);
set_ctrl(zero, C->root());
Node *cond_false = new (C, 2) Conv2BNode(zero);
register_new_node(cond_false, C->root());
ConNode* one = _igvn.intcon(1);
set_ctrl(one, C->root());
Node *cond_true = new (C, 2) Conv2BNode(one);
register_new_node(cond_true, C->root());
ResourceArea *area = Thread::current()->resource_area();
Invariance invar(area, loop);
@ -2218,7 +2286,7 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
ProjNode* proj = if_proj_list.pop()->as_Proj();
IfNode* iff = proj->in(0)->as_If();
if (!is_uncommon_trap_if_pattern(proj)) {
if (!is_uncommon_trap_if_pattern(proj, Deoptimization::Reason_none)) {
if (loop->is_loop_exit(iff)) {
// stop processing the remaining projs in the list because the execution of them
// depends on the condition of "iff" (iff->in(1)).
@ -2242,7 +2310,8 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
BoolNode* bol = test->as_Bool();
if (invar.is_invariant(bol)) {
// Invariant test
new_predicate_proj = create_new_if_for_predicate(predicate_proj);
new_predicate_proj = create_new_if_for_predicate(predicate_proj, NULL,
Deoptimization::Reason_predicate);
Node* ctrl = new_predicate_proj->in(0)->as_If()->in(0);
BoolNode* new_predicate_bol = invar.clone(bol, ctrl)->as_Bool();
@ -2256,8 +2325,15 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
IfNode* new_predicate_iff = new_predicate_proj->in(0)->as_If();
_igvn.hash_delete(new_predicate_iff);
new_predicate_iff->set_req(1, new_predicate_bol);
if (TraceLoopPredicate) tty->print_cr("invariant if%s: %d", negated ? " negated" : "", new_predicate_iff->_idx);
#ifndef PRODUCT
if (TraceLoopPredicate) {
tty->print("Predicate invariant if%s: %d ", negated ? " negated" : "", new_predicate_iff->_idx);
loop->dump_head();
} else if (TraceLoopOpts) {
tty->print("Predicate IC ");
loop->dump_head();
}
#endif
} else if (cl != NULL && loop->is_range_check_if(iff, this, invar)) {
assert(proj->_con == predicate_proj->_con, "must match");
@ -2281,8 +2357,8 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
// lower_bound test will dominate the upper bound test and all
// cloned or created nodes will use the lower bound test as
// their declared control.
ProjNode* lower_bound_proj = create_new_if_for_predicate(predicate_proj);
ProjNode* upper_bound_proj = create_new_if_for_predicate(predicate_proj);
ProjNode* lower_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, Deoptimization::Reason_predicate);
ProjNode* upper_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, Deoptimization::Reason_predicate);
assert(upper_bound_proj->in(0)->as_If()->in(0) == lower_bound_proj, "should dominate");
Node *ctrl = lower_bound_proj->in(0)->as_If()->in(0);
@ -2311,41 +2387,24 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
// Fall through into rest of the clean up code which will move
// any dependent nodes onto the upper bound test.
new_predicate_proj = upper_bound_proj;
#ifndef PRODUCT
if (TraceLoopOpts && !TraceLoopPredicate) {
tty->print("Predicate RC ");
loop->dump_head();
}
#endif
} else {
// The other proj of the "iff" is a uncommon trap projection, and we can assume
// the other proj will not be executed ("executed" means uct raised).
// Loop variant check (for example, range check in non-counted loop)
// with uncommon trap.
continue;
}
assert(new_predicate_proj != NULL, "sanity");
// Success - attach condition (new_predicate_bol) to predicate if
invar.map_ctrl(proj, new_predicate_proj); // so that invariance test can be appropriate
// Eliminate the old if in the loop body
_igvn.hash_delete(iff);
iff->set_req(1, proj->is_IfFalse() ? cond_false : cond_true);
Node* ctrl = new_predicate_proj; // new control
ProjNode* dp = proj; // old control
assert(get_loop(dp) == loop, "guaranteed at the time of collecting proj");
// Find nodes (depends only on the test) off the surviving projection;
// move them outside the loop with the control of proj_clone
for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
Node* cd = dp->fast_out(i); // Control-dependent node
if (cd->depends_only_on_test()) {
assert(cd->in(0) == dp, "");
_igvn.hash_delete(cd);
cd->set_req(0, ctrl); // ctrl, not NULL
set_early_ctrl(cd);
_igvn._worklist.push(cd);
IdealLoopTree *new_loop = get_loop(get_ctrl(cd));
if (new_loop != loop) {
if (!loop->_child) loop->_body.yank(cd);
if (!new_loop->_child ) new_loop->_body.push(cd);
}
--i;
--imax;
}
}
// Eliminate the old If in the loop body
dominated_by( new_predicate_proj, iff, proj->_con != new_predicate_proj->_con );
hoisted = true;
C->set_major_progress();


@ -110,6 +110,13 @@ void PhaseIdealLoop::do_unswitching (IdealLoopTree *loop, Node_List &old_new) {
IfNode* unswitch_iff = find_unswitching_candidate((const IdealLoopTree *)loop);
assert(unswitch_iff != NULL, "should be at least one");
#ifndef PRODUCT
if (TraceLoopOpts) {
tty->print("Unswitch %d ", head->unswitch_count()+1);
loop->dump_head();
}
#endif
// Need to revert back to normal loop
if (head->is_CountedLoop() && !head->as_CountedLoop()->is_normal_loop()) {
head->as_CountedLoop()->set_normal_loop();


@ -56,12 +56,32 @@ const Node* Node::is_loop_iv() const {
// Dump special per-node info
#ifndef PRODUCT
void LoopNode::dump_spec(outputStream *st) const {
if( is_inner_loop () ) st->print( "inner " );
if( is_partial_peel_loop () ) st->print( "partial_peel " );
if( partial_peel_has_failed () ) st->print( "partial_peel_failed " );
if (is_inner_loop()) st->print( "inner " );
if (is_partial_peel_loop()) st->print( "partial_peel " );
if (partial_peel_has_failed()) st->print( "partial_peel_failed " );
}
#endif
//------------------------------is_valid_counted_loop-------------------------
bool LoopNode::is_valid_counted_loop() const {
if (is_CountedLoop()) {
CountedLoopNode* l = as_CountedLoop();
CountedLoopEndNode* le = l->loopexit();
if (le != NULL &&
le->proj_out(1 /* true */) == l->in(LoopNode::LoopBackControl)) {
Node* phi = l->phi();
Node* exit = le->proj_out(0 /* false */);
if (exit != NULL && exit->Opcode() == Op_IfFalse &&
phi != NULL && phi->is_Phi() &&
phi->in(LoopNode::LoopBackControl) == l->incr() &&
le->loopnode() == l && le->stride_is_con()) {
return true;
}
}
}
return false;
}
//------------------------------get_early_ctrl---------------------------------
// Compute earliest legal control
Node *PhaseIdealLoop::get_early_ctrl( Node *n ) {
@ -142,43 +162,44 @@ void PhaseIdealLoop::set_subtree_ctrl( Node *n ) {
}
//------------------------------is_counted_loop--------------------------------
Node *PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
bool PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
PhaseGVN *gvn = &_igvn;
// Counted loop head must be a good RegionNode with only 3 not NULL
// control input edges: Self, Entry, LoopBack.
if ( x->in(LoopNode::Self) == NULL || x->req() != 3 )
return NULL;
if (x->in(LoopNode::Self) == NULL || x->req() != 3)
return false;
Node *init_control = x->in(LoopNode::EntryControl);
Node *back_control = x->in(LoopNode::LoopBackControl);
if( init_control == NULL || back_control == NULL ) // Partially dead
return NULL;
if (init_control == NULL || back_control == NULL) // Partially dead
return false;
// Must also check for TOP when looking for a dead loop
if( init_control->is_top() || back_control->is_top() )
return NULL;
if (init_control->is_top() || back_control->is_top())
return false;
// Allow funny placement of Safepoint
if( back_control->Opcode() == Op_SafePoint )
if (back_control->Opcode() == Op_SafePoint)
back_control = back_control->in(TypeFunc::Control);
// Controlling test for loop
Node *iftrue = back_control;
uint iftrue_op = iftrue->Opcode();
if( iftrue_op != Op_IfTrue &&
iftrue_op != Op_IfFalse )
if (iftrue_op != Op_IfTrue &&
iftrue_op != Op_IfFalse)
// I have a weird back-control. Probably the loop-exit test is in
// the middle of the loop and I am looking at some trailing control-flow
// merge point. To fix this I would have to partially peel the loop.
return NULL; // Obscure back-control
return false; // Obscure back-control
// Get boolean guarding loop-back test
Node *iff = iftrue->in(0);
if( get_loop(iff) != loop || !iff->in(1)->is_Bool() ) return NULL;
if (get_loop(iff) != loop || !iff->in(1)->is_Bool())
return false;
BoolNode *test = iff->in(1)->as_Bool();
BoolTest::mask bt = test->_test._test;
float cl_prob = iff->as_If()->_prob;
if( iftrue_op == Op_IfFalse ) {
if (iftrue_op == Op_IfFalse) {
bt = BoolTest(bt).negate();
cl_prob = 1.0 - cl_prob;
}
@ -186,7 +207,7 @@ Node *PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
Node *cmp = test->in(1);
int cmp_op = cmp->Opcode();
if( cmp_op != Op_CmpI )
return NULL; // Avoid pointer & float compares
return false; // Avoid pointer & float compares
// Find the trip-counter increment & limit. Limit must be loop invariant.
Node *incr = cmp->in(1);
@ -196,55 +217,64 @@ Node *PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
// need 'loop()' test to tell if limit is loop invariant
// ---------
if( !is_member( loop, get_ctrl(incr) ) ) { // Swapped trip counter and limit?
Node *tmp = incr; // Then reverse order into the CmpI
if (!is_member(loop, get_ctrl(incr))) { // Swapped trip counter and limit?
Node *tmp = incr; // Then reverse order into the CmpI
incr = limit;
limit = tmp;
bt = BoolTest(bt).commute(); // And commute the exit test
}
if( is_member( loop, get_ctrl(limit) ) ) // Limit must loop-invariant
return NULL;
if (is_member(loop, get_ctrl(limit))) // Limit must be loop-invariant
return false;
if (!is_member(loop, get_ctrl(incr))) // Trip counter must be loop-variant
return false;
Node* phi_incr = NULL;
// Trip-counter increment must be commutative & associative.
uint incr_op = incr->Opcode();
if( incr_op == Op_Phi && incr->req() == 3 ) {
incr = incr->in(2); // Assume incr is on backedge of Phi
incr_op = incr->Opcode();
if (incr->is_Phi()) {
if (incr->as_Phi()->region() != x || incr->req() != 3)
return false; // Not simple trip counter expression
phi_incr = incr;
incr = phi_incr->in(LoopNode::LoopBackControl); // Assume incr is on backedge of Phi
if (!is_member(loop, get_ctrl(incr))) // Trip counter must be loop-variant
return false;
}
Node* trunc1 = NULL;
Node* trunc2 = NULL;
const TypeInt* iv_trunc_t = NULL;
if (!(incr = CountedLoopNode::match_incr_with_optional_truncation(incr, &trunc1, &trunc2, &iv_trunc_t))) {
return NULL; // Funny increment opcode
return false; // Funny increment opcode
}
assert(incr->Opcode() == Op_AddI, "wrong increment code");
// Get merge point
Node *xphi = incr->in(1);
Node *stride = incr->in(2);
if( !stride->is_Con() ) { // Oops, swap these
if( !xphi->is_Con() ) // Is the other guy a constant?
return NULL; // Nope, unknown stride, bail out
if (!stride->is_Con()) { // Oops, swap these
if (!xphi->is_Con()) // Is the other guy a constant?
return false; // Nope, unknown stride, bail out
Node *tmp = xphi; // 'incr' is commutative, so ok to swap
xphi = stride;
stride = tmp;
}
//if( loop(xphi) != l) return NULL;// Merge point is in inner loop??
if( !xphi->is_Phi() ) return NULL; // Too much math on the trip counter
// Stride must be constant
int stride_con = stride->get_int();
assert(stride_con != 0, "missed some peephole opt");
if (!xphi->is_Phi())
return false; // Too much math on the trip counter
if (phi_incr != NULL && phi_incr != xphi)
return false;
PhiNode *phi = xphi->as_Phi();
// Stride must be constant
const Type *stride_t = stride->bottom_type();
int stride_con = stride_t->is_int()->get_con();
assert( stride_con, "missed some peephole opt" );
// Phi must be of loop header; backedge must wrap to increment
if( phi->region() != x ) return NULL;
if( trunc1 == NULL && phi->in(LoopNode::LoopBackControl) != incr ||
trunc1 != NULL && phi->in(LoopNode::LoopBackControl) != trunc1 ) {
return NULL;
if (phi->region() != x)
return false;
if (trunc1 == NULL && phi->in(LoopNode::LoopBackControl) != incr ||
trunc1 != NULL && phi->in(LoopNode::LoopBackControl) != trunc1) {
return false;
}
Node *init_trip = phi->in(LoopNode::EntryControl);
//if (!init_trip->is_Con()) return NULL; // avoid rolling over MAXINT/MININT
// If iv trunc type is smaller than int, check for possible wrap.
if (!TypeInt::INT->higher_equal(iv_trunc_t)) {
@ -267,12 +297,12 @@ Node *PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
if (stride_con > 0) {
if (iv_trunc_t->_hi - phi_ft->_hi < stride_con ||
iv_trunc_t->_lo > phi_ft->_lo) {
return NULL; // truncation may occur
return false; // truncation may occur
}
} else if (stride_con < 0) {
if (iv_trunc_t->_lo - phi_ft->_lo > stride_con ||
iv_trunc_t->_hi < phi_ft->_hi) {
return NULL; // truncation may occur
return false; // truncation may occur
}
}
// No possibility of wrap so truncation can be discarded
@ -281,35 +311,45 @@ Node *PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
assert(trunc1 == NULL && trunc2 == NULL, "no truncation for int");
}
// If the condition is inverted and we will be rolling
// through MININT to MAXINT, then bail out.
if (bt == BoolTest::eq || // Bail out, but this loop trips at most twice!
// Odd stride
bt == BoolTest::ne && stride_con != 1 && stride_con != -1 ||
// Count down loop rolls through MAXINT
(bt == BoolTest::le || bt == BoolTest::lt) && stride_con < 0 ||
// Count up loop rolls through MININT
(bt == BoolTest::ge || bt == BoolTest::gt) && stride_con > 0 ) {
return false; // Bail out
}
const TypeInt* init_t = gvn->type(init_trip)->is_int();
const TypeInt* limit_t = gvn->type(limit)->is_int();
if (stride_con > 0) {
long init_p = (long)init_t->_lo + stride_con;
if (init_p > (long)max_jint || init_p > (long)limit_t->_hi)
return false; // cyclic loop or this loop trips only once
} else {
long init_p = (long)init_t->_hi + stride_con;
if (init_p < (long)min_jint || init_p < (long)limit_t->_lo)
return false; // cyclic loop or this loop trips only once
}
// =================================================
// ---- SUCCESS! Found A Trip-Counted Loop! -----
//
// Canonicalize the condition on the test. If we can exactly determine
// the trip-counter exit value, then set limit to that value and use
// a '!=' test. Otherwise use condition '<' for count-up loops and
// '>' for count-down loops. If the condition is inverted and we will
// be rolling through MININT to MAXINT, then bail out.
assert(x->Opcode() == Op_Loop, "regular loops only");
C->print_method("Before CountedLoop", 3);
// Check for SafePoint on backedge and remove
Node *sfpt = x->in(LoopNode::LoopBackControl);
if( sfpt->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt)) {
lazy_replace( sfpt, iftrue );
loop->_tail = iftrue;
}
// If compare points to incr, we are ok. Otherwise the compare
// can directly point to the phi; in this case adjust the compare so that
// it points to the incr by adjusting the limit.
if( cmp->in(1) == phi || cmp->in(2) == phi )
if (cmp->in(1) == phi || cmp->in(2) == phi)
limit = gvn->transform(new (C, 3) AddINode(limit,stride));
// trip-count for +-tive stride should be: (limit - init_trip + stride - 1)/stride.
// Final value for iterator should be: trip_count * stride + init_trip.
const Type *limit_t = limit->bottom_type();
const Type *init_t = init_trip->bottom_type();
Node *one_p = gvn->intcon( 1);
Node *one_m = gvn->intcon(-1);
@ -317,15 +357,15 @@ Node *PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
Node *hook = new (C, 6) Node(6);
switch( bt ) {
case BoolTest::eq:
return NULL; // Bail out, but this loop trips at most twice!
ShouldNotReachHere();
case BoolTest::ne: // Ahh, the case we desire
if( stride_con == 1 )
if (stride_con == 1)
trip_count = gvn->transform(new (C, 3) SubINode(limit,init_trip));
else if( stride_con == -1 )
else if (stride_con == -1)
trip_count = gvn->transform(new (C, 3) SubINode(init_trip,limit));
else
return NULL; // Odd stride; must prove we hit limit exactly
set_subtree_ctrl( trip_count );
ShouldNotReachHere();
set_subtree_ctrl(trip_count);
//_loop.map(trip_count->_idx,loop(limit));
break;
case BoolTest::le: // Maybe convert to '<' case
@ -338,7 +378,8 @@ Node *PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
//_loop.map(limit->_idx,limit_loop);
// Fall into next case
case BoolTest::lt: { // Maybe convert to '!=' case
if( stride_con < 0 ) return NULL; // Count down loop rolls through MAXINT
if (stride_con < 0) // Count down loop rolls through MAXINT
ShouldNotReachHere();
Node *range = gvn->transform(new (C, 3) SubINode(limit,init_trip));
set_subtree_ctrl( range );
hook->init_req(0, range);
@ -367,7 +408,8 @@ Node *PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
//_loop.map(limit->_idx,limit_loop);
// Fall into next case
case BoolTest::gt: { // Maybe convert to '!=' case
if( stride_con > 0 ) return NULL; // count up loop rolls through MININT
if (stride_con > 0) // count up loop rolls through MININT
ShouldNotReachHere();
Node *range = gvn->transform(new (C, 3) SubINode(limit,init_trip));
set_subtree_ctrl( range );
hook->init_req(0, range);
@ -385,7 +427,7 @@ Node *PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
hook->init_req(3, trip_count);
break;
}
}
} // switch( bt )
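A quick numeric check of the trip-count and final-value formulas quoted just above this switch, assuming a simple count-up loop:

    #include <cassert>

    int main() {
      int init_trip = 0, limit = 10, stride = 3;
      int trip_count = (limit - init_trip + stride - 1) / stride;  // ceil(10/3)
      assert(trip_count == 4);                  // i takes 0, 3, 6, 9
      int final_iv = trip_count * stride + init_trip;
      assert(final_iv == 12);                   // first value at or past the limit
      return 0;
    }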
Node *span = gvn->transform(new (C, 3) MulINode(trip_count,stride));
set_subtree_ctrl( span );
@ -394,83 +436,82 @@ Node *PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
limit = gvn->transform(new (C, 3) AddINode(span,init_trip));
set_subtree_ctrl( limit );
// Check for SafePoint on backedge and remove
Node *sfpt = x->in(LoopNode::LoopBackControl);
if (sfpt->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt)) {
lazy_replace( sfpt, iftrue );
loop->_tail = iftrue;
}
// Build a canonical trip test.
// Clone code, as old values may be in use.
Node* nphi = PhiNode::make(x, init_trip, TypeInt::INT);
nphi = _igvn.register_new_node_with_optimizer(nphi);
set_ctrl(nphi, get_ctrl(phi));
incr = incr->clone();
incr->set_req(1,phi);
incr->set_req(1,nphi);
incr->set_req(2,stride);
incr = _igvn.register_new_node_with_optimizer(incr);
set_early_ctrl( incr );
_igvn.hash_delete(phi);
phi->set_req_X( LoopNode::LoopBackControl, incr, &_igvn );
// If phi type is more restrictive than Int, raise to
// Int to prevent (almost) infinite recursion in igvn
// which can only handle integer types for constants or minint..maxint.
if (!TypeInt::INT->higher_equal(phi->bottom_type())) {
Node* nphi = PhiNode::make(phi->in(0), phi->in(LoopNode::EntryControl), TypeInt::INT);
nphi->set_req(LoopNode::LoopBackControl, phi->in(LoopNode::LoopBackControl));
nphi = _igvn.register_new_node_with_optimizer(nphi);
set_ctrl(nphi, get_ctrl(phi));
_igvn.replace_node(phi, nphi);
phi = nphi->as_Phi();
}
nphi->set_req(LoopNode::LoopBackControl, incr);
_igvn.replace_node(phi, nphi);
phi = nphi->as_Phi();
cmp = cmp->clone();
cmp->set_req(1,incr);
cmp->set_req(2,limit);
cmp = _igvn.register_new_node_with_optimizer(cmp);
set_ctrl(cmp, iff->in(0));
Node *tmp = test->clone();
assert( tmp->is_Bool(), "" );
test = (BoolNode*)tmp;
(*(BoolTest*)&test->_test)._test = bt; //BoolTest::ne;
test = test->clone()->as_Bool();
(*(BoolTest*)&test->_test)._test = bt;
test->set_req(1,cmp);
_igvn.register_new_node_with_optimizer(test);
set_ctrl(test, iff->in(0));
// If the exit test is dead, STOP!
if( test == NULL ) return NULL;
_igvn.hash_delete(iff);
iff->set_req_X( 1, test, &_igvn );
// Replace the old IfNode with a new LoopEndNode
Node *lex = _igvn.register_new_node_with_optimizer(new (C, 2) CountedLoopEndNode( iff->in(0), iff->in(1), cl_prob, iff->as_If()->_fcnt ));
Node *lex = _igvn.register_new_node_with_optimizer(new (C, 2) CountedLoopEndNode( iff->in(0), test, cl_prob, iff->as_If()->_fcnt ));
IfNode *le = lex->as_If();
uint dd = dom_depth(iff);
set_idom(le, le->in(0), dd); // Update dominance for loop exit
set_loop(le, loop);
// Get the loop-exit control
Node *if_f = iff->as_If()->proj_out(!(iftrue_op == Op_IfTrue));
Node *iffalse = iff->as_If()->proj_out(!(iftrue_op == Op_IfTrue));
// Need to swap loop-exit and loop-back control?
if( iftrue_op == Op_IfFalse ) {
if (iftrue_op == Op_IfFalse) {
Node *ift2=_igvn.register_new_node_with_optimizer(new (C, 1) IfTrueNode (le));
Node *iff2=_igvn.register_new_node_with_optimizer(new (C, 1) IfFalseNode(le));
loop->_tail = back_control = ift2;
set_loop(ift2, loop);
set_loop(iff2, get_loop(if_f));
set_loop(iff2, get_loop(iffalse));
// Lazy update of 'get_ctrl' mechanism.
lazy_replace_proj( if_f , iff2 );
lazy_replace_proj( iftrue, ift2 );
lazy_replace_proj( iffalse, iff2 );
lazy_replace_proj( iftrue, ift2 );
// Swap names
if_f = iff2;
iftrue = ift2;
iffalse = iff2;
iftrue = ift2;
} else {
_igvn.hash_delete(if_f );
_igvn.hash_delete(iffalse);
_igvn.hash_delete(iftrue);
if_f ->set_req_X( 0, le, &_igvn );
iftrue->set_req_X( 0, le, &_igvn );
iffalse->set_req_X( 0, le, &_igvn );
iftrue ->set_req_X( 0, le, &_igvn );
}
set_idom(iftrue, le, dd+1);
set_idom(if_f, le, dd+1);
set_idom(iftrue, le, dd+1);
set_idom(iffalse, le, dd+1);
assert(iff->outcnt() == 0, "should be dead now");
lazy_replace( iff, le ); // fix 'get_ctrl'
// Now setup a new CountedLoopNode to replace the existing LoopNode
CountedLoopNode *l = new (C, 3) CountedLoopNode(init_control, back_control);
l->set_unswitch_count(x->as_Loop()->unswitch_count()); // Preserve
// The following assert is approximately true, and defines the intention
// of can_be_counted_loop. It fails, however, because phase->type
// is not yet initialized for this loop and its parts.
@ -491,10 +532,14 @@ Node *PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
// Free up intermediate goo
_igvn.remove_dead_node(hook);
#ifdef ASSERT
assert(l->is_valid_counted_loop(), "counted loop shape is messed up");
assert(l == loop->_head && l->phi() == phi && l->loopexit() == lex, "" );
#endif
C->print_method("After CountedLoop", 3);
// Return trip counter
return trip_count;
return true;
}
@ -1256,17 +1301,98 @@ bool PhaseIdealLoop::is_deleteable_safept(Node* sfpt) {
return true;
}
//---------------------------replace_parallel_iv-------------------------------
// Replace parallel induction variable (parallel to trip counter)
void PhaseIdealLoop::replace_parallel_iv(IdealLoopTree *loop) {
assert(loop->_head->is_CountedLoop(), "");
CountedLoopNode *cl = loop->_head->as_CountedLoop();
Node *incr = cl->incr();
if (incr == NULL)
return; // Dead loop?
Node *init = cl->init_trip();
Node *phi = cl->phi();
// protect against stride not being a constant
if (!cl->stride_is_con())
return;
int stride_con = cl->stride_con();
PhaseGVN *gvn = &_igvn;
// Visit all children, looking for Phis
for (DUIterator i = cl->outs(); cl->has_out(i); i++) {
Node *out = cl->out(i);
// Look for other phis (secondary IVs). Skip dead ones
if (!out->is_Phi() || out == phi || !has_node(out))
continue;
PhiNode* phi2 = out->as_Phi();
Node *incr2 = phi2->in( LoopNode::LoopBackControl );
// Look for induction variables of the form: X += constant
if (phi2->region() != loop->_head ||
incr2->req() != 3 ||
incr2->in(1) != phi2 ||
incr2 == incr ||
incr2->Opcode() != Op_AddI ||
!incr2->in(2)->is_Con())
continue;
// Check for parallel induction variable (parallel to trip counter)
// via an affine function. In particular, count-down loops with
// count-up array indices are common. We only RCE references off
// the trip-counter, so we need to convert all these to trip-counter
// expressions.
Node *init2 = phi2->in( LoopNode::EntryControl );
int stride_con2 = incr2->in(2)->get_int();
// The general case here gets a little tricky. We want to find the
// GCD of all possible parallel IV's and make a new IV using this
// GCD for the loop. Then all possible IVs are simple multiples of
// the GCD. In practice, this will cover very few extra loops.
// Instead we require 'stride_con2' to be a multiple of 'stride_con',
// where +/-1 is the common case, but other integer multiples are
// also easy to handle.
int ratio_con = stride_con2/stride_con;
if ((ratio_con * stride_con) == stride_con2) { // Check for exact
// Convert to using the trip counter. The parallel induction
// variable differs from the trip counter by a loop-invariant
// amount, the difference between their respective initial values.
// It is scaled by the 'ratio_con'.
// Perform local Ideal transformation since in most cases ratio == 1.
Node* ratio = _igvn.intcon(ratio_con);
set_ctrl(ratio, C->root());
Node* hook = new (C, 3) Node(3);
Node* ratio_init = gvn->transform(new (C, 3) MulINode(init, ratio));
hook->init_req(0, ratio_init);
Node* diff = gvn->transform(new (C, 3) SubINode(init2, ratio_init));
hook->init_req(1, diff);
Node* ratio_idx = gvn->transform(new (C, 3) MulINode(phi, ratio));
hook->init_req(2, ratio_idx);
Node* add = gvn->transform(new (C, 3) AddINode(ratio_idx, diff));
set_subtree_ctrl(add);
_igvn.replace_node( phi2, add );
// Free up intermediate goo
_igvn.remove_dead_node(hook);
// Sometimes an induction variable is unused
if (add->outcnt() == 0) {
_igvn.remove_dead_node(add);
}
--i; // deleted this phi; rescan starting with next position
continue;
}
}
}
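The ratio/diff arithmetic above rests on an affine identity between the two induction variables: at every iteration, phi2 == ratio * phi + (init2 - ratio * init) whenever stride2 == ratio * stride exactly. A minimal standalone check of that identity (a reviewer's sketch in plain C++, not HotSpot code; all names are illustrative):

#include <cassert>

int main() {
  const int init  = 5, stride  = -1;   // count-down trip counter
  const int init2 = 0, stride2 = 2;    // count-up parallel IV
  const int ratio = stride2 / stride;  // -2; exact: ratio * stride == stride2
  assert(ratio * stride == stride2);
  int phi = init, phi2 = init2;
  for (int trip = 0; trip < 5; trip++) {
    // the replacement expression references only the trip counter phi
    assert(phi2 == ratio * phi + (init2 - ratio * init));
    phi  += stride;
    phi2 += stride2;
  }
  return 0;
}

This is why the code can substitute AddI(MulI(phi, ratio), diff) for the secondary phi and leave the trip counter as the loop's only induction variable, which RCE can then reason about.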
//------------------------------counted_loop-----------------------------------
// Convert to counted loops where possible
void IdealLoopTree::counted_loop( PhaseIdealLoop *phase ) {
// For grins, set the inner-loop flag here
if (!_child) {
if (_head->is_Loop()) _head->as_Loop()->set_inner_loop();
}
if (_head->is_CountedLoop() ||
phase->is_counted_loop(_head, this)) {
_has_sfpt = 1; // Indicate we do not need a safepoint here
// Look for a safepoint to remove
@ -1275,79 +1401,9 @@ void IdealLoopTree::counted_loop( PhaseIdealLoop *phase ) {
phase->is_deleteable_safept(n))
phase->lazy_replace(n,n->in(TypeFunc::Control));
CountedLoopNode *cl = _head->as_CountedLoop();
// Look for induction variables
phase->replace_parallel_iv(this);
} else if (_parent != NULL && !_irreducible) {
// Not a counted loop.
// Look for a safepoint on the idom-path to remove, preserving the first one
@ -1366,24 +1422,31 @@ void IdealLoopTree::counted_loop( PhaseIdealLoop *phase ) {
}
// Recursively
if (_child) _child->counted_loop( phase );
if (_next) _next ->counted_loop( phase );
}
#ifndef PRODUCT
//------------------------------dump_head--------------------------------------
// Dump 1 liner for loop header info
void IdealLoopTree::dump_head( ) const {
for (uint i=0; i<_nest; i++)
tty->print(" ");
tty->print("Loop: N%d/N%d ",_head->_idx,_tail->_idx);
if (_irreducible) tty->print(" IRREDUCIBLE");
if (UseLoopPredicate) {
Node* entry = _head->in(LoopNode::EntryControl);
if (entry != NULL && entry->is_Proj() &&
PhaseIdealLoop::is_uncommon_trap_if_pattern(entry->as_Proj(), Deoptimization::Reason_predicate)) {
tty->print(" predicated");
}
}
if (_head->is_CountedLoop()) {
CountedLoopNode *cl = _head->as_CountedLoop();
tty->print(" counted");
if (cl->is_pre_loop ()) tty->print(" pre" );
if (cl->is_main_loop()) tty->print(" main");
if (cl->is_post_loop()) tty->print(" post");
}
tty->cr();
}
@ -1392,8 +1455,8 @@ void IdealLoopTree::dump_head( ) const {
// Dump loops by loop tree
void IdealLoopTree::dump( ) const {
dump_head();
if (_child) _child->dump();
if (_next) _next ->dump();
}
#endif
@ -1439,19 +1502,19 @@ void PhaseIdealLoop::collect_potentially_useful_predicates(
}
// self (only loops that we can apply loop predication may use their predicates)
if (loop->_head->is_Loop() &&
!loop->_irreducible &&
!loop->tail()->is_top()) {
LoopNode* lpn = loop->_head->as_Loop();
Node* entry = lpn->in(LoopNode::EntryControl);
Node* predicate_proj = find_predicate(entry);
if (predicate_proj != NULL ) { // right pattern that can be used by loop predication
assert(entry->in(0)->in(1)->in(1)->Opcode() == Op_Opaque1, "must be");
useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one
}
}
if (loop->_next) { // sibling
collect_potentially_useful_predicates(loop->_next, useful_predicates);
}
}
@ -1459,7 +1522,8 @@ void PhaseIdealLoop::collect_potentially_useful_predicates(
//------------------------eliminate_useless_predicates-----------------------------
// Eliminate all inserted predicates if they could not be used by loop predication.
void PhaseIdealLoop::eliminate_useless_predicates() {
if (C->predicate_count() == 0)
return; // no predicate left
Unique_Node_List useful_predicates; // to store useful predicates
if (C->has_loops()) {
@ -1647,12 +1711,15 @@ void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool do_loop_pred) {
#ifndef PRODUCT
C->verify_graph_edges();
if (_verify_me) { // Nested verify pass?
// Check to see if the verify mode is broken
assert(C->unique() == unique, "non-optimize mode made Nodes? ? ?");
return;
}
if (VerifyLoopOptimizations) verify();
if (TraceLoopOpts && C->has_loops()) {
_ltree_root->dump();
}
#endif
if (ReassociateInvariants) {

View File

@ -93,6 +93,7 @@ public:
in(1) != NULL && phase->type(in(1)) != Type::TOP &&
in(2) != NULL && phase->type(in(2)) != Type::TOP;
}
bool is_valid_counted_loop() const;
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;
#endif
@ -101,9 +102,8 @@ public:
//------------------------------Counted Loops----------------------------------
// Counted loops are all trip-counted loops, with exactly 1 trip-counter exit
// path (and maybe some other exit paths). The trip-counter exit is always
// last in the loop. The trip-counter has to stride by a constant;
// the exit value is also loop invariant.
// CountedLoopNodes and CountedLoopEndNodes come in matched pairs. The
// CountedLoopNode has the incoming loop control and the loop-back-control
@ -112,7 +112,7 @@ public:
// CountedLoopNode if there is control flow in the loop), the post-increment
// trip-counter value, and the limit. The trip-counter value is always of
// the form (Op old-trip-counter stride). The old-trip-counter is produced
// by a Phi connected to the CountedLoopNode. The stride is constant.
// The Op is any commutable opcode, including Add, Mul, Xor. The
// CountedLoopEndNode also takes in the loop-invariant limit value.
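To make the matched pair concrete, a reviewer's sketch (not in the source; names illustrative) of the node shape the comments above describe:

//   entry          back_control
//       \             /
//       CountedLoopNode (cl)
//             |
//   phi  = Phi(cl, init, incr)       // the old trip counter
//   incr = AddI(phi, ConI(stride))   // stride is a compile-time constant
//   cle  = CountedLoopEndNode(Bool(CmpI(incr, limit)))
//   IfTrue(cle)  --> back_control    // take the backedge
//   IfFalse(cle) --> loop exit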
@ -696,6 +696,9 @@ private:
// Is safept not required by an outer loop?
bool is_deleteable_safept(Node* sfpt);
// Replace parallel induction variable (parallel to trip counter)
void replace_parallel_iv(IdealLoopTree *loop);
// Perform verification that the graph is valid.
PhaseIdealLoop( PhaseIterGVN &igvn) :
PhaseTransform(Ideal_Loop),
@ -751,7 +754,7 @@ public:
// Per-Node transform
virtual Node *transform( Node *a_node ) { return 0; }
bool is_counted_loop( Node *x, IdealLoopTree *loop );
// Return a post-walked LoopNode
IdealLoopTree *get_loop( Node *n ) const {
@ -815,16 +818,22 @@ public:
bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth = 0);
// Return true if proj is for "proj->[region->..]call_uct"
static bool is_uncommon_trap_proj(ProjNode* proj, Deoptimization::DeoptReason reason);
// Return true for "if(test)-> proj -> ...
// |
// V
// other_proj->[region->..]call_uct"
static bool is_uncommon_trap_if_pattern(ProjNode* proj, Deoptimization::DeoptReason reason);
// Create a new if above the uncommon_trap_if_pattern for the predicate to be promoted
ProjNode* create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
Deoptimization::DeoptReason reason);
void register_control(Node* n, IdealLoopTree *loop, Node* pred);
// Find a good location to insert a predicate
static ProjNode* find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason);
// Find a predicate
static Node* find_predicate(Node* entry);
// Construct a range check for a predicate if
BoolNode* rc_predicate(Node* ctrl,
int scale, Node* offset,
@ -936,7 +945,7 @@ public:
Node *has_local_phi_input( Node *n );
// Mark an IfNode as being dominated by a prior test,
// without actually altering the CFG (and hence IDOM info).
void dominated_by( Node *prevdom, Node *iff, bool flip = false );
// Split Node 'n' through merge point
Node *split_thru_region( Node *n, Node *region );

View File

@ -42,13 +42,13 @@ Node *PhaseIdealLoop::split_thru_phi( Node *n, Node *region, int policy ) {
return NULL;
}
int wins = 0;
assert(!n->is_CFG(), "");
assert(region->is_Region(), "");
const Type* type = n->bottom_type();
const TypeOopPtr *t_oop = _igvn.type(n)->isa_oopptr();
Node *phi;
if (t_oop != NULL && t_oop->is_known_instance_field()) {
int iid = t_oop->instance_id();
int index = C->get_alias_index(t_oop);
int offset = t_oop->offset();
@ -57,20 +57,20 @@ Node *PhaseIdealLoop::split_thru_phi( Node *n, Node *region, int policy ) {
phi = PhiNode::make_blank(region, n);
}
uint old_unique = C->unique();
for (uint i = 1; i < region->req(); i++) {
Node *x;
Node* the_clone = NULL;
if (region->in(i) == C->top()) {
x = C->top(); // Dead path? Use a dead data op
} else {
x = n->clone(); // Else clone up the data op
the_clone = x; // Remember for possible deletion.
// Alter data node to use pre-phi inputs
if (n->in(0) == region)
x->set_req( 0, region->in(i) );
for (uint j = 1; j < n->req(); j++) {
Node *in = n->in(j);
if (in->is_Phi() && in->in(0) == region)
x->set_req( j, in->in(i) ); // Use pre-Phi input for the clone
}
}
@ -85,7 +85,7 @@ Node *PhaseIdealLoop::split_thru_phi( Node *n, Node *region, int policy ) {
// happen if the singleton occurs on loop entry, as the elimination of
// the PhiNode may cause the resulting node to migrate back to a previous
// loop iteration.
if (singleton && t == Type::TOP) {
// Is_Loop() == false does not confirm the absence of a loop (e.g., an
// irreducible loop may not be indicated by an affirmative is_Loop());
// therefore, the only top we can split thru a phi is on a backedge of
@ -93,7 +93,7 @@ Node *PhaseIdealLoop::split_thru_phi( Node *n, Node *region, int policy ) {
singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
}
if (singleton) {
wins++;
x = ((PhaseGVN&)_igvn).makecon(t);
} else {
@ -108,12 +108,12 @@ Node *PhaseIdealLoop::split_thru_phi( Node *n, Node *region, int policy ) {
// igvn->type(x) is set to x->Value() already.
x->raise_bottom_type(t);
Node *y = x->Identity(&_igvn);
if (y != x) {
wins++;
x = y;
} else {
y = _igvn.hash_find(x);
if (y) {
wins++;
x = y;
} else {
@ -129,7 +129,7 @@ Node *PhaseIdealLoop::split_thru_phi( Node *n, Node *region, int policy ) {
phi->set_req( i, x );
}
// Too few wins?
if (wins <= policy) {
_igvn.remove_dead_node(phi);
return NULL;
}
@ -137,7 +137,7 @@ Node *PhaseIdealLoop::split_thru_phi( Node *n, Node *region, int policy ) {
// Record Phi
register_new_node( phi, region );
for (uint i2 = 1; i2 < phi->req(); i2++) {
Node *x = phi->in(i2);
// If we commoned up the cloned 'x' with another existing Node,
// the existing Node picks up a new use. We need to make the
@ -145,24 +145,44 @@ Node *PhaseIdealLoop::split_thru_phi( Node *n, Node *region, int policy ) {
Node *old_ctrl;
IdealLoopTree *old_loop;
if (x->is_Con()) {
// Constant's control is always root.
set_ctrl(x, C->root());
continue;
}
// The occasional new node
if (x->_idx >= old_unique) { // Found a new, unplaced node?
old_ctrl = NULL;
old_loop = NULL; // Not in any prior loop
} else {
old_ctrl = get_ctrl(x);
old_loop = get_loop(old_ctrl); // Get prior loop
}
// New late point must dominate new use
Node *new_ctrl = dom_lca(old_ctrl, region->in(i2));
if (new_ctrl == old_ctrl) // Nothing is changed
continue;
IdealLoopTree *new_loop = get_loop(new_ctrl);
// Don't move x into a loop if its uses are
// outside of loop. Otherwise x will be cloned
// for each use outside of this loop.
IdealLoopTree *use_loop = get_loop(region);
if (!new_loop->is_member(use_loop) &&
(old_loop == NULL || !new_loop->is_member(old_loop))) {
// Take early control, later control will be recalculated
// during next iteration of loop optimizations.
new_ctrl = get_early_ctrl(x);
new_loop = get_loop(new_ctrl);
}
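// (Reviewer's illustration, not part of the patch:) if the late control
// for x lands inside some loop L while the use (region) sits outside L,
// leaving x in L would get it cloned once per out-of-loop use by later
// loop opts; backing off to the early control keeps a single copy and
// lets the next loop-opts iteration re-place it.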
// Set new location
set_ctrl(x, new_ctrl);
// If changing loop bodies, see if we need to collect into new body
if (old_loop != new_loop) {
if (old_loop && !old_loop->_child)
old_loop->_body.yank(x);
if (!new_loop->_child)
new_loop->_body.push(x); // Collect body info
}
}
@ -174,9 +194,9 @@ Node *PhaseIdealLoop::split_thru_phi( Node *n, Node *region, int policy ) {
// Replace the dominated test with an obvious true or false. Place it on the
// IGVN worklist for later cleanup. Move control-dependent data Nodes on the
// live path up to the dominating control.
void PhaseIdealLoop::dominated_by( Node *prevdom, Node *iff, bool flip ) {
#ifndef PRODUCT
if (VerifyLoopOptimizations && PrintOpto) tty->print_cr("dominating test");
#endif
@ -185,6 +205,12 @@ void PhaseIdealLoop::dominated_by( Node *prevdom, Node *iff ) {
assert( iff->Opcode() == Op_If || iff->Opcode() == Op_CountedLoopEnd, "Check this code when new subtype is added");
int pop = prevdom->Opcode();
assert( pop == Op_IfFalse || pop == Op_IfTrue, "" );
if (flip) {
if (pop == Op_IfTrue)
pop = Op_IfFalse;
else
pop = Op_IfTrue;
}
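// (Reviewer's illustration, not part of the patch:) 'flip' handles a
// dominating test whose condition is the negation of the dominated one:
// swapping the projection opcode above makes the constant chosen below
// have the opposite sense, so e.g. a dominated if (cond) can be killed
// by a dominating if (!cond) whose true path reaches it.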
// 'con' is set to true or false to kill the dominated test.
Node *con = _igvn.makecon(pop == Op_IfTrue ? TypeInt::ONE : TypeInt::ZERO);
set_ctrl(con, C->root()); // Constant gets a new use
@ -197,7 +223,7 @@ void PhaseIdealLoop::dominated_by( Node *prevdom, Node *iff ) {
// I can assume this path reaches an infinite loop. In this case it's not
// important to optimize the data Nodes - either the whole compilation will
// be tossed or this path (and all data Nodes) will go dead.
if (iff->outcnt() != 2) return;
// Make control-dependent data Nodes on the live path (path that will remain
// once the dominated IF is removed) become control-dependent on the
@ -207,16 +233,16 @@ void PhaseIdealLoop::dominated_by( Node *prevdom, Node *iff ) {
for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
Node* cd = dp->fast_out(i); // Control-dependent node
if (cd->depends_only_on_test()) {
assert(cd->in(0) == dp, "");
_igvn.hash_delete(cd);
cd->set_req(0, prevdom);
set_early_ctrl(cd);
_igvn._worklist.push(cd);
IdealLoopTree *new_loop = get_loop(get_ctrl(cd));
if (old_loop != new_loop) {
if (!old_loop->_child) old_loop->_body.yank(cd);
if (!new_loop->_child) new_loop->_body.push(cd);
}
--i;
--imax;
@ -2338,6 +2364,11 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
}
#if !defined(PRODUCT)
if (TraceLoopOpts) {
tty->print("PartialPeel ");
loop->dump_head();
}
if (TracePartialPeeling) {
tty->print_cr("before partial peel one iteration");
Node_List wl;
@ -2481,6 +2512,7 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
// Create new loop head for new phis and to hang
// the nodes being moved (sinked) from the peel region.
LoopNode* new_head = new (C, 3) LoopNode(last_peel, last_peel);
new_head->set_unswitch_count(head->unswitch_count()); // Preserve
_igvn.register_new_node_with_optimizer(new_head);
assert(first_not_peeled->in(0) == last_peel, "last_peel <- first_not_peeled");
first_not_peeled->set_req(0, new_head);
@ -2651,24 +2683,23 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
// prevent loop-fallout uses of the pre-incremented trip counter (which are
// then alive with the post-incremented trip counter forcing an extra
// register move)
void PhaseIdealLoop::reorg_offsets(IdealLoopTree *loop) {
// Perform it only for canonical counted loops.
// Loop's shape could be messed up by iteration_split_impl.
if (!loop->_head->is_CountedLoop())
return;
if (!loop->_head->as_Loop()->is_valid_counted_loop())
return;
CountedLoopNode *cl = loop->_head->as_CountedLoop();
CountedLoopEndNode *cle = cl->loopexit();
// Find loop exit control
Node *exit = cle->proj_out(false);
assert( exit->Opcode() == Op_IfFalse, "" );
Node *phi = cl->phi();
// Check for the special case of folks using the pre-incremented
// trip-counter on the fall-out path (forces the pre-incremented
// and post-incremented trip counter to be live at the same time).
// Fix this by adjusting to use the post-increment trip counter.
bool progress = true;
while (progress) {
@ -2677,21 +2708,19 @@ void PhaseIdealLoop::reorg_offsets( IdealLoopTree *loop ) {
Node* use = phi->fast_out(i); // User of trip-counter
if (!has_ctrl(use)) continue;
Node *u_ctrl = get_ctrl(use);
if (use->is_Phi()) {
u_ctrl = NULL;
for (uint j = 1; j < use->req(); j++)
if (use->in(j) == phi)
u_ctrl = dom_lca(u_ctrl, use->in(0)->in(j));
}
IdealLoopTree *u_loop = get_loop(u_ctrl);
// Look for loop-invariant use
if (u_loop == loop) continue;
if (loop->is_member(u_loop)) continue;
// Check that use is live out the bottom. Assuming the trip-counter
// update is right at the bottom, uses of the loop middle are ok.
if (dom_lca(exit, u_ctrl) != exit) continue;
// Hit! Refactor use to use the post-incremented tripcounter.
// Compute a post-increment tripcounter.
Node *opaq = new (C, 2) Opaque2Node( C, cle->incr() );
@ -2702,9 +2731,10 @@ void PhaseIdealLoop::reorg_offsets( IdealLoopTree *loop ) {
register_new_node( post, u_ctrl );
_igvn.hash_delete(use);
_igvn._worklist.push(use);
for (uint j = 1; j < use->req(); j++) {
if (use->in(j) == phi)
use->set_req(j, post);
}
// Since DU info changed, rerun loop
progress = true;
break;
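// (Reviewer's illustration of the win, not part of the patch:) for
//   for (i = 0; i < n; i++) { ... }
//   use(i_pre);   // fall-out use of the pre-incremented trip counter
// the pre-increment phi and the post-increment incr would both stay live
// across the loop bottom. Recomputing the fall-out value from the
// Opaque2-wrapped post-increment leaves a single live counter and removes
// the extra register move described in the function header comment.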

View File

@ -1573,9 +1573,9 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
return TypeInt::make(constant.as_int());
} else if (constant.basic_type() == T_ARRAY) {
if (adr->bottom_type()->is_ptr_to_narrowoop()) {
return TypeNarrowOop::make_from_constant(constant.as_object(), true);
} else {
return TypeOopPtr::make_from_constant(constant.as_object(), true);
}
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -136,6 +136,7 @@ class Parse : public GraphKit {
uint _count; // how many times executed? Currently only set by _goto's
bool _is_parsed; // has this block been parsed yet?
bool _is_handler; // is this block an exception handler?
bool _has_merged_backedge; // does this block have a merged backedge?
SafePointNode* _start_map; // all values flowing into this block
MethodLivenessResult _live_locals; // lazily initialized liveness bitmap
@ -168,6 +169,18 @@ class Parse : public GraphKit {
// True after any predecessor flows control into this block
bool is_merged() const { return _start_map != NULL; }
#ifdef ASSERT
// True after backedge predecessor flows control into this block
bool has_merged_backedge() const { return _has_merged_backedge; }
void mark_merged_backedge(Block* pred) {
assert(is_SEL_head(), "should be loop head");
if (pred != NULL && is_SEL_backedge(pred)) {
assert(is_parsed(), "block should be parsed before merging backedges");
_has_merged_backedge = true;
}
}
#endif
// True when all non-exception predecessors have been parsed.
bool is_ready() const { return preds_parsed() == pred_count(); }
@ -441,11 +454,6 @@ class Parse : public GraphKit {
}
}
// Return true if the parser should add a loop predicate
bool should_add_predicate(int target_bci);
// Insert a loop predicate into the graph
void add_predicate();
// Note: Intrinsic generation routines may be found in library_call.cpp.
// Helper function to setup Ideal Call nodes
@ -483,8 +491,8 @@ class Parse : public GraphKit {
bool static_field_ok_in_clinit(ciField *field, ciMethod *method);
// common code for actually performing the load or store
void do_get_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool is_field);
void do_put_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool is_field);
void do_get_xxx(Node* obj, ciField* field, bool is_field);
void do_put_xxx(Node* obj, ciField* field, bool is_field);
// loading from a constant field or the constant pool
// returns false if push failed (non-perm field constants only, not ldcs)

View File

@ -637,6 +637,25 @@ void Parse::do_all_blocks() {
// (Note that dead locals do not get phis built, ever.)
ensure_phis_everywhere();
if (block->is_SEL_head() &&
UseLoopPredicate) {
// Add predicate to single entry (not irreducible) loop head.
assert(!block->has_merged_backedge(), "only entry paths should be merged for now");
// Need correct bci for predicate.
// It is fine to set it here since do_one_block() will set it anyway.
set_parse_bci(block->start());
add_predicate();
// Add new region for back branches.
int edges = block->pred_count() - block->preds_parsed() + 1; // +1 for original region
RegionNode *r = new (C, edges+1) RegionNode(edges+1);
_gvn.set_type(r, Type::CONTROL);
record_for_igvn(r);
r->init_req(edges, control());
set_control(r);
// Add new phis.
ensure_phis_everywhere();
}
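// (Reviewer's sketch of the resulting control flow at a loop head, not
// part of the patch:)
//
//   entry path(s) --> [predicate If + Opaque1 --> uncommon trap on fail]
//                          |
//                     new RegionNode   <-- back branches merge here later
//                          |
//                      loop body
//
// Inserting the fresh Region below the predicate keeps the predicate on
// the entry path only, so backedges parsed later never pass through it.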
// Leave behind an undisturbed copy of the map, for future merges.
set_map(clone_map());
}
@ -1113,7 +1132,7 @@ void Parse::Block::init_node(Parse* outer, int rpo) {
_preds_parsed = 0;
_count = 0;
assert(pred_count() == 0 && preds_parsed() == 0, "sanity");
assert(!(is_merged() || is_parsed() || is_handler() || has_merged_backedge()), "sanity");
assert(_live_locals.size() == 0, "sanity");
// entry point has additional predecessor
@ -1350,10 +1369,6 @@ void Parse::do_one_block() {
set_parse_bci(iter().cur_bci());
if (bci() == block()->limit()) {
// insert a predicate if it falls through to a loop head block
if (should_add_predicate(bci())){
add_predicate();
}
// Do not walk into the next block until directed by do_all_blocks.
merge(bci());
break;
@ -1498,17 +1513,29 @@ void Parse::merge_common(Parse::Block* target, int pnum) {
|| target->is_handler() // These have unpredictable inputs.
|| target->is_loop_head() // Known multiple inputs
|| control()->is_Region()) { // We must hide this guy.
int current_bci = bci();
set_parse_bci(target->start()); // Set target bci
if (target->is_SEL_head()) {
DEBUG_ONLY( target->mark_merged_backedge(block()); )
if (target->start() == 0) {
// Add loop predicate for the special case when
// there are backbranches to the method entry.
add_predicate();
}
}
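// (Reviewer's note, illustration only:) the target->start() == 0 case
// covers bytecode where the loop head is the method entry itself, e.g.
//   0: ...body...
//      goto 0
// so there is no earlier forward-edge merge at which do_all_blocks()
// could have placed the predicate.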
// Add a Region to start the new basic block. Phis will be added
// later lazily.
int edges = target->pred_count();
if (edges < pnum) edges = pnum; // might be a new path!
RegionNode *r = new (C, edges+1) RegionNode(edges+1);
gvn().set_type(r, Type::CONTROL);
record_for_igvn(r);
// zap all inputs to NULL for debugging (done in Node(uint) constructor)
// for (int j = 1; j < edges+1; j++) { r->init_req(j, NULL); }
r->init_req(pnum, control());
set_control(r);
set_parse_bci(current_bci); // Restore bci
}
// Convert the existing Parser mapping into a mapping at this bci.
@ -1517,7 +1544,11 @@ void Parse::merge_common(Parse::Block* target, int pnum) {
} else { // Prior mapping at this bci
if (TraceOptoParse) { tty->print(" with previous state"); }
#ifdef ASSERT
if (target->is_SEL_head()) {
target->mark_merged_backedge(block());
}
#endif
// We must not manufacture more phis if the target is already parsed.
bool nophi = target->is_parsed();
@ -2054,37 +2085,6 @@ void Parse::add_safepoint() {
}
}
//------------------------------should_add_predicate--------------------------
bool Parse::should_add_predicate(int target_bci) {
if (!UseLoopPredicate) return false;
Block* target = successor_for_bci(target_bci);
if (target != NULL &&
target->is_loop_head() &&
block()->rpo() < target->rpo()) {
return true;
}
return false;
}
//------------------------------add_predicate---------------------------------
void Parse::add_predicate() {
assert(UseLoopPredicate,"use only for loop predicate");
Node *cont = _gvn.intcon(1);
Node* opq = _gvn.transform(new (C, 2) Opaque1Node(C, cont));
Node *bol = _gvn.transform(new (C, 2) Conv2BNode(opq));
IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN);
Node* iffalse = _gvn.transform(new (C, 1) IfFalseNode(iff));
C->add_predicate_opaq(opq);
{
PreserveJVMState pjvms(this);
set_control(iffalse);
uncommon_trap(Deoptimization::Reason_predicate,
Deoptimization::Action_maybe_recompile);
}
Node* iftrue = _gvn.transform(new (C, 1) IfTrueNode(iff));
set_control(iftrue);
}
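// (Reviewer's note:) both helpers above are removed from Parse here; the
// kit.add_predicate() calls added in stringopts.cpp suggest the helper now
// lives on GraphKit, shared by parsing and string-concat expansion. The IR
// shape it emits -- which collect_potentially_useful_predicates() matches
// back through entry->in(0)->in(1)->in(1) == Opaque1 -- is:
//
//   ctrl --> If(Conv2B(Opaque1(ConI 1)))
//              |                |
//           IfTrue           IfFalse
//        (fall through)   (uncommon_trap Reason_predicate)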
#ifndef PRODUCT
//------------------------show_parse_info--------------------------------------
void Parse::show_parse_info() {

View File

@ -293,11 +293,6 @@ void Parse::do_tableswitch() {
if (len < 1) {
// If this is a backward branch, add safepoint
maybe_add_safepoint(default_dest);
if (should_add_predicate(default_dest)){
_sp += 1; // set original stack for use by uncommon_trap
add_predicate();
_sp -= 1;
}
merge(default_dest);
return;
}
@ -344,11 +339,6 @@ void Parse::do_lookupswitch() {
if (len < 1) { // If this is a backward branch, add safepoint
maybe_add_safepoint(default_dest);
if (should_add_predicate(default_dest)){
_sp += 1; // set original stack for use by uncommon_trap
add_predicate();
_sp -= 1;
}
merge(default_dest);
return;
}
@ -756,9 +746,6 @@ void Parse::do_jsr() {
push(_gvn.makecon(ret_addr));
// Flow to the jsr.
if (should_add_predicate(jsr_bci)){
add_predicate();
}
merge(jsr_bci);
}
@ -1040,11 +1027,6 @@ void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
profile_taken_branch(target_bci);
adjust_map_after_if(btest, c, prob, branch_block, next_block);
if (!stopped()) {
if (should_add_predicate(target_bci)){ // add a predicate if it branches to a loop
int nargs = repush_if_args(); // set original stack for uncommon_trap
add_predicate();
_sp -= nargs;
}
merge(target_bci);
}
}
@ -1168,11 +1150,6 @@ void Parse::do_if(BoolTest::mask btest, Node* c) {
profile_taken_branch(target_bci);
adjust_map_after_if(taken_btest, c, prob, branch_block, next_block);
if (!stopped()) {
if (should_add_predicate(target_bci)){ // add a predicate if it branches to a loop
int nargs = repush_if_args(); // set original stack for the uncommon_trap
add_predicate();
_sp -= nargs;
}
merge(target_bci);
}
}
@ -2166,10 +2143,6 @@ void Parse::do_one_bytecode() {
// Update method data
profile_taken_branch(target_bci);
// Add loop predicate if it goes to a loop
if (should_add_predicate(target_bci)){
add_predicate();
}
// Merge the current control into the target basic block
merge(target_bci);

View File

@ -112,29 +112,31 @@ void Parse::do_field_access(bool is_get, bool is_field) {
// Compile-time detect of null-exception?
if (stopped()) return;
#ifdef ASSERT
const TypeInstPtr *tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder());
assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed");
#endif
if (is_get) {
--_sp; // pop receiver before getting
do_get_xxx(obj, field, is_field);
} else {
do_put_xxx(obj, field, is_field);
--_sp; // pop receiver after putting
}
} else {
const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror());
obj = _gvn.makecon(tip);
if (is_get) {
do_get_xxx(obj, field, is_field);
} else {
do_put_xxx(obj, field, is_field);
}
}
}
void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
// Does this field have a constant value? If so, just push the value.
if (field->is_constant()) {
if (field->is_static()) {
@ -231,7 +233,7 @@ void Parse::do_get_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool
}
}
void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
bool is_vol = field->is_volatile();
// If reference is volatile, prevent following memory ops from
// floating down past the volatile write. Also prevents commoning

View File

@ -910,7 +910,7 @@ Node* PhaseStringOpts::fetch_static_field(GraphKit& kit, ciField* field) {
ciObject* con = field->constant_value().as_object();
// Do not "join" in the previous type; it doesn't add value,
// and may yield a vacuous result if the field is of interface type.
type = TypeOopPtr::make_from_constant(con, true)->isa_oopptr();
assert(type != NULL, "field singleton type must be consistent");
} else {
type = TypeOopPtr::make_from_klass(field_klass->as_klass());
@ -969,6 +969,10 @@ Node* PhaseStringOpts::int_stringSize(GraphKit& kit, Node* arg) {
// for (int i=0; ; i++)
// if (x <= sizeTable[i])
// return i+1;
// Add loop predicate first.
kit.add_predicate();
RegionNode *loop = new (C, 3) RegionNode(3);
loop->init_req(1, kit.control());
kit.gvn().set_type(loop, Type::CONTROL);
@ -1086,6 +1090,9 @@ void PhaseStringOpts::int_getChars(GraphKit& kit, Node* arg, Node* char_array, N
// }
{
// Add loop predicate first.
kit.add_predicate();
RegionNode *head = new (C, 3) RegionNode(3);
head->init_req(1, kit.control());
kit.gvn().set_type(head, Type::CONTROL);

View File

@ -32,6 +32,7 @@
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/klassKlass.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/typeArrayKlass.hpp"
@ -2241,43 +2242,49 @@ TypeOopPtr::TypeOopPtr( TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int
} else if (this->isa_aryptr()) {
_is_ptr_to_narrowoop = (klass()->is_obj_array_klass() &&
_offset != arrayOopDesc::length_offset_in_bytes());
} else if (klass()->is_instance_klass()) {
ciInstanceKlass* ik = klass()->as_instance_klass();
ciField* field = NULL;
if (this->isa_klassptr()) {
// Perm objects don't use compressed references
} else if (_offset == OffsetBot || _offset == OffsetTop) {
// unsafe access
_is_ptr_to_narrowoop = true;
} else { // exclude unsafe ops
assert(this->isa_instptr(), "must be an instance ptr.");
if (klass() == ciEnv::current()->Class_klass() &&
(_offset == java_lang_Class::klass_offset_in_bytes() ||
_offset == java_lang_Class::array_klass_offset_in_bytes())) {
// Special hidden fields from the Class.
assert(this->isa_instptr(), "must be an instance ptr.");
_is_ptr_to_narrowoop = true;
} else if (klass() == ciEnv::current()->Class_klass() &&
_offset >= instanceMirrorKlass::offset_of_static_fields()) {
// Static fields
assert(o != NULL, "must be constant");
ciInstanceKlass* k = o->as_instance()->java_lang_Class_klass()->as_instance_klass();
ciField* field = k->get_field_by_offset(_offset, true);
assert(field != NULL, "missing field");
BasicType basic_elem_type = field->layout_type();
_is_ptr_to_narrowoop = (basic_elem_type == T_OBJECT ||
basic_elem_type == T_ARRAY);
} else if (klass()->equals(ciEnv::current()->Object_klass())) {
// Compile::find_alias_type() cast exactness on all types to verify
// that it does not affect alias type.
_is_ptr_to_narrowoop = true;
} else {
// Type for the copy start in LibraryCallKit::inline_native_clone().
assert(!klass_is_exact(), "only non-exact klass");
_is_ptr_to_narrowoop = true;
// Instance fields which contain compressed oop references.
field = ik->get_field_by_offset(_offset, false);
if (field != NULL) {
BasicType basic_elem_type = field->layout_type();
_is_ptr_to_narrowoop = (basic_elem_type == T_OBJECT ||
basic_elem_type == T_ARRAY);
} else if (klass()->equals(ciEnv::current()->Object_klass())) {
// Compile::find_alias_type() cast exactness on all types to verify
// that it does not affect alias type.
_is_ptr_to_narrowoop = true;
} else {
// Type for the copy start in LibraryCallKit::inline_native_clone().
assert(!klass_is_exact(), "only non-exact klass");
_is_ptr_to_narrowoop = true;
}
}
}
}
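// (Reviewer's summary of the restructuring above, assuming compressed
// oops:) for a constant java.lang.Class oop, an offset at or beyond
// instanceMirrorKlass::offset_of_static_fields() now resolves to a static
// field of the mirrored class, and is a narrow-oop load iff the field's
// layout type is T_OBJECT or T_ARRAY -- the same rule applied to ordinary
// instance fields in the final else branch.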

View File

@ -988,8 +988,8 @@ public:
static const TypeNarrowOop *make( const TypePtr* type);
static const TypeNarrowOop* make_from_constant(ciObject* con, bool require_constant = false) {
return make(TypeOopPtr::make_from_constant(con, require_constant));
}
// returns the equivalent ptr type for this compressed pointer

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1858,7 +1858,7 @@ JNI_ENTRY(jobject, jni_ToReflectedField(JNIEnv *env, jclass cls, jfieldID fieldI
// Static field. The fieldID a JNIid specifying the field holder and the offset within the klassOop.
JNIid* id = jfieldIDWorkaround::from_static_jfieldID(fieldID);
assert(id->is_static_field_id(), "invalid static field id");
found = id->find_local_field(&fd);
} else {
// Non-static field. The fieldID is really the offset of the field within the instanceOop.
int offset = jfieldIDWorkaround::from_instance_jfieldID(k, fieldID);
@ -1906,9 +1906,7 @@ JNI_ENTRY(jfieldID, jni_GetStaticFieldID(JNIEnv *env, jclass clazz,
JNIid* id = instanceKlass::cast(fd.field_holder())->jni_id_for(fd.offset());
debug_only(id->set_is_static_field_id();)
debug_only(id->verify(fd.field_holder()));
ret = jfieldIDWorkaround::to_static_jfieldID(id);
return ret;
@ -1928,7 +1926,7 @@ JNI_ENTRY(jobject, jni_GetStaticObjectField(JNIEnv *env, jclass clazz, jfieldID
if (JvmtiExport::should_post_field_access()) {
JvmtiExport::jni_GetField_probe(thread, NULL, NULL, id->holder(), fieldID, true);
}
jobject ret = JNIHandles::make_local(id->holder()->java_mirror()->obj_field(id->offset()));
DTRACE_PROBE1(hotspot_jni, GetStaticObjectField__return, ret);
return ret;
JNI_END
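(Reviewer's note:) the pattern in this and the accessors below is the core of the change: static fields have moved from the klassOop into the java.lang.Class mirror, so every static field access gains one indirection:

//   before: id->holder()->obj_field(id->offset())
//   after:  id->holder()->java_mirror()->obj_field(id->offset())
// with id->offset() now being an offset within the Class instance.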
@ -1950,7 +1948,7 @@ JNI_ENTRY(Return, jni_GetStatic##Result##Field(JNIEnv *env, jclass clazz, jfield
if (JvmtiExport::should_post_field_access()) { \
JvmtiExport::jni_GetField_probe(thread, NULL, NULL, id->holder(), fieldID, true); \
} \
ret = id->holder()->java_mirror()-> Fieldname##_field (id->offset()); \
return ret;\
JNI_END
@ -1976,7 +1974,7 @@ JNI_ENTRY(void, jni_SetStaticObjectField(JNIEnv *env, jclass clazz, jfieldID fie
field_value.l = value;
JvmtiExport::jni_SetField_probe(thread, NULL, NULL, id->holder(), fieldID, true, 'L', (jvalue *)&field_value);
}
id->holder()->java_mirror()->obj_field_put(id->offset(), JNIHandles::resolve(value));
DTRACE_PROBE(hotspot_jni, SetStaticObjectField__return);
JNI_END
@ -1999,7 +1997,7 @@ JNI_ENTRY(void, jni_SetStatic##Result##Field(JNIEnv *env, jclass clazz, jfieldID
field_value.unionType = value; \
JvmtiExport::jni_SetField_probe(thread, NULL, NULL, id->holder(), fieldID, true, SigType, (jvalue *)&field_value); \
} \
id->holder()->java_mirror()-> Fieldname##_field_put (id->offset(), value); \
DTRACE_PROBE(hotspot_jni, SetStatic##Result##Field__return);\
JNI_END

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -224,8 +224,7 @@ checkStaticFieldID(JavaThread* thr, jfieldID fid, jclass cls, int ftype)
ReportJNIFatalError(thr, fatal_wrong_static_field);
/* check for proper field type */
if (!id->find_local_field(&fd))
ReportJNIFatalError(thr, fatal_static_field_not_found);
if ((fd.field_type() != ftype) &&
!(fd.field_type() == T_ARRAY && ftype == T_OBJECT)) {

View File

@ -1808,7 +1808,7 @@ JVM_ENTRY(jclass, JVM_ConstantPoolGetClassAt(JNIEnv *env, jobject unused, jobjec
THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "Wrong type at constant pool index");
}
klassOop k = cp->klass_at(index, CHECK_NULL);
return (jclass) JNIHandles::make_local(k->java_mirror());
}
JVM_END
@ -1824,7 +1824,7 @@ JVM_ENTRY(jclass, JVM_ConstantPoolGetClassAtIfLoaded(JNIEnv *env, jobject unused
}
klassOop k = constantPoolOopDesc::klass_at_if_loaded(cp, index);
if (k == NULL) return NULL;
return (jclass) JNIHandles::make_local(k->java_mirror());
}
JVM_END

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -616,9 +616,7 @@ JvmtiEnvBase::get_field_descriptor(klassOop k, jfieldID field, fieldDescriptor*
bool found = false;
if (jfieldIDWorkaround::is_static_jfieldID(field)) {
JNIid* id = jfieldIDWorkaround::from_static_jfieldID(field);
found = id->find_local_field(fd);
} else {
// Non-static field. The fieldID is really the offset of the field within the object.
int offset = jfieldIDWorkaround::from_instance_jfieldID(k, field);

View File

@ -3350,11 +3350,12 @@ void VM_RedefineClasses::increment_class_counter(instanceKlass *ik, TRAPS) {
for (Klass *subk = ik->subklass(); subk != NULL;
subk = subk->next_sibling()) {
klassOop sub = subk->as_klassOop();
if (subk->oop_is_instance()) {
// Only update instanceKlasses
instanceKlass *subik = (instanceKlass*)subk;
// recursively do subclasses of the current subclass
increment_class_counter(subik, THREAD);
}
}
}

Some files were not shown because too many files have changed in this diff.