Merge

commit 50bd95aadf
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,8 +50,7 @@ public class ConstMethod extends Oop {
 
   private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
     Type type = db.lookupType("constMethodOopDesc");
-    // Backpointer to non-const methodOop
-    method = new OopField(type.getOopField("_method"), 0);
+    constants = new OopField(type.getOopField("_constants"), 0);
     // The exception handler table. 4-tuples of ints [start_pc, end_pc,
     // handler_pc, catch_type index] For methods with no exceptions the
     // table is pointing to Universe::the_empty_int_array
@@ -69,6 +68,7 @@ public class ConstMethod extends Oop {
     nameIndex = new CIntField(type.getCIntegerField("_name_index"), 0);
     signatureIndex = new CIntField(type.getCIntegerField("_signature_index"), 0);
     genericSignatureIndex = new CIntField(type.getCIntegerField("_generic_signature_index"),0);
+    idnum = new CIntField(type.getCIntegerField("_method_idnum"), 0);
 
     // start of byte code
     bytecodeOffset = type.getSize();
@@ -85,7 +85,7 @@ public class ConstMethod extends Oop {
   }
 
   // Fields
-  private static OopField method;
+  private static OopField constants;
   private static OopField exceptionTable;
   private static CIntField constMethodSize;
   private static ByteField flags;
@@ -93,6 +93,7 @@ public class ConstMethod extends Oop {
   private static CIntField nameIndex;
   private static CIntField signatureIndex;
   private static CIntField genericSignatureIndex;
+  private static CIntField idnum;
 
   // start of bytecode
   private static long bytecodeOffset;
@@ -100,9 +101,15 @@ public class ConstMethod extends Oop {
   private static long checkedExceptionElementSize;
   private static long localVariableTableElementSize;
 
   // Accessors for declared fields
   public Method getMethod() {
-    return (Method) method.getValue(this);
+    InstanceKlass ik = (InstanceKlass)getConstants().getPoolHolder();
+    ObjArray methods = ik.getMethods();
+    return (Method)methods.getObjAt(getIdNum());
+  }
+
+  // Accessors for declared fields
+  public ConstantPool getConstants() {
+    return (ConstantPool) constants.getValue(this);
   }
 
   public TypeArray getExceptionTable() {
@@ -133,6 +140,10 @@ public class ConstMethod extends Oop {
     return genericSignatureIndex.getValue(this);
   }
 
+  public long getIdNum() {
+    return idnum.getValue(this);
+  }
+
   public Symbol getName() {
     return getMethod().getName();
   }
@@ -223,7 +234,7 @@ public class ConstMethod extends Oop {
   public void iterateFields(OopVisitor visitor, boolean doVMFields) {
     super.iterateFields(visitor, doVMFields);
     if (doVMFields) {
-      visitor.doOop(method, true);
+      visitor.doOop(constants, true);
       visitor.doOop(exceptionTable, true);
       visitor.doCInt(constMethodSize, true);
      visitor.doByte(flags, true);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -48,7 +48,6 @@ public class Method extends Oop {
   private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
     Type type = db.lookupType("methodOopDesc");
     constMethod = new OopField(type.getOopField("_constMethod"), 0);
-    constants = new OopField(type.getOopField("_constants"), 0);
     methodData = new OopField(type.getOopField("_method_data"), 0);
     methodSize = new CIntField(type.getCIntegerField("_method_size"), 0);
     maxStack = new CIntField(type.getCIntegerField("_max_stack"), 0);
@@ -83,7 +82,6 @@ public class Method extends Oop {
 
   // Fields
   private static OopField constMethod;
-  private static OopField constants;
   private static OopField methodData;
   private static CIntField methodSize;
   private static CIntField maxStack;
@@ -125,7 +123,9 @@ public class Method extends Oop {
 
   // Accessors for declared fields
   public ConstMethod getConstMethod() { return (ConstMethod) constMethod.getValue(this); }
-  public ConstantPool getConstants() { return (ConstantPool) constants.getValue(this); }
+  public ConstantPool getConstants() {
+    return getConstMethod().getConstants();
+  }
   public MethodData getMethodData() { return (MethodData) methodData.getValue(this); }
   public TypeArray getExceptionTable() { return getConstMethod().getExceptionTable(); }
   /** WARNING: this is in words, not useful in this system; use getObjectSize() instead */
@@ -281,7 +281,6 @@ public class Method extends Oop {
     super.iterateFields(visitor, doVMFields);
     if (doVMFields) {
       visitor.doOop(constMethod, true);
-      visitor.doOop(constants, true);
       visitor.doCInt(methodSize, true);
       visitor.doCInt(maxStack, true);
      visitor.doCInt(maxLocals, true);
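
Note (editorial): the two serviceability-agent hunks above track the VM-side refactoring in this merge, which moves the `_constants` back-pointer from methodOopDesc to constMethodOopDesc and drops constMethodOop's `_method` back-pointer (a Method is now recovered via the pool holder's method array and `_method_idnum`). A minimal C++ toy model of the new layout follows; the type and field names only mirror HotSpot's, this is an illustration, not VM code:

    #include <cstdio>

    struct ConstantPool { int length; };

    struct ConstMethod {
      ConstantPool* _constants;                            // new home of the pool pointer
      ConstantPool* constants() const { return _constants; }
    };

    struct Method {
      ConstMethod* _constMethod;
      // old layout: ConstantPool* _constants;             // field removed by this change
      ConstMethod*  constMethod() const { return _constMethod; }
      ConstantPool* constants()  const { return constMethod()->constants(); }  // two hops now
    };

    int main() {
      ConstantPool cp{42};
      ConstMethod  cm{&cp};
      Method       m{&cm};
      std::printf("%d\n", m.constants()->length);          // prints 42
      return 0;
    }

The interpreter and DTrace hunks further down all encode this same extra dereference.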
@@ -214,7 +214,7 @@ endif
 
 # Flags for generating make dependency flags.
 ifneq ("${CC_VER_MAJOR}", "2")
-DEPFLAGS = -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
+DEPFLAGS = -fpch-deps -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
 endif
 
 # -DDONT_USE_PRECOMPILED_HEADER will exclude all includes in precompiled.hpp.
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2012
 
 HS_MAJOR_VER=24
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=13
+HS_BUILD_NUMBER=14
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
@@ -54,72 +54,72 @@ jprt.sync.push=false
 # Define the Solaris platforms we want for the various releases
 jprt.my.solaris.sparc.jdk8=solaris_sparc_5.10
 jprt.my.solaris.sparc.jdk7=solaris_sparc_5.10
-jprt.my.solaris.sparc.jdk7u4=${jprt.my.solaris.sparc.jdk7}
+jprt.my.solaris.sparc.jdk7u6=${jprt.my.solaris.sparc.jdk7}
 jprt.my.solaris.sparc=${jprt.my.solaris.sparc.${jprt.tools.default.release}}
 
 jprt.my.solaris.sparcv9.jdk8=solaris_sparcv9_5.10
 jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
-jprt.my.solaris.sparcv9.jdk7u4=${jprt.my.solaris.sparcv9.jdk7}
+jprt.my.solaris.sparcv9.jdk7u6=${jprt.my.solaris.sparcv9.jdk7}
 jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
 
 jprt.my.solaris.i586.jdk8=solaris_i586_5.10
 jprt.my.solaris.i586.jdk7=solaris_i586_5.10
-jprt.my.solaris.i586.jdk7u4=${jprt.my.solaris.i586.jdk7}
+jprt.my.solaris.i586.jdk7u6=${jprt.my.solaris.i586.jdk7}
 jprt.my.solaris.i586=${jprt.my.solaris.i586.${jprt.tools.default.release}}
 
 jprt.my.solaris.x64.jdk8=solaris_x64_5.10
 jprt.my.solaris.x64.jdk7=solaris_x64_5.10
-jprt.my.solaris.x64.jdk7u4=${jprt.my.solaris.x64.jdk7}
+jprt.my.solaris.x64.jdk7u6=${jprt.my.solaris.x64.jdk7}
 jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}
 
 jprt.my.linux.i586.jdk8=linux_i586_2.6
 jprt.my.linux.i586.jdk7=linux_i586_2.6
-jprt.my.linux.i586.jdk7u4=${jprt.my.linux.i586.jdk7}
+jprt.my.linux.i586.jdk7u6=${jprt.my.linux.i586.jdk7}
 jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}
 
 jprt.my.linux.x64.jdk8=linux_x64_2.6
 jprt.my.linux.x64.jdk7=linux_x64_2.6
-jprt.my.linux.x64.jdk7u4=${jprt.my.linux.x64.jdk7}
+jprt.my.linux.x64.jdk7u6=${jprt.my.linux.x64.jdk7}
 jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
 
 jprt.my.linux.ppc.jdk8=linux_ppc_2.6
 jprt.my.linux.ppc.jdk7=linux_ppc_2.6
-jprt.my.linux.ppc.jdk7u4=${jprt.my.linux.ppc.jdk7}
+jprt.my.linux.ppc.jdk7u6=${jprt.my.linux.ppc.jdk7}
 jprt.my.linux.ppc=${jprt.my.linux.ppc.${jprt.tools.default.release}}
 
 jprt.my.linux.ppcv2.jdk8=linux_ppcv2_2.6
 jprt.my.linux.ppcv2.jdk7=linux_ppcv2_2.6
-jprt.my.linux.ppcv2.jdk7u4=${jprt.my.linux.ppcv2.jdk7}
+jprt.my.linux.ppcv2.jdk7u6=${jprt.my.linux.ppcv2.jdk7}
 jprt.my.linux.ppcv2=${jprt.my.linux.ppcv2.${jprt.tools.default.release}}
 
 jprt.my.linux.ppcsflt.jdk8=linux_ppcsflt_2.6
 jprt.my.linux.ppcsflt.jdk7=linux_ppcsflt_2.6
-jprt.my.linux.ppcsflt.jdk7u4=${jprt.my.linux.ppcsflt.jdk7}
+jprt.my.linux.ppcsflt.jdk7u6=${jprt.my.linux.ppcsflt.jdk7}
 jprt.my.linux.ppcsflt=${jprt.my.linux.ppcsflt.${jprt.tools.default.release}}
 
 jprt.my.linux.armvfp.jdk8=linux_armvfp_2.6
 jprt.my.linux.armvfp.jdk7=linux_armvfp_2.6
-jprt.my.linux.armvfp.jdk7u4=${jprt.my.linux.armvfp.jdk7}
+jprt.my.linux.armvfp.jdk7u6=${jprt.my.linux.armvfp.jdk7}
 jprt.my.linux.armvfp=${jprt.my.linux.armvfp.${jprt.tools.default.release}}
 
 jprt.my.linux.armsflt.jdk8=linux_armsflt_2.6
 jprt.my.linux.armsflt.jdk7=linux_armsflt_2.6
-jprt.my.linux.armsflt.jdk7u4=${jprt.my.linux.armsflt.jdk7}
+jprt.my.linux.armsflt.jdk7u6=${jprt.my.linux.armsflt.jdk7}
 jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}}
 
 jprt.my.macosx.x64.jdk8=macosx_x64_10.7
 jprt.my.macosx.x64.jdk7=macosx_x64_10.7
-jprt.my.macosx.x64.jdk7u4=${jprt.my.macosx.x64.jdk7}
+jprt.my.macosx.x64.jdk7u6=${jprt.my.macosx.x64.jdk7}
 jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
 
 jprt.my.windows.i586.jdk8=windows_i586_5.1
 jprt.my.windows.i586.jdk7=windows_i586_5.1
-jprt.my.windows.i586.jdk7u4=${jprt.my.windows.i586.jdk7}
+jprt.my.windows.i586.jdk7u6=${jprt.my.windows.i586.jdk7}
 jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
 
 jprt.my.windows.x64.jdk8=windows_x64_5.2
 jprt.my.windows.x64.jdk7=windows_x64_5.2
-jprt.my.windows.x64.jdk7u4=${jprt.my.windows.x64.jdk7}
+jprt.my.windows.x64.jdk7u6=${jprt.my.windows.x64.jdk7}
 jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
 
 # Standard list of jprt build targets for this source tree
@@ -154,7 +154,7 @@ jprt.build.targets.all=${jprt.build.targets.standard}, \
 
 jprt.build.targets.jdk8=${jprt.build.targets.all}
 jprt.build.targets.jdk7=${jprt.build.targets.all}
-jprt.build.targets.jdk7u4=${jprt.build.targets.all}
+jprt.build.targets.jdk7u6=${jprt.build.targets.all}
 jprt.build.targets=${jprt.build.targets.${jprt.tools.default.release}}
 
 # Subset lists of test targets for this source tree
@@ -346,12 +346,12 @@ jprt.my.macosx.x64.test.targets = \
     ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
     ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_CMS, \
     ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_G1, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParOldGC
-    # ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default, \
-    # ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default_tiered, \
-    # ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
-    # ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_G1, \
-    # ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParOldGC
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_G1, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParOldGC
 
 jprt.my.windows.i586.test.targets = \
     ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
@@ -447,7 +447,7 @@ jprt.test.targets.embedded= \
 
 jprt.test.targets.jdk8=${jprt.test.targets.standard}
 jprt.test.targets.jdk7=${jprt.test.targets.standard}
-jprt.test.targets.jdk7u4=${jprt.test.targets.jdk7}
+jprt.test.targets.jdk7u6=${jprt.test.targets.jdk7}
 jprt.test.targets=${jprt.test.targets.${jprt.tools.default.release}}
 
 # The default test/Makefile targets that should be run
@@ -507,6 +507,9 @@ jprt.make.rule.test.targets.embedded = \
 
 jprt.make.rule.test.targets.jdk8=${jprt.make.rule.test.targets.standard}
 jprt.make.rule.test.targets.jdk7=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk7u4=${jprt.make.rule.test.targets.jdk7}
+jprt.make.rule.test.targets.jdk7u6=${jprt.make.rule.test.targets.jdk7}
 jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}}
 
+# 7155453: Work-around to prevent popups on OSX from blocking test completion
+# but the work-around is added to all platforms to be consistent
+jprt.jbb.options=-Djava.awt.headless=true
@@ -166,7 +166,7 @@ endif
 
 # Flags for generating make dependency flags.
 ifneq ("${CC_VER_MAJOR}", "2")
-DEPFLAGS = -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
+DEPFLAGS = -fpch-deps -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
 endif
 
 # -DDONT_USE_PRECOMPILED_HEADER will exclude all includes in precompiled.hpp.
@@ -141,7 +141,7 @@ OPT_CFLAGS/NOOPT=-O0
 
 # Flags for generating make dependency flags.
 ifneq ("${CC_VER_MAJOR}", "2")
-DEPFLAGS = -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
+DEPFLAGS = -fpch-deps -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
 endif
 
 # -DDONT_USE_PRECOMPILED_HEADER will exclude all includes in precompiled.hpp.
@@ -644,30 +644,6 @@ void LIRGenerator::do_CompareOp(CompareOp* x) {
 }
 
 
-void LIRGenerator::do_AttemptUpdate(Intrinsic* x) {
-  assert(x->number_of_arguments() == 3, "wrong type");
-  LIRItem obj       (x->argument_at(0), this);  // AtomicLong object
-  LIRItem cmp_value (x->argument_at(1), this);  // value to compare with field
-  LIRItem new_value (x->argument_at(2), this);  // replace field with new_value if it matches cmp_value
-
-  obj.load_item();
-  cmp_value.load_item();
-  new_value.load_item();
-
-  // generate compare-and-swap and produce zero condition if swap occurs
-  int value_offset = sun_misc_AtomicLongCSImpl::value_offset();
-  LIR_Opr addr = FrameMap::O7_opr;
-  __ add(obj.result(), LIR_OprFact::intConst(value_offset), addr);
-  LIR_Opr t1 = FrameMap::G1_opr;  // temp for 64-bit value
-  LIR_Opr t2 = FrameMap::G3_opr;  // temp for 64-bit value
-  __ cas_long(addr, cmp_value.result(), new_value.result(), t1, t2);
-
-  // generate conditional move of boolean result
-  LIR_Opr result = rlock_result(x);
-  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result, T_LONG);
-}
-
-
 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
   assert(x->number_of_arguments() == 4, "wrong type");
   LIRItem obj   (x->argument_at(0), this);  // object
@@ -989,10 +965,10 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
   if (!x->klass()->is_loaded() || PatchALot) {
     patching_info = state_for(x, x->state_before());
 
-    // cannot re-use same xhandlers for multiple CodeEmitInfos, so
-    // clone all handlers.  This is handled transparently in other
-    // places by the CodeEmitInfo cloning logic but is handled
-    // specially here because a stub isn't being used.
+    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
+    // clone all handlers (NOTE: Usually this is handled transparently
+    // by the CodeEmitInfo cloning logic in CodeStub constructors but
+    // is done explicitly here because a stub isn't being used).
     x->set_exception_handlers(new XHandlers(x->exception_handlers()));
   }
   CodeEmitInfo* info = state_for(x, x->state());
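
Note (editorial): the deleted do_AttemptUpdate() was C1's intrinsic for sun.misc.AtomicLongCSImpl.attemptUpdate(), a 64-bit compare-and-swap (the matching x86 version is removed further down, along with the _attemptUpdate case in the GraphBuilder). A portable sketch of the operation's semantics, for context only, not VM code:

    #include <atomic>
    #include <cstdio>

    // attempt_update(field, expect, update) succeeds iff field still holds
    // `expect`, mirroring the cas_long + conditional-move sequence removed above.
    bool attempt_update(std::atomic<long long>& field,
                        long long expect, long long update) {
      return field.compare_exchange_strong(expect, update);
    }

    int main() {
      std::atomic<long long> v{1};
      std::printf("%d %lld\n", (int)attempt_update(v, 1, 2), v.load());  // 1 2
      std::printf("%d %lld\n", (int)attempt_update(v, 1, 3), v.load());  // 0 2
      return 0;
    }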
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -490,7 +490,8 @@ address InterpreterGenerator::generate_accessor_entry(void) {
                     ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
 
   // get constant pool cache
-  __ ld_ptr(G5_method, in_bytes(methodOopDesc::constants_offset()), G3_scratch);
+  __ ld_ptr(G5_method, in_bytes(methodOopDesc::const_offset()), G3_scratch);
+  __ ld_ptr(G3_scratch, in_bytes(constMethodOopDesc::constants_offset()), G3_scratch);
   __ ld_ptr(G3_scratch, constantPoolOopDesc::cache_offset_in_bytes(), G3_scratch);
 
   // get specific constant pool cache entry
@@ -768,7 +769,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
     // for static methods insert the mirror argument
     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 
-    __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc:: constants_offset())), O1);
+    __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc:: const_offset())), O1);
+    __ ld_ptr(Address(O1, 0, in_bytes(constMethodOopDesc::constants_offset())), O1);
     __ ld_ptr(Address(O1, 0, constantPoolOopDesc::pool_holder_offset_in_bytes()), O1);
     __ ld_ptr(O1, mirror_offset, O1);
     // where the mirror handle body is allocated:
@@ -1047,7 +1049,7 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
   assert_different_registers(state, prev_state);
   assert_different_registers(prev_state, G3_scratch);
   const Register Gtmp = G3_scratch;
-  const Address constants         (G5_method, 0, in_bytes(methodOopDesc::constants_offset()));
+  const Address constMethod       (G5_method, 0, in_bytes(methodOopDesc::const_offset()));
   const Address access_flags      (G5_method, 0, in_bytes(methodOopDesc::access_flags_offset()));
   const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset()));
   const Address max_stack         (G5_method, 0, in_bytes(methodOopDesc::max_stack_offset()));
@@ -1155,7 +1157,8 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
   __ set((int) BytecodeInterpreter::method_entry, O1);
   __ st(O1, XXX_STATE(_msg));
 
-  __ ld_ptr(constants, O3);
+  __ ld_ptr(constMethod, O3);
+  __ ld_ptr(O3, in_bytes(constMethodOopDesc::constants_offset()), O3);
   __ ld_ptr(O3, constantPoolOopDesc::cache_offset_in_bytes(), O2);
   __ st_ptr(O2, XXX_STATE(_constants));
 
@@ -1178,7 +1181,8 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
     __ ld_ptr(XXX_STATE(_locals), O1);
     __ br( Assembler::zero, true, Assembler::pt, got_obj);
     __ delayed()->ld_ptr(O1, 0, O1);                  // get receiver for not-static case
-    __ ld_ptr(constants, O1);
+    __ ld_ptr(constMethod, O1);
+    __ ld_ptr( O1, in_bytes(constMethodOopDesc::constants_offset()), O1);
     __ ld_ptr( O1, constantPoolOopDesc::pool_holder_offset_in_bytes(), O1);
     // lock the mirror, not the klassOop
     __ ld_ptr( O1, mirror_offset, O1);
@@ -1536,7 +1540,7 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
   const Register Gtmp1 = G3_scratch;
   // const Register Lmirror = L1;     // native mirror (native calls only)
 
-  const Address constants         (G5_method, 0, in_bytes(methodOopDesc::constants_offset()));
+  const Address constMethod       (G5_method, 0, in_bytes(methodOopDesc::const_offset()));
   const Address access_flags      (G5_method, 0, in_bytes(methodOopDesc::access_flags_offset()));
   const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset()));
   const Address max_stack         (G5_method, 0, in_bytes(methodOopDesc::max_stack_offset()));
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -934,8 +934,14 @@ void InterpreterMacroAssembler::index_check(Register array, Register index, int
 }
 
 
+void InterpreterMacroAssembler::get_const(Register Rdst) {
+  ld_ptr(Lmethod, in_bytes(methodOopDesc::const_offset()), Rdst);
+}
+
+
 void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
-  ld_ptr(Lmethod, in_bytes(methodOopDesc::constants_offset()), Rdst);
+  get_const(Rdst);
+  ld_ptr(Rdst, in_bytes(constMethodOopDesc::constants_offset()), Rdst);
 }
 
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -205,6 +205,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
   void index_check(Register array, Register index, int index_shift, Register tmp, Register res);
   void index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res);
 
+  void get_const(Register Rdst);
   void get_constant_pool(Register Rdst);
   void get_constant_pool_cache(Register Rdst);
   void get_cpool_and_tags(Register Rcpool, Register Rtags);
@@ -827,7 +827,6 @@ void emit_form3_mem_reg(CodeBuffer &cbuf, const MachNode* n, int primary, int te
     // a Load
     // inputs are (0:control, 1:memory, 2:address)
     if (!(n->ideal_Opcode()==ld_op)       && // Following are special cases
-        !(n->ideal_Opcode()==Op_LoadLLocked && ld_op==Op_LoadI) &&
         !(n->ideal_Opcode()==Op_LoadPLocked && ld_op==Op_LoadP) &&
         !(n->ideal_Opcode()==Op_LoadI     && ld_op==Op_LoadF) &&
         !(n->ideal_Opcode()==Op_LoadF     && ld_op==Op_LoadI) &&
@@ -7306,17 +7305,6 @@ instruct loadPLocked(iRegP dst, memory mem) %{
   ins_pipe(iload_mem);
 %}
 
-// LoadL-locked.  Same as a regular long load when used with a compare-swap
-instruct loadLLocked(iRegL dst, memory mem) %{
-  match(Set dst (LoadLLocked mem));
-  ins_cost(MEMORY_REF_COST);
-  size(4);
-  format %{ "LDX    $mem,$dst\t! long" %}
-  opcode(Assembler::ldx_op3);
-  ins_encode(simple_form3_mem_reg( mem, dst ) );
-  ins_pipe(iload_mem);
-%}
-
 instruct storePConditional( iRegP heap_top_ptr, iRegP oldval, g3RegP newval, flagsRegP pcc ) %{
   match(Set pcc (StorePConditional heap_top_ptr (Binary oldval newval)));
   effect( KILL newval );
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -371,7 +371,8 @@ void InterpreterGenerator::lock_method(void) {
   __ br( Assembler::zero, true, Assembler::pt, done);
   __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case
 
-  __ ld_ptr( Lmethod, in_bytes(methodOopDesc::constants_offset()), O0);
+  __ ld_ptr( Lmethod, in_bytes(methodOopDesc::const_offset()), O0);
+  __ ld_ptr( O0, in_bytes(constMethodOopDesc::constants_offset()), O0);
   __ ld_ptr( O0, constantPoolOopDesc::pool_holder_offset_in_bytes(), O0);
 
   // lock the mirror, not the klassOop
@@ -670,7 +671,8 @@ address InterpreterGenerator::generate_accessor_entry(void) {
                     ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
 
   // get constant pool cache
-  __ ld_ptr(G5_method, methodOopDesc::constants_offset(), G3_scratch);
+  __ ld_ptr(G5_method, methodOopDesc::const_offset(), G3_scratch);
+  __ ld_ptr(G3_scratch, constMethodOopDesc::constants_offset(), G3_scratch);
   __ ld_ptr(G3_scratch, constantPoolOopDesc::cache_offset_in_bytes(), G3_scratch);
 
   // get specific constant pool cache entry
@@ -993,7 +995,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
     // for static methods insert the mirror argument
     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 
-    __ ld_ptr(Lmethod, methodOopDesc:: constants_offset(), O1);
+    __ ld_ptr(Lmethod, methodOopDesc:: const_offset(), O1);
+    __ ld_ptr(O1, constMethodOopDesc::constants_offset(), O1);
     __ ld_ptr(O1, constantPoolOopDesc::pool_holder_offset_in_bytes(), O1);
     __ ld_ptr(O1, mirror_offset, O1);
 #ifdef ASSERT
@@ -6927,21 +6927,42 @@ void MacroAssembler::pow_exp_core_encoding() {
   addptr(rsp,sizeof(jdouble));
 }
 
+void MacroAssembler::increase_precision() {
+  subptr(rsp, BytesPerWord);
+  fnstcw(Address(rsp, 0));
+  movl(rax, Address(rsp, 0));
+  orl(rax, 0x300);
+  push(rax);
+  fldcw(Address(rsp, 0));
+  pop(rax);
+}
+
+void MacroAssembler::restore_precision() {
+  fldcw(Address(rsp, 0));
+  addptr(rsp, BytesPerWord);
+}
+
 void MacroAssembler::fast_pow() {
   // computes X^Y = 2^(Y * log2(X))
   // if fast computation is not possible, result is NaN. Requires
   // fallback from user of this macro.
+  // increase precision for intermediate steps of the computation
+  increase_precision();
   fyl2x();                 // Stack: (Y*log2(X)) ...
   pow_exp_core_encoding(); // Stack: exp(X) ...
+  restore_precision();
 }
 
 void MacroAssembler::fast_exp() {
   // computes exp(X) = 2^(X * log2(e))
   // if fast computation is not possible, result is NaN. Requires
   // fallback from user of this macro.
+  // increase precision for intermediate steps of the computation
+  increase_precision();
   fldl2e();                // Stack: log2(e) X ...
   fmulp(1);                // Stack: (X*log2(e)) ...
   pow_exp_core_encoding(); // Stack: exp(X) ...
+  restore_precision();
 }
 
 void MacroAssembler::pow_or_exp(bool is_exp, int num_fpu_regs_in_use) {
@@ -2395,6 +2395,8 @@ class MacroAssembler: public Assembler {
   // runtime call.
   void fast_pow();
   void fast_exp();
+  void increase_precision();
+  void restore_precision();
 
   // computes exp(x). Fallback to runtime call included.
   void exp_with_fallback(int num_fpu_regs_in_use) { pow_or_exp(true, num_fpu_regs_in_use); }
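
Note (editorial): fast_pow()/fast_exp() lean on the identities pow(x, y) = 2^(y * log2(x)) and exp(x) = 2^(x * log2(e)); the new increase_precision()/restore_precision() pair widens the x87 precision-control field for the intermediate steps (OR-ing 0x300 into the FPU control word selects 64-bit significands). A numeric sketch of the identities only, in portable C++ rather than FPU code:

    #include <cmath>
    #include <cstdio>

    int main() {
      const double x = 3.7, y = 2.5;
      const double e = std::exp(1.0);
      // pow via 2^(y*log2(x)), exp via 2^(x*log2(e)); pairs should agree closely:
      std::printf("%.17g %.17g\n", std::pow(x, y), std::exp2(y * std::log2(x)));
      std::printf("%.17g %.17g\n", std::exp(x),    std::exp2(x * std::log2(e)));
      return 0;
    }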
@@ -2673,7 +2673,7 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
 #endif // _LP64
       }
     } else {
-      ShouldNotReachHere();
+      fatal(err_msg("unexpected type: %s", basictype_to_str(c->type())));
     }
     // cpu register - address
   } else if (opr2->is_address()) {
@@ -718,35 +718,6 @@ void LIRGenerator::do_CompareOp(CompareOp* x) {
 }
 
 
-void LIRGenerator::do_AttemptUpdate(Intrinsic* x) {
-  assert(x->number_of_arguments() == 3, "wrong type");
-  LIRItem obj       (x->argument_at(0), this);  // AtomicLong object
-  LIRItem cmp_value (x->argument_at(1), this);  // value to compare with field
-  LIRItem new_value (x->argument_at(2), this);  // replace field with new_value if it matches cmp_value
-
-  // compare value must be in rdx,eax (hi,lo); may be destroyed by cmpxchg8 instruction
-  cmp_value.load_item_force(FrameMap::long0_opr);
-
-  // new value must be in rcx,ebx (hi,lo)
-  new_value.load_item_force(FrameMap::long1_opr);
-
-  // object pointer register is overwritten with field address
-  obj.load_item();
-
-  // generate compare-and-swap; produces zero condition if swap occurs
-  int value_offset = sun_misc_AtomicLongCSImpl::value_offset();
-  LIR_Opr addr = new_pointer_register();
-  __ leal(LIR_OprFact::address(new LIR_Address(obj.result(), value_offset, T_LONG)), addr);
-  LIR_Opr t1 = LIR_OprFact::illegalOpr;  // no temp needed
-  LIR_Opr t2 = LIR_OprFact::illegalOpr;  // no temp needed
-  __ cas_long(addr, cmp_value.result(), new_value.result(), t1, t2);
-
-  // generate conditional move of boolean result
-  LIR_Opr result = rlock_result(x);
-  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result, T_LONG);
-}
-
-
 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
   assert(x->number_of_arguments() == 4, "wrong type");
   LIRItem obj   (x->argument_at(0), this);  // object
@@ -1116,10 +1087,10 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
   if (!x->klass()->is_loaded() || PatchALot) {
     patching_info = state_for(x, x->state_before());
 
-    // cannot re-use same xhandlers for multiple CodeEmitInfos, so
-    // clone all handlers.  This is handled transparently in other
-    // places by the CodeEmitInfo cloning logic but is handled
-    // specially here because a stub isn't being used.
+    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
+    // clone all handlers (NOTE: Usually this is handled transparently
+    // by the CodeEmitInfo cloning logic in CodeStub constructors but
+    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
   }
   CodeEmitInfo* info = state_for(x, x->state());
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -481,7 +481,8 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
   __ xorptr(rdx, rdx);
   __ movptr(STATE(_oop_temp), rdx);            // state->_oop_temp = NULL (only really needed for native)
   __ movptr(STATE(_mdx), rdx);                 // state->_mdx = NULL
-  __ movptr(rdx, Address(rbx, methodOopDesc::constants_offset()));
+  __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
+  __ movptr(rdx, Address(rdx, constMethodOopDesc::constants_offset()));
   __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
   __ movptr(STATE(_constants), rdx);           // state->_constants = constants()
 
@@ -516,7 +517,8 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
     __ testl(rax, JVM_ACC_STATIC);
     __ movptr(rax, Address(locals, 0));        // get receiver (assume this is frequent case)
     __ jcc(Assembler::zero, done);
-    __ movptr(rax, Address(rbx, methodOopDesc::constants_offset()));
+    __ movptr(rax, Address(rbx, methodOopDesc::const_offset()));
+    __ movptr(rax, Address(rax, constMethodOopDesc::constants_offset()));
     __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
     __ movptr(rax, Address(rax, mirror_offset));
     __ bind(done);
@@ -769,7 +771,8 @@ void InterpreterGenerator::lock_method(void) {
     __ testl(rax, JVM_ACC_STATIC);
     __ movptr(rax, Address(rdi, 0));           // get receiver (assume this is frequent case)
     __ jcc(Assembler::zero, done);
-    __ movptr(rax, Address(rbx, methodOopDesc::constants_offset()));
+    __ movptr(rax, Address(rbx, methodOopDesc::const_offset()));
+    __ movptr(rax, Address(rax, constMethodOopDesc::constants_offset()));
     __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
     __ movptr(rax, Address(rax, mirror_offset));
     __ bind(done);
@@ -821,9 +824,9 @@ address InterpreterGenerator::generate_accessor_entry(void) {
   __ testptr(rax, rax);
   __ jcc(Assembler::zero, slow_path);
 
-  __ movptr(rdi, Address(rbx, methodOopDesc::constants_offset()));
   // read first instruction word and extract bytecode @ 1 and index @ 2
   __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
+  __ movptr(rdi, Address(rdx, constMethodOopDesc::constants_offset()));
   __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
   // Shift codes right to get the index on the right.
   // The bytecode fetched looks like <index><0xb4><0x2a>
@@ -1185,7 +1188,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
       __ testl(t, JVM_ACC_STATIC);
       __ jcc(Assembler::zero, L);
       // get mirror
-      __ movptr(t, Address(method, methodOopDesc:: constants_offset()));
+      __ movptr(t, Address(method, methodOopDesc:: const_offset()));
+      __ movptr(t, Address(t, constMethodOopDesc::constants_offset()));
      __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
      __ movptr(t, Address(t, mirror_offset));
      // copy mirror into activation object
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -77,7 +77,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
 
   // Helpers for runtime call arguments/results
   void get_method(Register reg) { movptr(reg, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); }
-  void get_constant_pool(Register reg) { get_method(reg); movptr(reg, Address(reg, methodOopDesc::constants_offset())); }
+  void get_const(Register reg) { get_method(reg); movptr(reg, Address(reg, methodOopDesc::const_offset())); }
+  void get_constant_pool(Register reg) { get_const(reg); movptr(reg, Address(reg, constMethodOopDesc::constants_offset())); }
   void get_constant_pool_cache(Register reg) { get_constant_pool(reg); movptr(reg, Address(reg, constantPoolOopDesc::cache_offset_in_bytes())); }
   void get_cpool_and_tags(Register cpool, Register tags) { get_constant_pool(cpool); movptr(tags, Address(cpool, constantPoolOopDesc::tags_offset_in_bytes()));
   }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -84,9 +84,14 @@ class InterpreterMacroAssembler: public MacroAssembler {
     movptr(reg, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
   }
 
-  void get_constant_pool(Register reg) {
+  void get_const(Register reg) {
     get_method(reg);
-    movptr(reg, Address(reg, methodOopDesc::constants_offset()));
+    movptr(reg, Address(reg, methodOopDesc::const_offset()));
+  }
+
+  void get_constant_pool(Register reg) {
+    get_const(reg);
+    movptr(reg, Address(reg, constMethodOopDesc::constants_offset()));
   }
 
   void get_constant_pool_cache(Register reg) {
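
Note (editorial): in every interpreter port touched above, the old one-load get_constant_pool() becomes get_const() plus one more load, so existing call sites keep working while following the new indirection. A toy pointer-chasing model of that composition (hypothetical names, not assembler code):

    #include <cstdio>

    struct ToyConstMethod { void* constants; };
    struct ToyMethod      { ToyConstMethod* const_method; };

    // get_constant_pool() is now get_const() composed with one extra load.
    void* get_const(ToyMethod* m)         { return m->const_method; }
    void* get_constant_pool(ToyMethod* m) {
      return static_cast<ToyConstMethod*>(get_const(m))->constants;
    }

    int main() {
      int pool = 0;
      ToyConstMethod cm{&pool};
      ToyMethod m{&cm};
      std::printf("%s\n", get_constant_pool(&m) == &pool ? "ok" : "bug");
      return 0;
    }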
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -566,7 +566,8 @@ void InterpreterGenerator::lock_method(void) {
     __ testl(rax, JVM_ACC_STATIC);
     __ movptr(rax, Address(rdi, Interpreter::local_offset_in_bytes(0)));  // get receiver (assume this is frequent case)
     __ jcc(Assembler::zero, done);
-    __ movptr(rax, Address(rbx, methodOopDesc::constants_offset()));
+    __ movptr(rax, Address(rbx, methodOopDesc::const_offset()));
+    __ movptr(rax, Address(rax, constMethodOopDesc::constants_offset()));
     __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
     __ movptr(rax, Address(rax, mirror_offset));
     __ bind(done);
@@ -606,7 +607,8 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
     __ push(0);
   }
 
-  __ movptr(rdx, Address(rbx, methodOopDesc::constants_offset()));
+  __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
+  __ movptr(rdx, Address(rdx, constMethodOopDesc::constants_offset()));
   __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
   __ push(rdx);          // set constant pool cache
   __ push(rdi);          // set locals pointer
@@ -661,9 +663,9 @@ address InterpreterGenerator::generate_accessor_entry(void) {
   __ testptr(rax, rax);
   __ jcc(Assembler::zero, slow_path);
 
-  __ movptr(rdi, Address(rbx, methodOopDesc::constants_offset()));
   // read first instruction word and extract bytecode @ 1 and index @ 2
   __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
+  __ movptr(rdi, Address(rdx, constMethodOopDesc::constants_offset()));
   __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
   // Shift codes right to get the index on the right.
   // The bytecode fetched looks like <index><0xb4><0x2a>
@@ -1026,7 +1028,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
       __ testl(t, JVM_ACC_STATIC);
       __ jcc(Assembler::zero, L);
       // get mirror
-      __ movptr(t, Address(method, methodOopDesc:: constants_offset()));
+      __ movptr(t, Address(method, methodOopDesc:: const_offset()));
+      __ movptr(t, Address(t, constMethodOopDesc::constants_offset()));
      __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
      __ movptr(t, Address(t, mirror_offset));
      // copy mirror into activation frame
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -522,7 +522,8 @@ void InterpreterGenerator::lock_method(void) {
     // get receiver (assume this is frequent case)
     __ movptr(rax, Address(r14, Interpreter::local_offset_in_bytes(0)));
     __ jcc(Assembler::zero, done);
-    __ movptr(rax, Address(rbx, methodOopDesc::constants_offset()));
+    __ movptr(rax, Address(rbx, methodOopDesc::const_offset()));
+    __ movptr(rax, Address(rax, constMethodOopDesc::constants_offset()));
     __ movptr(rax, Address(rax,
                            constantPoolOopDesc::pool_holder_offset_in_bytes()));
     __ movptr(rax, Address(rax, mirror_offset));
@@ -579,7 +580,8 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
     __ push(0);
   }
 
-  __ movptr(rdx, Address(rbx, methodOopDesc::constants_offset()));
+  __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
+  __ movptr(rdx, Address(rdx, constMethodOopDesc::constants_offset()));
   __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
   __ push(rdx);          // set constant pool cache
   __ push(r14);          // set locals pointer
@@ -629,9 +631,9 @@ address InterpreterGenerator::generate_accessor_entry(void) {
   __ testptr(rax, rax);
   __ jcc(Assembler::zero, slow_path);
 
-  __ movptr(rdi, Address(rbx, methodOopDesc::constants_offset()));
   // read first instruction word and extract bytecode @ 1 and index @ 2
   __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
+  __ movptr(rdi, Address(rdx, constMethodOopDesc::constants_offset()));
   __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
   // Shift codes right to get the index on the right.
   // The bytecode fetched looks like <index><0xb4><0x2a>
@@ -1020,7 +1022,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
       __ testl(t, JVM_ACC_STATIC);
       __ jcc(Assembler::zero, L);
       // get mirror
-      __ movptr(t, Address(method, methodOopDesc::constants_offset()));
+      __ movptr(t, Address(method, methodOopDesc::const_offset()));
+      __ movptr(t, Address(t, constMethodOopDesc::constants_offset()));
      __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
      __ movptr(t, Address(t, mirror_offset));
      // copy mirror into activation frame
@@ -5555,8 +5555,9 @@ instruct bytes_reverse_long(eRegL dst) %{
   ins_pipe( ialu_reg_reg);
 %}
 
-instruct bytes_reverse_unsigned_short(eRegI dst) %{
+instruct bytes_reverse_unsigned_short(eRegI dst, eFlagsReg cr) %{
   match(Set dst (ReverseBytesUS dst));
+  effect(KILL cr);
 
   format %{ "BSWAP  $dst\n\t"
             "SHR    $dst,16\n\t" %}
@@ -5567,8 +5568,9 @@ instruct bytes_reverse_unsigned_short(eRegI dst) %{
   ins_pipe( ialu_reg );
 %}
 
-instruct bytes_reverse_short(eRegI dst) %{
+instruct bytes_reverse_short(eRegI dst, eFlagsReg cr) %{
   match(Set dst (ReverseBytesS dst));
+  effect(KILL cr);
 
   format %{ "BSWAP  $dst\n\t"
             "SAR    $dst,16\n\t" %}
@@ -5729,9 +5731,10 @@ instruct countTrailingZerosL(eRegI dst, eRegL src, eFlagsReg cr) %{
 
 //---------- Population Count Instructions -------------------------------------
 
-instruct popCountI(eRegI dst, eRegI src) %{
+instruct popCountI(eRegI dst, eRegI src, eFlagsReg cr) %{
   predicate(UsePopCountInstruction);
   match(Set dst (PopCountI src));
+  effect(KILL cr);
 
   format %{ "POPCNT $dst, $src" %}
   ins_encode %{
@@ -5740,9 +5743,10 @@ instruct popCountI(eRegI dst, eRegI src) %{
   ins_pipe(ialu_reg);
 %}
 
-instruct popCountI_mem(eRegI dst, memory mem) %{
+instruct popCountI_mem(eRegI dst, memory mem, eFlagsReg cr) %{
   predicate(UsePopCountInstruction);
   match(Set dst (PopCountI (LoadI mem)));
+  effect(KILL cr);
 
   format %{ "POPCNT $dst, $mem" %}
   ins_encode %{
@@ -7796,50 +7800,6 @@ instruct loadPLocked(eRegP dst, memory mem) %{
   ins_pipe( ialu_reg_mem );
 %}
 
-// LoadLong-locked - same as a volatile long load when used with compare-swap
-instruct loadLLocked(stackSlotL dst, memory mem) %{
-  predicate(UseSSE<=1);
-  match(Set dst (LoadLLocked mem));
-
-  ins_cost(200);
-  format %{ "FILD   $mem\t# Atomic volatile long load\n\t"
-            "FISTp  $dst" %}
-  ins_encode(enc_loadL_volatile(mem,dst));
-  ins_pipe( fpu_reg_mem );
-%}
-
-instruct loadLX_Locked(stackSlotL dst, memory mem, regD tmp) %{
-  predicate(UseSSE>=2);
-  match(Set dst (LoadLLocked mem));
-  effect(TEMP tmp);
-  ins_cost(180);
-  format %{ "MOVSD  $tmp,$mem\t# Atomic volatile long load\n\t"
-            "MOVSD  $dst,$tmp" %}
-  ins_encode %{
-    __ movdbl($tmp$$XMMRegister, $mem$$Address);
-    __ movdbl(Address(rsp, $dst$$disp), $tmp$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct loadLX_reg_Locked(eRegL dst, memory mem, regD tmp) %{
-  predicate(UseSSE>=2);
-  match(Set dst (LoadLLocked mem));
-  effect(TEMP tmp);
-  ins_cost(160);
-  format %{ "MOVSD  $tmp,$mem\t# Atomic volatile long load\n\t"
-            "MOVD   $dst.lo,$tmp\n\t"
-            "PSRLQ  $tmp,32\n\t"
-            "MOVD   $dst.hi,$tmp" %}
-  ins_encode %{
-    __ movdbl($tmp$$XMMRegister, $mem$$Address);
-    __ movdl($dst$$Register, $tmp$$XMMRegister);
-    __ psrlq($tmp$$XMMRegister, 32);
-    __ movdl(HIGH_FROM_LOW($dst$$Register), $tmp$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
 // Conditional-store of the updated heap-top.
 // Used during allocation of the shared heap.
 // Sets flags (EQ) on success. Implemented with a CMPXCHG on Intel.
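
Note (editorial): the eFlagsReg cr operand and effect(KILL cr) annotations added throughout these .ad rules tell the register allocator that the underlying SHR/SAR and POPCNT instructions clobber the condition codes, so no flags-dependent value may be kept live across them. For orientation, here is what bytes_reverse_unsigned_short computes, as a hedged C++ sketch (the __builtin_bswap32 intrinsic is a GCC/Clang assumption):

    #include <cstdint>
    #include <cstdio>

    uint32_t reverse_bytes_us(uint32_t x) {
      uint32_t swapped = __builtin_bswap32(x);  // BSWAP $dst
      return swapped >> 16;                     // SHR $dst,16 (this is what writes EFLAGS)
    }

    int main() {
      std::printf("0x%x\n", reverse_bytes_us(0x1234));  // prints 0x3412
      return 0;
    }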
@@ -6417,14 +6417,14 @@ instruct bytes_reverse_long(rRegL dst) %{
   match(Set dst (ReverseBytesL dst));
 
   format %{ "bswapq  $dst" %}
-
   opcode(0x0F, 0xC8); /* Opcode 0F /C8 */
   ins_encode( REX_reg_wide(dst), OpcP, opc2_reg(dst) );
   ins_pipe( ialu_reg);
 %}
 
-instruct bytes_reverse_unsigned_short(rRegI dst) %{
+instruct bytes_reverse_unsigned_short(rRegI dst, rFlagsReg cr) %{
   match(Set dst (ReverseBytesUS dst));
+  effect(KILL cr);
 
   format %{ "bswapl  $dst\n\t"
             "shrl    $dst,16\n\t" %}
@@ -6435,8 +6435,9 @@ instruct bytes_reverse_unsigned_short(rRegI dst) %{
   ins_pipe( ialu_reg );
 %}
 
-instruct bytes_reverse_short(rRegI dst) %{
+instruct bytes_reverse_short(rRegI dst, rFlagsReg cr) %{
   match(Set dst (ReverseBytesS dst));
+  effect(KILL cr);
 
   format %{ "bswapl  $dst\n\t"
             "sar     $dst,16\n\t" %}
@@ -6564,9 +6565,10 @@ instruct countTrailingZerosL(rRegI dst, rRegL src, rFlagsReg cr) %{
 
 //---------- Population Count Instructions -------------------------------------
 
-instruct popCountI(rRegI dst, rRegI src) %{
+instruct popCountI(rRegI dst, rRegI src, rFlagsReg cr) %{
   predicate(UsePopCountInstruction);
   match(Set dst (PopCountI src));
+  effect(KILL cr);
 
   format %{ "popcnt  $dst, $src" %}
   ins_encode %{
@@ -6575,9 +6577,10 @@ instruct popCountI(rRegI dst, rRegI src) %{
   ins_pipe(ialu_reg);
 %}
 
-instruct popCountI_mem(rRegI dst, memory mem) %{
+instruct popCountI_mem(rRegI dst, memory mem, rFlagsReg cr) %{
   predicate(UsePopCountInstruction);
   match(Set dst (PopCountI (LoadI mem)));
+  effect(KILL cr);
 
   format %{ "popcnt  $dst, $mem" %}
   ins_encode %{
@@ -6587,9 +6590,10 @@ instruct popCountI_mem(rRegI dst, memory mem) %{
 %}
 
 // Note: Long.bitCount(long) returns an int.
-instruct popCountL(rRegI dst, rRegL src) %{
+instruct popCountL(rRegI dst, rRegL src, rFlagsReg cr) %{
   predicate(UsePopCountInstruction);
   match(Set dst (PopCountL src));
+  effect(KILL cr);
 
   format %{ "popcnt  $dst, $src" %}
   ins_encode %{
@@ -6599,9 +6603,10 @@ instruct popCountL(rRegI dst, rRegL src) %{
 %}
 
 // Note: Long.bitCount(long) returns an int.
-instruct popCountL_mem(rRegI dst, memory mem) %{
+instruct popCountL_mem(rRegI dst, memory mem, rFlagsReg cr) %{
   predicate(UsePopCountInstruction);
   match(Set dst (PopCountL (LoadL mem)));
+  effect(KILL cr);
 
   format %{ "popcnt  $dst, $mem" %}
   ins_encode %{
@@ -7492,18 +7497,6 @@ instruct loadPLocked(rRegP dst, memory mem)
   ins_pipe(ialu_reg_mem); // XXX
 %}
 
-// LoadL-locked - same as a regular LoadL when used with compare-swap
-instruct loadLLocked(rRegL dst, memory mem)
-%{
-  match(Set dst (LoadLLocked mem));
-
-  ins_cost(125); // XXX
-  format %{ "movq    $dst, $mem\t# long locked" %}
-  opcode(0x8B);
-  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
-  ins_pipe(ialu_reg_mem); // XXX
-%}
-
 // Conditional-store of the updated heap-top.
 // Used during allocation of the shared heap.
 // Sets flags (EQ) on success. Implemented with a CMPXCHG on Intel.
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -220,10 +220,10 @@ int generateJvmOffsets(GEN_variant gen_variant) {
   printf("\n");
 
   GEN_OFFS(methodOopDesc, _constMethod);
-  GEN_OFFS(methodOopDesc, _constants);
   GEN_OFFS(methodOopDesc, _access_flags);
   printf("\n");
 
+  GEN_OFFS(constMethodOopDesc, _constants);
   GEN_OFFS(constMethodOopDesc, _flags);
   GEN_OFFS(constMethodOopDesc, _code_size);
   GEN_OFFS(constMethodOopDesc, _name_index);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -118,7 +118,7 @@ dtrace:helper:ustack:
     copyin_offset(OFFSET_Symbol_body);
 
     copyin_offset(OFFSET_methodOopDesc_constMethod);
-    copyin_offset(OFFSET_methodOopDesc_constants);
+    copyin_offset(OFFSET_constMethodOopDesc_constants);
     copyin_offset(OFFSET_constMethodOopDesc_name_index);
     copyin_offset(OFFSET_constMethodOopDesc_signature_index);
 
@@ -359,8 +359,8 @@ dtrace:helper:ustack:
         this->signatureIndex = copyin_uint16(this->constMethod +
             OFFSET_constMethodOopDesc_signature_index);
 
-        this->constantPool = copyin_ptr(this->methodOopPtr +
-            OFFSET_methodOopDesc_constants);
+        this->constantPool = copyin_ptr(this->constMethod +
+            OFFSET_constMethodOopDesc_constants);
 
         this->nameSymbol = copyin_ptr(this->constantPool +
             this->nameIndex * sizeof (pointer) + SIZE_constantPoolOopDesc);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -514,10 +514,10 @@ name_for_methodOop(jvm_agent_t* J, uint64_t methodOopPtr, char * result, size_t
   char * signatureString = NULL;
   int err;
 
-  err = read_pointer(J, methodOopPtr + OFFSET_methodOopDesc_constants, &constantPool);
-  CHECK_FAIL(err);
   err = read_pointer(J, methodOopPtr + OFFSET_methodOopDesc_constMethod, &constMethod);
   CHECK_FAIL(err);
+  err = read_pointer(J->P, constMethod + OFFSET_constMethodOopDesc_constants, &constantPool);
+  CHECK_FAIL(err);
 
   /* To get name string */
   err = ps_pread(J->P, constMethod + OFFSET_constMethodOopDesc_name_index, &nameIndex, 2);
@@ -1591,7 +1591,8 @@ void os::win32::print_windows_version(outputStream* st) {
     case 5001: st->print(" Windows XP"); break;
     case 5002:
     case 6000:
-    case 6001: {
+    case 6001:
+    case 6002: {
       // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
       // find out whether we are running on 64 bit processor or not.
       SYSTEM_INFO si;
@@ -1623,6 +1624,14 @@ void os::win32::print_windows_version(outputStream* st) {
       }
       if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
         st->print(" , 64 bit");
+    } else if (os_vers == 6002) {
+      if (osvi.wProductType == VER_NT_WORKSTATION) {
+        st->print(" Windows 8");
+      } else {
+        st->print(" Windows Server 2012");
+      }
+      if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
+        st->print(" , 64 bit");
     } else { // future os
       // Unrecognized windows, print out its major and minor versions
       st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
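
Note (editorial): HotSpot encodes the Windows version as major * 1000 + minor, so os_vers == 6002 means 6.2; the new branch reports it as "Windows 8" on workstation installs and "Windows Server 2012" otherwise. A standalone sketch of the mapping (illustration only, not the HotSpot function):

    #include <cstdio>

    const char* windows_name(int os_vers, bool workstation) {
      switch (os_vers) {
        case 5001: return "Windows XP";
        case 6001: return workstation ? "Windows 7" : "Windows Server 2008 R2";
        case 6002: return workstation ? "Windows 8" : "Windows Server 2012";
        default:   return "unrecognized Windows";
      }
    }

    int main() {
      std::printf("%s\n", windows_name(6002, true));   // Windows 8
      std::printf("%s\n", windows_name(6002, false));  // Windows Server 2012
      return 0;
    }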
@@ -261,7 +261,6 @@ Form::DataType Form::is_load_from_memory(const char *opType) const {
   if( strcmp(opType,"LoadL")==0 )  return Form::idealL;
   if( strcmp(opType,"LoadL_unaligned")==0 ) return Form::idealL;
   if( strcmp(opType,"LoadPLocked")==0 ) return Form::idealP;
-  if( strcmp(opType,"LoadLLocked")==0 ) return Form::idealL;
   if( strcmp(opType,"LoadP")==0 ) return Form::idealP;
   if( strcmp(opType,"LoadN")==0 ) return Form::idealN;
   if( strcmp(opType,"LoadRange")==0 ) return Form::idealI;
@@ -3387,7 +3387,7 @@ int MatchNode::needs_ideal_memory_edge(FormDict &globals) const {
     "Load4I" ,"Load2I" ,"Load2L" ,"Load2D" ,"Load4F" ,"Load2F" ,"Load16B" ,
     "Load8B" ,"Load4B" ,"Load8C" ,"Load4C" ,"Load2C" ,"Load8S", "Load4S","Load2S",
     "LoadRange", "LoadKlass", "LoadNKlass", "LoadL_unaligned", "LoadD_unaligned",
-    "LoadPLocked", "LoadLLocked",
+    "LoadPLocked",
     "StorePConditional", "StoreIConditional", "StoreLConditional",
     "CompareAndSwapI", "CompareAndSwapL", "CompareAndSwapP", "CompareAndSwapN",
     "StoreCM",
@@ -42,6 +42,11 @@ void Canonicalizer::set_canonical(Value x) {
   // the instruction stream (because the instruction list is embedded
   // in the instructions).
   if (canonical() != x) {
+#ifndef PRODUCT
+    if (!x->has_printable_bci()) {
+      x->set_printable_bci(bci());
+    }
+#endif
     if (PrintCanonicalization) {
       PrintValueVisitor do_print_value;
       canonical()->input_values_do(&do_print_value);
@@ -451,6 +456,28 @@ void Canonicalizer::do_Intrinsic (Intrinsic* x) {
     }
     break;
   }
+  case vmIntrinsics::_isInstance          : {
+    assert(x->number_of_arguments() == 2, "wrong type");
+
+    InstanceConstant* c = x->argument_at(0)->type()->as_InstanceConstant();
+    if (c != NULL && !c->value()->is_null_object()) {
+      // ciInstance::java_mirror_type() returns non-NULL only for Java mirrors
+      ciType* t = c->value()->as_instance()->java_mirror_type();
+      if (t->is_klass()) {
+        // substitute cls.isInstance(obj) of a constant Class into
+        // an InstantOf instruction
+        InstanceOf* i = new InstanceOf(t->as_klass(), x->argument_at(1), x->state_before());
+        set_canonical(i);
+        // and try to canonicalize even further
+        do_InstanceOf(i);
+      } else {
+        assert(t->is_primitive_type(), "should be a primitive type");
+        // cls.isInstance(obj) always returns false for primitive classes
+        set_constant(0);
+      }
+    }
+    break;
+  }
   }
 }
 
@@ -677,8 +704,8 @@ void Canonicalizer::do_If(If* x) {
           return;
         }
       }
-      set_canonical(canon);
       set_bci(cmp->state_before()->bci());
+      set_canonical(canon);
     }
   }
 } else if (l->as_InstanceOf() != NULL) {
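
Note (editorial): the new _isInstance case folds Class.isInstance() when the receiver Class is a compile-time constant. A primitive mirror (e.g. int.class) can never have instances, so the call collapses to the constant false; a regular klass mirror becomes an InstanceOf node, which may canonicalize further. The decision table as a hedged sketch (hypothetical enum, not C1 code):

    #include <cstdio>

    enum MirrorKind { PRIMITIVE_MIRROR, KLASS_MIRROR, NOT_CONSTANT };

    const char* canonicalize_is_instance(MirrorKind receiver) {
      switch (receiver) {
        case PRIMITIVE_MIRROR: return "fold to constant false";
        case KLASS_MIRROR:     return "replace with InstanceOf node";
        default:               return "keep the intrinsic call";
      }
    }

    int main() {
      std::printf("%s\n", canonicalize_is_instance(PRIMITIVE_MIRROR));
      return 0;
    }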
@@ -3170,6 +3170,7 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
       break;
 
     case vmIntrinsics::_getClass      :
+    case vmIntrinsics::_isInstance    :
       if (!InlineClassNatives)  return false;
       preserves_state = true;
       break;
@@ -3194,13 +3195,6 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
       preserves_state = true;
       break;
 
-    // sun/misc/AtomicLong.attemptUpdate
-    case vmIntrinsics::_attemptUpdate :
-      if (!VM_Version::supports_cx8()) return false;
-      if (!InlineAtomicLong) return false;
-      preserves_state = true;
-      break;
-
     // Use special nodes for Unsafe instructions so we can more easily
     // perform an address-mode optimization on the raw variants
     case vmIntrinsics::_getObject : return append_unsafe_get_obj(callee, T_OBJECT, false);
@ -302,8 +302,6 @@ class Instruction: public CompilationResourceObj {
|
||||
|
||||
void update_exception_state(ValueStack* state);
|
||||
|
||||
bool has_printable_bci() const { return NOT_PRODUCT(_printable_bci != -99) PRODUCT_ONLY(false); }
|
||||
|
||||
protected:
|
||||
void set_type(ValueType* type) {
|
||||
assert(type != NULL, "type must exist");
|
||||
@ -392,8 +390,9 @@ class Instruction: public CompilationResourceObj {
|
||||
// accessors
|
||||
int id() const { return _id; }
|
||||
#ifndef PRODUCT
|
||||
bool has_printable_bci() const { return _printable_bci != -99; }
|
||||
int printable_bci() const { assert(has_printable_bci(), "_printable_bci should have been set"); return _printable_bci; }
|
||||
void set_printable_bci(int bci) { NOT_PRODUCT(_printable_bci = bci;) }
|
||||
void set_printable_bci(int bci) { _printable_bci = bci; }
|
||||
#endif
|
||||
int use_count() const { return _use_count; }
|
||||
int pin_state() const { return _pin_state; }
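
The two Instruction hunks above move _printable_bci and its accessors behind #ifndef PRODUCT, so the debug-only bci bookkeeping disappears from product builds. A self-contained sketch of the same gating pattern; the PRODUCT flag and NOT_PRODUCT macro here are simplified stand-ins for HotSpot's build macros:

    #ifndef PRODUCT
    #define NOT_PRODUCT(code) code
    #else
    #define NOT_PRODUCT(code)
    #endif

    struct Instruction {
    #ifndef PRODUCT
      int _printable_bci = -99;                  // debug-only state
      bool has_printable_bci() const { return _printable_bci != -99; }
      void set_printable_bci(int bci) { _printable_bci = bci; }
    #endif
    };

    int main() {
      Instruction i;
      NOT_PRODUCT(i.set_printable_bci(42);)      // compiles away in product builds
    }
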
@@ -576,6 +575,7 @@ LEAF(Phi, Instruction)
, _block(b)
, _index(index)
{
NOT_PRODUCT(set_printable_bci(Value(b)->printable_bci()));
if (type->is_illegal()) {
make_illegal();
}
@@ -631,7 +631,9 @@ LEAF(Local, Instruction)
: Instruction(type)
, _java_index(index)
, _declared_type(declared)
{}
{
NOT_PRODUCT(set_printable_bci(-1));
}

// accessors
int java_index() const { return _java_index; }

@@ -1242,6 +1242,36 @@ void LIRGenerator::do_Reference_get(Intrinsic* x) {
NULL /* info */);
}

// Example: clazz.isInstance(object)
void LIRGenerator::do_isInstance(Intrinsic* x) {
assert(x->number_of_arguments() == 2, "wrong type");

// TODO could try to substitute this node with an equivalent InstanceOf
// if clazz is known to be a constant Class. This will pick up newly found
// constants after HIR construction. I'll leave this to a future change.

// as a first cut, make a simple leaf call to runtime to stay platform independent.
// could follow the aastore example in a future change.

LIRItem clazz(x->argument_at(0), this);
LIRItem object(x->argument_at(1), this);
clazz.load_item();
object.load_item();
LIR_Opr result = rlock_result(x);

// need to perform null check on clazz
if (x->needs_null_check()) {
CodeEmitInfo* info = state_for(x);
__ null_check(clazz.result(), info);
}

LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
CAST_FROM_FN_PTR(address, Runtime1::is_instance_of),
x->type(),
NULL); // NULL CodeEmitInfo results in a leaf call
__ move(call_result, result);
}

// Example: object.getClass ()
void LIRGenerator::do_getClass(Intrinsic* x) {
assert(x->number_of_arguments() == 1, "wrong type");
@@ -2777,31 +2807,29 @@ void LIRGenerator::do_Invoke(Invoke* x) {
int index = bcs.get_method_index();
size_t call_site_offset = cpcache->get_f1_offset(index);

// Load CallSite object from constant pool cache.
LIR_Opr call_site = new_register(objectType);
__ oop2reg(cpcache->constant_encoding(), call_site);
__ move_wide(new LIR_Address(call_site, call_site_offset, T_OBJECT), call_site);

// If this invokedynamic call site hasn't been executed yet in
// the interpreter, the CallSite object in the constant pool
// cache is still null and we need to deoptimize.
if (cpcache->is_f1_null_at(index)) {
// Cannot re-use same xhandlers for multiple CodeEmitInfos, so
// clone all handlers. This is handled transparently in other
// places by the CodeEmitInfo cloning logic but is handled
// specially here because a stub isn't being used.
x->set_exception_handlers(new XHandlers(x->exception_handlers()));

// Only deoptimize if the CallSite object is still null; we don't
// recompile methods in C1 after deoptimization so this call site
// might be resolved the next time we execute it after OSR.
DeoptimizeStub* deopt_stub = new DeoptimizeStub(deopt_info);
__ jump(deopt_stub);
__ cmp(lir_cond_equal, call_site, LIR_OprFact::oopConst(NULL));
__ branch(lir_cond_equal, T_OBJECT, deopt_stub);
}

// Use the receiver register for the synthetic MethodHandle
// argument.
receiver = LIR_Assembler::receiverOpr();
LIR_Opr tmp = new_register(objectType);

// Load CallSite object from constant pool cache.
__ oop2reg(cpcache->constant_encoding(), tmp);
__ move_wide(new LIR_Address(tmp, call_site_offset, T_OBJECT), tmp);

// Load target MethodHandle from CallSite object.
__ load(new LIR_Address(tmp, java_lang_invoke_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
__ load(new LIR_Address(call_site, java_lang_invoke_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);

__ call_dynamic(target, receiver, result_register,
SharedRuntime::get_resolve_opt_virtual_call_stub(),
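
The do_Invoke hunk above makes two related changes: the CallSite oop is loaded once into call_site and reused for the target MethodHandle load, and the unconditional jump to the deoptimization stub becomes a cmp/branch that deoptimizes only if the loaded CallSite is still null at run time. A rough standalone sketch of that control-flow shape, with all names hypothetical:

    #include <cstdio>

    static void* resolved_call_site = nullptr;   // stand-in for the cpcache slot

    static void deoptimize() { std::puts("deopt: call site still unresolved"); }

    static void dispatch() {
      void* call_site = resolved_call_site;  // load once, reuse below
      if (call_site == nullptr) {            // cmp + branch, not an unconditional jump
        deoptimize();
        return;
      }
      std::puts("invoke target loaded from call_site");
    }

    int main() {
      dispatch();                            // takes the deopt path
      resolved_call_site = &resolved_call_site;
      dispatch();                            // now dispatches normally
    }
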
@@ -2809,7 +2837,7 @@ void LIRGenerator::do_Invoke(Invoke* x) {
break;
}
default:
ShouldNotReachHere();
fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(x->code())));
break;
}

@@ -2951,6 +2979,7 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) {
break;

case vmIntrinsics::_Object_init: do_RegisterFinalizer(x); break;
case vmIntrinsics::_isInstance: do_isInstance(x); break;
case vmIntrinsics::_getClass: do_getClass(x); break;
case vmIntrinsics::_currentThread: do_currentThread(x); break;

@@ -2978,11 +3007,6 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) {
do_CompareAndSwap(x, longType);
break;

// sun.misc.AtomicLongCSImpl.attemptUpdate
case vmIntrinsics::_attemptUpdate:
do_AttemptUpdate(x);
break;

case vmIntrinsics::_Reference_get:
do_Reference_get(x);
break;
@@ -3223,4 +3247,3 @@ void LIRGenerator::do_MemBar(MemBar* x) {
}
}
}


@@ -238,12 +238,12 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
LIR_Opr getThreadPointer();

void do_RegisterFinalizer(Intrinsic* x);
void do_isInstance(Intrinsic* x);
void do_getClass(Intrinsic* x);
void do_currentThread(Intrinsic* x);
void do_MathIntrinsic(Intrinsic* x);
void do_ArrayCopy(Intrinsic* x);
void do_CompareAndSwap(Intrinsic* x, ValueType* type);
void do_AttemptUpdate(Intrinsic* x);
void do_NIOCheckIndex(Intrinsic* x);
void do_FPIntrinsics(Intrinsic* x);
void do_Reference_get(Intrinsic* x);

@@ -294,6 +294,7 @@ const char* Runtime1::name_for_address(address entry) {
FUNCTION_CASE(entry, SharedRuntime::lrem);
FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);
FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
FUNCTION_CASE(entry, is_instance_of);
FUNCTION_CASE(entry, trace_block_entry);
#ifdef TRACE_HAVE_INTRINSICS
FUNCTION_CASE(entry, TRACE_TIME_METHOD);
@@ -1270,6 +1271,19 @@ JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))
JRT_END


JRT_LEAF(int, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
// had to return int instead of bool, otherwise there may be a mismatch
// between the C calling convention and the Java one.
// e.g., on x86, GCC may clear only %al when returning a bool false, but
// JVM takes the whole %eax as the return value, which may misinterpret
// the return value as a boolean true.

assert(mirror != NULL, "should null-check on mirror before calling");
klassOop k = java_lang_Class::as_klassOop(mirror);
return (k != NULL && obj != NULL && obj->is_a(k)) ? 1 : 0;
JRT_END


#ifndef PRODUCT
void Runtime1::print_statistics() {
tty->print_cr("C1 Runtime statistics:");
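
The comment block in the new Runtime1::is_instance_of stub records a real calling-convention pitfall: a C++ bool return may define only the low byte of the return register while the JIT'd caller reads the whole word. A minimal sketch of the safe pattern; the register-level behaviour is the scenario the comment describes, not something a portable snippet can demonstrate directly:

    #include <cassert>

    // Return int 0/1, never raw bool, across a boundary where the caller
    // consumes the whole return register.
    extern "C" int is_instance_of_stub(const void* klass, const void* obj) {
      bool result = (klass != nullptr) && (obj != nullptr);  // subtype check elided
      return result ? 1 : 0;  // explicit widening defines all 32 bits
    }

    int main() {
      int x = 0;
      assert(is_instance_of_stub(&x, &x) == 1);
      assert(is_instance_of_stub(&x, nullptr) == 0);
    }
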

@@ -186,6 +186,7 @@ class Runtime1: public AllStatic {
static int arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int dst_pos, int length);
static void primitive_arraycopy(HeapWord* src, HeapWord* dst, int length);
static void oop_arraycopy(HeapWord* src, HeapWord* dst, int length);
static int is_instance_of(oopDesc* mirror, oopDesc* obj);

static void print_statistics() PRODUCT_RETURN;
};

@@ -141,8 +141,11 @@ class ValueNumberingVisitor: public InstructionVisitor {

// visitor functions
void do_StoreField (StoreField* x) {
if (x->is_init_point()) {
// putstatic is an initialization point so treat it as a wide kill
if (x->is_init_point() || // putstatic is an initialization point so treat it as a wide kill
// This is actually too strict and the JMM doesn't require
// this in all cases (e.g. load a; volatile store b; load a)
// but possible future optimizations might require this.
x->field()->is_volatile()) {
kill_memory();
} else {
kill_field(x->field());
@@ -160,8 +163,8 @@ class ValueNumberingVisitor: public InstructionVisitor {
void do_Local (Local* x) { /* nothing to do */ }
void do_Constant (Constant* x) { /* nothing to do */ }
void do_LoadField (LoadField* x) {
if (x->is_init_point()) {
// getstatic is an initialization point so treat it as a wide kill
if (x->is_init_point() || // getstatic is an initialization point so treat it as a wide kill
x->field()->is_volatile()) { // the JMM requires this
kill_memory();
}
}
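
Both ValueNumberingVisitor hunks above widen the kill for volatile field accesses: an ordinary store invalidates only cached values of that field, while a volatile access, like an initialization point, must conservatively flush everything. A toy model of the two kill granularities, under the assumption that the value table maps field names to value numbers:

    #include <cassert>
    #include <map>
    #include <string>

    struct ValueMap {
      std::map<std::string, int> known;  // field name -> cached value number
      void kill_memory() { known.clear(); }                     // wide kill
      void kill_field(const std::string& f) { known.erase(f); } // narrow kill
      void on_store(const std::string& f, bool is_volatile, bool is_init_point) {
        if (is_init_point || is_volatile) kill_memory();  // conservative, as above
        else                              kill_field(f);
      }
    };

    int main() {
      ValueMap vm;
      vm.known = {{"a", 1}, {"b", 2}};
      vm.on_store("a", /*is_volatile=*/false, /*is_init_point=*/false);
      assert(vm.known.size() == 1);   // only "a" was killed
      vm.on_store("b", /*is_volatile=*/true, /*is_init_point=*/false);
      assert(vm.known.empty());       // volatile store killed everything
    }
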

@@ -2919,7 +2919,6 @@ int java_lang_AssertionStatusDirectives::packages_offset;
int java_lang_AssertionStatusDirectives::packageEnabled_offset;
int java_lang_AssertionStatusDirectives::deflt_offset;
int java_nio_Buffer::_limit_offset;
int sun_misc_AtomicLongCSImpl::_value_offset;
int java_util_concurrent_locks_AbstractOwnableSynchronizer::_owner_offset = 0;
int sun_reflect_ConstantPool::_cp_oop_offset;
int sun_reflect_UnsafeStaticFieldAccessorImpl::_base_offset;
@@ -2979,21 +2978,6 @@ void java_nio_Buffer::compute_offsets() {
compute_offset(_limit_offset, k, vmSymbols::limit_name(), vmSymbols::int_signature());
}

// Support for intrinsification of sun.misc.AtomicLongCSImpl.attemptUpdate
int sun_misc_AtomicLongCSImpl::value_offset() {
assert(SystemDictionary::AtomicLongCSImpl_klass() != NULL, "can't call this");
return _value_offset;
}


void sun_misc_AtomicLongCSImpl::compute_offsets() {
klassOop k = SystemDictionary::AtomicLongCSImpl_klass();
// If this class is not present, its value field offset won't be referenced.
if (k != NULL) {
compute_offset(_value_offset, k, vmSymbols::value_name(), vmSymbols::long_signature());
}
}

void java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(TRAPS) {
if (_owner_offset != 0) return;

@@ -3098,7 +3082,6 @@ void JavaClasses::compute_offsets() {
sun_reflect_ConstantPool::compute_offsets();
sun_reflect_UnsafeStaticFieldAccessorImpl::compute_offsets();
}
sun_misc_AtomicLongCSImpl::compute_offsets();

// generated interpreter code wants to know about the offsets we just computed:
AbstractAssembler::update_delayed_values();

@@ -1383,15 +1383,6 @@ class java_nio_Buffer: AllStatic {
static void compute_offsets();
};

class sun_misc_AtomicLongCSImpl: AllStatic {
private:
static int _value_offset;

public:
static int value_offset();
static void compute_offsets();
};

class java_util_concurrent_locks_AbstractOwnableSynchronizer : AllStatic {
private:
static int _owner_offset;

@@ -170,9 +170,6 @@ class SymbolPropertyTable;
/* It's okay if this turns out to be NULL in non-1.4 JDKs. */ \
template(nio_Buffer_klass, java_nio_Buffer, Opt) \
\
/* If this class isn't present, it won't be referenced. */ \
template(AtomicLongCSImpl_klass, sun_misc_AtomicLongCSImpl, Opt) \
\
template(DownloadManager_klass, sun_jkernel_DownloadManager, Opt_Kernel) \
\
template(PostVMInitHook_klass, sun_misc_PostVMInitHook, Opt) \

@@ -1738,10 +1738,14 @@ void ClassVerifier::verify_switch(
int target = bci + default_offset;
stackmap_table->check_jump_target(current_frame, target, CHECK_VERIFY(this));
for (int i = 0; i < keys; i++) {
// Because check_jump_target() may safepoint, the bytecode could have
// moved, which means 'aligned_bcp' is no good and needs to be recalculated.
aligned_bcp = (address)round_to((intptr_t)(bcs->bcp() + 1), jintSize);
target = bci + (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
stackmap_table->check_jump_target(
current_frame, target, CHECK_VERIFY(this));
}
NOT_PRODUCT(aligned_bcp = NULL); // no longer valid at this point
}

bool ClassVerifier::name_in_supers(
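
The verifier fix above re-derives aligned_bcp on every loop iteration because check_jump_target() can safepoint, and a safepoint may relocate the method's bytecode, leaving any cached pointer dangling. A tiny model of the rule, with the moving safepoint simulated by an explicit copy: recompute derived pointers from their base after any call that can move the underlying storage.

    #include <cassert>
    #include <cstring>

    static char storage_a[16] = "lookupswitch";
    static char storage_b[16];
    static char* bcp = storage_a;            // base pointer into movable storage

    static void safepoint_that_may_move() {  // stand-in for check_jump_target()
      std::memcpy(storage_b, storage_a, sizeof storage_a);
      bcp = storage_b;                       // the bytecode "moved"
    }

    int main() {
      for (int i = 0; i < 3; i++) {
        char* aligned_bcp = bcp;             // recompute from the base every pass
        assert(*aligned_bcp == 'l');         // a stale copy would now be invalid
        safepoint_that_may_move();
      }
    }
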

@@ -722,15 +722,6 @@
/* java/lang/ref/Reference */ \
do_intrinsic(_Reference_get, java_lang_ref_Reference, get_name, void_object_signature, F_R) \
\
\
do_class(sun_misc_AtomicLongCSImpl, "sun/misc/AtomicLongCSImpl") \
do_intrinsic(_get_AtomicLong, sun_misc_AtomicLongCSImpl, get_name, void_long_signature, F_R) \
/* (symbols get_name and void_long_signature defined above) */ \
\
do_intrinsic(_attemptUpdate, sun_misc_AtomicLongCSImpl, attemptUpdate_name, attemptUpdate_signature, F_R) \
do_name( attemptUpdate_name, "attemptUpdate") \
do_signature(attemptUpdate_signature, "(JJ)Z") \
\
/* support for sun.misc.Unsafe */ \
do_class(sun_misc_Unsafe, "sun/misc/Unsafe") \
\

@@ -293,7 +293,7 @@ void ConcurrentMarkThread::run() {
// Java thread is waiting for a full GC to happen (e.g., it
// called System.gc() with +ExplicitGCInvokesConcurrent).
_sts.join();
g1h->increment_full_collections_completed(true /* concurrent */);
g1h->increment_old_marking_cycles_completed(true /* concurrent */);
_sts.leave();
}
assert(_should_terminate, "just checking");

@@ -1299,6 +1299,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,

gc_prologue(true);
increment_total_collections(true /* full gc */);
increment_old_marking_cycles_started();

size_t g1h_prev_used = used();
assert(used() == recalculate_used(), "Should be equal");
@@ -1492,22 +1493,28 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
JavaThread::dirty_card_queue_set().abandon_logs();
assert(!G1DeferredRSUpdate
|| (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");

_young_list->reset_sampled_info();
// At this point there should be no regions in the
// entire heap tagged as young.
assert( check_young_list_empty(true /* check_heap */),
"young list should be empty at this point");

// Update the number of full collections that have been completed.
increment_old_marking_cycles_completed(false /* concurrent */);

_hrs.verify_optional();
verify_region_sets_optional();

print_heap_after_gc();

// We must call G1MonitoringSupport::update_sizes() in the same scoping level
// as an active TraceMemoryManagerStats object (i.e. before the destructor for the
// TraceMemoryManagerStats is called) so that the G1 memory pools are updated
// before any GC notifications are raised.
g1mm()->update_sizes();
}

_young_list->reset_sampled_info();
// At this point there should be no regions in the
// entire heap tagged as young.
assert( check_young_list_empty(true /* check_heap */),
"young list should be empty at this point");

// Update the number of full collections that have been completed.
increment_full_collections_completed(false /* concurrent */);

_hrs.verify_optional();
verify_region_sets_optional();

print_heap_after_gc();
g1mm()->update_sizes();
post_full_gc_dump();

return true;
@@ -1888,7 +1895,8 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_retained_old_gc_alloc_region(NULL),
_expand_heap_after_alloc_failure(true),
_surviving_young_words(NULL),
_full_collections_completed(0),
_old_marking_cycles_started(0),
_old_marking_cycles_completed(0),
_in_cset_fast_test(NULL),
_in_cset_fast_test_base(NULL),
_dirty_cards_region_list(NULL),
@@ -2360,7 +2368,16 @@ void G1CollectedHeap::allocate_dummy_regions() {
}
#endif // !PRODUCT

void G1CollectedHeap::increment_full_collections_completed(bool concurrent) {
void G1CollectedHeap::increment_old_marking_cycles_started() {
assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
_old_marking_cycles_started == _old_marking_cycles_completed + 1,
err_msg("Wrong marking cycle count (started: %d, completed: %d)",
_old_marking_cycles_started, _old_marking_cycles_completed));

_old_marking_cycles_started++;
}

void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);

// We assume that if concurrent == true, then the caller is a
@@ -2368,11 +2385,6 @@ void G1CollectedHeap::increment_full_collections_completed(bool concurrent) {
// Set. If there's ever a cheap way to check this, we should add an
// assert here.

// We have already incremented _total_full_collections at the start
// of the GC, so total_full_collections() represents how many full
// collections have been started.
unsigned int full_collections_started = total_full_collections();

// Given that this method is called at the end of a Full GC or of a
// concurrent cycle, and those can be nested (i.e., a Full GC can
// interrupt a concurrent cycle), the number of full collections
@@ -2382,21 +2394,21 @@ void G1CollectedHeap::increment_full_collections_completed(bool concurrent) {

// This is the case for the inner caller, i.e. a Full GC.
assert(concurrent ||
(full_collections_started == _full_collections_completed + 1) ||
(full_collections_started == _full_collections_completed + 2),
err_msg("for inner caller (Full GC): full_collections_started = %u "
"is inconsistent with _full_collections_completed = %u",
full_collections_started, _full_collections_completed));
(_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
(_old_marking_cycles_started == _old_marking_cycles_completed + 2),
err_msg("for inner caller (Full GC): _old_marking_cycles_started = %u "
"is inconsistent with _old_marking_cycles_completed = %u",
_old_marking_cycles_started, _old_marking_cycles_completed));

// This is the case for the outer caller, i.e. the concurrent cycle.
assert(!concurrent ||
(full_collections_started == _full_collections_completed + 1),
(_old_marking_cycles_started == _old_marking_cycles_completed + 1),
err_msg("for outer caller (concurrent cycle): "
"full_collections_started = %u "
"is inconsistent with _full_collections_completed = %u",
full_collections_started, _full_collections_completed));
"_old_marking_cycles_started = %u "
"is inconsistent with _old_marking_cycles_completed = %u",
_old_marking_cycles_started, _old_marking_cycles_completed));

_full_collections_completed += 1;
_old_marking_cycles_completed += 1;

// We need to clear the "in_progress" flag in the CM thread before
// we wake up any waiters (especially when ExplicitInvokesConcurrent
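
The renamed counters above carry an invariant worth spelling out: _old_marking_cycles_started may run at most two ahead of _old_marking_cycles_completed, since a Full GC can nest inside a concurrent cycle, and each completion closes the innermost open cycle. A toy model of that bookkeeping, mirroring the asserts in the hunk:

    #include <cassert>

    struct MarkingCycles {
      unsigned started = 0, completed = 0;
      void start() { assert(started - completed <= 1); started++; }
      void complete(bool concurrent) {
        // inner caller (Full GC) may see started == completed + 1 or + 2;
        // the outer concurrent cycle must see exactly + 1.
        assert(concurrent ? started == completed + 1
                          : started == completed + 1 || started == completed + 2);
        completed++;
      }
    };

    int main() {
      MarkingCycles c;
      c.start();          // concurrent cycle begins
      c.start();          // a Full GC interrupts it
      c.complete(false);  // inner Full GC finishes first
      c.complete(true);   // the concurrent cycle then notices and finishes
    }
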
@@ -2432,7 +2444,7 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
assert_heap_not_locked();

unsigned int gc_count_before;
unsigned int full_gc_count_before;
unsigned int old_marking_count_before;
bool retry_gc;

do {
@@ -2443,7 +2455,7 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {

// Read the GC count while holding the Heap_lock
gc_count_before = total_collections();
full_gc_count_before = total_full_collections();
old_marking_count_before = _old_marking_cycles_started;
}

if (should_do_concurrent_full_gc(cause)) {
@@ -2458,7 +2470,7 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {

VMThread::execute(&op);
if (!op.pause_succeeded()) {
if (full_gc_count_before == total_full_collections()) {
if (old_marking_count_before == _old_marking_cycles_started) {
retry_gc = op.should_retry_gc();
} else {
// A Full GC happened while we were trying to schedule the
@@ -2486,7 +2498,7 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
VMThread::execute(&op);
} else {
// Schedule a Full GC.
VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
VM_G1CollectFull op(gc_count_before, old_marking_count_before, cause);
VMThread::execute(&op);
}
}
@@ -3613,7 +3625,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
if (g1_policy()->during_initial_mark_pause()) {
// We are about to start a marking cycle, so we increment the
// full collection counter.
increment_total_full_collections();
increment_old_marking_cycles_started();
}
// if the log level is "finer" is on, we'll print long statistics information
// in the collector policy code, so let's not print this as the output
@@ -3930,26 +3942,31 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {

gc_epilogue(false);
}

// The closing of the inner scope, immediately above, will complete
// logging at the "fine" level. The record_collection_pause_end() call
// above will complete logging at the "finer" level.
//
// It is not yet safe, however, to tell the concurrent mark to
// start as we have some optional output below. We don't want the
// output from the concurrent mark thread interfering with this
// logging output either.

_hrs.verify_optional();
verify_region_sets_optional();

TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());

print_heap_after_gc();

// We must call G1MonitoringSupport::update_sizes() in the same scoping level
// as an active TraceMemoryManagerStats object (i.e. before the destructor for the
// TraceMemoryManagerStats is called) so that the G1 memory pools are updated
// before any GC notifications are raised.
g1mm()->update_sizes();
}

// The closing of the inner scope, immediately above, will complete
// logging at the "fine" level. The record_collection_pause_end() call
// above will complete logging at the "finer" level.
//
// It is not yet safe, however, to tell the concurrent mark to
// start as we have some optional output below. We don't want the
// output from the concurrent mark thread interfering with this
// logging output either.

_hrs.verify_optional();
verify_region_sets_optional();

TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());

print_heap_after_gc();
g1mm()->update_sizes();

if (G1SummarizeRSetStats &&
(G1SummarizeRSetStatsPeriod > 0) &&
(total_collections() % G1SummarizeRSetStatsPeriod == 0)) {

@@ -359,10 +359,13 @@ private:
// (c) cause == _g1_humongous_allocation
bool should_do_concurrent_full_gc(GCCause::Cause cause);

// Keeps track of how many "full collections" (i.e., Full GCs or
// concurrent cycles) we have completed. The number of them we have
// started is maintained in _total_full_collections in CollectedHeap.
volatile unsigned int _full_collections_completed;
// Keeps track of how many "old marking cycles" (i.e., Full GCs or
// concurrent cycles) we have started.
volatile unsigned int _old_marking_cycles_started;

// Keeps track of how many "old marking cycles" (i.e., Full GCs or
// concurrent cycles) we have completed.
volatile unsigned int _old_marking_cycles_completed;

// This is a non-product method that is helpful for testing. It is
// called at the end of a GC and artificially expands the heap by
@@ -673,8 +676,12 @@ public:
(size_t) _in_cset_fast_test_length * sizeof(bool));
}

// This is called at the start of either a concurrent cycle or a Full
// GC to update the number of old marking cycles started.
void increment_old_marking_cycles_started();

// This is called at the end of either a concurrent cycle or a Full
// GC to update the number of full collections completed. Those two
// GC to update the number of old marking cycles completed. Those two
// can happen in a nested fashion, i.e., we start a concurrent
// cycle, a Full GC happens half-way through it which ends first,
// and then the cycle notices that a Full GC happened and ends
@@ -683,14 +690,14 @@ public:
// false, the caller is the inner caller in the nesting (i.e., the
// Full GC). If concurrent is true, the caller is the outer caller
// in this nesting (i.e., the concurrent cycle). Further nesting is
// not currently supported. The end of the this call also notifies
// not currently supported. The end of this call also notifies
// the FullGCCount_lock in case a Java thread is waiting for a full
// GC to happen (e.g., it called System.gc() with
// +ExplicitGCInvokesConcurrent).
void increment_full_collections_completed(bool concurrent);
void increment_old_marking_cycles_completed(bool concurrent);

unsigned int full_collections_completed() {
return _full_collections_completed;
unsigned int old_marking_cycles_completed() {
return _old_marking_cycles_completed;
}

G1HRPrinter* hr_printer() { return &_hr_printer; }

@@ -64,7 +64,7 @@ VM_G1IncCollectionPause::VM_G1IncCollectionPause(
_should_initiate_conc_mark(should_initiate_conc_mark),
_target_pause_time_ms(target_pause_time_ms),
_should_retry_gc(false),
_full_collections_completed_before(0) {
_old_marking_cycles_completed_before(0) {
guarantee(target_pause_time_ms > 0.0,
err_msg("target_pause_time_ms = %1.6lf should be positive",
target_pause_time_ms));
@@ -112,11 +112,11 @@ void VM_G1IncCollectionPause::doit() {

GCCauseSetter x(g1h, _gc_cause);
if (_should_initiate_conc_mark) {
// It's safer to read full_collections_completed() here, given
// It's safer to read old_marking_cycles_completed() here, given
// that no one else will be updating it concurrently. Since we'll
// only need it if we're initiating a marking cycle, no point in
// setting it earlier.
_full_collections_completed_before = g1h->full_collections_completed();
_old_marking_cycles_completed_before = g1h->old_marking_cycles_completed();

// At this point we are supposed to start a concurrent cycle. We
// will do so if one is not already in progress.
@@ -181,17 +181,17 @@ void VM_G1IncCollectionPause::doit_epilogue() {

G1CollectedHeap* g1h = G1CollectedHeap::heap();

// In the doit() method we saved g1h->full_collections_completed()
// in the _full_collections_completed_before field. We have to
// wait until we observe that g1h->full_collections_completed()
// In the doit() method we saved g1h->old_marking_cycles_completed()
// in the _old_marking_cycles_completed_before field. We have to
// wait until we observe that g1h->old_marking_cycles_completed()
// has increased by at least one. This can happen if a) we started
// a cycle and it completes, b) a cycle already in progress
// completes, or c) a Full GC happens.

// If the condition has already been reached, there's no point in
// actually taking the lock and doing the wait.
if (g1h->full_collections_completed() <=
_full_collections_completed_before) {
if (g1h->old_marking_cycles_completed() <=
_old_marking_cycles_completed_before) {
// The following is largely copied from CMS

Thread* thr = Thread::current();
@@ -200,8 +200,8 @@ void VM_G1IncCollectionPause::doit_epilogue() {
ThreadToNativeFromVM native(jt);

MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
while (g1h->full_collections_completed() <=
_full_collections_completed_before) {
while (g1h->old_marking_cycles_completed() <=
_old_marking_cycles_completed_before) {
FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
}
}

@@ -80,7 +80,7 @@ private:
bool _should_initiate_conc_mark;
bool _should_retry_gc;
double _target_pause_time_ms;
unsigned int _full_collections_completed_before;
unsigned int _old_marking_cycles_completed_before;
public:
VM_G1IncCollectionPause(unsigned int gc_count_before,
size_t word_size,

@@ -844,6 +844,14 @@ nmethod* InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, addr
int bci = method->bci_from(fr.interpreter_frame_bcp());
nm = method->lookup_osr_nmethod_for(bci, CompLevel_none, false);
}
#ifndef PRODUCT
if (TraceOnStackReplacement) {
if (nm != NULL) {
tty->print("OSR entry @ pc: " INTPTR_FORMAT ": ", nm->osr_entry());
nm->print();
}
}
#endif
return nm;
}


@@ -230,7 +230,7 @@ void TreeList<Chunk>::return_chunk_at_tail(TreeChunk<Chunk>* chunk) {
link_tail(chunk);

assert(!tail() || size() == tail()->size(), "Wrong sized chunk in list");
FreeList<Chunk>::increment_count();
increment_count();
debug_only(increment_returned_bytes_by(chunk->size()*sizeof(HeapWord));)
assert(head() == NULL || head()->prev() == NULL, "list invariant");
assert(tail() == NULL || tail()->next() == NULL, "list invariant");
@@ -258,7 +258,7 @@ void TreeList<Chunk>::return_chunk_at_head(TreeChunk<Chunk>* chunk) {
}
head()->link_after(chunk);
assert(!head() || size() == head()->size(), "Wrong sized chunk in list");
FreeList<Chunk>::increment_count();
increment_count();
debug_only(increment_returned_bytes_by(chunk->size()*sizeof(HeapWord));)
assert(head() == NULL || head()->prev() == NULL, "list invariant");
assert(tail() == NULL || tail()->next() == NULL, "list invariant");
@@ -909,6 +909,7 @@ class TreeCensusClosure : public StackObj {

template <class Chunk>
class AscendTreeCensusClosure : public TreeCensusClosure<Chunk> {
using TreeCensusClosure<Chunk>::do_list;
public:
void do_tree(TreeList<Chunk>* tl) {
if (tl != NULL) {
@@ -921,6 +922,7 @@ class AscendTreeCensusClosure : public TreeCensusClosure<Chunk> {

template <class Chunk>
class DescendTreeCensusClosure : public TreeCensusClosure<Chunk> {
using TreeCensusClosure<Chunk>::do_list;
public:
void do_tree(TreeList<Chunk>* tl) {
if (tl != NULL) {
@@ -987,6 +989,7 @@ class AscendTreeSearchClosure : public TreeSearchClosure {

template <class Chunk>
class DescendTreeSearchClosure : public TreeSearchClosure<Chunk> {
using TreeSearchClosure<Chunk>::do_list;
public:
bool do_tree(TreeList<Chunk>* tl) {
if (tl != NULL) {

@@ -60,13 +60,18 @@ class TreeList: public FreeList<Chunk> {
TreeList<Chunk>* left() const { return _left; }
TreeList<Chunk>* right() const { return _right; }

// Wrapper on call to base class, to get the template to compile.
Chunk* head() const { return FreeList<Chunk>::head(); }
Chunk* tail() const { return FreeList<Chunk>::tail(); }
void set_head(Chunk* head) { FreeList<Chunk>::set_head(head); }
void set_tail(Chunk* tail) { FreeList<Chunk>::set_tail(tail); }
// Explicitly import these names into our namespace to fix name lookup with templates
using FreeList<Chunk>::head;
using FreeList<Chunk>::set_head;

size_t size() const { return FreeList<Chunk>::size(); }
using FreeList<Chunk>::tail;
using FreeList<Chunk>::set_tail;
using FreeList<Chunk>::link_tail;

using FreeList<Chunk>::increment_count;
NOT_PRODUCT(using FreeList<Chunk>::increment_returned_bytes_by;)
using FreeList<Chunk>::verify_chunk_in_free_list;
using FreeList<Chunk>::size;

// Accessors for links in tree.
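
The TreeList hunks above replace hand-written forwarding wrappers with using-declarations. The underlying C++ rule: unqualified names from a dependent base class are not found by name lookup inside a class template, so they must be imported explicitly (or spelled this->name). A minimal reproduction of the problem and the fix:

    template <class Chunk> struct FreeList {
      int _count = 0;
      void increment_count() { ++_count; }
    };

    template <class Chunk> struct TreeList : FreeList<Chunk> {
      using FreeList<Chunk>::increment_count;   // imports the dependent base name
      // Without the using-declaration, the call below would not compile as an
      // unqualified name; it would have to be this->increment_count().
      void return_chunk() { increment_count(); }
    };

    int main() {
      TreeList<int> t;
      t.return_chunk();
      return t._count == 1 ? 0 : 1;
    }
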


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -80,7 +80,7 @@ constMethodOop constMethodKlass::allocate(int byte_code_size,
No_Safepoint_Verifier no_safepoint;
cm->set_interpreter_kind(Interpreter::invalid);
cm->init_fingerprint();
cm->set_method(NULL);
cm->set_constants(NULL);
cm->set_stackmap_data(NULL);
cm->set_exception_table(NULL);
cm->set_code_size(byte_code_size);
@@ -98,7 +98,7 @@ constMethodOop constMethodKlass::allocate(int byte_code_size,
void constMethodKlass::oop_follow_contents(oop obj) {
assert (obj->is_constMethod(), "object must be constMethod");
constMethodOop cm = constMethodOop(obj);
MarkSweep::mark_and_push(cm->adr_method());
MarkSweep::mark_and_push(cm->adr_constants());
MarkSweep::mark_and_push(cm->adr_stackmap_data());
MarkSweep::mark_and_push(cm->adr_exception_table());
// Performance tweak: We skip iterating over the klass pointer since we
@@ -110,7 +110,7 @@ void constMethodKlass::oop_follow_contents(ParCompactionManager* cm,
oop obj) {
assert (obj->is_constMethod(), "object must be constMethod");
constMethodOop cm_oop = constMethodOop(obj);
PSParallelCompact::mark_and_push(cm, cm_oop->adr_method());
PSParallelCompact::mark_and_push(cm, cm_oop->adr_constants());
PSParallelCompact::mark_and_push(cm, cm_oop->adr_stackmap_data());
PSParallelCompact::mark_and_push(cm, cm_oop->adr_exception_table());
// Performance tweak: We skip iterating over the klass pointer since we
@@ -121,7 +121,7 @@ void constMethodKlass::oop_follow_contents(ParCompactionManager* cm,
int constMethodKlass::oop_oop_iterate(oop obj, OopClosure* blk) {
assert (obj->is_constMethod(), "object must be constMethod");
constMethodOop cm = constMethodOop(obj);
blk->do_oop(cm->adr_method());
blk->do_oop(cm->adr_constants());
blk->do_oop(cm->adr_stackmap_data());
blk->do_oop(cm->adr_exception_table());
// Get size before changing pointers.
@@ -135,7 +135,7 @@ int constMethodKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr)
assert (obj->is_constMethod(), "object must be constMethod");
constMethodOop cm = constMethodOop(obj);
oop* adr;
adr = cm->adr_method();
adr = cm->adr_constants();
if (mr.contains(adr)) blk->do_oop(adr);
adr = cm->adr_stackmap_data();
if (mr.contains(adr)) blk->do_oop(adr);
@@ -153,7 +153,7 @@ int constMethodKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr)
int constMethodKlass::oop_adjust_pointers(oop obj) {
assert(obj->is_constMethod(), "should be constMethod");
constMethodOop cm = constMethodOop(obj);
MarkSweep::adjust_pointer(cm->adr_method());
MarkSweep::adjust_pointer(cm->adr_constants());
MarkSweep::adjust_pointer(cm->adr_stackmap_data());
MarkSweep::adjust_pointer(cm->adr_exception_table());
// Get size before changing pointers.
@@ -188,8 +188,8 @@ void constMethodKlass::oop_print_on(oop obj, outputStream* st) {
assert(obj->is_constMethod(), "must be constMethod");
Klass::oop_print_on(obj, st);
constMethodOop m = constMethodOop(obj);
st->print(" - method: " INTPTR_FORMAT " ", (address)m->method());
m->method()->print_value_on(st); st->cr();
st->print(" - constants: " INTPTR_FORMAT " ", (address)m->constants());
m->constants()->print_value_on(st); st->cr();
st->print(" - exceptions: " INTPTR_FORMAT "\n", (address)m->exception_table());
if (m->has_stackmap_table()) {
st->print(" - stackmap data: ");
@@ -223,8 +223,8 @@ void constMethodKlass::oop_verify_on(oop obj, outputStream* st) {
// Verification can occur during oop construction before the method or
// other fields have been initialized.
if (!obj->partially_loaded()) {
guarantee(m->method()->is_perm(), "should be in permspace");
guarantee(m->method()->is_method(), "should be method");
guarantee(m->constants()->is_perm(), "should be in permspace");
guarantee(m->constants()->is_constantPool(), "should be constant pool");
typeArrayOop stackmap_data = m->stackmap_data();
guarantee(stackmap_data == NULL ||
stackmap_data->is_perm(), "should be in permspace");

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -53,6 +53,10 @@ int constMethodOopDesc::object_size(int code_size,
return align_object_size(header_size() + extra_words);
}

methodOop constMethodOopDesc::method() const {
return instanceKlass::cast(_constants->pool_holder())->method_with_idnum(
_method_idnum);
}

// linenumber table - note that length is unknown until decompression,
// see class CompressedLineNumberReadStream.
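
The new constMethodOopDesc::method() above replaces the stored backpointer with a computed one: follow _constants to its pool holder klass and index that klass's method array with _method_idnum. A toy model of the indirection; every type here is a stand-in:

    #include <cassert>
    #include <vector>

    struct Method;
    struct Klass { std::vector<Method*> methods; };      // pool holder
    struct ConstantPool { Klass* pool_holder; };
    struct ConstMethod {
      ConstantPool* _constants;
      int _method_idnum;                                 // index, not a pointer
      Method* method() const {
        return _constants->pool_holder->methods[_method_idnum];
      }
    };

    int main() {
      Method* m = reinterpret_cast<Method*>(0x1000);     // opaque stand-in
      Klass k{{m}};
      ConstantPool cp{&k};
      ConstMethod cm{&cp, 0};
      assert(cm.method() == m);  // same method, one less stored oop to update
    }
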

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,7 @@
// |------------------------------------------------------|
// | fingerprint 1 |
// | fingerprint 2 |
// | method (oop) |
// | constants (oop) |
// | stackmap_data (oop) |
// | exception_table (oop) |
// | constMethod_size |
@@ -113,7 +113,7 @@ private:
volatile bool _is_conc_safe; // if true, safe for concurrent GC processing

public:
oop* oop_block_beg() const { return adr_method(); }
oop* oop_block_beg() const { return adr_constants(); }
oop* oop_block_end() const { return adr_exception_table() + 1; }

private:
@@ -121,8 +121,7 @@ private:
// The oop block. See comment in klass.hpp before making changes.
//

// Backpointer to non-const methodOop (needed for some JVMTI operations)
methodOop _method;
constantPoolOop _constants; // Constant pool

// Raw stackmap data for the method
typeArrayOop _stackmap_data;
@@ -167,10 +166,13 @@ public:
void set_interpreter_kind(int kind) { _interpreter_kind = kind; }
int interpreter_kind(void) const { return _interpreter_kind; }

// backpointer to non-const methodOop
methodOop method() const { return _method; }
void set_method(methodOop m) { oop_store_without_check((oop*)&_method, (oop) m); }
// constant pool
constantPoolOop constants() const { return _constants; }
void set_constants(constantPoolOop c) {
oop_store_without_check((oop*)&_constants, (oop)c);
}

methodOop method() const;

// stackmap table data
typeArrayOop stackmap_data() const { return _stackmap_data; }
@@ -278,11 +280,13 @@ public:
{ return in_ByteSize(sizeof(constMethodOopDesc)); }

// interpreter support
static ByteSize constants_offset()
{ return byte_offset_of(constMethodOopDesc, _constants); }
static ByteSize exception_table_offset()
{ return byte_offset_of(constMethodOopDesc, _exception_table); }

// Garbage collection support
oop* adr_method() const { return (oop*)&_method; }
oop* adr_constants() const { return (oop*)&_constants; }
oop* adr_stackmap_data() const { return (oop*)&_stackmap_data; }
oop* adr_exception_table() const { return (oop*)&_exception_table; }
bool is_conc_safe() { return _is_conc_safe; }

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -112,11 +112,6 @@ methodOop methodKlass::allocate(constMethodHandle xconst,

assert(m->is_parsable(), "must be parsable here.");
assert(m->size() == size, "wrong size for object");
// We should not publish an uprasable object's reference
// into one that is parsable, since that presents problems
// for the concurrent parallel marking and precleaning phases
// of concurrent gc (CMS).
xconst->set_method(m);
return m;
}

@@ -127,7 +122,6 @@ void methodKlass::oop_follow_contents(oop obj) {
// Performance tweak: We skip iterating over the klass pointer since we
// know that Universe::methodKlassObj never moves.
MarkSweep::mark_and_push(m->adr_constMethod());
MarkSweep::mark_and_push(m->adr_constants());
if (m->method_data() != NULL) {
MarkSweep::mark_and_push(m->adr_method_data());
}
@@ -141,7 +135,6 @@ void methodKlass::oop_follow_contents(ParCompactionManager* cm,
// Performance tweak: We skip iterating over the klass pointer since we
// know that Universe::methodKlassObj never moves.
PSParallelCompact::mark_and_push(cm, m->adr_constMethod());
PSParallelCompact::mark_and_push(cm, m->adr_constants());
#ifdef COMPILER2
if (m->method_data() != NULL) {
PSParallelCompact::mark_and_push(cm, m->adr_method_data());
@@ -159,7 +152,6 @@ int methodKlass::oop_oop_iterate(oop obj, OopClosure* blk) {
// Performance tweak: We skip iterating over the klass pointer since we
// know that Universe::methodKlassObj never moves
blk->do_oop(m->adr_constMethod());
blk->do_oop(m->adr_constants());
if (m->method_data() != NULL) {
blk->do_oop(m->adr_method_data());
}
@@ -178,8 +170,6 @@ int methodKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) {
oop* adr;
adr = m->adr_constMethod();
if (mr.contains(adr)) blk->do_oop(adr);
adr = m->adr_constants();
if (mr.contains(adr)) blk->do_oop(adr);
if (m->method_data() != NULL) {
adr = m->adr_method_data();
if (mr.contains(adr)) blk->do_oop(adr);
@@ -197,7 +187,6 @@ int methodKlass::oop_adjust_pointers(oop obj) {
// Performance tweak: We skip iterating over the klass pointer since we
// know that Universe::methodKlassObj never moves.
MarkSweep::adjust_pointer(m->adr_constMethod());
MarkSweep::adjust_pointer(m->adr_constants());
if (m->method_data() != NULL) {
MarkSweep::adjust_pointer(m->adr_method_data());
}
@@ -213,7 +202,6 @@ int methodKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
assert(obj->is_method(), "should be method");
methodOop m = methodOop(obj);
PSParallelCompact::adjust_pointer(m->adr_constMethod());
PSParallelCompact::adjust_pointer(m->adr_constants());
#ifdef COMPILER2
if (m->method_data() != NULL) {
PSParallelCompact::adjust_pointer(m->adr_method_data());
@@ -339,8 +327,6 @@ void methodKlass::oop_verify_on(oop obj, outputStream* st) {
if (!obj->partially_loaded()) {
methodOop m = methodOop(obj);
guarantee(m->is_perm(), "should be in permspace");
guarantee(m->constants()->is_perm(), "should be in permspace");
guarantee(m->constants()->is_constantPool(), "should be constant pool");
guarantee(m->constMethod()->is_constMethod(), "should be constMethodOop");
guarantee(m->constMethod()->is_perm(), "should be in permspace");
methodDataOop method_data = m->method_data();

@@ -70,11 +70,11 @@ address methodOopDesc::get_c2i_unverified_entry() {
return _adapter->get_c2i_unverified_entry();
}

char* methodOopDesc::name_and_sig_as_C_string() {
char* methodOopDesc::name_and_sig_as_C_string() const {
return name_and_sig_as_C_string(Klass::cast(constants()->pool_holder()), name(), signature());
}

char* methodOopDesc::name_and_sig_as_C_string(char* buf, int size) {
char* methodOopDesc::name_and_sig_as_C_string(char* buf, int size) const {
return name_and_sig_as_C_string(Klass::cast(constants()->pool_holder()), name(), signature(), buf, size);
}

@@ -177,7 +177,8 @@ void methodOopDesc::mask_for(int bci, InterpreterOopMap* mask) {


int methodOopDesc::bci_from(address bcp) const {
assert(is_native() && bcp == code_base() || contains(bcp) || is_error_reported(), "bcp doesn't belong to this method");
assert(is_native() && bcp == code_base() || contains(bcp) || is_error_reported(),
err_msg("bcp doesn't belong to this method: bcp: " INTPTR_FORMAT ", method: %s", bcp, name_and_sig_as_C_string()));
return bcp - code_base();
}

@@ -531,9 +532,9 @@ int methodOopDesc::line_number_from_bci(int bci) const {


bool methodOopDesc::is_klass_loaded_by_klass_index(int klass_index) const {
if( _constants->tag_at(klass_index).is_unresolved_klass() ) {
if( constants()->tag_at(klass_index).is_unresolved_klass() ) {
Thread *thread = Thread::current();
Symbol* klass_name = _constants->klass_name_at(klass_index);
Symbol* klass_name = constants()->klass_name_at(klass_index);
Handle loader(thread, instanceKlass::cast(method_holder())->class_loader());
Handle prot (thread, Klass::cast(method_holder())->protection_domain());
return SystemDictionary::find(klass_name, loader, prot, thread) != NULL;
@@ -544,7 +545,7 @@ bool methodOopDesc::is_klass_loaded_by_klass_index(int klass_index) const {


bool methodOopDesc::is_klass_loaded(int refinfo_index, bool must_be_resolved) const {
int klass_index = _constants->klass_ref_index_at(refinfo_index);
int klass_index = constants()->klass_ref_index_at(refinfo_index);
if (must_be_resolved) {
// Make sure klass is resolved in constantpool.
if (constants()->tag_at(klass_index).is_unresolved_klass()) return false;
@@ -886,11 +887,13 @@ oop methodOopDesc::method_handle_type() const {
}

jint* methodOopDesc::method_type_offsets_chain() {
static jint pchase[] = { -1, -1, -1 };
static jint pchase[] = { -1, -1, -1, -1 };
if (pchase[0] == -1) {
jint step0 = in_bytes(constants_offset());
jint step1 = (constantPoolOopDesc::header_size() + _imcp_method_type_value) * HeapWordSize;
jint step0 = in_bytes(const_offset());
jint step1 = in_bytes(constMethodOopDesc::constants_offset());
jint step2 = (constantPoolOopDesc::header_size() + _imcp_method_type_value) * HeapWordSize;
// do this in reverse to avoid races:
OrderAccess::release_store(&pchase[2], step2);
OrderAccess::release_store(&pchase[1], step1);
OrderAccess::release_store(&pchase[0], step0);
}
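
method_type_offsets_chain() above grows from two steps to three because _constants moved from methodOop onto constMethodOop: the dereference chain is now method -> constMethod -> constants -> slot. A standalone model of chasing such a byte-offset chain; the layout below is fabricated purely for illustration:

    #include <cassert>

    // Follow one pointer per entry; each offset is in bytes from the current base.
    static void* chase(void* p, const int* offsets, int n) {
      for (int i = 0; i < n; i++) {
        char* base = static_cast<char*>(p);
        p = *reinterpret_cast<void**>(base + offsets[i]);
      }
      return p;
    }

    int main() {
      int method_type = 42;                        // value at the end of the chain
      void* pool[2]   = { nullptr, &method_type }; // "constants", payload in slot 1
      void* cmeth[1]  = { pool };                  // constMethod -> constants
      void* method[1] = { cmeth };                 // methodOop  -> constMethod
      const int offs[] = { 0, 0, (int)sizeof(void*) };  // three hops, not two
      assert(chase(method, offs, 3) == &method_type);
    }
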
@@ -1076,9 +1079,7 @@ methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_cod
assert(m->constMethod()->is_parsable(), "Should remain parsable");

// Reset correct method/const method, method size, and parameter info
newcm->set_method(newm());
newm->set_constMethod(newcm);
assert(newcm->method() == newm(), "check");
newm->constMethod()->set_code_size(new_code_length);
newm->constMethod()->set_constMethod_size(new_const_method_size);
newm->set_method_size(new_method_size);

@@ -64,7 +64,6 @@
// | klass |
// |------------------------------------------------------|
// | constMethodOop (oop) |
// | constants (oop) |
// |------------------------------------------------------|
// | methodData (oop) |
// | interp_invocation_count |
@@ -110,7 +109,6 @@ class methodOopDesc : public oopDesc {
friend class VMStructs;
private:
constMethodOop _constMethod; // Method read-only data.
constantPoolOop _constants; // Constant pool
methodDataOop _method_data;
int _interpreter_invocation_count; // Count of times invoked (reused as prev_event_count in tiered)
AccessFlags _access_flags; // Access flags
@@ -170,17 +168,17 @@ class methodOopDesc : public oopDesc {
void set_access_flags(AccessFlags flags) { _access_flags = flags; }

// name
Symbol* name() const { return _constants->symbol_at(name_index()); }
Symbol* name() const { return constants()->symbol_at(name_index()); }
int name_index() const { return constMethod()->name_index(); }
void set_name_index(int index) { constMethod()->set_name_index(index); }

// signature
Symbol* signature() const { return _constants->symbol_at(signature_index()); }
Symbol* signature() const { return constants()->symbol_at(signature_index()); }
int signature_index() const { return constMethod()->signature_index(); }
void set_signature_index(int index) { constMethod()->set_signature_index(index); }

// generics support
Symbol* generic_signature() const { int idx = generic_signature_index(); return ((idx != 0) ? _constants->symbol_at(idx) : (Symbol*)NULL); }
Symbol* generic_signature() const { int idx = generic_signature_index(); return ((idx != 0) ? constants()->symbol_at(idx) : (Symbol*)NULL); }
int generic_signature_index() const { return constMethod()->generic_signature_index(); }
void set_generic_signature_index(int index) { constMethod()->set_generic_signature_index(index); }

@@ -198,8 +196,8 @@ class methodOopDesc : public oopDesc {
// C string, for the purpose of providing more useful NoSuchMethodErrors
// and fatal error handling. The string is allocated in resource
// area if a buffer is not provided by the caller.
char* name_and_sig_as_C_string();
char* name_and_sig_as_C_string(char* buf, int size);
char* name_and_sig_as_C_string() const;
char* name_and_sig_as_C_string(char* buf, int size) const;

// Static routine in the situations we don't have a methodOop
static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature);
@@ -242,8 +240,8 @@ class methodOopDesc : public oopDesc {
}

// constant pool for klassOop holding this method
constantPoolOop constants() const { return _constants; }
void set_constants(constantPoolOop c) { oop_store_without_check((oop*)&_constants, c); }
constantPoolOop constants() const { return constMethod()->constants(); }
void set_constants(constantPoolOop c) { constMethod()->set_constants(c); }

// max stack
int max_stack() const { return _max_stack; }
@@ -453,7 +451,7 @@ class methodOopDesc : public oopDesc {
{ return constMethod()->compressed_linenumber_table(); }

// method holder (the klassOop holding this method)
klassOop method_holder() const { return _constants->pool_holder(); }
klassOop method_holder() const { return constants()->pool_holder(); }

void compute_size_of_parameters(Thread *thread); // word size of parameters (receiver if any + arguments)
Symbol* klass_name() const; // returns the name of the method holder
@@ -544,7 +542,6 @@ class methodOopDesc : public oopDesc {

// interpreter support
static ByteSize const_offset() { return byte_offset_of(methodOopDesc, _constMethod ); }
static ByteSize constants_offset() { return byte_offset_of(methodOopDesc, _constants ); }
static ByteSize access_flags_offset() { return byte_offset_of(methodOopDesc, _access_flags ); }
#ifdef CC_INTERP
static ByteSize result_index_offset() { return byte_offset_of(methodOopDesc, _result_index ); }
@@ -723,7 +720,6 @@ class methodOopDesc : public oopDesc {

// Garbage collection support
oop* adr_constMethod() const { return (oop*)&_constMethod; }
oop* adr_constants() const { return (oop*)&_constants; }
oop* adr_method_data() const { return (oop*)&_method_data; }
};


@@ -147,7 +147,6 @@ macro(LoadNKlass)
macro(LoadL)
macro(LoadL_unaligned)
macro(LoadPLocked)
macro(LoadLLocked)
macro(LoadP)
macro(LoadN)
macro(LoadRange)

@@ -2297,7 +2297,6 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) {
case Op_LoadL:
case Op_LoadL_unaligned:
case Op_LoadPLocked:
case Op_LoadLLocked:
case Op_LoadP:
case Op_LoadN:
case Op_LoadRange:

@@ -284,9 +284,14 @@ static Node* long_by_long_mulhi(PhaseGVN* phase, Node* dividend, jlong magic_con

const int N = 64;

// Dummy node to keep intermediate nodes alive during construction
Node* hook = new (phase->C, 4) Node(4);

// u0 = u & 0xFFFFFFFF; u1 = u >> 32;
Node* u0 = phase->transform(new (phase->C, 3) AndLNode(dividend, phase->longcon(0xFFFFFFFF)));
Node* u1 = phase->transform(new (phase->C, 3) RShiftLNode(dividend, phase->intcon(N / 2)));
hook->init_req(0, u0);
hook->init_req(1, u1);

// v0 = v & 0xFFFFFFFF; v1 = v >> 32;
Node* v0 = phase->longcon(magic_const & 0xFFFFFFFF);
@@ -299,19 +304,14 @@ static Node* long_by_long_mulhi(PhaseGVN* phase, Node* dividend, jlong magic_con
Node* u1v0 = phase->transform(new (phase->C, 3) MulLNode(u1, v0));
Node* temp = phase->transform(new (phase->C, 3) URShiftLNode(w0, phase->intcon(N / 2)));
Node* t = phase->transform(new (phase->C, 3) AddLNode(u1v0, temp));
hook->init_req(2, t);

// w1 = t & 0xFFFFFFFF;
Node* w1 = new (phase->C, 3) AndLNode(t, phase->longcon(0xFFFFFFFF));
Node* w1 = phase->transform(new (phase->C, 3) AndLNode(t, phase->longcon(0xFFFFFFFF)));
hook->init_req(3, w1);

// w2 = t >> 32;
Node* w2 = new (phase->C, 3) RShiftLNode(t, phase->intcon(N / 2));

// 6732154: Construct both w1 and w2 before transforming, so t
// doesn't go dead prematurely.
// 6837011: We need to transform w2 before w1 because the
// transformation of w1 could return t.
w2 = phase->transform(w2);
w1 = phase->transform(w1);
Node* w2 = phase->transform(new (phase->C, 3) RShiftLNode(t, phase->intcon(N / 2)));

// w1 = u0*v1 + w1;
Node* u0v1 = phase->transform(new (phase->C, 3) MulLNode(u0, v1));
@@ -322,6 +322,16 @@ static Node* long_by_long_mulhi(PhaseGVN* phase, Node* dividend, jlong magic_con
Node* temp1 = phase->transform(new (phase->C, 3) AddLNode(u1v1, w2));
Node* temp2 = phase->transform(new (phase->C, 3) RShiftLNode(w1, phase->intcon(N / 2)));

// Remove the bogus extra edges used to keep things alive
PhaseIterGVN* igvn = phase->is_IterGVN();
if (igvn != NULL) {
igvn->remove_dead_node(hook);
} else {
for (int i = 0; i < 4; i++) {
hook->set_req(i, NULL);
}
}

return new (phase->C, 3) AddLNode(temp1, temp2);
}
|
||||
|
||||
|
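The graph built above encodes the standard signed high-word multiply from 32-bit halves (Hacker's Delight's mulhs). A minimal standalone C++ sketch of the same arithmetic, for reference; the names mirror the node names but this is an illustration, not HotSpot code, and the reference check relies on the GCC/Clang __int128 extension:

    #include <cstdint>
    #include <cstdio>

    // Same steps as the node graph: split both operands into 32-bit halves,
    // form the partial products, and propagate the carries into the high word.
    int64_t mulhi_s64(int64_t u, int64_t v) {
      const int N = 64;
      int64_t u0 = u & 0xFFFFFFFFLL;   // AndLNode: low half, zero-extended
      int64_t u1 = u >> (N / 2);       // RShiftLNode: high half, sign-extended
      int64_t v0 = v & 0xFFFFFFFFLL;
      int64_t v1 = v >> (N / 2);

      int64_t w0 = u0 * v0;                                       // MulLNode
      int64_t t  = u1 * v0 + (int64_t)((uint64_t)w0 >> (N / 2));  // URShiftLNode on w0
      int64_t w1 = t & 0xFFFFFFFFLL;
      int64_t w2 = t >> (N / 2);

      w1 = u0 * v1 + w1;
      return u1 * v1 + w2 + (w1 >> (N / 2));
    }

    int main() {
      int64_t a = 0x123456789ABCDEF0LL, b = -42424242424242LL;
      printf("mulhi = %lld, reference = %lld\n",
             (long long)mulhi_s64(a, b),
             (long long)(int64_t)(((__int128)a * b) >> 64));  // GCC/Clang extension
    }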
@ -465,15 +465,11 @@ void PhaseIdealLoop::Dominators() {
        // Kill dead input path
        assert( !visited.test(whead->in(i)->_idx),
                "input with no loop must be dead" );
        _igvn.hash_delete(whead);
        whead->del_req(i);
        _igvn._worklist.push(whead);
        _igvn.delete_input_of(whead, i);
        for (DUIterator_Fast jmax, j = whead->fast_outs(jmax); j < jmax; j++) {
          Node* p = whead->fast_out(j);
          if( p->is_Phi() ) {
            _igvn.hash_delete(p);
            p->del_req(i);
            _igvn._worklist.push(p);
            _igvn.delete_input_of(p, i);
          }
        }
        i--; // Rerun same iteration

@ -338,8 +338,7 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
  Node *phi_f = NULL; // do not construct unless needed
  for (DUIterator_Last i2min, i2 = phi->last_outs(i2min); i2 >= i2min; --i2) {
    Node* v = phi->last_out(i2);// User of the phi
    igvn->hash_delete(v); // Have to fixup other Phi users
    igvn->_worklist.push(v);
    igvn->rehash_node_delayed(v); // Have to fixup other Phi users
    uint vop = v->Opcode();
    Node *proj = NULL;
    if( vop == Op_Phi ) { // Remote merge point
@ -552,9 +551,8 @@ static void adjust_check(Node* proj, Node* range, Node* index,
  if( new_cmp == cmp ) return;
  // Else, adjust existing check
  Node *new_bol = gvn->transform( new (gvn->C, 2) BoolNode( new_cmp, bol->as_Bool()->_test._test ) );
  igvn->hash_delete( iff );
  igvn->rehash_node_delayed( iff );
  iff->set_req_X( 1, new_bol, igvn );
  igvn->_worklist.push( iff );
}

//------------------------------up_one_dom-------------------------------------
@ -732,9 +730,7 @@ Node* IfNode::fold_compares(PhaseGVN* phase) {
      Node* adjusted = phase->transform(new (phase->C, 3) SubINode(n, phase->intcon(failtype->_lo)));
      Node* newcmp = phase->transform(new (phase->C, 3) CmpUNode(adjusted, phase->intcon(bound)));
      Node* newbool = phase->transform(new (phase->C, 2) BoolNode(newcmp, cond));
      phase->hash_delete(dom_iff);
      dom_iff->set_req(1, phase->intcon(ctrl->as_Proj()->_con));
      phase->is_IterGVN()->_worklist.push(dom_iff);
      phase->is_IterGVN()->replace_input_of(dom_iff, 1, phase->intcon(ctrl->as_Proj()->_con));
      phase->hash_delete(this);
      set_req(1, newbool);
      return this;
@ -1042,17 +1038,15 @@ void IfNode::dominated_by( Node *prev_dom, PhaseIterGVN *igvn ) {
    // Loop ends when projection has no more uses.
    for (DUIterator_Last jmin, j = ifp->last_outs(jmin); j >= jmin; --j) {
      Node* s = ifp->last_out(j); // Get child of IfTrue/IfFalse
      igvn->hash_delete(s); // Yank from hash table before edge hacking
      if( !s->depends_only_on_test() ) {
        // Find the control input matching this def-use edge.
        // For Regions it may not be in slot 0.
        uint l;
        for( l = 0; s->in(l) != ifp; l++ ) { }
        s->set_req(l, ctrl_target);
        igvn->replace_input_of(s, l, ctrl_target);
      } else { // Else, for control producers,
        s->set_req(0, data_target); // Move child to data-target
        igvn->replace_input_of(s, 0, data_target); // Move child to data-target
      }
      igvn->_worklist.push(s); // Revisit collapsed Phis
    } // End for each child of a projection

    igvn->remove_dead_node(ifp);

@ -192,8 +192,6 @@ class LibraryCallKit : public GraphKit {
  void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
  bool inline_native_clone(bool is_virtual);
  bool inline_native_Reflection_getCallerClass();
  bool inline_native_AtomicLong_get();
  bool inline_native_AtomicLong_attemptUpdate();
  bool is_method_invoke_or_aux_frame(JVMState* jvms);
  // Helper function for inlining native object hash method
  bool inline_native_hashcode(bool is_virtual, bool is_static);
@ -331,11 +329,6 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
    // We do not intrinsify this. The optimizer does fine with it.
    return NULL;

  case vmIntrinsics::_get_AtomicLong:
  case vmIntrinsics::_attemptUpdate:
    if (!InlineAtomicLong) return NULL;
    break;

  case vmIntrinsics::_getCallerClass:
    if (!UseNewReflection) return NULL;
    if (!InlineReflectionGetCallerClass) return NULL;
@ -711,11 +704,6 @@ bool LibraryCallKit::try_to_inline() {
  case vmIntrinsics::_reverseBytes_c:
    return inline_reverseBytes((vmIntrinsics::ID) intrinsic_id());

  case vmIntrinsics::_get_AtomicLong:
    return inline_native_AtomicLong_get();
  case vmIntrinsics::_attemptUpdate:
    return inline_native_AtomicLong_attemptUpdate();

  case vmIntrinsics::_getCallerClass:
    return inline_native_Reflection_getCallerClass();

@ -4006,113 +3994,6 @@ bool LibraryCallKit::is_method_invoke_or_aux_frame(JVMState* jvms) {
  return false;
}

static int value_field_offset = -1; // offset of the "value" field of AtomicLongCSImpl. This is needed by
// inline_native_AtomicLong_attemptUpdate() but it has no way of
// computing it since there is no lookup field by name function in the
// CI interface. This is computed and set by inline_native_AtomicLong_get().
// Using a static variable here is safe even if we have multiple compilation
// threads because the offset is constant. At worst the same offset will be
// computed and stored multiple times.

bool LibraryCallKit::inline_native_AtomicLong_get() {
  // Restore the stack and pop off the argument
  _sp+=1;
  Node *obj = pop();

  // get the offset of the "value" field. Since the CI interfaces
  // does not provide a way to look up a field by name, we scan the bytecodes
  // to get the field index. We expect the first 2 instructions of the method
  // to be:
  // 0 aload_0
  // 1 getfield "value"
  ciMethod* method = callee();
  if (value_field_offset == -1)
  {
    ciField* value_field;
    ciBytecodeStream iter(method);
    Bytecodes::Code bc = iter.next();

    if ((bc != Bytecodes::_aload_0) &&
        ((bc != Bytecodes::_aload) || (iter.get_index() != 0)))
      return false;
    bc = iter.next();
    if (bc != Bytecodes::_getfield)
      return false;
    bool ignore;
    value_field = iter.get_field(ignore);
    value_field_offset = value_field->offset_in_bytes();
  }

  // Null check without removing any arguments.
  _sp++;
  obj = do_null_check(obj, T_OBJECT);
  _sp--;
  // Check for locking null object
  if (stopped()) return true;

  Node *adr = basic_plus_adr(obj, obj, value_field_offset);
  const TypePtr *adr_type = _gvn.type(adr)->is_ptr();
  int alias_idx = C->get_alias_index(adr_type);

  Node *result = _gvn.transform(new (C, 3) LoadLLockedNode(control(), memory(alias_idx), adr));

  push_pair(result);

  return true;
}
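The removed intrinsic found the field offset from the shape of the bytecodes rather than from a lookup-by-name API. A self-contained sketch of that check on raw bytecodes follows; the opcode values are from the JVM specification, and the function is my own illustration, not HotSpot's ci interface:

    #include <cstdint>
    #include <cstdio>
    #include <cstddef>

    // JVM opcodes: aload_0 = 0x2a, getfield = 0xb4 (a u2 constant-pool index follows).
    const uint8_t ALOAD_0 = 0x2a, GETFIELD = 0xb4;

    // Return the getfield's constant-pool index if the method body starts with
    // "aload_0; getfield #i", else -1 (the intrinsic gave up in that case).
    int leading_getfield_index(const uint8_t* code, size_t len) {
      if (len < 4 || code[0] != ALOAD_0 || code[1] != GETFIELD) return -1;
      return (code[2] << 8) | code[3];
    }

    int main() {
      const uint8_t body[] = { 0x2a, 0xb4, 0x00, 0x07, 0xad }; // aload_0; getfield #7; lreturn
      printf("constant-pool index = %d\n", leading_getfield_index(body, sizeof(body)));
    }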

bool LibraryCallKit::inline_native_AtomicLong_attemptUpdate() {
  // Restore the stack and pop off the arguments
  _sp+=5;
  Node *newVal = pop_pair();
  Node *oldVal = pop_pair();
  Node *obj = pop();

  // we need the offset of the "value" field which was computed when
  // inlining the get() method. Give up if we don't have it.
  if (value_field_offset == -1)
    return false;

  // Null check without removing any arguments.
  _sp+=5;
  obj = do_null_check(obj, T_OBJECT);
  _sp-=5;
  // Check for locking null object
  if (stopped()) return true;

  Node *adr = basic_plus_adr(obj, obj, value_field_offset);
  const TypePtr *adr_type = _gvn.type(adr)->is_ptr();
  int alias_idx = C->get_alias_index(adr_type);

  Node *cas = _gvn.transform(new (C, 5) StoreLConditionalNode(control(), memory(alias_idx), adr, newVal, oldVal));
  Node *store_proj = _gvn.transform( new (C, 1) SCMemProjNode(cas));
  set_memory(store_proj, alias_idx);
  Node *bol = _gvn.transform( new (C, 2) BoolNode( cas, BoolTest::eq ) );

  Node *result;
  // CMove node is not used to be able fold a possible check code
  // after attemptUpdate() call. This code could be transformed
  // into CMove node by loop optimizations.
  {
    RegionNode *r = new (C, 3) RegionNode(3);
    result = new (C, 3) PhiNode(r, TypeInt::BOOL);

    Node *iff = create_and_xform_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
    Node *iftrue = opt_iff(r, iff);
    r->init_req(1, iftrue);
    result->init_req(1, intcon(1));
    result->init_req(2, intcon(0));

    set_control(_gvn.transform(r));
    record_for_igvn(r);

    C->set_has_split_ifs(true); // Has chance for split-if optimization
  }

  push(_gvn.transform(result));
  return true;
}

bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
  // restore the arguments
  _sp += arg_size();

@ -212,9 +212,8 @@ ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node*
    Node* use = rgn->fast_out(i);
    if (use->is_Phi() && use->outcnt() > 0) {
      assert(use->in(0) == rgn, "");
      _igvn.hash_delete(use);
      _igvn.rehash_node_delayed(use);
      use->add_req(use->in(proj_index));
      _igvn._worklist.push(use);
      has_phi = true;
    }
  }
@ -284,9 +283,8 @@ ProjNode* PhaseIterGVN::create_new_if_for_predicate(ProjNode* cont_proj, Node* n
  for (DUIterator_Fast imax, i = rgn->fast_outs(imax); i < imax; i++) {
    Node* use = rgn->fast_out(i);
    if (use->is_Phi() && use->outcnt() > 0) {
      hash_delete(use);
      rehash_node_delayed(use);
      use->add_req(use->in(proj_index));
      _worklist.push(use);
      has_phi = true;
    }
  }

@ -961,9 +961,7 @@ void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_
  set_loop(zer_iff, loop->_parent);

  // Plug in the false-path, taken if we need to skip post-loop
  _igvn.hash_delete( main_exit );
  main_exit->set_req(0, zer_iff);
  _igvn._worklist.push(main_exit);
  _igvn.replace_input_of(main_exit, 0, zer_iff);
  set_idom(main_exit, zer_iff, dd_main_exit);
  set_idom(main_exit->unique_out(), zer_iff, dd_main_exit);
  // Make the true-path, must enter the post loop
@ -1956,9 +1954,7 @@ void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
      C->set_major_progress();
      Node *kill_con = _igvn.intcon( 1-flip );
      set_ctrl(kill_con, C->root());
      _igvn.hash_delete(iff);
      iff->set_req(1, kill_con);
      _igvn._worklist.push(iff);
      _igvn.replace_input_of(iff, 1, kill_con);
      // Find surviving projection
      assert(iff->is_If(), "");
      ProjNode* dp = ((IfNode*)iff)->proj_out(1-flip);
@ -1966,11 +1962,9 @@ void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
      for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
        Node* cd = dp->fast_out(i); // Control-dependent node
        if( cd->is_Load() ) { // Loads can now float around in the loop
          _igvn.hash_delete(cd);
          // Allow the load to float around in the loop, or before it
          // but NOT before the pre-loop.
          cd->set_req(0, ctrl); // ctrl, not NULL
          _igvn._worklist.push(cd);
          _igvn.replace_input_of(cd, 0, ctrl); // ctrl, not NULL
          --i;
          --imax;
        }
@ -2029,14 +2023,10 @@ void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
    main_bol->set_req(1,main_cmp);
  }
  // Hack the now-private loop bounds
  _igvn.hash_delete(main_cmp);
  main_cmp->set_req(2, main_limit);
  _igvn._worklist.push(main_cmp);
  _igvn.replace_input_of(main_cmp, 2, main_limit);
  // The OpaqueNode is unshared by design
  _igvn.hash_delete(opqzm);
  assert( opqzm->outcnt() == 1, "cannot hack shared node" );
  opqzm->set_req(1,main_limit);
  _igvn._worklist.push(opqzm);
  _igvn.replace_input_of(opqzm, 1, main_limit);
}

//------------------------------DCE_loop_body----------------------------------
@ -2178,9 +2168,7 @@ bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) {
    Node* cmp = cl->loopexit()->cmp_node();
    assert(cl->limit() == cmp->in(2), "sanity");
    phase->_igvn._worklist.push(cmp->in(2)); // put limit on worklist
    phase->_igvn.hash_delete(cmp);
    cmp->set_req(2, exact_limit);
    phase->_igvn._worklist.push(cmp); // put cmp on worklist
    phase->_igvn.replace_input_of(cmp, 2, exact_limit); // put cmp on worklist
  }
  // Note: the final value after increment should not overflow since
  // counted loop has limit check predicate.

@ -174,27 +174,21 @@ void PhaseIdealLoop::do_unswitching (IdealLoopTree *loop, Node_List &old_new) {
      Node* use = worklist.pop();
      Node* nuse = use->clone();
      nuse->set_req(0, invar_proj);
      _igvn.hash_delete(use);
      use->set_req(1, nuse);
      _igvn._worklist.push(use);
      _igvn.replace_input_of(use, 1, nuse);
      register_new_node(nuse, invar_proj);
      // Same for the clone
      Node* use_clone = old_new[use->_idx];
      _igvn.hash_delete(use_clone);
      use_clone->set_req(1, nuse);
      _igvn._worklist.push(use_clone);
      _igvn.replace_input_of(use_clone, 1, nuse);
    }
  }

  // Hardwire the control paths in the loops into if(true) and if(false)
  _igvn.hash_delete(unswitch_iff);
  _igvn.rehash_node_delayed(unswitch_iff);
  short_circuit_if(unswitch_iff, proj_true);
  _igvn._worklist.push(unswitch_iff);

  IfNode* unswitch_iff_clone = old_new[unswitch_iff->_idx]->as_If();
  _igvn.hash_delete(unswitch_iff_clone);
  _igvn.rehash_node_delayed(unswitch_iff_clone);
  short_circuit_if(unswitch_iff_clone, proj_false);
  _igvn._worklist.push(unswitch_iff_clone);

  // Reoptimize loops
  loop->record_for_igvn();
@ -224,8 +218,7 @@ ProjNode* PhaseIdealLoop::create_slow_version_of_loop(IdealLoopTree *loop,
  LoopNode* head = loop->_head->as_Loop();
  bool counted_loop = head->is_CountedLoop();
  Node* entry = head->in(LoopNode::EntryControl);
  _igvn.hash_delete(entry);
  _igvn._worklist.push(entry);
  _igvn.rehash_node_delayed(entry);
  IdealLoopTree* outer_loop = loop->_parent;

  Node *cont = _igvn.intcon(1);
@ -249,18 +242,14 @@ ProjNode* PhaseIdealLoop::create_slow_version_of_loop(IdealLoopTree *loop,

  // Fast (true) control
  Node* iffast_pred = clone_loop_predicates(entry, iffast, !counted_loop);
  _igvn.hash_delete(head);
  head->set_req(LoopNode::EntryControl, iffast_pred);
  _igvn.replace_input_of(head, LoopNode::EntryControl, iffast_pred);
  set_idom(head, iffast_pred, dom_depth(head));
  _igvn._worklist.push(head);

  // Slow (false) control
  Node* ifslow_pred = clone_loop_predicates(entry, ifslow, !counted_loop);
  LoopNode* slow_head = old_new[head->_idx]->as_Loop();
  _igvn.hash_delete(slow_head);
  slow_head->set_req(LoopNode::EntryControl, ifslow_pred);
  _igvn.replace_input_of(slow_head, LoopNode::EntryControl, ifslow_pred);
  set_idom(slow_head, ifslow_pred, dom_depth(slow_head));
  _igvn._worklist.push(slow_head);

  recompute_dom_depth();

@ -1129,8 +1129,7 @@ void IdealLoopTree::split_fall_in( PhaseIdealLoop *phase, int fall_in_cnt ) {
    // I'm mid-iteration over the Region's uses.
    for (DUIterator_Last imin, i = old_phi->last_outs(imin); i >= imin; ) {
      Node* use = old_phi->last_out(i);
      igvn.hash_delete(use);
      igvn._worklist.push(use);
      igvn.rehash_node_delayed(use);
      uint uses_found = 0;
      for (uint j = 0; j < use->len(); j++) {
        if (use->in(j) == old_phi) {
@ -1186,10 +1185,8 @@ void IdealLoopTree::split_outer_loop( PhaseIdealLoop *phase ) {
      phi->init_req(LoopNode::LoopBackControl, old_phi->in(outer_idx));
      phi = igvn.register_new_node_with_optimizer(phi, old_phi);
      // Make old Phi point to new Phi on the fall-in path
      igvn.hash_delete(old_phi);
      old_phi->set_req(LoopNode::EntryControl, phi);
      igvn.replace_input_of(old_phi, LoopNode::EntryControl, phi);
      old_phi->del_req(outer_idx);
      igvn._worklist.push(old_phi);
    }
  }

@ -1992,9 +1989,7 @@ void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool skip_loop_opts)
  // we do it here.
  for( uint i = 1; i < C->root()->req(); i++ ) {
    if( !_nodes[C->root()->in(i)->_idx] ) { // Dead path into Root?
      _igvn.hash_delete(C->root());
      C->root()->del_req(i);
      _igvn._worklist.push(C->root());
      _igvn.delete_input_of(C->root(), i);
      i--; // Rerun same iteration on compressed edges
    }
  }

@ -216,9 +216,7 @@ void PhaseIdealLoop::dominated_by( Node *prevdom, Node *iff, bool flip, bool exc
  Node *con = _igvn.makecon(pop == Op_IfTrue ? TypeInt::ONE : TypeInt::ZERO);
  set_ctrl(con, C->root()); // Constant gets a new use
  // Hack the dominated test
  _igvn.hash_delete(iff);
  iff->set_req(1, con);
  _igvn._worklist.push(iff);
  _igvn.replace_input_of(iff, 1, con);

  // If I dont have a reachable TRUE and FALSE path following the IfNode then
  // I can assume this path reaches an infinite loop. In this case it's not
@ -245,10 +243,8 @@ void PhaseIdealLoop::dominated_by( Node *prevdom, Node *iff, bool flip, bool exc
    Node* cd = dp->fast_out(i); // Control-dependent node
    if (cd->depends_only_on_test()) {
      assert(cd->in(0) == dp, "");
      _igvn.hash_delete(cd);
      cd->set_req(0, prevdom);
      _igvn.replace_input_of(cd, 0, prevdom);
      set_early_ctrl(cd);
      _igvn._worklist.push(cd);
      IdealLoopTree *new_loop = get_loop(get_ctrl(cd));
      if (old_loop != new_loop) {
        if (!old_loop->_child) old_loop->_body.yank(cd);
@ -952,8 +948,7 @@ void PhaseIdealLoop::split_if_with_blocks_post( Node *n ) {
    if (!n->is_Load() || late_load_ctrl != n_ctrl) {
      for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin; ) {
        Node *u = n->last_out(j); // Clone private computation per use
        _igvn.hash_delete(u);
        _igvn._worklist.push(u);
        _igvn.rehash_node_delayed(u);
        Node *x = n->clone(); // Clone computation
        Node *x_ctrl = NULL;
        if( u->is_Phi() ) {
@ -1089,9 +1084,7 @@ BoolNode *PhaseIdealLoop::clone_iff( PhiNode *phi, IdealLoopTree *loop ) {
  for( i = 1; i < phi->req(); i++ ) {
    Node *b = phi->in(i);
    if( b->is_Phi() ) {
      _igvn.hash_delete(phi);
      _igvn._worklist.push(phi);
      phi->set_req(i, clone_iff( b->as_Phi(), loop ));
      _igvn.replace_input_of(phi, i, clone_iff( b->as_Phi(), loop ));
    } else {
      assert( b->is_Bool(), "" );
    }
@ -1161,9 +1154,7 @@ CmpNode *PhaseIdealLoop::clone_bool( PhiNode *phi, IdealLoopTree *loop ) {
  for( i = 1; i < phi->req(); i++ ) {
    Node *b = phi->in(i);
    if( b->is_Phi() ) {
      _igvn.hash_delete(phi);
      _igvn._worklist.push(phi);
      phi->set_req(i, clone_bool( b->as_Phi(), loop ));
      _igvn.replace_input_of(phi, i, clone_bool( b->as_Phi(), loop ));
    } else {
      assert( b->is_Cmp() || b->is_top(), "inputs are all Cmp or TOP" );
    }
@ -1347,8 +1338,7 @@ void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd
      // The original user of 'use' uses 'r' instead.
      for (DUIterator_Last lmin, l = use->last_outs(lmin); l >= lmin;) {
        Node* useuse = use->last_out(l);
        _igvn.hash_delete(useuse);
        _igvn._worklist.push(useuse);
        _igvn.rehash_node_delayed(useuse);
        uint uses_found = 0;
        if( useuse->in(0) == use ) {
          useuse->set_req(0, r);
@ -1435,9 +1425,7 @@ void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd
      if( use->is_Phi() ) // Phi use is in prior block
        cfg = prev->in(idx); // NOT in block of Phi itself
      if (cfg->is_top()) { // Use is dead?
        _igvn.hash_delete(use);
        _igvn._worklist.push(use);
        use->set_req(idx, C->top());
        _igvn.replace_input_of(use, idx, C->top());
        continue;
      }

@ -1487,9 +1475,7 @@ void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd
        set_ctrl(phi, prev);
      }
      // Make 'use' use the Phi instead of the old loop body exit value
      _igvn.hash_delete(use);
      _igvn._worklist.push(use);
      use->set_req(idx, phi);
      _igvn.replace_input_of(use, idx, phi);
      if( use->_idx >= new_counter ) { // If updating new phis
        // Not needed for correctness, but prevents a weak assert
        // in AddPNode from tripping (when we end up with different
@ -1517,9 +1503,7 @@ void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd
      Node *iff = split_if_set->pop();
      if( iff->in(1)->is_Phi() ) {
        BoolNode *b = clone_iff( iff->in(1)->as_Phi(), loop );
        _igvn.hash_delete(iff);
        _igvn._worklist.push(iff);
        iff->set_req(1, b);
        _igvn.replace_input_of(iff, 1, b);
      }
    }
  }
@ -1529,9 +1513,7 @@ void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd
      Node *phi = b->in(1);
      assert( phi->is_Phi(), "" );
      CmpNode *cmp = clone_bool( (PhiNode*)phi, loop );
      _igvn.hash_delete(b);
      _igvn._worklist.push(b);
      b->set_req(1, cmp);
      _igvn.replace_input_of(b, 1, cmp);
    }
  }
  if( split_cex_set ) {
@ -1686,10 +1668,8 @@ ProjNode* PhaseIdealLoop::insert_if_before_proj(Node* left, bool Signed, BoolTes
  ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
  int ddepth = dom_depth(proj);

  _igvn.hash_delete(iff);
  _igvn._worklist.push(iff);
  _igvn.hash_delete(proj);
  _igvn._worklist.push(proj);
  _igvn.rehash_node_delayed(iff);
  _igvn.rehash_node_delayed(proj);

  proj->set_req(0, NULL); // temporary disconnect
  ProjNode* proj2 = proj_clone(proj, iff);
@ -1745,10 +1725,8 @@ RegionNode* PhaseIdealLoop::insert_region_before_proj(ProjNode* proj) {
  ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
  int ddepth = dom_depth(proj);

  _igvn.hash_delete(iff);
  _igvn._worklist.push(iff);
  _igvn.hash_delete(proj);
  _igvn._worklist.push(proj);
  _igvn.rehash_node_delayed(iff);
  _igvn.rehash_node_delayed(proj);

  proj->set_req(0, NULL); // temporary disconnect
  ProjNode* proj2 = proj_clone(proj, iff);
@ -1970,9 +1948,7 @@ void PhaseIdealLoop::clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, N

    // clone "n" and insert it between the inputs of "n" and the use outside the loop
    Node* n_clone = n->clone();
    _igvn.hash_delete(use);
    use->set_req(j, n_clone);
    _igvn._worklist.push(use);
    _igvn.replace_input_of(use, j, n_clone);
    Node* use_c;
    if (!use->is_Phi()) {
      use_c = has_ctrl(use) ? get_ctrl(use) : use->in(0);
@ -2028,8 +2004,7 @@ void PhaseIdealLoop::clone_for_special_use_inside_loop( IdealLoopTree *loop, Nod
#endif
  while( worklist.size() ) {
    Node *use = worklist.pop();
    _igvn.hash_delete(use);
    _igvn._worklist.push(use);
    _igvn.rehash_node_delayed(use);
    for (uint j = 1; j < use->req(); j++) {
      if (use->in(j) == n) {
        use->set_req(j, n_clone);
@ -2055,9 +2030,7 @@ void PhaseIdealLoop::insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_va
    _igvn.remove_dead_node(phi);
    phi = hit;
  }
  _igvn.hash_delete(use);
  _igvn._worklist.push(use);
  use->set_req(idx, phi);
  _igvn.replace_input_of(use, idx, phi);
}

#ifdef ASSERT
@ -2630,9 +2603,7 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
        // use is in loop
        if (old_new[use->_idx] != NULL) { // null for dead code
          Node* use_clone = old_new[use->_idx];
          _igvn.hash_delete(use);
          use->set_req(j, C->top());
          _igvn._worklist.push(use);
          _igvn.replace_input_of(use, j, C->top());
          insert_phi_for_loop( use_clone, j, old_new[def->_idx], def, new_head_clone );
        }
      } else {
@ -2667,46 +2638,35 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
    if (!n->is_CFG() && n->in(0) != NULL &&
        not_peel.test(n->_idx) && peel.test(n->in(0)->_idx)) {
      Node* n_clone = old_new[n->_idx];
      _igvn.hash_delete(n_clone);
      n_clone->set_req(0, new_head_clone);
      _igvn._worklist.push(n_clone);
      _igvn.replace_input_of(n_clone, 0, new_head_clone);
    }
  }

  // Backedge of the surviving new_head (the clone) is original last_peel
  _igvn.hash_delete(new_head_clone);
  new_head_clone->set_req(LoopNode::LoopBackControl, last_peel);
  _igvn._worklist.push(new_head_clone);
  _igvn.replace_input_of(new_head_clone, LoopNode::LoopBackControl, last_peel);

  // Cut first node in original not_peel set
  _igvn.hash_delete(new_head);
  new_head->set_req(LoopNode::EntryControl, C->top());
  new_head->set_req(LoopNode::LoopBackControl, C->top());
  _igvn._worklist.push(new_head);
  _igvn.rehash_node_delayed(new_head); // Multiple edge updates:
  new_head->set_req(LoopNode::EntryControl, C->top()); // use rehash_node_delayed / set_req instead of
  new_head->set_req(LoopNode::LoopBackControl, C->top()); // multiple replace_input_of calls

  // Copy head_clone back-branch info to original head
  // and remove original head's loop entry and
  // clone head's back-branch
  _igvn.hash_delete(head);
  _igvn.hash_delete(head_clone);
  head->set_req(LoopNode::EntryControl, head_clone->in(LoopNode::LoopBackControl));
  _igvn.rehash_node_delayed(head); // Multiple edge updates
  head->set_req(LoopNode::EntryControl, head_clone->in(LoopNode::LoopBackControl));
  head->set_req(LoopNode::LoopBackControl, C->top());
  head_clone->set_req(LoopNode::LoopBackControl, C->top());
  _igvn._worklist.push(head);
  _igvn._worklist.push(head_clone);
  _igvn.replace_input_of(head_clone, LoopNode::LoopBackControl, C->top());

  // Similarly modify the phis
  for (DUIterator_Fast kmax, k = head->fast_outs(kmax); k < kmax; k++) {
    Node* use = head->fast_out(k);
    if (use->is_Phi() && use->outcnt() > 0) {
      Node* use_clone = old_new[use->_idx];
      _igvn.hash_delete(use);
      _igvn.hash_delete(use_clone);
      use->set_req(LoopNode::EntryControl, use_clone->in(LoopNode::LoopBackControl));
      _igvn.rehash_node_delayed(use); // Multiple edge updates
      use->set_req(LoopNode::EntryControl, use_clone->in(LoopNode::LoopBackControl));
      use->set_req(LoopNode::LoopBackControl, C->top());
      use_clone->set_req(LoopNode::LoopBackControl, C->top());
      _igvn._worklist.push(use);
      _igvn._worklist.push(use_clone);
      _igvn.replace_input_of(use_clone, LoopNode::LoopBackControl, C->top());
    }
  }

@ -2792,8 +2752,7 @@ void PhaseIdealLoop::reorg_offsets(IdealLoopTree *loop) {
    set_ctrl(neg_stride, C->root());
    Node *post = new (C, 3) AddINode( opaq, neg_stride);
    register_new_node( post, u_ctrl );
    _igvn.hash_delete(use);
    _igvn._worklist.push(use);
    _igvn.rehash_node_delayed(use);
    for (uint j = 1; j < use->req(); j++) {
      if (use->in(j) == phi)
        use->set_req(j, post);

@ -1447,9 +1447,8 @@ void PhaseMacroExpand::expand_allocate_common(
  if (!always_slow && _memproj_fallthrough != NULL) {
    for (DUIterator_Fast imax, i = _memproj_fallthrough->fast_outs(imax); i < imax; i++) {
      Node *use = _memproj_fallthrough->fast_out(i);
      _igvn.hash_delete(use);
      _igvn.rehash_node_delayed(use);
      imax -= replace_input(use, _memproj_fallthrough, result_phi_rawmem);
      _igvn._worklist.push(use);
      // back up iterator
      --i;
    }
@ -1463,9 +1462,8 @@ void PhaseMacroExpand::expand_allocate_common(
    }
    for (DUIterator_Fast imax, i = _memproj_catchall->fast_outs(imax); i < imax; i++) {
      Node *use = _memproj_catchall->fast_out(i);
      _igvn.hash_delete(use);
      _igvn.rehash_node_delayed(use);
      imax -= replace_input(use, _memproj_catchall, _memproj_fallthrough);
      _igvn._worklist.push(use);
      // back up iterator
      --i;
    }
@ -1481,9 +1479,8 @@ void PhaseMacroExpand::expand_allocate_common(
  if (_ioproj_fallthrough != NULL) {
    for (DUIterator_Fast imax, i = _ioproj_fallthrough->fast_outs(imax); i < imax; i++) {
      Node *use = _ioproj_fallthrough->fast_out(i);
      _igvn.hash_delete(use);
      _igvn.rehash_node_delayed(use);
      imax -= replace_input(use, _ioproj_fallthrough, result_phi_i_o);
      _igvn._worklist.push(use);
      // back up iterator
      --i;
    }
@ -1497,9 +1494,8 @@ void PhaseMacroExpand::expand_allocate_common(
    }
    for (DUIterator_Fast imax, i = _ioproj_catchall->fast_outs(imax); i < imax; i++) {
      Node *use = _ioproj_catchall->fast_out(i);
      _igvn.hash_delete(use);
      _igvn.rehash_node_delayed(use);
      imax -= replace_input(use, _ioproj_catchall, _ioproj_fallthrough);
      _igvn._worklist.push(use);
      // back up iterator
      --i;
    }
@ -1857,18 +1853,16 @@ void PhaseMacroExpand::mark_eliminated_box(Node* oldbox, Node* obj) {
      if (alock->box_node() == oldbox && alock->obj_node()->eqv_uncast(obj)) {
        // Replace Box and mark eliminated all related locks and unlocks.
        alock->set_non_esc_obj();
        _igvn.hash_delete(alock);
        _igvn.rehash_node_delayed(alock);
        alock->set_box_node(newbox);
        _igvn._worklist.push(alock);
        next_edge = false;
      }
    }
    if (u->is_FastLock() && u->as_FastLock()->obj_node()->eqv_uncast(obj)) {
      FastLockNode* flock = u->as_FastLock();
      assert(flock->box_node() == oldbox, "sanity");
      _igvn.hash_delete(flock);
      _igvn.rehash_node_delayed(flock);
      flock->set_box_node(newbox);
      _igvn._worklist.push(flock);
      next_edge = false;
    }

@ -1886,9 +1880,7 @@ void PhaseMacroExpand::mark_eliminated_box(Node* oldbox, Node* obj) {
        Node* box_node = sfn->monitor_box(jvms, idx);
        if (box_node == oldbox && obj_node->eqv_uncast(obj)) {
          int j = jvms->monitor_box_offset(idx);
          _igvn.hash_delete(u);
          u->set_req(j, newbox);
          _igvn._worklist.push(u);
          _igvn.replace_input_of(u, j, newbox);
          next_edge = false;
        }
      }

@ -717,6 +717,22 @@ Node *MemNode::Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr ) {
      adr = adr->in(1);
      continue;

    case Op_EncodeP:
      // EncodeP node's control edge could be set by this method
      // when EncodeP node depends on CastPP node.
      //
      // Use its control edge for memory op because EncodeP may go away
      // later when it is folded with following or preceding DecodeN node.
      if (adr->in(0) == NULL) {
        // Keep looking for cast nodes.
        adr = adr->in(1);
        continue;
      }
      ccp->hash_delete(n);
      n->set_req(MemNode::Control, adr->in(0));
      ccp->hash_insert(n);
      return n;

    case Op_CastPP:
      // If the CastPP is useless, just peek on through it.
      if( ccp->type(adr) == ccp->type(adr->in(1)) ) {

@ -636,17 +636,6 @@ public:
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------LoadLLockedNode---------------------------------
// Load-locked a pointer from memory (either object or array).
// On Sparc & Intel this is implemented as a normal long load.
class LoadLLockedNode : public LoadLNode {
public:
  LoadLLockedNode( Node *c, Node *mem, Node *adr )
    : LoadLNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeLong::LONG) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StoreLConditional; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.

@ -527,6 +527,9 @@ class Parse : public GraphKit {
  int repush_if_args();
  void adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
                           Block* path, Block* other_path);
  void sharpen_type_after_if(BoolTest::mask btest,
                             Node* con, const Type* tcon,
                             Node* val, const Type* tval);
  IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask);
  Node* jump_if_join(Node* iffalse, Node* iftrue);
  void jump_if_true_fork(IfNode *ifNode, int dest_bci_if_true, int prof_table_index);

@ -1233,6 +1233,71 @@ void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
  if (!have_con) // remaining adjustments need a con
    return;

  sharpen_type_after_if(btest, con, tcon, val, tval);
}


static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) {
  Node* ldk;
  if (n->is_DecodeN()) {
    if (n->in(1)->Opcode() != Op_LoadNKlass) {
      return NULL;
    } else {
      ldk = n->in(1);
    }
  } else if (n->Opcode() != Op_LoadKlass) {
    return NULL;
  } else {
    ldk = n;
  }
  assert(ldk != NULL && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node");

  Node* adr = ldk->in(MemNode::Address);
  intptr_t off = 0;
  Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off);
  if (obj == NULL || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass?
    return NULL;
  const TypePtr* tp = gvn->type(obj)->is_ptr();
  if (tp == NULL || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr?
    return NULL;

  return obj;
}

void Parse::sharpen_type_after_if(BoolTest::mask btest,
                                  Node* con, const Type* tcon,
                                  Node* val, const Type* tval) {
  // Look for opportunities to sharpen the type of a node
  // whose klass is compared with a constant klass.
  if (btest == BoolTest::eq && tcon->isa_klassptr()) {
    Node* obj = extract_obj_from_klass_load(&_gvn, val);
    const TypeOopPtr* con_type = tcon->isa_klassptr()->as_instance_type();
    if (obj != NULL && (con_type->isa_instptr() || con_type->isa_aryptr())) {
      // Found:
      //   Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
      // or the narrowOop equivalent.
      const Type* obj_type = _gvn.type(obj);
      const TypeOopPtr* tboth = obj_type->join(con_type)->isa_oopptr();
      if (tboth != NULL && tboth != obj_type && tboth->higher_equal(obj_type)) {
        // obj has to be of the exact type Foo if the CmpP succeeds.
        assert(tboth->klass_is_exact(), "klass should be exact");
        int obj_in_map = map()->find_edge(obj);
        JVMState* jvms = this->jvms();
        if (obj_in_map >= 0 &&
            (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
          TypeNode* ccast = new (C, 2) CheckCastPPNode(control(), obj, tboth);
          const Type* tcc = ccast->as_Type()->type();
          assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
          // Delay transform() call to allow recovery of pre-cast value
          // at the control merge.
          _gvn.set_type_bottom(ccast);
          record_for_igvn(ccast);
          // Here's the payoff.
          replace_in_map(obj, ccast);
        }
      }
    }
  }

  int val_in_map = map()->find_edge(val);
  if (val_in_map < 0) return; // replace_in_map would be useless
@ -1265,6 +1330,7 @@ void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
  // Exclude tests vs float/double 0 as these could be
  // either +0 or -0. Just because you are equal to +0
  // doesn't mean you ARE +0!
  // Note, following code also replaces Long and Oop values.
  if ((!tf || tf->_f != 0.0) &&
      (!td || td->_d != 0.0))
    cast = con; // Replace non-constant val by con.

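The sharpening guard above accepts the cast only when the joined type is a strict refinement of what the parser already knew. A toy model of that check, with my own names and a deliberately simplified linear order standing in for C2's type lattice:

    #include <cstdio>

    // Higher index = more precise knowledge (toy ordering, not HotSpot's lattice).
    enum ToyType { OBJECT, FOO_OR_SUBCLASS, FOO_EXACT };

    ToyType join(ToyType a, ToyType b) { return a > b ? a : b; }   // keep the more precise
    bool higher_equal(ToyType a, ToyType b) { return a >= b; }     // a at least as precise as b

    int main() {
      ToyType obj_type = OBJECT;      // what is known about obj before the test
      ToyType con_type = FOO_EXACT;   // the constant klass compared against
      ToyType tboth = join(obj_type, con_type);
      // Mirrors: tboth != obj_type && tboth->higher_equal(obj_type)
      if (tboth != obj_type && higher_equal(tboth, obj_type))
        printf("sharpen obj to the joined type on the taken branch\n");
    }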
@ -460,6 +460,25 @@ public:
    subsume_node(old, nn);
  }

  // Delayed node rehash: remove a node from the hash table and rehash it during
  // next optimizing pass
  void rehash_node_delayed(Node* n) {
    hash_delete(n);
    _worklist.push(n);
  }

  // Replace ith edge of "n" with "in"
  void replace_input_of(Node* n, int i, Node* in) {
    rehash_node_delayed(n);
    n->set_req(i, in);
  }

  // Delete ith edge of "n"
  void delete_input_of(Node* n, int i) {
    rehash_node_delayed(n);
    n->del_req(i);
  }

  bool delay_transform() const { return _delay_transform; }

  void set_delay_transform(bool delay) {

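These three helpers package the hash_delete / set_req / _worklist.push sequence that the rest of this change replaces at dozens of call sites. As a self-contained illustration of why the ordering matters (my own toy types, not HotSpot's): a value-numbering table hashes each node by its inputs, so the node must leave the table before an input edge changes, or the stale hash bucket can never be found again.

    #include <unordered_set>
    #include <vector>
    #include <cstdio>

    struct Node {
      int op;
      std::vector<Node*> in;  // input edges, like Node::in(i)
    };

    struct NodeHash {
      size_t operator()(const Node* n) const {
        size_t h = (size_t)n->op;
        for (Node* i : n->in) h = h * 31 + (size_t)i;  // hash depends on inputs
        return h;
      }
    };
    struct NodeEq {
      bool operator()(const Node* a, const Node* b) const {
        return a->op == b->op && a->in == b->in;
      }
    };

    struct MiniIterGVN {
      std::unordered_set<Node*, NodeHash, NodeEq> table;  // hash-consing table
      std::vector<Node*> worklist;

      // The factored-out pattern: take n out of the table while its key is
      // still valid, and queue it for re-examination on the next pass.
      void rehash_node_delayed(Node* n) { table.erase(n); worklist.push_back(n); }
      void replace_input_of(Node* n, int i, Node* in) { rehash_node_delayed(n); n->in[i] = in; }
      void delete_input_of(Node* n, int i) { rehash_node_delayed(n); n->in.erase(n->in.begin() + i); }
    };

    int main() {
      Node a{1, {}}, b{2, {}}, add{3, {&a, &b}};
      MiniIterGVN gvn;
      gvn.table.insert(&add);
      gvn.replace_input_of(&add, 1, &a);  // safe: erased under the old key first
      printf("worklist size = %zu\n", gvn.worklist.size());
    }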
@ -137,9 +137,7 @@ bool PhaseIdealLoop::split_up( Node *n, Node *blk1, Node *blk2 ) {
        Node *iff_ctrl = iff->is_If() ? iff->in(0) : get_ctrl(iff);
        Node *x = bol->clone();
        register_new_node(x, iff_ctrl);
        _igvn.hash_delete(iff);
        iff->set_req(1, x);
        _igvn._worklist.push(iff);
        _igvn.replace_input_of(iff, 1, x);
      }
      _igvn.remove_dead_node( bol );
      --i;
@ -151,9 +149,7 @@ bool PhaseIdealLoop::split_up( Node *n, Node *blk1, Node *blk2 ) {
      assert( bol->in(1) == n, "" );
      Node *x = n->clone();
      register_new_node(x, get_ctrl(bol));
      _igvn.hash_delete(bol);
      bol->set_req(1, x);
      _igvn._worklist.push(bol);
      _igvn.replace_input_of(bol, 1, x);
    }
    _igvn.remove_dead_node( n );

@ -387,9 +383,7 @@ void PhaseIdealLoop::handle_use( Node *use, Node *def, small_cache *cache, Node
    if( use->in(i) == def )
      break;
  assert( i < use->req(), "def should be among use's inputs" );
  _igvn.hash_delete(use);
  use->set_req(i, new_def);
  _igvn._worklist.push(use);
  _igvn.replace_input_of(use, i, new_def);
}

//------------------------------do_split_if------------------------------------

@ -702,12 +702,84 @@ const Type *CmpPNode::sub( const Type *t1, const Type *t2 ) const {
  return TypeInt::CC;
}

static inline Node* isa_java_mirror_load(PhaseGVN* phase, Node* n) {
  // Return the klass node for
  //   LoadP(AddP(foo:Klass, #java_mirror))
  // or NULL if not matching.
  if (n->Opcode() != Op_LoadP) return NULL;

  const TypeInstPtr* tp = phase->type(n)->isa_instptr();
  if (!tp || tp->klass() != phase->C->env()->Class_klass()) return NULL;

  Node* adr = n->in(MemNode::Address);
  intptr_t off = 0;
  Node* k = AddPNode::Ideal_base_and_offset(adr, phase, off);
  if (k == NULL) return NULL;
  const TypeKlassPtr* tkp = phase->type(k)->isa_klassptr();
  if (!tkp || off != in_bytes(Klass::java_mirror_offset())) return NULL;

  // We've found the klass node of a Java mirror load.
  return k;
}

static inline Node* isa_const_java_mirror(PhaseGVN* phase, Node* n) {
  // for ConP(Foo.class) return ConP(Foo.klass)
  // otherwise return NULL
  if (!n->is_Con()) return NULL;

  const TypeInstPtr* tp = phase->type(n)->isa_instptr();
  if (!tp) return NULL;

  ciType* mirror_type = tp->java_mirror_type();
  // TypeInstPtr::java_mirror_type() returns non-NULL for compile-
  // time Class constants only.
  if (!mirror_type) return NULL;

  // x.getClass() == int.class can never be true (for all primitive types)
  // Return a ConP(NULL) node for this case.
  if (mirror_type->is_classless()) {
    return phase->makecon(TypePtr::NULL_PTR);
  }

  // return the ConP(Foo.klass)
  assert(mirror_type->is_klass(), "mirror_type should represent a klassOop");
  return phase->makecon(TypeKlassPtr::make(mirror_type->as_klass()));
}

//------------------------------Ideal------------------------------------------
// Check for the case of comparing an unknown klass loaded from the primary
// Normalize comparisons between Java mirror loads to compare the klass instead.
//
// Also check for the case of comparing an unknown klass loaded from the primary
// super-type array vs a known klass with no subtypes. This amounts to
// checking to see an unknown klass subtypes a known klass with no subtypes;
// this only happens on an exact match. We can shorten this test by 1 load.
Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
  // Normalize comparisons between Java mirrors into comparisons of the low-
  // level klass, where a dependent load could be shortened.
  //
  // The new pattern has a nice effect of matching the same pattern used in the
  // fast path of instanceof/checkcast/Class.isInstance(), which allows
  // redundant exact type check be optimized away by GVN.
  // For example, in
  //   if (x.getClass() == Foo.class) {
  //     Foo foo = (Foo) x;
  //     // ... use a ...
  //   }
  // a CmpPNode could be shared between if_acmpne and checkcast
  {
    Node* k1 = isa_java_mirror_load(phase, in(1));
    Node* k2 = isa_java_mirror_load(phase, in(2));
    Node* conk2 = isa_const_java_mirror(phase, in(2));

    if (k1 && (k2 || conk2)) {
      Node* lhs = k1;
      Node* rhs = (k2 != NULL) ? k2 : conk2;
      this->set_req(1, lhs);
      this->set_req(2, rhs);
      return this;
    }
  }

  // Constant pointer on right?
  const TypeKlassPtr* t2 = phase->type(in(2))->isa_klassptr();
  if (t2 == NULL || !t2->klass_is_exact())

@ -944,7 +944,7 @@ void SuperWord::schedule() {
void SuperWord::remove_and_insert(MemNode *current, MemNode *prev, MemNode *lip,
                                  Node *uip, Unique_Node_List &sched_before) {
  Node* my_mem = current->in(MemNode::Memory);
  _igvn.hash_delete(current);
  _igvn.rehash_node_delayed(current);
  _igvn.hash_delete(my_mem);

  //remove current_store from its current position in the memmory graph
@ -952,7 +952,7 @@ void SuperWord::remove_and_insert(MemNode *current, MemNode *prev, MemNode *lip,
    Node* use = current->out(i);
    if (use->is_Mem()) {
      assert(use->in(MemNode::Memory) == current, "must be");
      _igvn.hash_delete(use);
      _igvn.rehash_node_delayed(use);
      if (use == prev) { // connect prev to my_mem
        use->set_req(MemNode::Memory, my_mem);
      } else if (sched_before.member(use)) {
@ -962,7 +962,6 @@ void SuperWord::remove_and_insert(MemNode *current, MemNode *prev, MemNode *lip,
        _igvn.hash_delete(lip);
        use->set_req(MemNode::Memory, lip);
      }
      _igvn._worklist.push(use);
      --i; //deleted this edge; rescan position
    }
  }
@ -976,25 +975,20 @@ void SuperWord::remove_and_insert(MemNode *current, MemNode *prev, MemNode *lip,
    Node* use = insert_pt->out(i);
    if (use->is_Mem()) {
      assert(use->in(MemNode::Memory) == insert_pt, "must be");
      _igvn.hash_delete(use);
      use->set_req(MemNode::Memory, current);
      _igvn._worklist.push(use);
      _igvn.replace_input_of(use, MemNode::Memory, current);
      --i; //deleted this edge; rescan position
    } else if (!sched_up && use->is_Phi() && use->bottom_type() == Type::MEMORY) {
      uint pos; //lip (lower insert point) must be the last one in the memory slice
      _igvn.hash_delete(use);
      for (pos=1; pos < use->req(); pos++) {
        if (use->in(pos) == insert_pt) break;
      }
      use->set_req(pos, current);
      _igvn._worklist.push(use);
      _igvn.replace_input_of(use, pos, current);
      --i;
    }
  }

  //connect current to insert_pt
  current->set_req(MemNode::Memory, insert_pt);
  _igvn._worklist.push(current);
}

//------------------------------co_locate_pack----------------------------------
@ -1077,15 +1071,13 @@ void SuperWord::co_locate_pack(Node_List* pk) {
        Node* use = current->out(i);
        if (use->is_Mem() && use != previous) {
          assert(use->in(MemNode::Memory) == current, "must be");
          _igvn.hash_delete(use);
          if (schedule_before_pack.member(use)) {
            _igvn.hash_delete(upper_insert_pt);
            use->set_req(MemNode::Memory, upper_insert_pt);
            _igvn.replace_input_of(use, MemNode::Memory, upper_insert_pt);
          } else {
            _igvn.hash_delete(lower_insert_pt);
            use->set_req(MemNode::Memory, lower_insert_pt);
            _igvn.replace_input_of(use, MemNode::Memory, lower_insert_pt);
          }
          _igvn._worklist.push(use);
          --i; // deleted this edge; rescan position
        }
      }
@ -1122,9 +1114,7 @@ void SuperWord::co_locate_pack(Node_List* pk) {
    // Give each load the same memory state
    for (uint i = 0; i < pk->size(); i++) {
      LoadNode* ld = pk->at(i)->as_Load();
      _igvn.hash_delete(ld);
      ld->set_req(MemNode::Memory, mem_input);
      _igvn._worklist.push(ld);
      _igvn.replace_input_of(ld, MemNode::Memory, mem_input);
    }
  }
}
@ -1282,16 +1272,14 @@ void SuperWord::insert_extracts(Node_List* p) {

    // Insert extract operation
    _igvn.hash_delete(def);
    _igvn.hash_delete(use);
    int def_pos = alignment(def) / data_size(def);
    const Type* def_t = velt_type(def);

    Node* ex = ExtractNode::make(_phase->C, def, def_pos, def_t);
    _phase->_igvn.register_new_node_with_optimizer(ex);
    _phase->set_ctrl(ex, _phase->get_ctrl(def));
    use->set_req(idx, ex);
    _igvn.replace_input_of(use, idx, ex);
    _igvn._worklist.push(def);
    _igvn._worklist.push(use);

    bb_insert_after(ex, bb_idx(def));
    set_velt_type(ex, def_t);

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -378,6 +378,7 @@ JNI_ENTRY(jclass, jni_DefineClass(JNIEnv *env, const char *name, jobject loaderR
  jclass cls = NULL;
  DT_RETURN_MARK(DefineClass, jclass, (const jclass&)cls);

  TempNewSymbol class_name = NULL;
  // Since exceptions can be thrown, class initialization can take place
  // if name is NULL no check for class name in .class stream has to be made.
  if (name != NULL) {
@ -387,9 +388,8 @@ JNI_ENTRY(jclass, jni_DefineClass(JNIEnv *env, const char *name, jobject loaderR
      // into the constant pool.
      THROW_MSG_0(vmSymbols::java_lang_NoClassDefFoundError(), name);
    }
    class_name = SymbolTable::new_symbol(name, CHECK_NULL);
  }
  TempNewSymbol class_name = SymbolTable::new_symbol(name, THREAD);

  ResourceMark rm(THREAD);
  ClassFileStream st((u1*) buf, bufLen, NULL);
  Handle class_loader (THREAD, JNIHandles::resolve(loaderRef));

@ -634,7 +634,7 @@ JNIEXPORT jobject JNICALL
JVM_AssertionStatusDirectives(JNIEnv *env, jclass unused);

/*
 * sun.misc.AtomicLong
 * java.util.concurrent.atomic.AtomicLong
 */
JNIEXPORT jboolean JNICALL
JVM_SupportsCX8(void);

@ -631,9 +631,6 @@ class CommandLineFlags {
  develop(bool, InlineClassNatives, true, \
          "inline Class.isInstance, etc") \
  \
  develop(bool, InlineAtomicLong, true, \
          "inline sun.misc.AtomicLong") \
  \
  develop(bool, InlineThreadNatives, true, \
          "inline Thread.currentThread, etc") \
  \

@ -358,7 +358,6 @@ static inline uint64_t cast_uint64_t(size_t x)
  nonstatic_field(methodDataOopDesc, _arg_stack, intx) \
  nonstatic_field(methodDataOopDesc, _arg_returned, intx) \
  nonstatic_field(methodOopDesc, _constMethod, constMethodOop) \
  nonstatic_field(methodOopDesc, _constants, constantPoolOop) \
  nonstatic_field(methodOopDesc, _method_data, methodDataOop) \
  nonstatic_field(methodOopDesc, _interpreter_invocation_count, int) \
  nonstatic_field(methodOopDesc, _access_flags, AccessFlags) \
@ -378,7 +377,7 @@ static inline uint64_t cast_uint64_t(size_t x)
  volatile_nonstatic_field(methodOopDesc, _from_compiled_entry, address) \
  volatile_nonstatic_field(methodOopDesc, _from_interpreted_entry, address) \
  volatile_nonstatic_field(constMethodOopDesc, _fingerprint, uint64_t) \
  nonstatic_field(constMethodOopDesc, _method, methodOop) \
  nonstatic_field(constMethodOopDesc, _constants, constantPoolOop) \
  nonstatic_field(constMethodOopDesc, _stackmap_data, typeArrayOop) \
  nonstatic_field(constMethodOopDesc, _exception_table, typeArrayOop) \
  nonstatic_field(constMethodOopDesc, _constMethod_size, int) \
@ -1876,7 +1875,6 @@ static inline uint64_t cast_uint64_t(size_t x)
  declare_c2_type(StoreNNode, StoreNode) \
  declare_c2_type(StoreCMNode, StoreNode) \
  declare_c2_type(LoadPLockedNode, LoadPNode) \
  declare_c2_type(LoadLLockedNode, LoadLNode) \
  declare_c2_type(SCMemProjNode, ProjNode) \
  declare_c2_type(LoadStoreNode, Node) \
  declare_c2_type(StorePConditionalNode, LoadStoreNode) \

@ -220,9 +220,15 @@ inline int vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
#define PRIu64 "I64u"
#define PRIx64 "I64x"

#ifdef _LP64
#define PRIdPTR "I64d"
#define PRIuPTR "I64u"
#define PRIxPTR "I64x"
#else
#define PRIdPTR "d"
#define PRIuPTR "u"
#define PRIxPTR "x"
#endif

#define offset_of(klass,field) offsetof(klass,field)

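What the pointer-width macros buy, in one line: the same format string compiles to the right width on 32-bit and 64-bit Windows builds. A minimal usage sketch, assuming the definitions above (or <cinttypes> on toolchains that already provide them):

    #include <cstdio>
    #include <cstdint>
    #include <cinttypes>  // supplies PRIxPTR where the toolchain has it

    int main() {
      uintptr_t p = (uintptr_t)&p;
      // Expands to "%I64x" on 64-bit Windows, "%x" or "%lx" elsewhere.
      printf("stack address = 0x%" PRIxPTR "\n", p);
    }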
hotspot/test/compiler/6732154/Test6732154.java (new file, 111 lines)
@ -0,0 +1,111 @@
/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/**
 * @test
 * @bug 6732154
 * @summary REG: Printing an Image using image/gif doc flavor crashes the VM, Solsparc
 *
 * @run main/othervm -Xcomp -XX:CompileOnly="Test6732154::ascii85Encode" Test6732154
 */
public class Test6732154 {

    // Exact copy of sun.print.PSPrinterJob.ascii85Encode([b)[b
    private byte[] ascii85Encode(byte[] inArr) {
        byte[] outArr = new byte[((inArr.length+4) * 5 / 4) + 2];
        long p1 = 85;
        long p2 = p1*p1;
        long p3 = p1*p2;
        long p4 = p1*p3;
        byte pling = '!';

        int i = 0;
        int olen = 0;
        long val, rem;

        while (i+3 < inArr.length) {
            val = ((long)((inArr[i++]&0xff))<<24) +
                  ((long)((inArr[i++]&0xff))<<16) +
                  ((long)((inArr[i++]&0xff))<< 8) +
                  ((long)(inArr[i++]&0xff));
            if (val == 0) {
                outArr[olen++] = 'z';
            } else {
                rem = val;
                outArr[olen++] = (byte)(rem / p4 + pling); rem = rem % p4;
                outArr[olen++] = (byte)(rem / p3 + pling); rem = rem % p3;
                outArr[olen++] = (byte)(rem / p2 + pling); rem = rem % p2;
                outArr[olen++] = (byte)(rem / p1 + pling); rem = rem % p1;
                outArr[olen++] = (byte)(rem + pling);
            }
        }
        // input not a multiple of 4 bytes, write partial output.
        if (i < inArr.length) {
            int n = inArr.length - i; // n bytes remain to be written

            val = 0;
            while (i < inArr.length) {
                val = (val << 8) + (inArr[i++]&0xff);
            }

            int append = 4 - n;
            while (append-- > 0) {
                val = val << 8;
            }
            byte []c = new byte[5];
            rem = val;
            c[0] = (byte)(rem / p4 + pling); rem = rem % p4;
            c[1] = (byte)(rem / p3 + pling); rem = rem % p3;
            c[2] = (byte)(rem / p2 + pling); rem = rem % p2;
            c[3] = (byte)(rem / p1 + pling); rem = rem % p1;
            c[4] = (byte)(rem + pling);

            for (int b = 0; b < n+1 ; b++) {
                outArr[olen++] = c[b];
            }
        }

        // write EOD marker.
        outArr[olen++]='~'; outArr[olen++]='>';

        /* The original intention was to insert a newline after every 78 bytes.
         * This was mainly intended for legibility but I decided against this
         * partially because of the (small) amount of extra space, and
         * partially because for line breaks either would have to hardwire
         * ascii 10 (newline) or calculate space in bytes to allocate for
         * the platform's newline byte sequence. Also need to be careful
         * about where its inserted:
         * Ascii 85 decoder ignores white space except for one special case:
         * you must ensure you do not split the EOD marker across lines.
         */
        byte[] retArr = new byte[olen];
        System.arraycopy(outArr, 0, retArr, 0, olen);
        return retArr;
    }

    public static void main(String[] args) {
        new Test6732154().ascii85Encode(new byte[0]);
        System.out.println("Test passed.");
    }
}
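For reference, the digit extraction the test's inner loop performs on one 4-byte group reduces to repeated division by 85 with an offset of '!'. A standalone C++ sketch of that step (the input value is chosen arbitrarily):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t val = 0x12345678;  // one 4-byte group
      char digits[6] = {0};
      uint64_t rem = val;
      // Base-85 digits, most significant first, offset by '!' as in the test.
      for (int k = 4; k >= 0; k--) { digits[k] = (char)(rem % 85 + '!'); rem /= 85; }
      printf("ascii85 group = %s\n", digits);
    }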
hotspot/test/compiler/7169782/Test7169782.java (new file, 43 lines)
@ -0,0 +1,43 @@
/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/**
 * @test
 * @bug 7169782
 * @summary C2: SIGSEGV in LShiftLNode::Ideal(PhaseGVN*, bool)
 *
 * @run main/othervm -Xcomp -XX:CompileOnly="Test7169782::<clinit>" Test7169782
 */

public class Test7169782 {
    static long var_8;

    static {
        var_8 /= (long)(1E100 + ("".startsWith("a", 0) ? 1 : 2));
    }

    public static void main(String[] args) {
        System.out.println("Test passed.");
    }
}