commit a5b650be48
Rickard Bäckman 2014-08-21 20:24:41 +02:00
32 changed files with 2369 additions and 1229 deletions

@@ -508,13 +508,9 @@ endif
ifeq ($(USE_CLANG),)
# Enable bounds checking.
# _FORTIFY_SOURCE appears in GCC 4.0+
ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) )" "1"
# compile time size bounds checks
FASTDEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1
# and runtime size bounds checks and paranoid stack smashing checks.
DEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1
# stack smashing checks.
DEBUG_CFLAGS += -fstack-protector-all --param ssp-buffer-size=1
endif
endif
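For context, and not part of the changeset: the hunks here remove the _FORTIFY_SOURCE bounds-checking flags and keep only the stack-smashing checks for debug builds. A minimal, illustrative C++ program showing the kind of bug both flag families are designed to catch (the file name and build command are assumptions for the demo):

// fortify_demo.cpp -- illustrative sketch only, not part of this commit.
// Assumed build command:
//   g++ -O1 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all fortify_demo.cpp
#include <cstdio>
#include <cstring>

static void copy_name(const char* src) {
  char buf[8];
  // With _FORTIFY_SOURCE=2 (which needs -O1 or higher) glibc routes this call
  // through __strcpy_chk, which aborts at run time when src overflows buf;
  // -fstack-protector-all would likewise trip its stack canary on return.
  strcpy(buf, src);
  printf("%s\n", buf);
}

int main() {
  copy_name("definitely-more-than-eight-bytes");
  return 0;
}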

@@ -365,16 +365,13 @@ endif
ifeq ($(USE_CLANG),)
# Enable bounds checking.
# _FORTIFY_SOURCE appears in GCC 4.0+
ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) )" "1"
# compile time size bounds checks
FASTDEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1
# and runtime size bounds checks and paranoid stack smashing checks.
DEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1
# stack smashing checks.
DEBUG_CFLAGS += -fstack-protector-all --param ssp-buffer-size=1
endif
endif
# If we are building HEADLESS, pass on to VM
# so it can set the java.awt.headless property
ifdef HEADLESS

@@ -240,11 +240,7 @@ ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
endif
# Enable bounds checking.
# _FORTIFY_SOURCE appears in GCC 4.0+
ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) )" "1"
# compile time size bounds checks
FASTDEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1
# and runtime size bounds checks and paranoid stack smashing checks.
DEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1
# stack smashing checks.
DEBUG_CFLAGS += -fstack-protector-all --param ssp-buffer-size=1
endif

@@ -66,129 +66,92 @@ static void do_sysinfo(int si, const char* string, int* features, int mask) {
}
int VM_Version::platform_features(int features) {
// getisax(2), SI_ARCHITECTURE_32, and SI_ARCHITECTURE_64 are
// supported on Solaris 10 and later.
if (os::Solaris::supports_getisax()) {
assert(os::Solaris::supports_getisax(), "getisax() must be available");
// Check 32-bit architecture.
do_sysinfo(SI_ARCHITECTURE_32, "sparc", &features, v8_instructions_m);
// Check 64-bit architecture.
do_sysinfo(SI_ARCHITECTURE_64, "sparcv9", &features, generic_v9_m);
// Extract valid instruction set extensions.
uint_t avs[2];
uint_t avn = os::Solaris::getisax(avs, 2);
assert(avn <= 2, "should return two or fewer av's");
uint_t av = avs[0];
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose) {
tty->print("getisax(2) returned: " PTR32_FORMAT, av);
if (avn > 1) {
tty->print(", " PTR32_FORMAT, avs[1]);
}
tty->cr();
}
#endif
if (av & AV_SPARC_MUL32) features |= hardware_mul32_m;
if (av & AV_SPARC_DIV32) features |= hardware_div32_m;
if (av & AV_SPARC_FSMULD) features |= hardware_fsmuld_m;
if (av & AV_SPARC_V8PLUS) features |= v9_instructions_m;
if (av & AV_SPARC_POPC) features |= hardware_popc_m;
if (av & AV_SPARC_VIS) features |= vis1_instructions_m;
if (av & AV_SPARC_VIS2) features |= vis2_instructions_m;
if (avn > 1) {
uint_t av2 = avs[1];
#ifndef AV2_SPARC_SPARC5
#define AV2_SPARC_SPARC5 0x00000008 /* The 29 new fp and sub instructions */
#endif
if (av2 & AV2_SPARC_SPARC5) features |= sparc5_instructions_m;
}
// Next values are not defined before Solaris 10
// but Solaris 8 is used for jdk6 update builds.
// We only build on Solaris 10 and up, but some of the values below
// are not defined on all versions of Solaris 10, so we define them,
// if necessary.
#ifndef AV_SPARC_ASI_BLK_INIT
#define AV_SPARC_ASI_BLK_INIT 0x0080 /* ASI_BLK_INIT_xxx ASI */
#endif
if (av & AV_SPARC_ASI_BLK_INIT) features |= blk_init_instructions_m;
#ifndef AV_SPARC_FMAF
#define AV_SPARC_FMAF 0x0100 /* Fused Multiply-Add */
#endif
if (av & AV_SPARC_FMAF) features |= fmaf_instructions_m;
#ifndef AV_SPARC_FMAU
#define AV_SPARC_FMAU 0x0200 /* Unfused Multiply-Add */
#endif
if (av & AV_SPARC_FMAU) features |= fmau_instructions_m;
#ifndef AV_SPARC_VIS3
#define AV_SPARC_VIS3 0x0400 /* VIS3 instruction set extensions */
#endif
if (av & AV_SPARC_VIS3) features |= vis3_instructions_m;
#ifndef AV_SPARC_CBCOND
#define AV_SPARC_CBCOND 0x10000000 /* compare and branch instrs supported */
#endif
if (av & AV_SPARC_CBCOND) features |= cbcond_instructions_m;
#ifndef AV_SPARC_AES
#define AV_SPARC_AES 0x00020000 /* aes instrs supported */
#endif
if (av & AV_SPARC_AES) features |= aes_instructions_m;
#ifndef AV_SPARC_SHA1
#define AV_SPARC_SHA1 0x00400000 /* sha1 instruction supported */
#endif
if (av & AV_SPARC_SHA1) features |= sha1_instruction_m;
#ifndef AV_SPARC_SHA256
#define AV_SPARC_SHA256 0x00800000 /* sha256 instruction supported */
#endif
if (av & AV_SPARC_SHA256) features |= sha256_instruction_m;
#ifndef AV_SPARC_SHA512
#define AV_SPARC_SHA512 0x01000000 /* sha512 instruction supported */
#endif
if (av & AV_SPARC_SHA512) features |= sha512_instruction_m;
} else {
// getisax(2) failed, use the old legacy code.
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose)
tty->print_cr("getisax(2) is not supported.");
#endif
char tmp;
size_t bufsize = sysinfo(SI_ISALIST, &tmp, 1);
char* buf = (char*) os::malloc(bufsize, mtInternal);
if (buf != NULL) {
if (sysinfo(SI_ISALIST, buf, bufsize) == bufsize) {
// Figure out what kind of sparc we have
char *sparc_string = strstr(buf, "sparc");
if (sparc_string != NULL) { features |= v8_instructions_m;
if (sparc_string[5] == 'v') {
if (sparc_string[6] == '8') {
if (sparc_string[7] == '-') { features |= hardware_mul32_m;
features |= hardware_div32_m;
} else if (sparc_string[7] == 'p') features |= generic_v9_m;
else features |= generic_v8_m;
} else if (sparc_string[6] == '9') features |= generic_v9_m;
}
}
// Check for visualization instructions
char *vis = strstr(buf, "vis");
if (vis != NULL) { features |= vis1_instructions_m;
if (vis[3] == '2') features |= vis2_instructions_m;
}
}
os::free(buf);
}
}
// Determine the machine type.
do_sysinfo(SI_MACHINE, "sun4v", &features, sun4v_m);
@@ -203,27 +166,7 @@ int VM_Version::platform_features(int features) {
kstat_named_t* knm = (kstat_named_t *)ksp->ks_data;
for (int i = 0; i < ksp->ks_ndata; i++) {
if (strcmp((const char*)&(knm[i].name),"implementation") == 0) {
#ifndef KSTAT_DATA_STRING
#define KSTAT_DATA_STRING 9
#endif
if (knm[i].data_type == KSTAT_DATA_CHAR) {
// VM is running on Solaris 8 which does not have value.str.
implementation = &(knm[i].value.c[0]);
} else if (knm[i].data_type == KSTAT_DATA_STRING) {
// VM is running on Solaris 10.
#ifndef KSTAT_NAMED_STR_PTR
// Solaris 8 was used to build VM, define the structure it misses.
struct str_t {
union {
char *ptr; /* NULL-term string */
char __pad[8]; /* 64-bit padding */
} addr;
uint32_t len; /* # bytes for strlen + '\0' */
};
#define KSTAT_NAMED_STR_PTR(knptr) (( (str_t*)&((knptr)->value) )->addr.ptr)
#endif
implementation = KSTAT_NAMED_STR_PTR(&knm[i]);
}
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose) {
tty->print_cr("cpu_info.implementation: %s", implementation);
@@ -234,6 +177,7 @@ int VM_Version::platform_features(int features) {
for (int i = 0; impl[i] != 0; i++)
impl[i] = (char)toupper((uint)impl[i]);
if (strstr(impl, "SPARC64") != NULL) {
features |= sparc64_family_m;
} else if (strstr(impl, "SPARC-M") != NULL) {
@@ -248,8 +192,10 @@ int VM_Version::platform_features(int features) {
if (strstr(impl, "SPARC") == NULL) {
#ifndef PRODUCT
// kstat on Solaris 8 virtual machines (branded zones)
// returns "(unsupported)" implementation.
warning("kstat cpu_info implementation = '%s', should contain SPARC", impl);
// returns "(unsupported)" implementation. Solaris 8 is not
// supported anymore, but include this check to be on the
// safe side.
warning("kstat cpu_info implementation = '%s', assume generic SPARC", impl);
#endif
implementation = "SPARC";
}
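For context (illustrative, not part of the commit; assumes the Solaris <sys/auxv.h> header): the getisax(2) idiom that platform_features() now relies on unconditionally, since the legacy SI_ISALIST fallback above is deleted:

// Sketch of the getisax(2) pattern used above.
#include <sys/auxv.h>   // getisax() and the AV_SPARC_* feature bits (Solaris)

static bool cpu_has_vis3() {
  uint32_t avs[2];
  uint_t valid = getisax(avs, 2);   // fills avs, returns how many words are valid
  // AV_SPARC_VIS3 may be missing from older headers, which is why the code
  // above supplies #ifndef fallback definitions for these bits.
  return valid > 0 && (avs[0] & AV_SPARC_VIS3) != 0;
}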

@@ -546,13 +546,18 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* t
// normal bytecode execution.
thread->clear_exception_oop_and_pc();
Handle original_exception(thread, exception());
continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false);
// If an exception was thrown during exception dispatch, the exception oop may have changed
thread->set_exception_oop(exception());
thread->set_exception_pc(pc);
// the exception cache is used only by non-implicit exceptions
if (continuation != NULL) {
// Update the exception cache only if no other exception occurred
// during the computation of the compiled exception handler.
if (continuation != NULL && original_exception() == exception()) {
nm->add_handler_for_exception_and_pc(exception, pc, continuation);
}
}

@@ -130,15 +130,13 @@ void DictionaryEntry::add_protection_domain(Dictionary* dict, oop protection_dom
}
bool Dictionary::do_unloading() {
void Dictionary::do_unloading() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
bool class_was_unloaded = false;
int index = 0; // Defined here for portability! Do not move
// Remove unloadable entries and classes from system dictionary
// The placeholder array has been handled in always_strong_oops_do.
DictionaryEntry* probe = NULL;
for (index = 0; index < table_size(); index++) {
for (int index = 0; index < table_size(); index++) {
for (DictionaryEntry** p = bucket_addr(index); *p != NULL; ) {
probe = *p;
Klass* e = probe->klass();
@@ -158,16 +156,8 @@ bool Dictionary::do_unloading() {
// Do we need to delete this system dictionary entry?
if (loader_data->is_unloading()) {
// If the loader is not live this entry should always be
// removed (will never be looked up again). Note that this is
// not the same as unloading the referred class.
if (k_def_class_loader_data == loader_data) {
// This is the defining entry, so the referred class is about
// to be unloaded.
class_was_unloaded = true;
}
// Also remove this system dictionary entry.
// removed (will never be looked up again).
purge_entry = true;
} else {
// The loader in this entry is alive. If the klass is dead,
// (determined by checking the defining class loader)
@@ -196,7 +186,6 @@ bool Dictionary::do_unloading() {
p = probe->next_addr();
}
}
return class_was_unloaded;
}
void Dictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {

@@ -109,9 +109,8 @@ public:
return (loader_data->is_the_null_class_loader_data() || !ClassUnloading);
}
// Unload (that is, break root links to) all unmarked classes and
// loaders. Returns "true" iff something was unloaded.
bool do_unloading();
// Unload (that is, break root links to) all unmarked classes and loaders.
void do_unloading();
// Protection domains
Klass* find(int index, unsigned int hash, Symbol* name,

@@ -1693,10 +1693,9 @@ public:
// Note: anonymous classes are not in the SD.
bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
// First, mark for unload all ClassLoaderData referencing a dead class loader.
bool has_dead_loaders = ClassLoaderDataGraph::do_unloading(is_alive);
bool unloading_occurred = false;
if (has_dead_loaders) {
unloading_occurred = dictionary()->do_unloading();
bool unloading_occurred = ClassLoaderDataGraph::do_unloading(is_alive);
if (unloading_occurred) {
dictionary()->do_unloading();
constraints()->purge_loader_constraints();
resolution_errors()->purge_resolution_errors();
}

@@ -1815,3 +1815,57 @@ Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
}
return result;
}
ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled)
: CallNode(arraycopy_type(), NULL, TypeRawPtr::BOTTOM), _alloc_tightly_coupled(alloc_tightly_coupled), _kind(ArrayCopy) {
init_class_id(Class_ArrayCopy);
init_flags(Flag_is_macro);
C->add_macro_node(this);
}
uint ArrayCopyNode::size_of() const { return sizeof(*this); }
ArrayCopyNode* ArrayCopyNode::make(GraphKit* kit, bool may_throw,
Node* src, Node* src_offset,
Node* dest, Node* dest_offset,
Node* length,
bool alloc_tightly_coupled,
Node* src_length, Node* dest_length,
Node* src_klass, Node* dest_klass) {
ArrayCopyNode* ac = new ArrayCopyNode(kit->C, alloc_tightly_coupled);
Node* prev_mem = kit->set_predefined_input_for_runtime_call(ac);
ac->init_req(ArrayCopyNode::Src, src);
ac->init_req(ArrayCopyNode::SrcPos, src_offset);
ac->init_req(ArrayCopyNode::Dest, dest);
ac->init_req(ArrayCopyNode::DestPos, dest_offset);
ac->init_req(ArrayCopyNode::Length, length);
ac->init_req(ArrayCopyNode::SrcLen, src_length);
ac->init_req(ArrayCopyNode::DestLen, dest_length);
ac->init_req(ArrayCopyNode::SrcKlass, src_klass);
ac->init_req(ArrayCopyNode::DestKlass, dest_klass);
if (may_throw) {
ac->set_req(TypeFunc::I_O , kit->i_o());
kit->add_safepoint_edges(ac, false);
}
return ac;
}
void ArrayCopyNode::connect_outputs(GraphKit* kit) {
kit->set_all_memory_call(this, true);
kit->set_control(kit->gvn().transform(new ProjNode(this,TypeFunc::Control)));
kit->set_i_o(kit->gvn().transform(new ProjNode(this, TypeFunc::I_O)));
kit->make_slow_call_ex(this, kit->env()->Throwable_klass(), true);
kit->set_all_memory_call(this);
}
#ifndef PRODUCT
const char* ArrayCopyNode::_kind_names[] = {"arraycopy", "arraycopy, validated arguments", "clone", "oop array clone", "CopyOf", "CopyOfRange"};
void ArrayCopyNode::dump_spec(outputStream *st) const {
CallNode::dump_spec(st);
st->print(" (%s%s)", _kind_names[_kind], _alloc_tightly_coupled ? ", tightly coupled allocation" : "");
}
#endif
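The matching call sites live in library_call.cpp, whose diff is suppressed below; here is a hypothetical sketch of how a parser would create and wire the node through the make()/connect_outputs() API added above (names like kit, src, dest and length stand in for real parser state):

// Hypothetical call site for the new node (illustrative only).
ArrayCopyNode* ac = ArrayCopyNode::make(kit, true /* may_throw */,
                                        src, src_offset,
                                        dest, dest_offset,
                                        length,
                                        false /* alloc_tightly_coupled */);
ac->set_arraycopy();                  // plain System.arraycopy() variant
Node* n = kit->gvn().transform(ac);
assert(n == ac, "macro node should not be transformed away");
ac->connect_outputs(kit);             // hook up control, i_o, memory and the exception path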

@@ -1063,4 +1063,108 @@ public:
virtual bool guaranteed_safepoint() { return false; }
};
class GraphKit;
class ArrayCopyNode : public CallNode {
private:
// What kind of arraycopy variant is this?
enum {
ArrayCopy, // System.arraycopy()
ArrayCopyNoTest, // System.arraycopy(), all arguments validated
CloneBasic, // A clone that can be copied by 64 bit chunks
CloneOop, // An oop array clone
CopyOf, // Arrays.copyOf()
CopyOfRange // Arrays.copyOfRange()
} _kind;
#ifndef PRODUCT
static const char* _kind_names[CopyOfRange+1];
#endif
// Is the alloc obtained with
// AllocateArrayNode::Ideal_array_allocation() tightly coupled
// (the arraycopy immediately follows the allocation)?
// We cache the result of LibraryCallKit::tightly_coupled_allocation
// here because it's much easier to find whether there's a tightly
// coupled allocation at parse time than at macro expansion time. At
// macro expansion time, for every use of the allocation node we
// would need to figure out whether it happens after the arraycopy (and
// can be ignored) or between the allocation and the arraycopy. At
// parse time, it's straightforward because whatever happens after
// the arraycopy is not parsed yet so doesn't exist when
// LibraryCallKit::tightly_coupled_allocation() is called.
bool _alloc_tightly_coupled;
static const TypeFunc* arraycopy_type() {
const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
fields[Src] = TypeInstPtr::BOTTOM;
fields[SrcPos] = TypeInt::INT;
fields[Dest] = TypeInstPtr::BOTTOM;
fields[DestPos] = TypeInt::INT;
fields[Length] = TypeInt::INT;
fields[SrcLen] = TypeInt::INT;
fields[DestLen] = TypeInt::INT;
fields[SrcKlass] = TypeKlassPtr::BOTTOM;
fields[DestKlass] = TypeKlassPtr::BOTTOM;
const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
// create result type (range)
fields = TypeTuple::fields(0);
const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
return TypeFunc::make(domain, range);
}
ArrayCopyNode(Compile* C, bool alloc_tightly_coupled);
public:
enum {
Src = TypeFunc::Parms,
SrcPos,
Dest,
DestPos,
Length,
SrcLen,
DestLen,
SrcKlass,
DestKlass,
ParmLimit
};
static ArrayCopyNode* make(GraphKit* kit, bool may_throw,
Node* src, Node* src_offset,
Node* dest, Node* dest_offset,
Node* length,
bool alloc_tightly_coupled,
Node* src_length = NULL, Node* dest_length = NULL,
Node* src_klass = NULL, Node* dest_klass = NULL);
void connect_outputs(GraphKit* kit);
bool is_arraycopy() const { return _kind == ArrayCopy; }
bool is_arraycopy_notest() const { return _kind == ArrayCopyNoTest; }
bool is_clonebasic() const { return _kind == CloneBasic; }
bool is_cloneoop() const { return _kind == CloneOop; }
bool is_copyof() const { return _kind == CopyOf; }
bool is_copyofrange() const { return _kind == CopyOfRange; }
void set_arraycopy() { _kind = ArrayCopy; }
void set_arraycopy_notest() { _kind = ArrayCopyNoTest; }
void set_clonebasic() { _kind = CloneBasic; }
void set_cloneoop() { _kind = CloneOop; }
void set_copyof() { _kind = CopyOf; }
void set_copyofrange() { _kind = CopyOfRange; }
virtual int Opcode() const;
virtual uint size_of() const; // Size is bigger
virtual bool guaranteed_safepoint() { return false; }
bool is_alloc_tightly_coupled() const { return _alloc_tightly_coupled; }
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;
#endif
};
#endif // SHARE_VM_OPTO_CALLNODE_HPP

@@ -37,6 +37,7 @@ macro(Allocate)
macro(AllocateArray)
macro(AndI)
macro(AndL)
macro(ArrayCopy)
macro(AryEq)
macro(AtanD)
macro(Binary)

@@ -3795,6 +3795,56 @@ void Compile::ConstantTable::fill_jump_table(CodeBuffer& cb, MachConstantNode* n
}
}
//----------------------------static_subtype_check-----------------------------
// Shortcut important common cases when superklass is exact:
// (0) superklass is java.lang.Object (can occur in reflective code)
// (1) subklass is already limited to a subtype of superklass => always ok
// (2) subklass does not overlap with superklass => always fail
// (3) superklass has NO subtypes and we can check with a simple compare.
int Compile::static_subtype_check(ciKlass* superk, ciKlass* subk) {
if (StressReflectiveCode) {
return SSC_full_test; // Let caller generate the general case.
}
if (superk == env()->Object_klass()) {
return SSC_always_true; // (0) this test cannot fail
}
ciType* superelem = superk;
if (superelem->is_array_klass())
superelem = superelem->as_array_klass()->base_element_type();
if (!subk->is_interface()) { // cannot trust static interface types yet
if (subk->is_subtype_of(superk)) {
return SSC_always_true; // (1) false path dead; no dynamic test needed
}
if (!(superelem->is_klass() && superelem->as_klass()->is_interface()) &&
!superk->is_subtype_of(subk)) {
return SSC_always_false;
}
}
// If casting to an instance klass, it must have no subtypes
if (superk->is_interface()) {
// Cannot trust interfaces yet.
// %%% S.B. superk->nof_implementors() == 1
} else if (superelem->is_instance_klass()) {
ciInstanceKlass* ik = superelem->as_instance_klass();
if (!ik->has_subklass() && !ik->is_interface()) {
if (!ik->is_final()) {
// Add a dependency if there is a chance of a later subclass.
dependencies()->assert_leaf_type(ik);
}
return SSC_easy_test; // (3) caller can do a simple ptr comparison
}
} else {
// A primitive array type has no subtypes.
return SSC_easy_test; // (3) caller can do a simple ptr comparison
}
return SSC_full_test;
}
// The message about the current inlining is accumulated in
// _print_inlining_stream and transferred into the _print_inlining_list
// once we know whether inlining succeeds or not. For regular
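To make the classification above concrete, here is an assumed mapping of example inputs to results (hypothetical Java types, not taken from the source):

// Illustrative outcomes of Compile::static_subtype_check(superk, subk):
//   superk = java.lang.Object, subk = anything   -> SSC_always_true   (case 0)
//   superk = Number,           subk = Integer    -> SSC_always_true   (case 1, subk <: superk)
//   superk = String,           subk = Integer    -> SSC_always_false  (case 2, disjoint types)
//   superk = a final class with no subklass      -> SSC_easy_test     (case 3, one pointer compare)
//   anything else (e.g. an interface superk)     -> SSC_full_test     (emit the dynamic check)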

@@ -1200,6 +1200,10 @@ class Compile : public Phase {
// Definitions of pd methods
static void pd_compiler2_init();
// Static parse-time type checking logic for gen_subtype_check:
enum { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
int static_subtype_check(ciKlass* superk, ciKlass* subk);
// Auxiliary method for randomized fuzzing/stressing
static bool randomized_select(int count);
};

@@ -2520,6 +2520,21 @@ void GraphKit::make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool sep
set_control(norm);
}
static IfNode* gen_subtype_check_compare(Node* ctrl, Node* in1, Node* in2, BoolTest::mask test, float p, PhaseGVN* gvn, BasicType bt) {
Node* cmp = NULL;
switch(bt) {
case T_INT: cmp = new CmpINode(in1, in2); break;
case T_ADDRESS: cmp = new CmpPNode(in1, in2); break;
default: fatal(err_msg("unexpected comparison type %s", type2name(bt)));
}
gvn->transform(cmp);
Node* bol = gvn->transform(new BoolNode(cmp, test));
IfNode* iff = new IfNode(ctrl, bol, p, COUNT_UNKNOWN);
gvn->transform(iff);
if (!bol->is_Con()) gvn->record_for_igvn(iff);
return iff;
}
//-------------------------------gen_subtype_check-----------------------------
// Generate a subtyping check. Takes as input the subtype and supertype.
@@ -2529,16 +2544,17 @@ void GraphKit::make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool sep
// but that's not exposed to the optimizer. This call also doesn't take in an
// Object; if you wish to check an Object you need to load the Object's class
// prior to coming here.
Node* GraphKit::gen_subtype_check(Node* subklass, Node* superklass) {
Node* Phase::gen_subtype_check(Node* subklass, Node* superklass, Node** ctrl, MergeMemNode* mem, PhaseGVN* gvn) {
Compile* C = gvn->C;
// Fast check for identical types, perhaps identical constants.
// The types can even be identical non-constants, in cases
// involving Array.newInstance, Object.clone, etc.
if (subklass == superklass)
return top(); // false path is dead; no test needed.
return C->top(); // false path is dead; no test needed.
if (_gvn.type(superklass)->singleton()) {
ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass();
ciKlass* subk = _gvn.type(subklass)->is_klassptr()->klass();
if (gvn->type(superklass)->singleton()) {
ciKlass* superk = gvn->type(superklass)->is_klassptr()->klass();
ciKlass* subk = gvn->type(subklass)->is_klassptr()->klass();
// In the common case of an exact superklass, try to fold up the
// test before generating code. You may ask, why not just generate
@@ -2549,25 +2565,23 @@ Node* GraphKit::gen_subtype_check(Node* subklass, Node* superklass) {
// Foo[] fa = blah(); Foo x = fa[0]; fa[1] = x;
// Here, the type of 'fa' is often exact, so the store check
// of fa[1]=x will fold up, without testing the nullness of x.
switch (static_subtype_check(superk, subk)) {
case SSC_always_false:
switch (C->static_subtype_check(superk, subk)) {
case Compile::SSC_always_false:
{
Node* always_fail = control();
set_control(top());
Node* always_fail = *ctrl;
*ctrl = gvn->C->top();
return always_fail;
}
case SSC_always_true:
return top();
case SSC_easy_test:
case Compile::SSC_always_true:
return C->top();
case Compile::SSC_easy_test:
{
// Just do a direct pointer compare and be done.
Node* cmp = _gvn.transform( new CmpPNode(subklass, superklass) );
Node* bol = _gvn.transform( new BoolNode(cmp, BoolTest::eq) );
IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
set_control( _gvn.transform( new IfTrueNode (iff) ) );
return _gvn.transform( new IfFalseNode(iff) );
IfNode* iff = gen_subtype_check_compare(*ctrl, subklass, superklass, BoolTest::eq, PROB_STATIC_FREQUENT, gvn, T_ADDRESS);
*ctrl = gvn->transform(new IfTrueNode(iff));
return gvn->transform(new IfFalseNode(iff));
}
case SSC_full_test:
case Compile::SSC_full_test:
break;
default:
ShouldNotReachHere();
@@ -2579,11 +2593,11 @@ Node* GraphKit::gen_subtype_check(Node* subklass, Node* superklass) {
// will always succeed. We could leave a dependency behind to ensure this.
// First load the super-klass's check-offset
Node *p1 = basic_plus_adr( superklass, superklass, in_bytes(Klass::super_check_offset_offset()) );
Node *chk_off = _gvn.transform(new LoadINode(NULL, memory(p1), p1, _gvn.type(p1)->is_ptr(),
TypeInt::INT, MemNode::unordered));
Node *p1 = gvn->transform(new AddPNode(superklass, superklass, gvn->MakeConX(in_bytes(Klass::super_check_offset_offset()))));
Node* m = mem->memory_at(C->get_alias_index(gvn->type(p1)->is_ptr()));
Node *chk_off = gvn->transform(new LoadINode(NULL, m, p1, gvn->type(p1)->is_ptr(), TypeInt::INT, MemNode::unordered));
int cacheoff_con = in_bytes(Klass::secondary_super_cache_offset());
bool might_be_cache = (find_int_con(chk_off, cacheoff_con) == cacheoff_con);
bool might_be_cache = (gvn->find_int_con(chk_off, cacheoff_con) == cacheoff_con);
// Load from the sub-klass's super-class display list, or a 1-word cache of
// the secondary superclass list, or a failing value with a sentinel offset
@@ -2591,42 +2605,44 @@ Node* GraphKit::gen_subtype_check(Node* subklass, Node* superklass) {
// hierarchy and we have to scan the secondary superclass list the hard way.
// Worst-case type is a little odd: NULL is allowed as a result (usually
// klass loads can never produce a NULL).
Node *chk_off_X = ConvI2X(chk_off);
Node *p2 = _gvn.transform( new AddPNode(subklass,subklass,chk_off_X) );
Node *chk_off_X = chk_off;
#ifdef _LP64
chk_off_X = gvn->transform(new ConvI2LNode(chk_off_X));
#endif
Node *p2 = gvn->transform(new AddPNode(subklass,subklass,chk_off_X));
// For some types like interfaces the following loadKlass is from a 1-word
// cache which is mutable so can't use immutable memory. Other
// types load from the super-class display table which is immutable.
Node *kmem = might_be_cache ? memory(p2) : immutable_memory();
Node *nkls = _gvn.transform( LoadKlassNode::make( _gvn, kmem, p2, _gvn.type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL ) );
m = mem->memory_at(C->get_alias_index(gvn->type(p2)->is_ptr()));
Node *kmem = might_be_cache ? m : C->immutable_memory();
Node *nkls = gvn->transform(LoadKlassNode::make(*gvn, kmem, p2, gvn->type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL));
// Compile speed common case: ARE a subtype and we canNOT fail
if( superklass == nkls )
return top(); // false path is dead; no test needed.
return C->top(); // false path is dead; no test needed.
// See if we get an immediate positive hit. Happens roughly 83% of the
// time. Test to see if the value loaded just previously from the subklass
// is exactly the superklass.
Node *cmp1 = _gvn.transform( new CmpPNode( superklass, nkls ) );
Node *bol1 = _gvn.transform( new BoolNode( cmp1, BoolTest::eq ) );
IfNode *iff1 = create_and_xform_if( control(), bol1, PROB_LIKELY(0.83f), COUNT_UNKNOWN );
Node *iftrue1 = _gvn.transform( new IfTrueNode ( iff1 ) );
set_control( _gvn.transform( new IfFalseNode( iff1 ) ) );
IfNode *iff1 = gen_subtype_check_compare(*ctrl, superklass, nkls, BoolTest::eq, PROB_LIKELY(0.83f), gvn, T_ADDRESS);
Node *iftrue1 = gvn->transform( new IfTrueNode (iff1));
*ctrl = gvn->transform(new IfFalseNode(iff1));
// Compile speed common case: Check for being deterministic right now. If
// chk_off is a constant and not equal to cacheoff then we are NOT a
// subklass. In this case we need exactly the 1 test above and we can
// return those results immediately.
if (!might_be_cache) {
Node* not_subtype_ctrl = control();
set_control(iftrue1); // We need exactly the 1 test above
Node* not_subtype_ctrl = *ctrl;
*ctrl = iftrue1; // We need exactly the 1 test above
return not_subtype_ctrl;
}
// Gather the various success & failures here
RegionNode *r_ok_subtype = new RegionNode(4);
record_for_igvn(r_ok_subtype);
gvn->record_for_igvn(r_ok_subtype);
RegionNode *r_not_subtype = new RegionNode(3);
record_for_igvn(r_not_subtype);
gvn->record_for_igvn(r_not_subtype);
r_ok_subtype->init_req(1, iftrue1);
@@ -2635,21 +2651,17 @@ Node* GraphKit::gen_subtype_check(Node* subklass, Node* superklass) {
// check-offset points into the subklass display list or the 1-element
// cache. If it points to the display (and NOT the cache) and the display
// missed then it's not a subtype.
Node *cacheoff = _gvn.intcon(cacheoff_con);
Node *cmp2 = _gvn.transform( new CmpINode( chk_off, cacheoff ) );
Node *bol2 = _gvn.transform( new BoolNode( cmp2, BoolTest::ne ) );
IfNode *iff2 = create_and_xform_if( control(), bol2, PROB_LIKELY(0.63f), COUNT_UNKNOWN );
r_not_subtype->init_req(1, _gvn.transform( new IfTrueNode (iff2) ) );
set_control( _gvn.transform( new IfFalseNode(iff2) ) );
Node *cacheoff = gvn->intcon(cacheoff_con);
IfNode *iff2 = gen_subtype_check_compare(*ctrl, chk_off, cacheoff, BoolTest::ne, PROB_LIKELY(0.63f), gvn, T_INT);
r_not_subtype->init_req(1, gvn->transform(new IfTrueNode (iff2)));
*ctrl = gvn->transform(new IfFalseNode(iff2));
// Check for self. Very rare to get here, but it is taken 1/3 the time.
// No performance impact (too rare) but allows sharing of secondary arrays
// which has some footprint reduction.
Node *cmp3 = _gvn.transform( new CmpPNode( subklass, superklass ) );
Node *bol3 = _gvn.transform( new BoolNode( cmp3, BoolTest::eq ) );
IfNode *iff3 = create_and_xform_if( control(), bol3, PROB_LIKELY(0.36f), COUNT_UNKNOWN );
r_ok_subtype->init_req(2, _gvn.transform( new IfTrueNode ( iff3 ) ) );
set_control( _gvn.transform( new IfFalseNode( iff3 ) ) );
IfNode *iff3 = gen_subtype_check_compare(*ctrl, subklass, superklass, BoolTest::eq, PROB_LIKELY(0.36f), gvn, T_ADDRESS);
r_ok_subtype->init_req(2, gvn->transform(new IfTrueNode(iff3)));
*ctrl = gvn->transform(new IfFalseNode(iff3));
// -- Roads not taken here: --
// We could also have chosen to perform the self-check at the beginning
@@ -2672,68 +2684,16 @@ Node* GraphKit::gen_subtype_check(Node* subklass, Node* superklass) {
// out of line, and it can only improve I-cache density.
// The decision to inline or out-of-line this final check is platform
// dependent, and is found in the AD file definition of PartialSubtypeCheck.
Node* psc = _gvn.transform(
new PartialSubtypeCheckNode(control(), subklass, superklass) );
Node* psc = gvn->transform(
new PartialSubtypeCheckNode(*ctrl, subklass, superklass));
Node *cmp4 = _gvn.transform( new CmpPNode( psc, null() ) );
Node *bol4 = _gvn.transform( new BoolNode( cmp4, BoolTest::ne ) );
IfNode *iff4 = create_and_xform_if( control(), bol4, PROB_FAIR, COUNT_UNKNOWN );
r_not_subtype->init_req(2, _gvn.transform( new IfTrueNode (iff4) ) );
r_ok_subtype ->init_req(3, _gvn.transform( new IfFalseNode(iff4) ) );
IfNode *iff4 = gen_subtype_check_compare(*ctrl, psc, gvn->zerocon(T_OBJECT), BoolTest::ne, PROB_FAIR, gvn, T_ADDRESS);
r_not_subtype->init_req(2, gvn->transform(new IfTrueNode (iff4)));
r_ok_subtype ->init_req(3, gvn->transform(new IfFalseNode(iff4)));
// Return false path; set default control to true path.
set_control( _gvn.transform(r_ok_subtype) );
return _gvn.transform(r_not_subtype);
}
//----------------------------static_subtype_check-----------------------------
// Shortcut important common cases when superklass is exact:
// (0) superklass is java.lang.Object (can occur in reflective code)
// (1) subklass is already limited to a subtype of superklass => always ok
// (2) subklass does not overlap with superklass => always fail
// (3) superklass has NO subtypes and we can check with a simple compare.
int GraphKit::static_subtype_check(ciKlass* superk, ciKlass* subk) {
if (StressReflectiveCode) {
return SSC_full_test; // Let caller generate the general case.
}
if (superk == env()->Object_klass()) {
return SSC_always_true; // (0) this test cannot fail
}
ciType* superelem = superk;
if (superelem->is_array_klass())
superelem = superelem->as_array_klass()->base_element_type();
if (!subk->is_interface()) { // cannot trust static interface types yet
if (subk->is_subtype_of(superk)) {
return SSC_always_true; // (1) false path dead; no dynamic test needed
}
if (!(superelem->is_klass() && superelem->as_klass()->is_interface()) &&
!superk->is_subtype_of(subk)) {
return SSC_always_false;
}
}
// If casting to an instance klass, it must have no subtypes
if (superk->is_interface()) {
// Cannot trust interfaces yet.
// %%% S.B. superk->nof_implementors() == 1
} else if (superelem->is_instance_klass()) {
ciInstanceKlass* ik = superelem->as_instance_klass();
if (!ik->has_subklass() && !ik->is_interface()) {
if (!ik->is_final()) {
// Add a dependency if there is a chance of a later subclass.
C->dependencies()->assert_leaf_type(ik);
}
return SSC_easy_test; // (3) caller can do a simple ptr comparison
}
} else {
// A primitive array type has no subtypes.
return SSC_easy_test; // (3) caller can do a simple ptr comparison
}
return SSC_full_test;
*ctrl = gvn->transform(r_ok_subtype);
return gvn->transform(r_not_subtype);
}
// Profile-driven exact type check:
@@ -2813,7 +2773,7 @@ Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
ciKlass* exact_kls = spec_klass == NULL ? profile_has_unique_klass() : spec_klass;
if (exact_kls != NULL) {// no cast failures here
if (require_klass == NULL ||
static_subtype_check(require_klass, exact_kls) == SSC_always_true) {
C->static_subtype_check(require_klass, exact_kls) == Compile::SSC_always_true) {
// If we narrow the type to match what the type profile sees or
// the speculative type, we can then remove the rest of the
// cast.
@@ -2833,7 +2793,7 @@ Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
}
return exact_obj;
}
// assert(ssc == SSC_always_true)... except maybe the profile lied to us.
// assert(ssc == Compile::SSC_always_true)... except maybe the profile lied to us.
}
return NULL;
@@ -2938,8 +2898,8 @@ Node* GraphKit::gen_instanceof(Node* obj, Node* superklass, bool safe_for_replac
ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass();
ciKlass* subk = _gvn.type(obj)->is_oopptr()->klass();
if (subk != NULL && subk->is_loaded()) {
int static_res = static_subtype_check(superk, subk);
known_statically = (static_res == SSC_always_true || static_res == SSC_always_false);
int static_res = C->static_subtype_check(superk, subk);
known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false);
}
}
@@ -3007,13 +2967,13 @@ Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
if (tk->singleton()) {
const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr();
if (objtp != NULL && objtp->klass() != NULL) {
switch (static_subtype_check(tk->klass(), objtp->klass())) {
case SSC_always_true:
switch (C->static_subtype_check(tk->klass(), objtp->klass())) {
case Compile::SSC_always_true:
// If we know the type check always succeed then we don't use
// the profiling data at this bytecode. Don't lose it, feed it
// to the type system as a speculative type.
return record_profiled_receiver_for_speculation(obj);
case SSC_always_false:
case Compile::SSC_always_false:
// It needs a null check because a null will *pass* the cast check.
// A non-null value will always produce an exception.
return null_assert(obj);

@@ -829,17 +829,13 @@ class GraphKit : public Phase {
Node* gen_checkcast( Node *subobj, Node* superkls,
Node* *failure_control = NULL );
// Generate a subtyping check. Takes as input the subtype and supertype.
// Returns 2 values: sets the default control() to the true path and
// returns the false path. Only reads from constant memory taken from the
// default memory; does not write anything. It also doesn't take in an
// Object; if you wish to check an Object you need to load the Object's
// class prior to coming here.
Node* gen_subtype_check(Node* subklass, Node* superklass);
// Static parse-time type checking logic for gen_subtype_check:
enum { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
int static_subtype_check(ciKlass* superk, ciKlass* subk);
Node* gen_subtype_check(Node* subklass, Node* superklass) {
MergeMemNode* mem = merged_memory();
Node* ctrl = control();
Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, &_gvn);
set_control(ctrl);
return n;
}
// Exact type check used for predicted calls and casts.
// Rewrites (*casted_receiver) to be casted to the stronger type.

@@ -503,7 +503,7 @@ int IfNode::is_range_check(Node* &range, Node* &index, jint &offset) {
jint off = 0;
if (l->is_top()) {
return 0;
} else if (l->is_Add()) {
} else if (l->Opcode() == Op_AddI) {
if ((off = l->in(1)->find_int_con(0)) != 0) {
ind = l->in(2);
} else if ((off = l->in(2)->find_int_con(0)) != 0) {

File diff suppressed because it is too large

@@ -763,9 +763,7 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
loop->dump_head();
}
#endif
} else if ((cl != NULL) && (proj->_con == predicate_proj->_con) &&
loop->is_range_check_if(iff, this, invar)) {
} else if (cl != NULL && loop->is_range_check_if(iff, this, invar)) {
// Range check for counted loops
const Node* cmp = bol->in(1)->as_Cmp();
Node* idx = cmp->in(1);
@@ -800,18 +798,31 @@ }
}
// Test the lower bound
Node* lower_bound_bol = rc_predicate(loop, ctrl, scale, offset, init, limit, stride, rng, false);
BoolNode* lower_bound_bol = rc_predicate(loop, ctrl, scale, offset, init, limit, stride, rng, false);
// Negate test if necessary
bool negated = false;
if (proj->_con != predicate_proj->_con) {
lower_bound_bol = new BoolNode(lower_bound_bol->in(1), lower_bound_bol->_test.negate());
register_new_node(lower_bound_bol, ctrl);
negated = true;
}
IfNode* lower_bound_iff = lower_bound_proj->in(0)->as_If();
_igvn.hash_delete(lower_bound_iff);
lower_bound_iff->set_req(1, lower_bound_bol);
if (TraceLoopPredicate) tty->print_cr("lower bound check if: %d", lower_bound_iff->_idx);
if (TraceLoopPredicate) tty->print_cr("lower bound check if: %s %d ", negated ? " negated" : "", lower_bound_iff->_idx);
// Test the upper bound
Node* upper_bound_bol = rc_predicate(loop, lower_bound_proj, scale, offset, init, limit, stride, rng, true);
BoolNode* upper_bound_bol = rc_predicate(loop, lower_bound_proj, scale, offset, init, limit, stride, rng, true);
negated = false;
if (proj->_con != predicate_proj->_con) {
upper_bound_bol = new BoolNode(upper_bound_bol->in(1), upper_bound_bol->_test.negate());
register_new_node(upper_bound_bol, ctrl);
negated = true;
}
IfNode* upper_bound_iff = upper_bound_proj->in(0)->as_If();
_igvn.hash_delete(upper_bound_iff);
upper_bound_iff->set_req(1, upper_bound_bol);
if (TraceLoopPredicate) tty->print_cr("upper bound check if: %d", lower_bound_iff->_idx);
if (TraceLoopPredicate) tty->print_cr("upper bound check if: %s %d ", negated ? " negated" : "", lower_bound_iff->_idx);
// Fall through into rest of the clean up code which will move
// any dependent nodes onto the upper bound test.

@@ -94,7 +94,8 @@ void PhaseMacroExpand::copy_call_debug_info(CallNode *oldcall, CallNode * newcal
newcall->add_req(old_in);
}
newcall->set_jvms(oldcall->jvms());
// JVMS may be shared so clone it before we modify it
newcall->set_jvms(oldcall->jvms() != NULL ? oldcall->jvms()->clone_deep(C) : NULL);
for (JVMState *jvms = newcall->jvms(); jvms != NULL; jvms = jvms->caller()) {
jvms->set_map(newcall);
jvms->set_locoff(jvms->locoff()+jvms_adj);
@@ -2469,6 +2470,8 @@ void PhaseMacroExpand::eliminate_macro_nodes() {
assert(!n->as_AbstractLock()->is_eliminated(), "sanity");
_has_locks = true;
break;
case Node::Class_ArrayCopy:
break;
default:
assert(n->Opcode() == Op_LoopLimit ||
n->Opcode() == Op_Opaque1 ||
@@ -2544,6 +2547,25 @@ bool PhaseMacroExpand::expand_macro_nodes() {
}
}
// expand arraycopy "macro" nodes first
// For ReduceBulkZeroing, we must first process all arraycopy nodes
// before the allocate nodes are expanded.
int macro_idx = C->macro_count() - 1;
while (macro_idx >= 0) {
Node * n = C->macro_node(macro_idx);
assert(n->is_macro(), "only macro nodes expected here");
if (_igvn.type(n) == Type::TOP || n->in(0)->is_top() ) {
// node is unreachable, so don't try to expand it
C->remove_macro_node(n);
} else if (n->is_ArrayCopy()){
int macro_count = C->macro_count();
expand_arraycopy_node(n->as_ArrayCopy());
assert(C->macro_count() < macro_count, "must have deleted a node from macro list");
}
if (C->failing()) return true;
macro_idx --;
}
// expand "macro" nodes
// nodes are removed from the macro list as they are processed
while (C->macro_count() > 0) {

@@ -37,7 +37,7 @@ class PhaseMacroExpand : public Phase {
private:
PhaseIterGVN &_igvn;
// Helper methods roughly modelled after GraphKit:
// Helper methods roughly modeled after GraphKit:
Node* top() const { return C->top(); }
Node* intcon(jint con) const { return _igvn.intcon(con); }
Node* longcon(jlong con) const { return _igvn.longcon(con); }
@@ -101,6 +101,86 @@ private:
void expand_lock_node(LockNode *lock);
void expand_unlock_node(UnlockNode *unlock);
// More helper methods modeled after GraphKit for array copy
void insert_mem_bar(Node** ctrl, Node** mem, int opcode, Node* precedent = NULL);
Node* array_element_address(Node* ary, Node* idx, BasicType elembt);
Node* ConvI2L(Node* offset);
Node* make_leaf_call(Node* ctrl, Node* mem,
const TypeFunc* call_type, address call_addr,
const char* call_name,
const TypePtr* adr_type,
Node* parm0 = NULL, Node* parm1 = NULL,
Node* parm2 = NULL, Node* parm3 = NULL,
Node* parm4 = NULL, Node* parm5 = NULL,
Node* parm6 = NULL, Node* parm7 = NULL);
// helper methods modeled after LibraryCallKit for array copy
Node* generate_guard(Node** ctrl, Node* test, RegionNode* region, float true_prob);
Node* generate_slow_guard(Node** ctrl, Node* test, RegionNode* region);
void generate_negative_guard(Node** ctrl, Node* index, RegionNode* region);
void generate_limit_guard(Node** ctrl, Node* offset, Node* subseq_length, Node* array_length, RegionNode* region);
// More helper methods for array copy
Node* generate_nonpositive_guard(Node** ctrl, Node* index, bool never_negative);
void finish_arraycopy_call(Node* call, Node** ctrl, MergeMemNode** mem, const TypePtr* adr_type);
address basictype2arraycopy(BasicType t,
Node* src_offset,
Node* dest_offset,
bool disjoint_bases,
const char* &name,
bool dest_uninitialized);
Node* generate_arraycopy(ArrayCopyNode *ac,
AllocateArrayNode* alloc,
Node** ctrl, MergeMemNode* mem, Node** io,
const TypePtr* adr_type,
BasicType basic_elem_type,
Node* src, Node* src_offset,
Node* dest, Node* dest_offset,
Node* copy_length,
bool disjoint_bases = false,
bool length_never_negative = false,
RegionNode* slow_region = NULL);
void generate_clear_array(Node* ctrl, MergeMemNode* merge_mem,
const TypePtr* adr_type,
Node* dest,
BasicType basic_elem_type,
Node* slice_idx,
Node* slice_len,
Node* dest_size);
bool generate_block_arraycopy(Node** ctrl, MergeMemNode** mem, Node* io,
const TypePtr* adr_type,
BasicType basic_elem_type,
AllocateNode* alloc,
Node* src, Node* src_offset,
Node* dest, Node* dest_offset,
Node* dest_size, bool dest_uninitialized);
MergeMemNode* generate_slow_arraycopy(ArrayCopyNode *ac,
Node** ctrl, Node* mem, Node** io,
const TypePtr* adr_type,
Node* src, Node* src_offset,
Node* dest, Node* dest_offset,
Node* copy_length, bool dest_uninitialized);
Node* generate_checkcast_arraycopy(Node** ctrl, MergeMemNode** mem,
const TypePtr* adr_type,
Node* dest_elem_klass,
Node* src, Node* src_offset,
Node* dest, Node* dest_offset,
Node* copy_length, bool dest_uninitialized);
Node* generate_generic_arraycopy(Node** ctrl, MergeMemNode** mem,
const TypePtr* adr_type,
Node* src, Node* src_offset,
Node* dest, Node* dest_offset,
Node* copy_length, bool dest_uninitialized);
void generate_unchecked_arraycopy(Node** ctrl, MergeMemNode** mem,
const TypePtr* adr_type,
BasicType basic_elem_type,
bool disjoint_bases,
Node* src, Node* src_offset,
Node* dest, Node* dest_offset,
Node* copy_length, bool dest_uninitialized);
void expand_arraycopy_node(ArrayCopyNode *ac);
int replace_input(Node *use, Node *oldref, Node *newref);
void copy_call_debug_info(CallNode *oldcall, CallNode * newcall);
Node* opt_bits_test(Node* ctrl, Node* region, int edge, Node* word, int mask, int bits, bool return_fast_path = false);

File diff suppressed because it is too large

@@ -1083,6 +1083,9 @@ bool Node::has_special_unique_user() const {
if( this->is_Store() ) {
// Condition for back-to-back stores folding.
return n->Opcode() == op && n->in(MemNode::Memory) == this;
} else if (this->is_Load()) {
// Condition for removing an unused LoadNode from the MemBarAcquire precedence input
return n->Opcode() == Op_MemBarAcquire;
} else if( op == Op_AddL ) {
// Condition for convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y))
return n->Opcode() == Op_ConvL2I && n->in(1) == this;

@@ -40,6 +40,7 @@ class AddPNode;
class AliasInfo;
class AllocateArrayNode;
class AllocateNode;
class ArrayCopyNode;
class Block;
class BoolNode;
class BoxLockNode;
@@ -561,6 +562,7 @@ public:
DEFINE_CLASS_ID(AbstractLock, Call, 3)
DEFINE_CLASS_ID(Lock, AbstractLock, 0)
DEFINE_CLASS_ID(Unlock, AbstractLock, 1)
DEFINE_CLASS_ID(ArrayCopy, Call, 4)
DEFINE_CLASS_ID(MultiBranch, Multi, 1)
DEFINE_CLASS_ID(PCTable, MultiBranch, 0)
DEFINE_CLASS_ID(Catch, PCTable, 0)
@@ -707,6 +709,7 @@ public:
DEFINE_CLASS_QUERY(AddP)
DEFINE_CLASS_QUERY(Allocate)
DEFINE_CLASS_QUERY(AllocateArray)
DEFINE_CLASS_QUERY(ArrayCopy)
DEFINE_CLASS_QUERY(Bool)
DEFINE_CLASS_QUERY(BoxLock)
DEFINE_CLASS_QUERY(Call)

@@ -27,7 +27,10 @@
#include "runtime/timer.hpp"
class Compile;
class IfNode;
class MergeMemNode;
class Node;
class PhaseGVN;
//------------------------------Phase------------------------------------------
// Most optimizations are done in Phases. Creating a phase does any long
@@ -114,9 +117,20 @@ protected:
static elapsedTimer _t_instrSched;
static elapsedTimer _t_buildOopMaps;
#endif
// Generate a subtyping check. Takes as input the subtype and supertype.
// Returns 2 values: sets the default control() to the true path and
// returns the false path. Only reads from constant memory taken from the
// default memory; does not write anything. It also doesn't take in an
// Object; if you wish to check an Object you need to load the Object's
// class prior to coming here.
// Used in GraphKit and PhaseMacroExpand
static Node* gen_subtype_check(Node* subklass, Node* superklass, Node** ctrl, MergeMemNode* mem, PhaseGVN* gvn);
public:
Compile * C;
Phase( PhaseNumber pnum );
#ifndef PRODUCT
static void print_timers();
#endif

@@ -390,6 +390,9 @@ public:
// in a faster or cheaper fashion.
Node *transform( Node *n );
Node *transform_no_reclaim( Node *n );
virtual void record_for_igvn(Node *n) {
C->record_for_igvn(n);
}
void replace_with(PhaseGVN* gvn) {
_table.replace_with(&gvn->_table);
@@ -418,9 +421,6 @@ class PhaseIterGVN : public PhaseGVN {
protected:
// Idealize new Node 'n' with respect to its inputs and its value
virtual Node *transform( Node *a_node );
// Warm up hash table, type table and initial worklist
void init_worklist( Node *a_root );
@@ -434,6 +434,10 @@ public:
PhaseIterGVN( PhaseGVN *gvn ); // Used after Parser
PhaseIterGVN( PhaseIterGVN *igvn, const char *dummy ); // Used after +VerifyOpto
// Idealize new Node 'n' with respect to its inputs and its value
virtual Node *transform( Node *a_node );
virtual void record_for_igvn(Node *n) { }
virtual PhaseIterGVN *is_IterGVN() { return this; }
Unique_Node_List _worklist; // Iterative worklist

@@ -1201,6 +1201,54 @@ BoolNode* BoolNode::negate(PhaseGVN* phase) {
return new BoolNode(in(1), _test.negate());
}
// Change "bool eq/ne (cmp (add/sub A B) C)" into false/true if add/sub
// overflows and we can prove that C is not in the two resulting ranges.
// This optimization is similar to the one performed by CmpUNode::Value().
Node* BoolNode::fold_cmpI(PhaseGVN* phase, SubNode* cmp, Node* cmp1, int cmp_op,
int cmp1_op, const TypeInt* cmp2_type) {
// Only optimize eq/ne integer comparison of add/sub
if((_test._test == BoolTest::eq || _test._test == BoolTest::ne) &&
(cmp_op == Op_CmpI) && (cmp1_op == Op_AddI || cmp1_op == Op_SubI)) {
// Skip cases where inputs of add/sub are not integers or of bottom type
const TypeInt* r0 = phase->type(cmp1->in(1))->isa_int();
const TypeInt* r1 = phase->type(cmp1->in(2))->isa_int();
if ((r0 != NULL) && (r0 != TypeInt::INT) &&
(r1 != NULL) && (r1 != TypeInt::INT) &&
(cmp2_type != TypeInt::INT)) {
// Compute exact (long) type range of add/sub result
jlong lo_long = r0->_lo;
jlong hi_long = r0->_hi;
if (cmp1_op == Op_AddI) {
lo_long += r1->_lo;
hi_long += r1->_hi;
} else {
lo_long -= r1->_hi;
hi_long -= r1->_lo;
}
// Check for over-/underflow by casting to integer
int lo_int = (int)lo_long;
int hi_int = (int)hi_long;
bool underflow = lo_long != (jlong)lo_int;
bool overflow = hi_long != (jlong)hi_int;
if ((underflow != overflow) && (hi_int < lo_int)) {
// Overflow on one boundary, compute resulting type ranges:
// tr1 [MIN_INT, hi_int] and tr2 [lo_int, MAX_INT]
int w = MAX2(r0->_widen, r1->_widen); // _widen does not matter here
const TypeInt* tr1 = TypeInt::make(min_jint, hi_int, w);
const TypeInt* tr2 = TypeInt::make(lo_int, max_jint, w);
// Compare second input of cmp to both type ranges
const Type* sub_tr1 = cmp->sub(tr1, cmp2_type);
const Type* sub_tr2 = cmp->sub(tr2, cmp2_type);
if (sub_tr1 == TypeInt::CC_LT && sub_tr2 == TypeInt::CC_GT) {
// The result of the add/sub will never equal cmp2. Replace BoolNode
// by false (0) if it tests for equality and by true (1) otherwise.
return ConINode::make((_test._test == BoolTest::eq) ? 0 : 1);
}
}
}
}
return NULL;
}
//------------------------------Ideal------------------------------------------
Node *BoolNode::Ideal(PhaseGVN *phase, bool can_reshape) {
@@ -1294,6 +1342,9 @@ Node *BoolNode::Ideal(PhaseGVN *phase, bool can_reshape) {
return new BoolNode( ncmp, _test.commute() );
}
// Try to optimize signed integer comparison
return fold_cmpI(phase, cmp->as_Sub(), cmp1, cop, cmp1_op, cmp2_type);
// The transformation below is not valid for either signed or unsigned
// comparisons due to wraparound concerns at MAX_VALUE and MIN_VALUE.
// This transformation can be resurrected when we are able to
@@ -1338,8 +1389,6 @@ Node *BoolNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// phase->type( cmp2->in(2) ) == TypeInt::ONE )
// return clone_cmp( cmp, cmp1, cmp2->in(1), phase, BoolTest::le );
// }
return NULL;
}
//------------------------------Value------------------------------------------

@@ -286,6 +286,10 @@ class BoolNode : public Node {
virtual uint hash() const;
virtual uint cmp( const Node &n ) const;
virtual uint size_of() const;
// Try to optimize signed integer comparison
Node* fold_cmpI(PhaseGVN* phase, SubNode* cmp, Node* cmp1, int cmp_op,
int cmp1_op, const TypeInt* cmp2_type);
public:
const BoolTest _test;
BoolNode( Node *cc, BoolTest::mask t): _test(t), Node(0,cc) {

@@ -0,0 +1,92 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestIntegerComparison
* @bug 8043284 8042786
* @summary "Tests optimizations of signed and unsigned integer comparison."
* @run main/othervm -server -Xcomp -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation -XX:CompileOnly=TestIntegerComparison::testSigned,TestIntegerComparison::testUnsigned TestIntegerComparison
*/
public class TestIntegerComparison {
/**
* Tests optimization of signed integer comparison (see BoolNode::Ideal).
* The body of the if statement is unreachable and should not be compiled.
* @param c Character (value in the integer range [0, 65535])
*/
public static void testSigned(char c) {
// The following addition may overflow. The result is in one
// of the two ranges [IntMax] and [IntMin, IntMin + CharMax - 1].
int result = c + Integer.MAX_VALUE;
// CmpINode has to consider both result ranges instead of only
// the general [IntMin, IntMax] range to be able to prove that
// result is always unequal to CharMax.
if (result == Character.MAX_VALUE) {
// Unreachable
throw new RuntimeException("Should not reach here!");
}
}
/**
* Tests optimization of unsigned integer comparison (see CmpUNode::Value).
* The body of the if statement is unreachable and should not be compiled.
* @param c Character (value in the integer range [0, 65535])
*/
public static void testUnsigned(char c) {
/*
* The following if statement consisting of two CmpIs is replaced
* by a CmpU during optimization (see 'IfNode::fold_compares').
*
* The signed (lo < i) and (i < hi) are replaced by the unsigned
* (i - (lo+1) < hi - (lo+1)). In this case the unsigned comparison
* equals (result - 2) < 98 leading to the following CmpUNode:
*
* CmpU (AddI result, -2) 98
*
* With the value of result this is simplified to:
*
* CmpU (AddI c, -(CharMax - IntMin)) 98
*
* The subtraction may underflow. The result is in one of the two
* ranges [IntMin], [IntMax - CharMax + 1]. Both ranges have to be
* considered instead of only the general [IntMin, IntMax] to prove
* that due to the overflow the signed comparison result < 98 is
* always false.
*/
int result = c - (Character.MAX_VALUE - Integer.MIN_VALUE) + 2;
if (1 < result && result < 100) {
// Unreachable
throw new RuntimeException("Should not reach here!");
}
}
/**
* Tests optimizations of signed and unsigned integer comparison.
*/
public static void main(String[] args) {
// We use characters to get a limited integer range for free
for (int i = Character.MIN_VALUE; i <= Character.MAX_VALUE; ++i) {
testSigned((char) i);
testUnsigned((char) i);
}
}
}

@@ -0,0 +1,106 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8055153
* @summary missing control on LoadRange and LoadKlass when array copy macro node is expanded
* @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:-TieredCompilation TestMissingControl
*
*/
public class TestMissingControl {
static int[] m1(int[] a2) {
int[] a1 = new int[10];
System.arraycopy(a1, 0, a2, 0, 10);
return a1;
}
static class A {
}
static Object m2(Object[] a2) {
A[] a1 = new A[10];
System.arraycopy(a1, 0, a2, 0, 10);
return a1;
}
static void test1() {
int[] a2 = new int[10];
int[] a3 = new int[5];
// compile m1 with arraycopy intrinsified
for (int i = 0; i < 20000; i++) {
m1(a2);
}
// make m1 trap
for (int i = 0; i < 10; i++) {
try {
m1(a3);
} catch(IndexOutOfBoundsException ioobe) {
}
}
// recompile m1
for (int i = 0; i < 20000; i++) {
m1(a2);
}
try {
m1(null);
} catch(NullPointerException npe) {}
}
static void test2() {
A[] a2 = new A[10];
A[] a3 = new A[5];
// compile m2 with arraycopy intrinsified
for (int i = 0; i < 20000; i++) {
m2(a2);
}
// make m2 trap
for (int i = 0; i < 10; i++) {
try {
m2(a3);
} catch (IndexOutOfBoundsException ioobe) {
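// expected: a3 is shorter than the copied range, so the copy traps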
}
}
// recompile m2
for (int i = 0; i < 20000; i++) {
m2(a2);
}
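// same for m2: a null destination must fail with an NPE even after
// macro node expansion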
try {
m2(null);
} catch (NullPointerException npe) {}
}
public static void main(String[] args) {
test1();
test2();
}
}

@ -0,0 +1,128 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
import sun.hotspot.WhiteBox;
import sun.misc.Unsafe;
import sun.misc.IOUtils;
import java.lang.reflect.Method;
import java.net.URL;
import java.net.URLConnection;
/*
* @test TestAnonymousClassUnloading
* @bug 8054402
* @summary "Tests unloading of anonymous classes."
* @library /testlibrary /testlibrary/whitebox
* @compile TestAnonymousClassUnloading.java
* @run main ClassFileInstaller TestAnonymousClassUnloading sun.hotspot.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:-BackgroundCompilation TestAnonymousClassUnloading
*/
public class TestAnonymousClassUnloading {
private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
private static final Unsafe UNSAFE = Unsafe.getUnsafe();
private static final int COMP_LEVEL_SIMPLE = 1;
private static final int COMP_LEVEL_FULL_OPTIMIZATION = 4;
/**
* We override hashCode here to be able to access this implementation
* via an Object reference (we cannot cast to TestAnonymousClassUnloading).
*/
@Override
public int hashCode() {
return 42;
}
/**
* Does some work using the given anonymous class.
* @param anonymousClass Class performing some work (will be unloaded)
*/
private static void doWork(Class<?> anonymousClass) throws InstantiationException, IllegalAccessException {
// Create a new instance
Object anon = anonymousClass.newInstance();
// We would like to call a method of anonymousClass here but we cannot cast because the class
// was loaded by a different class loader. One solution would be to use reflection but since
// we want C2 to implement the call as an IC we call Object::hashCode() here which actually
// calls anonymousClass::hashCode(). C2 will then implement this call as an IC.
if (anon.hashCode() != 42) {
throw new RuntimeException("Work not done");
}
}
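/**
 * Alternative sketch (not used by this test): the reflective call that the
 * comment in doWork alludes to. It works, but the test deliberately goes
 * through Object::hashCode so that C2 implements the call as an IC.
 */
private static void doWorkReflectively(Class<?> anonymousClass) throws Exception {
Object anon = anonymousClass.newInstance();
// 'hashCode' is public, so getMethod finds it on the anonymous class
Method hashCode = anonymousClass.getMethod("hashCode");
if (((Integer) hashCode.invoke(anon)) != 42) {
throw new RuntimeException("Work not done");
}
}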
/**
* Makes sure the method is compiled, forcing compilation if necessary.
* @param m Method to be checked
*/
private static void makeSureIsCompiled(Method m) {
// Make sure background compilation is disabled
if (WHITE_BOX.getBooleanVMFlag("BackgroundCompilation")) {
throw new RuntimeException("Background compilation enabled");
}
// Check if already compiled
if (!WHITE_BOX.isMethodCompiled(m)) {
// If not, try to compile it with C2
if (!WHITE_BOX.enqueueMethodForCompilation(m, COMP_LEVEL_FULL_OPTIMIZATION)) {
// C2 compiler not available, try to compile with C1
WHITE_BOX.enqueueMethodForCompilation(m, COMP_LEVEL_SIMPLE);
}
// Because background compilation is disabled, method should now be compiled
if (!WHITE_BOX.isMethodCompiled(m)) {
throw new RuntimeException(m + " not compiled");
}
}
}
/**
* This test creates stale Klass* metadata referenced by a compiled IC.
*
* The following steps are performed:
* (1) An anonymous version of TestAnonymousClassUnloading is loaded by a custom class loader
* (2) The method doWork that calls a method of the anonymous class is compiled. The call
* is implemented as an IC referencing Klass* metadata of the anonymous class.
* (3) Unloading of the anonymous class is enforced. The IC now references dead metadata.
*/
public static void main(String[] args) throws Exception {
// (1) Load an anonymous version of this class using the corresponding Unsafe method
URL classUrl = TestAnonymousClassUnloading.class.getResource("TestAnonymousClassUnloading.class");
URLConnection connection = classUrl.openConnection();
byte[] classBytes = IOUtils.readFully(connection.getInputStream(), connection.getContentLength(), true);
Class<?> anonymousClass = UNSAFE.defineAnonymousClass(TestAnonymousClassUnloading.class, classBytes, null);
// (2) Make sure all paths of doWork are profiled and compiled
for (int i = 0; i < 100000; ++i) {
doWork(anonymousClass);
}
// Make sure doWork is compiled now
Method doWork = TestAnonymousClassUnloading.class.getDeclaredMethod("doWork", Class.class);
makeSureIsCompiled(doWork);
// (3) Throw away reference to anonymousClass to allow unloading
anonymousClass = null;
// Force garbage collection to trigger unloading of anonymousClass
// Dead metadata reference to anonymousClass triggers JDK-8054402
WHITE_BOX.fullGC();
}
}

@ -0,0 +1,44 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8054224
* @summary Recursive method compiled by C1 is unable to catch StackOverflowError
* @run main/othervm -Xcomp -XX:CompileOnly=TestRecursiveReplacedException.run -XX:+TieredCompilation -XX:TieredStopAtLevel=2 -Xss256K TestRecursiveReplacedException
*
*/
public class TestRecursiveReplacedException {
public static void main(String[] args) {
new TestRecursiveReplacedException().run();
}
public void run() {
try {
run();
} catch (Throwable t) {
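// expected: StackOverflowError; the C1-compiled frame must still be
// able to catch it (bug 8054224)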
}
}
}

@ -0,0 +1,53 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestMemBarAcquire
* @bug 8048879
* @summary "Tests optimization of MemBarAcquireNodes"
* @run main/othervm -XX:-TieredCompilation -XX:-BackgroundCompilation TestMemBarAcquire
*/
public class TestMemBarAcquire {
private static volatile Object defaultObj = new Object();
private Object obj;
public TestMemBarAcquire(Object param) {
// Volatile load. A MemBarAcquireNode is added after the
// load to prevent subsequent loads from floating up past it.
// A StoreNode is added to store the loaded value in 'obj'.
this.obj = defaultObj;
// Overwrites 'obj' and therefore makes the previous StoreNode
// and the corresponding LoadNode useless. However, the
// LoadNode is still connected to the MemBarAcquireNode
// that should now release the reference.
this.obj = param;
}
public static void main(String[] args) throws Exception {
// Make sure TestMemBarAcquire::<init> is compiled
for (int i = 0; i < 100000; ++i) {
TestMemBarAcquire p = new TestMemBarAcquire(new Object());
}
}
}