6895383: JCK test throws NPE for method compiled with Escape Analysis
Add missing checks for MemBar nodes in EA.

Reviewed-by: never
commit 9f5ca0249d
parent dfbb0bf3e2
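For context on the fix: escape analysis (EA) scalar-replaces objects that never escape their allocating method, and volatile field accesses on such objects leave MemBarAcquire/MemBarVolatile nodes behind. The EA worklists in the hunks below now visit MemBar (and ClearArray) nodes, and MemBarNode::Ideal() learns to eliminate volatile barriers once their object is scalar replaced. A minimal sketch of the kind of method affected — a hypothetical illustration, not the failing JCK test; the class and member names are invented:

public class VolatileBox {
    private volatile int value;              // volatile store emits a MemBarVolatile in C2

    static int compute(int x) {
        VolatileBox box = new VolatileBox(); // allocation never escapes compute()
        box.value = x;                       // store plus memory barrier
        return box.value;                    // volatile load plus MemBarAcquire
    }

    public static void main(String[] args) {
        long sum = 0;
        for (int i = 0; i < 200000; i++) {   // warm up so C2 compiles compute()
            sum += compute(i);
        }
        System.out.println(sum);
    }
}

With -Xcomp (as in the new regression test at the end of this diff) the compiler processes such methods immediately, so the scalar-replacement path that previously missed MemBar nodes is exercised right away.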
@@ -1852,6 +1852,7 @@ void Compile::dump_asm(int *pcs, uint pc_limit) {
       !n->is_Phi() &&       // a few noisely useless nodes
       !n->is_Proj() &&
       !n->is_MachTemp() &&
+      !n->is_SafePointScalarObject() &&
       !n->is_Catch() &&     // Would be nice to print exception table targets
       !n->is_MergeMem() &&  // Not very interesting
       !n->is_top() &&       // Debug info table constants
@@ -779,6 +779,13 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra
       } else {
         break;
       }
+    } else if (result->is_ClearArray()) {
+      if (!ClearArrayNode::step_through(&result, (uint)tinst->instance_id(), phase)) {
+        // Can not bypass initialization of the instance
+        // we are looking for.
+        break;
+      }
+      // Otherwise skip it (the call updated 'result' value).
     } else if (result->Opcode() == Op_SCMemProj) {
       assert(result->in(0)->is_LoadStore(), "sanity");
       const Type *at = phase->type(result->in(0)->in(MemNode::Address));
@@ -808,7 +815,6 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra
   return result;
 }

-
 //
 // Convert the types of unescaped object to instance types where possible,
 // propagate the new type information through the graph, and update memory
@@ -900,7 +906,6 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra
 //
 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) {
   GrowableArray<Node *>  memnode_worklist;
-  GrowableArray<Node *>  mergemem_worklist;
   GrowableArray<PhiNode *>  orig_phis;
   PhaseGVN  *igvn = _compile->initial_gvn();
   uint new_index_start = (uint) _compile->num_alias_types();
@@ -1025,7 +1030,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
             alloc_worklist.append_if_missing(addp2);
           }
           alloc_worklist.append_if_missing(use);
-        } else if (use->is_Initialize()) {
+        } else if (use->is_MemBar()) {
           memnode_worklist.append_if_missing(use);
         }
       }
@@ -1035,10 +1040,12 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
       PointsTo(ptset, get_addp_base(n), igvn);
       assert(ptset.Size() == 1, "AddP address is unique");
       uint elem = ptset.getelem(); // Allocation node's index
-      if (elem == _phantom_object)
+      if (elem == _phantom_object) {
+        assert(false, "escaped allocation");
         continue; // Assume the value was set outside this method.
+      }
       Node *base = get_map(elem);  // CheckCastPP node
-      if (!split_AddP(n, base, igvn)) continue; // wrong type
+      if (!split_AddP(n, base, igvn)) continue; // wrong type from dead path
       tinst = igvn->type(base)->isa_oopptr();
     } else if (n->is_Phi() ||
                n->is_CheckCastPP() ||
@@ -1053,8 +1060,10 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
       PointsTo(ptset, n, igvn);
       if (ptset.Size() == 1) {
         uint elem = ptset.getelem(); // Allocation node's index
-        if (elem == _phantom_object)
+        if (elem == _phantom_object) {
+          assert(false, "escaped allocation");
           continue; // Assume the value was set outside this method.
+        }
         Node *val = get_map(elem);   // CheckCastPP node
         TypeNode *tn = n->as_Type();
         tinst = igvn->type(val)->isa_oopptr();
@@ -1069,8 +1078,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
           tn_t = tn_type->isa_oopptr();
         }

-        if (tn_t != NULL &&
-            tinst->cast_to_instance_id(TypeOopPtr::InstanceBot)->higher_equal(tn_t)) {
+        if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
           if (tn_type->isa_narrowoop()) {
             tn_type = tinst->make_narrowoop();
           } else {
@@ -1082,33 +1090,25 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
           igvn->hash_insert(tn);
           record_for_optimizer(n);
         } else {
-          continue; // wrong type
+          assert(tn_type == TypePtr::NULL_PTR ||
+                 tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass()),
+                 "unexpected type");
+          continue; // Skip dead path with different type
         }
       }
     } else {
+      debug_only(n->dump();)
+      assert(false, "EA: unexpected node");
       continue;
     }
-    // push users on appropriate worklist
+    // push allocation's users on appropriate worklist
     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
       Node *use = n->fast_out(i);
       if(use->is_Mem() && use->in(MemNode::Address) == n) {
+        // Load/store to instance's field
         memnode_worklist.append_if_missing(use);
-      } else if (use->is_Initialize()) {
+      } else if (use->is_MemBar()) {
         memnode_worklist.append_if_missing(use);
-      } else if (use->is_MergeMem()) {
-        mergemem_worklist.append_if_missing(use);
-      } else if (use->is_SafePoint() && tinst != NULL) {
-        // Look for MergeMem nodes for calls which reference unique allocation
-        // (through CheckCastPP nodes) even for debug info.
-        Node* m = use->in(TypeFunc::Memory);
-        uint iid = tinst->instance_id();
-        while (m->is_Proj() && m->in(0)->is_SafePoint() &&
-               m->in(0) != use && !m->in(0)->_idx != iid) {
-          m = m->in(0)->in(TypeFunc::Memory);
-        }
-        if (m->is_MergeMem()) {
-          mergemem_worklist.append_if_missing(m);
-        }
       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
         Node* addp2 = find_second_addp(use, n);
         if (addp2 != NULL) {
@@ -1121,6 +1121,29 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
                  use->is_DecodeN() ||
                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
         alloc_worklist.append_if_missing(use);
+#ifdef ASSERT
+      } else if (use->is_Mem()) {
+        assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
+      } else if (use->is_MergeMem()) {
+        assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
+      } else if (use->is_SafePoint()) {
+        // Look for MergeMem nodes for calls which reference unique allocation
+        // (through CheckCastPP nodes) even for debug info.
+        Node* m = use->in(TypeFunc::Memory);
+        if (m->is_MergeMem()) {
+          assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
+        }
+      } else {
+        uint op = use->Opcode();
+        if (!(op == Op_CmpP || op == Op_Conv2B ||
+              op == Op_CastP2X || op == Op_StoreCM ||
+              op == Op_FastLock || op == Op_AryEq || op == Op_StrComp ||
+              op == Op_StrEquals || op == Op_StrIndexOf)) {
+          n->dump();
+          use->dump();
+          assert(false, "EA: missing allocation reference path");
+        }
+#endif
       }
     }

@@ -1138,13 +1161,11 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
     Node *n = memnode_worklist.pop();
     if (visited.test_set(n->_idx))
       continue;
-    if (n->is_Phi()) {
-      assert(n->as_Phi()->adr_type() != TypePtr::BOTTOM, "narrow memory slice required");
-      // we don't need to do anything, but the users must be pushed if we haven't processed
-      // this Phi before
-    } else if (n->is_Initialize()) {
-      // we don't need to do anything, but the users of the memory projection must be pushed
-      n = n->as_Initialize()->proj_out(TypeFunc::Memory);
+    if (n->is_Phi() || n->is_ClearArray()) {
+      // we don't need to do anything, but the users must be pushed
+    } else if (n->is_MemBar()) { // Initialize, MemBar nodes
+      // we don't need to do anything, but the users must be pushed
+      n = n->as_MemBar()->proj_out(TypeFunc::Memory);
       if (n == NULL)
         continue;
     } else {
@@ -1181,31 +1202,48 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
     // push user on appropriate worklist
     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
       Node *use = n->fast_out(i);
-      if (use->is_Phi()) {
+      if (use->is_Phi() || use->is_ClearArray()) {
         memnode_worklist.append_if_missing(use);
       } else if(use->is_Mem() && use->in(MemNode::Memory) == n) {
+        if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
+          continue;
         memnode_worklist.append_if_missing(use);
-      } else if (use->is_Initialize()) {
+      } else if (use->is_MemBar()) {
         memnode_worklist.append_if_missing(use);
+#ifdef ASSERT
+      } else if(use->is_Mem()) {
+        assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
       } else if (use->is_MergeMem()) {
-        mergemem_worklist.append_if_missing(use);
+        assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
+      } else {
+        uint op = use->Opcode();
+        if (!(op == Op_StoreCM ||
+              (op == Op_CallLeaf && use->as_CallLeaf()->_name != NULL &&
+               strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) ||
+              op == Op_AryEq || op == Op_StrComp ||
+              op == Op_StrEquals || op == Op_StrIndexOf)) {
+          n->dump();
+          use->dump();
+          assert(false, "EA: missing memory path");
+        }
+#endif
       }
     }
   }

   // Phase 3: Process MergeMem nodes from mergemem_worklist.
-  //          Walk each memory moving the first node encountered of each
+  //          Walk each memory slice moving the first node encountered of each
   //          instance type to the the input corresponding to its alias index.
-  while (mergemem_worklist.length() != 0) {
-    Node *n = mergemem_worklist.pop();
-    assert(n->is_MergeMem(), "MergeMem node required.");
-    if (visited.test_set(n->_idx))
-      continue;
-    MergeMemNode *nmm = n->as_MergeMem();
+  uint length = _mergemem_worklist.length();
+  for( uint next = 0; next < length; ++next ) {
+    MergeMemNode* nmm = _mergemem_worklist.at(next);
+    assert(!visited.test_set(nmm->_idx), "should not be visited before");
     // Note: we don't want to use MergeMemStream here because we only want to
     // scan inputs which exist at the start, not ones we add during processing.
-    uint nslices = nmm->req();
+    // Note 2: MergeMem may already contains instance memory slices added
+    // during find_inst_mem() call when memory nodes were processed above.
     igvn->hash_delete(nmm);
+    uint nslices = nmm->req();
     for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
       Node* mem = nmm->in(i);
       Node* cur = NULL;
@@ -1259,41 +1297,6 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
     }
     igvn->hash_insert(nmm);
     record_for_optimizer(nmm);
-
-    // Propagate new memory slices to following MergeMem nodes.
-    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
-      Node *use = n->fast_out(i);
-      if (use->is_Call()) {
-        CallNode* in = use->as_Call();
-        if (in->proj_out(TypeFunc::Memory) != NULL) {
-          Node* m = in->proj_out(TypeFunc::Memory);
-          for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
-            Node* mm = m->fast_out(j);
-            if (mm->is_MergeMem()) {
-              mergemem_worklist.append_if_missing(mm);
-            }
-          }
-        }
-        if (use->is_Allocate()) {
-          use = use->as_Allocate()->initialization();
-          if (use == NULL) {
-            continue;
-          }
-        }
-      }
-      if (use->is_Initialize()) {
-        InitializeNode* in = use->as_Initialize();
-        if (in->proj_out(TypeFunc::Memory) != NULL) {
-          Node* m = in->proj_out(TypeFunc::Memory);
-          for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
-            Node* mm = m->fast_out(j);
-            if (mm->is_MergeMem()) {
-              mergemem_worklist.append_if_missing(mm);
-            }
-          }
-        }
-      }
-    }
   }

   // Phase 4: Update the inputs of non-instance memory Phis and
@@ -1381,8 +1384,20 @@ bool ConnectionGraph::compute_escape() {
         ptnode_adr(n->_idx)->node_type() == PointsToNode::JavaObject) {
       has_allocations = true;
     }
-    if(n->is_AddP())
-      cg_worklist.append(n->_idx);
+    if(n->is_AddP()) {
+      // Collect address nodes which directly reference an allocation.
+      // Use them during stage 3 below to build initial connection graph
+      // field edges. Other field edges could be added after StoreP/LoadP
+      // nodes are processed during stage 4 below.
+      Node* base = get_addp_base(n);
+      if(base->is_Proj() && base->in(0)->is_Allocate()) {
+        cg_worklist.append(n->_idx);
+      }
+    } else if (n->is_MergeMem()) {
+      // Collect all MergeMem nodes to add memory slices for
+      // scalar replaceable objects in split_unique_types().
+      _mergemem_worklist.append(n->as_MergeMem());
+    }
     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
       Node* m = n->fast_out(i);   // Get user
       worklist_init.push(m);
@@ -1423,12 +1438,13 @@ bool ConnectionGraph::compute_escape() {
     }
   }

-  VectorSet ptset(Thread::current()->resource_area());
+  Arena* arena = Thread::current()->resource_area();
+  VectorSet ptset(arena);
   GrowableArray<uint>  deferred_edges;
-  VectorSet visited(Thread::current()->resource_area());
+  VectorSet visited(arena);

-  // 5. Remove deferred edges from the graph and collect
-  //    information needed for type splitting.
+  // 5. Remove deferred edges from the graph and adjust
+  //    escape state of nonescaping objects.
   cg_length = cg_worklist.length();
   for( uint next = 0; next < cg_length; ++next ) {
     int ni = cg_worklist.at(next);
@@ -1438,98 +1454,9 @@ bool ConnectionGraph::compute_escape() {
       remove_deferred(ni, &deferred_edges, &visited);
       Node *n = ptn->_node;
       if (n->is_AddP()) {
-        // Search for objects which are not scalar replaceable.
-        // Mark their escape state as ArgEscape to propagate the state
-        // to referenced objects.
-        // Note: currently there are no difference in compiler optimizations
-        // for ArgEscape objects and NoEscape objects which are not
-        // scalar replaceable.
-
-        int offset = ptn->offset();
-        Node *base = get_addp_base(n);
-        ptset.Clear();
-        PointsTo(ptset, base, igvn);
-        int ptset_size = ptset.Size();
-
-        // Check if a field's initializing value is recorded and add
-        // a corresponding NULL field's value if it is not recorded.
-        // Connection Graph does not record a default initialization by NULL
-        // captured by Initialize node.
-        //
-        // Note: it will disable scalar replacement in some cases:
-        //
-        //    Point p[] = new Point[1];
-        //    p[0] = new Point(); // Will be not scalar replaced
-        //
-        // but it will save us from incorrect optimizations in next cases:
-        //
-        //    Point p[] = new Point[1];
-        //    if ( x ) p[0] = new Point(); // Will be not scalar replaced
-        //
-        // Without a control flow analysis we can't distinguish above cases.
-        //
-        if (offset != Type::OffsetBot && ptset_size == 1) {
-          uint elem = ptset.getelem(); // Allocation node's index
-          // It does not matter if it is not Allocation node since
-          // only non-escaping allocations are scalar replaced.
-          if (ptnode_adr(elem)->_node->is_Allocate() &&
-              ptnode_adr(elem)->escape_state() == PointsToNode::NoEscape) {
-            AllocateNode* alloc = ptnode_adr(elem)->_node->as_Allocate();
-            InitializeNode* ini = alloc->initialization();
-            Node* value = NULL;
-            if (ini != NULL) {
-              BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
-              Node* store = ini->find_captured_store(offset, type2aelembytes(ft), igvn);
-              if (store != NULL && store->is_Store())
-                value = store->in(MemNode::ValueIn);
-            }
-            if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
-              // A field's initializing value was not recorded. Add NULL.
-              uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
-              add_pointsto_edge(ni, null_idx);
-            }
-          }
-        }
-
-        // An object is not scalar replaceable if the field which may point
-        // to it has unknown offset (unknown element of an array of objects).
-        //
-        if (offset == Type::OffsetBot) {
-          uint e_cnt = ptn->edge_count();
-          for (uint ei = 0; ei < e_cnt; ei++) {
-            uint npi = ptn->edge_target(ei);
-            set_escape_state(npi, PointsToNode::ArgEscape);
-            ptnode_adr(npi)->_scalar_replaceable = false;
-          }
-        }
-
-        // Currently an object is not scalar replaceable if a LoadStore node
-        // access its field since the field value is unknown after it.
-        //
-        bool has_LoadStore = false;
-        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
-          Node *use = n->fast_out(i);
-          if (use->is_LoadStore()) {
-            has_LoadStore = true;
-            break;
-          }
-        }
-        // An object is not scalar replaceable if the address points
-        // to unknown field (unknown element for arrays, offset is OffsetBot).
-        //
-        // Or the address may point to more then one object. This may produce
-        // the false positive result (set scalar_replaceable to false)
-        // since the flow-insensitive escape analysis can't separate
-        // the case when stores overwrite the field's value from the case
-        // when stores happened on different control branches.
-        //
-        if (ptset_size > 1 || ptset_size != 0 &&
-            (has_LoadStore || offset == Type::OffsetBot)) {
-          for( VectorSetI j(&ptset); j.test(); ++j ) {
-            set_escape_state(j.elem, PointsToNode::ArgEscape);
-            ptnode_adr(j.elem)->_scalar_replaceable = false;
-          }
-        }
+        // Search for objects which are not scalar replaceable
+        // and adjust their escape state.
+        verify_escape_state(ni, ptset, igvn);
       }
     }
   }
@@ -1646,6 +1573,150 @@ bool ConnectionGraph::compute_escape() {
   return has_non_escaping_obj;
 }

+// Search for objects which are not scalar replaceable.
+void ConnectionGraph::verify_escape_state(int nidx, VectorSet& ptset, PhaseTransform* phase) {
+  PointsToNode* ptn = ptnode_adr(nidx);
+  Node* n = ptn->_node;
+  assert(n->is_AddP(), "Should be called for AddP nodes only");
+  // Search for objects which are not scalar replaceable.
+  // Mark their escape state as ArgEscape to propagate the state
+  // to referenced objects.
+  // Note: currently there are no difference in compiler optimizations
+  // for ArgEscape objects and NoEscape objects which are not
+  // scalar replaceable.
+
+  Compile* C = _compile;
+
+  int offset = ptn->offset();
+  Node* base = get_addp_base(n);
+  ptset.Clear();
+  PointsTo(ptset, base, phase);
+  int ptset_size = ptset.Size();
+
+  // Check if a oop field's initializing value is recorded and add
+  // a corresponding NULL field's value if it is not recorded.
+  // Connection Graph does not record a default initialization by NULL
+  // captured by Initialize node.
+  //
+  // Note: it will disable scalar replacement in some cases:
+  //
+  //    Point p[] = new Point[1];
+  //    p[0] = new Point(); // Will be not scalar replaced
+  //
+  // but it will save us from incorrect optimizations in next cases:
+  //
+  //    Point p[] = new Point[1];
+  //    if ( x ) p[0] = new Point(); // Will be not scalar replaced
+  //
+  // Do a simple control flow analysis to distinguish above cases.
+  //
+  if (offset != Type::OffsetBot && ptset_size == 1) {
+    uint elem = ptset.getelem(); // Allocation node's index
+    // It does not matter if it is not Allocation node since
+    // only non-escaping allocations are scalar replaced.
+    if (ptnode_adr(elem)->_node->is_Allocate() &&
+        ptnode_adr(elem)->escape_state() == PointsToNode::NoEscape) {
+      AllocateNode* alloc = ptnode_adr(elem)->_node->as_Allocate();
+      InitializeNode* ini = alloc->initialization();
+
+      // Check only oop fields.
+      const Type* adr_type = n->as_AddP()->bottom_type();
+      BasicType basic_field_type = T_INT;
+      if (adr_type->isa_instptr()) {
+        ciField* field = C->alias_type(adr_type->isa_instptr())->field();
+        if (field != NULL) {
+          basic_field_type = field->layout_type();
+        } else {
+          // Ignore non field load (for example, klass load)
+        }
+      } else if (adr_type->isa_aryptr()) {
+        const Type* elemtype = adr_type->isa_aryptr()->elem();
+        basic_field_type = elemtype->array_element_basic_type();
+      } else {
+        // Raw pointers are used for initializing stores so skip it.
+        assert(adr_type->isa_rawptr() && base->is_Proj() &&
+               (base->in(0) == alloc),"unexpected pointer type");
+      }
+      if (basic_field_type == T_OBJECT ||
+          basic_field_type == T_NARROWOOP ||
+          basic_field_type == T_ARRAY) {
+        Node* value = NULL;
+        if (ini != NULL) {
+          BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
+          Node* store = ini->find_captured_store(offset, type2aelembytes(ft), phase);
+          if (store != NULL && store->is_Store()) {
+            value = store->in(MemNode::ValueIn);
+          } else if (ptn->edge_count() > 0) { // Are there oop stores?
+            // Check for a store which follows allocation without branches.
+            // For example, a volatile field store is not collected
+            // by Initialize node. TODO: it would be nice to use idom() here.
+            for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+              store = n->fast_out(i);
+              if (store->is_Store() && store->in(0) != NULL) {
+                Node* ctrl = store->in(0);
+                while(!(ctrl == ini || ctrl == alloc || ctrl == NULL ||
+                        ctrl == C->root() || ctrl == C->top() || ctrl->is_Region() ||
+                        ctrl->is_IfTrue() || ctrl->is_IfFalse())) {
+                  ctrl = ctrl->in(0);
+                }
+                if (ctrl == ini || ctrl == alloc) {
+                  value = store->in(MemNode::ValueIn);
+                  break;
+                }
+              }
+            }
+          }
+        }
+        if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
+          // A field's initializing value was not recorded. Add NULL.
+          uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
+          add_pointsto_edge(nidx, null_idx);
+        }
+      }
+    }
+  }
+
+  // An object is not scalar replaceable if the field which may point
+  // to it has unknown offset (unknown element of an array of objects).
+  //
+  if (offset == Type::OffsetBot) {
+    uint e_cnt = ptn->edge_count();
+    for (uint ei = 0; ei < e_cnt; ei++) {
+      uint npi = ptn->edge_target(ei);
+      set_escape_state(npi, PointsToNode::ArgEscape);
+      ptnode_adr(npi)->_scalar_replaceable = false;
+    }
+  }
+
+  // Currently an object is not scalar replaceable if a LoadStore node
+  // access its field since the field value is unknown after it.
+  //
+  bool has_LoadStore = false;
+  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+    Node *use = n->fast_out(i);
+    if (use->is_LoadStore()) {
+      has_LoadStore = true;
+      break;
+    }
+  }
+  // An object is not scalar replaceable if the address points
+  // to unknown field (unknown element for arrays, offset is OffsetBot).
+  //
+  // Or the address may point to more then one object. This may produce
+  // the false positive result (set scalar_replaceable to false)
+  // since the flow-insensitive escape analysis can't separate
+  // the case when stores overwrite the field's value from the case
+  // when stores happened on different control branches.
+  //
+  if (ptset_size > 1 || ptset_size != 0 &&
+      (has_LoadStore || offset == Type::OffsetBot)) {
+    for( VectorSetI j(&ptset); j.test(); ++j ) {
+      set_escape_state(j.elem, PointsToNode::ArgEscape);
+      ptnode_adr(j.elem)->_scalar_replaceable = false;
+    }
+  }
+}
+
 void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *phase) {

   switch (call->Opcode()) {
@@ -1657,6 +1728,7 @@ void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *pha
       assert(false, "should be done already");
       break;
 #endif
+    case Op_CallLeaf:
     case Op_CallLeafNoFP:
     {
       // Stub calls, objects do not escape but they are not scale replaceable.
@@ -1667,9 +1739,23 @@ void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *pha
         const Type* at = d->field_at(i);
         Node *arg = call->in(i)->uncast();
         const Type *aat = phase->type(arg);
-        if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr()) {
+        if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr() &&
+            ptnode_adr(arg->_idx)->escape_state() < PointsToNode::ArgEscape) {
+
           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                  aat->isa_ptr() != NULL, "expecting an Ptr");
+#ifdef ASSERT
+          if (!(call->Opcode() == Op_CallLeafNoFP &&
+                call->as_CallLeaf()->_name != NULL &&
+                (strstr(call->as_CallLeaf()->_name, "arraycopy") != 0) ||
+                call->as_CallLeaf()->_name != NULL &&
+                (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre") == 0 ||
+                 strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ))
+          ) {
+            call->dump();
+            assert(false, "EA: unexpected CallLeaf");
+          }
+#endif
           set_escape_state(arg->_idx, PointsToNode::ArgEscape);
           if (arg->is_AddP()) {
             //
@@ -1706,10 +1792,11 @@ void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *pha
       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
         const Type* at = d->field_at(i);
         int k = i - TypeFunc::Parms;
+        Node *arg = call->in(i)->uncast();

-        if (at->isa_oopptr() != NULL) {
-          Node *arg = call->in(i)->uncast();
+        if (at->isa_oopptr() != NULL &&
+            ptnode_adr(arg->_idx)->escape_state() < PointsToNode::ArgEscape) {

           bool global_escapes = false;
           bool fields_escapes = false;
           if (!call_analyzer->is_arg_stack(k)) {
@@ -1942,20 +2029,23 @@ void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase)
       record_for_optimizer(n);
       _processed.set(n->_idx);
     } else {
-      // Have to process call's arguments first.
+      // Don't mark as processed since call's arguments have to be processed.
       PointsToNode::NodeType nt = PointsToNode::UnknownType;
+      PointsToNode::EscapeState es = PointsToNode::UnknownEscape;

       // Check if a call returns an object.
       const TypeTuple *r = n->as_Call()->tf()->range();
-      if (n->is_CallStaticJava() && r->cnt() > TypeFunc::Parms &&
+      if (r->cnt() > TypeFunc::Parms &&
+          r->field_at(TypeFunc::Parms)->isa_ptr() &&
           n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
-        // Note: use isa_ptr() instead of isa_oopptr() here because
-        // the _multianewarray functions return a TypeRawPtr.
-        if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
-          nt = PointsToNode::JavaObject;
+        nt = PointsToNode::JavaObject;
+        if (!n->is_CallStaticJava()) {
+          // Since the called mathod is statically unknown assume
+          // the worst case that the returned value globally escapes.
+          es = PointsToNode::GlobalEscape;
         }
       }
-      add_node(n, nt, PointsToNode::UnknownEscape, false);
+      add_node(n, nt, es, false);
     }
     return;
   }
@@ -2088,18 +2178,27 @@ void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase)
     }
     case Op_Proj:
     {
-      // we are only interested in the result projection from a call
+      // we are only interested in the oop result projection from a call
       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
+        const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
+        assert(r->cnt() > TypeFunc::Parms, "sanity");
+        if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
           add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
+          int ti = n->in(0)->_idx;
+          // The call may not be registered yet (since not all its inputs are registered)
+          // if this is the projection from backbranch edge of Phi.
+          if (ptnode_adr(ti)->node_type() != PointsToNode::UnknownType) {
             process_call_result(n->as_Proj(), phase);
+          }
           if (!_processed.test(n->_idx)) {
             // The call's result may need to be processed later if the call
             // returns it's argument and the argument is not processed yet.
             _delayed_worklist.push(n);
           }
-      } else {
-        _processed.set(n->_idx);
+          break;
+        }
       }
+      _processed.set(n->_idx);
       break;
     }
     case Op_Return:
|
|||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
case Op_AryEq:
|
||||||
|
case Op_StrComp:
|
||||||
|
case Op_StrEquals:
|
||||||
|
case Op_StrIndexOf:
|
||||||
|
{
|
||||||
|
// char[] arrays passed to string intrinsics are not scalar replaceable.
|
||||||
|
add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
|
||||||
|
break;
|
||||||
|
}
|
||||||
case Op_ThreadLocal:
|
case Op_ThreadLocal:
|
||||||
{
|
{
|
||||||
add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
|
add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
|
||||||
@@ -2174,6 +2282,7 @@ void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase)

 void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
   uint n_idx = n->_idx;
+  assert(ptnode_adr(n_idx)->_node != NULL, "node should be registered");

   // Don't set processed bit for AddP, LoadP, StoreP since
   // they may need more then one pass to process.
@@ -2211,6 +2320,7 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
     case Op_DecodeN:
     {
       int ti = n->in(1)->_idx;
+      assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "all nodes should be registered");
       if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
         add_pointsto_edge(n_idx, ti);
       } else {
@@ -2250,7 +2360,6 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
 #endif

       Node* adr = n->in(MemNode::Address)->uncast();
-      const Type *adr_type = phase->type(adr);
       Node* adr_base;
       if (adr->is_AddP()) {
         adr_base = get_addp_base(adr);
@@ -2302,13 +2411,19 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
     }
     case Op_Proj:
     {
-      // we are only interested in the result projection from a call
+      // we are only interested in the oop result projection from a call
       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
+        assert(ptnode_adr(n->in(0)->_idx)->node_type() != PointsToNode::UnknownType,
+               "all nodes should be registered");
+        const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
+        assert(r->cnt() > TypeFunc::Parms, "sanity");
+        if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
           process_call_result(n->as_Proj(), phase);
           assert(_processed.test(n_idx), "all call results should be processed");
-      } else {
-        assert(false, "Op_Proj");
+          break;
+        }
       }
+      assert(false, "Op_Proj");
       break;
     }
     case Op_Return:
@@ -2320,6 +2435,7 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
       }
 #endif
       int ti = n->in(TypeFunc::Parms)->_idx;
+      assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "node should be registered");
       if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
         add_pointsto_edge(n_idx, ti);
       } else {
@@ -2354,14 +2470,38 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
       }
       break;
     }
+    case Op_AryEq:
+    case Op_StrComp:
+    case Op_StrEquals:
+    case Op_StrIndexOf:
+    {
+      // char[] arrays passed to string intrinsic do not escape but
+      // they are not scalar replaceable. Adjust escape state for them.
+      // Start from in(2) edge since in(1) is memory edge.
+      for (uint i = 2; i < n->req(); i++) {
+        Node* adr = n->in(i)->uncast();
+        const Type *at = phase->type(adr);
+        if (!adr->is_top() && at->isa_ptr()) {
+          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
+                 at->isa_ptr() != NULL, "expecting an Ptr");
+          if (adr->is_AddP()) {
+            adr = get_addp_base(adr);
+          }
+          // Mark as ArgEscape everything "adr" could point to.
+          set_escape_state(adr->_idx, PointsToNode::ArgEscape);
+        }
+      }
+      _processed.set(n_idx);
+      break;
+    }
     case Op_ThreadLocal:
     {
       assert(false, "Op_ThreadLocal");
       break;
     }
     default:
-      ;
-      // nothing to do
+      // This method should be called only for EA specific nodes.
+      ShouldNotReachHere();
   }
 }

@@ -210,6 +210,8 @@ private:
   Unique_Node_List  _delayed_worklist; // Nodes to be processed before
                                        // the call build_connection_graph().

+  GrowableArray<MergeMemNode *> _mergemem_worklist; // List of all MergeMem nodes
+
   VectorSet         _processed; // Records which nodes have been
                                 // processed.

@@ -315,6 +317,9 @@ private:
   // Set the escape state of a node
   void set_escape_state(uint ni, PointsToNode::EscapeState es);

+  // Search for objects which are not scalar replaceable.
+  void verify_escape_state(int nidx, VectorSet& ptset, PhaseTransform* phase);
+
 public:
   ConnectionGraph(Compile *C);

@@ -616,8 +616,9 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, Vect
       assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
     }
   }
-  if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire &&
-      n->req() > TypeFunc::Parms ) {
+  if( n->is_Mach() && n->req() > TypeFunc::Parms &&
+      (n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire ||
+       n->as_Mach()->ideal_Opcode() == Op_MemBarVolatile) ) {
     // MemBarAcquire could be created without Precedent edge.
     // del_req() replaces the specified edge with the last input edge
     // and then removes the last edge. If the specified edge > number of
@@ -316,6 +316,21 @@ static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_me
         assert(adr_idx == Compile::AliasIdxRaw, "address must match or be raw");
       }
       mem = mem->in(MemNode::Memory);
+    } else if (mem->is_ClearArray()) {
+      if (!ClearArrayNode::step_through(&mem, alloc->_idx, phase)) {
+        // Can not bypass initialization of the instance
+        // we are looking.
+        debug_only(intptr_t offset;)
+        assert(alloc == AllocateNode::Ideal_allocation(mem->in(3), phase, offset), "sanity");
+        InitializeNode* init = alloc->as_Allocate()->initialization();
+        // We are looking for stored value, return Initialize node
+        // or memory edge from Allocate node.
+        if (init != NULL)
+          return init;
+        else
+          return alloc->in(TypeFunc::Memory); // It will produce zero value (see callers).
+      }
+      // Otherwise skip it (the call updated 'mem' value).
     } else if (mem->Opcode() == Op_SCMemProj) {
       assert(mem->in(0)->is_LoadStore(), "sanity");
       const TypePtr* atype = mem->in(0)->in(MemNode::Address)->bottom_type()->is_ptr();
@@ -823,6 +838,18 @@ void PhaseMacroExpand::process_users_of_allocation(AllocateNode *alloc) {
       Node *n = use->last_out(k);
       uint oc2 = use->outcnt();
       if (n->is_Store()) {
+#ifdef ASSERT
+        // Verify that there is no dependent MemBarVolatile nodes,
+        // they should be removed during IGVN, see MemBarNode::Ideal().
+        for (DUIterator_Fast pmax, p = n->fast_outs(pmax);
+                                   p < pmax; p++) {
+          Node* mb = n->fast_out(p);
+          assert(mb->is_Initialize() || !mb->is_MemBar() ||
+                 mb->req() <= MemBarNode::Precedent ||
+                 mb->in(MemBarNode::Precedent) != n,
+                 "MemBarVolatile should be eliminated for non-escaping object");
+        }
+#endif
         _igvn.replace_node(n, n->in(MemNode::Memory));
       } else {
         eliminate_card_mark(n);
@@ -123,6 +123,13 @@ Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr,
       } else {
         assert(false, "unexpected projection");
       }
+    } else if (result->is_ClearArray()) {
+      if (!ClearArrayNode::step_through(&result, instance_id, phase)) {
+        // Can not bypass initialization of the instance
+        // we are looking for.
+        break;
+      }
+      // Otherwise skip it (the call updated 'result' value).
     } else if (result->is_MergeMem()) {
       result = step_through_mergemem(phase, result->as_MergeMem(), t_adr, NULL, tty);
     }
@@ -537,6 +544,15 @@ Node* MemNode::find_previous_store(PhaseTransform* phase) {
     } else if (mem->is_Proj() && mem->in(0)->is_MemBar()) {
       mem = mem->in(0)->in(TypeFunc::Memory);
       continue;           // (a) advance through independent MemBar memory
+    } else if (mem->is_ClearArray()) {
+      if (ClearArrayNode::step_through(&mem, (uint)addr_t->instance_id(), phase)) {
+        // (the call updated 'mem' value)
+        continue;         // (a) advance through independent allocation memory
+      } else {
+        // Can not bypass initialization of the instance
+        // we are looking for.
+        return mem;
+      }
     } else if (mem->is_MergeMem()) {
       int alias_idx = phase->C->get_alias_index(adr_type());
       mem = mem->as_MergeMem()->memory_at(alias_idx);
@@ -2454,6 +2470,31 @@ Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape){
   return mem;
 }

+//----------------------------step_through----------------------------------
+// Return allocation input memory edge if it is different instance
+// or itself if it is the one we are looking for.
+bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseTransform* phase) {
+  Node* n = *np;
+  assert(n->is_ClearArray(), "sanity");
+  intptr_t offset;
+  AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
+  // This method is called only before Allocate nodes are expanded during
+  // macro nodes expansion. Before that ClearArray nodes are only generated
+  // in LibraryCallKit::generate_arraycopy() which follows allocations.
+  assert(alloc != NULL, "should have allocation");
+  if (alloc->_idx == instance_id) {
+    // Can not bypass initialization of the instance we are looking for.
+    return false;
+  }
+  // Otherwise skip it.
+  InitializeNode* init = alloc->initialization();
+  if (init != NULL)
+    *np = init->in(TypeFunc::Memory);
+  else
+    *np = alloc->in(TypeFunc::Memory);
+  return true;
+}
+
 //----------------------------clear_memory-------------------------------------
 // Generate code to initialize object storage to zero.
 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
@@ -2627,7 +2668,30 @@ MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) {
 // Return a node which is more "ideal" than the current node.  Strip out
 // control copies
 Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
-  return remove_dead_region(phase, can_reshape) ? this : NULL;
+  if (remove_dead_region(phase, can_reshape))  return this;
+
+  // Eliminate volatile MemBars for scalar replaced objects.
+  if (can_reshape && req() == (Precedent+1) &&
+      (Opcode() == Op_MemBarAcquire || Opcode() == Op_MemBarVolatile)) {
+    // Volatile field loads and stores.
+    Node* my_mem = in(MemBarNode::Precedent);
+    if (my_mem != NULL && my_mem->is_Mem()) {
+      const TypeOopPtr* t_oop = my_mem->in(MemNode::Address)->bottom_type()->isa_oopptr();
+      // Check for scalar replaced object reference.
+      if( t_oop != NULL && t_oop->is_known_instance_field() &&
+          t_oop->offset() != Type::OffsetBot &&
+          t_oop->offset() != Type::OffsetTop) {
+        // Replace MemBar projections by its inputs.
+        PhaseIterGVN* igvn = phase->is_IterGVN();
+        igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
+        igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control));
+        // Must return either the original node (now dead) or a new node
+        // (Do not return a top here, since that would break the uniqueness of top.)
+        return new (phase->C, 1) ConINode(TypeInt::ZERO);
+      }
+    }
+  }
+  return NULL;
 }

 //------------------------------Value------------------------------------------
@@ -717,7 +717,10 @@ public:
 //------------------------------ClearArray-------------------------------------
 class ClearArrayNode: public Node {
 public:
-  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base ) : Node(ctrl,arymem,word_cnt,base) {}
+  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base )
+    : Node(ctrl,arymem,word_cnt,base) {
+    init_class_id(Class_ClearArray);
+  }
   virtual int         Opcode() const;
   virtual const Type *bottom_type() const { return Type::MEMORY; }
   // ClearArray modifies array elements, and so affects only the
@@ -743,6 +746,9 @@ public:
                               Node* start_offset,
                               Node* end_offset,
                               PhaseGVN* phase);
+  // Return allocation input memory edge if it is different instance
+  // or itself if it is the one we are looking for.
+  static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
 };

 //------------------------------StrComp-------------------------------------
@@ -47,6 +47,7 @@ class CallStaticJavaNode;
 class CatchNode;
 class CatchProjNode;
 class CheckCastPPNode;
+class ClearArrayNode;
 class CmpNode;
 class CodeBuffer;
 class ConstraintCastNode;
@@ -599,8 +600,9 @@ public:
       DEFINE_CLASS_ID(BoxLock, Node, 10)
       DEFINE_CLASS_ID(Add, Node, 11)
       DEFINE_CLASS_ID(Mul, Node, 12)
+      DEFINE_CLASS_ID(ClearArray, Node, 13)

-    _max_classes = ClassMask_Mul
+    _max_classes = ClassMask_ClearArray
   };
   #undef DEFINE_CLASS_ID

@@ -698,6 +700,7 @@ public:
   DEFINE_CLASS_QUERY(CatchProj)
   DEFINE_CLASS_QUERY(CheckCastPP)
   DEFINE_CLASS_QUERY(ConstraintCast)
+  DEFINE_CLASS_QUERY(ClearArray)
   DEFINE_CLASS_QUERY(CMove)
   DEFINE_CLASS_QUERY(Cmp)
   DEFINE_CLASS_QUERY(CountedLoop)
@@ -240,19 +240,19 @@ void Parse::do_put_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool
     // membar is dependent on the store, keeping any other membars generated
     // below from floating up past the store.
     int adr_idx = C->get_alias_index(adr_type);
-    insert_mem_bar_volatile(Op_MemBarVolatile, adr_idx);
+    insert_mem_bar_volatile(Op_MemBarVolatile, adr_idx, store);

     // Now place a membar for AliasIdxBot for the unknown yet-to-be-parsed
     // volatile alias indices. Skip this if the membar is redundant.
     if (adr_idx != Compile::AliasIdxBot) {
-      insert_mem_bar_volatile(Op_MemBarVolatile, Compile::AliasIdxBot);
+      insert_mem_bar_volatile(Op_MemBarVolatile, Compile::AliasIdxBot, store);
     }

     // Finally, place alias-index-specific membars for each volatile index
     // that isn't the adr_idx membar. Typically there's only 1 or 2.
     for( int i = Compile::AliasIdxRaw; i < C->num_alias_types(); i++ ) {
       if (i != adr_idx && C->alias_type(i)->is_volatile()) {
-        insert_mem_bar_volatile(Op_MemBarVolatile, i);
+        insert_mem_bar_volatile(Op_MemBarVolatile, i, store);
       }
     }
   }
hotspot/test/compiler/6895383/Test.java (new file, 51 lines)
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6895383
+ * @summary JCK test throws NPE for method compiled with Escape Analysis
+ *
+ * @run main/othervm -Xcomp Test
+ */
+
+public class Test {
+    public static void main(String argv[]) {
+        Test test = new Test();
+        test.testRemove1_IndexOutOfBounds();
+        test.testAddAll1_IndexOutOfBoundsException();
+    }
+
+    public void testRemove1_IndexOutOfBounds() {
+        CopyOnWriteArrayList c = new CopyOnWriteArrayList();
+    }
+
+    public void testAddAll1_IndexOutOfBoundsException() {
+        try {
+            CopyOnWriteArrayList c = new CopyOnWriteArrayList();
+            c.addAll(-1, new LinkedList()); // should throw IndexOutOfBoundsException
+        } catch (IndexOutOfBoundsException e) {
+        }
+    }
+}
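A note on the new regression test: as rendered in this hunk, Test.java references CopyOnWriteArrayList and LinkedList without importing them, so the file would not compile as-is; the intended header presumably carries the usual imports, along the lines of:

import java.util.LinkedList;
import java.util.concurrent.CopyOnWriteArrayList;

Under jtreg the @test/@run tags drive execution: the harness compiles the test and launches it in a separate VM with -Xcomp, which forces immediate C2 compilation of the test methods and so exercises the escape-analysis path that produced the NPE.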