Zoltan Majo 2015-03-06 08:53:22 +01:00
commit 78b1c29418
29 changed files with 1939 additions and 422 deletions

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2096,8 +2096,14 @@ void LIR_OpProfileCall::print_instr(outputStream* out) const {
// LIR_OpProfileType
void LIR_OpProfileType::print_instr(outputStream* out) const {
out->print("exact = "); exact_klass()->print_name_on(out);
out->print("current = "); ciTypeEntries::print_ciklass(out, current_klass());
out->print("exact = ");
if (exact_klass() == NULL) {
out->print("unknown");
} else {
exact_klass()->print_name_on(out);
}
out->print(" current = "); ciTypeEntries::print_ciklass(out, current_klass());
out->print(" ");
mdp()->print(out); out->print(" ");
obj()->print(out); out->print(" ");
tmp()->print(out); out->print(" ");

@@ -43,7 +43,7 @@
#define TRACE_BCEA(level, code)
#endif
// Maintain a map of which aguments a local variable or
// Maintain a map of which arguments a local variable or
// stack slot may contain. In addition to tracking
// arguments, it tracks two special values, "allocated"
// which represents any object allocated in the current
@@ -319,14 +319,16 @@ void BCEscapeAnalyzer::invoke(StateInfo &state, Bytecodes::Code code, ciMethod*
bool must_record_dependencies = false;
for (i = arg_size - 1; i >= 0; i--) {
ArgumentMap arg = state.raw_pop();
if (!is_argument(arg))
// Check if callee arg is a caller arg or an allocated object
bool allocated = arg.contains_allocated();
if (!(is_argument(arg) || allocated))
continue;
for (int j = 0; j < _arg_size; j++) {
if (arg.contains(j)) {
_arg_modified[j] |= analyzer._arg_modified[i];
}
}
if (!is_arg_stack(arg)) {
if (!(is_arg_stack(arg) || allocated)) {
// arguments have already been recognized as escaping
} else if (analyzer.is_arg_stack(i) && !analyzer.is_arg_returned(i)) {
set_method_escape(arg);
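For illustration, a Java-level sketch (not part of this commit; class and method names are invented) of the pattern the change above targets: an object allocated in the caller no longer escapes merely because it is passed as a callee argument, as long as the callee does not let the argument escape.

public class EscapeSketch {
    static int sum(int[] a) {             // callee: the argument does not escape
        int s = 0;
        for (int v : a) s += v;
        return s;
    }
    public static void main(String[] args) {
        int[] tmp = {1, 2, 3};            // allocated in the caller, then passed down;
        System.out.println(sum(tmp));     // tracked via contains_allocated() above
    }
}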
@@ -1392,12 +1394,12 @@ void BCEscapeAnalyzer::dump() {
method()->print_short_name();
tty->print_cr(has_dependencies() ? " (not stored)" : "");
tty->print(" non-escaping args: ");
_arg_local.print_on(tty);
_arg_local.print();
tty->print(" stack-allocatable args: ");
_arg_stack.print_on(tty);
_arg_stack.print();
if (_return_local) {
tty->print(" returned args: ");
_arg_returned.print_on(tty);
_arg_returned.print();
} else if (is_return_allocated()) {
tty->print_cr(" return allocated value");
} else {

@@ -360,7 +360,7 @@ CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
* run the constructor for the CodeBlob subclass it is busy
* instantiating.
*/
CodeBlob* CodeCache::allocate(int size, int code_blob_type) {
CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool strict) {
// Possibly wakes up the sweeper thread.
NMethodSweeper::notify(code_blob_type);
assert_locked_or_safepoint(CodeCache_lock);
@@ -379,11 +379,28 @@ CodeBlob* CodeCache::allocate(int size, int code_blob_type) {
if (cb != NULL) break;
if (!heap->expand_by(CodeCacheExpansionSize)) {
// Expansion failed
if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonNMethod)) {
// Fallback solution: Store non-nmethod code in the non-profiled code heap.
// Note that at in the sweeper, we check the reverse_free_ratio of the non-profiled
// code heap and force stack scanning if less than 10% if the code heap are free.
return allocate(size, CodeBlobType::MethodNonProfiled);
if (SegmentedCodeCache && !strict) {
// Fallback solution: Try to store code in another code heap.
// Note that in the sweeper, we check the reverse_free_ratio of the code heap
// and force stack scanning if less than 10% of the code heap is free.
int type = code_blob_type;
switch (type) {
case CodeBlobType::NonNMethod:
type = CodeBlobType::MethodNonProfiled;
strict = false; // Allow recursive search for other heaps
break;
case CodeBlobType::MethodProfiled:
type = CodeBlobType::MethodNonProfiled;
strict = true;
break;
case CodeBlobType::MethodNonProfiled:
type = CodeBlobType::MethodProfiled;
strict = true;
break;
}
if (heap_available(type)) {
return allocate(size, type, strict);
}
}
MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CompileBroker::handle_full_code_cache(code_blob_type);
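A minimal Java model of the fallback order implemented above (illustrative only; the enum and method names below are not HotSpot code): the non-nmethod heap falls back to the non-profiled method heap and may keep searching, while the two method heaps fall back to each other exactly once because strict is set.

public class CodeHeapFallbackSketch {
    enum BlobType { NON_NMETHOD, METHOD_PROFILED, METHOD_NON_PROFILED }

    // Heap tried next when 'full' cannot be expanded; the comments mirror the
    // 'strict' flag above: a strict fallback must not recurse into further heaps.
    static BlobType fallback(BlobType full) {
        switch (full) {
            case NON_NMETHOD:         return BlobType.METHOD_NON_PROFILED; // non-strict
            case METHOD_PROFILED:     return BlobType.METHOD_NON_PROFILED; // strict
            case METHOD_NON_PROFILED: return BlobType.METHOD_PROFILED;     // strict
            default: throw new AssertionError(full);
        }
    }

    public static void main(String[] args) {
        for (BlobType t : BlobType.values()) {
            System.out.println(t + " -> " + fallback(t));
        }
    }
}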

@@ -122,7 +122,7 @@ class CodeCache : AllStatic {
static void initialize();
// Allocation/administration
static CodeBlob* allocate(int size, int code_blob_type); // allocates a new CodeBlob
static CodeBlob* allocate(int size, int code_blob_type, bool strict = false); // allocates a new CodeBlob
static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled
static int alignment_unit(); // guaranteed alignment of all CodeBlobs
static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)

@@ -0,0 +1,597 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled)
: CallNode(arraycopy_type(), NULL, TypeRawPtr::BOTTOM),
_alloc_tightly_coupled(alloc_tightly_coupled),
_kind(None),
_arguments_validated(false) {
init_class_id(Class_ArrayCopy);
init_flags(Flag_is_macro);
C->add_macro_node(this);
}
uint ArrayCopyNode::size_of() const { return sizeof(*this); }
ArrayCopyNode* ArrayCopyNode::make(GraphKit* kit, bool may_throw,
Node* src, Node* src_offset,
Node* dest, Node* dest_offset,
Node* length,
bool alloc_tightly_coupled,
Node* src_klass, Node* dest_klass,
Node* src_length, Node* dest_length) {
ArrayCopyNode* ac = new ArrayCopyNode(kit->C, alloc_tightly_coupled);
Node* prev_mem = kit->set_predefined_input_for_runtime_call(ac);
ac->init_req(ArrayCopyNode::Src, src);
ac->init_req(ArrayCopyNode::SrcPos, src_offset);
ac->init_req(ArrayCopyNode::Dest, dest);
ac->init_req(ArrayCopyNode::DestPos, dest_offset);
ac->init_req(ArrayCopyNode::Length, length);
ac->init_req(ArrayCopyNode::SrcLen, src_length);
ac->init_req(ArrayCopyNode::DestLen, dest_length);
ac->init_req(ArrayCopyNode::SrcKlass, src_klass);
ac->init_req(ArrayCopyNode::DestKlass, dest_klass);
if (may_throw) {
ac->set_req(TypeFunc::I_O , kit->i_o());
kit->add_safepoint_edges(ac, false);
}
return ac;
}
void ArrayCopyNode::connect_outputs(GraphKit* kit) {
kit->set_all_memory_call(this, true);
kit->set_control(kit->gvn().transform(new ProjNode(this,TypeFunc::Control)));
kit->set_i_o(kit->gvn().transform(new ProjNode(this, TypeFunc::I_O)));
kit->make_slow_call_ex(this, kit->env()->Throwable_klass(), true);
kit->set_all_memory_call(this);
}
#ifndef PRODUCT
const char* ArrayCopyNode::_kind_names[] = {"arraycopy", "arraycopy, validated arguments", "clone", "oop array clone", "CopyOf", "CopyOfRange"};
void ArrayCopyNode::dump_spec(outputStream *st) const {
CallNode::dump_spec(st);
st->print(" (%s%s)", _kind_names[_kind], _alloc_tightly_coupled ? ", tightly coupled allocation" : "");
}
#endif
intptr_t ArrayCopyNode::get_length_if_constant(PhaseGVN *phase) const {
// check that length is constant
Node* length = in(ArrayCopyNode::Length);
const Type* length_type = phase->type(length);
if (length_type == Type::TOP) {
return -1;
}
assert(is_clonebasic() || is_arraycopy() || is_copyof() || is_copyofrange(), "unexpected array copy type");
return is_clonebasic() ? length->find_intptr_t_con(-1) : length->find_int_con(-1);
}
int ArrayCopyNode::get_count(PhaseGVN *phase) const {
Node* src = in(ArrayCopyNode::Src);
const Type* src_type = phase->type(src);
if (is_clonebasic()) {
if (src_type->isa_instptr()) {
const TypeInstPtr* inst_src = src_type->is_instptr();
ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
// ciInstanceKlass::nof_nonstatic_fields() doesn't take injected
// fields into account. They are rare anyway so easier to simply
// skip instances with injected fields.
if ((!inst_src->klass_is_exact() && (ik->is_interface() || ik->has_subklass())) || ik->has_injected_fields()) {
return -1;
}
int nb_fields = ik->nof_nonstatic_fields();
return nb_fields;
} else {
const TypeAryPtr* ary_src = src_type->isa_aryptr();
assert (ary_src != NULL, "not an array or instance?");
// clone passes a length as a rounded number of longs. If we're
// cloning an array we'll do it element by element. If the
// length input to ArrayCopyNode is constant, length of input
// array must be too.
assert((get_length_if_constant(phase) == -1) == !ary_src->size()->is_con() ||
phase->is_IterGVN(), "inconsistent");
if (ary_src->size()->is_con()) {
return ary_src->size()->get_con();
}
return -1;
}
}
return get_length_if_constant(phase);
}
Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int count) {
if (!is_clonebasic()) {
return NULL;
}
Node* src = in(ArrayCopyNode::Src);
Node* dest = in(ArrayCopyNode::Dest);
Node* ctl = in(TypeFunc::Control);
Node* in_mem = in(TypeFunc::Memory);
const Type* src_type = phase->type(src);
assert(src->is_AddP(), "should be base + off");
assert(dest->is_AddP(), "should be base + off");
Node* base_src = src->in(AddPNode::Base);
Node* base_dest = dest->in(AddPNode::Base);
MergeMemNode* mem = MergeMemNode::make(in_mem);
const TypeInstPtr* inst_src = src_type->isa_instptr();
if (inst_src == NULL) {
return NULL;
}
if (!inst_src->klass_is_exact()) {
ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
assert(!ik->is_interface() && !ik->has_subklass(), "inconsistent klass hierarchy");
phase->C->dependencies()->assert_leaf_type(ik);
}
ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
assert(ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem, "too many fields");
for (int i = 0; i < count; i++) {
ciField* field = ik->nonstatic_field_at(i);
int fieldidx = phase->C->alias_type(field)->index();
const TypePtr* adr_type = phase->C->alias_type(field)->adr_type();
Node* off = phase->MakeConX(field->offset());
Node* next_src = phase->transform(new AddPNode(base_src,base_src,off));
Node* next_dest = phase->transform(new AddPNode(base_dest,base_dest,off));
BasicType bt = field->layout_type();
const Type *type;
if (bt == T_OBJECT) {
if (!field->type()->is_loaded()) {
type = TypeInstPtr::BOTTOM;
} else {
ciType* field_klass = field->type();
type = TypeOopPtr::make_from_klass(field_klass->as_klass());
}
} else {
type = Type::get_const_basic_type(bt);
}
Node* v = LoadNode::make(*phase, ctl, mem->memory_at(fieldidx), next_src, adr_type, type, bt, MemNode::unordered);
v = phase->transform(v);
Node* s = StoreNode::make(*phase, ctl, mem->memory_at(fieldidx), next_dest, adr_type, v, bt, MemNode::unordered);
s = phase->transform(s);
mem->set_memory_at(fieldidx, s);
}
if (!finish_transform(phase, can_reshape, ctl, mem)) {
return NULL;
}
return mem;
}
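As a Java-level illustration (assuming a leaf class with a small number of fields; the class is invented), the kind of clone that try_clone_instance() above can flatten into one load/store pair per field:

public class CloneSketch implements Cloneable {
    int x, y;                                // two non-static fields -> count == 2
    CloneSketch copy() {
        try {
            // For a leaf type like this, the clone can be expanded into a
            // load and a store per field instead of a runtime call.
            return (CloneSketch) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new AssertionError(e);
        }
    }
    public static void main(String[] args) {
        CloneSketch c = new CloneSketch();
        c.x = 1; c.y = 2;
        System.out.println(c.copy().x + ", " + c.copy().y);
    }
}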
bool ArrayCopyNode::prepare_array_copy(PhaseGVN *phase, bool can_reshape,
Node*& adr_src,
Node*& base_src,
Node*& adr_dest,
Node*& base_dest,
BasicType& copy_type,
const Type*& value_type,
bool& disjoint_bases) {
Node* src = in(ArrayCopyNode::Src);
Node* dest = in(ArrayCopyNode::Dest);
const Type* src_type = phase->type(src);
const TypeAryPtr* ary_src = src_type->isa_aryptr();
if (is_arraycopy() || is_copyofrange() || is_copyof()) {
const Type* dest_type = phase->type(dest);
const TypeAryPtr* ary_dest = dest_type->isa_aryptr();
Node* src_offset = in(ArrayCopyNode::SrcPos);
Node* dest_offset = in(ArrayCopyNode::DestPos);
// newly allocated object is guaranteed to not overlap with source object
disjoint_bases = is_alloc_tightly_coupled();
if (ary_src == NULL || ary_src->klass() == NULL ||
ary_dest == NULL || ary_dest->klass() == NULL) {
// We don't know if arguments are arrays
return false;
}
BasicType src_elem = ary_src->klass()->as_array_klass()->element_type()->basic_type();
BasicType dest_elem = ary_dest->klass()->as_array_klass()->element_type()->basic_type();
if (src_elem == T_ARRAY) src_elem = T_OBJECT;
if (dest_elem == T_ARRAY) dest_elem = T_OBJECT;
if (src_elem != dest_elem || dest_elem == T_VOID) {
// We don't know if arguments are arrays of the same type
return false;
}
if (dest_elem == T_OBJECT && (!is_alloc_tightly_coupled() || !GraphKit::use_ReduceInitialCardMarks())) {
// It's an object array copy but we can't emit the card marking
// that is needed
return false;
}
value_type = ary_src->elem();
base_src = src;
base_dest = dest;
uint shift = exact_log2(type2aelembytes(dest_elem));
uint header = arrayOopDesc::base_offset_in_bytes(dest_elem);
adr_src = src;
adr_dest = dest;
src_offset = Compile::conv_I2X_index(phase, src_offset, ary_src->size());
dest_offset = Compile::conv_I2X_index(phase, dest_offset, ary_dest->size());
Node* src_scale = phase->transform(new LShiftXNode(src_offset, phase->intcon(shift)));
Node* dest_scale = phase->transform(new LShiftXNode(dest_offset, phase->intcon(shift)));
adr_src = phase->transform(new AddPNode(base_src, adr_src, src_scale));
adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, dest_scale));
adr_src = new AddPNode(base_src, adr_src, phase->MakeConX(header));
adr_dest = new AddPNode(base_dest, adr_dest, phase->MakeConX(header));
adr_src = phase->transform(adr_src);
adr_dest = phase->transform(adr_dest);
copy_type = dest_elem;
} else {
assert (is_clonebasic(), "should be");
disjoint_bases = true;
assert(src->is_AddP(), "should be base + off");
assert(dest->is_AddP(), "should be base + off");
adr_src = src;
base_src = src->in(AddPNode::Base);
adr_dest = dest;
base_dest = dest->in(AddPNode::Base);
assert(phase->type(src->in(AddPNode::Offset))->is_intptr_t()->get_con() == phase->type(dest->in(AddPNode::Offset))->is_intptr_t()->get_con(), "same start offset?");
BasicType elem = ary_src->klass()->as_array_klass()->element_type()->basic_type();
if (elem == T_ARRAY) elem = T_OBJECT;
int diff = arrayOopDesc::base_offset_in_bytes(elem) - phase->type(src->in(AddPNode::Offset))->is_intptr_t()->get_con();
assert(diff >= 0, "clone should not start after 1st array element");
if (diff > 0) {
adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(diff)));
adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(diff)));
}
copy_type = elem;
value_type = ary_src->elem();
}
return true;
}
const TypePtr* ArrayCopyNode::get_address_type(PhaseGVN *phase, Node* n) {
const Type* at = phase->type(n);
assert(at != Type::TOP, "unexpected type");
const TypePtr* atp = at->isa_ptr();
// adjust atp to be the correct array element address type
atp = atp->add_offset(Type::OffsetBot);
return atp;
}
void ArrayCopyNode::array_copy_test_overlap(PhaseGVN *phase, bool can_reshape, bool disjoint_bases, int count, Node*& forward_ctl, Node*& backward_ctl) {
Node* ctl = in(TypeFunc::Control);
if (!disjoint_bases && count > 1) {
Node* src_offset = in(ArrayCopyNode::SrcPos);
Node* dest_offset = in(ArrayCopyNode::DestPos);
assert(src_offset != NULL && dest_offset != NULL, "should be");
Node* cmp = phase->transform(new CmpINode(src_offset, dest_offset));
Node *bol = phase->transform(new BoolNode(cmp, BoolTest::lt));
IfNode *iff = new IfNode(ctl, bol, PROB_FAIR, COUNT_UNKNOWN);
phase->transform(iff);
forward_ctl = phase->transform(new IfFalseNode(iff));
backward_ctl = phase->transform(new IfTrueNode(iff));
} else {
forward_ctl = ctl;
}
}
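The runtime test above selects a copy direction because System.arraycopy must behave as if the elements were first copied to a temporary array. A sketch (illustrative, not from the commit) of the overlapping case the backward branch handles:

public class OverlapSketch {
    public static void main(String[] args) {
        int[] a = {1, 2, 3, 4, 5};
        // dest offset > src offset: a naive forward element-by-element copy
        // would smear a[0]; the backward copy generated above avoids that.
        System.arraycopy(a, 0, a, 1, 4);
        System.out.println(java.util.Arrays.toString(a)); // [1, 1, 2, 3, 4]
    }
}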
Node* ArrayCopyNode::array_copy_forward(PhaseGVN *phase,
bool can_reshape,
Node* forward_ctl,
Node* start_mem_src,
Node* start_mem_dest,
const TypePtr* atp_src,
const TypePtr* atp_dest,
Node* adr_src,
Node* base_src,
Node* adr_dest,
Node* base_dest,
BasicType copy_type,
const Type* value_type,
int count) {
Node* mem = phase->C->top();
if (!forward_ctl->is_top()) {
// copy forward
mem = start_mem_dest;
if (count > 0) {
Node* v = LoadNode::make(*phase, forward_ctl, start_mem_src, adr_src, atp_src, value_type, copy_type, MemNode::unordered);
v = phase->transform(v);
mem = StoreNode::make(*phase, forward_ctl, mem, adr_dest, atp_dest, v, copy_type, MemNode::unordered);
mem = phase->transform(mem);
for (int i = 1; i < count; i++) {
Node* off = phase->MakeConX(type2aelembytes(copy_type) * i);
Node* next_src = phase->transform(new AddPNode(base_src,adr_src,off));
Node* next_dest = phase->transform(new AddPNode(base_dest,adr_dest,off));
v = LoadNode::make(*phase, forward_ctl, mem, next_src, atp_src, value_type, copy_type, MemNode::unordered);
v = phase->transform(v);
mem = StoreNode::make(*phase, forward_ctl,mem,next_dest,atp_dest,v, copy_type, MemNode::unordered);
mem = phase->transform(mem);
}
} else if(can_reshape) {
PhaseIterGVN* igvn = phase->is_IterGVN();
igvn->_worklist.push(adr_src);
igvn->_worklist.push(adr_dest);
}
}
return mem;
}
Node* ArrayCopyNode::array_copy_backward(PhaseGVN *phase,
bool can_reshape,
Node* backward_ctl,
Node* start_mem_src,
Node* start_mem_dest,
const TypePtr* atp_src,
const TypePtr* atp_dest,
Node* adr_src,
Node* base_src,
Node* adr_dest,
Node* base_dest,
BasicType copy_type,
const Type* value_type,
int count) {
Node* mem = phase->C->top();
if (!backward_ctl->is_top()) {
// copy backward
mem = start_mem_dest;
if (count > 0) {
for (int i = count-1; i >= 1; i--) {
Node* off = phase->MakeConX(type2aelembytes(copy_type) * i);
Node* next_src = phase->transform(new AddPNode(base_src,adr_src,off));
Node* next_dest = phase->transform(new AddPNode(base_dest,adr_dest,off));
Node* v = LoadNode::make(*phase, backward_ctl, mem, next_src, atp_src, value_type, copy_type, MemNode::unordered);
v = phase->transform(v);
mem = StoreNode::make(*phase, backward_ctl,mem,next_dest,atp_dest,v, copy_type, MemNode::unordered);
mem = phase->transform(mem);
}
Node* v = LoadNode::make(*phase, backward_ctl, mem, adr_src, atp_src, value_type, copy_type, MemNode::unordered);
v = phase->transform(v);
mem = StoreNode::make(*phase, backward_ctl, mem, adr_dest, atp_dest, v, copy_type, MemNode::unordered);
mem = phase->transform(mem);
} else if(can_reshape) {
PhaseIterGVN* igvn = phase->is_IterGVN();
igvn->_worklist.push(adr_src);
igvn->_worklist.push(adr_dest);
}
}
return mem;
}
bool ArrayCopyNode::finish_transform(PhaseGVN *phase, bool can_reshape,
Node* ctl, Node *mem) {
if (can_reshape) {
PhaseIterGVN* igvn = phase->is_IterGVN();
igvn->set_delay_transform(false);
if (is_clonebasic()) {
Node* out_mem = proj_out(TypeFunc::Memory);
if (out_mem->outcnt() != 1 || !out_mem->raw_out(0)->is_MergeMem() ||
out_mem->raw_out(0)->outcnt() != 1 || !out_mem->raw_out(0)->raw_out(0)->is_MemBar()) {
assert(!GraphKit::use_ReduceInitialCardMarks(), "can only happen with card marking");
return false;
}
igvn->replace_node(out_mem->raw_out(0), mem);
Node* out_ctl = proj_out(TypeFunc::Control);
igvn->replace_node(out_ctl, ctl);
} else {
// replace fallthrough projections of the ArrayCopyNode by the
// new memory, control and the input IO.
CallProjections callprojs;
extract_projections(&callprojs, true);
igvn->replace_node(callprojs.fallthrough_ioproj, in(TypeFunc::I_O));
igvn->replace_node(callprojs.fallthrough_memproj, mem);
igvn->replace_node(callprojs.fallthrough_catchproj, ctl);
// The ArrayCopyNode is not disconnected. It still has the
// projections for the exception case. Replace current
// ArrayCopyNode with a dummy new one with a top() control so
// that this part of the graph stays consistent but is
// eventually removed.
set_req(0, phase->C->top());
remove_dead_region(phase, can_reshape);
}
} else {
if (in(TypeFunc::Control) != ctl) {
// we can't return new memory and control from Ideal at parse time
assert(!is_clonebasic(), "added control for clone?");
return false;
}
}
return true;
}
Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (remove_dead_region(phase, can_reshape)) return this;
if (StressArrayCopyMacroNode && !can_reshape) {
phase->record_for_igvn(this);
return NULL;
}
// See if it's a small array copy and we can inline it as
// loads/stores
// Here we can only do:
// - arraycopy if all arguments were validated before and we don't
// need card marking
// - clone for which we don't need to do card marking
if (!is_clonebasic() && !is_arraycopy_validated() &&
!is_copyofrange_validated() && !is_copyof_validated()) {
return NULL;
}
assert(in(TypeFunc::Control) != NULL &&
in(TypeFunc::Memory) != NULL &&
in(ArrayCopyNode::Src) != NULL &&
in(ArrayCopyNode::Dest) != NULL &&
in(ArrayCopyNode::Length) != NULL &&
((in(ArrayCopyNode::SrcPos) != NULL && in(ArrayCopyNode::DestPos) != NULL) ||
is_clonebasic()), "broken inputs");
if (in(TypeFunc::Control)->is_top() ||
in(TypeFunc::Memory)->is_top() ||
phase->type(in(ArrayCopyNode::Src)) == Type::TOP ||
phase->type(in(ArrayCopyNode::Dest)) == Type::TOP ||
(in(ArrayCopyNode::SrcPos) != NULL && in(ArrayCopyNode::SrcPos)->is_top()) ||
(in(ArrayCopyNode::DestPos) != NULL && in(ArrayCopyNode::DestPos)->is_top())) {
return NULL;
}
int count = get_count(phase);
if (count < 0 || count > ArrayCopyLoadStoreMaxElem) {
return NULL;
}
Node* mem = try_clone_instance(phase, can_reshape, count);
if (mem != NULL) {
return mem;
}
Node* adr_src = NULL;
Node* base_src = NULL;
Node* adr_dest = NULL;
Node* base_dest = NULL;
BasicType copy_type = T_ILLEGAL;
const Type* value_type = NULL;
bool disjoint_bases = false;
if (!prepare_array_copy(phase, can_reshape,
adr_src, base_src, adr_dest, base_dest,
copy_type, value_type, disjoint_bases)) {
return NULL;
}
Node* src = in(ArrayCopyNode::Src);
Node* dest = in(ArrayCopyNode::Dest);
const TypePtr* atp_src = get_address_type(phase, src);
const TypePtr* atp_dest = get_address_type(phase, dest);
uint alias_idx_src = phase->C->get_alias_index(atp_src);
uint alias_idx_dest = phase->C->get_alias_index(atp_dest);
Node *in_mem = in(TypeFunc::Memory);
Node *start_mem_src = in_mem;
Node *start_mem_dest = in_mem;
if (in_mem->is_MergeMem()) {
start_mem_src = in_mem->as_MergeMem()->memory_at(alias_idx_src);
start_mem_dest = in_mem->as_MergeMem()->memory_at(alias_idx_dest);
}
if (can_reshape) {
assert(!phase->is_IterGVN()->delay_transform(), "cannot delay transforms");
phase->is_IterGVN()->set_delay_transform(true);
}
Node* backward_ctl = phase->C->top();
Node* forward_ctl = phase->C->top();
array_copy_test_overlap(phase, can_reshape, disjoint_bases, count, forward_ctl, backward_ctl);
Node* forward_mem = array_copy_forward(phase, can_reshape, forward_ctl,
start_mem_src, start_mem_dest,
atp_src, atp_dest,
adr_src, base_src, adr_dest, base_dest,
copy_type, value_type, count);
Node* backward_mem = array_copy_backward(phase, can_reshape, backward_ctl,
start_mem_src, start_mem_dest,
atp_src, atp_dest,
adr_src, base_src, adr_dest, base_dest,
copy_type, value_type, count);
Node* ctl = NULL;
if (!forward_ctl->is_top() && !backward_ctl->is_top()) {
ctl = new RegionNode(3);
mem = new PhiNode(ctl, Type::MEMORY, atp_dest);
ctl->init_req(1, forward_ctl);
mem->init_req(1, forward_mem);
ctl->init_req(2, backward_ctl);
mem->init_req(2, backward_mem);
ctl = phase->transform(ctl);
mem = phase->transform(mem);
} else if (!forward_ctl->is_top()) {
ctl = forward_ctl;
mem = forward_mem;
} else {
assert(!backward_ctl->is_top(), "no copy?");
ctl = backward_ctl;
mem = backward_mem;
}
if (can_reshape) {
assert(phase->is_IterGVN()->delay_transform(), "should be delaying transforms");
phase->is_IterGVN()->set_delay_transform(false);
}
MergeMemNode* out_mem = MergeMemNode::make(in_mem);
out_mem->set_memory_at(alias_idx_dest, mem);
mem = out_mem;
if (!finish_transform(phase, can_reshape, ctl, mem)) {
return NULL;
}
return mem;
}

@@ -0,0 +1,164 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_OPTO_ARRAYCOPYNODE_HPP
#define SHARE_VM_OPTO_ARRAYCOPYNODE_HPP
#include "opto/callnode.hpp"
class GraphKit;
class ArrayCopyNode : public CallNode {
private:
// What kind of arraycopy variant is this?
enum {
None, // not set yet
ArrayCopy, // System.arraycopy()
CloneBasic, // A clone that can be copied by 64 bit chunks
CloneOop, // An oop array clone
CopyOf, // Arrays.copyOf()
CopyOfRange // Arrays.copyOfRange()
} _kind;
#ifndef PRODUCT
static const char* _kind_names[CopyOfRange+1];
#endif
// Is the alloc obtained with
// AllocateArrayNode::Ideal_array_allocation() tightly coupled
// (i.e., does the arraycopy immediately follow the allocation)?
// We cache the result of LibraryCallKit::tightly_coupled_allocation
// here because it's much easier to find out whether there's a tightly
// coupled allocation at parse time than at macro expansion time. At
// macro expansion time, for every use of the allocation node we
// would need to figure out whether it happens after the arraycopy (and
// can be ignored) or between the allocation and the arraycopy. At
// parse time, it's straightforward because whatever happens after
// the arraycopy is not parsed yet, so it doesn't exist when
// LibraryCallKit::tightly_coupled_allocation() is called.
bool _alloc_tightly_coupled;
bool _arguments_validated;
static const TypeFunc* arraycopy_type() {
const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
fields[Src] = TypeInstPtr::BOTTOM;
fields[SrcPos] = TypeInt::INT;
fields[Dest] = TypeInstPtr::BOTTOM;
fields[DestPos] = TypeInt::INT;
fields[Length] = TypeInt::INT;
fields[SrcLen] = TypeInt::INT;
fields[DestLen] = TypeInt::INT;
fields[SrcKlass] = TypeKlassPtr::BOTTOM;
fields[DestKlass] = TypeKlassPtr::BOTTOM;
const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
// create result type (range)
fields = TypeTuple::fields(0);
const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
return TypeFunc::make(domain, range);
}
ArrayCopyNode(Compile* C, bool alloc_tightly_coupled);
intptr_t get_length_if_constant(PhaseGVN *phase) const;
int get_count(PhaseGVN *phase) const;
static const TypePtr* get_address_type(PhaseGVN *phase, Node* n);
Node* try_clone_instance(PhaseGVN *phase, bool can_reshape, int count);
Node* conv_I2X_offset(PhaseGVN *phase, Node* offset, const TypeAryPtr* ary_t);
bool prepare_array_copy(PhaseGVN *phase, bool can_reshape,
Node*& adr_src, Node*& base_src, Node*& adr_dest, Node*& base_dest,
BasicType& copy_type, const Type*& value_type, bool& disjoint_bases);
void array_copy_test_overlap(PhaseGVN *phase, bool can_reshape,
bool disjoint_bases, int count,
Node*& forward_ctl, Node*& backward_ctl);
Node* array_copy_forward(PhaseGVN *phase, bool can_reshape, Node* ctl,
Node* start_mem_src, Node* start_mem_dest,
const TypePtr* atp_src, const TypePtr* atp_dest,
Node* adr_src, Node* base_src, Node* adr_dest, Node* base_dest,
BasicType copy_type, const Type* value_type, int count);
Node* array_copy_backward(PhaseGVN *phase, bool can_reshape, Node* ctl,
Node *start_mem_src, Node* start_mem_dest,
const TypePtr* atp_src, const TypePtr* atp_dest,
Node* adr_src, Node* base_src, Node* adr_dest, Node* base_dest,
BasicType copy_type, const Type* value_type, int count);
bool finish_transform(PhaseGVN *phase, bool can_reshape,
Node* ctl, Node *mem);
public:
enum {
Src = TypeFunc::Parms,
SrcPos,
Dest,
DestPos,
Length,
SrcLen,
DestLen,
SrcKlass,
DestKlass,
ParmLimit
};
static ArrayCopyNode* make(GraphKit* kit, bool may_throw,
Node* src, Node* src_offset,
Node* dest, Node* dest_offset,
Node* length,
bool alloc_tightly_coupled,
Node* src_klass = NULL, Node* dest_klass = NULL,
Node* src_length = NULL, Node* dest_length = NULL);
void connect_outputs(GraphKit* kit);
bool is_arraycopy() const { assert(_kind != None, "should be set"); return _kind == ArrayCopy; }
bool is_arraycopy_validated() const { assert(_kind != None, "should be set"); return _kind == ArrayCopy && _arguments_validated; }
bool is_clonebasic() const { assert(_kind != None, "should be set"); return _kind == CloneBasic; }
bool is_cloneoop() const { assert(_kind != None, "should be set"); return _kind == CloneOop; }
bool is_copyof() const { assert(_kind != None, "should be set"); return _kind == CopyOf; }
bool is_copyof_validated() const { assert(_kind != None, "should be set"); return _kind == CopyOf && _arguments_validated; }
bool is_copyofrange() const { assert(_kind != None, "should be set"); return _kind == CopyOfRange; }
bool is_copyofrange_validated() const { assert(_kind != None, "should be set"); return _kind == CopyOfRange && _arguments_validated; }
void set_arraycopy(bool validated) { assert(_kind == None, "shouldn't be set yet"); _kind = ArrayCopy; _arguments_validated = validated; }
void set_clonebasic() { assert(_kind == None, "shouldn't be set yet"); _kind = CloneBasic; }
void set_cloneoop() { assert(_kind == None, "shouldn't be set yet"); _kind = CloneOop; }
void set_copyof(bool validated) { assert(_kind == None, "shouldn't be set yet"); _kind = CopyOf; _arguments_validated = validated; }
void set_copyofrange(bool validated) { assert(_kind == None, "shouldn't be set yet"); _kind = CopyOfRange; _arguments_validated = validated; }
virtual int Opcode() const;
virtual uint size_of() const; // Size is bigger
virtual bool guaranteed_safepoint() { return false; }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
bool is_alloc_tightly_coupled() const { return _alloc_tightly_coupled; }
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;
#endif
};
#endif // SHARE_VM_OPTO_ARRAYCOPYNODE_HPP

@@ -1875,194 +1875,3 @@ void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag) cons
log->tail(tag);
}
}
ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled)
: CallNode(arraycopy_type(), NULL, TypeRawPtr::BOTTOM),
_alloc_tightly_coupled(alloc_tightly_coupled),
_kind(None),
_arguments_validated(false) {
init_class_id(Class_ArrayCopy);
init_flags(Flag_is_macro);
C->add_macro_node(this);
}
uint ArrayCopyNode::size_of() const { return sizeof(*this); }
ArrayCopyNode* ArrayCopyNode::make(GraphKit* kit, bool may_throw,
Node* src, Node* src_offset,
Node* dest, Node* dest_offset,
Node* length,
bool alloc_tightly_coupled,
Node* src_klass, Node* dest_klass,
Node* src_length, Node* dest_length) {
ArrayCopyNode* ac = new ArrayCopyNode(kit->C, alloc_tightly_coupled);
Node* prev_mem = kit->set_predefined_input_for_runtime_call(ac);
ac->init_req(ArrayCopyNode::Src, src);
ac->init_req(ArrayCopyNode::SrcPos, src_offset);
ac->init_req(ArrayCopyNode::Dest, dest);
ac->init_req(ArrayCopyNode::DestPos, dest_offset);
ac->init_req(ArrayCopyNode::Length, length);
ac->init_req(ArrayCopyNode::SrcLen, src_length);
ac->init_req(ArrayCopyNode::DestLen, dest_length);
ac->init_req(ArrayCopyNode::SrcKlass, src_klass);
ac->init_req(ArrayCopyNode::DestKlass, dest_klass);
if (may_throw) {
ac->set_req(TypeFunc::I_O , kit->i_o());
kit->add_safepoint_edges(ac, false);
}
return ac;
}
void ArrayCopyNode::connect_outputs(GraphKit* kit) {
kit->set_all_memory_call(this, true);
kit->set_control(kit->gvn().transform(new ProjNode(this,TypeFunc::Control)));
kit->set_i_o(kit->gvn().transform(new ProjNode(this, TypeFunc::I_O)));
kit->make_slow_call_ex(this, kit->env()->Throwable_klass(), true);
kit->set_all_memory_call(this);
}
#ifndef PRODUCT
const char* ArrayCopyNode::_kind_names[] = {"arraycopy", "arraycopy, validated arguments", "clone", "oop array clone", "CopyOf", "CopyOfRange"};
void ArrayCopyNode::dump_spec(outputStream *st) const {
CallNode::dump_spec(st);
st->print(" (%s%s)", _kind_names[_kind], _alloc_tightly_coupled ? ", tightly coupled allocation" : "");
}
#endif
int ArrayCopyNode::get_count(PhaseGVN *phase) const {
Node* src = in(ArrayCopyNode::Src);
const Type* src_type = phase->type(src);
assert(is_clonebasic(), "unexpected arraycopy type");
if (src_type->isa_instptr()) {
const TypeInstPtr* inst_src = src_type->is_instptr();
ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
// ciInstanceKlass::nof_nonstatic_fields() doesn't take injected
// fields into account. They are rare anyway so easier to simply
// skip instances with injected fields.
if ((!inst_src->klass_is_exact() && (ik->is_interface() || ik->has_subklass())) || ik->has_injected_fields()) {
return -1;
}
int nb_fields = ik->nof_nonstatic_fields();
return nb_fields;
}
return -1;
}
Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int count) {
assert(is_clonebasic(), "unexpected arraycopy type");
Node* src = in(ArrayCopyNode::Src);
Node* dest = in(ArrayCopyNode::Dest);
Node* ctl = in(TypeFunc::Control);
Node* in_mem = in(TypeFunc::Memory);
const Type* src_type = phase->type(src);
const Type* dest_type = phase->type(dest);
assert(src->is_AddP(), "should be base + off");
assert(dest->is_AddP(), "should be base + off");
Node* base_src = src->in(AddPNode::Base);
Node* base_dest = dest->in(AddPNode::Base);
MergeMemNode* mem = MergeMemNode::make(in_mem);
const TypeInstPtr* inst_src = src_type->is_instptr();
if (!inst_src->klass_is_exact()) {
ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
assert(!ik->is_interface() && !ik->has_subklass(), "inconsistent klass hierarchy");
phase->C->dependencies()->assert_leaf_type(ik);
}
ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
assert(ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem, "too many fields");
for (int i = 0; i < count; i++) {
ciField* field = ik->nonstatic_field_at(i);
int fieldidx = phase->C->alias_type(field)->index();
const TypePtr* adr_type = phase->C->alias_type(field)->adr_type();
Node* off = phase->MakeConX(field->offset());
Node* next_src = phase->transform(new AddPNode(base_src,base_src,off));
Node* next_dest = phase->transform(new AddPNode(base_dest,base_dest,off));
BasicType bt = field->layout_type();
const Type *type;
if (bt == T_OBJECT) {
if (!field->type()->is_loaded()) {
type = TypeInstPtr::BOTTOM;
} else {
ciType* field_klass = field->type();
type = TypeOopPtr::make_from_klass(field_klass->as_klass());
}
} else {
type = Type::get_const_basic_type(bt);
}
Node* v = LoadNode::make(*phase, ctl, mem->memory_at(fieldidx), next_src, adr_type, type, bt, MemNode::unordered);
v = phase->transform(v);
Node* s = StoreNode::make(*phase, ctl, mem->memory_at(fieldidx), next_dest, adr_type, v, bt, MemNode::unordered);
s = phase->transform(s);
mem->set_memory_at(fieldidx, s);
}
if (!finish_transform(phase, can_reshape, ctl, mem)) {
return NULL;
}
return mem;
}
bool ArrayCopyNode::finish_transform(PhaseGVN *phase, bool can_reshape,
Node* ctl, Node *mem) {
if (can_reshape) {
PhaseIterGVN* igvn = phase->is_IterGVN();
assert(is_clonebasic(), "unexpected arraycopy type");
Node* out_mem = proj_out(TypeFunc::Memory);
if (out_mem->outcnt() != 1 || !out_mem->raw_out(0)->is_MergeMem() ||
out_mem->raw_out(0)->outcnt() != 1 || !out_mem->raw_out(0)->raw_out(0)->is_MemBar()) {
assert(!GraphKit::use_ReduceInitialCardMarks(), "can only happen with card marking");
return false;
}
igvn->replace_node(out_mem->raw_out(0), mem);
Node* out_ctl = proj_out(TypeFunc::Control);
igvn->replace_node(out_ctl, ctl);
}
return true;
}
Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (remove_dead_region(phase, can_reshape)) return this;
if (StressArrayCopyMacroNode && !can_reshape) return NULL;
// See if it's a small array copy and we can inline it as
// loads/stores
// Here we can only do:
// - clone for which we don't need to do card marking
if (!is_clonebasic()) {
return NULL;
}
if (in(TypeFunc::Control)->is_top() || in(TypeFunc::Memory)->is_top()) {
return NULL;
}
int count = get_count(phase);
if (count < 0 || count > ArrayCopyLoadStoreMaxElem) {
return NULL;
}
Node* mem = try_clone_instance(phase, can_reshape, count);
return mem;
}

@@ -1083,117 +1083,4 @@ public:
#endif
};
class GraphKit;
class ArrayCopyNode : public CallNode {
private:
// What kind of arraycopy variant is this?
enum {
None, // not set yet
ArrayCopy, // System.arraycopy()
CloneBasic, // A clone that can be copied by 64 bit chunks
CloneOop, // An oop array clone
CopyOf, // Arrays.copyOf()
CopyOfRange // Arrays.copyOfRange()
} _kind;
#ifndef PRODUCT
static const char* _kind_names[CopyOfRange+1];
#endif
// Is the alloc obtained with
// AllocateArrayNode::Ideal_array_allocation() tightly coupled
// (i.e., does the arraycopy immediately follow the allocation)?
// We cache the result of LibraryCallKit::tightly_coupled_allocation
// here because it's much easier to find out whether there's a tightly
// coupled allocation at parse time than at macro expansion time. At
// macro expansion time, for every use of the allocation node we
// would need to figure out whether it happens after the arraycopy (and
// can be ignored) or between the allocation and the arraycopy. At
// parse time, it's straightforward because whatever happens after
// the arraycopy is not parsed yet, so it doesn't exist when
// LibraryCallKit::tightly_coupled_allocation() is called.
bool _alloc_tightly_coupled;
bool _arguments_validated;
static const TypeFunc* arraycopy_type() {
const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
fields[Src] = TypeInstPtr::BOTTOM;
fields[SrcPos] = TypeInt::INT;
fields[Dest] = TypeInstPtr::BOTTOM;
fields[DestPos] = TypeInt::INT;
fields[Length] = TypeInt::INT;
fields[SrcLen] = TypeInt::INT;
fields[DestLen] = TypeInt::INT;
fields[SrcKlass] = TypeKlassPtr::BOTTOM;
fields[DestKlass] = TypeKlassPtr::BOTTOM;
const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
// create result type (range)
fields = TypeTuple::fields(0);
const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
return TypeFunc::make(domain, range);
}
ArrayCopyNode(Compile* C, bool alloc_tightly_coupled);
int get_count(PhaseGVN *phase) const;
static const TypePtr* get_address_type(PhaseGVN *phase, Node* n);
Node* try_clone_instance(PhaseGVN *phase, bool can_reshape, int count);
bool finish_transform(PhaseGVN *phase, bool can_reshape,
Node* ctl, Node *mem);
public:
enum {
Src = TypeFunc::Parms,
SrcPos,
Dest,
DestPos,
Length,
SrcLen,
DestLen,
SrcKlass,
DestKlass,
ParmLimit
};
static ArrayCopyNode* make(GraphKit* kit, bool may_throw,
Node* src, Node* src_offset,
Node* dest, Node* dest_offset,
Node* length,
bool alloc_tightly_coupled,
Node* src_klass = NULL, Node* dest_klass = NULL,
Node* src_length = NULL, Node* dest_length = NULL);
void connect_outputs(GraphKit* kit);
bool is_arraycopy() const { assert(_kind != None, "should be set"); return _kind == ArrayCopy; }
bool is_arraycopy_validated() const { assert(_kind != None, "should be set"); return _kind == ArrayCopy && _arguments_validated; }
bool is_clonebasic() const { assert(_kind != None, "should be set"); return _kind == CloneBasic; }
bool is_cloneoop() const { assert(_kind != None, "should be set"); return _kind == CloneOop; }
bool is_copyof() const { assert(_kind != None, "should be set"); return _kind == CopyOf; }
bool is_copyofrange() const { assert(_kind != None, "should be set"); return _kind == CopyOfRange; }
void set_arraycopy(bool validated) { assert(_kind == None, "shouldn't be set yet"); _kind = ArrayCopy; _arguments_validated = validated; }
void set_clonebasic() { assert(_kind == None, "shouldn't be set yet"); _kind = CloneBasic; }
void set_cloneoop() { assert(_kind == None, "shouldn't be set yet"); _kind = CloneOop; }
void set_copyof() { assert(_kind == None, "shouldn't be set yet"); _kind = CopyOf; _arguments_validated = false; }
void set_copyofrange() { assert(_kind == None, "shouldn't be set yet"); _kind = CopyOfRange; _arguments_validated = false; }
virtual int Opcode() const;
virtual uint size_of() const; // Size is bigger
virtual bool guaranteed_safepoint() { return false; }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
bool is_alloc_tightly_coupled() const { return _alloc_tightly_coupled; }
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;
#endif
};
#endif // SHARE_VM_OPTO_CALLNODE_HPP

@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "opto/addnode.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"

@@ -42,6 +42,7 @@
#include "opto/chaitin.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/divnode.hpp"
#include "opto/escape.hpp"
#include "opto/idealGraphPrinter.hpp"
@@ -3867,6 +3868,26 @@ int Compile::static_subtype_check(ciKlass* superk, ciKlass* subk) {
return SSC_full_test;
}
Node* Compile::conv_I2X_index(PhaseGVN *phase, Node* idx, const TypeInt* sizetype) {
#ifdef _LP64
// The scaled index operand to AddP must be a clean 64-bit value.
// Java allows a 32-bit int to be incremented to a negative
// value, which appears in a 64-bit register as a large
// positive number. Using that large positive number as an
// operand in pointer arithmetic has bad consequences.
// On the other hand, 32-bit overflow is rare, and the possibility
// can often be excluded, if we annotate the ConvI2L node with
// a type assertion that its value is known to be a small positive
// number. (The prior range check has ensured this.)
// This assertion is used by ConvI2LNode::Ideal.
int index_max = max_jint - 1; // array size is max_jint, index is one less
if (sizetype != NULL) index_max = sizetype->_hi - 1;
const TypeLong* lidxtype = TypeLong::make(CONST64(0), index_max, Type::WidenMax);
idx = phase->transform(new ConvI2LNode(idx, lidxtype));
#endif
return idx;
}
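A Java illustration (not part of the commit) of the overflow hazard the comment describes: an int that wrapped around to a negative value turns into a huge positive number if its upper 32 bits are not produced by a clean sign extension.

public class ConvI2LSketch {
    public static void main(String[] args) {
        int idx = Integer.MAX_VALUE;
        idx++;                                 // wraps to Integer.MIN_VALUE
        long signExtended = idx;               // -2147483648: what ConvI2L produces
        long asUnsigned = idx & 0xFFFFFFFFL;   //  2147483648: the "large positive number"
        System.out.println(signExtended + " vs " + asUnsigned);
    }
}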
// The message about the current inlining is accumulated in
// _print_inlining_stream and transfered into the _print_inlining_list
// once we know whether inlining succeeds or not. For regular

@@ -74,6 +74,7 @@ class SafePointNode;
class JVMState;
class Type;
class TypeData;
class TypeInt;
class TypePtr;
class TypeOopPtr;
class TypeFunc;
@@ -1221,6 +1222,8 @@ class Compile : public Phase {
enum { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
int static_subtype_check(ciKlass* superk, ciKlass* subk);
static Node* conv_I2X_index(PhaseGVN *phase, Node* offset, const TypeInt* sizetype);
// Auxiliary method for randomized fuzzing/stressing
static bool randomized_select(int count);
};

@@ -1660,22 +1660,7 @@ Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
// must be correct type for alignment purposes
Node* base = basic_plus_adr(ary, header);
#ifdef _LP64
// The scaled index operand to AddP must be a clean 64-bit value.
// Java allows a 32-bit int to be incremented to a negative
// value, which appears in a 64-bit register as a large
// positive number. Using that large positive number as an
// operand in pointer arithmetic has bad consequences.
// On the other hand, 32-bit overflow is rare, and the possibility
// can often be excluded, if we annotate the ConvI2L node with
// a type assertion that its value is known to be a small positive
// number. (The prior range check has ensured this.)
// This assertion is used by ConvI2LNode::Ideal.
int index_max = max_jint - 1; // array size is max_jint, index is one less
if (sizetype != NULL) index_max = sizetype->_hi - 1;
const TypeLong* lidxtype = TypeLong::make(CONST64(0), index_max, Type::WidenMax);
idx = _gvn.transform( new ConvI2LNode(idx, lidxtype) );
#endif
idx = Compile::conv_I2X_index(&_gvn, idx, sizetype);
Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
return basic_plus_adr(ary, base, scale);
}

@@ -30,6 +30,7 @@
#include "compiler/compileLog.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
@@ -3867,26 +3868,65 @@ bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
newcopy = new_array(klass_node, length, 0); // no arguments to push
// Generate a direct call to the right arraycopy function(s).
// We know the copy is disjoint but we might not know if the
// oop stores need checking.
// Extreme case: Arrays.copyOf((Integer[])x, 10, String[].class).
// This will fail a store-check if x contains any non-nulls.
Node* alloc = tightly_coupled_allocation(newcopy, NULL);
// ArrayCopyNode:Ideal may transform the ArrayCopyNode to
// loads/stores but it is legal only if we're sure the
// Arrays.copyOf would succeed. So we need all input arguments
// to the copyOf to be validated, including that the copy to the
// new array won't trigger an ArrayStoreException. That subtype
// check can be optimized if we know something on the type of
// the input array from type speculation.
if (_gvn.type(klass_node)->singleton()) {
ciKlass* subk = _gvn.type(load_object_klass(original))->is_klassptr()->klass();
ciKlass* superk = _gvn.type(klass_node)->is_klassptr()->klass();
ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, alloc != NULL,
int test = C->static_subtype_check(superk, subk);
if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
if (t_original->speculative_type() != NULL) {
original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
}
}
}
bool validated = false;
// Reason_class_check rather than Reason_intrinsic because we
// want to intrinsify even if this traps.
if (!too_many_traps(Deoptimization::Reason_class_check)) {
Node* not_subtype_ctrl = gen_subtype_check(load_object_klass(original),
klass_node);
if (not_subtype_ctrl != top()) {
PreserveJVMState pjvms(this);
set_control(not_subtype_ctrl);
uncommon_trap(Deoptimization::Reason_class_check,
Deoptimization::Action_make_not_entrant);
assert(stopped(), "Should be stopped");
}
validated = true;
}
newcopy = new_array(klass_node, length, 0); // no arguments to push
ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true,
load_object_klass(original), klass_node);
if (!is_copyOfRange) {
ac->set_copyof();
ac->set_copyof(validated);
} else {
ac->set_copyofrange();
ac->set_copyofrange(validated);
}
Node* n = _gvn.transform(ac);
assert(n == ac, "cannot disappear");
ac->connect_outputs(this);
if (n == ac) {
ac->connect_outputs(this);
} else {
assert(validated, "shouldn't transform if all arguments not validated");
set_all_memory(n);
}
}
} // original reexecute is set back here
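The "extreme case" mentioned in the comment above is reproducible at the Java level; the per-element store check is exactly why ArrayCopyNode::Ideal may only transform a copyOf whose arguments were all validated. A self-contained sketch (illustrative, not from the commit):

import java.util.Arrays;

public class CopyOfStoreCheckSketch {
    public static void main(String[] args) {
        Integer[] x = { 1, 2, 3 };
        try {
            // Integer elements cannot be stored into a String[]:
            Arrays.copyOf(x, 10, String[].class);
        } catch (ArrayStoreException e) {
            System.out.println("store check failed, as the comment above predicts");
        }
    }
}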

@@ -2032,7 +2032,7 @@ void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
// Hopefully, compiler will optimize for powers of 2.
Node *ctrl = get_ctrl(main_limit);
Node *stride = cl->stride();
Node *init = cl->init_trip();
Node *init = cl->init_trip()->uncast();
Node *span = new SubINode(main_limit,init);
register_new_node(span,ctrl);
Node *rndup = _igvn.intcon(stride_con + ((stride_con>0)?-1:1));

@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "opto/arraycopynode.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
@@ -519,7 +520,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
// Test S[] against D[], not S against D, because (probably)
// the secondary supertype cache is less busy for S[] than S.
// This usually only matters when D is an interface.
Node* not_subtype_ctrl = ac->is_arraycopy_validated() ? top() :
Node* not_subtype_ctrl = (ac->is_arraycopy_validated() || ac->is_copyof_validated() || ac->is_copyofrange_validated()) ? top() :
Phase::gen_subtype_check(src_klass, dest_klass, ctrl, mem, &_igvn);
// Plug failing path into checked_oop_disjoint_arraycopy
if (not_subtype_ctrl != top()) {

@@ -974,21 +974,10 @@ const Type *TypeF::xdual() const {
//------------------------------eq---------------------------------------------
// Structural equality check for Type representations
bool TypeF::eq( const Type *t ) const {
if( g_isnan(_f) ||
g_isnan(t->getf()) ) {
// One or both are NANs. If both are NANs return true, else false.
return (g_isnan(_f) && g_isnan(t->getf()));
}
if (_f == t->getf()) {
// (NaN is impossible at this point, since it is not equal even to itself)
if (_f == 0.0) {
// difference between positive and negative zero
if (jint_cast(_f) != jint_cast(t->getf())) return false;
}
return true;
}
return false;
bool TypeF::eq(const Type *t) const {
// Bitwise comparison to distinguish between +/-0. These values must be treated
// as different to be consistent with C1 and the interpreter.
return (jint_cast(_f) == jint_cast(t->getf()));
}
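A quick Java check (illustrative) of the fact that TypeF::eq here and TypeD::eq below now rely on: +0.0 and -0.0 compare equal with ==, yet have different bit patterns and observably different behavior, so a bitwise comparison is the right equality for type constants.

public class ZeroBitsSketch {
    public static void main(String[] args) {
        System.out.println(0.0f == -0.0f);                   // true
        System.out.println(Float.floatToRawIntBits(0.0f));   // 0
        System.out.println(Float.floatToRawIntBits(-0.0f));  // -2147483648 (0x80000000)
        System.out.println(1.0f / 0.0f == 1.0f / -0.0f);     // false: +Inf vs -Inf
    }
}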
//------------------------------hash-------------------------------------------
@@ -1089,21 +1078,10 @@ const Type *TypeD::xdual() const {
//------------------------------eq---------------------------------------------
// Structural equality check for Type representations
bool TypeD::eq( const Type *t ) const {
if( g_isnan(_d) ||
g_isnan(t->getd()) ) {
// One or both are NANs. If both are NANs return true, else false.
return (g_isnan(_d) && g_isnan(t->getd()));
}
if (_d == t->getd()) {
// (NaN is impossible at this point, since it is not equal even to itself)
if (_d == 0.0) {
// difference between positive and negative zero
if (jlong_cast(_d) != jlong_cast(t->getd())) return false;
}
return true;
}
return false;
bool TypeD::eq(const Type *t) const {
// Bitwise comparison to distinguish between +/-0. These values must be treated
// as different to be consistent with C1 and the interpreter.
return (jlong_cast(_d) == jlong_cast(t->getd()));
}
//------------------------------hash-------------------------------------------

@@ -289,7 +289,12 @@ bool AdvancedThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
// Create MDO if necessary.
void AdvancedThresholdPolicy::create_mdo(methodHandle mh, JavaThread* THREAD) {
if (mh->is_native() || mh->is_abstract() || mh->is_accessor()) return;
if (mh->is_native() ||
mh->is_abstract() ||
mh->is_accessor() ||
mh->is_constant_getter()) {
return;
}
if (mh->method_data() == NULL) {
Method::build_interpreter_method_data(mh, CHECK_AND_CLEAR);
}
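For illustration, a hypothetical example of the newly excluded case: a "constant getter" in the sense of is_constant_getter() does nothing but return a constant, so method data would record nothing useful.

public class ConstantGetterSketch {
    // The body just pushes a constant and returns; building an MDO for it
    // is wasted effort, hence the early return above.
    static int answer() { return 42; }
    public static void main(String[] args) {
        System.out.println(answer());
    }
}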

@@ -724,6 +724,7 @@ void NMethodSweeper::possibly_flush(nmethod* nm) {
// state of the code cache if it's requested.
void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
if (PrintMethodFlushing) {
ResourceMark rm;
stringStream s;
// Dump code cache state into a buffer before locking the tty,
// because log_state() will use locks causing lock conflicts.
@@ -741,6 +742,7 @@ }
}
if (LogCompilation && (xtty != NULL)) {
ResourceMark rm;
stringStream s;
// Dump code cache state into a buffer before locking the tty,
// because log_state() will use locks causing lock conflicts.

@@ -114,7 +114,9 @@ needs_jre = \
compiler/c2/7047069/Test7047069.java \
runtime/6294277/SourceDebugExtension.java \
runtime/ClassFile/JsrRewriting.java \
runtime/ClassFile/OomWhileParsingRepeatedJsr.java
runtime/ClassFile/OomWhileParsingRepeatedJsr.java \
runtime/SharedArchiveFile/LimitSharedSizes.java \
runtime/SharedArchiveFile/SpaceUtilizationCheck.java
# Compact 3 adds further tests to compact2
#
@@ -387,35 +389,7 @@ hotspot_compiler_3 = \
-compiler/runtime/6826736
hotspot_compiler_closed = \
closed/compiler/c1/ \
closed/compiler/c2/ \
closed/compiler/codegen/ \
closed/compiler/escapeAnalysis/ \
closed/compiler/interpreter/ \
closed/compiler/jsr292/ \
closed/compiler/loopopts/ \
closed/compiler/oracle/ \
closed/compiler/runtime/ \
closed/compiler/symantec/ \
-closed/compiler/c1/4477197 \
-closed/compiler/c1/5040872 \
-closed/compiler/c1/6507107 \
-closed/compiler/c2/4344895 \
-closed/compiler/c2/4485006 \
-closed/compiler/c2/4523683 \
-closed/compiler/c2/4620290 \
-closed/compiler/c2/4998314 \
-closed/compiler/c2/6329104 \
-closed/compiler/c2/6434117 \
-closed/compiler/c2/6547163 \
-closed/compiler/c2/6563987 \
-closed/compiler/c2/6595044 \
-closed/compiler/codegen/6440479 \
-closed/compiler/codegen/6603011 \
-closed/compiler/interpreter/5034475 \
-closed/compiler/jsr292/LongLambdaFormDynamicStackDepth.java \
-closed/compiler/loopopts/4463485 \
-closed/compiler/loopopts/8021898
sanity/ExecuteInternalVMTests.java
hotspot_gc = \
sanity/ExecuteInternalVMTests.java \

@@ -0,0 +1,50 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8073792
* @summary assert broken when array size becomes known during igvn
* @run main/othervm -Xcomp -XX:CompileOnly=TestArrayCloneBadAssert.m TestArrayCloneBadAssert
*
*/
public class TestArrayCloneBadAssert {
static final int[] array = new int[5];
static int[] m(int[] arr) {
int i = 0;
for (; i < 2; i++) {
}
if (i == 2) {
arr = array;
}
return arr.clone();
}
static public void main(String[] args) {
int[] arr = new int[5];
m(arr);
}
}

@@ -0,0 +1,617 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 6912521
* @summary small array copy as loads/stores
* @run main/othervm -ea -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:CompileCommand=dontinline,TestArrayCopyAsLoadsStores::m* -XX:TypeProfileLevel=200 TestArrayCopyAsLoadsStores
* @run main/othervm -ea -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:CompileCommand=dontinline,TestArrayCopyAsLoadsStores::m* -XX:+IgnoreUnrecognizedVMOptions -XX:+StressArrayCopyMacroNode -XX:TypeProfileLevel=200 TestArrayCopyAsLoadsStores
*
*/
import java.lang.annotation.*;
import java.lang.reflect.*;
import java.util.*;
public class TestArrayCopyAsLoadsStores {
public enum ArraySrc {
SMALL,
LARGE,
ZERO
}
public enum ArrayDst {
NONE,
NEW,
SRC
}
static class A {
}
static class B extends A {
}
static final A[] small_a_src = new A[5];
static final A[] large_a_src = new A[10];
static final A[] zero_a_src = new A[0];
static final int[] small_int_src = new int[5];
static final int[] large_int_src = new int[10];
static final int[] zero_int_src = new int[0];
static final Object[] small_object_src = new Object[5];
static Object src;
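// Each test method carries an @Args annotation describing which source
// array to pass in, whether/how to allocate a destination array, and any
// extra int arguments (e.g. copy offsets).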
@Retention(RetentionPolicy.RUNTIME)
@interface Args {
ArraySrc src();
ArrayDst dst() default ArrayDst.NONE;
int[] extra_args() default {};
}
// array clone should be compiled as loads/stores
@Args(src=ArraySrc.SMALL)
static A[] m1() throws CloneNotSupportedException {
return (A[])small_a_src.clone();
}
@Args(src=ArraySrc.SMALL)
static int[] m2() throws CloneNotSupportedException {
return (int[])small_int_src.clone();
}
// new array allocation should be optimized out
@Args(src=ArraySrc.SMALL)
static int m3() throws CloneNotSupportedException {
int[] array_clone = (int[])small_int_src.clone();
return array_clone[0] + array_clone[1] + array_clone[2] +
array_clone[3] + array_clone[4];
}
// should not be compiled as loads/stores
@Args(src=ArraySrc.LARGE)
static int[] m4() throws CloneNotSupportedException {
return (int[])large_int_src.clone();
}
// check that array of length 0 is handled correctly
@Args(src=ArraySrc.ZERO)
static int[] m5() throws CloneNotSupportedException {
return (int[])zero_int_src.clone();
}
// array copy should be compiled as loads/stores
@Args(src=ArraySrc.SMALL, dst=ArrayDst.NEW)
static void m6(int[] src, int[] dest) {
System.arraycopy(src, 0, dest, 0, 5);
}
// array copy should not be compiled as loads/stores
@Args(src=ArraySrc.LARGE, dst=ArrayDst.NEW)
static void m7(int[] src, int[] dest) {
System.arraycopy(src, 0, dest, 0, 10);
}
// array copy should be compiled as loads/stores
@Args(src=ArraySrc.SMALL)
static A[] m8(A[] src) {
src[0] = src[0]; // force null check
A[] dest = new A[5];
System.arraycopy(src, 0, dest, 0, 5);
return dest;
}
// array copy should not be compiled as loads/stores: we would
// need to emit GC barriers
@Args(src=ArraySrc.SMALL, dst=ArrayDst.NEW)
static void m9(A[] src, A[] dest) {
System.arraycopy(src, 0, dest, 0, 5);
}
// overlapping array regions: copy backward
@Args(src=ArraySrc.SMALL, dst=ArrayDst.SRC)
static void m10(int[] src, int[] dest) {
System.arraycopy(src, 0, dest, 1, 4);
}
static boolean m10_check(int[] src, int[] dest) {
boolean failure = false;
for (int i = 0; i < 5; i++) {
int j = Math.max(i - 1, 0);
if (dest[i] != src[j]) {
System.out.println("Test m10 failed for " + i + " src[" + j +"]=" + src[j] + ", dest[" + i + "]=" + dest[i]);
failure = true;
}
}
return failure;
}
// overlapping array regions: copy forward
@Args(src=ArraySrc.SMALL, dst=ArrayDst.SRC)
static void m11(int[] src, int[] dest) {
System.arraycopy(src, 1, dest, 0, 4);
}
static boolean m11_check(int[] src, int[] dest) {
boolean failure = false;
for (int i = 0; i < 5; i++) {
int j = Math.min(i + 1, 4);
if (dest[i] != src[j]) {
System.out.println("Test m11 failed for " + i + " src[" + j +"]=" + src[j] + ", dest[" + i + "]=" + dest[i]);
failure = true;
}
}
return failure;
}
// overlapping array region with unknown src/dest offsets: compiled code must include both forward and backward copies
@Args(src=ArraySrc.SMALL, dst=ArrayDst.SRC, extra_args={0,1})
static void m12(int[] src, int[] dest, int srcPos, int destPos) {
System.arraycopy(src, srcPos, dest, destPos, 4);
}
static boolean m12_check(int[] src, int[] dest) {
boolean failure = false;
for (int i = 0; i < 5; i++) {
int j = Math.max(i - 1, 0);
if (dest[i] != src[j]) {
System.out.println("Test m10 failed for " + i + " src[" + j +"]=" + src[j] + ", dest[" + i + "]=" + dest[i]);
failure = true;
}
}
return failure;
}
// Array allocation and copy should optimize out
@Args(src=ArraySrc.SMALL)
static int m13(int[] src) {
int[] dest = new int[5];
System.arraycopy(src, 0, dest, 0, 5);
return dest[0] + dest[1] + dest[2] + dest[3] + dest[4];
}
// Check that copy of length 0 is handled correctly
@Args(src=ArraySrc.ZERO, dst=ArrayDst.NEW)
static void m14(int[] src, int[] dest) {
System.arraycopy(src, 0, dest, 0, 0);
}
// copyOf should compile to loads/stores
@Args(src=ArraySrc.SMALL)
static A[] m15() {
return Arrays.copyOf(small_a_src, 5, A[].class);
}
static Object[] helper16(int i) {
Object[] arr = null;
if ((i%2) == 0) {
arr = small_a_src;
} else {
arr = small_object_src;
}
return arr;
}
// CopyOf may need subtype check
@Args(src=ArraySrc.SMALL, dst=ArrayDst.NONE, extra_args={0})
static A[] m16(A[] unused_src, int i) {
Object[] arr = helper16(i);
return Arrays.copyOf(arr, 5, A[].class);
}
static Object[] helper17_1(int i) {
Object[] arr = null;
if ((i%2) == 0) {
arr = small_a_src;
} else {
arr = small_object_src;
}
return arr;
}
static A[] helper17_2(Object[] arr) {
return Arrays.copyOf(arr, 5, A[].class);
}
// CopyOf may leverage type speculation
@Args(src=ArraySrc.SMALL, dst=ArrayDst.NONE, extra_args={0})
static A[] m17(A[] unused_src, int i) {
Object[] arr = helper17_1(i);
return helper17_2(arr);
}
static Object[] helper18_1(int i) {
Object[] arr = null;
if ((i%2) == 0) {
arr = small_a_src;
} else {
arr = small_object_src;
}
return arr;
}
static Object[] helper18_2(Object[] arr) {
return Arrays.copyOf(arr, 5, Object[].class);
}
// CopyOf should not attempt to use type speculation if it's not needed
@Args(src=ArraySrc.SMALL, dst=ArrayDst.NONE, extra_args={0})
static Object[] m18(A[] unused_src, int i) {
Object[] arr = helper18_1(i);
return helper18_2(arr);
}
static Object[] helper19(int i) {
Object[] arr = null;
if ((i%2) == 0) {
arr = small_a_src;
} else {
arr = small_object_src;
}
return arr;
}
// copyOf may need a subtype check. The test is run so that the type
// check fails and causes deoptimization. The next compilation should
// not compile the copy as loads/stores because the first compilation
// deoptimized.
@Args(src=ArraySrc.SMALL, dst=ArrayDst.NONE, extra_args={0})
static A[] m19(A[] unused_src, int i) {
Object[] arr = helper19(i);
return Arrays.copyOf(arr, 5, A[].class);
}
// copyOf for large array should not compile to loads/stores
@Args(src=ArraySrc.LARGE)
static A[] m20() {
return Arrays.copyOf(large_a_src, 10, A[].class);
}
// check zero length copyOf is handled correctly
@Args(src=ArraySrc.ZERO)
static A[] m21() {
return Arrays.copyOf(zero_a_src, 0, A[].class);
}
// Run with srcPos=0 for a first compilation, then with an incorrect
// srcPos to cause deoptimization, then with srcPos=0 again for a
// second compilation. The second compilation shouldn't turn the
// arraycopy into loads/stores because the input arguments are no
// longer known to be valid.
@Args(src=ArraySrc.SMALL, dst=ArrayDst.NEW, extra_args={0})
static void m22(int[] src, int[] dest, int srcPos) {
System.arraycopy(src, srcPos, dest, 0, 5);
}
// copyOfRange should compile to loads/stores
@Args(src=ArraySrc.SMALL)
static A[] m23() {
return Arrays.copyOfRange(small_a_src, 1, 4, A[].class);
}
static boolean m23_check(A[] src, A[] dest) {
boolean failure = false;
for (int i = 0; i < 3; i++) {
if (src[i+1] != dest[i]) {
System.out.println("Test m23 failed for " + i + " src[" + (i+1) +"]=" + dest[i] + ", dest[" + i + "]=" + dest[i]);
failure = true;
}
}
return failure;
}
// array copy should be compiled as loads/stores. Then invoke it with
// an incompatible array type to verify we don't allow a forbidden
// arraycopy to happen.
@Args(src=ArraySrc.SMALL)
static A[] m24(Object[] src) {
src[0] = src[0]; // force null check
A[] dest = new A[5];
System.arraycopy(src, 0, dest, 0, 5);
return dest;
}
// overlapping array region with unknown src/dest offsets but
// length 1: compiled code doesn't need both forward and backward
// copies
@Args(src=ArraySrc.SMALL, dst=ArrayDst.SRC, extra_args={0,1})
static void m25(int[] src, int[] dest, int srcPos, int destPos) {
System.arraycopy(src, srcPos, dest, destPos, 1);
}
static boolean m25_check(int[] src, int[] dest) {
boolean failure = false;
if (dest[1] != src[0]) {
System.out.println("Test m10 failed for src[0]=" + src[0] + ", dest[1]=" + dest[1]);
return true;
}
return false;
}
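// Collect all test methods (mNN) and their optional result checkers
// (mNN_check) by reflection.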
final HashMap<String,Method> tests = new HashMap<>();
{
for (Method m : this.getClass().getDeclaredMethods()) {
if (m.getName().matches("m[0-9]+(_check)?")) {
assert(Modifier.isStatic(m.getModifiers())) : m;
tests.put(m.getName(), m);
}
}
}
boolean success = true;
void doTest(String name) throws Exception {
Method m = tests.get(name);
Method m_check = tests.get(name + "_check");
Class[] paramTypes = m.getParameterTypes();
Object[] params = new Object[paramTypes.length];
Class retType = m.getReturnType();
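// Determine whether this test operates on int[] rather than A[]: an
// int return value, a void method whose first parameter is a primitive
// array, or a primitive array return type all indicate int[] sources.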
boolean isIntArray = (retType.isPrimitive() && !retType.equals(Void.TYPE)) ||
(retType.equals(Void.TYPE) && paramTypes[0].getComponentType().isPrimitive()) ||
(retType.isArray() && retType.getComponentType().isPrimitive());
Args args = m.getAnnotation(Args.class);
Object src = null;
switch(args.src()) {
case SMALL: {
if (isIntArray) {
src = small_int_src;
} else {
src = small_a_src;
}
break;
}
case LARGE: {
if (isIntArray) {
src = large_int_src;
} else {
src = large_a_src;
}
break;
}
case ZERO: {
if (isIntArray) {
src = zero_int_src;
} else {
src = zero_a_src;
}
break;
}
}
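// Invoke the test method enough times to get it JIT-compiled,
// verifying the result of every invocation.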
for (int i = 0; i < 20000; i++) {
boolean failure = false;
int p = 0;
if (params.length > 0) {
if (isIntArray) {
params[0] = ((int[])src).clone();
} else {
params[0] = ((A[])src).clone();
}
p++;
}
if (params.length > 1) {
switch(args.dst()) {
case NEW: {
if (isIntArray) {
params[1] = new int[((int[])params[0]).length];
} else {
params[1] = new A[((A[])params[0]).length];
}
p++;
break;
}
case SRC: {
params[1] = params[0];
p++;
break;
}
case NONE: break;
}
}
for (int j = 0; j < args.extra_args().length; j++) {
params[p+j] = args.extra_args()[j];
}
Object res = m.invoke(null, params);
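// For int-returning tests, the returned sum must match a sum
// recomputed from the source array (the clone passed in has the
// same contents).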
if (retType.isPrimitive() && !retType.equals(Void.TYPE)) {
int s = (int)res;
int sum = 0;
int[] int_res = (int[])src;
for (int j = 0; j < int_res.length; j++) {
sum += int_res[j];
}
failure = (s != sum);
if (failure) {
System.out.println("Test " + name + " failed: result = " + s + " != " + sum);
}
} else {
Object dest = null;
if (!retType.equals(Void.TYPE)) {
dest = res;
} else {
dest = params[1];
}
if (m_check != null) {
failure = (boolean)m_check.invoke(null, new Object[] { src, dest });
} else {
if (isIntArray) {
int[] int_res = (int[])src;
int[] int_dest = (int[])dest;
for (int j = 0; j < int_res.length; j++) {
if (int_res[j] != int_dest[j]) {
System.out.println("Test " + name + " failed for " + j + " src[" + j +"]=" + int_res[j] + ", dest[" + j + "]=" + int_dest[j]);
failure = true;
}
}
} else {
Object[] object_res = (Object[])src;
Object[] object_dest = (Object[])dest;
for (int j = 0; j < object_res.length; j++) {
if (object_res[j] != object_dest[j]) {
System.out.println("Test " + name + " failed for " + j + " src[" + j +"]=" + object_res[j] + ", dest[" + j + "]=" + object_dest[j]);
failure = true;
}
}
}
}
}
if (failure) {
success = false;
break;
}
}
}
public static void main(String[] args) throws Exception {
for (int i = 0; i < small_a_src.length; i++) {
small_a_src[i] = new A();
}
for (int i = 0; i < small_int_src.length; i++) {
small_int_src[i] = i;
}
for (int i = 0; i < large_int_src.length; i++) {
large_int_src[i] = i;
}
for (int i = 0; i < 5; i++) {
small_object_src[i] = new Object();
}
TestArrayCopyAsLoadsStores test = new TestArrayCopyAsLoadsStores();
test.doTest("m1");
test.doTest("m2");
test.doTest("m3");
test.doTest("m4");
test.doTest("m5");
test.doTest("m6");
test.doTest("m7");
test.doTest("m8");
test.doTest("m9");
test.doTest("m10");
test.doTest("m11");
test.doTest("m12");
test.doTest("m13");
test.doTest("m14");
test.doTest("m15");
// make both branches of the If appear taken
for (int i = 0; i < 20000; i++) {
helper16(i);
}
test.doTest("m16");
// load class B so the type check in m17 is not a simple comparison
B b = new B();
// make both branches of the If appear taken
for (int i = 0; i < 20000; i++) {
helper17_1(i);
}
test.doTest("m17");
// make both branches of the If appear taken
for (int i = 0; i < 20000; i++) {
helper18_1(i);
}
test.doTest("m18");
// make both branches of the If appear taken
for (int i = 0; i < 20000; i++) {
helper19(i);
}
// Compile
for (int i = 0; i < 20000; i++) {
m19(null, 0);
}
// force deopt
boolean m19_exception = false;
for (int i = 0; i < 10; i++) {
try {
m19(null, 1);
} catch(ArrayStoreException ase) {
m19_exception = true;
}
}
if (!m19_exception) {
System.out.println("Test m19: exception wasn't thrown");
test.success = false;
}
test.doTest("m19");
test.doTest("m20");
test.doTest("m21");
// Compile
int[] dst = new int[small_int_src.length];
for (int i = 0; i < 20000; i++) {
m22(small_int_src, dst, 0);
}
// force deopt
for (int i = 0; i < 10; i++) {
try {
m22(small_int_src, dst, 5);
} catch(ArrayIndexOutOfBoundsException aioobe) {}
}
test.doTest("m22");
test.doTest("m23");
test.doTest("m24");
boolean m24_exception = false;
try {
m24(small_object_src);
} catch(ArrayStoreException ase) {
m24_exception = true;
}
if (!m24_exception) {
System.out.println("Test m24: exception wasn't thrown");
test.success = false;
}
test.doTest("m25");
if (!test.success) {
throw new RuntimeException("some tests failed");
}
}
}

@ -0,0 +1,163 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 8073670
* @summary Test that causes C2 to fold two NaNs with different values into a single NaN.
* @run main/othervm -XX:-TieredCompilation -Xcomp -XX:CompileCommand=compileonly,FloatingPointFoldingTest.test_double_inf -XX:CompileCommand=compileonly,FloatingPointFoldingTest.test_double_zero -XX:CompileCommand=compileonly,FloatingPointFoldingTest.test_double_nan -XX:CompileCommand=compileonly,FloatingPointFoldingTest.test_float_inf -XX:CompileCommand=compileonly,FloatingPointFoldingTest.test_float_zero -XX:CompileCommand=compileonly,FloatingPointFoldingTest.test_float_nan FloatingPointFoldingTest
*/
public class FloatingPointFoldingTest {
// Double values.
public static final long MINUS_INF_LONGBITS = 0xfff0000000000000L;
public static final double DOUBLE_MINUS_INF = Double.longBitsToDouble(MINUS_INF_LONGBITS);
public static final long PLUS_INF_LONGBITS = 0x7ff0000000000000L;
public static final double DOUBLE_PLUS_INF = Double.longBitsToDouble(PLUS_INF_LONGBITS);
public static final long MINUS_ZERO_LONGBITS = 0x8000000000000000L;
public static final double DOUBLE_MINUS_ZERO = Double.longBitsToDouble(MINUS_ZERO_LONGBITS);
// We need two different NaN values. A floating point number is
// a NaN if all of its exponent bits are set to 1 and at least one
// bit of its significand (mantissa) is not zero; the sign bit is
// irrelevant.
//
// As java.lang.Double.NaN is 0x7ff8000000000000L, we use
// 0x7ffc000000000000L as a second NaN double value.
public static final long NAN_LONGBITS = 0x7ffc000000000000L;
public static final double DOUBLE_NAN = Double.longBitsToDouble(NAN_LONGBITS);
// Float values.
public static final int MINUS_INF_INTBITS = 0xff800000;
public static final float FLOAT_MINUS_INF = Float.intBitsToFloat(MINUS_INF_INTBITS);
public static final int PLUS_INF_INTBITS = 0x7f800000;
public static final float FLOAT_PLUS_INF = Float.intBitsToFloat(PLUS_INF_INTBITS);
public static final int MINUS_ZERO_INTBITS = 0x80000000;
public static final float FLOAT_MINUS_ZERO = Float.intBitsToFloat(MINUS_ZERO_INTBITS);
// As java.lang.Float.NaN is 0x7fc00000, we use 0x7fe00000
// as a second NaN float value.
public static final int NAN_INTBITS = 0x7fe00000;
public static final float FLOAT_NAN = Float.intBitsToFloat(NAN_INTBITS);
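// Sanity sketch: verify that both custom bit patterns above decode to
// NaN (all exponent bits set, non-zero significand), using the standard
// Double.isNaN/Float.isNaN checks.
static {
if (!Double.isNaN(DOUBLE_NAN) || !Float.isNaN(FLOAT_NAN)) {
throw new AssertionError("Custom bit patterns must decode to NaN");
}
}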
// Double tests.
static void test_double_inf(long[] result) {
double d1 = DOUBLE_MINUS_INF;
double d2 = DOUBLE_PLUS_INF;
result[0] = Double.doubleToRawLongBits(d1);
result[1] = Double.doubleToRawLongBits(d2);
}
static void test_double_zero(long[] result) {
double d1 = DOUBLE_MINUS_ZERO;
double d2 = 0;
result[0] = Double.doubleToRawLongBits(d1);
result[1] = Double.doubleToRawLongBits(d2);
}
static void test_double_nan(long[] result) {
double d1 = DOUBLE_NAN;
double d2 = Double.NaN;
result[0] = Double.doubleToRawLongBits(d1);
result[1] = Double.doubleToRawLongBits(d2);
}
// Float tests.
static void test_float_inf(int[] result) {
float f1 = FLOAT_MINUS_INF;
float f2 = FLOAT_PLUS_INF;
result[0] = Float.floatToRawIntBits(f1);
result[1] = Float.floatToRawIntBits(f2);
}
static void test_float_zero(int[] result) {
float f1 = FLOAT_MINUS_ZERO;
float f2 = 0;
result[0] = Float.floatToRawIntBits(f1);
result[1] = Float.floatToRawIntBits(f2);
}
static void test_float_nan(int[] result) {
float f1 = FLOAT_NAN;
float f2 = Float.NaN;
result[0] = Float.floatToRawIntBits(f1);
result[1] = Float.floatToRawIntBits(f2);
}
// Check doubles.
static void check_double(long[] result, double d1, double d2) {
if (result[0] == result[1]) {
throw new RuntimeException("ERROR: Two different double values are considered equal. \n"
+ String.format("\toriginal values: 0x%x 0x%x\n", Double.doubleToRawLongBits(d1), Double.doubleToRawLongBits(d2))
+ String.format("\tvalues after execution of method test(): 0x%x 0x%x", result[0], result[1]));
}
}
// Check floats.
static void check_float(int[] result, float f1, float f2) {
if (result[0] == result[1]) {
throw new RuntimeException("ERROR: Two different float values are considered equal. \n"
+ String.format("\toriginal values: 0x%x 0x%x\n", Float.floatToRawIntBits(f1), Float.floatToRawIntBits(f2))
+ String.format("\tvalues after execution of method test(): 0x%x 0x%x", result[0], result[1]));
}
}
public static void main(String[] args) {
// Float tests.
int[] iresult = new int[2];
// -Inf and +Inf.
test_float_inf(iresult);
check_float(iresult, FLOAT_MINUS_INF, FLOAT_PLUS_INF);
// 0 and -0.
test_float_zero(iresult);
check_float(iresult, FLOAT_MINUS_ZERO, 0);
// Different NaNs.
test_float_nan(iresult);
check_float(iresult, FLOAT_NAN, Float.NaN);
// Double tests.
long[] lresult = new long[2];
// -Inf and +Inf.
test_double_inf(lresult);
check_double(lresult, DOUBLE_MINUS_INF, DOUBLE_PLUS_INF);
// 0 and -0.
test_double_zero(lresult);
check_double(lresult, DOUBLE_MINUS_ZERO, 0);
// Different NaNs.
test_double_nan(lresult);
check_double(lresult, DOUBLE_NAN, Double.NaN);
}
}

@ -51,7 +51,9 @@ public class UsageThresholdIncreasedTest {
public static void main(String[] args) {
for (BlobType btype : BlobType.getAvailable()) {
new UsageThresholdIncreasedTest(btype).runTest();
if (CodeCacheUtils.isCodeHeapPredictable(btype)) {
new UsageThresholdIncreasedTest(btype).runTest();
}
}
}

@ -0,0 +1,74 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/**
* @test
* @bug 8073956
* @summary Tests C2 EA with allocated object escaping through a call.
* @run main/othervm -XX:CompileCommand=dontinline,TestEscapeThroughInvoke::create TestEscapeThroughInvoke
*/
public class TestEscapeThroughInvoke {
private A a;
public static void main(String[] args) {
TestEscapeThroughInvoke test = new TestEscapeThroughInvoke();
test.a = new A(42);
// Make sure run gets compiled by C2
for (int i = 0; i < 100_000; ++i) {
test.run();
}
}
private void run() {
// Allocate something to trigger EA
new Object();
// Create a new escaping instance of A and
// verify that it is always equal to 'a.saved'.
A escapingA = create(42);
a.check(escapingA);
}
// Create and return a new instance of A that escapes through 'A::saveInto'.
// The 'dummy' parameters are needed to avoid EA skipping the methods.
private A create(Integer dummy) {
A result = new A(dummy);
result.saveInto(a, dummy); // result escapes into 'a' here
return result;
}
}
class A {
private A saved;
public A(Integer dummy) { }
public void saveInto(A other, Integer dummy) {
other.saved = this;
}
public void check(A other) {
if (this.saved != other) {
throw new RuntimeException("TEST FAILED: Objects not equal.");
}
}
}

@ -36,18 +36,22 @@ public class CountedLoopProblem {
public static void main(String[] args) throws Exception {
Random r = new Random(42);
int x = 0;
StringBuilder sb = new StringBuilder();
for(int i = 0; i < 1000000; ++i) {
int v = Math.abs(r.nextInt());
sb.append('+').append(v).append('\n');
x += v;
// To trigger the problem we must OSR in the following loop
// To make the problem 100% reproducible run with -XX:-TieredCompilation -XX:OSROnlyBCI=62
while(x < 0) x += 1000000000;
sb.append('=').append(x).append('\n');
}
if (sb.toString().hashCode() != 0xaba94591) {
throw new Exception("Unexpected result");
try {
StringBuilder sb = new StringBuilder();
for(int i = 0; i < 1000000; ++i) {
int v = Math.abs(r.nextInt());
sb.append('+').append(v).append('\n');
x += v;
// To trigger the problem we must OSR in the following loop
// To make the problem 100% reproducible run with -XX:-TieredCompilation -XX:OSROnlyBCI=62
while(x < 0) x += 1000000000;
sb.append('=').append(x).append('\n');
}
if (sb.toString().hashCode() != 0xaba94591) {
throw new Exception("Unexpected result");
}
} catch(OutOfMemoryError e) {
// small heap, ignore
}
}
}

@ -0,0 +1,46 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 8073184
* @summary CastII that guards counted loops confuses range check elimination with LoopLimitCheck off
* @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-LoopLimitCheck -XX:CompileOnly=TestCastIINoLoopLimitCheck.m -Xcomp TestCastIINoLoopLimitCheck
*
*/
public class TestCastIINoLoopLimitCheck {
static void m(int i, int index, char[] buf) {
while (i >= 65536) {
i = i / 100;
buf [--index] = 0;
buf [--index] = 1;
}
}
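// m is only ever called with i == 0 and buf == null, so the loop body
// never executes at runtime; -Xcomp still forces compilation of m,
// which is enough to exercise the range check elimination issue.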
static public void main(String[] args) {
m(0, 0, null);
}
}

@ -0,0 +1,68 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/**
* @test
* @bug 8073154
* @build TestProfileReturnTypePrinting
* @run main/othervm -XX:TypeProfileLevel=020
* -XX:CompileOnly=TestProfileReturnTypePrinting.testMethod
* -XX:+IgnoreUnrecognizedVMOptions -XX:+PrintLIR
* TestProfileReturnTypePrinting
* @summary Verify that C1 LIR containing a ProfileType node can be dumped
* without a crash, regardless of exact class knowledge.
*/
public class TestProfileReturnTypePrinting {
private static final int ITERATIONS = 1_000_000;
public static void main(String args[]) {
for (int i = 0; i < ITERATIONS; i++) {
TestProfileReturnTypePrinting.testMethod(i);
}
}
private static int testMethod(int i) {
return TestProfileReturnTypePrinting.foo().hashCode()
+ TestProfileReturnTypePrinting.bar(i).hashCode();
}
/* Exact class of returned value is known statically. */
private static B foo() {
return new B();
}
/* Exact class of returned value is not known statically. */
private static Object bar(int i) {
if (i % 2 == 0) {
return new A();
} else {
return new B();
}
}
private static class A {
}
private static class B extends A {
}
}

@ -29,6 +29,7 @@ import java.util.concurrent.Callable;
/**
* @test LevelTransitionTest
* @library /testlibrary /../../test/lib /compiler/whitebox
* @ignore 8067651
* @build TransitionsTestExecutor LevelTransitionTest
* @run main ClassFileInstaller sun.hotspot.WhiteBox sun.hotspot.WhiteBox$WhiteBoxPermission
* @run main/othervm/timeout=240 -Xmixed -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions

@ -58,7 +58,7 @@ public class RandomGeneratorTest {
}
jvmArgs.add(RandomRunner.class.getName());
String[] cmdLineArgs = jvmArgs.toArray(new String[jvmArgs.size()]);
String etalon = ProcessTools.executeTestJvm(cmdLineArgs).getOutput().trim();
String etalon = ProcessTools.executeTestJvm(cmdLineArgs).getStdout().trim();
seedOpt.verify(etalon, cmdLineArgs);
}
@ -122,7 +122,7 @@ public class RandomGeneratorTest {
String lastLineOrig = getLastLine(orig);
String lastLine;
try {
lastLine = getLastLine(ProcessTools.executeTestJvm(cmdLine).getOutput().trim());
lastLine = getLastLine(ProcessTools.executeTestJvm(cmdLine).getStdout().trim());
} catch (Throwable t) {
throw new Error("TESTBUG: Unexpedted exception during jvm execution.", t);
}