8341411: C2: remove slice parameter from GraphKit::make_load() and GraphKit::store_to_memory()
Reviewed-by: thartmann, roland
parent b72fe75533
commit 8af304c60f
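Both factories now compute the memory slice (alias index) internally from the address node's type instead of taking it from the caller. The change is mechanical at every call site: the slice argument (an int alias index, a const TypePtr*, or a constant such as NoAlias or Compile::AliasIdxRaw) is simply dropped. A minimal before/after sketch of a typical call site (illustrative, not a line from the patch):

    // Before: the caller named the memory slice explicitly.
    store_to_memory(control(), adr, val, T_INT, Compile::AliasIdxRaw, MemNode::unordered);
    // After: the slice is derived from adr's type inside store_to_memory().
    store_to_memory(control(), adr, val, T_INT, MemNode::unordered);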
@@ -156,8 +156,8 @@ Node* BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const
     }
 
     store = kit->store_to_memory(kit->control(), access.addr().node(), val.node(), bt,
-                                 access.addr().type(), mo, requires_atomic_access, unaligned,
-                                 mismatched, unsafe, access.barrier_data());
+                                 mo, requires_atomic_access, unaligned, mismatched,
+                                 unsafe, access.barrier_data());
   } else {
     assert(access.is_opt_access(), "either parse or opt access");
     C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
@@ -217,7 +217,7 @@ Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const
                             unaligned, mismatched, unsafe, access.barrier_data());
       load = kit->gvn().transform(load);
     } else {
-      load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
+      load = kit->make_load(control, adr, val_type, access.type(), mo,
                             dep, requires_atomic_access, unaligned, mismatched, unsafe,
                             access.barrier_data());
     }
@@ -220,6 +220,10 @@ Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int count) {
     Node* off = phase->MakeConX(field->offset_in_bytes());
     Node* next_src = phase->transform(new AddPNode(base_src,base_src,off));
     Node* next_dest = phase->transform(new AddPNode(base_dest,base_dest,off));
+    assert(phase->C->get_alias_index(adr_type) == phase->C->get_alias_index(phase->type(next_src)->isa_ptr()),
+           "slice of address and input slice don't match");
+    assert(phase->C->get_alias_index(adr_type) == phase->C->get_alias_index(phase->type(next_dest)->isa_ptr()),
+           "slice of address and input slice don't match");
     BasicType bt = field->layout_type();
 
     const Type *type;
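Where a caller's slice carried real information, the patch does not silently drop it: it becomes an assert that the slice derived from the address agrees with the one the caller computed, as in the two asserts added above. The same pattern recurs below in load_field_from_object, do_get_xxx, and do_put_xxx.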
@@ -101,7 +101,7 @@ void GraphKit::gen_stub(address C_function,
   //
   Node *adr_sp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_sp_offset()));
   Node *last_sp = frameptr();
-  store_to_memory(control(), adr_sp, last_sp, T_ADDRESS, NoAlias, MemNode::unordered);
+  store_to_memory(control(), adr_sp, last_sp, T_ADDRESS, MemNode::unordered);
 
   // Set _thread_in_native
   // The order of stores into TLS is critical! Setting _thread_in_native MUST
@@ -221,12 +221,12 @@ void GraphKit::gen_stub(address C_function,
   //-----------------------------
 
   // Clear last_Java_sp
-  store_to_memory(control(), adr_sp, null(), T_ADDRESS, NoAlias, MemNode::unordered);
+  store_to_memory(control(), adr_sp, null(), T_ADDRESS, MemNode::unordered);
   // Clear last_Java_pc
-  store_to_memory(control(), adr_last_Java_pc, null(), T_ADDRESS, NoAlias, MemNode::unordered);
+  store_to_memory(control(), adr_last_Java_pc, null(), T_ADDRESS, MemNode::unordered);
 #if (defined(IA64) && !defined(AIX))
   Node* adr_last_Java_fp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_fp_offset()));
-  store_to_memory(control(), adr_last_Java_fp, null(), T_ADDRESS, NoAlias, MemNode::unordered);
+  store_to_memory(control(), adr_last_Java_fp, null(), T_ADDRESS, MemNode::unordered);
 #endif
 
   // For is-fancy-jump, the C-return value is also the branch target
@@ -234,16 +234,16 @@ void GraphKit::gen_stub(address C_function,
   // Runtime call returning oop in TLS? Fetch it out
   if( pass_tls ) {
     Node* adr = basic_plus_adr(top(), thread, in_bytes(JavaThread::vm_result_offset()));
-    Node* vm_result = make_load(nullptr, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, MemNode::unordered);
+    Node* vm_result = make_load(nullptr, adr, TypeOopPtr::BOTTOM, T_OBJECT, MemNode::unordered);
     map()->set_req(TypeFunc::Parms, vm_result); // vm_result passed as result
     // clear thread-local-storage(tls)
-    store_to_memory(control(), adr, null(), T_ADDRESS, NoAlias, MemNode::unordered);
+    store_to_memory(control(), adr, null(), T_ADDRESS, MemNode::unordered);
   }
 
   //-----------------------------
   // check exception
   Node* adr = basic_plus_adr(top(), thread, in_bytes(Thread::pending_exception_offset()));
-  Node* pending = make_load(nullptr, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, MemNode::unordered);
+  Node* pending = make_load(nullptr, adr, TypeOopPtr::BOTTOM, T_OBJECT, MemNode::unordered);
 
   Node* exit_memory = reset_memory();
 
@@ -510,7 +510,7 @@ void GraphKit::uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason
   // first must access the should_post_on_exceptions_flag in this thread's JavaThread
   Node* jthread = _gvn.transform(new ThreadLocalNode());
   Node* adr = basic_plus_adr(top(), jthread, in_bytes(JavaThread::should_post_on_exceptions_flag_offset()));
-  Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, MemNode::unordered);
+  Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, MemNode::unordered);
 
   // Test the should_post_on_exceptions_flag vs. 0
   Node* chk = _gvn.transform( new CmpINode(should_post_flag, intcon(0)) );
@@ -1550,7 +1550,6 @@ void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) {
 
 // factory methods in "int adr_idx"
 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
-                          int adr_idx,
                           MemNode::MemOrd mo,
                           LoadNode::ControlDependency control_dependency,
                           bool require_atomic_access,
@@ -1558,7 +1557,7 @@ Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                           bool mismatched,
                           bool unsafe,
                           uint8_t barrier_data) {
-  assert(adr_idx == C->get_alias_index(_gvn.type(adr)->isa_ptr()), "slice of address and input slice don't match");
+  int adr_idx = C->get_alias_index(_gvn.type(adr)->isa_ptr());
   assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
   const TypePtr* adr_type = nullptr; // debug-mode-only argument
   debug_only(adr_type = C->get_adr_type(adr_idx));
@@ -1580,15 +1579,14 @@ Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
 }
 
 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
-                                int adr_idx,
                                 MemNode::MemOrd mo,
                                 bool require_atomic_access,
                                 bool unaligned,
                                 bool mismatched,
                                 bool unsafe,
                                 int barrier_data) {
+  int adr_idx = C->get_alias_index(_gvn.type(adr)->isa_ptr());
   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
-  assert(adr_idx == C->get_alias_index(_gvn.type(adr)->isa_ptr()), "slice of address and input slice don't match");
   const TypePtr* adr_type = nullptr;
   debug_only(adr_type = C->get_adr_type(adr_idx));
   Node *mem = memory(adr_idx);
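Both base factories now share the same derivation, visible in the two hunks above: compute the alias index from the address node, reject the top slice, and fetch the per-slice memory state. A condensed sketch of that shared idiom (names as in the hunks; not a verbatim excerpt):

    // Derive the memory slice from the address node's type.
    int adr_idx = C->get_alias_index(_gvn.type(adr)->isa_ptr());
    assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory");
    Node* mem = memory(adr_idx);  // memory state for exactly this slice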
@@ -2044,11 +2042,10 @@ void GraphKit::increment_counter(address counter_addr) {
 }
 
 void GraphKit::increment_counter(Node* counter_addr) {
-  int adr_type = Compile::AliasIdxRaw;
   Node* ctrl = control();
-  Node* cnt = make_load(ctrl, counter_addr, TypeLong::LONG, T_LONG, adr_type, MemNode::unordered);
+  Node* cnt = make_load(ctrl, counter_addr, TypeLong::LONG, T_LONG, MemNode::unordered);
   Node* incr = _gvn.transform(new AddLNode(cnt, _gvn.longcon(1)));
-  store_to_memory(ctrl, counter_addr, incr, T_LONG, adr_type, MemNode::unordered);
+  store_to_memory(ctrl, counter_addr, incr, T_LONG, MemNode::unordered);
 }
 
 
@@ -4240,8 +4237,8 @@ void GraphKit::inflate_string_slow(Node* src, Node* dst, Node* start, Node* count)
   set_memory(mem, TypeAryPtr::BYTES);
   Node* ch = load_array_element(src, i_byte, TypeAryPtr::BYTES, /* set_ctrl */ true);
   Node* st = store_to_memory(control(), array_element_address(dst, i_char, T_BYTE),
-                             AndI(ch, intcon(0xff)), T_CHAR, TypeAryPtr::BYTES, MemNode::unordered,
-                             false, false, true /* mismatched */);
+                             AndI(ch, intcon(0xff)), T_CHAR, MemNode::unordered, false,
+                             false, true /* mismatched */);
 
   IfNode* iff = create_and_map_if(head, Bool(CmpI(i_byte, count), BoolTest::lt), PROB_FAIR, COUNT_UNKNOWN);
   head->init_req(2, IfTrue(iff));
@@ -540,26 +540,6 @@ class GraphKit : public Phase {
   // adapted the `do_put_xxx' and `do_get_xxx' procedures for the case
   // of volatile fields.
   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                   MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                   bool require_atomic_access = false, bool unaligned = false,
-                  bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0) {
-    // This version computes alias_index from bottom_type
-    return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
-                     mo, control_dependency, require_atomic_access,
-                     unaligned, mismatched, unsafe, barrier_data);
-  }
-  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
-                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
-                  bool require_atomic_access = false, bool unaligned = false,
-                  bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0) {
-    // This version computes alias_index from an address type
-    assert(adr_type != nullptr, "use other make_load factory");
-    return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
-                     mo, control_dependency, require_atomic_access,
-                     unaligned, mismatched, unsafe, barrier_data);
-  }
-  // This is the base version which is given an alias index.
-  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
-                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
-                  bool require_atomic_access = false, bool unaligned = false,
-                  bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0);
+                  bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0);
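With the two convenience overloads gone (one derived the slice from adr->bottom_type(), the other from an explicit adr_type), only a single declaration remains. A hypothetical out-of-tree caller of the removed adr_type overload migrates by deleting that one argument (sketch only; my_adr_type is not from the patch):

    // Before: Node* ld = make_load(ctl, adr, TypeInt::INT, T_INT, my_adr_type, MemNode::unordered);
    Node* ld = make_load(ctl, adr, TypeInt::INT, T_INT, MemNode::unordered);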
@@ -572,26 +552,8 @@ class GraphKit : public Phase {
   // procedure must indicate that the store requires `release'
   // semantics, if the stored value is an object reference that might
   // point to a new object and may become externally visible.
-  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
-                        const TypePtr* adr_type,
-                        MemNode::MemOrd mo,
-                        bool require_atomic_access = false,
-                        bool unaligned = false,
-                        bool mismatched = false,
-                        bool unsafe = false,
-                        int barrier_data = 0) {
-    // This version computes alias_index from an address type
-    assert(adr_type != nullptr, "use other store_to_memory factory");
-    return store_to_memory(ctl, adr, val, bt,
-                           C->get_alias_index(adr_type),
-                           mo, require_atomic_access,
-                           unaligned, mismatched, unsafe,
-                           barrier_data);
-  }
-  // This is the base version which is given alias index
   // Return the new StoreXNode
   Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
-                        int adr_idx,
                         MemNode::MemOrd,
                         bool require_atomic_access = false,
                         bool unaligned = false,
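The store side collapses the same way. The underlying design point: the address node already determines the slice, so a separately passed slice was redundant when it matched and a latent aliasing bug when it did not; the asserts retained at former call sites turn any remaining disagreement into a debug-build failure instead of a silent miscompile.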
@@ -3109,7 +3109,7 @@ bool LibraryCallKit::inline_native_jvm_commit() {
   set_control(is_notified);
 
   // Reset notified state.
-  Node* notified_reset_memory = store_to_memory(control(), notified_offset, _gvn.intcon(0), T_BOOLEAN, Compile::AliasIdxRaw, MemNode::unordered);
+  Node* notified_reset_memory = store_to_memory(control(), notified_offset, _gvn.intcon(0), T_BOOLEAN, MemNode::unordered);
 
   // Iff notified, the return address of the commit method is the current position of the backing java buffer. This is used to reset the event writer.
   Node* current_pos_X = _gvn.transform(new LoadXNode(control(), input_memory_state, java_buffer_pos_offset, TypeRawPtr::NOTNULL, TypeX_X, MemNode::unordered));
@@ -3129,9 +3129,9 @@ bool LibraryCallKit::inline_native_jvm_commit() {
   // Store the next_position to the underlying jfr java buffer.
   Node* commit_memory;
 #ifdef _LP64
-  commit_memory = store_to_memory(control(), java_buffer_pos_offset, next_pos_X, T_LONG, Compile::AliasIdxRaw, MemNode::release);
+  commit_memory = store_to_memory(control(), java_buffer_pos_offset, next_pos_X, T_LONG, MemNode::release);
 #else
-  commit_memory = store_to_memory(control(), java_buffer_pos_offset, next_pos_X, T_INT, Compile::AliasIdxRaw, MemNode::release);
+  commit_memory = store_to_memory(control(), java_buffer_pos_offset, next_pos_X, T_INT, MemNode::release);
 #endif
 
   // Now load the flags from off the java buffer and decide if the buffer is a lease. If so, it needs to be returned post-commit.
@@ -3448,13 +3448,10 @@ bool LibraryCallKit::inline_native_getEventWriter() {
   Node* const event_writer_tid = load_field_from_object(event_writer, "threadID", "J");
   // Get the field offset to, conditionally, store an updated tid value later.
   Node* const event_writer_tid_field = field_address_from_object(event_writer, "threadID", "J", false);
-  const TypePtr* event_writer_tid_field_type = _gvn.type(event_writer_tid_field)->isa_ptr();
   // Get the field offset to, conditionally, store an updated exclusion value later.
   Node* const event_writer_excluded_field = field_address_from_object(event_writer, "excluded", "Z", false);
-  const TypePtr* event_writer_excluded_field_type = _gvn.type(event_writer_excluded_field)->isa_ptr();
   // Get the field offset to, conditionally, store an updated pinVirtualThread value later.
   Node* const event_writer_pin_field = field_address_from_object(event_writer, "pinVirtualThread", "Z", false);
-  const TypePtr* event_writer_pin_field_type = _gvn.type(event_writer_pin_field)->isa_ptr();
 
   RegionNode* event_writer_tid_compare_rgn = new RegionNode(PATH_LIMIT);
   record_for_igvn(event_writer_tid_compare_rgn);
@@ -3476,13 +3473,13 @@ bool LibraryCallKit::inline_native_getEventWriter() {
   record_for_igvn(tid_is_not_equal);
 
   // Store the pin state to the event writer.
-  store_to_memory(tid_is_not_equal, event_writer_pin_field, _gvn.transform(pinVirtualThread), T_BOOLEAN, event_writer_pin_field_type, MemNode::unordered);
+  store_to_memory(tid_is_not_equal, event_writer_pin_field, _gvn.transform(pinVirtualThread), T_BOOLEAN, MemNode::unordered);
 
   // Store the exclusion state to the event writer.
-  store_to_memory(tid_is_not_equal, event_writer_excluded_field, _gvn.transform(exclusion), T_BOOLEAN, event_writer_excluded_field_type, MemNode::unordered);
+  store_to_memory(tid_is_not_equal, event_writer_excluded_field, _gvn.transform(exclusion), T_BOOLEAN, MemNode::unordered);
 
   // Store the tid to the event writer.
-  store_to_memory(tid_is_not_equal, event_writer_tid_field, tid, T_LONG, event_writer_tid_field_type, MemNode::unordered);
+  store_to_memory(tid_is_not_equal, event_writer_tid_field, tid, T_LONG, MemNode::unordered);
 
   // Update control and phi nodes.
   event_writer_tid_compare_rgn->init_req(_true_path, tid_is_not_equal);
@@ -3561,7 +3558,7 @@ void LibraryCallKit::extend_setCurrentThread(Node* jt, Node* thread) {
   // False branch, is carrierThread.
   Node* thread_equal_carrierThread = _gvn.transform(new IfFalseNode(iff_thread_not_equal_carrierThread));
   // Store release
-  Node* vthread_false_memory = store_to_memory(thread_equal_carrierThread, vthread_offset, _gvn.intcon(0), T_BOOLEAN, Compile::AliasIdxRaw, MemNode::release, true);
+  Node* vthread_false_memory = store_to_memory(thread_equal_carrierThread, vthread_offset, _gvn.intcon(0), T_BOOLEAN, MemNode::release, true);
 
   set_all_memory(input_memory_state);
 
@@ -3582,7 +3579,7 @@ void LibraryCallKit::extend_setCurrentThread(Node* jt, Node* thread) {
 
   // Store the vthread tid to the jfr thread local.
   Node* thread_id_offset = basic_plus_adr(jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_ID_OFFSET_JFR));
-  Node* tid_memory = store_to_memory(control(), thread_id_offset, tid, T_LONG, Compile::AliasIdxRaw, MemNode::unordered, true);
+  Node* tid_memory = store_to_memory(control(), thread_id_offset, tid, T_LONG, MemNode::unordered, true);
 
   // Branch is_excluded to conditionalize updating the epoch.
   Node* excluded_cmp = _gvn.transform(new CmpINode(is_excluded, _gvn.transform(excluded_mask)));
@@ -3604,7 +3601,7 @@ void LibraryCallKit::extend_setCurrentThread(Node* jt, Node* thread) {
 
   // Store the vthread epoch to the jfr thread local.
   Node* vthread_epoch_offset = basic_plus_adr(jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_EPOCH_OFFSET_JFR));
-  Node* included_memory = store_to_memory(control(), vthread_epoch_offset, epoch, T_CHAR, Compile::AliasIdxRaw, MemNode::unordered, true);
+  Node* included_memory = store_to_memory(control(), vthread_epoch_offset, epoch, T_CHAR, MemNode::unordered, true);
 
   RegionNode* excluded_rgn = new RegionNode(PATH_LIMIT);
   record_for_igvn(excluded_rgn);
@@ -3627,10 +3624,10 @@ void LibraryCallKit::extend_setCurrentThread(Node* jt, Node* thread) {
 
   // Store the vthread exclusion state to the jfr thread local.
   Node* thread_local_excluded_offset = basic_plus_adr(jt, in_bytes(THREAD_LOCAL_OFFSET_JFR + VTHREAD_EXCLUDED_OFFSET_JFR));
-  store_to_memory(control(), thread_local_excluded_offset, _gvn.transform(exclusion), T_BOOLEAN, Compile::AliasIdxRaw, MemNode::unordered, true);
+  store_to_memory(control(), thread_local_excluded_offset, _gvn.transform(exclusion), T_BOOLEAN, MemNode::unordered, true);
 
   // Store release
-  Node * vthread_true_memory = store_to_memory(control(), vthread_offset, _gvn.intcon(1), T_BOOLEAN, Compile::AliasIdxRaw, MemNode::release, true);
+  Node * vthread_true_memory = store_to_memory(control(), vthread_offset, _gvn.intcon(1), T_BOOLEAN, MemNode::release, true);
 
   RegionNode* thread_compare_rgn = new RegionNode(PATH_LIMIT);
   record_for_igvn(thread_compare_rgn);
@@ -3786,7 +3783,7 @@ bool LibraryCallKit::inline_native_Continuation_pinning(bool unpin) {
     next_pin_count = _gvn.transform(new AddINode(pin_count, _gvn.intcon(1)));
   }
 
-  Node* updated_pin_count_memory = store_to_memory(control(), pin_count_offset, next_pin_count, T_INT, Compile::AliasIdxRaw, MemNode::unordered);
+  Node* updated_pin_count_memory = store_to_memory(control(), pin_count_offset, next_pin_count, T_INT, MemNode::unordered);
 
   // True branch, pin count over/underflow.
   Node* pin_count_over_underflow = _gvn.transform(new IfTrueNode(iff_pin_count_over_underflow));
@@ -5063,7 +5060,7 @@ bool LibraryCallKit::inline_unsafe_copyMemory() {
   assert((sizeof(bool) * CHAR_BIT) == 8, "not implemented");
 
   // update volatile field
-  store_to_memory(control(), doing_unsafe_access_addr, intcon(1), doing_unsafe_access_bt, Compile::AliasIdxRaw, MemNode::unordered);
+  store_to_memory(control(), doing_unsafe_access_addr, intcon(1), doing_unsafe_access_bt, MemNode::unordered);
 
   int flags = RC_LEAF | RC_NO_FP;
 
@@ -5088,7 +5085,7 @@ bool LibraryCallKit::inline_unsafe_copyMemory() {
                     dst_type,
                     src_addr, dst_addr, size XTOP);
 
-  store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, Compile::AliasIdxRaw, MemNode::unordered);
+  store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, MemNode::unordered);
 
   return true;
 }
@@ -5118,7 +5115,7 @@ bool LibraryCallKit::inline_unsafe_setMemory() {
   assert((sizeof(bool) * CHAR_BIT) == 8, "not implemented");
 
   // update volatile field
-  store_to_memory(control(), doing_unsafe_access_addr, intcon(1), doing_unsafe_access_bt, Compile::AliasIdxRaw, MemNode::unordered);
+  store_to_memory(control(), doing_unsafe_access_addr, intcon(1), doing_unsafe_access_bt, MemNode::unordered);
 
   int flags = RC_LEAF | RC_NO_FP;
 
@@ -5139,7 +5136,7 @@ bool LibraryCallKit::inline_unsafe_setMemory() {
                     dst_type,
                     dst_addr, size XTOP, byte);
 
-  store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, Compile::AliasIdxRaw, MemNode::unordered);
+  store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, MemNode::unordered);
 
   return true;
 }
@@ -7022,6 +7019,8 @@ Node* LibraryCallKit::load_field_from_object(Node* fromObj, const char* fieldName
   assert(field_klass->is_loaded(), "should be loaded");
   const TypePtr* adr_type = C->alias_type(field)->adr_type();
   Node *adr = basic_plus_adr(fromObj, fromObj, offset);
+  assert(C->get_alias_index(adr_type) == C->get_alias_index(_gvn.type(adr)->isa_ptr()),
+         "slice of address and input slice don't match");
   BasicType bt = field->layout_type();
 
   // Build the resultant type of the load
@@ -247,7 +247,7 @@ void Parse::load_interpreter_state(Node* osr_buf) {
     Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf);
 
 
-    store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
+    store_to_memory(control(), box, displaced_hdr, T_ADDRESS, MemNode::unordered);
 
     // Build a bogus FastLockNode (no code will be generated) and push the
     // monitor into our debug info.
@@ -2272,7 +2272,7 @@ void Parse::add_safepoint() {
   Node *polladr;
   Node *thread = _gvn.transform(new ThreadLocalNode());
   Node *polling_page_load_addr = _gvn.transform(basic_plus_adr(top(), thread, in_bytes(JavaThread::polling_page_offset())));
-  polladr = make_load(control(), polling_page_load_addr, TypeRawPtr::BOTTOM, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
+  polladr = make_load(control(), polling_page_load_addr, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);
   sfpnt->init_req(TypeFunc::Parms+0, _gvn.transform(polladr));
 
   // Fix up the JVM State edges
@@ -1376,9 +1376,9 @@ static volatile int _trap_stress_counter = 0;
 
 void Parse::increment_trap_stress_counter(Node*& counter, Node*& incr_store) {
   Node* counter_addr = makecon(TypeRawPtr::make((address)&_trap_stress_counter));
-  counter = make_load(control(), counter_addr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, MemNode::unordered);
+  counter = make_load(control(), counter_addr, TypeInt::INT, T_INT, MemNode::unordered);
   counter = _gvn.transform(new AddINode(counter, intcon(1)));
-  incr_store = store_to_memory(control(), counter_addr, counter, T_INT, Compile::AliasIdxRaw, MemNode::unordered);
+  incr_store = store_to_memory(control(), counter_addr, counter, T_INT, MemNode::unordered);
 }
 
 //----------------------------------do_ifnull----------------------------------
@@ -131,6 +131,8 @@ void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
   int offset = field->offset_in_bytes();
   const TypePtr* adr_type = C->alias_type(field)->adr_type();
   Node *adr = basic_plus_adr(obj, obj, offset);
+  assert(C->get_alias_index(adr_type) == C->get_alias_index(_gvn.type(adr)->isa_ptr()),
+         "slice of address and input slice don't match");
 
   // Build the resultant type of the load
   const Type *type;
@@ -204,6 +206,8 @@ void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
   int offset = field->offset_in_bytes();
   const TypePtr* adr_type = C->alias_type(field)->adr_type();
   Node* adr = basic_plus_adr(obj, obj, offset);
+  assert(C->get_alias_index(adr_type) == C->get_alias_index(_gvn.type(adr)->isa_ptr()),
+         "slice of address and input slice don't match");
   BasicType bt = field->layout_type();
   // Value to be stored
   Node* val = type2size[bt] == 1 ? pop() : pop_pair();
@@ -406,7 +410,7 @@ void Parse::do_multianewarray() {
     // Fill-in it with values
     for (j = 0; j < ndimensions; j++) {
       Node *dims_elem = array_element_address(dims, intcon(j), T_INT);
-      store_to_memory(control(), dims_elem, length[j], T_INT, TypeAryPtr::INTS, MemNode::unordered);
+      store_to_memory(control(), dims_elem, length[j], T_INT, MemNode::unordered);
     }
   }
 
@@ -1325,7 +1325,7 @@ void PhaseStringOpts::getChars(GraphKit& kit, Node* arg, Node* dst_array, BasicType bt
     Node* index = __ SubI(charPos, __ intcon((bt == T_BYTE) ? 1 : 2));
     Node* ch = __ AddI(r, __ intcon('0'));
     Node* st = __ store_to_memory(kit.control(), kit.array_element_address(dst_array, index, T_BYTE),
-                                  ch, bt, byte_adr_idx, MemNode::unordered, false /* require_atomic_access */,
+                                  ch, bt, MemNode::unordered, false /* require_atomic_access */,
                                   false /* unaligned */, (bt != T_BYTE) /* mismatched */);
 
     iff = kit.create_and_map_if(head, __ Bool(__ CmpI(q, __ intcon(0)), BoolTest::ne),
@@ -1364,8 +1364,8 @@ void PhaseStringOpts::getChars(GraphKit& kit, Node* arg, Node* dst_array, BasicType bt
   } else {
     Node* index = __ SubI(charPos, __ intcon((bt == T_BYTE) ? 1 : 2));
     st = __ store_to_memory(kit.control(), kit.array_element_address(dst_array, index, T_BYTE),
-                            sign, bt, byte_adr_idx, MemNode::unordered, false /* require_atomic_access */,
-                            false /* unaligned */, (bt != T_BYTE) /* mismatched */);
+                            sign, bt, MemNode::unordered, false /* require_atomic_access */, false /* unaligned */,
+                            (bt != T_BYTE) /* mismatched */);
 
     final_merge->init_req(merge_index + 1, kit.control());
     final_mem->init_req(merge_index + 1, st);