Christian Thalinger 2009-04-22 06:09:24 -07:00
commit b9f27b5eea
22 changed files with 1129 additions and 1237 deletions

View File

@ -25,24 +25,36 @@
#include "incls/_precompiled.incl"
#include "incls/_assembler_sparc.cpp.incl"
// Implementation of Address
Address::Address( addr_type t, int which ) {
switch (t) {
case extra_in_argument:
case extra_out_argument:
_base = t == extra_in_argument ? FP : SP;
_hi = 0;
// Warning: In LP64 mode, _disp will occupy more than 10 bits.
// This is inconsistent with the other constructors, but op
// codes such as ld or ldx only access disp() to get their
// simm13 argument.
_disp = ((which - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
break;
default:
ShouldNotReachHere();
break;
// Convert the raw encoding form into the form expected by the
// constructor for Address.
Address Address::make_raw(int base, int index, int scale, int disp, bool disp_is_oop) {
assert(scale == 0, "not supported");
RelocationHolder rspec;
if (disp_is_oop) {
rspec = Relocation::spec_simple(relocInfo::oop_type);
}
Register rindex = as_Register(index);
if (rindex != G0) {
Address madr(as_Register(base), rindex);
madr._rspec = rspec;
return madr;
} else {
Address madr(as_Register(base), disp);
madr._rspec = rspec;
return madr;
}
}
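Illustrative sketch (not part of this commit) of how a matcher call site might use make_raw: raw integer register encodings go in, a typed Address comes out. The registers and displacement below are hypothetical.
// Hypothetical call sites; Register::encoding() yields the raw int form.
Address a1 = Address::make_raw(G3->encoding(), G0->encoding(), 0, 0x20, false); // [G3 + 0x20]
Address a2 = Address::make_raw(G3->encoding(), G4->encoding(), 0, 0x0,  false); // [G3 + G4]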
Address Argument::address_in_frame() const {
// Warning: In LP64 mode disp will occupy more than 10 bits, but
// op codes such as ld or ldx only access disp() to get
// their simm13 argument.
int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
if (is_in())
return Address(FP, disp); // In argument.
else
return Address(SP, disp); // Out argument.
}
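A minimal host-side sketch of the displacement arithmetic above. The LP64 constants are assumptions for illustration, not taken from this commit.
#include <cstdint>
static int arg_disp(int number) {
  const int n_register_parameters           = 6;    // assumption: SPARC LP64 ABI
  const int memory_parameter_word_sp_offset = 23;   // assumption: illustrative value
  const int BytesPerWord                    = 8;    // assumption: LP64
  const int STACK_BIAS                      = 2047; // assumption: SPARC V9 stack bias
  return ((number - n_register_parameters + memory_parameter_word_sp_offset)
          * BytesPerWord) + STACK_BIAS;
}
// e.g. the 7th argument (number == 6) lands at FP/SP + arg_disp(6).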
static const char* argumentNames[][2] = {
@ -614,16 +626,17 @@ void MacroAssembler::jmp(Register r1, int offset, const char* file, int line ) {
}
// This code sequence is relocatable to any address, even on LP64.
void MacroAssembler::jumpl( Address& a, Register d, int offset, const char* file, int line ) {
void MacroAssembler::jumpl(AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
assert_not_delayed();
// Force fixed length sethi because NativeJump and NativeFarCall don't handle
// variable length instruction streams.
sethi(a, /*ForceRelocatable=*/ true);
patchable_sethi(addrlit, temp);
Address a(temp, addrlit.low10() + offset); // Add the offset to the displacement.
if (TraceJumps) {
#ifndef PRODUCT
// Must do the add here so relocation can find the remainder of the
// value to be relocated.
add(a.base(), a.disp() + offset, a.base(), a.rspec(offset));
add(a.base(), a.disp(), a.base(), addrlit.rspec(offset));
save_frame(0);
verify_thread();
ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
@ -652,15 +665,15 @@ void MacroAssembler::jumpl( Address& a, Register d, int offset, const char* file
restore();
jmpl(a.base(), G0, d);
#else
jmpl(a, d, offset);
jmpl(a.base(), a.disp(), d);
#endif /* PRODUCT */
} else {
jmpl(a, d, offset);
jmpl(a.base(), a.disp(), d);
}
}
void MacroAssembler::jump( Address& a, int offset, const char* file, int line ) {
jumpl( a, G0, offset, file, line );
void MacroAssembler::jump(AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
jumpl(addrlit, temp, G0, offset, file, line);
}
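Sketch of a call site under the new signatures: the caller now supplies a scratch register for the patchable sethi. The target and scratch register below are hypothetical.
AddressLiteral dest(entry_point);   // 'entry_point' is a hypothetical jump target
__ JUMP(dest, G3, 0);               // expands to jump(dest, G3, 0, __FILE__, __LINE__)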
@ -678,7 +691,8 @@ void MacroAssembler::set_varargs( Argument inArg, Register d ) {
st_ptr(savePtr.as_register(), savePtr.address_in_frame());
}
// return the address of the first memory slot
add(inArg.address_in_frame(), d);
Address a = inArg.address_in_frame();
add(a.base(), a.disp(), d);
}
// Conditional breakpoint (for assertion checks in assembly code)
@ -702,7 +716,6 @@ void MacroAssembler::flush_windows() {
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
Address mem_serialize_page(tmp1, os::get_memory_serialize_page());
srl(thread, os::get_serialize_page_shift_count(), tmp2);
if (Assembler::is_simm13(os::vm_page_size())) {
and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
@ -711,7 +724,7 @@ void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register t
set((os::vm_page_size() - sizeof(int)), tmp1);
and3(tmp2, tmp1, tmp2);
}
load_address(mem_serialize_page);
set(os::get_memory_serialize_page(), tmp1);
st(G0, tmp1, tmp2);
}
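A hedged host-side model of the offset computation above: each thread hashes to a distinct int-aligned slot inside the serialize page, which is what spreads the stores across cache lines.
#include <cstdint>
// 'shift' and 'page_size' stand in for what os::get_serialize_page_shift_count()
// and os::vm_page_size() would return.
static uintptr_t serialize_slot(uintptr_t thread, int shift, int page_size) {
  return (thread >> shift) & (uintptr_t)(page_size - (int)sizeof(int));
}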
@ -830,10 +843,10 @@ void MacroAssembler::get_thread() {
mov(G3, L2); // avoid clobbering G3 also
mov(G4, L5); // avoid clobbering G4
#ifdef ASSERT
Address last_get_thread_addr(L3, (address)&last_get_thread);
sethi(last_get_thread_addr);
AddressLiteral last_get_thread_addrlit(&last_get_thread);
set(last_get_thread_addrlit, L3);
inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call
st_ptr(L4, last_get_thread_addr);
st_ptr(L4, L3, 0);
#endif
call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
delayed()->nop();
@ -919,13 +932,9 @@ void MacroAssembler::restore_thread(const Register thread_cache) {
// %%% maybe get rid of [re]set_last_Java_frame
void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
assert_not_delayed();
Address flags(G2_thread,
0,
in_bytes(JavaThread::frame_anchor_offset()) +
in_bytes(JavaFrameAnchor::flags_offset()));
Address pc_addr(G2_thread,
0,
in_bytes(JavaThread::last_Java_pc_offset()));
Address flags(G2_thread, JavaThread::frame_anchor_offset() +
JavaFrameAnchor::flags_offset());
Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset());
// Always set last_Java_pc and flags first because once last_Java_sp is visible,
// has_last_Java_frame is true and users will look at the rest of the fields.
@ -977,22 +986,18 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Ja
#endif // ASSERT
assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
add( last_java_sp, STACK_BIAS, G4_scratch );
st_ptr(G4_scratch, Address(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset())));
st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
#else
st_ptr(last_java_sp, Address(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset())));
st_ptr(last_java_sp, G2_thread, JavaThread::last_Java_sp_offset());
#endif // _LP64
}
void MacroAssembler::reset_last_Java_frame(void) {
assert_not_delayed();
Address sp_addr(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset()));
Address pc_addr(G2_thread,
0,
in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
Address flags(G2_thread,
0,
in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset()));
Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset());
Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
Address flags (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
#ifdef ASSERT
// check that it WAS previously set
@ -1063,7 +1068,7 @@ void MacroAssembler::check_and_forward_exception(Register scratch_reg)
check_and_handle_popframe(scratch_reg);
check_and_handle_earlyret(scratch_reg);
Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
Address exception_addr(G2_thread, Thread::pending_exception_offset());
ld_ptr(exception_addr, scratch_reg);
br_null(scratch_reg, false, pt, L);
delayed()->nop();
@ -1186,7 +1191,7 @@ void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Re
void MacroAssembler::get_vm_result(Register oop_result) {
verify_thread();
Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset()));
Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
ld_ptr( vm_result_addr, oop_result);
st_ptr(G0, vm_result_addr);
verify_oop(oop_result);
@ -1195,7 +1200,7 @@ void MacroAssembler::get_vm_result(Register oop_result) {
void MacroAssembler::get_vm_result_2(Register oop_result) {
verify_thread();
Address vm_result_addr_2(G2_thread, 0, in_bytes(JavaThread::vm_result_2_offset()));
Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
ld_ptr(vm_result_addr_2, oop_result);
st_ptr(G0, vm_result_addr_2);
verify_oop(oop_result);
@ -1206,7 +1211,7 @@ void MacroAssembler::get_vm_result_2(Register oop_result) {
// leave it undisturbed.
void MacroAssembler::set_vm_result(Register oop_result) {
verify_thread();
Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset()));
Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
verify_oop(oop_result);
# ifdef ASSERT
@ -1234,81 +1239,78 @@ void MacroAssembler::card_table_write(jbyte* byte_map_base,
#else
srl(obj, CardTableModRefBS::card_shift, obj);
#endif
assert( tmp != obj, "need separate temp reg");
Address rs(tmp, (address)byte_map_base);
load_address(rs);
stb(G0, rs.base(), obj);
}
// %%% Note: The following six instructions have been moved,
// unchanged, from assembler_sparc.inline.hpp.
// They will be refactored at a later date.
void MacroAssembler::sethi(intptr_t imm22a,
Register d,
bool ForceRelocatable,
RelocationHolder const& rspec) {
Address adr( d, (address)imm22a, rspec );
MacroAssembler::sethi( adr, ForceRelocatable );
assert(tmp != obj, "need separate temp reg");
set((address) byte_map_base, tmp);
stb(G0, tmp, obj);
}
void MacroAssembler::sethi(Address& a, bool ForceRelocatable) {
void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
address save_pc;
int shiftcnt;
// if addr of local, do not need to load it
assert(a.base() != FP && a.base() != SP, "just use ld or st for locals");
#ifdef _LP64
# ifdef CHECK_DELAY
assert_not_delayed( (char *)"cannot put two instructions in delay slot" );
assert_not_delayed((char*) "cannot put two instructions in delay slot");
# endif
v9_dep();
// ForceRelocatable = 1;
save_pc = pc();
if (a.hi32() == 0 && a.low32() >= 0) {
Assembler::sethi(a.low32(), a.base(), a.rspec());
int msb32 = (int) (addrlit.value() >> 32);
int lsb32 = (int) (addrlit.value());
if (msb32 == 0 && lsb32 >= 0) {
Assembler::sethi(lsb32, d, addrlit.rspec());
}
else if (a.hi32() == -1) {
Assembler::sethi(~a.low32(), a.base(), a.rspec());
xor3(a.base(), ~low10(~0), a.base());
else if (msb32 == -1) {
Assembler::sethi(~lsb32, d, addrlit.rspec());
xor3(d, ~low10(~0), d);
}
else {
Assembler::sethi(a.hi32(), a.base(), a.rspec() ); // 22
if ( a.hi32() & 0x3ff ) // Any bits?
or3( a.base(), a.hi32() & 0x3ff ,a.base() ); // High 32 bits are now in low 32
if ( a.low32() & 0xFFFFFC00 ) { // done?
if( (a.low32() >> 20) & 0xfff ) { // Any bits set?
sllx(a.base(), 12, a.base()); // Make room for next 12 bits
or3( a.base(), (a.low32() >> 20) & 0xfff,a.base() ); // Or in next 12
shiftcnt = 0; // We already shifted
Assembler::sethi(msb32, d, addrlit.rspec()); // msb 22-bits
if (msb32 & 0x3ff) // Any bits?
or3(d, msb32 & 0x3ff, d); // msb 32-bits are now in lsb 32
if (lsb32 & 0xFFFFFC00) { // done?
if ((lsb32 >> 20) & 0xfff) { // Any bits set?
sllx(d, 12, d); // Make room for next 12 bits
or3(d, (lsb32 >> 20) & 0xfff, d); // Or in next 12
shiftcnt = 0; // We already shifted
}
else
shiftcnt = 12;
if( (a.low32() >> 10) & 0x3ff ) {
sllx(a.base(), shiftcnt+10, a.base());// Make room for last 10 bits
or3( a.base(), (a.low32() >> 10) & 0x3ff,a.base() ); // Or in next 10
if ((lsb32 >> 10) & 0x3ff) {
sllx(d, shiftcnt + 10, d); // Make room for last 10 bits
or3(d, (lsb32 >> 10) & 0x3ff, d); // Or in next 10
shiftcnt = 0;
}
else
shiftcnt = 10;
sllx(a.base(), shiftcnt+10 , a.base()); // Shift leaving disp field 0'd
sllx(d, shiftcnt + 10, d); // Shift leaving disp field 0'd
}
else
sllx( a.base(), 32, a.base() );
sllx(d, 32, d);
}
// Pad out the instruction sequence so it can be
// patched later.
if ( ForceRelocatable || (a.rtype() != relocInfo::none &&
a.rtype() != relocInfo::runtime_call_type) ) {
while ( pc() < (save_pc + (7 * BytesPerInstWord )) )
// Pad out the instruction sequence so it can be patched later.
if (ForceRelocatable || (addrlit.rtype() != relocInfo::none &&
addrlit.rtype() != relocInfo::runtime_call_type)) {
while (pc() < (save_pc + (7 * BytesPerInstWord)))
nop();
}
#else
Assembler::sethi(a.hi(), a.base(), a.rspec());
Assembler::sethi(addrlit.value(), d, addrlit.rspec());
#endif
}
void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) {
internal_sethi(addrlit, d, false);
}
void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) {
internal_sethi(addrlit, d, true);
}
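Host-side simulation (an illustrative sketch, not part of this commit) of the LP64 expansion above. In every branch the emitted sequence leaves the destination register holding the 64-bit constant with its low 10 bits cleared, ready for the trailing add(d, low10, d) or for use as a load/store displacement.
#include <cstdint>
static uint64_t simulate_sethi64(uint64_t value) {
  int32_t msb32 = (int32_t)(value >> 32);
  int32_t lsb32 = (int32_t)value;
  uint64_t d;
  if (msb32 == 0 && lsb32 >= 0) {                       // fits a plain sethi
    d = (uint32_t)lsb32 & ~(uint32_t)0x3ff;
  } else if (msb32 == -1) {                             // sethi of complement + xor
    d = (uint64_t)(~(uint32_t)lsb32 & ~(uint32_t)0x3ff) ^ ~(uint64_t)0x3ff;
  } else {
    d = (uint32_t)msb32 & ~(uint32_t)0x3ff;             // sethi: msb 22 bits
    if (msb32 & 0x3ff) d |= (uint32_t)msb32 & 0x3ff;    // or3: msb low 10 bits
    if ((uint32_t)lsb32 & 0xFFFFFC00u) {                // any middle bits?
      int shiftcnt;
      if (((uint32_t)lsb32 >> 20) & 0xfff) { d = (d << 12) | (((uint32_t)lsb32 >> 20) & 0xfff); shiftcnt = 0; }
      else                                   shiftcnt = 12;
      if (((uint32_t)lsb32 >> 10) & 0x3ff) { d = (d << (shiftcnt + 10)) | (((uint32_t)lsb32 >> 10) & 0x3ff); shiftcnt = 0; }
      else                                   shiftcnt = 10;
      d <<= shiftcnt + 10;                              // leave disp field zeroed
    } else {
      d <<= 32;
    }
  }
  return d;                                             // == value & ~(uint64_t)0x3ff
}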
int MacroAssembler::size_of_sethi(address a, bool worst_case) {
#ifdef _LP64
if (worst_case) return 7;
@ -1339,61 +1341,50 @@ int MacroAssembler::worst_case_size_of_set() {
return size_of_sethi(NULL, true) + 1;
}
void MacroAssembler::set(intptr_t value, Register d,
RelocationHolder const& rspec) {
Address val( d, (address)value, rspec);
if ( rspec.type() == relocInfo::none ) {
void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
intptr_t value = addrlit.value();
if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) {
// can optimize
if (-4096 <= value && value <= 4095) {
if (-4096 <= value && value <= 4095) {
or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
return;
}
if (inv_hi22(hi22(value)) == value) {
sethi(val);
sethi(addrlit, d);
return;
}
}
assert_not_delayed( (char *)"cannot put two instructions in delay slot" );
sethi( val );
if (rspec.type() != relocInfo::none || (value & 0x3ff) != 0) {
add( d, value & 0x3ff, d, rspec);
assert_not_delayed((char*) "cannot put two instructions in delay slot");
internal_sethi(addrlit, d, ForceRelocatable);
if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) {
add(d, addrlit.low10(), d, addrlit.rspec());
}
}
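A hedged sketch of the fast-path selection above for unrelocated constants. The parameter 'hi22_exact' stands in for inv_hi22(hi22(value)) == value, whose exact definition is not shown in this hunk.
#include <cstdint>
static const char* set_strategy(intptr_t value, bool hi22_exact) {
  if (-4096 <= value && value <= 4095)
    return "or3(G0, value, d)";                     // one insn: fits simm13
  if (hi22_exact)
    return "sethi(value, d)";                       // one insn: sethi alone reproduces it
  return "internal_sethi(...); add(d, low10, d)";   // general case
}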
void MacroAssembler::setsw(int value, Register d,
RelocationHolder const& rspec) {
Address val( d, (address)value, rspec);
if ( rspec.type() == relocInfo::none ) {
// can optimize
if (-4096 <= value && value <= 4095) {
or3(G0, value, d);
return;
}
if (inv_hi22(hi22(value)) == value) {
sethi( val );
#ifndef _LP64
if ( value < 0 ) {
assert_not_delayed();
sra (d, G0, d);
}
#endif
return;
}
}
assert_not_delayed();
sethi( val );
add( d, value & 0x3ff, d, rspec);
// (A negative value could be loaded in 2 insns with sethi/xor,
// but it would take a more complex relocation.)
#ifndef _LP64
if ( value < 0)
sra(d, G0, d);
#endif
void MacroAssembler::set(const AddressLiteral& al, Register d) {
internal_set(al, d, false);
}
// %%% End of moved six set instructions.
void MacroAssembler::set(intptr_t value, Register d) {
AddressLiteral al(value);
internal_set(al, d, false);
}
void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) {
AddressLiteral al(addr, rspec);
internal_set(al, d, false);
}
void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) {
internal_set(al, d, true);
}
void MacroAssembler::patchable_set(intptr_t value, Register d) {
AddressLiteral al(value);
internal_set(al, d, true);
}
void MacroAssembler::set64(jlong value, Register d, Register tmp) {
@ -1512,17 +1503,17 @@ void MacroAssembler::save_frame_and_mov(int extraWords,
}
Address MacroAssembler::allocate_oop_address(jobject obj, Register d) {
AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
int oop_index = oop_recorder()->allocate_index(obj);
return Address(d, address(obj), oop_Relocation::spec(oop_index));
return AddressLiteral(obj, oop_Relocation::spec(oop_index));
}
Address MacroAssembler::constant_oop_address(jobject obj, Register d) {
AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
int oop_index = oop_recorder()->find_index(obj);
return Address(d, address(obj), oop_Relocation::spec(oop_index));
return AddressLiteral(obj, oop_Relocation::spec(oop_index));
}
void MacroAssembler::set_narrow_oop(jobject obj, Register d) {
@ -1682,7 +1673,7 @@ void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * fil
sprintf(real_msg, "%s%s(%s:%d)", msg, buffer, file, line);
// Call indirectly to solve generation ordering problem
Address a(O7, (address)StubRoutines::verify_oop_subroutine_entry_address());
AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());
// Make some space on stack above the current register window.
// Enough to hold 8 64-bit registers.
@ -1718,7 +1709,7 @@ void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char
sprintf(real_msg, "%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);
// Call indirectly to solve generation ordering problem
Address a(O7, (address)StubRoutines::verify_oop_subroutine_entry_address());
AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());
// Make some space on stack above the current register window.
// Enough to hold 8 64-bit registers.
@ -1772,11 +1763,7 @@ void MacroAssembler::verify_oop_subroutine() {
{ // count number of verifies
Register O2_adr = O2;
Register O3_accum = O3;
Address count_addr( O2_adr, (address) StubRoutines::verify_oop_count_addr() );
sethi(count_addr);
ld(count_addr, O3_accum);
inc(O3_accum);
st(O3_accum, count_addr);
inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum);
}
Register O2_mask = O2;
@ -1870,8 +1857,8 @@ void MacroAssembler::verify_oop_subroutine() {
assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");
// call indirectly to solve generation ordering problem
Address a(O5, (address)StubRoutines::Sparc::stop_subroutine_entry_address());
load_ptr_contents(a, O5);
AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address());
load_ptr_contents(al, O5);
jmpl(O5, 0, O7);
delayed()->nop();
}
@ -1891,7 +1878,7 @@ void MacroAssembler::stop(const char* msg) {
assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");
// call indirectly to solve generation ordering problem
Address a(O5, (address)StubRoutines::Sparc::stop_subroutine_entry_address());
AddressLiteral a(StubRoutines::Sparc::stop_subroutine_entry_address());
load_ptr_contents(a, O5);
jmpl(O5, 0, O7);
delayed()->nop();
@ -2003,7 +1990,7 @@ void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresul
subcc( Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
Label no_extras;
br( negative, true, pt, no_extras ); // if neg, clear reg
delayed()->set( 0, Rresult); // annulled, so only if taken
delayed()->set(0, Rresult); // annulled, so only if taken
bind( no_extras );
}
@ -2623,7 +2610,7 @@ RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_ad
return RegisterOrConstant(value + offset);
// load indirectly to solve generation ordering problem
Address a(tmp, (address) delayed_value_addr);
AddressLiteral a(delayed_value_addr);
load_ptr_contents(a, tmp);
#ifdef ASSERT
@ -3107,21 +3094,21 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
delayed()->nop();
load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
or3(G2_thread, temp_reg, temp_reg);
xor3(mark_reg, temp_reg, temp_reg);
andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg);
if (counters != NULL) {
cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg);
// Reload mark_reg as we may need it later
ld_ptr(Address(obj_reg, 0, oopDesc::mark_offset_in_bytes()), mark_reg);
ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg);
}
brx(Assembler::equal, true, Assembler::pt, done);
delayed()->nop();
Label try_revoke_bias;
Label try_rebias;
Address mark_addr = Address(obj_reg, 0, oopDesc::mark_offset_in_bytes());
Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes());
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
// At this point we know that the header has the bias pattern and
@ -3185,7 +3172,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
// FIXME: due to a lack of registers we currently blow away the age
// bits in this situation. Should attempt to preserve them.
load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
or3(G2_thread, temp_reg, temp_reg);
casn(mark_addr.base(), mark_reg, temp_reg);
// If the biasing toward our thread failed, this means that
@ -3216,7 +3203,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
// FIXME: due to a lack of registers we currently blow away the age
// bits in this situation. Should attempt to preserve them.
load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
casn(mark_addr.base(), mark_reg, temp_reg);
// Fall through to the normal CAS-based lock, because no matter what
// the result of the above CAS, some thread must have succeeded in
@ -3283,7 +3270,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
Register Rbox, Register Rscratch,
BiasedLockingCounters* counters,
bool try_bias) {
Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes());
Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
verify_oop(Roop);
Label done ;
@ -3386,7 +3373,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
// If m->owner != null goto IsLocked
// Pessimistic form: Test-and-CAS vs CAS
// The optimistic form avoids RTS->RTO cache line upgrades.
ld_ptr (Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ;
ld_ptr (Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
andcc (Rscratch, Rscratch, G0) ;
brx (Assembler::notZero, false, Assembler::pn, done) ;
delayed()->nop() ;
@ -3482,7 +3469,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
// Test-and-CAS vs CAS
// Pessimistic form avoids futile (doomed) CAS attempts
// The optimistic form avoids RTS->RTO cache line upgrades.
ld_ptr (Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ;
ld_ptr (Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
andcc (Rscratch, Rscratch, G0) ;
brx (Assembler::notZero, false, Assembler::pn, done) ;
delayed()->nop() ;
@ -3508,7 +3495,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
Register Rbox, Register Rscratch,
bool try_bias) {
Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes());
Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
Label done ;
@ -3568,14 +3555,14 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
// Note that we use 1-0 locking by default for the inflated case. We
// close the resultant (and rare) race by having contending threads in
// monitorenter periodically poll _owner.
ld_ptr (Address(Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ;
ld_ptr (Address(Rmark, 0, ObjectMonitor::recursions_offset_in_bytes()-2), Rbox) ;
ld_ptr (Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
ld_ptr (Rmark, ObjectMonitor::recursions_offset_in_bytes() - 2, Rbox);
xor3 (Rscratch, G2_thread, Rscratch) ;
orcc (Rbox, Rscratch, Rbox) ;
brx (Assembler::notZero, false, Assembler::pn, done) ;
delayed()->
ld_ptr (Address (Rmark, 0, ObjectMonitor::EntryList_offset_in_bytes()-2), Rscratch) ;
ld_ptr (Address (Rmark, 0, ObjectMonitor::cxq_offset_in_bytes()-2), Rbox) ;
ld_ptr (Rmark, ObjectMonitor::EntryList_offset_in_bytes() - 2, Rscratch);
ld_ptr (Rmark, ObjectMonitor::cxq_offset_in_bytes() - 2, Rbox);
orcc (Rbox, Rscratch, G0) ;
if (EmitSync & 65536) {
Label LSucc ;
@ -3583,12 +3570,12 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
delayed()->nop() ;
br (Assembler::always, false, Assembler::pt, done) ;
delayed()->
st_ptr (G0, Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2)) ;
st_ptr (G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
bind (LSucc) ;
st_ptr (G0, Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2)) ;
st_ptr (G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
if (os::is_MP()) { membar (StoreLoad) ; }
ld_ptr (Address (Rmark, 0, ObjectMonitor::succ_offset_in_bytes()-2), Rscratch) ;
ld_ptr (Rmark, ObjectMonitor::succ_offset_in_bytes() - 2, Rscratch);
andcc (Rscratch, Rscratch, G0) ;
brx (Assembler::notZero, false, Assembler::pt, done) ;
delayed()-> andcc (G0, G0, G0) ;
@ -3606,7 +3593,7 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
delayed()->nop() ;
br (Assembler::always, false, Assembler::pt, done) ;
delayed()->
st_ptr (G0, Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2)) ;
st_ptr (G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
}
bind (LStacked) ;
@ -4005,20 +3992,26 @@ void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr,
bind(L);
}
void MacroAssembler::inc_counter(address counter_ptr, Register Rtmp1, Register Rtmp2) {
Address counter_addr(Rtmp1, counter_ptr);
load_contents(counter_addr, Rtmp2);
void MacroAssembler::inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2) {
AddressLiteral addrlit(counter_addr);
sethi(addrlit, Rtmp1); // Move hi22 bits into temporary register.
Address addr(Rtmp1, addrlit.low10()); // Build an address with low10 bits.
ld(addr, Rtmp2);
inc(Rtmp2);
store_contents(Rtmp2, counter_addr);
st(Rtmp2, addr);
}
void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) {
inc_counter((address) counter_addr, Rtmp1, Rtmp2);
}
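Sketch of a typical call site: the low 10 bits of the counter address ride in the ld/st displacement, so no add is needed after the sethi. The counter and scratch registers below are hypothetical.
static int slow_path_counter;                 // hypothetical C counter
__ inc_counter(&slow_path_counter, G3, G4);   // G3 holds the upper bits, G4 the value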
SkipIfEqual::SkipIfEqual(
MacroAssembler* masm, Register temp, const bool* flag_addr,
Assembler::Condition condition) {
_masm = masm;
Address flag(temp, (address)flag_addr, relocInfo::none);
_masm->sethi(flag);
_masm->ldub(flag, temp);
AddressLiteral flag(flag_addr);
_masm->sethi(flag, temp);
_masm->ldub(temp, flag.low10(), temp);
_masm->tst(temp);
_masm->br(condition, false, Assembler::pt, _label);
_masm->delayed()->nop();
@ -4333,8 +4326,8 @@ static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
#else
masm.srl(O0, CardTableModRefBS::card_shift, O0);
#endif
Address rs(O1, (address)byte_map_base);
masm.load_address(rs); // O1 := <card table base>
AddressLiteral addrlit(byte_map_base);
masm.set(addrlit, O1); // O1 := <card table base>
masm.ldub(O0, O1, O2); // O2 := [O0 + O1]
masm.br_on_reg_cond(Assembler::rc_nz, /*annul*/false, Assembler::pt,
@ -4494,10 +4487,9 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val
#else
post_filter_masm->srl(store_addr, CardTableModRefBS::card_shift, store_addr);
#endif
assert( tmp != store_addr, "need separate temp reg");
Address rs(tmp, (address)bs->byte_map_base);
load_address(rs);
stb(G0, rs.base(), store_addr);
assert(tmp != store_addr, "need separate temp reg");
set(bs->byte_map_base, tmp);
stb(G0, tmp, store_addr);
}
bind(filtered);
@ -4516,24 +4508,6 @@ void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_v
card_table_write(bs->byte_map_base, tmp, store_addr);
}
// Loading values by size and signed-ness
void MacroAssembler::load_sized_value(Register s1, RegisterOrConstant s2, Register d,
int size_in_bytes, bool is_signed) {
switch (size_in_bytes ^ (is_signed ? -1 : 0)) {
case ~8: // fall through:
case 8: ld_long( s1, s2, d ); break;
case ~4: ldsw( s1, s2, d ); break;
case 4: lduw( s1, s2, d ); break;
case ~2: ldsh( s1, s2, d ); break;
case 2: lduh( s1, s2, d ); break;
case ~1: ldsb( s1, s2, d ); break;
case 1: ldub( s1, s2, d ); break;
default: ShouldNotReachHere();
}
}
void MacroAssembler::load_klass(Register src_oop, Register klass) {
// The number of bytes in this code is used by
// MachCallDynamicJavaNode::ret_addr_offset()
@ -4563,12 +4537,12 @@ void MacroAssembler::store_klass_gap(Register s, Register d) {
}
}
void MacroAssembler::load_heap_oop(const Address& s, Register d, int offset) {
void MacroAssembler::load_heap_oop(const Address& s, Register d) {
if (UseCompressedOops) {
lduw(s, d, offset);
lduw(s, d);
decode_heap_oop(d);
} else {
ld_ptr(s, d, offset);
ld_ptr(s, d);
}
}
@ -4714,7 +4688,7 @@ void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
void MacroAssembler::reinit_heapbase() {
if (UseCompressedOops) {
// call indirectly to solve generation ordering problem
Address base(G6_heapbase, (address)Universe::narrow_oop_base_addr());
AddressLiteral base(Universe::narrow_oop_base_addr());
load_ptr_contents(base, G6_heapbase);
}
}

View File

@ -274,21 +274,90 @@ REGISTER_DECLARATION(Register, Oissuing_pc , O1); // where the exception is comi
class Address VALUE_OBJ_CLASS_SPEC {
private:
Register _base;
#ifdef _LP64
int _hi32; // bits 63::32
int _low32; // bits 31::0
#endif
int _hi;
int _disp;
RelocationHolder _rspec;
Register _base; // Base register.
RegisterOrConstant _index_or_disp; // Index register or constant displacement.
RelocationHolder _rspec;
RelocationHolder rspec_from_rtype(relocInfo::relocType rt, address a = NULL) {
switch (rt) {
public:
Address() : _base(noreg), _index_or_disp(noreg) {}
Address(Register base, RegisterOrConstant index_or_disp)
: _base(base),
_index_or_disp(index_or_disp) {
}
Address(Register base, Register index)
: _base(base),
_index_or_disp(index) {
}
Address(Register base, int disp)
: _base(base),
_index_or_disp(disp) {
}
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
Address(Register base, ByteSize disp)
: _base(base),
_index_or_disp(in_bytes(disp)) {
}
#endif
// accessors
Register base() const { return _base; }
Register index() const { return _index_or_disp.as_register(); }
int disp() const { return _index_or_disp.as_constant(); }
bool has_index() const { return _index_or_disp.is_register(); }
bool has_disp() const { return _index_or_disp.is_constant(); }
const relocInfo::relocType rtype() { return _rspec.type(); }
const RelocationHolder& rspec() { return _rspec; }
RelocationHolder rspec(int offset) const {
return offset == 0 ? _rspec : _rspec.plus(offset);
}
inline bool is_simm13(int offset = 0); // check disp+offset for overflow
Address plus_disp(int plusdisp) const { // bump disp by a small amount
assert(_index_or_disp.is_constant(), "must have a displacement");
Address a(base(), disp() + plusdisp);
return a;
}
Address after_save() const {
Address a = (*this);
a._base = a._base->after_save();
return a;
}
Address after_restore() const {
Address a = (*this);
a._base = a._base->after_restore();
return a;
}
// Convert the raw encoding form into the form expected by the
// constructor for Address.
static Address make_raw(int base, int index, int scale, int disp, bool disp_is_oop);
friend class Assembler;
};
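Sketch of the two shapes an Address now takes; the registers are illustrative. has_index()/has_disp() let the load/store emitters pick between the register-register and register-immediate encodings.
Address disp_form (G3, 0x20);   // [G3 + 0x20]: constant displacement
Address index_form(G3, G4);     // [G3 + G4]:   register index
// e.g. ld(disp_form, O0) uses the simm13 form; ld(index_form, O0) uses rs1+rs2.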
class AddressLiteral VALUE_OBJ_CLASS_SPEC {
private:
address _address;
RelocationHolder _rspec;
RelocationHolder rspec_from_rtype(relocInfo::relocType rtype, address addr) {
switch (rtype) {
case relocInfo::external_word_type:
return external_word_Relocation::spec(a);
return external_word_Relocation::spec(addr);
case relocInfo::internal_word_type:
return internal_word_Relocation::spec(a);
return internal_word_Relocation::spec(addr);
#ifdef _LP64
case relocInfo::opt_virtual_call_type:
return opt_virtual_call_Relocation::spec();
@ -305,127 +374,86 @@ class Address VALUE_OBJ_CLASS_SPEC {
}
}
protected:
// creation
AddressLiteral() : _address(NULL), _rspec(NULL) {}
public:
Address(Register b, address a, relocInfo::relocType rt = relocInfo::none)
: _rspec(rspec_from_rtype(rt, a))
{
_base = b;
AddressLiteral(address addr, RelocationHolder const& rspec)
: _address(addr),
_rspec(rspec) {}
// Some constructors to avoid casting at the call site.
AddressLiteral(jobject obj, RelocationHolder const& rspec)
: _address((address) obj),
_rspec(rspec) {}
AddressLiteral(intptr_t value, RelocationHolder const& rspec)
: _address((address) value),
_rspec(rspec) {}
AddressLiteral(address addr, relocInfo::relocType rtype = relocInfo::none)
: _address((address) addr),
_rspec(rspec_from_rtype(rtype, (address) addr)) {}
// Some constructors to avoid casting at the call site.
AddressLiteral(address* addr, relocInfo::relocType rtype = relocInfo::none)
: _address((address) addr),
_rspec(rspec_from_rtype(rtype, (address) addr)) {}
AddressLiteral(bool* addr, relocInfo::relocType rtype = relocInfo::none)
: _address((address) addr),
_rspec(rspec_from_rtype(rtype, (address) addr)) {}
AddressLiteral(const bool* addr, relocInfo::relocType rtype = relocInfo::none)
: _address((address) addr),
_rspec(rspec_from_rtype(rtype, (address) addr)) {}
AddressLiteral(signed char* addr, relocInfo::relocType rtype = relocInfo::none)
: _address((address) addr),
_rspec(rspec_from_rtype(rtype, (address) addr)) {}
AddressLiteral(int* addr, relocInfo::relocType rtype = relocInfo::none)
: _address((address) addr),
_rspec(rspec_from_rtype(rtype, (address) addr)) {}
AddressLiteral(intptr_t addr, relocInfo::relocType rtype = relocInfo::none)
: _address((address) addr),
_rspec(rspec_from_rtype(rtype, (address) addr)) {}
#ifdef _LP64
_hi32 = (intptr_t)a >> 32; // top 32 bits in 64 bit word
_low32 = (intptr_t)a & ~0; // low 32 bits in 64 bit word
// 32-bit complains about a multiple declaration for int*.
AddressLiteral(intptr_t* addr, relocInfo::relocType rtype = relocInfo::none)
: _address((address) addr),
_rspec(rspec_from_rtype(rtype, (address) addr)) {}
#endif
_hi = (intptr_t)a & ~0x3ff; // top 22 bits in low word
_disp = (intptr_t)a & 0x3ff; // bottom 10 bits
}
Address(Register b, address a, RelocationHolder const& rspec)
: _rspec(rspec)
{
_base = b;
#ifdef _LP64
_hi32 = (intptr_t)a >> 32; // top 32 bits in 64 bit word
_low32 = (intptr_t)a & ~0; // low 32 bits in 64 bit word
#endif
_hi = (intptr_t)a & ~0x3ff; // top 22 bits
_disp = (intptr_t)a & 0x3ff; // bottom 10 bits
}
AddressLiteral(oop addr, relocInfo::relocType rtype = relocInfo::none)
: _address((address) addr),
_rspec(rspec_from_rtype(rtype, (address) addr)) {}
Address(Register b, intptr_t h, intptr_t d, RelocationHolder const& rspec = RelocationHolder())
: _rspec(rspec)
{
_base = b;
#ifdef _LP64
// [RGV] Put in Assert to force me to check usage of this constructor
assert( h == 0, "Check usage of this constructor" );
_hi32 = h;
_low32 = d;
_hi = h;
_disp = d;
#else
_hi = h;
_disp = d;
#endif
}
AddressLiteral(float* addr, relocInfo::relocType rtype = relocInfo::none)
: _address((address) addr),
_rspec(rspec_from_rtype(rtype, (address) addr)) {}
Address()
: _rspec(RelocationHolder())
{
_base = G0;
#ifdef _LP64
_hi32 = 0;
_low32 = 0;
#endif
_hi = 0;
_disp = 0;
}
AddressLiteral(double* addr, relocInfo::relocType rtype = relocInfo::none)
: _address((address) addr),
_rspec(rspec_from_rtype(rtype, (address) addr)) {}
// fancier constructors
intptr_t value() const { return (intptr_t) _address; }
int low10() const;
enum addr_type {
extra_in_argument, // in the In registers
extra_out_argument // in the Outs
};
const relocInfo::relocType rtype() const { return _rspec.type(); }
const RelocationHolder& rspec() const { return _rspec; }
Address( addr_type, int );
// accessors
Register base() const { return _base; }
#ifdef _LP64
int hi32() const { return _hi32; }
int low32() const { return _low32; }
#endif
int hi() const { return _hi; }
int disp() const { return _disp; }
#ifdef _LP64
intptr_t value() const { return ((intptr_t)_hi32 << 32) |
(intptr_t)(uint32_t)_low32; }
#else
int value() const { return _hi | _disp; }
#endif
const relocInfo::relocType rtype() { return _rspec.type(); }
const RelocationHolder& rspec() { return _rspec; }
RelocationHolder rspec(int offset) const {
return offset == 0 ? _rspec : _rspec.plus(offset);
}
inline bool is_simm13(int offset = 0); // check disp+offset for overflow
Address plus_disp(int disp) const { // bump disp by a small amount
Address a = (*this);
a._disp += disp;
return a;
}
Address split_disp() const { // deal with disp overflow
Address a = (*this);
int hi_disp = _disp & ~0x3ff;
if (hi_disp != 0) {
a._disp -= hi_disp;
a._hi += hi_disp;
}
return a;
}
Address after_save() const {
Address a = (*this);
a._base = a._base->after_save();
return a;
}
Address after_restore() const {
Address a = (*this);
a._base = a._base->after_restore();
return a;
}
friend class Assembler;
};
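Sketch of the usage pattern that replaces the old register-carrying Address: an AddressLiteral is just a value plus relocation, and the caller supplies the register when materializing it (mirroring the SkipIfEqual change in this commit). The flag and register below are hypothetical.
AddressLiteral flag((address) &SomeFlag);   // hypothetical C++ bool
__ sethi(flag, G1);                         // upper bits into G1
__ ldub(G1, flag.low10(), G1);              // low 10 bits as the displacement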
inline Address RegisterImpl::address_in_saved_window() const {
return (Address(SP, 0, (sp_offset_in_saved_window() * wordSize) + STACK_BIAS));
return (Address(SP, (sp_offset_in_saved_window() * wordSize) + STACK_BIAS));
}
@ -495,11 +523,7 @@ class Argument VALUE_OBJ_CLASS_SPEC {
// When applied to a register-based argument, give the corresponding address
// into the 6-word area "into which callee may store register arguments"
// (This is a different place than the corresponding register-save area location.)
Address address_in_frame() const {
return Address( is_in() ? Address::extra_in_argument
: Address::extra_out_argument,
_number );
}
Address address_in_frame() const;
// debugging
const char* name() const;
@ -521,6 +545,7 @@ class Assembler : public AbstractAssembler {
friend class AbstractAssembler;
friend class AddressLiteral;
// code patchers need various routines like inv_wdisp()
friend class NativeInstruction;
@ -1093,11 +1118,11 @@ public:
// pp 135 (addc was addx in v8)
inline void add( Register s1, Register s2, Register d );
inline void add( Register s1, int simm13a, Register d, relocInfo::relocType rtype = relocInfo::none);
inline void add( Register s1, int simm13a, Register d, RelocationHolder const& rspec);
inline void add( Register s1, RegisterOrConstant s2, Register d, int offset = 0);
inline void add( const Address& a, Register d, int offset = 0);
inline void add(Register s1, Register s2, Register d );
inline void add(Register s1, int simm13a, Register d, relocInfo::relocType rtype = relocInfo::none);
inline void add(Register s1, int simm13a, Register d, RelocationHolder const& rspec);
inline void add(Register s1, RegisterOrConstant s2, Register d, int offset = 0);
inline void add(const Address& a, Register d, int offset = 0) { add( a.base(), a.disp() + offset, d, a.rspec(offset)); }
void addcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
void addcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
@ -1252,14 +1277,12 @@ public:
void jmpl( Register s1, Register s2, Register d );
void jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec = RelocationHolder() );
inline void jmpl( Address& a, Register d, int offset = 0);
// 171
inline void ldf( FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d );
inline void ldf( FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d );
inline void ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d);
inline void ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec = RelocationHolder());
inline void ldf( FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset = 0);
inline void ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset = 0);
inline void ldfsr( Register s1, Register s2 );
@ -1303,15 +1326,20 @@ public:
inline void ldd( Register s1, Register s2, Register d );
inline void ldd( Register s1, int simm13a, Register d);
inline void ldsb( const Address& a, Register d, int offset = 0 );
inline void ldsh( const Address& a, Register d, int offset = 0 );
inline void ldsw( const Address& a, Register d, int offset = 0 );
inline void ldub( const Address& a, Register d, int offset = 0 );
inline void lduh( const Address& a, Register d, int offset = 0 );
inline void lduw( const Address& a, Register d, int offset = 0 );
inline void ldx( const Address& a, Register d, int offset = 0 );
inline void ld( const Address& a, Register d, int offset = 0 );
inline void ldd( const Address& a, Register d, int offset = 0 );
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void ld( Register s1, ByteSize simm13a, Register d);
#endif
inline void ldsb(const Address& a, Register d, int offset = 0);
inline void ldsh(const Address& a, Register d, int offset = 0);
inline void ldsw(const Address& a, Register d, int offset = 0);
inline void ldub(const Address& a, Register d, int offset = 0);
inline void lduh(const Address& a, Register d, int offset = 0);
inline void lduw(const Address& a, Register d, int offset = 0);
inline void ldx( const Address& a, Register d, int offset = 0);
inline void ld( const Address& a, Register d, int offset = 0);
inline void ldd( const Address& a, Register d, int offset = 0);
inline void ldub( Register s1, RegisterOrConstant s2, Register d );
inline void ldsb( Register s1, RegisterOrConstant s2, Register d );
@ -1536,6 +1564,11 @@ public:
inline void std( Register d, Register s1, Register s2 );
inline void std( Register d, Register s1, int simm13a);
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void st( Register d, Register s1, ByteSize simm13a);
#endif
inline void stb( Register d, const Address& a, int offset = 0 );
inline void sth( Register d, const Address& a, int offset = 0 );
inline void stw( Register d, const Address& a, int offset = 0 );
@ -1684,8 +1717,8 @@ class RegistersForDebugging : public StackObj {
#define JMP2(r1, r2) jmp(r1, r2, __FILE__, __LINE__)
#define JMP(r1, off) jmp(r1, off, __FILE__, __LINE__)
#define JUMP(a, off) jump(a, off, __FILE__, __LINE__)
#define JUMPL(a, d, off) jumpl(a, d, off, __FILE__, __LINE__)
#define JUMP(a, temp, off) jump(a, temp, off, __FILE__, __LINE__)
#define JUMPL(a, temp, d, off) jumpl(a, temp, d, off, __FILE__, __LINE__)
class MacroAssembler: public Assembler {
@ -1830,17 +1863,26 @@ class MacroAssembler: public Assembler {
#endif
// sethi Macro handles optimizations and relocations
void sethi( Address& a, bool ForceRelocatable = false );
void sethi( intptr_t imm22a, Register d, bool ForceRelocatable = false, RelocationHolder const& rspec = RelocationHolder());
private:
void internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable);
public:
void sethi(const AddressLiteral& addrlit, Register d);
void patchable_sethi(const AddressLiteral& addrlit, Register d);
// compute the size of a sethi/set
static int size_of_sethi( address a, bool worst_case = false );
static int worst_case_size_of_set();
// set may be either setsw or setuw (high 32 bits may be zero or sign)
void set( intptr_t value, Register d, RelocationHolder const& rspec = RelocationHolder() );
void setsw( int value, Register d, RelocationHolder const& rspec = RelocationHolder() );
void set64( jlong value, Register d, Register tmp);
private:
void internal_set(const AddressLiteral& al, Register d, bool ForceRelocatable);
public:
void set(const AddressLiteral& addrlit, Register d);
void set(intptr_t value, Register d);
void set(address addr, Register d, RelocationHolder const& rspec);
void patchable_set(const AddressLiteral& addrlit, Register d);
void patchable_set(intptr_t value, Register d);
void set64(jlong value, Register d, Register tmp);
// sign-extend 32 to 64
inline void signx( Register s, Register d ) { sra( s, G0, d); }
@ -1930,24 +1972,22 @@ class MacroAssembler: public Assembler {
inline void mov( int simm13a, Register d) { or3( G0, simm13a, d); }
// address pseudos: make these names unlike instruction names to avoid confusion
inline void split_disp( Address& a, Register temp );
inline intptr_t load_pc_address( Register reg, int bytes_to_skip );
inline void load_address( Address& a, int offset = 0 );
inline void load_contents( Address& a, Register d, int offset = 0 );
inline void load_ptr_contents( Address& a, Register d, int offset = 0 );
inline void store_contents( Register s, Address& a, int offset = 0 );
inline void store_ptr_contents( Register s, Address& a, int offset = 0 );
inline void jumpl_to( Address& a, Register d, int offset = 0 );
inline void jump_to( Address& a, int offset = 0 );
inline void jump_indirect_to( Address& a, Register temp, int ld_offset = 0, int jmp_offset = 0 );
inline void load_contents(AddressLiteral& addrlit, Register d, int offset = 0);
inline void load_ptr_contents(AddressLiteral& addrlit, Register d, int offset = 0);
inline void store_contents(Register s, AddressLiteral& addrlit, Register temp, int offset = 0);
inline void store_ptr_contents(Register s, AddressLiteral& addrlit, Register temp, int offset = 0);
inline void jumpl_to(AddressLiteral& addrlit, Register temp, Register d, int offset = 0);
inline void jump_to(AddressLiteral& addrlit, Register temp, int offset = 0);
inline void jump_indirect_to(Address& a, Register temp, int ld_offset = 0, int jmp_offset = 0);
// ring buffer traceable jumps
void jmp2( Register r1, Register r2, const char* file, int line );
void jmp ( Register r1, int offset, const char* file, int line );
void jumpl( Address& a, Register d, int offset, const char* file, int line );
void jump ( Address& a, int offset, const char* file, int line );
void jumpl(AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line);
void jump (AddressLiteral& addrlit, Register temp, int offset, const char* file, int line);
// argument pseudos:
@ -1972,29 +2012,31 @@ class MacroAssembler: public Assembler {
// Functions for isolating 64 bit loads for LP64
// ld_ptr will perform ld for 32 bit VM's and ldx for 64 bit VM's
// st_ptr will perform st for 32 bit VM's and stx for 64 bit VM's
inline void ld_ptr( Register s1, Register s2, Register d );
inline void ld_ptr( Register s1, int simm13a, Register d);
inline void ld_ptr( Register s1, RegisterOrConstant s2, Register d );
inline void ld_ptr( const Address& a, Register d, int offset = 0 );
inline void st_ptr( Register d, Register s1, Register s2 );
inline void st_ptr( Register d, Register s1, int simm13a);
inline void st_ptr( Register d, Register s1, RegisterOrConstant s2 );
inline void st_ptr( Register d, const Address& a, int offset = 0 );
inline void ld_ptr(Register s1, Register s2, Register d);
inline void ld_ptr(Register s1, int simm13a, Register d);
inline void ld_ptr(Register s1, RegisterOrConstant s2, Register d);
inline void ld_ptr(const Address& a, Register d, int offset = 0);
inline void st_ptr(Register d, Register s1, Register s2);
inline void st_ptr(Register d, Register s1, int simm13a);
inline void st_ptr(Register d, Register s1, RegisterOrConstant s2);
inline void st_ptr(Register d, const Address& a, int offset = 0);
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void ld_ptr(Register s1, ByteSize simm13a, Register d);
inline void st_ptr(Register d, Register s1, ByteSize simm13a);
#endif
// ld_long will perform ld for 32 bit VM's and ldx for 64 bit VM's
// st_long will perform st for 32 bit VM's and stx for 64 bit VM's
inline void ld_long( Register s1, Register s2, Register d );
inline void ld_long( Register s1, int simm13a, Register d );
inline void ld_long( Register s1, RegisterOrConstant s2, Register d );
inline void ld_long( const Address& a, Register d, int offset = 0 );
inline void st_long( Register d, Register s1, Register s2 );
inline void st_long( Register d, Register s1, int simm13a );
inline void st_long( Register d, Register s1, RegisterOrConstant s2 );
inline void st_long( Register d, const Address& a, int offset = 0 );
// Loading values by size and signed-ness
void load_sized_value(Register s1, RegisterOrConstant s2, Register d,
int size_in_bytes, bool is_signed);
inline void ld_long(Register s1, Register s2, Register d);
inline void ld_long(Register s1, int simm13a, Register d);
inline void ld_long(Register s1, RegisterOrConstant s2, Register d);
inline void ld_long(const Address& a, Register d, int offset = 0);
inline void st_long(Register d, Register s1, Register s2);
inline void st_long(Register d, Register s1, int simm13a);
inline void st_long(Register d, Register s1, RegisterOrConstant s2);
inline void st_long(Register d, const Address& a, int offset = 0);
// Helpers for address formation.
// They update the dest in place, whether it is a register or constant.
@ -2049,8 +2091,8 @@ class MacroAssembler: public Assembler {
// These are idioms to flag the need for care when accessing bools, but on
// this platform we assume byte size
inline void stbool( Register d, const Address& a, int offset = 0 ) { stb(d, a, offset); }
inline void ldbool( const Address& a, Register d, int offset = 0 ) { ldsb( a, d, offset ); }
inline void stbool(Register d, const Address& a) { stb(d, a); }
inline void ldbool(const Address& a, Register d) { ldsb(a, d); }
inline void tstbool( Register s ) { tst(s); }
inline void movbool( bool boolconst, Register d) { mov( (int) boolconst, d); }
@ -2060,7 +2102,7 @@ class MacroAssembler: public Assembler {
void store_klass_gap(Register s, Register dst_oop);
// oop manipulations
void load_heap_oop(const Address& s, Register d, int offset = 0);
void load_heap_oop(const Address& s, Register d);
void load_heap_oop(Register s1, Register s2, Register d);
void load_heap_oop(Register s1, int simm13a, Register d);
void store_heap_oop(Register d, Register s1, Register s2);
@ -2190,11 +2232,11 @@ class MacroAssembler: public Assembler {
void print_CPU_state();
// oops in code
Address allocate_oop_address( jobject obj, Register d ); // allocate_index
Address constant_oop_address( jobject obj, Register d ); // find_index
inline void set_oop ( jobject obj, Register d ); // uses allocate_oop_address
inline void set_oop_constant( jobject obj, Register d ); // uses constant_oop_address
inline void set_oop ( Address obj_addr ); // same as load_address
AddressLiteral allocate_oop_address(jobject obj); // allocate_index
AddressLiteral constant_oop_address(jobject obj); // find_index
inline void set_oop (jobject obj, Register d); // uses allocate_oop_address
inline void set_oop_constant (jobject obj, Register d); // uses constant_oop_address
inline void set_oop (AddressLiteral& obj_addr, Register d); // same as load_address
void set_narrow_oop( jobject obj, Register d );
@ -2410,7 +2452,8 @@ class MacroAssembler: public Assembler {
// Conditionally (non-atomically) increments passed counter address, preserving condition codes.
void cond_inc(Condition cond, address counter_addr, Register Rtemp1, Register Rtemp2);
// Unconditional increment.
void inc_counter(address counter_addr, Register Rtemp1, Register Rtemp2);
void inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2);
void inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2);
#undef VIRTUAL

View File

@ -38,6 +38,11 @@ inline void MacroAssembler::pd_print_patched_instruction(address branch) {
inline bool Address::is_simm13(int offset) { return Assembler::is_simm13(disp() + offset); }
inline int AddressLiteral::low10() const {
return Assembler::low10(value());
}
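For reference, low10 is assumed here to extract the bits a sethi cannot encode:
// Assuming Assembler::low10(x) == x & 0x3ff (an assumption; not shown in this hunk):
//   AddressLiteral lit((address) 0x12345678);   // hypothetical
//   lit.low10() == 0x278; sethi supplies the remaining upper bits.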
// inlines for SPARC assembler -- dmu 5/97
inline void Assembler::check_delay() {
@ -63,10 +68,9 @@ inline void Assembler::emit_data(int x, RelocationHolder const& rspec) {
}
inline void Assembler::add( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::add( Register s1, int simm13a, Register d, relocInfo::relocType rtype ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rtype ); }
inline void Assembler::add( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec ); }
inline void Assembler::add( const Address& a, Register d, int offset) { add( a.base(), a.disp() + offset, d, a.rspec(offset)); }
inline void Assembler::add(Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::add(Register s1, int simm13a, Register d, relocInfo::relocType rtype ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rtype ); }
inline void Assembler::add(Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec ); }
inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt ) { v9_only(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt); has_delay_slot(); }
inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, Label& L) { bpr( c, a, p, s1, target(L)); }
@ -95,13 +99,10 @@ inline void Assembler::flush( Register s1, int simm13a) { emit_data( op(arith_op
inline void Assembler::jmpl( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); has_delay_slot(); }
inline void Assembler::jmpl( Address& a, Register d, int offset) { jmpl( a.base(), a.disp() + offset, d, a.rspec(offset)); }
inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); }
inline void Assembler::ldf( FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldf( FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldf( FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) { relocate(a.rspec(offset)); ldf( w, a.base(), a.disp() + offset, d); }
inline void Assembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) { relocate(a.rspec(offset)); ldf( w, a.base(), a.disp() + offset, d); }
inline void Assembler::ldfsr( Register s1, Register s2) { v9_dep(); emit_long( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
@ -136,50 +137,69 @@ inline void Assembler::ldd( Register s1, int simm13a, Register d) { v9_dep();
#ifdef _LP64
// Make all 32 bit loads signed so 64 bit registers maintain proper sign
inline void Assembler::ld( Register s1, Register s2, Register d) { ldsw( s1, s2, d); }
inline void Assembler::ld( Register s1, int simm13a, Register d) { ldsw( s1, simm13a, d); }
inline void Assembler::ld( Register s1, Register s2, Register d) { ldsw( s1, s2, d); }
inline void Assembler::ld( Register s1, int simm13a, Register d) { ldsw( s1, simm13a, d); }
#else
inline void Assembler::ld( Register s1, Register s2, Register d) { lduw( s1, s2, d); }
inline void Assembler::ld( Register s1, int simm13a, Register d) { lduw( s1, simm13a, d); }
inline void Assembler::ld( Register s1, Register s2, Register d) { lduw( s1, s2, d); }
inline void Assembler::ld( Register s1, int simm13a, Register d) { lduw( s1, simm13a, d); }
#endif
inline void Assembler::ldub( Register s1, RegisterOrConstant s2, Register d) {
if (s2.is_register()) ldsb(s1, s2.as_register(), d);
else ldsb(s1, s2.as_constant(), d);
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
# ifdef _LP64
inline void Assembler::ld( Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); }
# else
inline void Assembler::ld( Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); }
# endif
#endif
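The ByteSize comment above refers to HotSpot's debug-only strong typedef for byte offsets. A simplified sketch of that pattern (the real declaration lives in a shared header and differs in detail):

#ifdef ASSERT
class ByteSize {                                  // distinct type: unit mix-ups fail to compile
  int _v;
 public:
  explicit ByteSize(int v) : _v(v) {}
  friend int in_bytes(ByteSize b) { return b._v; }
};
#else
typedef int ByteSize;                             // product build: a zero-cost plain int
inline int in_bytes(ByteSize b) { return b; }
#endif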
inline void Assembler::ld( const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ld( a.base(), a.index(), d); }
else { ld( a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldsb( Register s1, RegisterOrConstant s2, Register d) {
if (s2.is_register()) ldsb(s1, s2.as_register(), d);
else ldsb(s1, s2.as_constant(), d);
inline void Assembler::ldsb(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldsb(a.base(), a.index(), d); }
else { ldsb(a.base(), a.disp() + offset, d); }
}
inline void Assembler::lduh( Register s1, RegisterOrConstant s2, Register d) {
if (s2.is_register()) ldsh(s1, s2.as_register(), d);
else ldsh(s1, s2.as_constant(), d);
inline void Assembler::ldsh(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldsh(a.base(), a.index(), d); }
else { ldsh(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldsh( Register s1, RegisterOrConstant s2, Register d) {
if (s2.is_register()) ldsh(s1, s2.as_register(), d);
else ldsh(s1, s2.as_constant(), d);
inline void Assembler::ldsw(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldsw(a.base(), a.index(), d); }
else { ldsw(a.base(), a.disp() + offset, d); }
}
inline void Assembler::lduw( Register s1, RegisterOrConstant s2, Register d) {
if (s2.is_register()) ldsw(s1, s2.as_register(), d);
else ldsw(s1, s2.as_constant(), d);
inline void Assembler::ldub(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldub(a.base(), a.index(), d); }
else { ldub(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldsw( Register s1, RegisterOrConstant s2, Register d) {
if (s2.is_register()) ldsw(s1, s2.as_register(), d);
else ldsw(s1, s2.as_constant(), d);
inline void Assembler::lduh(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); lduh(a.base(), a.index(), d); }
else { lduh(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldx( Register s1, RegisterOrConstant s2, Register d) {
if (s2.is_register()) ldx(s1, s2.as_register(), d);
else ldx(s1, s2.as_constant(), d);
inline void Assembler::lduw(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); lduw(a.base(), a.index(), d); }
else { lduw(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ld( Register s1, RegisterOrConstant s2, Register d) {
if (s2.is_register()) ld(s1, s2.as_register(), d);
else ld(s1, s2.as_constant(), d);
inline void Assembler::ldd( const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldd( a.base(), a.index(), d); }
else { ldd( a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldd( Register s1, RegisterOrConstant s2, Register d) {
if (s2.is_register()) ldd(s1, s2.as_register(), d);
else ldd(s1, s2.as_constant(), d);
inline void Assembler::ldx( const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldx( a.base(), a.index(), d); }
else { ldx( a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldub(Register s1, RegisterOrConstant s2, Register d) { ldub(Address(s1, s2), d); }
inline void Assembler::ldsb(Register s1, RegisterOrConstant s2, Register d) { ldsb(Address(s1, s2), d); }
inline void Assembler::lduh(Register s1, RegisterOrConstant s2, Register d) { lduh(Address(s1, s2), d); }
inline void Assembler::ldsh(Register s1, RegisterOrConstant s2, Register d) { ldsh(Address(s1, s2), d); }
inline void Assembler::lduw(Register s1, RegisterOrConstant s2, Register d) { lduw(Address(s1, s2), d); }
inline void Assembler::ldsw(Register s1, RegisterOrConstant s2, Register d) { ldsw(Address(s1, s2), d); }
inline void Assembler::ldx( Register s1, RegisterOrConstant s2, Register d) { ldx( Address(s1, s2), d); }
inline void Assembler::ld( Register s1, RegisterOrConstant s2, Register d) { ld( Address(s1, s2), d); }
inline void Assembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }
// form effective addresses this way:
inline void Assembler::add( Register s1, RegisterOrConstant s2, Register d, int offset) {
if (s2.is_register()) add(s1, s2.as_register(), d);
@@ -187,17 +207,6 @@ inline void Assembler::add( Register s1, RegisterOrConstant s2, Register d, in
if (offset != 0) add(d, offset, d);
}
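A host-side model of what that add() sequence computes: base plus a register or simm13 index, with any extra offset folded in by a second add only when it is non-zero (the helper name is illustrative):

#include <cstdint>

int64_t effective_address(int64_t base, bool index_is_register,
                          int64_t index_reg, int32_t simm13, int32_t offset) {
  int64_t ea = base + (index_is_register ? index_reg : simm13);  // first add
  if (offset != 0) ea += offset;                                 // second add, as emitted
  return ea;
}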
inline void Assembler::ld( const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); ld( a.base(), a.disp() + offset, d ); }
inline void Assembler::ldsb( const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); ldsb( a.base(), a.disp() + offset, d ); }
inline void Assembler::ldsh( const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); ldsh( a.base(), a.disp() + offset, d ); }
inline void Assembler::ldsw( const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); ldsw( a.base(), a.disp() + offset, d ); }
inline void Assembler::ldub( const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); ldub( a.base(), a.disp() + offset, d ); }
inline void Assembler::lduh( const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); lduh( a.base(), a.disp() + offset, d ); }
inline void Assembler::lduw( const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); lduw( a.base(), a.disp() + offset, d ); }
inline void Assembler::ldd( const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); ldd( a.base(), a.disp() + offset, d ); }
inline void Assembler::ldx( const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); ldx( a.base(), a.disp() + offset, d ); }
inline void Assembler::ldstub( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldstub( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
@@ -240,36 +249,44 @@ inline void Assembler::stx( Register d, Register s1, int simm13a) { v9_only();
inline void Assembler::std( Register d, Register s1, Register s2) { v9_dep(); assert(d->is_even(), "not even"); emit_long( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::std( Register d, Register s1, int simm13a) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::st( Register d, Register s1, Register s2) { stw(d, s1, s2); }
inline void Assembler::st( Register d, Register s1, int simm13a) { stw(d, s1, simm13a); }
inline void Assembler::st( Register d, Register s1, Register s2) { stw(d, s1, s2); }
inline void Assembler::st( Register d, Register s1, int simm13a) { stw(d, s1, simm13a); }
inline void Assembler::stb( Register d, Register s1, RegisterOrConstant s2) {
if (s2.is_register()) stb(d, s1, s2.as_register());
else stb(d, s1, s2.as_constant());
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void Assembler::st( Register d, Register s1, ByteSize simm13a) { stw(d, s1, in_bytes(simm13a)); }
#endif
inline void Assembler::stb(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); stb(d, a.base(), a.index() ); }
else { stb(d, a.base(), a.disp() + offset); }
}
inline void Assembler::sth( Register d, Register s1, RegisterOrConstant s2) {
if (s2.is_register()) sth(d, s1, s2.as_register());
else sth(d, s1, s2.as_constant());
inline void Assembler::sth(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); sth(d, a.base(), a.index() ); }
else { sth(d, a.base(), a.disp() + offset); }
}
inline void Assembler::stx( Register d, Register s1, RegisterOrConstant s2) {
if (s2.is_register()) stx(d, s1, s2.as_register());
else stx(d, s1, s2.as_constant());
inline void Assembler::stw(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); stw(d, a.base(), a.index() ); }
else { stw(d, a.base(), a.disp() + offset); }
}
inline void Assembler::std( Register d, Register s1, RegisterOrConstant s2) {
if (s2.is_register()) std(d, s1, s2.as_register());
else std(d, s1, s2.as_constant());
inline void Assembler::st( Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); st( d, a.base(), a.index() ); }
else { st( d, a.base(), a.disp() + offset); }
}
inline void Assembler::st( Register d, Register s1, RegisterOrConstant s2) {
if (s2.is_register()) st(d, s1, s2.as_register());
else st(d, s1, s2.as_constant());
inline void Assembler::std(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); std(d, a.base(), a.index() ); }
else { std(d, a.base(), a.disp() + offset); }
}
inline void Assembler::stx(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); stx(d, a.base(), a.index() ); }
else { stx(d, a.base(), a.disp() + offset); }
}
inline void Assembler::stb( Register d, const Address& a, int offset) { relocate(a.rspec(offset)); stb( d, a.base(), a.disp() + offset); }
inline void Assembler::sth( Register d, const Address& a, int offset) { relocate(a.rspec(offset)); sth( d, a.base(), a.disp() + offset); }
inline void Assembler::stw( Register d, const Address& a, int offset) { relocate(a.rspec(offset)); stw( d, a.base(), a.disp() + offset); }
inline void Assembler::st( Register d, const Address& a, int offset) { relocate(a.rspec(offset)); st( d, a.base(), a.disp() + offset); }
inline void Assembler::std( Register d, const Address& a, int offset) { relocate(a.rspec(offset)); std( d, a.base(), a.disp() + offset); }
inline void Assembler::stx( Register d, const Address& a, int offset) { relocate(a.rspec(offset)); stx( d, a.base(), a.disp() + offset); }
inline void Assembler::stb(Register d, Register s1, RegisterOrConstant s2) { stb(d, Address(s1, s2)); }
inline void Assembler::sth(Register d, Register s1, RegisterOrConstant s2) { sth(d, Address(s1, s2)); }
inline void Assembler::stx(Register d, Register s1, RegisterOrConstant s2) { stx(d, Address(s1, s2)); }
inline void Assembler::std(Register d, Register s1, RegisterOrConstant s2) { std(d, Address(s1, s2)); }
inline void Assembler::st( Register d, Register s1, RegisterOrConstant s2) { st( d, Address(s1, s2)); }
// v8 p 99
@@ -294,39 +311,46 @@ inline void Assembler::swap( Address& a, Register d, int offset ) { relocate(
// Use the right loads/stores for the platform
inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::ldx( s1, s2, d);
Assembler::ldx(s1, s2, d);
#else
Assembler::ld( s1, s2, d);
Assembler::ld( s1, s2, d);
#endif
}
inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
#ifdef _LP64
Assembler::ldx( s1, simm13a, d);
Assembler::ldx(s1, simm13a, d);
#else
Assembler::ld( s1, simm13a, d);
Assembler::ld( s1, simm13a, d);
#endif
}
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d ) {
ld_ptr(s1, in_bytes(simm13a), d);
}
#endif
inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
Assembler::ldx( s1, s2, d);
Assembler::ldx(s1, s2, d);
#else
Assembler::ld( s1, s2, d);
Assembler::ld( s1, s2, d);
#endif
}
inline void MacroAssembler::ld_ptr( const Address& a, Register d, int offset ) {
inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) {
#ifdef _LP64
Assembler::ldx( a, d, offset );
Assembler::ldx(a, d, offset);
#else
Assembler::ld( a, d, offset );
Assembler::ld( a, d, offset);
#endif
}
inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
#ifdef _LP64
Assembler::stx( d, s1, s2);
Assembler::stx(d, s1, s2);
#else
Assembler::st( d, s1, s2);
#endif
@@ -334,25 +358,32 @@ inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) {
#ifdef _LP64
Assembler::stx( d, s1, simm13a);
Assembler::stx(d, s1, simm13a);
#else
Assembler::st( d, s1, simm13a);
#endif
}
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a ) {
st_ptr(d, s1, in_bytes(simm13a));
}
#endif
inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
Assembler::stx( d, s1, s2);
Assembler::stx(d, s1, s2);
#else
Assembler::st( d, s1, s2);
#endif
}
inline void MacroAssembler::st_ptr( Register d, const Address& a, int offset) {
inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) {
#ifdef _LP64
Assembler::stx( d, a, offset);
Assembler::stx(d, a, offset);
#else
Assembler::st( d, a, offset);
Assembler::st( d, a, offset);
#endif
}
@@ -381,11 +412,11 @@ inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Registe
#endif
}
inline void MacroAssembler::ld_long( const Address& a, Register d, int offset ) {
inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) {
#ifdef _LP64
Assembler::ldx(a, d, offset );
Assembler::ldx(a, d, offset);
#else
Assembler::ldd(a, d, offset );
Assembler::ldd(a, d, offset);
#endif
}
@@ -427,7 +458,7 @@ inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::sllx(s1, s2, d);
#else
Assembler::sll(s1, s2, d);
Assembler::sll( s1, s2, d);
#endif
}
@@ -435,7 +466,7 @@ inline void MacroAssembler::sll_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
Assembler::sllx(s1, imm6a, d);
#else
Assembler::sll(s1, imm6a, d);
Assembler::sll( s1, imm6a, d);
#endif
}
@@ -443,7 +474,7 @@ inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::srlx(s1, s2, d);
#else
Assembler::srl(s1, s2, d);
Assembler::srl( s1, s2, d);
#endif
}
@@ -451,7 +482,7 @@ inline void MacroAssembler::srl_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
Assembler::srlx(s1, imm6a, d);
#else
Assembler::srl(s1, imm6a, d);
Assembler::srl( s1, imm6a, d);
#endif
}
@@ -541,9 +572,8 @@ inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
disp = (intptr_t)d - (intptr_t)pc();
if ( disp != (intptr_t)(int32_t)disp ) {
relocate(rt);
Address dest(O7, (address)d);
sethi(dest, /*ForceRelocatable=*/ true);
jmpl(dest, O7);
AddressLiteral dest(d);
jumpl_to(dest, O7, O7);
}
else {
Assembler::call( d, rt );
@@ -603,96 +633,72 @@ inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip
return thepc;
}
inline void MacroAssembler::load_address( Address& a, int offset ) {
inline void MacroAssembler::load_contents(AddressLiteral& addrlit, Register d, int offset) {
assert_not_delayed();
#ifdef _LP64
sethi(a);
add(a, a.base(), offset);
#else
if (a.hi() == 0 && a.rtype() == relocInfo::none) {
set(a.disp() + offset, a.base());
}
else {
sethi(a);
add(a, a.base(), offset);
}
#endif
sethi(addrlit, d);
ld(d, addrlit.low10() + offset, d);
}
inline void MacroAssembler::split_disp( Address& a, Register temp ) {
inline void MacroAssembler::load_ptr_contents(AddressLiteral& addrlit, Register d, int offset) {
assert_not_delayed();
a = a.split_disp();
Assembler::sethi(a.hi(), temp, a.rspec());
add(a.base(), temp, a.base());
sethi(addrlit, d);
ld_ptr(d, addrlit.low10() + offset, d);
}
inline void MacroAssembler::load_contents( Address& a, Register d, int offset ) {
inline void MacroAssembler::store_contents(Register s, AddressLiteral& addrlit, Register temp, int offset) {
assert_not_delayed();
sethi(a);
ld(a, d, offset);
sethi(addrlit, temp);
st(s, temp, addrlit.low10() + offset);
}
inline void MacroAssembler::load_ptr_contents( Address& a, Register d, int offset ) {
inline void MacroAssembler::store_ptr_contents(Register s, AddressLiteral& addrlit, Register temp, int offset) {
assert_not_delayed();
sethi(a);
ld_ptr(a, d, offset);
}
inline void MacroAssembler::store_contents( Register s, Address& a, int offset ) {
assert_not_delayed();
sethi(a);
st(s, a, offset);
}
inline void MacroAssembler::store_ptr_contents( Register s, Address& a, int offset ) {
assert_not_delayed();
sethi(a);
st_ptr(s, a, offset);
sethi(addrlit, temp);
st_ptr(s, temp, addrlit.low10() + offset);
}
// This code sequence is relocatable to any address, even on LP64.
inline void MacroAssembler::jumpl_to( Address& a, Register d, int offset ) {
inline void MacroAssembler::jumpl_to(AddressLiteral& addrlit, Register temp, Register d, int offset) {
assert_not_delayed();
// Force fixed length sethi because NativeJump and NativeFarCall don't handle
// variable length instruction streams.
sethi(a, /*ForceRelocatable=*/ true);
jmpl(a, d, offset);
patchable_sethi(addrlit, temp);
jmpl(temp, addrlit.low10() + offset, d);
}
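The fixed-length sequence works because a 32-bit target splits losslessly into a 22-bit sethi immediate and a 10-bit remainder carried in the jmpl displacement. A host-side sketch of the split (an analogue of AddressLiteral::low10(), not the real code):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t target = 0xDEADBEEF;        // arbitrary example address
  uint32_t hi22   = target >> 10;      // goes into the sethi immediate
  uint32_t low10  = target & 0x3FF;    // rides in the jmpl displacement
  assert(((hi22 << 10) | low10) == target);
  return 0;
}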
inline void MacroAssembler::jump_to( Address& a, int offset ) {
jumpl_to( a, G0, offset );
inline void MacroAssembler::jump_to(AddressLiteral& addrlit, Register temp, int offset) {
jumpl_to(addrlit, temp, G0, offset);
}
inline void MacroAssembler::jump_indirect_to( Address& a, Register temp,
int ld_offset, int jmp_offset ) {
inline void MacroAssembler::jump_indirect_to(Address& a, Register temp,
int ld_offset, int jmp_offset) {
assert_not_delayed();
//sethi(a); // sethi is caller responsibility for this one
//sethi(al); // sethi is caller responsibility for this one
ld_ptr(a, temp, ld_offset);
jmp(temp, jmp_offset);
}
inline void MacroAssembler::set_oop( jobject obj, Register d ) {
set_oop(allocate_oop_address(obj, d));
inline void MacroAssembler::set_oop(jobject obj, Register d) {
set_oop(allocate_oop_address(obj), d);
}
inline void MacroAssembler::set_oop_constant( jobject obj, Register d ) {
set_oop(constant_oop_address(obj, d));
inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
set_oop(constant_oop_address(obj), d);
}
inline void MacroAssembler::set_oop( Address obj_addr ) {
assert(obj_addr.rspec().type()==relocInfo::oop_type, "must be an oop reloc");
load_address(obj_addr);
inline void MacroAssembler::set_oop(AddressLiteral& obj_addr, Register d) {
assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
set(obj_addr, d);
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -277,10 +277,11 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
if (_id == load_klass_id) {
// produce a copy of the load klass instruction for use by the being-initialized case
#ifdef ASSERT
address start = __ pc();
Address addr = Address(_obj, address(NULL), oop_Relocation::spec(_oop_index));
__ sethi(addr, true);
__ add(addr, _obj, 0);
#endif
AddressLiteral addrlit(NULL, oop_Relocation::spec(_oop_index));
__ patchable_set(addrlit, _obj);
#ifdef ASSERT
for (int i = 0; i < _bytes_to_copy; i++) {

View File

@@ -1,5 +1,5 @@
/*
* Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -327,7 +327,7 @@ void FrameMap::init () {
Address FrameMap::make_new_address(ByteSize sp_offset) const {
return Address(SP, 0, STACK_BIAS + in_bytes(sp_offset));
return Address(SP, STACK_BIAS + in_bytes(sp_offset));
}

View File

@@ -196,7 +196,7 @@ void LIR_Assembler::osr_entry() {
// verify the interpreter's monitor has a non-null object
{
Label L;
__ ld_ptr(Address(OSR_buf, 0, slot_offset + BasicObjectLock::obj_offset_in_bytes()), O7);
__ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes(), O7);
__ cmp(G0, O7);
__ br(Assembler::notEqual, false, Assembler::pt, L);
__ delayed()->nop();
@@ -205,9 +205,9 @@ void LIR_Assembler::osr_entry() {
}
#endif // ASSERT
// Copy the lock field into the compiled activation.
__ ld_ptr(Address(OSR_buf, 0, slot_offset + BasicObjectLock::lock_offset_in_bytes()), O7);
__ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes(), O7);
__ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
__ ld_ptr(Address(OSR_buf, 0, slot_offset + BasicObjectLock::obj_offset_in_bytes()), O7);
__ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes(), O7);
__ st_ptr(O7, frame_map()->address_for_monitor_object(i));
}
}
@@ -238,21 +238,21 @@ void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst
int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
int count_offset = java_lang_String::count_offset_in_bytes();
__ ld_ptr(Address(str0, 0, value_offset), tmp0);
__ ld(Address(str0, 0, offset_offset), tmp2);
__ ld_ptr(str0, value_offset, tmp0);
__ ld(str0, offset_offset, tmp2);
__ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
__ ld(Address(str0, 0, count_offset), str0);
__ ld(str0, count_offset, str0);
__ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
// str1 may be null
add_debug_info_for_null_check_here(info);
__ ld_ptr(Address(str1, 0, value_offset), tmp1);
__ ld_ptr(str1, value_offset, tmp1);
__ add(tmp0, tmp2, tmp0);
__ ld(Address(str1, 0, offset_offset), tmp2);
__ ld(str1, offset_offset, tmp2);
__ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
__ ld(Address(str1, 0, count_offset), str1);
__ ld(str1, count_offset, str1);
__ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
__ subcc(str0, str1, O7);
__ add(tmp1, tmp2, tmp1);
@@ -412,9 +412,9 @@ void LIR_Assembler::emit_deopt_handler() {
#endif // ASSERT
compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset());
Address deopt_blob(G3_scratch, SharedRuntime::deopt_blob()->unpack());
AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
__ JUMP(deopt_blob, 0); // sethi;jmp
__ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
__ delayed()->nop();
assert(code_offset() - offset <= deopt_handler_size, "overflow");
@@ -441,13 +441,12 @@ void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info)
int oop_index = __ oop_recorder()->allocate_index((jobject)NULL);
PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, oop_index);
Address addr = Address(reg, address(NULL), oop_Relocation::spec(oop_index));
assert(addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
// It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
// NULL will be dynamically patched later and the patched value may be large. We must
// therefore generate the sethi/add as placeholders
__ sethi(addr, true);
__ add(addr, reg, 0);
__ patchable_set(addrlit, reg);
patching_epilog(patch, lir_patch_normal, reg, info);
}
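Patching later rewrites the immediate fields of that fixed two-instruction placeholder in place, which is why it must not shrink to a shorter form. A host-side model of the field surgery (a sketch of the idea, not HotSpot's NativeMovConstReg):

#include <cstdint>

void patch_set32(uint32_t* code, uint32_t new_value) {
  code[0] = (code[0] & ~0x3FFFFFu) | (new_value >> 10);    // rewrite the sethi imm22 field
  code[1] = (code[1] & ~0x1FFFu)   | (new_value & 0x3FFu); // rewrite the add/or simm13 field
}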
@@ -706,7 +705,7 @@ void LIR_Assembler::ic_call(address entry, CodeEmitInfo* info) {
void LIR_Assembler::vtable_call(int vtable_offset, CodeEmitInfo* info) {
add_debug_info_for_null_check_here(info);
__ ld_ptr(Address(O0, 0, oopDesc::klass_offset_in_bytes()), G3_scratch);
__ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch);
if (__ is_simm13(vtable_offset) ) {
__ ld_ptr(G3_scratch, vtable_offset, G5_method);
} else {
@@ -715,7 +714,7 @@ void LIR_Assembler::vtable_call(int vtable_offset, CodeEmitInfo* info) {
// ld_ptr, set_hi, set
__ ld_ptr(G3_scratch, G5_method, G5_method);
}
__ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3_scratch);
__ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3_scratch);
__ callr(G3_scratch, G0);
// the peephole pass fills the delay slot
}
@@ -738,8 +737,7 @@ int LIR_Assembler::load(Register s, int disp, Register d, BasicType ld_type, Cod
default : ShouldNotReachHere();
}
} else {
__ sethi(disp & ~0x3ff, O7, true);
__ add(O7, disp & 0x3ff, O7);
__ set(disp, O7);
if (info != NULL) add_debug_info_for_null_check_here(info);
load_offset = code_offset();
switch(ld_type) {
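The set(disp, O7) fallback exists because SPARC load/store immediates are signed 13-bit. A one-line host-side mirror of the Assembler::is_simm13 test (range follows from the instruction format):

#include <cstdint>

bool fits_simm13(int64_t v) { return v >= -4096 && v <= 4095; }  // signed 13-bit range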
@@ -775,8 +773,7 @@ void LIR_Assembler::store(Register value, Register base, int offset, BasicType t
default : ShouldNotReachHere();
}
} else {
__ sethi(offset & ~0x3ff, O7, true);
__ add(O7, offset & 0x3ff, O7);
__ set(offset, O7);
if (info != NULL) add_debug_info_for_null_check_here(info);
switch (type) {
case T_BOOLEAN: // fall through
@@ -813,8 +810,7 @@ void LIR_Assembler::load(Register s, int disp, FloatRegister d, BasicType ld_typ
__ ldf(w, s, disp, d);
}
} else {
__ sethi(disp & ~0x3ff, O7, true);
__ add(O7, disp & 0x3ff, O7);
__ set(disp, O7);
if (info != NULL) add_debug_info_for_null_check_here(info);
__ ldf(w, s, O7, d);
}
@@ -839,8 +835,7 @@ void LIR_Assembler::store(FloatRegister value, Register base, int offset, BasicT
__ stf(w, value, base, offset);
}
} else {
__ sethi(offset & ~0x3ff, O7, true);
__ add(O7, offset & 0x3ff, O7);
__ set(offset, O7);
if (info != NULL) add_debug_info_for_null_check_here(info);
__ stf(w, value, O7, base);
}
@@ -852,8 +847,7 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType
if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
assert(!unaligned, "can't handle this");
// for offsets larger than a simm13 we set up the offset in O7
__ sethi(offset & ~0x3ff, O7, true);
__ add(O7, offset & 0x3ff, O7);
__ set(offset, O7);
store_offset = store(from_reg, base, O7, type);
} else {
if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(from_reg->as_register());
@@ -937,8 +931,7 @@ int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType typ
assert(base != O7, "destroying register");
assert(!unaligned, "can't handle this");
// for offsets larger than a simm13 we set up the offset in O7
__ sethi(offset & ~0x3ff, O7, true);
__ add(O7, offset & 0x3ff, O7);
__ set(offset, O7);
load_offset = load(base, O7, to_reg, type);
} else {
load_offset = code_offset();
@@ -1213,7 +1206,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
assert(to_reg->is_single_fpu(), "wrong register kind");
__ set(con, O7);
Address temp_slot(SP, 0, (frame::register_save_words * wordSize) + STACK_BIAS);
Address temp_slot(SP, (frame::register_save_words * wordSize) + STACK_BIAS);
__ st(O7, temp_slot);
__ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg());
}
@@ -1238,8 +1231,8 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
} else {
ShouldNotReachHere();
assert(to_reg->is_double_fpu(), "wrong register kind");
Address temp_slot_lo(SP, 0, ((frame::register_save_words ) * wordSize) + STACK_BIAS);
Address temp_slot_hi(SP, 0, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS);
Address temp_slot_lo(SP, ((frame::register_save_words ) * wordSize) + STACK_BIAS);
Address temp_slot_hi(SP, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS);
__ set(low(con), O7);
__ st(O7, temp_slot_lo);
__ set(high(con), O7);
@@ -1267,17 +1260,16 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
break;
}
RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
AddressLiteral const_addrlit(const_addr, rspec);
if (to_reg->is_single_fpu()) {
__ sethi( (intx)const_addr & ~0x3ff, O7, true, rspec);
__ patchable_sethi(const_addrlit, O7);
__ relocate(rspec);
int offset = (intx)const_addr & 0x3ff;
__ ldf (FloatRegisterImpl::S, O7, offset, to_reg->as_float_reg());
__ ldf(FloatRegisterImpl::S, O7, const_addrlit.low10(), to_reg->as_float_reg());
} else {
assert(to_reg->is_single_cpu(), "Must be a cpu register.");
__ set((intx)const_addr, O7, rspec);
__ set(const_addrlit, O7);
load(O7, 0, to_reg->as_register(), T_INT);
}
}
@@ -1293,10 +1285,10 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
if (to_reg->is_double_fpu()) {
__ sethi( (intx)const_addr & ~0x3ff, O7, true, rspec);
int offset = (intx)const_addr & 0x3ff;
AddressLiteral const_addrlit(const_addr, rspec);
__ patchable_sethi(const_addrlit, O7);
__ relocate(rspec);
__ ldf (FloatRegisterImpl::D, O7, offset, to_reg->as_double_reg());
__ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
} else {
assert(to_reg->is_double_cpu(), "Must be a long register.");
#ifdef _LP64
@@ -1317,7 +1309,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
Address LIR_Assembler::as_Address(LIR_Address* addr) {
Register reg = addr->base()->as_register();
return Address(reg, 0, addr->disp());
return Address(reg, addr->disp());
}
@@ -1360,13 +1352,13 @@ void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
Address base = as_Address(addr);
return Address(base.base(), 0, base.disp() + hi_word_offset_in_bytes);
return Address(base.base(), base.disp() + hi_word_offset_in_bytes);
}
Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
Address base = as_Address(addr);
return Address(base.base(), 0, base.disp() + lo_word_offset_in_bytes);
return Address(base.base(), base.disp() + lo_word_offset_in_bytes);
}
@@ -1396,8 +1388,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
if (addr->index()->is_illegal()) {
if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
if (needs_patching) {
__ sethi(0, O7, true);
__ add(O7, 0, O7);
__ patchable_set(0, O7);
} else {
__ set(disp_value, O7);
}
@@ -1544,8 +1535,7 @@ void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
if (addr->index()->is_illegal()) {
if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
if (needs_patching) {
__ sethi(0, O7, true);
__ add(O7, 0, O7);
__ patchable_set(0, O7);
} else {
__ set(disp_value, O7);
}
@@ -1627,8 +1617,8 @@ void LIR_Assembler::emit_static_call_stub() {
__ set_oop(NULL, G5);
// must be set to -1 at code generation time
Address a(G3, (address)-1);
__ jump_to(a, 0);
AddressLiteral addrlit(-1);
__ jump_to(addrlit, G3);
__ delayed()->nop();
assert(__ offset() - start <= call_stub_size, "stub too big");
@@ -2063,7 +2053,7 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
address pc_for_athrow = __ pc();
int pc_for_athrow_offset = __ offset();
RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
__ set((intptr_t)pc_for_athrow, Oissuing_pc, rspec);
__ set(pc_for_athrow, Oissuing_pc, rspec);
add_call_info(pc_for_athrow_offset, info); // for exception handler
__ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
@@ -2451,7 +2441,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
}
Address flags_addr(mdo, 0, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
__ ldub(flags_addr, data_val);
__ or3(data_val, BitData::null_seen_byte_constant(), data_val);
__ stb(data_val, flags_addr);
@@ -2738,7 +2728,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
__ add(mdo, O7, mdo);
}
Address counter_addr(mdo, 0, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
__ lduw(counter_addr, tmp1);
__ add(tmp1, DataLayout::counter_increment, tmp1);
__ stw(tmp1, counter_addr);
@@ -2764,8 +2754,8 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
for (i = 0; i < VirtualCallData::row_limit(); i++) {
ciKlass* receiver = vc_data->receiver(i);
if (known_klass->equals(receiver)) {
Address data_addr(mdo, 0, md->byte_offset_of_slot(data,
VirtualCallData::receiver_count_offset(i)) -
Address data_addr(mdo, md->byte_offset_of_slot(data,
VirtualCallData::receiver_count_offset(i)) -
mdo_offset_bias);
__ lduw(data_addr, tmp1);
__ add(tmp1, DataLayout::counter_increment, tmp1);
@@ -2782,11 +2772,11 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
for (i = 0; i < VirtualCallData::row_limit(); i++) {
ciKlass* receiver = vc_data->receiver(i);
if (receiver == NULL) {
Address recv_addr(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
mdo_offset_bias);
jobject2reg(known_klass->encoding(), tmp1);
__ st_ptr(tmp1, recv_addr);
Address data_addr(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
mdo_offset_bias);
__ lduw(data_addr, tmp1);
__ add(tmp1, DataLayout::counter_increment, tmp1);
@@ -2795,20 +2785,20 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
}
}
} else {
load(Address(recv, 0, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
load(Address(recv, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
Label update_done;
uint i;
for (i = 0; i < VirtualCallData::row_limit(); i++) {
Label next_test;
// See if the receiver is receiver[n].
Address receiver_addr(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
Address receiver_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
mdo_offset_bias);
__ ld_ptr(receiver_addr, tmp1);
__ verify_oop(tmp1);
__ cmp(recv, tmp1);
__ brx(Assembler::notEqual, false, Assembler::pt, next_test);
__ delayed()->nop();
Address data_addr(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
mdo_offset_bias);
__ lduw(data_addr, tmp1);
__ add(tmp1, DataLayout::counter_increment, tmp1);
@@ -2821,7 +2811,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
// Didn't find receiver; find next empty slot and fill it in
for (i = 0; i < VirtualCallData::row_limit(); i++) {
Label next_test;
Address recv_addr(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
mdo_offset_bias);
load(recv_addr, tmp1, T_OBJECT);
__ tst(tmp1);
@@ -2829,8 +2819,8 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
__ delayed()->nop();
__ st_ptr(recv, recv_addr);
__ set(DataLayout::counter_increment, tmp1);
__ st_ptr(tmp1, Address(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
mdo_offset_bias));
__ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
mdo_offset_bias);
if (i < (VirtualCallData::row_limit() - 1)) {
__ br(Assembler::always, false, Assembler::pt, update_done);
__ delayed()->nop();

View File

@@ -1,5 +1,5 @@
/*
* Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,13 +29,13 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
Label L;
const Register temp_reg = G3_scratch;
// Note: needs more testing of out-of-line vs. inline slow case
Address ic_miss(temp_reg, SharedRuntime::get_ic_miss_stub());
verify_oop(receiver);
ld_ptr(receiver, oopDesc::klass_offset_in_bytes(), temp_reg);
cmp(temp_reg, iCache);
brx(Assembler::equal, true, Assembler::pt, L);
delayed()->nop();
jump_to(ic_miss, 0);
AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
jump_to(ic_miss, temp_reg);
delayed()->nop();
align(CodeEntryAlignment);
bind(L);
@@ -84,7 +84,7 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
Label done;
Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes());
Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
// The following move must be the first instruction emitted, since debug
// information may be generated for it.
@@ -132,7 +132,7 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb
Label done;
Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes());
Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
if (UseBiasedLocking) {
@@ -370,7 +370,7 @@ void C1_MacroAssembler::allocate_array(
void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
if (!VerifyOops) return;
verify_oop_addr(Address(SP, 0, stack_offset + STACK_BIAS));
verify_oop_addr(Address(SP, stack_offset + STACK_BIAS));
}
void C1_MacroAssembler::verify_not_null_oop(Register r) {

View File

@@ -1,5 +1,5 @@
/*
* Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -57,13 +57,13 @@ int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address e
// check for pending exceptions
{ Label L;
Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
Address exception_addr(G2_thread, Thread::pending_exception_offset());
ld_ptr(exception_addr, Gtemp);
br_null(Gtemp, false, pt, L);
delayed()->nop();
Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset()));
Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
st_ptr(G0, vm_result_addr);
Address vm_result_addr_2(G2_thread, 0, in_bytes(JavaThread::vm_result_2_offset()));
Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
st_ptr(G0, vm_result_addr_2);
if (frame_size() == no_frame_size) {
@@ -73,8 +73,8 @@ int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address e
} else if (_stub_id == Runtime1::forward_exception_id) {
should_not_reach_here();
} else {
Address exc(G4, Runtime1::entry_for(Runtime1::forward_exception_id));
jump_to(exc, 0);
AddressLiteral exc(Runtime1::entry_for(Runtime1::forward_exception_id));
jump_to(exc, G4);
delayed()->nop();
}
bind(L);
@@ -85,7 +85,7 @@ int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address e
get_vm_result (oop_result1);
} else {
// be a little paranoid and clear the result
Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset()));
Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
st_ptr(G0, vm_result_addr);
}
@@ -93,7 +93,7 @@ int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address e
get_vm_result_2(oop_result2);
} else {
// be a little paranoid and clear the result
Address vm_result_addr_2(G2_thread, 0, in_bytes(JavaThread::vm_result_2_offset()));
Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
st_ptr(G0, vm_result_addr_2);
}
@@ -479,8 +479,8 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
Register G4_length = G4; // Incoming
Register O0_obj = O0; // Outgoing
Address klass_lh(G5_klass, 0, ((klassOopDesc::header_size() * HeapWordSize)
+ Klass::layout_helper_offset_in_bytes()));
Address klass_lh(G5_klass, ((klassOopDesc::header_size() * HeapWordSize)
+ Klass::layout_helper_offset_in_bytes()));
assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
assert(Klass::_lh_header_size_mask == 0xFF, "bytewise");
// Use this offset to pick out an individual byte of the layout_helper:
@@ -902,8 +902,8 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ srl(addr, CardTableModRefBS::card_shift, addr);
#endif
Address rs(cardtable, (address)byte_map_base);
__ load_address(rs); // cardtable := <card table base>
AddressLiteral rs(byte_map_base);
__ set(rs, cardtable); // cardtable := <card table base>
__ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
__ br_on_reg_cond(Assembler::rc_nz, /*annul*/false, Assembler::pt,
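A host-side model of the card-mark lookup being emitted here: the address is scaled down by the card shift and used to index off byte_map_base (card_shift = 9, i.e. 512-byte cards, is an assumption):

#include <cstdint>

uint8_t card_for(uintptr_t addr, const uint8_t* byte_map_base, unsigned card_shift = 9) {
  return byte_map_base[addr >> card_shift];   // the srl + ldub pair above
}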
@@ -1022,8 +1022,8 @@ void Runtime1::generate_handle_exception(StubAssembler* sasm, OopMapSet* oop_map
__ restore();
Address exc(G4, Runtime1::entry_for(Runtime1::unwind_exception_id));
__ jump_to(exc, 0);
AddressLiteral exc(Runtime1::entry_for(Runtime1::unwind_exception_id));
__ jump_to(exc, G4);
__ delayed()->nop();

View File

@@ -1,5 +1,5 @@
/*
* Copyright 2004-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2004-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -106,8 +106,7 @@ void CompactingPermGenGen::generate_vtable_methods(void** vtbl_list,
__ and3(L0, 255, L4); // Isolate L4 = method offset.
__ sll(L4, LogBytesPerWord, L4);
__ ld_ptr(L3, L4, L4); // Get address of correct virtual method
Address method(L4, 0);
__ jmpl(method, G0); // Jump to correct method.
__ jmpl(L4, 0, G0); // Jump to correct method.
__ delayed()->restore(); // Restore registers.
__ flush();

View File

@@ -1,5 +1,5 @@
/*
* Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -46,14 +46,13 @@ void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, oop cached_o
// (1) the oop is old (i.e., doesn't matter for scavenges)
// (2) these ICStubs are removed *before* a GC happens, so the roots disappear
assert(cached_oop == NULL || cached_oop->is_perm(), "must be old oop");
Address cached_oop_addr(G5_inline_cache_reg, address(cached_oop));
// Force the sethi to generate the fixed sequence so next_instruction_address works
masm->sethi(cached_oop_addr, true /* ForceRelocatable */ );
masm->add(cached_oop_addr, G5_inline_cache_reg);
AddressLiteral cached_oop_addrlit(cached_oop, relocInfo::none);
// Force the set to generate the fixed sequence so next_instruction_address works
masm->patchable_set(cached_oop_addrlit, G5_inline_cache_reg);
assert(G3_scratch != G5_method, "Do not clobber the method oop in the transition stub");
assert(G3_scratch != G5_inline_cache_reg, "Do not clobber the inline cache register in the transition stub");
Address entry(G3_scratch, entry_point);
masm->JUMP(entry, 0);
AddressLiteral entry(entry_point);
masm->JUMP(entry, G3_scratch, 0);
masm->delayed()->nop();
masm->flush();
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,8 +35,8 @@
// This file specializes the assembler with interpreter-specific macros
const Address InterpreterMacroAssembler::l_tmp( FP, 0, (frame::interpreter_frame_l_scratch_fp_offset * wordSize ) + STACK_BIAS);
const Address InterpreterMacroAssembler::d_tmp( FP, 0, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS);
const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS);
const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS);
#else // CC_INTERP
#ifndef STATE
@@ -78,14 +78,12 @@ void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress);// get entry addr
#else
ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode
ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode
// dispatch table to use
Address tbl(G3_scratch, (address)Interpreter::dispatch_table(state));
sethi(tbl);
sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
add(tbl, tbl.base(), 0);
ld_ptr( G3_scratch, Lbyte_code, IdispatchAddress); // get entry addr
AddressLiteral tbl(Interpreter::dispatch_table(state));
sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
set(tbl, G3_scratch); // compute addr of table
ld_ptr(G3_scratch, Lbyte_code, IdispatchAddress); // get entry addr
#endif
}
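A host-side model of the dispatch lookup emitted above: the next bytecode is scaled by the word size and used to index the per-state table (the typedef is illustrative):

typedef void (*dispatch_entry_t)();

dispatch_entry_t dispatch(dispatch_entry_t* table, unsigned bytecode) {
  return table[bytecode];   // the sll by LogBytesPerWord + ld_ptr, in C++ terms
}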
@@ -165,8 +163,7 @@ void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg)
Label L;
// Check the "pending popframe condition" flag in the current thread
Address popframe_condition_addr(G2_thread, 0, in_bytes(JavaThread::popframe_condition_offset()));
ld(popframe_condition_addr, scratch_reg);
ld(G2_thread, JavaThread::popframe_condition_offset(), scratch_reg);
// Initiate popframe handling only if it is not already being processed. If the flag
// has the popframe_processing bit set, it means that this code is called *during* popframe
@@ -192,11 +189,10 @@ void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg)
void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
Register thr_state = G4_scratch;
ld_ptr(Address(G2_thread, 0, in_bytes(JavaThread::jvmti_thread_state_offset())),
thr_state);
const Address tos_addr(thr_state, 0, in_bytes(JvmtiThreadState::earlyret_tos_offset()));
const Address oop_addr(thr_state, 0, in_bytes(JvmtiThreadState::earlyret_oop_offset()));
const Address val_addr(thr_state, 0, in_bytes(JvmtiThreadState::earlyret_value_offset()));
ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
const Address tos_addr(thr_state, JvmtiThreadState::earlyret_tos_offset());
const Address oop_addr(thr_state, JvmtiThreadState::earlyret_oop_offset());
const Address val_addr(thr_state, JvmtiThreadState::earlyret_value_offset());
switch (state) {
case ltos: ld_long(val_addr, Otos_l); break;
case atos: ld_ptr(oop_addr, Otos_l);
@@ -222,8 +218,7 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg)
if (JvmtiExport::can_force_early_return()) {
Label L;
Register thr_state = G3_scratch;
ld_ptr(Address(G2_thread, 0, in_bytes(JavaThread::jvmti_thread_state_offset())),
thr_state);
ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
tst(thr_state);
br(zero, false, pt, L); // if (thread->jvmti_thread_state() == NULL) exit;
delayed()->nop();
@@ -231,16 +226,14 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg)
// Initiate earlyret handling only if it is not already being processed.
// If the flag has the earlyret_processing bit set, it means that this code
// is called *during* earlyret handling - we don't want to reenter.
ld(Address(thr_state, 0, in_bytes(JvmtiThreadState::earlyret_state_offset())),
G4_scratch);
ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch);
cmp(G4_scratch, JvmtiThreadState::earlyret_pending);
br(Assembler::notEqual, false, pt, L);
delayed()->nop();
// Call Interpreter::remove_activation_early_entry() to get the address of the
// same-named entrypoint in the generated interpreter code
Address tos_addr(thr_state, 0, in_bytes(JvmtiThreadState::earlyret_tos_offset()));
ld(tos_addr, Otos_l1);
ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1);
call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1);
// Jump to Interpreter::_remove_activation_early_entry
@@ -294,10 +287,9 @@ void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* tab
} else {
#endif
// dispatch table to use
Address tbl(G3_scratch, (address)table);
AddressLiteral tbl(table);
sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
load_address(tbl); // compute addr of table
set(tbl, G3_scratch); // compute addr of table
ld_ptr(G3_scratch, Lbyte_code, G3_scratch); // get entry addr
#ifdef FAST_DISPATCH
}
@@ -601,26 +593,17 @@ void InterpreterMacroAssembler::empty_expression_stack() {
// Reset SP by subtracting more space from Lesp.
Label done;
const Address max_stack (Lmethod, 0, in_bytes(methodOopDesc::max_stack_offset()));
const Address access_flags(Lmethod, 0, in_bytes(methodOopDesc::access_flags_offset()));
verify_oop(Lmethod);
assert( G4_scratch != Gframe_size,
"Only you can prevent register aliasing!");
assert(G4_scratch != Gframe_size, "Only you can prevent register aliasing!");
// A native does not need to do this, since its callee does not change SP.
ld(access_flags, Gframe_size);
ld(Lmethod, methodOopDesc::access_flags_offset(), Gframe_size); // Load access flags.
btst(JVM_ACC_NATIVE, Gframe_size);
br(Assembler::notZero, false, Assembler::pt, done);
delayed()->nop();
//
// Compute max expression stack+register save area
//
lduh( max_stack, Gframe_size );
lduh(Lmethod, in_bytes(methodOopDesc::max_stack_offset()), Gframe_size); // Load max stack.
if (TaggedStackInterpreter) sll ( Gframe_size, 1, Gframe_size); // max_stack * 2 for TAGS
add( Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size );
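A host-side sketch of the frame-size arithmetic in this block, assuming a tagged interpreter stack uses two words per value:

int expression_stack_words(int max_stack, bool tagged_stack, int reg_save_words) {
  int words = tagged_stack ? max_stack * 2 : max_stack;  // max_stack * 2 for TAGS
  return words + reg_save_words;                         // plus the register save area
}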
@@ -721,8 +704,7 @@ void InterpreterMacroAssembler::call_from_interpreter(Register target, Register
verify_thread();
Label skip_compiled_code;
const Address interp_only (G2_thread, 0, in_bytes(JavaThread::interp_only_mode_offset()));
const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
ld(interp_only, scratch);
tst(scratch);
br(Assembler::notZero, true, Assembler::pn, skip_compiled_code);
@@ -916,8 +898,8 @@ void InterpreterMacroAssembler::throw_if_not_2( address throw_entry_point,
Register Rscratch,
Label& ok ) {
assert(throw_entry_point != NULL, "entry point must be generated by now");
Address dest(Rscratch, throw_entry_point);
jump_to(dest);
AddressLiteral dest(throw_entry_point);
jump_to(dest, Rscratch);
delayed()->nop();
bind(ok);
}
@@ -1035,18 +1017,18 @@ void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
Label unlocked, unlock, no_unlock;
// get the value of _do_not_unlock_if_synchronized into G1_scratch
const Address do_not_unlock_if_synchronized(G2_thread, 0,
in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
const Address do_not_unlock_if_synchronized(G2_thread,
JavaThread::do_not_unlock_if_synchronized_offset());
ldbool(do_not_unlock_if_synchronized, G1_scratch);
stbool(G0, do_not_unlock_if_synchronized); // reset the flag
// check if synchronized method
const Address access_flags(Lmethod, 0, in_bytes(methodOopDesc::access_flags_offset()));
const Address access_flags(Lmethod, methodOopDesc::access_flags_offset());
interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
push(state); // save tos
ld(access_flags, G3_scratch);
ld(access_flags, G3_scratch); // Load access flags.
btst(JVM_ACC_SYNCHRONIZED, G3_scratch);
br( zero, false, pt, unlocked);
br(zero, false, pt, unlocked);
delayed()->nop();
// Don't unlock anything if the _do_not_unlock_if_synchronized flag
@@ -1236,8 +1218,8 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object)
Register obj_reg = Object;
Register mark_reg = G4_scratch;
Register temp_reg = G1_scratch;
Address lock_addr = Address(lock_reg, 0, BasicObjectLock::lock_offset_in_bytes());
Address mark_addr = Address(obj_reg, 0, oopDesc::mark_offset_in_bytes());
Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes());
Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
Label done;
Label slow_case;
@@ -1315,9 +1297,8 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
Register obj_reg = G3_scratch;
Register mark_reg = G4_scratch;
Register displaced_header_reg = G1_scratch;
Address lock_addr = Address(lock_reg, 0, BasicObjectLock::lock_offset_in_bytes());
Address lockobj_addr = Address(lock_reg, 0, BasicObjectLock::obj_offset_in_bytes());
Address mark_addr = Address(obj_reg, 0, oopDesc::mark_offset_in_bytes());
Address lockobj_addr(lock_reg, BasicObjectLock::obj_offset_in_bytes());
Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
Label done;
if (UseBiasedLocking) {
@@ -1328,7 +1309,8 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
}
// Test first if we are in the fast recursive case
ld_ptr(lock_addr, displaced_header_reg, BasicLock::displaced_header_offset_in_bytes());
Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes() + BasicLock::displaced_header_offset_in_bytes());
ld_ptr(lock_addr, displaced_header_reg);
br_null(displaced_header_reg, true, Assembler::pn, done);
delayed()->st_ptr(G0, lockobj_addr); // free entry
@@ -1384,7 +1366,7 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
Label zero_continue;
// Test MDO to avoid the call if it is NULL.
ld_ptr(Lmethod, in_bytes(methodOopDesc::method_data_offset()), ImethodDataPtr);
ld_ptr(Lmethod, methodOopDesc::method_data_offset(), ImethodDataPtr);
test_method_data_pointer(zero_continue);
call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp);
set_method_data_pointer_offset(O0);
@@ -1413,7 +1395,7 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
// If the mdp is valid, it will point to a DataLayout header which is
// consistent with the bcp. The converse is highly probable also.
lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch);
ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::const_offset())), O5);
ld_ptr(Lmethod, methodOopDesc::const_offset(), O5);
add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), G3_scratch);
add(G3_scratch, O5, G3_scratch);
cmp(Lbcp, G3_scratch);
@@ -1424,7 +1406,7 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
// %%% should use call_VM_leaf here?
//call_VM_leaf(noreg, ..., Lmethod, Lbcp, ImethodDataPtr);
save_frame_and_mov(sizeof(jdouble) / wordSize, Lmethod, O0, Lbcp, O1);
Address d_save(FP, 0, -sizeof(jdouble) + STACK_BIAS);
Address d_save(FP, -sizeof(jdouble) + STACK_BIAS);
stf(FloatRegisterImpl::D, Ftos_d, d_save);
mov(temp_reg->after_save(), O2);
save_thread(L7_thread_cache);
@@ -1456,14 +1438,14 @@ void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocat
#endif
// Test to see if we should create a method data oop
Address profile_limit(Rtmp, (address)&InvocationCounter::InterpreterProfileLimit);
AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit);
#ifdef _LP64
delayed()->nop();
sethi(profile_limit);
sethi(profile_limit, Rtmp);
#else
delayed()->sethi(profile_limit);
delayed()->sethi(profile_limit, Rtmp);
#endif
ld(profile_limit, Rtmp);
ld(Rtmp, profile_limit.low10(), Rtmp);
cmp(invocation_count, Rtmp);
br(Assembler::lessUnsigned, false, Assembler::pn, profile_continue);
delayed()->nop();
@@ -1521,7 +1503,7 @@ void InterpreterMacroAssembler::increment_mdp_data_at(int constant,
Register bumped_count,
bool decrement) {
// Locate the counter at a fixed offset from the mdp:
Address counter(ImethodDataPtr, 0, constant);
Address counter(ImethodDataPtr, constant);
increment_mdp_data_at(counter, bumped_count, decrement);
}
@@ -1535,7 +1517,7 @@ void InterpreterMacroAssembler::increment_mdp_data_at(Register reg,
bool decrement) {
// Add the constant to reg to get the offset.
add(ImethodDataPtr, reg, scratch2);
Address counter(scratch2, 0, constant);
Address counter(scratch2, constant);
increment_mdp_data_at(counter, bumped_count, decrement);
}
@@ -2201,7 +2183,7 @@ int InterpreterMacroAssembler::top_most_monitor_byte_offset() {
Address InterpreterMacroAssembler::top_most_monitor() {
return Address(FP, 0, top_most_monitor_byte_offset());
return Address(FP, top_most_monitor_byte_offset());
}
@@ -2214,15 +2196,15 @@ void InterpreterMacroAssembler::compute_stack_base( Register Rdest ) {
void InterpreterMacroAssembler::increment_invocation_counter( Register Rtmp, Register Rtmp2 ) {
assert(UseCompiler, "incrementing must be useful");
#ifdef CC_INTERP
Address inv_counter(G5_method, 0, in_bytes(methodOopDesc::invocation_counter_offset()
+ InvocationCounter::counter_offset()));
Address be_counter(G5_method, 0, in_bytes(methodOopDesc::backedge_counter_offset()
+ InvocationCounter::counter_offset()));
Address inv_counter(G5_method, methodOopDesc::invocation_counter_offset() +
InvocationCounter::counter_offset());
Address be_counter (G5_method, methodOopDesc::backedge_counter_offset() +
InvocationCounter::counter_offset());
#else
Address inv_counter(Lmethod, 0, in_bytes(methodOopDesc::invocation_counter_offset()
+ InvocationCounter::counter_offset()));
Address be_counter(Lmethod, 0, in_bytes(methodOopDesc::backedge_counter_offset()
+ InvocationCounter::counter_offset()));
Address inv_counter(Lmethod, methodOopDesc::invocation_counter_offset() +
InvocationCounter::counter_offset());
Address be_counter (Lmethod, methodOopDesc::backedge_counter_offset() +
InvocationCounter::counter_offset());
#endif /* CC_INTERP */
int delta = InvocationCounter::count_increment;
@ -2250,15 +2232,15 @@ void InterpreterMacroAssembler::increment_invocation_counter( Register Rtmp, Reg
void InterpreterMacroAssembler::increment_backedge_counter( Register Rtmp, Register Rtmp2 ) {
assert(UseCompiler, "incrementing must be useful");
#ifdef CC_INTERP
Address be_counter(G5_method, 0, in_bytes(methodOopDesc::backedge_counter_offset()
+ InvocationCounter::counter_offset()));
Address inv_counter(G5_method, 0, in_bytes(methodOopDesc::invocation_counter_offset()
+ InvocationCounter::counter_offset()));
Address be_counter (G5_method, methodOopDesc::backedge_counter_offset() +
InvocationCounter::counter_offset());
Address inv_counter(G5_method, methodOopDesc::invocation_counter_offset() +
InvocationCounter::counter_offset());
#else
Address be_counter(Lmethod, 0, in_bytes(methodOopDesc::backedge_counter_offset()
+ InvocationCounter::counter_offset()));
Address inv_counter(Lmethod, 0, in_bytes(methodOopDesc::invocation_counter_offset()
+ InvocationCounter::counter_offset()));
Address be_counter (Lmethod, methodOopDesc::backedge_counter_offset() +
InvocationCounter::counter_offset());
Address inv_counter(Lmethod, methodOopDesc::invocation_counter_offset() +
InvocationCounter::counter_offset());
#endif /* CC_INTERP */
int delta = InvocationCounter::count_increment;
// Load each counter in a register
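
Note how the rewritten constructors pass the methodOopDesc offsets plus InvocationCounter::counter_offset() directly, with no in_bytes() wrapper: apparently the new Address constructor accepts a ByteSize operand itself. A toy model of why that works (simplified stand-ins, not HotSpot's actual types):

    #include <cstdio>

    // ByteSize is a thin wrapper supporting operator+; an Address
    // overload taking ByteSize does the in_bytes() conversion once,
    // inside the constructor.
    class ByteSize {
      int _v;
    public:
      explicit ByteSize(int v) : _v(v) {}
      friend ByteSize operator+(ByteSize a, ByteSize b) { return ByteSize(a._v + b._v); }
      friend int in_bytes(ByteSize s) { return s._v; }
    };

    struct Address {
      int base_reg;
      int disp;
      Address(int base, int d)      : base_reg(base), disp(d) {}
      Address(int base, ByteSize d) : base_reg(base), disp(in_bytes(d)) {}
    };

    int main() {
      ByteSize invocation_counter_offset(8), counter_offset(4);
      Address a(/*Lmethod=*/1, invocation_counter_offset + counter_offset);
      printf("disp = %d\n", a.disp);  // prints 12
    }
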
@ -2289,7 +2271,7 @@ void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_c
assert_different_registers(backedge_count, Rtmp, branch_bcp);
assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr");
Address limit(Rtmp, address(&InvocationCounter::InterpreterBackwardBranchLimit));
AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit);
load_contents(limit, Rtmp);
cmp(backedge_count, Rtmp);
br(Assembler::lessUnsigned, false, Assembler::pt, did_not_overflow);
@ -2435,9 +2417,7 @@ void InterpreterMacroAssembler::notify_method_entry() {
if (JvmtiExport::can_post_interpreter_events()) {
Label L;
Register temp_reg = O5;
const Address interp_only (G2_thread, 0, in_bytes(JavaThread::interp_only_mode_offset()));
const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
ld(interp_only, temp_reg);
tst(temp_reg);
br(zero, false, pt, L);
@ -2489,9 +2469,7 @@ void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
Label L;
Register temp_reg = O5;
const Address interp_only (G2_thread, 0, in_bytes(JavaThread::interp_only_mode_offset()));
const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
ld(interp_only, temp_reg);
tst(temp_reg);
br(zero, false, pt, L);


@ -1,5 +1,5 @@
/*
* Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -105,7 +105,7 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
// the handle for a receiver will never be null
bool do_NULL_check = offset() != 0 || is_static();
Address h_arg = Address(Llocals, 0, Interpreter::local_offset_in_bytes(offset()));
Address h_arg = Address(Llocals, Interpreter::local_offset_in_bytes(offset()));
__ ld_ptr(h_arg, Rtmp1);
#ifdef ASSERT
if (TaggedStackInterpreter) {
@ -120,14 +120,14 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
}
#endif // ASSERT
if (!do_NULL_check) {
__ add(h_arg, Rtmp2);
__ add(h_arg.base(), h_arg.disp(), Rtmp2);
} else {
if (Rtmp1 == Rtmp2)
__ tst(Rtmp1);
else __ addcc(G0, Rtmp1, Rtmp2); // optimize mov/test pair
Label L;
__ brx(Assembler::notZero, true, Assembler::pt, L);
__ delayed()->add(h_arg, Rtmp2);
__ delayed()->add(h_arg.base(), h_arg.disp(), Rtmp2);
__ bind(L);
}
__ store_ptr_argument(Rtmp2, jni_arg); // this is often a no-op
@ -140,10 +140,10 @@ void InterpreterRuntime::SignatureHandlerGenerator::generate(uint64_t fingerprin
iterate(fingerprint);
// return result handler
Address result_handler(Lscratch, Interpreter::result_handler(method()->result_type()));
__ sethi(result_handler);
AddressLiteral result_handler(Interpreter::result_handler(method()->result_type()));
__ sethi(result_handler, Lscratch);
__ retl();
__ delayed()->add(result_handler, result_handler.base());
__ delayed()->add(Lscratch, result_handler.low10(), Lscratch);
__ flush();
}
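
The brx/annulled-delay-slot pair in the pass_object hunk above implements the usual JNI handle rule: a non-null reference is passed as the address of its handle slot, while a null reference is passed as NULL itself (the annulled add leaves Rtmp2 holding the zero produced by addcc). Rendered as plain C++ (an interpretation of the sequence, with illustrative names):

    #include <cstdio>

    static void* jni_object_arg(void** handle_slot, bool need_null_check) {
      if (!need_null_check) return handle_slot;  // receiver is never null
      // brx(notZero, annul, ...) + delayed add: take the slot address only
      // when the stored reference is non-null, else keep the zero.
      return (*handle_slot != nullptr) ? (void*)handle_slot : nullptr;
    }

    int main() {
      void* obj = (void*)0x1000;
      void* null_obj = nullptr;
      printf("%p %p\n", jni_object_arg(&obj, true), jni_object_arg(&null_obj, true));
    }
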


@ -1,5 +1,5 @@
/*
* Copyright 2004-2006 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2004-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -57,10 +57,10 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
Label label1, label2;
address cnt_addr = SafepointSynchronize::safepoint_counter_addr();
Address ca(O3, cnt_addr);
__ sethi (ca);
__ ld (ca, G4);
AddressLiteral cnt_addrlit(SafepointSynchronize::safepoint_counter_addr());
__ sethi (cnt_addrlit, O3);
Address cnt_addr(O3, cnt_addrlit.low10());
__ ld (cnt_addr, G4);
__ andcc (G4, 1, G0);
__ br (Assembler::notZero, false, Assembler::pn, label1);
__ delayed()->srl (O2, 2, O4);
@ -77,7 +77,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
default: ShouldNotReachHere();
}
__ ld (ca, O5);
__ ld (cnt_addr, O5);
__ cmp (O5, G4);
__ br (Assembler::notEqual, false, Assembler::pn, label2);
__ delayed()->mov (O7, G1);
@ -136,10 +136,10 @@ address JNI_FastGetField::generate_fast_get_long_field() {
Label label1, label2;
address cnt_addr = SafepointSynchronize::safepoint_counter_addr();
Address ca(G3, cnt_addr);
__ sethi (ca);
__ ld (ca, G4);
AddressLiteral cnt_addrlit(SafepointSynchronize::safepoint_counter_addr());
__ sethi (cnt_addrlit, G3);
Address cnt_addr(G3, cnt_addrlit.low10());
__ ld (cnt_addr, G4);
__ andcc (G4, 1, G0);
__ br (Assembler::notZero, false, Assembler::pn, label1);
__ delayed()->srl (O2, 2, O4);
@ -159,7 +159,7 @@ address JNI_FastGetField::generate_fast_get_long_field() {
__ ldx (O5, 0, O3);
#endif
__ ld (ca, G1);
__ ld (cnt_addr, G1);
__ cmp (G1, G4);
__ br (Assembler::notEqual, false, Assembler::pn, label2);
__ delayed()->mov (O7, G1);
@ -208,10 +208,10 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
Label label1, label2;
address cnt_addr = SafepointSynchronize::safepoint_counter_addr();
Address ca(O3, cnt_addr);
__ sethi (ca);
__ ld (ca, G4);
AddressLiteral cnt_addrlit(SafepointSynchronize::safepoint_counter_addr());
__ sethi (cnt_addrlit, O3);
Address cnt_addr(O3, cnt_addrlit.low10());
__ ld (cnt_addr, G4);
__ andcc (G4, 1, G0);
__ br (Assembler::notZero, false, Assembler::pn, label1);
__ delayed()->srl (O2, 2, O4);
@ -225,7 +225,7 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
default: ShouldNotReachHere();
}
__ ld (ca, O5);
__ ld (cnt_addr, O5);
__ cmp (O5, G4);
__ br (Assembler::notEqual, false, Assembler::pn, label2);
__ delayed()->mov (O7, G1);
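
The counter dance in these fast-path accessors is a seqlock-style read: sample the safepoint counter, bail out if it is odd (a safepoint is in progress), perform the speculative field load, then verify the counter has not changed; any mismatch routes to the slow path. A conceptual C++ model (memory orderings chosen for illustration):

    #include <atomic>
    #include <cstdio>

    std::atomic<unsigned> safepoint_counter{0};

    static bool fast_read(const int* field, int* out) {
      unsigned c1 = safepoint_counter.load(std::memory_order_acquire);
      if (c1 & 1) return false;   // odd: safepoint in progress (andcc G4,1)
      int v = *field;             // the speculative load
      unsigned c2 = safepoint_counter.load(std::memory_order_acquire);
      if (c1 != c2) return false; // counter moved: retry via slow path
      *out = v;
      return true;
    }

    int main() {
      int field = 42, v = 0;
      printf("ok=%d v=%d\n", fast_read(&field, &v), v);
    }
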


@ -1,5 +1,5 @@
/*
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,8 +38,7 @@ void NativeInstruction::set_data64_sethi(address instaddr, intptr_t x) {
destreg = inv_rd(*(unsigned int *)instaddr);
// Generate the new sequence
Address dest( destreg, (address)x );
_masm->sethi( dest, true );
_masm->patchable_sethi(x, destreg);
ICache::invalidate_range(instaddr, 7 * BytesPerInstWord);
}
@ -227,8 +226,8 @@ void NativeFarCall::set_destination(address dest) {
CodeBuffer buf(addr_at(0), instruction_size + 1);
MacroAssembler* _masm = new MacroAssembler(&buf);
// Generate the new sequence
Address(O7, dest);
_masm->jumpl_to(dest, O7);
AddressLiteral(dest);
_masm->jumpl_to(dest, O7, O7);
ICache::invalidate_range(addr_at(0), instruction_size );
#endif
}
@ -361,10 +360,12 @@ void NativeMovConstReg::test() {
VM_Version::allow_all();
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none);
a->add(I3, low10(0xaaaabbbb), I3);
a->sethi(0xccccdddd, O2, true, RelocationHolder::none);
a->add(O2, low10(0xccccdddd), O2);
AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
a->sethi(al1, I3);
a->add(I3, al1.low10(), I3);
AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
a->sethi(al2, O2);
a->add(O2, al2.low10(), O2);
nm = nativeMovConstReg_at( cb.code_begin() );
nm->print();
@ -468,12 +469,14 @@ void NativeMovConstRegPatching::test() {
VM_Version::allow_all();
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none);
AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
a->sethi(al1, I3);
a->nop();
a->add(I3, low10(0xaaaabbbb), I3);
a->sethi(0xccccdddd, O2, true, RelocationHolder::none);
a->add(I3, al1.low10(), I3);
AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
a->sethi(al2, O2);
a->nop();
a->add(O2, low10(0xccccdddd), O2);
a->add(O2, al2.low10(), O2);
nm = nativeMovConstRegPatching_at( cb.code_begin() );
nm->print();
@ -562,51 +565,53 @@ void NativeMovRegMem::test() {
VM_Version::allow_all();
a->ldsw( G5, low10(0xffffffff), G4 ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
AddressLiteral al1(0xffffffff, relocInfo::external_word_type);
AddressLiteral al2(0xaaaabbbb, relocInfo::external_word_type);
a->ldsw( G5, al1.low10(), G4 ); idx++;
a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
a->ldsw( G5, I3, G4 ); idx++;
a->ldsb( G5, low10(0xffffffff), G4 ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
a->ldsb( G5, al1.low10(), G4 ); idx++;
a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
a->ldsb( G5, I3, G4 ); idx++;
a->ldsh( G5, low10(0xffffffff), G4 ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
a->ldsh( G5, al1.low10(), G4 ); idx++;
a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
a->ldsh( G5, I3, G4 ); idx++;
a->lduw( G5, low10(0xffffffff), G4 ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
a->lduw( G5, al1.low10(), G4 ); idx++;
a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
a->lduw( G5, I3, G4 ); idx++;
a->ldub( G5, low10(0xffffffff), G4 ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
a->ldub( G5, al1.low10(), G4 ); idx++;
a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
a->ldub( G5, I3, G4 ); idx++;
a->lduh( G5, low10(0xffffffff), G4 ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
a->lduh( G5, al1.low10(), G4 ); idx++;
a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
a->lduh( G5, I3, G4 ); idx++;
a->ldx( G5, low10(0xffffffff), G4 ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
a->ldx( G5, al1.low10(), G4 ); idx++;
a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
a->ldx( G5, I3, G4 ); idx++;
a->ldd( G5, low10(0xffffffff), G4 ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
a->ldd( G5, al1.low10(), G4 ); idx++;
a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
a->ldd( G5, I3, G4 ); idx++;
a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;
a->stw( G5, G4, low10(0xffffffff) ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
a->stw( G5, G4, al1.low10() ); idx++;
a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
a->stw( G5, G4, I3 ); idx++;
a->stb( G5, G4, low10(0xffffffff) ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
a->stb( G5, G4, al1.low10() ); idx++;
a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
a->stb( G5, G4, I3 ); idx++;
a->sth( G5, G4, low10(0xffffffff) ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
a->sth( G5, G4, al1.low10() ); idx++;
a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
a->sth( G5, G4, I3 ); idx++;
a->stx( G5, G4, low10(0xffffffff) ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
a->stx( G5, G4, al1.low10() ); idx++;
a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
a->stx( G5, G4, I3 ); idx++;
a->std( G5, G4, low10(0xffffffff) ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
a->std( G5, G4, al1.low10() ); idx++;
a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
a->std( G5, G4, I3 ); idx++;
a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;
nm = nativeMovRegMem_at( cb.code_begin() );
@ -705,51 +710,52 @@ void NativeMovRegMemPatching::test() {
VM_Version::allow_all();
a->ldsw( G5, low10(0xffffffff), G4 ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
AddressLiteral al(0xffffffff, relocInfo::external_word_type);
a->ldsw( G5, al.low10(), G4); idx++;
a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
a->ldsw( G5, I3, G4 ); idx++;
a->ldsb( G5, low10(0xffffffff), G4 ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
a->ldsb( G5, al.low10(), G4); idx++;
a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
a->ldsb( G5, I3, G4 ); idx++;
a->ldsh( G5, low10(0xffffffff), G4 ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
a->ldsh( G5, al.low10(), G4); idx++;
a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
a->ldsh( G5, I3, G4 ); idx++;
a->lduw( G5, low10(0xffffffff), G4 ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
a->lduw( G5, al.low10(), G4); idx++;
a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
a->lduw( G5, I3, G4 ); idx++;
a->ldub( G5, low10(0xffffffff), G4 ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
a->ldub( G5, al.low10(), G4); idx++;
a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
a->ldub( G5, I3, G4 ); idx++;
a->lduh( G5, low10(0xffffffff), G4 ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
a->lduh( G5, al.low10(), G4); idx++;
a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
a->lduh( G5, I3, G4 ); idx++;
a->ldx( G5, low10(0xffffffff), G4 ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
a->ldx( G5, I3, G4 ); idx++;
a->ldd( G5, low10(0xffffffff), G4 ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
a->ldd( G5, I3, G4 ); idx++;
a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;
a->ldx( G5, al.low10(), G4); idx++;
a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
a->ldx( G5, I3, G4 ); idx++;
a->ldd( G5, al.low10(), G4); idx++;
a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
a->ldd( G5, I3, G4 ); idx++;
a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;
a->stw( G5, G4, low10(0xffffffff) ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
a->stw( G5, G4, al.low10()); idx++;
a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
a->stw( G5, G4, I3 ); idx++;
a->stb( G5, G4, low10(0xffffffff) ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
a->stb( G5, G4, al.low10()); idx++;
a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
a->stb( G5, G4, I3 ); idx++;
a->sth( G5, G4, low10(0xffffffff) ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
a->sth( G5, G4, al.low10()); idx++;
a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
a->sth( G5, G4, I3 ); idx++;
a->stx( G5, G4, low10(0xffffffff) ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
a->stx( G5, G4, al.low10()); idx++;
a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
a->stx( G5, G4, I3 ); idx++;
a->std( G5, G4, low10(0xffffffff) ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
a->std( G5, G4, al.low10()); idx++;
a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
a->std( G5, G4, I3 ); idx++;
a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;
nm = nativeMovRegMemPatching_at( cb.code_begin() );
@ -833,11 +839,12 @@ void NativeJump::test() {
VM_Version::allow_all();
a->sethi(0x7fffbbbb, I3, true, RelocationHolder::none);
a->jmpl(I3, low10(0x7fffbbbb), G0, RelocationHolder::none);
AddressLiteral al(0x7fffbbbb, relocInfo::external_word_type);
a->sethi(al, I3);
a->jmpl(I3, al.low10(), G0, RelocationHolder::none);
a->delayed()->nop();
a->sethi(0x7fffbbbb, I3, true, RelocationHolder::none);
a->jmpl(I3, low10(0x7fffbbbb), L3, RelocationHolder::none);
a->sethi(al, I3);
a->jmpl(I3, al.low10(), L3, RelocationHolder::none);
a->delayed()->nop();
nj = nativeJump_at( cb.code_begin() );


@ -1,5 +1,5 @@
/*
* Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -99,13 +99,6 @@ void Relocation::pd_set_data_value(address x, intptr_t o) {
break;
}
ip->set_data64_sethi( ip->addr_at(0), (intptr_t)x );
#ifdef COMPILER2
// [RGV] Someone must have missed putting in a reloc entry for the
// add in compiler2.
inst2 = ip->long_at( NativeMovConstReg::add_offset );
guarantee(Assembler::inv_op(inst2)==Assembler::arith_op, "arith op");
ip->set_long_at(NativeMovConstReg::add_offset,ip->set_data32_simm13( inst2, (intptr_t)x+o));
#endif
#else
guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
inst &= ~Assembler::hi22( -1);


@ -1,5 +1,5 @@
/*
* Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -74,8 +74,8 @@ void OptoRuntime::generate_exception_blob() {
int start = __ offset();
__ verify_thread();
__ st_ptr(Oexception, Address(G2_thread, 0, in_bytes(JavaThread::exception_oop_offset())));
__ st_ptr(Oissuing_pc, Address(G2_thread, 0, in_bytes(JavaThread::exception_pc_offset())));
__ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());
__ st_ptr(Oissuing_pc, G2_thread, JavaThread::exception_pc_offset());
// This call does all the hard work. It checks if an exception catch
// exists in the method.
@ -120,19 +120,19 @@ void OptoRuntime::generate_exception_blob() {
// Since this may be the deopt blob we must set O7 to look like we returned
// from the original pc that threw the exception
__ ld_ptr(Address(G2_thread, 0, in_bytes(JavaThread::exception_pc_offset())), O7);
__ ld_ptr(G2_thread, JavaThread::exception_pc_offset(), O7);
__ sub(O7, frame::pc_return_offset, O7);
assert(Assembler::is_simm13(in_bytes(JavaThread::exception_oop_offset())), "exception offset overflows simm13, following ld instruction cannot be in delay slot");
__ ld_ptr(Address(G2_thread, 0, in_bytes(JavaThread::exception_oop_offset())), Oexception); // O0
__ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception); // O0
#ifdef ASSERT
__ st_ptr(G0, Address(G2_thread, 0, in_bytes(JavaThread::exception_handler_pc_offset())));
__ st_ptr(G0, Address(G2_thread, 0, in_bytes(JavaThread::exception_pc_offset())));
__ st_ptr(G0, G2_thread, JavaThread::exception_handler_pc_offset());
__ st_ptr(G0, G2_thread, JavaThread::exception_pc_offset());
#endif
__ JMP(G3_scratch, 0);
// Clear the exception oop so GC no longer processes it as a root.
__ delayed()->st_ptr(G0, Address(G2_thread, 0, in_bytes(JavaThread::exception_oop_offset())));
__ delayed()->st_ptr(G0, G2_thread, JavaThread::exception_oop_offset());
// -------------
// make sure all code is generated


@ -625,9 +625,9 @@ void AdapterGenerator::patch_callers_callsite() {
__ mov(I7, O1); // VM needs caller's callsite
// Must be a leaf call...
// can be very far once the blob has been relocated
Address dest(O7, CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
__ relocate(relocInfo::runtime_call_type);
__ jumpl_to(dest, O7);
__ jumpl_to(dest, O7, O7);
__ delayed()->mov(G2_thread, L7_thread_cache);
__ mov(L7_thread_cache, G2_thread);
__ mov(L1, G1);
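
The jumpl_to sequence here exists because, as the comment says, the target "can be very far once the blob has been relocated": a plain SPARC call encodes only a 30-bit word displacement, roughly +/-2GB around the call site, so an arbitrary (especially LP64) target must be materialized in a register first. A sketch of the reachability test that motivates the choice (illustration only):

    #include <cstdint>
    #include <cstdio>

    // A direct call reaches its target only if the word displacement
    // fits in 30 signed bits.
    static bool call_reaches(uint64_t from_pc, uint64_t to) {
      int64_t byte_disp = (int64_t)(to - from_pc);
      if (byte_disp & 3) return false;        // targets are word aligned
      int64_t word_disp = byte_disp >> 2;
      return word_disp >= -(1LL << 29) && word_disp < (1LL << 29);
    }

    int main() {
      printf("%d %d\n", (int)call_reaches(0x1000, 0x2000),
                        (int)call_reaches(0x1000, 0x1000 + (3ULL << 31)));
    }
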
@ -1152,7 +1152,7 @@ void AdapterGenerator::gen_i2c_adapter(
#ifndef _LP64
if (g3_crushed) {
// Rats, the load was wasted; at least it is in cache...
__ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3);
__ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3);
}
#endif /* _LP64 */
@ -1165,7 +1165,7 @@ void AdapterGenerator::gen_i2c_adapter(
// we try and find the callee by normal means a safepoint
// is possible. So we stash the desired callee in the thread
// and the vm will find there should this case occur.
Address callee_target_addr(G2_thread, 0, in_bytes(JavaThread::callee_target_offset()));
Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
__ st_ptr(G5_method, callee_target_addr);
if (StressNonEntrant) {
@ -1218,7 +1218,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
Register R_temp = G1; // another scratch register
#endif
Address ic_miss(G3_scratch, SharedRuntime::get_ic_miss_stub());
AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
__ verify_oop(O0);
__ verify_oop(G5_method);
@ -1240,7 +1240,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
Label ok, ok2;
__ brx(Assembler::equal, false, Assembler::pt, ok);
__ delayed()->ld_ptr(G5_method, compiledICHolderOopDesc::holder_method_offset(), G5_method);
__ jump_to(ic_miss);
__ jump_to(ic_miss, G3_scratch);
__ delayed()->nop();
__ bind(ok);
@ -1251,7 +1251,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
__ bind(ok2);
__ br_null(G3_scratch, false, __ pt, skip_fixup);
__ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
__ jump_to(ic_miss);
__ jump_to(ic_miss, G3_scratch);
__ delayed()->nop();
}
@ -1444,8 +1444,8 @@ static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_o
// without calling into the VM: it's the empty function. Just pop this
// frame and then jump to forward_exception_entry; O7 will contain the
// native caller's return PC.
Address exception_entry(G3_scratch, StubRoutines::forward_exception_entry());
__ jump_to(exception_entry);
AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
__ jump_to(exception_entry, G3_scratch);
__ delayed()->restore(); // Pop this frame off.
__ bind(L);
}
@ -1822,14 +1822,14 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
{
Label L;
const Register temp_reg = G3_scratch;
Address ic_miss(temp_reg, SharedRuntime::get_ic_miss_stub());
AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
__ verify_oop(O0);
__ load_klass(O0, temp_reg);
__ cmp(temp_reg, G5_inline_cache_reg);
__ brx(Assembler::equal, true, Assembler::pt, L);
__ delayed()->nop();
__ jump_to(ic_miss, 0);
__ jump_to(ic_miss, temp_reg);
__ delayed()->nop();
__ align(CodeEntryAlignment);
__ bind(L);
@ -2261,21 +2261,19 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Transition from _thread_in_Java to _thread_in_native.
__ set(_thread_in_native, G3_scratch);
__ st(G3_scratch, G2_thread, in_bytes(JavaThread::thread_state_offset()));
__ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
// We flushed the windows ages ago; now mark them as flushed
// mark windows as flushed
__ set(JavaFrameAnchor::flushed, G3_scratch);
Address flags(G2_thread,
0,
in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset()));
Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
#ifdef _LP64
Address dest(O7, method->native_function());
AddressLiteral dest(method->native_function());
__ relocate(relocInfo::runtime_call_type);
__ jumpl_to(dest, O7);
__ jumpl_to(dest, O7, O7);
#else
__ call(method->native_function(), relocInfo::runtime_call_type);
#endif
@ -2316,7 +2314,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Block, if necessary, before resuming in _thread_in_Java state.
// In order for GC to work, don't clear the last_Java_sp until after blocking.
{ Label no_block;
Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());
AddressLiteral sync_state(SafepointSynchronize::address_of_state());
// Switch thread to "native transition" state before reading the synchronization state.
// This additional state is necessary because reading and testing the synchronization
@ -2326,7 +2324,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Thread A is resumed to finish this native method, but doesn't block here since it
// didn't see any synchronization in progress, and escapes.
__ set(_thread_in_native_trans, G3_scratch);
__ st(G3_scratch, G2_thread, in_bytes(JavaThread::thread_state_offset()));
__ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
if(os::is_MP()) {
if (UseMembar) {
// Force this write out before the read below
@ -2343,10 +2341,9 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
Label L;
Address suspend_state(G2_thread, 0, in_bytes(JavaThread::suspend_flags_offset()));
Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
__ br(Assembler::notEqual, false, Assembler::pn, L);
__ delayed()->
ld(suspend_state, G3_scratch);
__ delayed()->ld(suspend_state, G3_scratch);
__ cmp(G3_scratch, 0);
__ br(Assembler::equal, false, Assembler::pt, no_block);
__ delayed()->nop();
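
The transition above needs the store of _thread_in_native_trans to be ordered before the load of the safepoint state, a StoreLoad constraint that the UseMembar path satisfies with an explicit membar (the alternative serializes through a memory page). A conceptual model in C++ atomics (names and orderings illustrative):

    #include <atomic>

    enum ThreadState { _thread_in_native, _thread_in_native_trans, _thread_in_Java };

    std::atomic<int> thread_state{_thread_in_native};
    std::atomic<int> safepoint_state{0};  // 0 == not synchronized

    static bool must_block() {
      thread_state.store(_thread_in_native_trans, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_seq_cst);  // the membar
      return safepoint_state.load(std::memory_order_relaxed) != 0;
    }

    int main() { return must_block() ? 1 : 0; }
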
@ -2372,11 +2369,11 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ set(_thread_in_Java, G3_scratch);
__ st(G3_scratch, G2_thread, in_bytes(JavaThread::thread_state_offset()));
__ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
Label no_reguard;
__ ld(G2_thread, in_bytes(JavaThread::stack_guard_state_offset()), G3_scratch);
__ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
__ cmp(G3_scratch, JavaThread::stack_guard_yellow_disabled);
__ br(Assembler::notEqual, false, Assembler::pt, no_reguard);
__ delayed()->nop();
@ -2684,14 +2681,14 @@ nmethod *SharedRuntime::generate_dtrace_nmethod(
{
Label L;
const Register temp_reg = G3_scratch;
Address ic_miss(temp_reg, SharedRuntime::get_ic_miss_stub());
AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
__ verify_oop(O0);
__ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
__ cmp(temp_reg, G5_inline_cache_reg);
__ brx(Assembler::equal, true, Assembler::pt, L);
__ delayed()->nop();
__ jump_to(ic_miss, 0);
__ jump_to(ic_miss, temp_reg);
__ delayed()->nop();
__ align(CodeEntryAlignment);
__ bind(L);
@ -3155,15 +3152,13 @@ static void make_new_frames(MacroAssembler* masm, bool deopt) {
// Do this after the caller's return address is on top of stack
if (UseStackBanging) {
// Get total frame size for interpreted frames
__ ld(Address(O2UnrollBlock, 0,
Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()), O4);
__ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
__ bang_stack_size(O4, O3, G3_scratch);
}
__ ld(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()), O4array_size);
__ ld_ptr(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()), G3pcs);
__ ld_ptr(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()), O3array);
__ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
__ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
__ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);
// Adjust old interpreter frame to make space for new frame's extra java locals
//
@ -3176,7 +3171,7 @@ static void make_new_frames(MacroAssembler* masm, bool deopt) {
// for each frame we create and keep up the illusion every where.
//
__ ld(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()), O7);
__ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7);
__ mov(SP, O5_savedSP); // remember initial sender's original sp before adjustment
__ sub(SP, O7, SP);
@ -3225,9 +3220,9 @@ void SharedRuntime::generate_deopt_blob() {
Register I5exception_tmp = I5;
Register G4exception_tmp = G4_scratch;
int frame_size_words;
Address saved_Freturn0_addr(FP, 0, -sizeof(double) + STACK_BIAS);
Address saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
#if !defined(_LP64) && defined(COMPILER2)
Address saved_Greturn1_addr(FP, 0, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
Address saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
#endif
Label cont;
@ -3289,7 +3284,7 @@ void SharedRuntime::generate_deopt_blob() {
// save exception oop in JavaThread and fall through into the
// exception_in_tls case since they are handled in same way except
// for where the pending exception is kept.
__ st_ptr(Oexception, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
__ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());
//
// Vanilla deoptimization with an exception pending in exception_oop
@ -3306,7 +3301,7 @@ void SharedRuntime::generate_deopt_blob() {
{
// verify that there is really an exception oop in exception_oop
Label has_exception;
__ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
__ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
__ br_notnull(Oexception, false, Assembler::pt, has_exception);
__ delayed()-> nop();
__ stop("no exception in thread");
@ -3314,7 +3309,7 @@ void SharedRuntime::generate_deopt_blob() {
// verify that there is no pending exception
Label no_pending_exception;
Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
Address exception_addr(G2_thread, Thread::pending_exception_offset());
__ ld_ptr(exception_addr, Oexception);
__ br_null(Oexception, false, Assembler::pt, no_pending_exception);
__ delayed()->nop();


@ -980,8 +980,8 @@ void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocTyp
// This code sequence is relocatable to any address, even on LP64.
if ( force_far_call ) {
__ relocate(rtype);
Address dest(O7, (address)entry_point);
__ jumpl_to(dest, O7);
AddressLiteral dest(entry_point);
__ jumpl_to(dest, O7, O7);
}
else
#endif
@ -1031,17 +1031,6 @@ void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocTyp
void emit_lo(CodeBuffer &cbuf, int val) { }
void emit_hi(CodeBuffer &cbuf, int val) { }
void emit_ptr(CodeBuffer &cbuf, intptr_t val, Register reg, bool ForceRelocatable) {
MacroAssembler _masm(&cbuf);
if (ForceRelocatable) {
Address addr(reg, (address)val);
__ sethi(addr, ForceRelocatable);
__ add(addr, reg);
} else {
__ set(val, reg);
}
}
//=============================================================================
@ -1149,8 +1138,8 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
// If this does safepoint polling, then do it here
if( do_polling() && ra_->C->is_method_compilation() ) {
Address polling_page(L0, (address)os::get_polling_page());
__ sethi(polling_page, false);
AddressLiteral polling_page(os::get_polling_page());
__ sethi(polling_page, L0);
__ relocate(relocInfo::poll_return_type);
__ ld_ptr( L0, 0, G0 );
}
@ -1576,8 +1565,8 @@ void emit_java_to_interp(CodeBuffer &cbuf ) {
__ set_oop(NULL, reg_to_register_object(Matcher::inline_cache_reg_encode()));
__ set_inst_mark();
Address a(G3, (address)-1);
__ JUMP(a, 0);
AddressLiteral addrlit(-1);
__ JUMP(addrlit, G3, 0);
__ delayed()->nop();
@ -1662,7 +1651,7 @@ uint size_deopt_handler() {
// Emit exception handler code.
int emit_exception_handler(CodeBuffer& cbuf) {
Register temp_reg = G3;
Address exception_blob(temp_reg, OptoRuntime::exception_blob()->instructions_begin());
AddressLiteral exception_blob(OptoRuntime::exception_blob()->instructions_begin());
MacroAssembler _masm(&cbuf);
address base =
@ -1671,7 +1660,7 @@ int emit_exception_handler(CodeBuffer& cbuf) {
int offset = __ offset();
__ JUMP(exception_blob, 0); // sethi;jmp
__ JUMP(exception_blob, temp_reg, 0); // sethi;jmp
__ delayed()->nop();
assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
@ -1685,7 +1674,7 @@ int emit_deopt_handler(CodeBuffer& cbuf) {
// Can't use any of the current frame's registers as we may have deopted
// at a poll and everything (including G3) can be live.
Register temp_reg = L0;
Address deopt_blob(temp_reg, SharedRuntime::deopt_blob()->unpack());
AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
MacroAssembler _masm(&cbuf);
address base =
@ -1694,7 +1683,7 @@ int emit_deopt_handler(CodeBuffer& cbuf) {
int offset = __ offset();
__ save_frame(0);
__ JUMP(deopt_blob, 0); // sethi;jmp
__ JUMP(deopt_blob, temp_reg, 0); // sethi;jmp
__ delayed()->restore();
assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
@ -2261,9 +2250,8 @@ encode %{
address table_base = __ address_table_constant(_index2label);
RelocationHolder rspec = internal_word_Relocation::spec(table_base);
// Load table address
Address the_pc(table_reg, table_base, rspec);
__ load_address(the_pc);
// Move table address into a register.
__ set(table_base, table_reg, rspec);
// Jump to base address + switch value
__ ld_ptr(table_reg, switch_reg, table_reg);
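
This encoding is classic jump-table dispatch: put the table base in a register, load table[switch_value], and jump through the loaded pointer. The same shape in portable C++ (illustrative):

    #include <cstdio>

    static void case0() { puts("case 0"); }
    static void case1() { puts("case 1"); }

    int main() {
      void (*table[])() = { case0, case1 };  // the "address table constant"
      int switch_value = 1;                  // already scaled to an index
      table[switch_value]();                 // ld_ptr + jmp through the slot
    }
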
@ -2402,13 +2390,13 @@ encode %{
// The 64 bit pointer is stored in the generated code stream
enc_class SetPtr( immP src, iRegP rd ) %{
Register dest = reg_to_register_object($rd$$reg);
MacroAssembler _masm(&cbuf);
// [RGV] This next line should be generated from ADLC
if ( _opnds[1]->constant_is_oop() ) {
intptr_t val = $src$$constant;
MacroAssembler _masm(&cbuf);
__ set_oop_constant((jobject)val, dest);
} else { // non-oop pointers, e.g. card mark base, heap top
emit_ptr(cbuf, $src$$constant, dest, /*ForceRelocatable=*/ false);
__ set($src$$constant, dest);
}
%}
@ -2789,46 +2777,6 @@ enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
__ set64( $src$$constant, dest, temp );
%}
enc_class LdImmF(immF src, regF dst, o7RegP tmp) %{ // Load Immediate
address float_address = MacroAssembler(&cbuf).float_constant($src$$constant);
RelocationHolder rspec = internal_word_Relocation::spec(float_address);
#ifdef _LP64
Register tmp_reg = reg_to_register_object($tmp$$reg);
cbuf.relocate(cbuf.code_end(), rspec, 0);
emit_ptr(cbuf, (intptr_t)float_address, tmp_reg, /*ForceRelocatable=*/ true);
emit3_simm10( cbuf, Assembler::ldst_op, $dst$$reg, Assembler::ldf_op3, $tmp$$reg, 0 );
#else // _LP64
uint *code;
int tmp_reg = $tmp$$reg;
cbuf.relocate(cbuf.code_end(), rspec, 0);
emit2_22( cbuf, Assembler::branch_op, tmp_reg, Assembler::sethi_op2, (intptr_t) float_address );
cbuf.relocate(cbuf.code_end(), rspec, 0);
emit3_simm10( cbuf, Assembler::ldst_op, $dst$$reg, Assembler::ldf_op3, tmp_reg, (intptr_t) float_address );
#endif // _LP64
%}
enc_class LdImmD(immD src, regD dst, o7RegP tmp) %{ // Load Immediate
address double_address = MacroAssembler(&cbuf).double_constant($src$$constant);
RelocationHolder rspec = internal_word_Relocation::spec(double_address);
#ifdef _LP64
Register tmp_reg = reg_to_register_object($tmp$$reg);
cbuf.relocate(cbuf.code_end(), rspec, 0);
emit_ptr(cbuf, (intptr_t)double_address, tmp_reg, /*ForceRelocatable=*/ true);
emit3_simm10( cbuf, Assembler::ldst_op, $dst$$reg, Assembler::lddf_op3, $tmp$$reg, 0 );
#else // _LP64
uint *code;
int tmp_reg = $tmp$$reg;
cbuf.relocate(cbuf.code_end(), rspec, 0);
emit2_22( cbuf, Assembler::branch_op, tmp_reg, Assembler::sethi_op2, (intptr_t) double_address );
cbuf.relocate(cbuf.code_end(), rspec, 0);
emit3_simm10( cbuf, Assembler::ldst_op, $dst$$reg, Assembler::lddf_op3, tmp_reg, (intptr_t) double_address );
#endif // _LP64
%}
enc_class LdReplImmI(immI src, regD dst, o7RegP tmp, int count, int width) %{
// Load a constant replicated "count" times with width "width"
int bit_width = $width$$constant * 8;
@ -2840,28 +2788,13 @@ enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
val |= elt_val;
}
jdouble dval = *(jdouble*)&val; // coerce to double type
address double_address = MacroAssembler(&cbuf).double_constant(dval);
MacroAssembler _masm(&cbuf);
address double_address = __ double_constant(dval);
RelocationHolder rspec = internal_word_Relocation::spec(double_address);
#ifdef _LP64
Register tmp_reg = reg_to_register_object($tmp$$reg);
cbuf.relocate(cbuf.code_end(), rspec, 0);
emit_ptr(cbuf, (intptr_t)double_address, tmp_reg, /*ForceRelocatable=*/ true);
emit3_simm10( cbuf, Assembler::ldst_op, $dst$$reg, Assembler::lddf_op3, $tmp$$reg, 0 );
#else // _LP64
uint *code;
int tmp_reg = $tmp$$reg;
AddressLiteral addrlit(double_address, rspec);
cbuf.relocate(cbuf.code_end(), rspec, 0);
emit2_22( cbuf, Assembler::branch_op, tmp_reg, Assembler::sethi_op2, (intptr_t) double_address );
cbuf.relocate(cbuf.code_end(), rspec, 0);
emit3_simm10( cbuf, Assembler::ldst_op, $dst$$reg, Assembler::lddf_op3, tmp_reg, (intptr_t) double_address );
#endif // _LP64
%}
enc_class ShouldNotEncodeThis ( ) %{
ShouldNotCallThis();
__ sethi(addrlit, $tmp$$Register);
__ ldf(FloatRegisterImpl::D, $tmp$$Register, addrlit.low10(), $dst$$FloatRegister, rspec);
%}
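
The LdReplImmI loop above builds a double-width constant by replicating an immediate "count" times at "width" bytes apiece; a standalone rendering of that loop:

    #include <cstdint>
    #include <cstdio>

    static uint64_t replicate(uint64_t elt, int bit_width, int count) {
      uint64_t val = 0;
      if (bit_width < 64) elt &= (1ULL << bit_width) - 1;  // mask one element
      for (int i = 0; i < count; i++)
        val = (val << bit_width) | elt;                    // val |= elt_val
      return val;
    }

    int main() {
      // 0xAB replicated 8 times at 8 bits: 0xabababababababab
      printf("%#llx\n", (unsigned long long)replicate(0xAB, 8, 8));
    }
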
// Compiler ensures base is doubleword aligned and cnt is count of doublewords
@ -2901,19 +2834,19 @@ enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
int count_offset = java_lang_String:: count_offset_in_bytes();
// load str1 (jchar*) base address into tmp1_reg
__ load_heap_oop(Address(str1_reg, 0, value_offset), tmp1_reg);
__ ld(Address(str1_reg, 0, offset_offset), result_reg);
__ load_heap_oop(str1_reg, value_offset, tmp1_reg);
__ ld(str1_reg, offset_offset, result_reg);
__ add(tmp1_reg, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1_reg);
__ ld(Address(str1_reg, 0, count_offset), str1_reg); // hoisted
__ ld(str1_reg, count_offset, str1_reg); // hoisted
__ sll(result_reg, exact_log2(sizeof(jchar)), result_reg);
__ load_heap_oop(Address(str2_reg, 0, value_offset), tmp2_reg); // hoisted
__ load_heap_oop(str2_reg, value_offset, tmp2_reg); // hoisted
__ add(result_reg, tmp1_reg, tmp1_reg);
// load str2 (jchar*) base address into tmp2_reg
// __ ld_ptr(Address(str2_reg, 0, value_offset), tmp2_reg); // hoisted
__ ld(Address(str2_reg, 0, offset_offset), result_reg);
// __ ld_ptr(str2_reg, value_offset, tmp2_reg); // hoisted
__ ld(str2_reg, offset_offset, result_reg);
__ add(tmp2_reg, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp2_reg);
__ ld(Address(str2_reg, 0, count_offset), str2_reg); // hoisted
__ ld(str2_reg, count_offset, str2_reg); // hoisted
__ sll(result_reg, exact_log2(sizeof(jchar)), result_reg);
__ subcc(str1_reg, str2_reg, O7); // hoisted
__ add(result_reg, tmp2_reg, tmp2_reg);
@ -2922,8 +2855,8 @@ enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
// difference of the string lengths (stack)
// discard string base pointers, after loading up the lengths
// __ ld(Address(str1_reg, 0, count_offset), str1_reg); // hoisted
// __ ld(Address(str2_reg, 0, count_offset), str2_reg); // hoisted
// __ ld(str1_reg, count_offset, str1_reg); // hoisted
// __ ld(str2_reg, count_offset, str2_reg); // hoisted
// See if the lengths are different, and calculate min in str1_reg.
// Stash diff in O7 in case we need it for a tie-breaker.
@ -3020,19 +2953,19 @@ enc_class enc_String_Equals(o0RegP str1, o1RegP str2, g3RegP tmp1, g4RegP tmp2,
int count_offset = java_lang_String:: count_offset_in_bytes();
// load str1 (jchar*) base address into tmp1_reg
__ load_heap_oop(Address(str1_reg, 0, value_offset), tmp1_reg);
__ ld(Address(str1_reg, 0, offset_offset), result_reg);
__ load_heap_oop(Address(str1_reg, value_offset), tmp1_reg);
__ ld(Address(str1_reg, offset_offset), result_reg);
__ add(tmp1_reg, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1_reg);
__ ld(Address(str1_reg, 0, count_offset), str1_reg); // hoisted
__ ld(Address(str1_reg, count_offset), str1_reg); // hoisted
__ sll(result_reg, exact_log2(sizeof(jchar)), result_reg);
__ load_heap_oop(Address(str2_reg, 0, value_offset), tmp2_reg); // hoisted
__ load_heap_oop(Address(str2_reg, value_offset), tmp2_reg); // hoisted
__ add(result_reg, tmp1_reg, tmp1_reg);
// load str2 (jchar*) base address into tmp2_reg
// __ ld_ptr(Address(str2_reg, 0, value_offset), tmp2_reg); // hoisted
__ ld(Address(str2_reg, 0, offset_offset), result_reg);
// __ ld_ptr(Address(str2_reg, value_offset), tmp2_reg); // hoisted
__ ld(Address(str2_reg, offset_offset), result_reg);
__ add(tmp2_reg, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp2_reg);
__ ld(Address(str2_reg, 0, count_offset), str2_reg); // hoisted
__ ld(Address(str2_reg, count_offset), str2_reg); // hoisted
__ sll(result_reg, exact_log2(sizeof(jchar)), result_reg);
__ cmp(str1_reg, str2_reg); // hoisted
__ add(result_reg, tmp2_reg, tmp2_reg);
@ -3139,8 +3072,8 @@ enc_class enc_Array_Equals(o0RegP ary1, o1RegP ary2, g3RegP tmp1, g4RegP tmp2, n
__ delayed()->mov(G0, result_reg); // not equal
// load the lengths of the arrays
__ ld(Address(ary1_reg, 0, length_offset), tmp1_reg);
__ ld(Address(ary2_reg, 0, length_offset), tmp2_reg);
__ ld(Address(ary1_reg, length_offset), tmp1_reg);
__ ld(Address(ary2_reg, length_offset), tmp2_reg);
// return false if the two arrays are not equal length
__ cmp(tmp1_reg, tmp2_reg);
@ -3202,19 +3135,20 @@ enc_class enc_Array_Equals(o0RegP ary1, o1RegP ary2, g3RegP tmp1, g4RegP tmp2, n
enc_class enc_rethrow() %{
cbuf.set_inst_mark();
Register temp_reg = G3;
Address rethrow_stub(temp_reg, OptoRuntime::rethrow_stub());
AddressLiteral rethrow_stub(OptoRuntime::rethrow_stub());
assert(temp_reg != reg_to_register_object(R_I0_num), "temp must not break oop_reg");
MacroAssembler _masm(&cbuf);
#ifdef ASSERT
__ save_frame(0);
Address last_rethrow_addr(L1, (address)&last_rethrow);
__ sethi(last_rethrow_addr);
AddressLiteral last_rethrow_addrlit(&last_rethrow);
__ sethi(last_rethrow_addrlit, L1);
Address addr(L1, last_rethrow_addrlit.low10());
__ get_pc(L2);
__ inc(L2, 3 * BytesPerInstWord); // skip this & 2 more insns to point at jump_to
__ st_ptr(L2, last_rethrow_addr);
__ st_ptr(L2, addr);
__ restore();
#endif
__ JUMP(rethrow_stub, 0); // sethi;jmp
__ JUMP(rethrow_stub, temp_reg, 0); // sethi;jmp
__ delayed()->nop();
%}
@ -5493,8 +5427,9 @@ instruct loadB(iRegI dst, memory mem) %{
size(4);
format %{ "LDSB $mem,$dst\t! byte" %}
opcode(Assembler::ldsb_op3);
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_encode %{
__ ldsb($mem$$Address, $dst$$Register);
%}
ins_pipe(iload_mask_mem);
%}
@ -5505,8 +5440,9 @@ instruct loadB2L(iRegL dst, memory mem) %{
size(4);
format %{ "LDSB $mem,$dst\t! byte -> long" %}
opcode(Assembler::ldsb_op3);
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_encode %{
__ ldsb($mem$$Address, $dst$$Register);
%}
ins_pipe(iload_mask_mem);
%}
@ -5517,8 +5453,9 @@ instruct loadUB(iRegI dst, memory mem) %{
size(4);
format %{ "LDUB $mem,$dst\t! ubyte" %}
opcode(Assembler::ldub_op3);
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_encode %{
__ ldub($mem$$Address, $dst$$Register);
%}
ins_pipe(iload_mask_mem);
%}
@ -5529,8 +5466,9 @@ instruct loadUB2L(iRegL dst, memory mem) %{
size(4);
format %{ "LDUB $mem,$dst\t! ubyte -> long" %}
opcode(Assembler::ldub_op3);
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_encode %{
__ ldub($mem$$Address, $dst$$Register);
%}
ins_pipe(iload_mask_mem);
%}
@ -5541,8 +5479,9 @@ instruct loadS(iRegI dst, memory mem) %{
size(4);
format %{ "LDSH $mem,$dst\t! short" %}
opcode(Assembler::ldsh_op3);
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_encode %{
__ ldsh($mem$$Address, $dst$$Register);
%}
ins_pipe(iload_mask_mem);
%}
@ -5553,8 +5492,9 @@ instruct loadS2L(iRegL dst, memory mem) %{
size(4);
format %{ "LDSH $mem,$dst\t! short -> long" %}
opcode(Assembler::ldsh_op3);
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_encode %{
__ ldsh($mem$$Address, $dst$$Register);
%}
ins_pipe(iload_mask_mem);
%}
@ -5565,8 +5505,9 @@ instruct loadUS(iRegI dst, memory mem) %{
size(4);
format %{ "LDUH $mem,$dst\t! ushort/char" %}
opcode(Assembler::lduh_op3);
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_encode %{
__ lduh($mem$$Address, $dst$$Register);
%}
ins_pipe(iload_mask_mem);
%}
@ -5577,8 +5518,9 @@ instruct loadUS2L(iRegL dst, memory mem) %{
size(4);
format %{ "LDUH $mem,$dst\t! ushort/char -> long" %}
opcode(Assembler::lduh_op3);
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_encode %{
__ lduh($mem$$Address, $dst$$Register);
%}
ins_pipe(iload_mask_mem);
%}
@ -5589,8 +5531,9 @@ instruct loadI(iRegI dst, memory mem) %{
size(4);
format %{ "LDUW $mem,$dst\t! int" %}
opcode(Assembler::lduw_op3);
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_encode %{
__ lduw($mem$$Address, $dst$$Register);
%}
ins_pipe(iload_mem);
%}
@ -5601,8 +5544,9 @@ instruct loadI2L(iRegL dst, memory mem) %{
size(4);
format %{ "LDSW $mem,$dst\t! int -> long" %}
opcode(Assembler::ldsw_op3);
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_encode %{
__ ldsw($mem$$Address, $dst$$Register);
%}
ins_pipe(iload_mem);
%}
@ -5613,8 +5557,9 @@ instruct loadUI2L(iRegL dst, memory mem) %{
size(4);
format %{ "LDUW $mem,$dst\t! uint -> long" %}
opcode(Assembler::lduw_op3);
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_encode %{
__ lduw($mem$$Address, $dst$$Register);
%}
ins_pipe(iload_mem);
%}
@ -5625,8 +5570,9 @@ instruct loadL(iRegL dst, memory mem ) %{
size(4);
format %{ "LDX $mem,$dst\t! long" %}
opcode(Assembler::ldx_op3);
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_encode %{
__ ldx($mem$$Address, $dst$$Register);
%}
ins_pipe(iload_mem);
%}
@ -5721,31 +5667,29 @@ instruct loadP(iRegP dst, memory mem) %{
#ifndef _LP64
format %{ "LDUW $mem,$dst\t! ptr" %}
opcode(Assembler::lduw_op3, 0, REGP_OP);
ins_encode %{
__ lduw($mem$$Address, $dst$$Register);
%}
#else
format %{ "LDX $mem,$dst\t! ptr" %}
opcode(Assembler::ldx_op3, 0, REGP_OP);
ins_encode %{
__ ldx($mem$$Address, $dst$$Register);
%}
#endif
ins_encode( form3_mem_reg( mem, dst ) );
ins_pipe(iload_mem);
%}
// Load Compressed Pointer
instruct loadN(iRegN dst, memory mem) %{
match(Set dst (LoadN mem));
ins_cost(MEMORY_REF_COST);
size(4);
match(Set dst (LoadN mem));
ins_cost(MEMORY_REF_COST);
size(4);
format %{ "LDUW $mem,$dst\t! compressed ptr" %}
ins_encode %{
Register index = $mem$$index$$Register;
if (index != G0) {
__ lduw($mem$$base$$Register, index, $dst$$Register);
} else {
__ lduw($mem$$base$$Register, $mem$$disp, $dst$$Register);
}
%}
ins_pipe(iload_mem);
format %{ "LDUW $mem,$dst\t! compressed ptr" %}
ins_encode %{
__ lduw($mem$$Address, $dst$$Register);
%}
ins_pipe(iload_mem);
%}
// Load Klass Pointer
@ -5756,12 +5700,15 @@ instruct loadKlass(iRegP dst, memory mem) %{
#ifndef _LP64
format %{ "LDUW $mem,$dst\t! klass ptr" %}
opcode(Assembler::lduw_op3, 0, REGP_OP);
ins_encode %{
__ lduw($mem$$Address, $dst$$Register);
%}
#else
format %{ "LDX $mem,$dst\t! klass ptr" %}
opcode(Assembler::ldx_op3, 0, REGP_OP);
ins_encode %{
__ ldx($mem$$Address, $dst$$Register);
%}
#endif
ins_encode( form3_mem_reg( mem, dst ) );
ins_pipe(iload_mem);
%}
@ -5772,16 +5719,8 @@ instruct loadNKlass(iRegN dst, memory mem) %{
size(4);
format %{ "LDUW $mem,$dst\t! compressed klass ptr" %}
ins_encode %{
Register base = as_Register($mem$$base);
Register index = as_Register($mem$$index);
Register dst = $dst$$Register;
if (index != G0) {
__ lduw(base, index, dst);
} else {
__ lduw(base, $mem$$disp, dst);
}
__ lduw($mem$$Address, $dst$$Register);
%}
ins_pipe(iload_mem);
%}
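
The deleted loadN/loadNKlass encodings dispatched by hand on "is there an index register?"; the new $mem$$Address form folds that choice into Address itself, so each load instruct shrinks to one line. A toy model of the dispatch the old code spelled out (G0 standing in for "no index"):

    #include <cstdio>

    const int G0 = 0;

    struct Address {
      int base, index, disp;
    };

    static void emit_lduw(const Address& a) {
      if (a.index != G0)
        printf("lduw [r%d + r%d]\n", a.base, a.index);  // reg+reg form
      else
        printf("lduw [r%d + %d]\n", a.base, a.disp);    // reg+imm form
    }

    int main() {
      emit_lduw(Address{5, 3, 0});
      emit_lduw(Address{5, G0, 16});
    }
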
@ -5867,8 +5806,8 @@ instruct loadConP_poll(iRegP dst, immP_poll src) %{
ins_cost(DEFAULT_COST);
format %{ "SET $src,$dst\t!ptr" %}
ins_encode %{
Address polling_page(reg_to_register_object($dst$$reg), (address)os::get_polling_page());
__ sethi(polling_page, false );
AddressLiteral polling_page(os::get_polling_page());
__ sethi(polling_page, reg_to_register_object($dst$$reg));
%}
ins_pipe(loadConP_poll);
%}
@ -5927,14 +5866,21 @@ instruct loadConF(regF dst, immF src, o7RegP tmp) %{
effect(KILL tmp);
#ifdef _LP64
size(36);
size(8*4);
#else
size(8);
size(2*4);
#endif
format %{ "SETHI hi(&$src),$tmp\t!get float $src from table\n\t"
"LDF [$tmp+lo(&$src)],$dst" %}
ins_encode( LdImmF(src, dst, tmp) );
ins_encode %{
address float_address = __ float_constant($src$$constant);
RelocationHolder rspec = internal_word_Relocation::spec(float_address);
AddressLiteral addrlit(float_address, rspec);
__ sethi(addrlit, $tmp$$Register);
__ ldf(FloatRegisterImpl::S, $tmp$$Register, addrlit.low10(), $dst$$FloatRegister, rspec);
%}
ins_pipe(loadConFD);
%}
@ -5943,14 +5889,21 @@ instruct loadConD(regD dst, immD src, o7RegP tmp) %{
effect(KILL tmp);
#ifdef _LP64
size(36);
size(8*4);
#else
size(8);
size(2*4);
#endif
format %{ "SETHI hi(&$src),$tmp\t!get double $src from table\n\t"
"LDDF [$tmp+lo(&$src)],$dst" %}
ins_encode( LdImmD(src, dst, tmp) );
ins_encode %{
address double_address = __ double_constant($src$$constant);
RelocationHolder rspec = internal_word_Relocation::spec(double_address);
AddressLiteral addrlit(double_address, rspec);
__ sethi(addrlit, $tmp$$Register);
__ ldf(FloatRegisterImpl::D, $tmp$$Register, addrlit.low10(), $dst$$FloatRegister, rspec);
%}
ins_pipe(loadConFD);
%}


@ -1,5 +1,5 @@
/*
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -68,16 +68,9 @@ class StubGenerator: public StubCodeGenerator {
#ifdef PRODUCT
#define inc_counter_np(a,b,c) (0)
#else
void inc_counter_np_(int& counter, Register t1, Register t2) {
Address counter_addr(t2, (address) &counter);
__ sethi(counter_addr);
__ ld(counter_addr, t1);
__ inc(t1);
__ st(t1, counter_addr);
}
#define inc_counter_np(counter, t1, t2) \
BLOCK_COMMENT("inc_counter " #counter); \
inc_counter_np_(counter, t1, t2);
__ inc_counter(&counter, t1, t2);
#endif
//----------------------------------------------------------------------------------------------------
@ -325,9 +318,9 @@ class StubGenerator: public StubCodeGenerator {
__ verify_thread();
const Register& temp_reg = Gtemp;
Address pending_exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
Address exception_file_offset_addr(G2_thread, 0, in_bytes(Thread::exception_file_offset ()));
Address exception_line_offset_addr(G2_thread, 0, in_bytes(Thread::exception_line_offset ()));
Address pending_exception_addr (G2_thread, Thread::pending_exception_offset());
Address exception_file_offset_addr(G2_thread, Thread::exception_file_offset ());
Address exception_line_offset_addr(G2_thread, Thread::exception_line_offset ());
// set pending exception
__ verify_oop(Oexception);
@ -340,8 +333,8 @@ class StubGenerator: public StubCodeGenerator {
// complete return to VM
assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");
Address stub_ret(temp_reg, StubRoutines::_call_stub_return_address);
__ jump_to(stub_ret);
AddressLiteral stub_ret(StubRoutines::_call_stub_return_address);
__ jump_to(stub_ret, temp_reg);
__ delayed()->nop();
return start;
@ -366,7 +359,7 @@ class StubGenerator: public StubCodeGenerator {
const Register& handler_reg = Gtemp;
Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
Address exception_addr(G2_thread, Thread::pending_exception_offset());
#ifdef ASSERT
// make sure that this code is only executed if there is a pending exception
@ -456,8 +449,7 @@ class StubGenerator: public StubCodeGenerator {
int frame_complete = __ offset();
if (restore_saved_exception_pc) {
Address saved_exception_pc(G2_thread, 0, in_bytes(JavaThread::saved_exception_pc_offset()));
__ ld_ptr(saved_exception_pc, I7);
__ ld_ptr(G2_thread, JavaThread::saved_exception_pc_offset(), I7);
__ sub(I7, frame::pc_return_offset, I7);
}
@ -481,7 +473,7 @@ class StubGenerator: public StubCodeGenerator {
#ifdef ASSERT
Label L;
Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
Address exception_addr(G2_thread, Thread::pending_exception_offset());
Register scratch_reg = Gtemp;
__ ld_ptr(exception_addr, scratch_reg);
__ br_notnull(scratch_reg, false, Assembler::pt, L);
@ -835,7 +827,7 @@ class StubGenerator: public StubCodeGenerator {
address start = __ pc();
const int preserve_register_words = (64 * 2);
Address preserve_addr(FP, 0, (-preserve_register_words * wordSize) + STACK_BIAS);
Address preserve_addr(FP, (-preserve_register_words * wordSize) + STACK_BIAS);
Register Lthread = L7_thread_cache;
int i;
@ -1106,21 +1098,19 @@ class StubGenerator: public StubCodeGenerator {
__ srl_ptr(addr, CardTableModRefBS::card_shift, addr);
__ srl_ptr(count, CardTableModRefBS::card_shift, count);
__ sub(count, addr, count);
Address rs(tmp, (address)ct->byte_map_base);
__ load_address(rs);
AddressLiteral rs(ct->byte_map_base);
__ set(rs, tmp);
__ BIND(L_loop);
__ stb(G0, rs.base(), addr);
__ stb(G0, tmp, addr);
__ subcc(count, 1, count);
__ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
__ delayed()->add(addr, 1, addr);
}
}
break;
case BarrierSet::ModRef:
break;
default :
default:
ShouldNotReachHere();
}
}


@ -87,8 +87,8 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
}
// throw exception
assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
Address thrower(G3_scratch, Interpreter::throw_exception_entry());
__ jump_to (thrower);
AddressLiteral thrower(Interpreter::throw_exception_entry());
__ jump_to(thrower, G3_scratch);
__ delayed()->nop();
return entry;
}
@ -187,8 +187,8 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
const Register cache = G3_scratch;
const Register size = G1_scratch;
__ get_cache_and_index_at_bcp(cache, G1_scratch, 1);
__ ld_ptr(Address(cache, 0, in_bytes(constantPoolCacheOopDesc::base_offset()) +
in_bytes(ConstantPoolCacheEntry::flags_offset())), size);
__ ld_ptr(cache, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::flags_offset(), size);
__ and3(size, 0xFF, size); // argument size in words
__ sll(size, Interpreter::logStackElementSize(), size); // each argument size in bytes
__ add(Lesp, size, Lesp); // pop arguments
@ -202,9 +202,8 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
address entry = __ pc();
__ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
{ Label L;
Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
__ ld_ptr(exception_addr, Gtemp);
Address exception_addr(G2_thread, Thread::pending_exception_offset());
__ ld_ptr(exception_addr, Gtemp); // Load pending exception.
__ tst(Gtemp);
__ brx(Assembler::equal, false, Assembler::pt, L);
__ delayed()->nop();
@ -283,7 +282,7 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
// Update standard invocation counters
__ increment_invocation_counter(O0, G3_scratch);
if (ProfileInterpreter) { // %%% Merge this into methodDataOop
Address interpreter_invocation_counter(Lmethod, 0, in_bytes(methodOopDesc::interpreter_invocation_counter_offset()));
Address interpreter_invocation_counter(Lmethod, methodOopDesc::interpreter_invocation_counter_offset());
__ ld(interpreter_invocation_counter, G3_scratch);
__ inc(G3_scratch);
__ st(G3_scratch, interpreter_invocation_counter);
@ -291,9 +290,9 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
if (ProfileInterpreter && profile_method != NULL) {
// Test to see if we should create a method data oop
Address profile_limit(G3_scratch, (address)&InvocationCounter::InterpreterProfileLimit);
__ sethi(profile_limit);
__ ld(profile_limit, G3_scratch);
AddressLiteral profile_limit(&InvocationCounter::InterpreterProfileLimit);
__ sethi(profile_limit, G3_scratch);
__ ld(G3_scratch, profile_limit.low10(), G3_scratch);
__ cmp(O0, G3_scratch);
__ br(Assembler::lessUnsigned, false, Assembler::pn, *profile_method_continue);
__ delayed()->nop();
@ -302,9 +301,9 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
__ test_method_data_pointer(*profile_method);
}
Address invocation_limit(G3_scratch, (address)&InvocationCounter::InterpreterInvocationLimit);
__ sethi(invocation_limit);
__ ld(invocation_limit, G3_scratch);
AddressLiteral invocation_limit(&InvocationCounter::InterpreterInvocationLimit);
__ sethi(invocation_limit, G3_scratch);
__ ld(G3_scratch, invocation_limit.low10(), G3_scratch);
__ cmp(O0, G3_scratch);
__ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
__ delayed()->nop();
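
Both limit loads above use the same two-instruction idiom: sethi materializes the upper 22 bits of the literal's address, and the ld supplies the remaining 10 bits as its displacement. A self-checking sketch of why that recovers the full address:

    #include <cassert>
    #include <cstdint>

    int main() {
      uintptr_t addr = 0x12345678;                // any 32-bit literal
      uintptr_t hi   = addr & ~(uintptr_t)0x3ff;  // what sethi materializes
      int       lo   = (int)(addr & 0x3ff);       // the ld's displacement
      assert(hi + lo == addr);                    // sethi + low10 == address
      return 0;
    }
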
@ -315,8 +314,7 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
// ebx - methodOop
//
void InterpreterGenerator::lock_method(void) {
const Address access_flags (Lmethod, 0, in_bytes(methodOopDesc::access_flags_offset()));
__ ld(access_flags, O0);
__ ld(Lmethod, in_bytes(methodOopDesc::access_flags_offset()), O0); // Load access flags.
#ifdef ASSERT
{ Label ok;
@ -360,8 +358,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe
Register Rscratch,
Register Rscratch2) {
const int page_size = os::vm_page_size();
Address saved_exception_pc(G2_thread, 0,
in_bytes(JavaThread::saved_exception_pc_offset()));
Address saved_exception_pc(G2_thread, JavaThread::saved_exception_pc_offset());
Label after_frame_check;
assert_different_registers(Rframe_size, Rscratch, Rscratch2);
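
The code below loads the thread's stack base and size and rejects frames that would run past the guard pages. Reduced to C++, the test is roughly as follows (a sketch; the real check also folds in the interpreter's fixed frame overhead):

    #include <cstdint>

    bool frame_fits(uintptr_t sp, uintptr_t frame_size,
                    uintptr_t stack_base, uintptr_t stack_size,
                    uintptr_t guard_bytes) {
      // stacks grow down: the lowest usable address is base - size + guard
      uintptr_t limit = stack_base - stack_size + guard_bytes;
      return sp - frame_size >= limit;
    }
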
@ -373,7 +370,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe
__ delayed()->nop();
// get the stack base, and in debug, verify it is non-zero
__ ld_ptr( G2_thread, in_bytes(Thread::stack_base_offset()), Rscratch );
__ ld_ptr( G2_thread, Thread::stack_base_offset(), Rscratch );
#ifdef ASSERT
Label base_not_zero;
__ cmp( Rscratch, G0 );
@ -385,7 +382,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe
// get the stack size, and in debug, verify it is non-zero
assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" );
__ ld_ptr( G2_thread, in_bytes(Thread::stack_size_offset()), Rscratch2 );
__ ld_ptr( G2_thread, Thread::stack_size_offset(), Rscratch2 );
#ifdef ASSERT
Label size_not_zero;
__ cmp( Rscratch2, G0 );
@ -460,9 +457,9 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// (gri - 2/25/2000)
const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset()));
const Address size_of_locals (G5_method, 0, in_bytes(methodOopDesc::size_of_locals_offset()));
const Address max_stack (G5_method, 0, in_bytes(methodOopDesc::max_stack_offset()));
const Address size_of_parameters(G5_method, methodOopDesc::size_of_parameters_offset());
const Address size_of_locals (G5_method, methodOopDesc::size_of_locals_offset());
const Address max_stack (G5_method, methodOopDesc::max_stack_offset());
int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );
const int extra_space =
@ -539,8 +536,8 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
if (native_call) {
__ mov(G0, Lbcp);
} else {
__ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::const_offset())), Lbcp );
__ add(Address(Lbcp, 0, in_bytes(constMethodOopDesc::codes_offset())), Lbcp );
__ ld_ptr(G5_method, methodOopDesc::const_offset(), Lbcp);
__ add(Lbcp, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
}
__ mov( G5_method, Lmethod); // set Lmethod
__ get_constant_pool_cache( LcpoolCache ); // set LcpoolCache
@ -578,8 +575,8 @@ address InterpreterGenerator::generate_empty_entry(void) {
// do nothing for empty methods (do not even increment invocation counter)
if ( UseFastEmptyMethods) {
// If we need a safepoint check, generate full interpreter entry.
Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());
__ load_contents(sync_state, G3_scratch);
AddressLiteral sync_state(SafepointSynchronize::address_of_state());
__ set(sync_state, G3_scratch);
__ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
__ br(Assembler::notEqual, false, Assembler::pn, slow_path);
__ delayed()->nop();
@ -617,7 +614,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {
if ( UseFastAccessorMethods && !UseCompressedOops ) {
// Check if we need to reach a safepoint and generate full interpreter
// frame if so.
Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());
AddressLiteral sync_state(SafepointSynchronize::address_of_state());
__ load_contents(sync_state, G3_scratch);
__ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
__ br(Assembler::notEqual, false, Assembler::pn, slow_path);
@ -632,8 +629,8 @@ address InterpreterGenerator::generate_accessor_entry(void) {
// read first instruction word and extract bytecode @ 1 and index @ 2
// get first 4 bytes of the bytecodes (big endian!)
__ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::const_offset())), G1_scratch);
__ ld(Address(G1_scratch, 0, in_bytes(constMethodOopDesc::codes_offset())), G1_scratch);
__ ld_ptr(G5_method, methodOopDesc::const_offset(), G1_scratch);
__ ld(G1_scratch, constMethodOopDesc::codes_offset(), G1_scratch);
// move index @ 2 far left, then to the rightmost two bytes.
__ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
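
The word just loaded is big-endian, so the two index bytes at offsets 2 and 3 are its low 16 bits; the shifts isolate them. Equivalent C++ (a sketch):

    #include <cstdint>

    // bytecode @ 1 and constant-pool-cache index @ 2 of the first word
    static uint8_t  bytecode_at_1(uint32_t w) { return (uint8_t)(w >> 16); }
    static uint16_t index_at_2   (uint32_t w) { return (uint16_t)(w & 0xFFFF); }
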
@ -641,7 +638,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {
ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
// get constant pool cache
__ ld_ptr(G5_method, in_bytes(methodOopDesc::constants_offset()), G3_scratch);
__ ld_ptr(G5_method, methodOopDesc::constants_offset(), G3_scratch);
__ ld_ptr(G3_scratch, constantPoolOopDesc::cache_offset_in_bytes(), G3_scratch);
// get specific constant pool cache entry
@ -650,7 +647,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {
// Check the constant Pool cache entry to see if it has been resolved.
// If not, need the slow path.
ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
__ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::indices_offset()), G1_scratch);
__ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::indices_offset(), G1_scratch);
__ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
__ and3(G1_scratch, 0xFF, G1_scratch);
__ cmp(G1_scratch, Bytecodes::_getfield);
@ -658,8 +655,8 @@ address InterpreterGenerator::generate_accessor_entry(void) {
__ delayed()->nop();
// Get the type and return field offset from the constant pool cache
__ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()), G1_scratch);
__ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()), G3_scratch);
__ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), G1_scratch);
__ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), G3_scratch);
Label xreturn_path;
// Need to differentiate between igetfield, agetfield, bgetfield etc.
@ -718,7 +715,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// make sure registers are different!
assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
const Address Laccess_flags (Lmethod, 0, in_bytes(methodOopDesc::access_flags_offset()));
const Address Laccess_flags(Lmethod, methodOopDesc::access_flags_offset());
__ verify_oop(G5_method);
@ -728,7 +725,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// make sure method is native & not abstract
// rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
__ ld(G5_method, in_bytes(methodOopDesc::access_flags_offset()), Gtmp1);
__ ld(G5_method, methodOopDesc::access_flags_offset(), Gtmp1);
{
Label L;
__ btst(JVM_ACC_NATIVE, Gtmp1);
@ -755,10 +752,10 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// This slot will be set later; we initialize it to null here just in
// case we get a GC before the actual value is stored.
__ st_ptr(G0, Address(FP, 0, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS));
__ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);
const Address do_not_unlock_if_synchronized(G2_thread, 0,
in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
const Address do_not_unlock_if_synchronized(G2_thread,
JavaThread::do_not_unlock_if_synchronized_offset());
// Since at this point in the method invocation the exception handler
// would try to exit the monitor of a synchronized method which hasn't
// been entered yet, we set the thread local variable
@ -825,12 +822,13 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// get signature handler
{ Label L;
__ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::signature_handler_offset())), G3_scratch);
Address signature_handler(Lmethod, methodOopDesc::signature_handler_offset());
__ ld_ptr(signature_handler, G3_scratch);
__ tst(G3_scratch);
__ brx(Assembler::notZero, false, Assembler::pt, L);
__ delayed()->nop();
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
__ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::signature_handler_offset())), G3_scratch);
__ ld_ptr(signature_handler, G3_scratch);
__ bind(L);
}
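
The block above resolves the signature handler lazily: load it, and if it is still null call into the VM and reload. A C++ sketch of that shape (Method and prepare_native_call are illustrative stand-ins):

    #include <cstddef>

    typedef void (*handler_t)();
    struct Method { handler_t signature_handler; };

    void prepare_native_call(Method* m);       // assumed VM entry point

    handler_t resolve_signature_handler(Method* m) {
      if (m->signature_handler == NULL) {      // tst + brx(notZero, ..., L)
        prepare_native_call(m);                // call_VM(...) installs it
      }
      return m->signature_handler;             // reload after the VM call
    }
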
@ -843,10 +841,9 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// Flush the method pointer to the register save area
__ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
__ mov(Llocals, O1);
// calculate where the mirror handle body is allocated in the interpreter frame:
Address mirror(FP, 0, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);
__ add(mirror, O2);
// calculate where the mirror handle body is allocated in the interpreter frame:
__ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);
// Calculate current frame size
__ sub(SP, FP, O3); // Calculate negative of current frame size
@ -883,14 +880,13 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ ld(Laccess_flags, O0);
__ btst(JVM_ACC_STATIC, O0);
__ br( Assembler::zero, false, Assembler::pt, not_static);
__ delayed()->
// get native function entry point(O0 is a good temp until the very end)
ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::native_function_offset())), O0);
// get native function entry point (O0 is a good temp until the very end)
__ delayed()->ld_ptr(Lmethod, in_bytes(methodOopDesc::native_function_offset()), O0);
// for static methods insert the mirror argument
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
__ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc:: constants_offset())), O1);
__ ld_ptr(Address(O1, 0, constantPoolOopDesc::pool_holder_offset_in_bytes()), O1);
__ ld_ptr(Lmethod, methodOopDesc::constants_offset(), O1);
__ ld_ptr(O1, constantPoolOopDesc::pool_holder_offset_in_bytes(), O1);
__ ld_ptr(O1, mirror_offset, O1);
#ifdef ASSERT
if (!PrintSignatureHandlers) // do not dirty the output with this
@ -945,15 +941,13 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ flush_windows();
// mark windows as flushed
Address flags(G2_thread,
0,
in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset()));
Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
__ set(JavaFrameAnchor::flushed, G3_scratch);
__ st(G3_scratch, flags);
// Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.
Address thread_state(G2_thread, 0, in_bytes(JavaThread::thread_state_offset()));
Address thread_state(G2_thread, JavaThread::thread_state_offset());
#ifdef ASSERT
{ Label L;
__ ld(thread_state, G3_scratch);
@ -983,7 +977,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// Block, if necessary, before resuming in _thread_in_Java state.
// In order for GC to work, don't clear the last_Java_sp until after blocking.
{ Label no_block;
Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());
AddressLiteral sync_state(SafepointSynchronize::address_of_state());
// Switch thread to "native transition" state before reading the synchronization state.
// This additional state is necessary because reading and testing the synchronization
@ -1010,10 +1004,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
Label L;
Address suspend_state(G2_thread, 0, in_bytes(JavaThread::suspend_flags_offset()));
__ br(Assembler::notEqual, false, Assembler::pn, L);
__ delayed()->
ld(suspend_state, G3_scratch);
__ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
__ cmp(G3_scratch, 0);
__ br(Assembler::equal, false, Assembler::pt, no_block);
__ delayed()->nop();
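
The branch pair above blocks unless the safepoint state is _not_synchronized and the thread's suspend flags are clear. The decision in C++ (a sketch; enum values are illustrative):

    #include <cstdint>

    enum SafepointState { _not_synchronized = 0, _synchronizing, _synchronized };

    bool must_block(SafepointState sync_state, uint32_t suspend_flags) {
      // br(notEqual, ..., L) on the sync state, then cmp suspend_flags, 0
      return sync_state != _not_synchronized || suspend_flags != 0;
    }
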
@ -1055,7 +1047,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ st(G3_scratch, thread_state);
// reset handle block
__ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), G3_scratch);
__ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
__ st_ptr(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());
// If we have an oop result store it where it will be safe for any further gc
@ -1084,8 +1076,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// handle exceptions (exception handling will handle unlocking!)
{ Label L;
Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
Address exception_addr(G2_thread, Thread::pending_exception_offset());
__ ld_ptr(exception_addr, Gtemp);
__ tst(Gtemp);
__ brx(Assembler::equal, false, Assembler::pt, L);
@ -1171,11 +1162,11 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// make sure registers are different!
assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset()));
const Address size_of_locals (G5_method, 0, in_bytes(methodOopDesc::size_of_locals_offset()));
const Address size_of_parameters(G5_method, methodOopDesc::size_of_parameters_offset());
const Address size_of_locals (G5_method, methodOopDesc::size_of_locals_offset());
// G5_method seems to be live at the point this is used, so we could make this look consistent
// and use it in the asserts.
const Address access_flags (Lmethod, 0, in_bytes(methodOopDesc::access_flags_offset()));
const Address access_flags (Lmethod, methodOopDesc::access_flags_offset());
__ verify_oop(G5_method);
@ -1185,7 +1176,7 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// make sure method is not native & not abstract
// rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
__ ld(G5_method, in_bytes(methodOopDesc::access_flags_offset()), Gtmp1);
__ ld(G5_method, methodOopDesc::access_flags_offset(), Gtmp1);
{
Label L;
__ btst(JVM_ACC_NATIVE, Gtmp1);
@ -1240,8 +1231,8 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
__ delayed()->st_ptr( init_value, O2, 0 );
const Address do_not_unlock_if_synchronized(G2_thread, 0,
in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
const Address do_not_unlock_if_synchronized(G2_thread,
JavaThread::do_not_unlock_if_synchronized_offset());
// Since at this point in the method invocation the exception handler
// would try to exit the monitor of a synchronized method which hasn't
// been entered yet, we set the thread local variable
@ -1717,7 +1708,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
//
Interpreter::_remove_activation_preserving_args_entry = __ pc();
Address popframe_condition_addr (G2_thread, 0, in_bytes(JavaThread::popframe_condition_offset()));
Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
// Set the popframe_processing bit in popframe_condition indicating that we are
// currently handling popframe, so that call_VMs that may happen later do not trigger new
// popframe handling cycles.
@ -1759,7 +1750,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
// Inform deoptimization that it is responsible for restoring these arguments
__ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
Address popframe_condition_addr(G2_thread, 0, in_bytes(JavaThread::popframe_condition_offset()));
Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
__ st(Gtmp1, popframe_condition_addr);
// Return from the current method
@ -1808,7 +1799,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ verify_oop(Oexception);
const int return_reg_adjustment = frame::pc_return_offset;
Address issuing_pc_addr(I7, 0, return_reg_adjustment);
Address issuing_pc_addr(I7, return_reg_adjustment);
// We are done with this activation frame; find out where to go next.
// The continuation point will be an exception handler, which expects
@ -1854,8 +1845,8 @@ address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state
__ empty_expression_stack();
__ load_earlyret_value(state);
__ ld_ptr(Address(G2_thread, 0, in_bytes(JavaThread::jvmti_thread_state_offset())), G3_scratch);
Address cond_addr(G3_scratch, 0, in_bytes(JvmtiThreadState::earlyret_state_offset()));
__ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());
// Clear the earlyret state
__ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);
@ -1922,43 +1913,33 @@ address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
// helpers for generate_and_dispatch
void TemplateInterpreterGenerator::count_bytecode() {
Address c(G3_scratch, (address)&BytecodeCounter::_counter_value);
__ load_contents(c, G4_scratch);
__ inc(G4_scratch);
__ st(G4_scratch, c);
__ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
}
void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
Address bucket( G3_scratch, (address) &BytecodeHistogram::_counters[t->bytecode()] );
__ load_contents(bucket, G4_scratch);
__ inc(G4_scratch);
__ st(G4_scratch, bucket);
__ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
}
void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
address index_addr = (address)&BytecodePairHistogram::_index;
Address index(G3_scratch, index_addr);
address counters_addr = (address)&BytecodePairHistogram::_counters;
Address counters(G3_scratch, counters_addr);
AddressLiteral index (&BytecodePairHistogram::_index);
AddressLiteral counters((address) &BytecodePairHistogram::_counters);
// get index, shift out old bytecode, bring in new bytecode, and store it
// _index = (_index >> log2_number_of_codes) |
// (bytecode << log2_number_of_codes);
__ load_contents( index, G4_scratch );
__ load_contents(index, G4_scratch);
__ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch );
__ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes, G3_scratch );
__ or3( G3_scratch, G4_scratch, G4_scratch );
__ store_contents( G4_scratch, index );
__ store_contents(G4_scratch, index, G3_scratch);
// bump bucket contents
// _counters[_index] ++;
__ load_address( counters ); // loads into G3_scratch
__ set(counters, G3_scratch); // loads into G3_scratch
__ sll( G4_scratch, LogBytesPerWord, G4_scratch ); // Index is word address
__ add (G3_scratch, G4_scratch, G3_scratch); // Add in index
__ ld (G3_scratch, 0, G4_scratch);
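
The index update above keeps a rolling pair of bytecodes: shift the previous one down, OR the new one in at the top, then bump that bucket. The same computation in C++ (array size is an assumption matching log2_number_of_codes = 8):

    #include <cstdint>

    static const int log2_number_of_codes = 8;                 // 256 opcodes
    static uint32_t  _index;
    static uint32_t  _counters[1 << (2 * log2_number_of_codes)];

    void histogram_bytecode_pair(uint8_t bytecode) {
      _index = (_index >> log2_number_of_codes) |
               ((uint32_t)bytecode << log2_number_of_codes);   // rolling pair
      _counters[_index]++;                                     // bump bucket
    }
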
@ -1979,9 +1960,9 @@ void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
void TemplateInterpreterGenerator::stop_interpreter_at() {
Address counter(G3_scratch , (address)&BytecodeCounter::_counter_value);
__ load_contents (counter, G3_scratch );
Address stop_at(G4_scratch, (address)&StopInterpreterAt);
AddressLiteral counter(&BytecodeCounter::_counter_value);
__ load_contents(counter, G3_scratch);
AddressLiteral stop_at(&StopInterpreterAt);
__ load_ptr_contents(stop_at, G4_scratch);
__ cmp(G3_scratch, G4_scratch);
__ breakpoint_trap(Assembler::equal);

View File

@ -1,5 +1,5 @@
/*
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -131,7 +131,7 @@ Assembler::Condition ccNot(TemplateTable::Condition cc) {
Address TemplateTable::at_bcp(int offset) {
assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
return Address( Lbcp, 0, offset);
return Address(Lbcp, offset);
}
@ -217,9 +217,9 @@ void TemplateTable::fconst(int value) {
case 1: p = &one; break;
case 2: p = &two; break;
}
Address a(G3_scratch, (address)p);
__ sethi(a);
__ ldf(FloatRegisterImpl::S, a, Ftos_f);
AddressLiteral a(p);
__ sethi(a, G3_scratch);
__ ldf(FloatRegisterImpl::S, G3_scratch, a.low10(), Ftos_f);
}
@ -232,9 +232,9 @@ void TemplateTable::dconst(int value) {
case 0: p = &zero; break;
case 1: p = &one; break;
}
Address a(G3_scratch, (address)p);
__ sethi(a);
__ ldf(FloatRegisterImpl::D, a, Ftos_d);
AddressLiteral a(p);
__ sethi(a, G3_scratch);
__ ldf(FloatRegisterImpl::D, G3_scratch, a.low10(), Ftos_d);
}
@ -1548,7 +1548,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// non-JSR normal-branch stuff occurring below.
if( is_jsr ) {
// compute return address as bci in Otos_i
__ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::const_offset())), G3_scratch);
__ ld_ptr(Lmethod, methodOopDesc::const_offset(), G3_scratch);
__ sub(Lbcp, G3_scratch, G3_scratch);
__ sub(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()) - (is_wide ? 5 : 3), Otos_i);
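
The two subtractions compute the return address as a bci rather than a machine address: current bcp minus the method's code base, adjusted by the jsr's own length. As C++ (a sketch):

    #include <cstdint>

    int jsr_return_bci(uintptr_t bcp, uintptr_t codes_base, bool is_wide) {
      int bci = (int)(bcp - codes_base);     // sub(Lbcp, G3_scratch, ...)
      return bci + (is_wide ? 5 : 3);        // skip over the jsr itself
    }
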
@ -1665,7 +1665,7 @@ void TemplateTable::ret() {
__ profile_ret(vtos, Otos_i, G4_scratch);
__ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::const_offset())), G3_scratch);
__ ld_ptr(Lmethod, methodOopDesc::const_offset(), G3_scratch);
__ add(G3_scratch, Otos_i, G3_scratch);
__ add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
__ dispatch_next(vtos);
@ -1680,7 +1680,7 @@ void TemplateTable::wide_ret() {
__ profile_ret(vtos, Otos_i, G4_scratch);
__ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::const_offset())), G3_scratch);
__ ld_ptr(Lmethod, methodOopDesc::const_offset(), G3_scratch);
__ add(G3_scratch, Otos_i, G3_scratch);
__ add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
__ dispatch_next(vtos);
@ -1968,8 +1968,8 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
Label resolved;
__ get_cache_and_index_at_bcp(Rcache, index, 1);
__ ld_ptr(Address(Rcache, 0, in_bytes(constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::indices_offset())), Lbyte_code);
__ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::indices_offset(), Lbyte_code);
__ srl( Lbyte_code, shift_count, Lbyte_code );
__ and3( Lbyte_code, 0xFF, Lbyte_code );
@ -2029,11 +2029,11 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
resolve_cache_and_index(byte_no, Rcache, Rscratch);
}
__ ld_ptr(Address(Rcache, 0, method_offset), Rmethod);
__ ld_ptr(Rcache, method_offset, Rmethod);
if (Ritable_index != noreg) {
__ ld_ptr(Address(Rcache, 0, index_offset), Ritable_index);
__ ld_ptr(Rcache, index_offset, Ritable_index);
}
__ ld_ptr(Address(Rcache, 0, flags_offset), Rflags);
__ ld_ptr(Rcache, flags_offset, Rflags);
}
// The Rcache register must be set before call
@ -2047,13 +2047,10 @@ void TemplateTable::load_field_cp_cache_entry(Register Robj,
ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
__ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset +
ConstantPoolCacheEntry::flags_offset())), Rflags);
__ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset +
ConstantPoolCacheEntry::f2_offset())), Roffset);
__ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
__ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
if (is_static) {
__ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset +
ConstantPoolCacheEntry::f1_offset())), Robj);
__ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj);
}
}
@ -2070,9 +2067,7 @@ void TemplateTable::jvmti_post_field_access(Register Rcache,
// the time to call into the VM.
Label Label1;
assert_different_registers(Rcache, index, G1_scratch);
Address get_field_access_count_addr(G1_scratch,
(address)JvmtiExport::get_field_access_count_addr(),
relocInfo::none);
AddressLiteral get_field_access_count_addr(JvmtiExport::get_field_access_count_addr());
__ load_contents(get_field_access_count_addr, G1_scratch);
__ tst(G1_scratch);
__ br(Assembler::zero, false, Assembler::pt, Label1);
@ -2293,7 +2288,7 @@ void TemplateTable::fast_accessfield(TosState state) {
__ get_cache_and_index_at_bcp(Rcache, index, 1);
jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true);
__ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())), Roffset);
__ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
__ null_check(Otos_i);
__ verify_oop(Otos_i);
@ -2304,7 +2299,7 @@ void TemplateTable::fast_accessfield(TosState state) {
Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
if (__ membar_has_effect(membar_bits)) {
// Get volatile flag
__ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())), Rflags);
__ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Rflags);
__ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
}
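
The set/and pair above just tests one bit of the flags word. Equivalent C++ (a sketch; the bit index comes from ConstantPoolCacheEntry::volatileField):

    #include <cstdint>

    bool is_volatile(uintptr_t flags, int volatile_field_bit) {
      return (flags & ((uintptr_t)1 << volatile_field_bit)) != 0;
    }
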
@ -2355,7 +2350,7 @@ void TemplateTable::jvmti_post_fast_field_mod() {
// Check to see if a field modification watch has been set before we take
// the time to call into the VM.
Label done;
Address get_field_modification_count_addr(G4_scratch, (address)JvmtiExport::get_field_modification_count_addr(), relocInfo::none);
AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
__ load_contents(get_field_modification_count_addr, G4_scratch);
__ tst(G4_scratch);
__ br(Assembler::zero, false, Assembler::pt, done);
@ -2408,9 +2403,7 @@ void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool i
// the time to call into the VM.
Label Label1;
assert_different_registers(Rcache, index, G1_scratch);
Address get_field_modification_count_addr(G1_scratch,
(address)JvmtiExport::get_field_modification_count_addr(),
relocInfo::none);
AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
__ load_contents(get_field_modification_count_addr, G1_scratch);
__ tst(G1_scratch);
__ br(Assembler::zero, false, Assembler::pt, Label1);
@ -2433,7 +2426,7 @@ void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool i
// the type to determine where the object is.
Label two_word, valsizeknown;
__ ld_ptr(Address(G1_scratch, 0, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())), Rflags);
__ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
__ mov(Lesp, G4_scratch);
__ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags);
// Make sure we don't need to mask Rflags for tosBits after the above shift
@ -2689,8 +2682,7 @@ void TemplateTable::fast_storefield(TosState state) {
Label notVolatile, checkVolatile, exit;
if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
__ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset +
ConstantPoolCacheEntry::flags_offset())), Rflags);
__ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
__ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
__ and3(Rflags, Lscratch, Lscratch);
if (__ membar_has_effect(read_bits)) {
@ -2702,8 +2694,7 @@ void TemplateTable::fast_storefield(TosState state) {
}
}
__ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset +
ConstantPoolCacheEntry::f2_offset())), Roffset);
__ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
pop_and_check_object(Rclass);
switch (bytecode()) {
@ -2755,7 +2746,7 @@ void TemplateTable::fast_xaccess(TosState state) {
// access constant pool cache (is resolved)
__ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2);
__ ld_ptr(Address(Rcache, 0, in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())), Roffset);
__ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset(), Roffset);
__ add(Lbcp, 1, Lbcp); // needed to report exception at the correct bcp
__ verify_oop(Rreceiver);
@ -2775,7 +2766,7 @@ void TemplateTable::fast_xaccess(TosState state) {
if (__ membar_has_effect(membar_bits)) {
// Get is_volatile value in Rflags and check if membar is needed
__ ld_ptr(Address(Rcache, 0, in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset())), Rflags);
__ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset(), Rflags);
// Test volatile
Label notVolatile;
@ -2853,8 +2844,8 @@ void TemplateTable::invokevirtual(int byte_no) {
__ verify_oop(O0);
// get return address
Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table());
__ load_address(table);
AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
__ set(table, Rtemp);
__ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
// Make sure we don't need to mask Rret for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits();
@ -2886,7 +2877,7 @@ void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
__ verify_oop(G5_method);
// Load receiver from stack slot
__ lduh(Address(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset())), G4_scratch);
__ lduh(G5_method, in_bytes(methodOopDesc::size_of_parameters_offset()), G4_scratch);
__ load_receiver(G4_scratch, O0);
// receiver NULL check
@ -2895,8 +2886,8 @@ void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
__ profile_final_call(O4);
// get return address
Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table());
__ load_address(table);
AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
__ set(table, Rtemp);
__ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
// Make sure we don't need to mask Rret for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits();
@ -2920,7 +2911,7 @@ void TemplateTable::invokespecial(int byte_no) {
__ verify_oop(G5_method);
__ lduh(Address(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset())), G4_scratch);
__ lduh(G5_method, in_bytes(methodOopDesc::size_of_parameters_offset()), G4_scratch);
__ load_receiver(G4_scratch, O0);
// receiver NULL check
@ -2929,8 +2920,8 @@ void TemplateTable::invokespecial(int byte_no) {
__ profile_call(O4);
// get return address
Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table());
__ load_address(table);
AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
__ set(table, Rtemp);
__ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
// Make sure we don't need to mask Rret for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits();
@ -2956,8 +2947,8 @@ void TemplateTable::invokestatic(int byte_no) {
__ profile_call(O4);
// get return address
Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table());
__ load_address(table);
AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
__ set(table, Rtemp);
__ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
// Make sure we don't need to mask Rret for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits();
@ -3021,8 +3012,8 @@ void TemplateTable::invokeinterface(int byte_no) {
__ mov(Rflags, Rret);
// get return address
Address table(Rscratch, (address)Interpreter::return_5_addrs_by_index_table());
__ load_address(table);
AddressLiteral table(Interpreter::return_5_addrs_by_index_table());
__ set(table, Rscratch);
__ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
// Make sure we don't need to mask Rret for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits();
@ -3059,7 +3050,7 @@ void TemplateTable::invokeinterface(int byte_no) {
Label search;
Register Rtemp = Rflags;
__ ld(Address(RklassOop, 0, instanceKlass::vtable_length_offset() * wordSize), Rtemp);
__ ld(RklassOop, instanceKlass::vtable_length_offset() * wordSize, Rtemp);
if (align_object_offset(1) > 1) {
__ round_to(Rtemp, align_object_offset(1));
}
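
round_to rounds the vtable length up to the object-offset alignment; the standard power-of-two rounding it performs, as C++ (a sketch):

    static int round_to(int value, int alignment) {
      // alignment must be a power of two
      return (value + alignment - 1) & ~(alignment - 1);
    }
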
@ -3642,9 +3633,9 @@ void TemplateTable::wide() {
transition(vtos, vtos);
__ ldub(Lbcp, 1, G3_scratch);// get next bc
__ sll(G3_scratch, LogBytesPerWord, G3_scratch);
Address ep(G4_scratch, (address)Interpreter::_wentry_point);
__ load_address(ep);
__ ld_ptr(ep.base(), G3_scratch, G3_scratch);
AddressLiteral ep(Interpreter::_wentry_point);
__ set(ep, G4_scratch);
__ ld_ptr(G4_scratch, G3_scratch, G3_scratch);
__ jmp(G3_scratch, G0);
__ delayed()->nop();
// Note: the Lbcp increment step is part of the individual wide bytecode implementations
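
Dispatch for wide bytecodes indexes a table of entry points by the next bytecode, scales by the word size, loads the handler, and jumps. A C++ sketch (table name and size are assumptions):

    #include <cstdint>

    typedef void (*entry_t)();
    extern entry_t wentry_point[256];     // Interpreter::_wentry_point (assumed)

    void dispatch_wide(const uint8_t* bcp) {
      uint8_t next_bc = bcp[1];           // ldub(Lbcp, 1, G3_scratch)
      wentry_point[next_bc]();            // sll + ld_ptr + jmp
    }
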

View File

@ -1,5 +1,5 @@
/*
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,11 +48,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
#ifndef PRODUCT
if (CountCompiledCalls) {
Address ctr(G5, SharedRuntime::nof_megamorphic_calls_addr());
__ sethi(ctr);
__ ld(ctr, G3_scratch);
__ inc(G3_scratch);
__ st(G3_scratch, ctr);
__ inc_counter(SharedRuntime::nof_megamorphic_calls_addr(), G5, G3_scratch);
}
#endif /* PRODUCT */
@ -154,11 +150,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
#ifndef PRODUCT
if (CountCompiledCalls) {
Address ctr(L0, SharedRuntime::nof_megamorphic_calls_addr());
__ sethi(ctr);
__ ld(ctr, L1);
__ inc(L1);
__ st(L1, ctr);
__ inc_counter(SharedRuntime::nof_megamorphic_calls_addr(), L0, L1);
}
#endif /* PRODUCT */
@ -198,8 +190,8 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
__ delayed()->nop();
__ bind(throw_icce);
Address icce(G3_scratch, StubRoutines::throw_IncompatibleClassChangeError_entry());
__ jump_to(icce, 0);
AddressLiteral icce(StubRoutines::throw_IncompatibleClassChangeError_entry());
__ jump_to(icce, G3_scratch);
__ delayed()->restore();
masm->flush();