6829193: JSR 292 needs to support SPARC

There are unimplemented portions of the HotSpot code for method handles and invokedynamic specific to SPARC.

Reviewed-by: kvn, never, jrose
Christian Thalinger 2010-04-29 06:30:25 -07:00
parent 78e6939c37
commit 6d2a896bcd
14 changed files with 1210 additions and 126 deletions

@@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2333,6 +2333,18 @@ void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) {
#endif
void MacroAssembler::load_sized_value(Address src, Register dst,
size_t size_in_bytes, bool is_signed) {
switch (size_in_bytes) {
case 8: ldx(src, dst); break;
case 4: ld( src, dst); break;
case 2: is_signed ? ldsh(src, dst) : lduh(src, dst); break;
case 1: is_signed ? ldsb(src, dst) : ldub(src, dst); break;
default: ShouldNotReachHere();
}
}
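
As a reference for the dispatch above, here is a standalone C++ model (an illustrative sketch, not HotSpot code) of what load_sized_value produces: a 1-, 2-, 4- or 8-byte load, widened the way the selected SPARC instruction widens it.

#include <cstddef>
#include <cstdint>
#include <cstring>

int64_t load_sized_value_model(const void* src, std::size_t size_in_bytes, bool is_signed) {
  switch (size_in_bytes) {
    case 8: { int64_t v; std::memcpy(&v, src, 8); return v; }           // ldx
    case 4: { int32_t v; std::memcpy(&v, src, 4); return v; }           // ld (used for both signednesses above)
    case 2: {
      if (is_signed) { int16_t v; std::memcpy(&v, src, 2); return v; }  // ldsh: sign-extend
      uint16_t v; std::memcpy(&v, src, 2); return v;                    // lduh: zero-extend
    }
    case 1: {
      if (is_signed) { int8_t v; std::memcpy(&v, src, 1); return v; }   // ldsb: sign-extend
      uint8_t v; std::memcpy(&v, src, 1); return v;                     // ldub: zero-extend
    }
    default: return 0; // ShouldNotReachHere() in the real code
  }
}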
void MacroAssembler::float_cmp( bool is_float, int unordered_result,
FloatRegister Fa, FloatRegister Fb,
Register Rresult) {
@@ -2625,40 +2637,103 @@ RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_ad
}
void MacroAssembler::regcon_inc_ptr( RegisterOrConstant& dest, RegisterOrConstant src, Register temp ) {
assert(dest.register_or_noreg() != G0, "lost side effect");
if ((src.is_constant() && src.as_constant() == 0) ||
(src.is_register() && src.as_register() == G0)) {
// do nothing
} else if (dest.is_register()) {
add(dest.as_register(), ensure_simm13_or_reg(src, temp), dest.as_register());
} else if (src.is_constant()) {
intptr_t res = dest.as_constant() + src.as_constant();
dest = RegisterOrConstant(res); // side effect seen by caller
RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
assert(d.register_or_noreg() != G0, "lost side effect");
if ((s2.is_constant() && s2.as_constant() == 0) ||
(s2.is_register() && s2.as_register() == G0)) {
// Do nothing, just move value.
if (s1.is_register()) {
if (d.is_constant()) d = temp;
mov(s1.as_register(), d.as_register());
return d;
} else {
return s1;
}
}
if (s1.is_register()) {
assert_different_registers(s1.as_register(), temp);
if (d.is_constant()) d = temp;
andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
return d;
} else {
assert(temp != noreg, "cannot handle constant += register");
add(src.as_register(), ensure_simm13_or_reg(dest, temp), temp);
dest = RegisterOrConstant(temp); // side effect seen by caller
if (s2.is_register()) {
assert_different_registers(s2.as_register(), temp);
if (d.is_constant()) d = temp;
set(s1.as_constant(), temp);
andn(temp, s2.as_register(), d.as_register());
return d;
} else {
intptr_t res = s1.as_constant() & ~s2.as_constant();
return res;
}
}
}
void MacroAssembler::regcon_sll_ptr( RegisterOrConstant& dest, RegisterOrConstant src, Register temp ) {
assert(dest.register_or_noreg() != G0, "lost side effect");
if (!is_simm13(src.constant_or_zero()))
src = (src.as_constant() & 0xFF);
if ((src.is_constant() && src.as_constant() == 0) ||
(src.is_register() && src.as_register() == G0)) {
// do nothing
} else if (dest.is_register()) {
sll_ptr(dest.as_register(), src, dest.as_register());
} else if (src.is_constant()) {
intptr_t res = dest.as_constant() << src.as_constant();
dest = RegisterOrConstant(res); // side effect seen by caller
RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
assert(d.register_or_noreg() != G0, "lost side effect");
if ((s2.is_constant() && s2.as_constant() == 0) ||
(s2.is_register() && s2.as_register() == G0)) {
// Do nothing, just move value.
if (s1.is_register()) {
if (d.is_constant()) d = temp;
mov(s1.as_register(), d.as_register());
return d;
} else {
return s1;
}
}
if (s1.is_register()) {
assert_different_registers(s1.as_register(), temp);
if (d.is_constant()) d = temp;
add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
return d;
} else {
assert(temp != noreg, "cannot handle constant <<= register");
set(dest.as_constant(), temp);
sll_ptr(temp, src, temp);
dest = RegisterOrConstant(temp); // side effect seen by caller
if (s2.is_register()) {
assert_different_registers(s2.as_register(), temp);
if (d.is_constant()) d = temp;
add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register());
return d;
} else {
intptr_t res = s1.as_constant() + s2.as_constant();
return res;
}
}
}
RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
assert(d.register_or_noreg() != G0, "lost side effect");
if (!is_simm13(s2.constant_or_zero()))
s2 = (s2.as_constant() & 0xFF);
if ((s2.is_constant() && s2.as_constant() == 0) ||
(s2.is_register() && s2.as_register() == G0)) {
// Do nothing, just move value.
if (s1.is_register()) {
if (d.is_constant()) d = temp;
mov(s1.as_register(), d.as_register());
return d;
} else {
return s1;
}
}
if (s1.is_register()) {
assert_different_registers(s1.as_register(), temp);
if (d.is_constant()) d = temp;
sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
return d;
} else {
if (s2.is_register()) {
assert_different_registers(s2.as_register(), temp);
if (d.is_constant()) d = temp;
set(s1.as_constant(), temp);
sll_ptr(temp, s2.as_register(), d.as_register());
return d;
} else {
intptr_t res = s1.as_constant() << s2.as_constant();
return res;
}
}
}
@@ -2708,8 +2783,8 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
// Adjust recv_klass by scaled itable_index, so we can free itable_index.
RegisterOrConstant itable_offset = itable_index;
regcon_sll_ptr(itable_offset, exact_log2(itableMethodEntry::size() * wordSize));
regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes());
itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset);
itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset);
add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass);
// for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
@@ -2805,7 +2880,7 @@ void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
assert_different_registers(sub_klass, super_klass, temp_reg);
if (super_check_offset.is_register()) {
assert_different_registers(sub_klass, super_klass,
assert_different_registers(sub_klass, super_klass, temp_reg,
super_check_offset.as_register());
} else if (must_load_sco) {
assert(temp2_reg != noreg, "supply either a temp or a register offset");
@@ -2855,6 +2930,8 @@ void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
// The super check offset is always positive...
lduw(super_klass, sco_offset, temp2_reg);
super_check_offset = RegisterOrConstant(temp2_reg);
// super_check_offset is now a register.
assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register());
}
ld_ptr(sub_klass, super_check_offset, temp_reg);
cmp(super_klass, temp_reg);
@@ -3014,11 +3091,10 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
}
void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
Register temp_reg,
Label& wrong_method_type) {
if (UseCompressedOops) unimplemented("coop"); // field accesses must decode
assert_different_registers(mtype_reg, mh_reg, temp_reg);
// compare method type against that of the receiver
RegisterOrConstant mhtype_offset = delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg);
@@ -3029,10 +3105,33 @@ void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_re
}
void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg) {
// A method handle has a "vmslots" field which gives the size of its
// argument list in JVM stack slots. This field is either located directly
// in every method handle, or else is indirectly accessed through the
// method handle's MethodType. This macro hides the distinction.
void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
Register temp_reg) {
assert_different_registers(vmslots_reg, mh_reg, temp_reg);
if (UseCompressedOops) unimplemented("coop"); // field accesses must decode
// load mh.type.form.vmslots
if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) {
// hoist vmslots into every mh to avoid dependent load chain
ld( Address(mh_reg, delayed_value(java_dyn_MethodHandle::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
} else {
Register temp2_reg = vmslots_reg;
ld_ptr(Address(mh_reg, delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)), temp2_reg);
ld_ptr(Address(temp2_reg, delayed_value(java_dyn_MethodType::form_offset_in_bytes, temp_reg)), temp2_reg);
ld( Address(temp2_reg, delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
}
}
void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop) {
assert(mh_reg == G3_method_handle, "caller must put MH object in G3");
assert_different_registers(mh_reg, temp_reg);
if (UseCompressedOops) unimplemented("coop"); // field accesses must decode
// pick out the interpreted side of the handler
ld_ptr(mh_reg, delayed_value(java_dyn_MethodHandle::vmentry_offset_in_bytes, temp_reg), temp_reg);
@@ -3043,17 +3142,18 @@ void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_
// for the various stubs which take control at this point,
// see MethodHandles::generate_method_handle_stub
// (Can any caller use this delay slot? If so, add an option for suppression.)
delayed()->nop();
// Some callers can fill the delay slot.
if (emit_delayed_nop) {
delayed()->nop();
}
}
RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
int extra_slot_offset) {
// cf. TemplateTable::prepare_invoke(), if (load_receiver).
int stackElementSize = Interpreter::stackElementWords() * wordSize;
int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
assert(offset1 - offset == stackElementSize, "correct arithmetic");
int stackElementSize = Interpreter::stackElementSize();
int offset = extra_slot_offset * stackElementSize;
if (arg_slot.is_constant()) {
offset += arg_slot.as_constant() * stackElementSize;
return offset;
@@ -3067,6 +3167,11 @@ RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
}
Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
int extra_slot_offset) {
return Address(Gargs, argument_offset(arg_slot, extra_slot_offset));
}
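
The constant path of argument_offset reduces to simple arithmetic: tos[arg_slot] lives (arg_slot + extra_slot_offset) stack elements above Gargs. A small worked model (a sketch, assuming an 8-byte stack element for illustration):

#include <cstdio>

// Model of the constant case of argument_offset above.
int argument_offset_model(int arg_slot, int extra_slot_offset, int stack_element_size) {
  return (extra_slot_offset + arg_slot) * stack_element_size;
}

int main() {
  // Assuming Interpreter::stackElementSize() == 8 (64-bit VM) for illustration.
  std::printf("%d\n", argument_offset_model(2, /*extra*/ 0, 8)); // prints 16
  return 0;
}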
void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
Register temp_reg,

@@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1380,24 +1380,25 @@ public:
// pp 181
void and3( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 ) | rs1(s1) | rs2(s2) ); }
void and3( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void and3( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 ) | rs1(s1) | rs2(s2) ); }
void and3( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void andcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
void andcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void andn( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 ) | rs1(s1) | rs2(s2) ); }
void andn( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void andn( Register s1, RegisterOrConstant s2, Register d);
void andncc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
void andncc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void or3( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 ) | rs1(s1) | rs2(s2) ); }
void or3( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void or3( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 ) | rs1(s1) | rs2(s2) ); }
void or3( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void orcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
void orcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void orn( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | rs2(s2) ); }
void orn( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void orncc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
void orncc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void xor3( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 ) | rs1(s1) | rs2(s2) ); }
void xor3( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void xor3( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 ) | rs1(s1) | rs2(s2) ); }
void xor3( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void xorcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
void xorcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void xnor( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xnor_op3 ) | rs1(s1) | rs2(s2) ); }
@@ -2026,8 +2027,8 @@ public:
inline void st_ptr(Register d, Register s1, ByteSize simm13a);
#endif
// ld_long will perform ld for 32 bit VM's and ldx for 64 bit VM's
// st_long will perform st for 32 bit VM's and stx for 64 bit VM's
// ld_long will perform ldd for 32 bit VM's and ldx for 64 bit VM's
// st_long will perform std for 32 bit VM's and stx for 64 bit VM's
inline void ld_long(Register s1, Register s2, Register d);
inline void ld_long(Register s1, int simm13a, Register d);
inline void ld_long(Register s1, RegisterOrConstant s2, Register d);
@@ -2038,23 +2039,19 @@ public:
inline void st_long(Register d, const Address& a, int offset = 0);
// Helpers for address formation.
// They update the dest in place, whether it is a register or constant.
// They emit no code at all if src is a constant zero.
// If dest is a constant and src is a register, the temp argument
// is required, and becomes the result.
// If dest is a register and src is a non-simm13 constant,
// the temp argument is required, and is used to materialize the constant.
void regcon_inc_ptr( RegisterOrConstant& dest, RegisterOrConstant src,
Register temp = noreg );
void regcon_sll_ptr( RegisterOrConstant& dest, RegisterOrConstant src,
Register temp = noreg );
// - They emit only a move if s2 is a constant zero.
// - If dest is a constant and either s1 or s2 is a register, the temp argument is required and becomes the result.
// - If dest is a register and either s1 or s2 is a non-simm13 constant, the temp argument is required and used to materialize the constant.
RegisterOrConstant regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
RegisterOrConstant regcon_inc_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
RegisterOrConstant regcon_sll_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
RegisterOrConstant ensure_simm13_or_reg(RegisterOrConstant roc, Register Rtemp) {
guarantee(Rtemp != noreg, "constant offset overflow");
if (is_simm13(roc.constant_or_zero()))
return roc; // register or short constant
set(roc.as_constant(), Rtemp);
return RegisterOrConstant(Rtemp);
RegisterOrConstant ensure_simm13_or_reg(RegisterOrConstant src, Register temp) {
if (is_simm13(src.constant_or_zero()))
return src; // register or short constant
guarantee(temp != noreg, "constant offset overflow");
set(src.as_constant(), temp);
return temp;
}
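
ensure_simm13_or_reg hinges on SPARC's 13-bit signed immediate field: constants in [-4096, 4095] encode directly into an arithmetic instruction, while anything wider must first be materialized into a register with set(). A minimal standalone sketch of the range test:

#include <cassert>
#include <cstdint>

// SPARC simm13: a signed 13-bit immediate, i.e. -4096..4095 inclusive.
bool is_simm13_model(intptr_t v) {
  return v >= -4096 && v <= 4095;
}

int main() {
  assert( is_simm13_model(4095));  // fits: usable as an immediate operand
  assert(!is_simm13_model(4096));  // too wide: set(constant, temp) is needed
  return 0;
}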
// --------------------------------------------------
@@ -2303,6 +2300,9 @@ public:
void lcmp( Register Ra, Register Rb, Register Rresult);
#endif
// Loading values by size and signed-ness
void load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed);
void float_cmp( bool is_float, int unordered_result,
FloatRegister Fa, FloatRegister Fb,
Register Rresult);
@@ -2421,12 +2421,16 @@ public:
void check_method_handle_type(Register mtype_reg, Register mh_reg,
Register temp_reg,
Label& wrong_method_type);
void jump_to_method_handle_entry(Register mh_reg, Register temp_reg);
void load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
Register temp_reg);
void jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop = true);
// offset relative to Gargs of argument at tos[arg_slot].
// (arg_slot == 0 means the last argument, not the first).
RegisterOrConstant argument_offset(RegisterOrConstant arg_slot,
int extra_slot_offset = 0);
// Address of Gargs and argument_offset.
Address argument_address(RegisterOrConstant arg_slot,
int extra_slot_offset = 0);
// Stack overflow checking

@@ -206,12 +206,17 @@ inline void Assembler::ld( Register s1, RegisterOrConstant s2, Register d) { ld
inline void Assembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }
// form effective addresses this way:
inline void Assembler::add( Register s1, RegisterOrConstant s2, Register d, int offset) {
if (s2.is_register()) add(s1, s2.as_register(), d);
inline void Assembler::add(Register s1, RegisterOrConstant s2, Register d, int offset) {
if (s2.is_register()) add(s1, s2.as_register(), d);
else { add(s1, s2.as_constant() + offset, d); offset = 0; }
if (offset != 0) add(d, offset, d);
}
inline void Assembler::andn(Register s1, RegisterOrConstant s2, Register d) {
if (s2.is_register()) andn(s1, s2.as_register(), d);
else andn(s1, s2.as_constant(), d);
}
inline void Assembler::ldstub( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldstub( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

@@ -814,22 +814,39 @@ void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(
}
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset) {
void InterpreterMacroAssembler::get_cache_index_at_bcp(Register cache, Register tmp,
int bcp_offset, bool giant_index) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
if (!giant_index) {
get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
} else {
assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
get_4_byte_integer_at_bcp(bcp_offset, cache, tmp);
assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
xor3(tmp, -1, tmp); // convert to plain index
}
}
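
The assert above pins down the encoding this relies on: an invokedynamic call site stores its 4-byte cache index as a bitwise complement (a "secondary" index), so xor3(tmp, -1, tmp) decodes it in a single instruction. A minimal model:

#include <cassert>

// Secondary ("giant") index encoding: store ~index, decode with another ~.
// xor3(tmp, -1, tmp) computes exactly tmp ^ -1 == ~tmp.
int decode_secondary_index_model(int encoded) { return ~encoded; }

int main() {
  assert(decode_secondary_index_model(~123) == 123); // matches the assert above
  return 0;
}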
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp,
int bcp_offset, bool giant_index) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
assert_different_registers(cache, tmp);
assert_not_delayed();
get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
// convert from field index to ConstantPoolCacheEntry index
// and from word index to byte offset
get_cache_index_at_bcp(cache, tmp, bcp_offset, giant_index);
// convert from field index to ConstantPoolCacheEntry index and from
// word index to byte offset
sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
add(LcpoolCache, tmp, cache);
}
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset) {
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
int bcp_offset, bool giant_index) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
assert_different_registers(cache, tmp);
assert_not_delayed();
assert(!giant_index, "NYI");
get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
// convert from field index to ConstantPoolCacheEntry index
// and from word index to byte offset
@@ -1675,15 +1692,31 @@ void InterpreterMacroAssembler::profile_final_call(Register scratch) {
// Count a virtual call in the bytecodes.
void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
Register scratch) {
Register scratch,
bool receiver_can_be_null) {
if (ProfileInterpreter) {
Label profile_continue;
// If no method data exists, go to profile_continue.
test_method_data_pointer(profile_continue);
Label skip_receiver_profile;
if (receiver_can_be_null) {
Label not_null;
tst(receiver);
brx(Assembler::notZero, false, Assembler::pt, not_null);
delayed()->nop();
// We are making a call. Increment the count for null receiver.
increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
ba(false, skip_receiver_profile);
delayed()->nop();
bind(not_null);
}
// Record the receiver type.
record_klass_in_profile(receiver, scratch, true);
bind(skip_receiver_profile);
// The method data pointer needs to be updated to reflect the new target.
update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));

@@ -191,8 +191,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
Register Rdst,
setCCOrNot should_set_CC = dont_set_CC );
void get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset);
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset);
void get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
void get_cache_index_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
// common code
@@ -304,7 +305,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
void profile_not_taken_branch(Register scratch);
void profile_call(Register scratch);
void profile_final_call(Register scratch);
void profile_virtual_call(Register receiver, Register scratch);
void profile_virtual_call(Register receiver, Register scratch, bool receiver_can_be_null = false);
void profile_ret(TosState state, Register return_bci, Register scratch);
void profile_null_seen(Register scratch);
void profile_typecheck(Register klass, Register scratch);

@@ -235,19 +235,17 @@ address InterpreterGenerator::generate_abstract_entry(void) {
}
// Method handle invoker
// Dispatch a method of the form java.dyn.MethodHandles::invoke(...)
address InterpreterGenerator::generate_method_handle_entry(void) {
if (!EnableMethodHandles) {
return generate_abstract_entry();
}
return generate_abstract_entry(); //6815692//
return MethodHandles::generate_method_handle_interpreter_entry(_masm);
}
//----------------------------------------------------------------------------------------------------
// Entry points & stack frame layout
//

@@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2008-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,9 @@
address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
address interpreted_entry) {
// Just before the actual machine code entry point, allocate space
// for a MethodHandleEntry::Data record, so that we can manage everything
// from one base pointer.
__ align(wordSize);
address target = __ pc() + sizeof(Data);
while (__ pc() < target) {
@@ -59,12 +62,876 @@ MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _mas
// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
ShouldNotReachHere(); //NYI, 6815692
return NULL;
// I5_savedSP: sender SP (must preserve)
// G4 (Gargs): incoming argument list (must preserve)
// G5_method: invoke methodOop; becomes method type.
// G3_method_handle: receiver method handle (must load from sp[MethodTypeForm.vmslots])
// O0, O1: garbage temps, blown away
Register O0_argslot = O0;
Register O1_scratch = O1;
// emit WrongMethodType path first, to enable back-branch from main path
Label wrong_method_type;
__ bind(wrong_method_type);
__ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch);
__ delayed()->nop();
// here's where control starts out:
__ align(CodeEntryAlignment);
address entry_point = __ pc();
// fetch the MethodType from the method handle into G5_method_type
{
Register tem = G5_method;
assert(tem == G5_method_type, "yes, it's the same register");
for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
__ ld_ptr(Address(tem, *pchase), G5_method_type);
}
}
// given the MethodType, find out where the MH argument is buried
__ ld_ptr(Address(G5_method_type, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, O1_scratch)), O0_argslot);
__ ldsw( Address(O0_argslot, __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O0_argslot);
__ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
__ check_method_handle_type(G5_method_type, G3_method_handle, O1_scratch, wrong_method_type);
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
return entry_point;
}
#ifdef ASSERT
static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) {
// Verify that argslot lies within (Gargs, FP].
Label L_ok, L_bad;
#ifdef _LP64
__ add(FP, STACK_BIAS, temp_reg);
__ cmp(argslot_reg, temp_reg);
#else
__ cmp(argslot_reg, FP);
#endif
__ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
__ delayed()->nop();
__ cmp(Gargs, argslot_reg);
__ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
__ delayed()->nop();
__ bind(L_bad);
__ stop(error_message);
__ bind(L_ok);
}
#endif
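
Put differently, verify_argslot checks that the slot pointer lies inside the interpreter's outgoing-argument area, between Gargs and FP (with the 64-bit SPARC stack bias added to FP). A sketch of the range test, with the bias value stated here as an assumption:

#include <cstdint>

// Model of verify_argslot: the slot must lie within [Gargs, FP + bias].
// stack_bias models STACK_BIAS: 2047 on 64-bit SPARC, 0 on 32-bit.
bool argslot_ok_model(uintptr_t argslot, uintptr_t gargs,
                      uintptr_t fp, uintptr_t stack_bias) {
  return gargs <= argslot && argslot <= fp + stack_bias;
}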
// Helper to insert argument slots into the stack.
// arg_slots must be a multiple of stack_move_unit() and <= 0
void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
int arg_mask,
Register argslot_reg,
Register temp_reg, Register temp2_reg, Register temp3_reg) {
assert(temp3_reg != noreg, "temp3 required");
assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
(!arg_slots.is_register() ? Gargs : arg_slots.as_register()));
#ifdef ASSERT
verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame");
if (arg_slots.is_register()) {
Label L_ok, L_bad;
__ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
__ br(Assembler::greater, false, Assembler::pn, L_bad);
__ delayed()->nop();
__ btst(-stack_move_unit() - 1, arg_slots.as_register());
__ br(Assembler::zero, false, Assembler::pt, L_ok);
__ delayed()->nop();
__ bind(L_bad);
__ stop("assert arg_slots <= 0 and clear low bits");
__ bind(L_ok);
} else {
assert(arg_slots.as_constant() <= 0, "");
assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
}
#endif // ASSERT
#ifdef _LP64
if (arg_slots.is_register()) {
// Was arg_slots register loaded as signed int?
Label L_ok;
__ sll(arg_slots.as_register(), BitsPerInt, temp_reg);
__ sra(temp_reg, BitsPerInt, temp_reg);
__ cmp(arg_slots.as_register(), temp_reg);
__ br(Assembler::equal, false, Assembler::pt, L_ok);
__ delayed()->nop();
__ stop("arg_slots register not loaded as signed int");
__ bind(L_ok);
}
#endif
// Make space on the stack for the inserted argument(s).
// Then pull down everything shallower than argslot_reg.
// The stacked return address gets pulled down with everything else.
// That is, copy [sp, argslot) downward by -size words. In pseudo-code:
// sp -= size;
// for (temp = sp + size; temp < argslot; temp++)
// temp[-size] = temp[0]
// argslot -= size;
RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);
// Keep the stack pointer 2*wordSize aligned.
const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
__ add(SP, masked_offset, SP);
__ mov(Gargs, temp_reg); // source pointer for copy
__ add(Gargs, offset, Gargs);
{
Label loop;
__ bind(loop);
// pull one word down each time through the loop
__ ld_ptr(Address(temp_reg, 0), temp2_reg);
__ st_ptr(temp2_reg, Address(temp_reg, offset));
__ add(temp_reg, wordSize, temp_reg);
__ cmp(temp_reg, argslot_reg);
__ brx(Assembler::less, false, Assembler::pt, loop);
__ delayed()->nop(); // FILLME
}
// Now move the argslot down, to point to the opened-up space.
__ add(argslot_reg, offset, argslot_reg);
}
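
A plain-C++ rendering of the pseudo-code above (a model of the data movement only, not the emitted SPARC loop): every word shallower than argslot slides down by size cells, opening the gap.

#include <cstddef>

// Model of insert_arg_slots' copy loop: cells are indexed by address/wordSize,
// so lower indices are closer to SP. Copy [sp, argslot) down by `size` cells.
void insert_slots_model(long* cell, std::size_t sp, std::size_t argslot, std::size_t size) {
  for (std::size_t i = sp; i < argslot; i++) {
    cell[i - size] = cell[i];  // temp[-size] = temp[0]
  }
  // The caller then uses sp - size as the new SP; the opened gap ends at argslot.
}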
// Helper to remove argument slots from the stack.
// arg_slots must be a multiple of stack_move_unit() and >= 0
void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
Register argslot_reg,
Register temp_reg, Register temp2_reg, Register temp3_reg) {
assert(temp3_reg != noreg, "temp3 required");
assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
(!arg_slots.is_register() ? Gargs : arg_slots.as_register()));
RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);
#ifdef ASSERT
// Verify that [argslot..argslot+size) lies within (Gargs, FP).
__ add(argslot_reg, offset, temp2_reg);
verify_argslot(_masm, temp2_reg, temp_reg, "deleted argument(s) must fall within current frame");
if (arg_slots.is_register()) {
Label L_ok, L_bad;
__ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
__ br(Assembler::less, false, Assembler::pn, L_bad);
__ delayed()->nop();
__ btst(-stack_move_unit() - 1, arg_slots.as_register());
__ br(Assembler::zero, false, Assembler::pt, L_ok);
__ delayed()->nop();
__ bind(L_bad);
__ stop("assert arg_slots >= 0 and clear low bits");
__ bind(L_ok);
} else {
assert(arg_slots.as_constant() >= 0, "");
assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
}
#endif // ASSERT
// Pull up everything shallower than argslot.
// Then remove the excess space on the stack.
// The stacked return address gets pulled up with everything else.
// That is, copy [sp, argslot) upward by size words. In pseudo-code:
// for (temp = argslot-1; temp >= sp; --temp)
// temp[size] = temp[0]
// argslot += size;
// sp += size;
__ sub(argslot_reg, wordSize, temp_reg); // source pointer for copy
{
Label loop;
__ bind(loop);
// pull one word up each time through the loop
__ ld_ptr(Address(temp_reg, 0), temp2_reg);
__ st_ptr(temp2_reg, Address(temp_reg, offset));
__ sub(temp_reg, wordSize, temp_reg);
__ cmp(temp_reg, Gargs);
__ brx(Assembler::greaterEqual, false, Assembler::pt, loop);
__ delayed()->nop(); // FILLME
}
// Now move the argslot up, to point to the just-copied block.
__ add(Gargs, offset, Gargs);
// And adjust the argslot address to point at the deletion point.
__ add(argslot_reg, offset, argslot_reg);
// Keep the stack pointer 2*wordSize aligned.
const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
__ add(SP, masked_offset, SP);
}
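
Both helpers round the SP adjustment through regcon_andn_ptr so that the stack pointer only ever moves by whole 2*wordSize units. A worked example, assuming 8-byte words for illustration:

#include <cstdio>

int main() {
  const int LogBytesPerWord = 3;                                      // assuming 8-byte words
  const int TwoWordAlignmentMask = (1 << (LogBytesPerWord + 1)) - 1;  // right_n_bits(4) == 0xF
  int offset = 3 * 8;                                                 // three one-word slots = 24 bytes
  int masked_offset = offset & ~TwoWordAlignmentMask;                 // regcon_andn_ptr(offset, mask, ...)
  std::printf("%d\n", masked_offset);                                 // 16: SP moves in 16-byte units
  return 0;
}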
#ifndef PRODUCT
extern "C" void print_method_handle(oop mh);
void trace_method_handle_stub(const char* adaptername,
oop mh) {
#if 0
intptr_t* entry_sp,
intptr_t* saved_sp,
intptr_t* saved_bp) {
// called as a leaf from native code: do not block the JVM!
intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
intptr_t* base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
printf("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT"\n",
adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
if (last_sp != saved_sp)
printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
#endif
printf("MH %s mh="INTPTR_FORMAT"\n", adaptername, (intptr_t) mh);
print_method_handle(mh);
}
#endif // PRODUCT
//------------------------------------------------------------------------------
// MethodHandles::generate_method_handle_stub
//
// Generate an "entry" field for a method handle.
// This determines how the method handle will respond to calls.
void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
ShouldNotReachHere(); //NYI, 6815692
// Here is the register state during an interpreted call,
// as set up by generate_method_handle_interpreter_entry():
// - G5: garbage temp (was MethodHandle.invoke methodOop, unused)
// - G3: receiver method handle
// - O5_savedSP: sender SP (must preserve)
Register O0_argslot = O0;
Register O1_scratch = O1;
Register O2_scratch = O2;
Register O3_scratch = O3;
Register G5_index = G5;
guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
// Some handy addresses:
Address G5_method_fie( G5_method, in_bytes(methodOopDesc::from_interpreted_offset()));
Address G3_mh_vmtarget( G3_method_handle, java_dyn_MethodHandle::vmtarget_offset_in_bytes());
Address G3_dmh_vmindex( G3_method_handle, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes());
Address G3_bmh_vmargslot( G3_method_handle, sun_dyn_BoundMethodHandle::vmargslot_offset_in_bytes());
Address G3_bmh_argument( G3_method_handle, sun_dyn_BoundMethodHandle::argument_offset_in_bytes());
Address G3_amh_vmargslot( G3_method_handle, sun_dyn_AdapterMethodHandle::vmargslot_offset_in_bytes());
Address G3_amh_argument ( G3_method_handle, sun_dyn_AdapterMethodHandle::argument_offset_in_bytes());
Address G3_amh_conversion(G3_method_handle, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes());
const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
if (have_entry(ek)) {
__ nop(); // empty stubs make SG sick
return;
}
address interp_entry = __ pc();
if (UseCompressedOops) __ unimplemented("UseCompressedOops");
#ifndef PRODUCT
if (TraceMethodHandles) {
// save: Gargs, O5_savedSP
__ save(SP, -16*wordSize, SP);
__ set((intptr_t) entry_name(ek), O0);
__ mov(G3_method_handle, O1);
__ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, trace_method_handle_stub));
__ restore(SP, 16*wordSize, SP);
}
#endif // PRODUCT
switch ((int) ek) {
case _raise_exception:
{
// Not a real MH entry, but rather shared code for raising an
// exception. Extra local arguments are passed in scratch
// registers, as required type in O3, failing object (or NULL)
// in O2, failing bytecode type in O1.
__ mov(O5_savedSP, SP); // Cut the stack back to where the caller started.
// Push arguments as if coming from the interpreter.
Register O0_scratch = O0_argslot;
int stackElementSize = Interpreter::stackElementSize();
// Make space on the stack for the arguments.
__ sub(SP, 4*stackElementSize, SP);
__ sub(Gargs, 3*stackElementSize, Gargs);
//__ sub(Lesp, 3*stackElementSize, Lesp);
// void raiseException(int code, Object actual, Object required)
__ st( O1_scratch, Address(Gargs, 2*stackElementSize)); // code
__ st_ptr(O2_scratch, Address(Gargs, 1*stackElementSize)); // actual
__ st_ptr(O3_scratch, Address(Gargs, 0*stackElementSize)); // required
Label no_method;
// FIXME: fill in _raise_exception_method with a suitable sun.dyn method
__ set(AddressLiteral((address) &_raise_exception_method), G5_method);
__ ld_ptr(Address(G5_method, 0), G5_method);
__ tst(G5_method);
__ brx(Assembler::zero, false, Assembler::pn, no_method);
__ delayed()->nop();
int jobject_oop_offset = 0;
__ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method);
__ tst(G5_method);
__ brx(Assembler::zero, false, Assembler::pn, no_method);
__ delayed()->nop();
__ verify_oop(G5_method);
__ jump_indirect_to(G5_method_fie, O1_scratch);
__ delayed()->nop();
// If we get here, the Java runtime did not do its job of creating the exception.
// Do something that at least causes a valid throw from the interpreter.
__ bind(no_method);
__ unimplemented("_raise_exception no method");
}
break;
case _invokestatic_mh:
case _invokespecial_mh:
{
__ ld_ptr(G3_mh_vmtarget, G5_method); // target is a methodOop
__ verify_oop(G5_method);
// Same as TemplateTable::invokestatic or invokespecial,
// minus the CP setup and profiling:
if (ek == _invokespecial_mh) {
// Must load & check the first argument before entering the target method.
__ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
__ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
__ null_check(G3_method_handle);
__ verify_oop(G3_method_handle);
}
__ jump_indirect_to(G5_method_fie, O1_scratch);
__ delayed()->nop();
}
break;
case _invokevirtual_mh:
{
// Same as TemplateTable::invokevirtual,
// minus the CP setup and profiling:
// Pick out the vtable index and receiver offset from the MH,
// and then we can discard it:
__ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
__ ldsw(G3_dmh_vmindex, G5_index);
// Note: The verifier allows us to ignore G3_mh_vmtarget.
__ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
__ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
// Get receiver klass:
Register O0_klass = O0_argslot;
__ load_klass(G3_method_handle, O0_klass);
__ verify_oop(O0_klass);
// Get target methodOop & entry point:
const int base = instanceKlass::vtable_start_offset() * wordSize;
assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
__ sll_ptr(G5_index, LogBytesPerWord, G5_index);
__ add(O0_klass, G5_index, O0_klass);
Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes());
__ ld_ptr(vtable_entry_addr, G5_method);
__ verify_oop(G5_method);
__ jump_indirect_to(G5_method_fie, O1_scratch);
__ delayed()->nop();
}
break;
case _invokeinterface_mh:
{
// Same as TemplateTable::invokeinterface,
// minus the CP setup and profiling:
__ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
Register O1_intf = O1_scratch;
__ ld_ptr(G3_mh_vmtarget, O1_intf);
__ ldsw(G3_dmh_vmindex, G5_index);
__ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
__ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
// Get receiver klass:
Register O0_klass = O0_argslot;
__ load_klass(G3_method_handle, O0_klass);
__ verify_oop(O0_klass);
// Get interface:
Label no_such_interface;
__ verify_oop(O1_intf);
__ lookup_interface_method(O0_klass, O1_intf,
// Note: next two args must be the same:
G5_index, G5_method,
O2_scratch,
O3_scratch,
no_such_interface);
__ verify_oop(G5_method);
__ jump_indirect_to(G5_method_fie, O1_scratch);
__ delayed()->nop();
__ bind(no_such_interface);
// Throw an exception.
// For historical reasons, it will be IncompatibleClassChangeError.
__ unimplemented("not tested yet");
__ ld_ptr(Address(O1_intf, java_mirror_offset), O3_scratch); // required interface
__ mov(O0_klass, O2_scratch); // bad receiver
__ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
__ delayed()->mov(Bytecodes::_invokeinterface, O1_scratch); // who is complaining?
}
break;
case _bound_ref_mh:
case _bound_int_mh:
case _bound_long_mh:
case _bound_ref_direct_mh:
case _bound_int_direct_mh:
case _bound_long_direct_mh:
{
const bool direct_to_method = (ek >= _bound_ref_direct_mh);
BasicType arg_type = T_ILLEGAL;
int arg_mask = _INSERT_NO_MASK;
int arg_slots = -1;
get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);
// Make room for the new argument:
__ ldsw(G3_bmh_vmargslot, O0_argslot);
__ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, O0_argslot, O1_scratch, O2_scratch, G5_index);
// Store bound argument into the new stack slot:
__ ld_ptr(G3_bmh_argument, O1_scratch);
if (arg_type == T_OBJECT) {
__ st_ptr(O1_scratch, Address(O0_argslot, 0));
} else {
Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type));
__ load_sized_value(prim_value_addr, O2_scratch, type2aelembytes(arg_type), is_signed_subword_type(arg_type));
if (arg_slots == 2) {
__ unimplemented("not yet tested");
#ifndef _LP64
__ signx(O2_scratch, O3_scratch); // Sign extend
#endif
__ st_long(O2_scratch, Address(O0_argslot, 0)); // Uses O2/O3 on !_LP64
} else {
__ st_ptr( O2_scratch, Address(O0_argslot, 0));
}
}
if (direct_to_method) {
__ ld_ptr(G3_mh_vmtarget, G5_method); // target is a methodOop
__ verify_oop(G5_method);
__ jump_indirect_to(G5_method_fie, O1_scratch);
__ delayed()->nop();
} else {
__ ld_ptr(G3_mh_vmtarget, G3_method_handle); // target is a methodOop
__ verify_oop(G3_method_handle);
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
}
}
break;
case _adapter_retype_only:
case _adapter_retype_raw:
// Immediately jump to the next MH layer:
__ ld_ptr(G3_mh_vmtarget, G3_method_handle);
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
// This is OK when all parameter types widen.
// It is also OK when a return type narrows.
break;
case _adapter_check_cast:
{
// Temps:
Register G5_klass = G5_index; // Interesting AMH data.
// Check a reference argument before jumping to the next layer of MH:
__ ldsw(G3_amh_vmargslot, O0_argslot);
Address vmarg = __ argument_address(O0_argslot);
// What class are we casting to?
__ ld_ptr(G3_amh_argument, G5_klass); // This is a Class object!
__ ld_ptr(Address(G5_klass, java_lang_Class::klass_offset_in_bytes()), G5_klass);
Label done;
__ ld_ptr(vmarg, O1_scratch);
__ tst(O1_scratch);
__ brx(Assembler::zero, false, Assembler::pn, done); // No cast if null.
__ delayed()->nop();
__ load_klass(O1_scratch, O1_scratch);
// Live at this point:
// - G5_klass : klass required by the target method
// - O1_scratch : argument klass to test
// - G3_method_handle: adapter method handle
__ check_klass_subtype(O1_scratch, G5_klass, O0_argslot, O2_scratch, done);
// If we get here, the type check failed!
__ ldsw(G3_amh_vmargslot, O0_argslot); // reload argslot field
__ ld_ptr(G3_amh_argument, O3_scratch); // required class
__ ld_ptr(vmarg, O2_scratch); // bad object
__ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
__ delayed()->mov(Bytecodes::_checkcast, O1_scratch); // who is complaining?
__ bind(done);
// Get the new MH:
__ ld_ptr(G3_mh_vmtarget, G3_method_handle);
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
}
break;
case _adapter_prim_to_prim:
case _adapter_ref_to_prim:
// Handled completely by optimized cases.
__ stop("init_AdapterMethodHandle should not issue this");
break;
case _adapter_opt_i2i: // optimized subcase of adapt_prim_to_prim
//case _adapter_opt_f2i: // optimized subcase of adapt_prim_to_prim
case _adapter_opt_l2i: // optimized subcase of adapt_prim_to_prim
case _adapter_opt_unboxi: // optimized subcase of adapt_ref_to_prim
{
// Perform an in-place conversion to int or an int subword.
__ ldsw(G3_amh_vmargslot, O0_argslot);
Address vmarg = __ argument_address(O0_argslot);
Address value;
bool value_left_justified = false;
switch (ek) {
case _adapter_opt_i2i:
case _adapter_opt_l2i:
__ unimplemented(entry_name(ek));
value = vmarg;
break;
case _adapter_opt_unboxi:
{
// Load the value up from the heap.
__ ld_ptr(vmarg, O1_scratch);
int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
#ifdef ASSERT
for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
if (is_subword_type(BasicType(bt)))
assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
}
#endif
__ null_check(O1_scratch, value_offset);
value = Address(O1_scratch, value_offset);
#ifdef _BIG_ENDIAN
// Values stored in objects are packed.
value_left_justified = true;
#endif
}
break;
default:
ShouldNotReachHere();
}
// This check is required on _BIG_ENDIAN
Register G5_vminfo = G5_index;
__ ldsw(G3_amh_conversion, G5_vminfo);
assert(CONV_VMINFO_SHIFT == 0, "preshifted");
// Original 32-bit vmdata word must be of this form:
// | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
__ lduw(value, O1_scratch);
if (!value_left_justified)
__ sll(O1_scratch, G5_vminfo, O1_scratch);
Label zero_extend, done;
__ btst(CONV_VMINFO_SIGN_FLAG, G5_vminfo);
__ br(Assembler::zero, false, Assembler::pn, zero_extend);
__ delayed()->nop();
// this path is taken for int->byte, int->short
__ sra(O1_scratch, G5_vminfo, O1_scratch);
__ ba(false, done);
__ delayed()->nop();
__ bind(zero_extend);
// this is taken for int->char
__ srl(O1_scratch, G5_vminfo, O1_scratch);
__ bind(done);
__ st(O1_scratch, vmarg);
// Get the new MH:
__ ld_ptr(G3_mh_vmtarget, G3_method_handle);
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
}
break;
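
The narrowing itself is a shift pair driven by vminfo, the sign-bit count from the conversion word: shift the value left so the target type's sign bit reaches bit 31, then shift back with sra for byte/short or srl for char. A standalone model:

#include <cstdint>

// Model of the sll + sra/srl sequence above; vminfo is the shift amount,
// sra for int->byte / int->short, srl for int->char.
int32_t narrow_in_place_model(int32_t v, int vminfo, bool is_signed) {
  uint32_t left = (uint32_t)v << vminfo;          // sll
  if (is_signed) return (int32_t)left >> vminfo;  // sra: sign-extends
  return (int32_t)(left >> vminfo);               // srl: zero-extends
}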
case _adapter_opt_i2l: // optimized subcase of adapt_prim_to_prim
case _adapter_opt_unboxl: // optimized subcase of adapt_ref_to_prim
{
// Perform an in-place int-to-long or ref-to-long conversion.
__ ldsw(G3_amh_vmargslot, O0_argslot);
// On big-endian machine we duplicate the slot and store the MSW
// in the first slot.
__ add(Gargs, __ argument_offset(O0_argslot, 1), O0_argslot);
insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, O0_argslot, O1_scratch, O2_scratch, G5_index);
Address arg_lsw(O0_argslot, 0);
Address arg_msw(O0_argslot, -Interpreter::stackElementSize());
switch (ek) {
case _adapter_opt_i2l:
{
__ ldsw(arg_lsw, O2_scratch); // Load LSW
#ifndef _LP64
__ signx(O2_scratch, O3_scratch); // Sign extend
#endif
__ st_long(O2_scratch, arg_msw); // Uses O2/O3 on !_LP64
}
break;
case _adapter_opt_unboxl:
{
// Load the value up from the heap.
__ ld_ptr(arg_lsw, O1_scratch);
int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
__ null_check(O1_scratch, value_offset);
__ ld_long(Address(O1_scratch, value_offset), O2_scratch); // Uses O2/O3 on !_LP64
__ st_long(O2_scratch, arg_msw);
}
break;
default:
ShouldNotReachHere();
}
__ ld_ptr(G3_mh_vmtarget, G3_method_handle);
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
}
break;
case _adapter_opt_f2d: // optimized subcase of adapt_prim_to_prim
case _adapter_opt_d2f: // optimized subcase of adapt_prim_to_prim
{
// perform an in-place floating primitive conversion
__ unimplemented(entry_name(ek));
}
break;
case _adapter_prim_to_ref:
__ unimplemented(entry_name(ek)); // %%% FIXME: NYI
break;
case _adapter_swap_args:
case _adapter_rot_args:
// handled completely by optimized cases
__ stop("init_AdapterMethodHandle should not issue this");
break;
case _adapter_opt_swap_1:
case _adapter_opt_swap_2:
case _adapter_opt_rot_1_up:
case _adapter_opt_rot_1_down:
case _adapter_opt_rot_2_up:
case _adapter_opt_rot_2_down:
{
int swap_bytes = 0, rotate = 0;
get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);
// 'argslot' is the position of the first argument to swap.
__ ldsw(G3_amh_vmargslot, O0_argslot);
__ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
// 'vminfo' is the second.
Register O1_destslot = O1_scratch;
__ ldsw(G3_amh_conversion, O1_destslot);
assert(CONV_VMINFO_SHIFT == 0, "preshifted");
__ and3(O1_destslot, CONV_VMINFO_MASK, O1_destslot);
__ add(Gargs, __ argument_offset(O1_destslot), O1_destslot);
if (!rotate) {
for (int i = 0; i < swap_bytes; i += wordSize) {
__ ld_ptr(Address(O0_argslot, i), O2_scratch);
__ ld_ptr(Address(O1_destslot, i), O3_scratch);
__ st_ptr(O3_scratch, Address(O0_argslot, i));
__ st_ptr(O2_scratch, Address(O1_destslot, i));
}
} else {
// Save the first chunk, which is going to get overwritten.
switch (swap_bytes) {
case 4 : __ lduw(Address(O0_argslot, 0), O2_scratch); break;
case 16: __ ldx( Address(O0_argslot, 8), O3_scratch); //fall-thru
case 8 : __ ldx( Address(O0_argslot, 0), O2_scratch); break;
default: ShouldNotReachHere();
}
if (rotate > 0) {
// Rotate upward.
__ sub(O0_argslot, swap_bytes, O0_argslot);
#ifdef ASSERT
{
// Verify that argslot > destslot, by at least swap_bytes.
Label L_ok;
__ cmp(O0_argslot, O1_destslot);
__ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_ok);
__ delayed()->nop();
__ stop("source must be above destination (upward rotation)");
__ bind(L_ok);
}
#endif
// Work argslot down to destslot, copying contiguous data upwards.
// Pseudo-code:
// argslot = src_addr - swap_bytes
// destslot = dest_addr
// while (argslot >= destslot) {
// *(argslot + swap_bytes) = *(argslot + 0);
// argslot--;
// }
Label loop;
__ bind(loop);
__ ld_ptr(Address(O0_argslot, 0), G5_index);
__ st_ptr(G5_index, Address(O0_argslot, swap_bytes));
__ sub(O0_argslot, wordSize, O0_argslot);
__ cmp(O0_argslot, O1_destslot);
__ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, loop);
__ delayed()->nop(); // FILLME
} else {
__ add(O0_argslot, swap_bytes, O0_argslot);
#ifdef ASSERT
{
// Verify that argslot < destslot, by at least swap_bytes.
Label L_ok;
__ cmp(O0_argslot, O1_destslot);
__ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
__ delayed()->nop();
__ stop("source must be above destination (upward rotation)");
__ bind(L_ok);
}
#endif
// Work argslot up to destslot, copying contiguous data downwards.
// Pseudo-code:
// argslot = src_addr + swap_bytes
// destslot = dest_addr
// while (argslot <= destslot) {
// *(argslot - swap_bytes) = *(argslot + 0);
// argslot++;
// }
Label loop;
__ bind(loop);
__ ld_ptr(Address(O0_argslot, 0), G5_index);
__ st_ptr(G5_index, Address(O0_argslot, -swap_bytes));
__ add(O0_argslot, wordSize, O0_argslot);
__ cmp(O0_argslot, O1_destslot);
__ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, loop);
__ delayed()->nop(); // FILLME
}
// Store the original first chunk into the destination slot, now free.
switch (swap_bytes) {
case 4 : __ stw(O2_scratch, Address(O1_destslot, 0)); break;
case 16: __ stx(O3_scratch, Address(O1_destslot, 8)); // fall-thru
case 8 : __ stx(O2_scratch, Address(O1_destslot, 0)); break;
default: ShouldNotReachHere();
}
}
__ ld_ptr(G3_mh_vmtarget, G3_method_handle);
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
}
break;
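
For the non-rotating swaps, the generated code exchanges the two slots word by word (the ld/st pairs are unrolled at stub-generation time). A compact model of that path:

// Model of the non-rotating swap: exchange swap_bytes worth of word-sized
// cells between argslot and destslot, as the unrolled ld/st pairs above do.
void swap_args_model(long* argslot, long* destslot, int swap_bytes) {
  for (int i = 0; i < swap_bytes / (int)sizeof(long); i++) {
    long a = argslot[i];   // O2_scratch
    long b = destslot[i];  // O3_scratch
    argslot[i]  = b;
    destslot[i] = a;
  }
}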
case _adapter_dup_args:
{
// 'argslot' is the position of the first argument to duplicate.
__ ldsw(G3_amh_vmargslot, O0_argslot);
__ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
// 'stack_move' is the (negative) number of words to duplicate.
Register G5_stack_move = G5_index;
__ ldsw(G3_amh_conversion, G5_stack_move);
__ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);
// Remember the old Gargs (argslot[0]).
Register O1_oldarg = O1_scratch;
__ mov(Gargs, O1_oldarg);
// Move Gargs down to make room for dups.
__ sll_ptr(G5_stack_move, LogBytesPerWord, G5_stack_move);
__ add(Gargs, G5_stack_move, Gargs);
// Compute the new Gargs (argslot[0]).
Register O2_newarg = O2_scratch;
__ mov(Gargs, O2_newarg);
// Copy from oldarg[0...] down to newarg[0...]
// Pseudo-code:
// O1_oldarg = old-Gargs
// O2_newarg = new-Gargs
// O0_argslot = argslot
// while (O2_newarg < O1_oldarg) *O2_newarg = *O0_argslot++
Label loop;
__ bind(loop);
__ ld_ptr(Address(O0_argslot, 0), O3_scratch);
__ st_ptr(O3_scratch, Address(O2_newarg, 0));
__ add(O0_argslot, wordSize, O0_argslot);
__ add(O2_newarg, wordSize, O2_newarg);
__ cmp(O2_newarg, O1_oldarg);
__ brx(Assembler::less, false, Assembler::pt, loop);
__ delayed()->nop(); // FILLME
__ ld_ptr(G3_mh_vmtarget, G3_method_handle);
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
}
break;
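
The duplication loop above matches its pseudo-code exactly: after Gargs has been lowered by |stack_move| words, copy from the original slots into the new space until new-Gargs catches up with old-Gargs. A sketch:

// Model of the _adapter_dup_args loop: newarg is the lowered Gargs, oldarg
// the original one; copy *argslot++ to *newarg++ until newarg reaches oldarg.
void dup_args_model(long* newarg, long* oldarg, long* argslot) {
  while (newarg < oldarg) {
    *newarg++ = *argslot++;  // one word per O3_scratch round trip
  }
}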
case _adapter_drop_args:
{
// 'argslot' is the position of the first argument to nuke.
__ ldsw(G3_amh_vmargslot, O0_argslot);
__ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
// 'stack_move' is the number of words to drop.
Register G5_stack_move = G5_index;
__ ldsw(G3_amh_conversion, G5_stack_move);
__ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);
remove_arg_slots(_masm, G5_stack_move, O0_argslot, O1_scratch, O2_scratch, O3_scratch);
__ ld_ptr(G3_mh_vmtarget, G3_method_handle);
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
}
break;
case _adapter_collect_args:
__ unimplemented(entry_name(ek)); // %%% FIXME: NYI
break;
case _adapter_spread_args:
// Handled completely by optimized cases.
__ stop("init_AdapterMethodHandle should not issue this");
break;
case _adapter_opt_spread_0:
case _adapter_opt_spread_1:
case _adapter_opt_spread_more:
{
// spread an array out into a group of arguments
__ unimplemented(entry_name(ek));
}
break;
case _adapter_flyby:
case _adapter_ricochet:
__ unimplemented(entry_name(ek)); // %%% FIXME: NYI
break;
default:
ShouldNotReachHere();
}
address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
__ unimplemented(entry_name(ek)); // %%% FIXME: NYI
init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
}

@@ -2917,6 +2917,16 @@ class StubGenerator: public StubCodeGenerator {
// arraycopy stubs used by compilers
generate_arraycopy_stubs();
// generic method handle stubs
if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) {
for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
ek < MethodHandles::_EK_LIMIT;
ek = MethodHandles::EntryKind(1 + (int)ek)) {
StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
MethodHandles::generate_method_handle_stub(_masm, ek);
}
}
// Don't initialize the platform math functions since sparc
// doesn't have intrinsics for these operations.
}

@@ -151,8 +151,10 @@ address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
address compiled_entry = __ pc();
TosState incoming_state = state;
Label cont;
address compiled_entry = __ pc();
address entry = __ pc();
#if !defined(_LP64) && defined(COMPILER2)
@@ -165,12 +167,11 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
// do this here. Unfortunately if we did a rethrow we'd see a machepilog node
// first which would move g1 -> O0/O1 and destroy the exception we were throwing.
if( state == ltos ) {
__ srl (G1, 0,O1);
__ srlx(G1,32,O0);
if (incoming_state == ltos) {
__ srl (G1, 0, O1);
__ srlx(G1, 32, O0);
}
#endif /* !_LP64 && COMPILER2 */
#endif // !_LP64 && COMPILER2
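
On 32-bit SPARC with C2, the 64-bit long arrives in G1 and must be split across the O0/O1 pair, which is what the srl/srlx pair above does. Modeled in plain C++:

#include <cstdint>

// Model of the ltos split: low 32 bits -> O1, high 32 bits -> O0.
void split_long_model(uint64_t g1, uint32_t* o0, uint32_t* o1) {
  *o1 = (uint32_t)g1;          // srl (G1,  0, O1)
  *o0 = (uint32_t)(g1 >> 32);  // srlx(G1, 32, O0)
}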
__ bind(cont);
@@ -182,10 +183,17 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ mov(Llast_SP, SP); // Remove any adapter added stack space.
Label L_got_cache, L_giant_index;
const Register cache = G3_scratch;
const Register size = G1_scratch;
if (EnableInvokeDynamic) {
__ ldub(Address(Lbcp, 0), G1_scratch); // Load current bytecode.
__ cmp(G1_scratch, Bytecodes::_invokedynamic);
__ br(Assembler::equal, false, Assembler::pn, L_giant_index);
__ delayed()->nop();
}
__ get_cache_and_index_at_bcp(cache, G1_scratch, 1);
__ bind(L_got_cache);
__ ld_ptr(cache, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::flags_offset(), size);
__ and3(size, 0xFF, size); // argument size in words
@@ -193,6 +201,14 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ add(Lesp, size, Lesp); // pop arguments
__ dispatch_next(state, step);
// out of the main line of code...
if (EnableInvokeDynamic) {
__ bind(L_giant_index);
__ get_cache_and_index_at_bcp(cache, G1_scratch, 1, true);
__ ba(false, L_got_cache);
__ delayed()->nop();
}
return entry;
}
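The extra boolean threaded through get_cache_and_index_at_bcp above exists because invokedynamic is followed by a 4-byte ("giant") constant-pool-cache index where other invokes carry a 2-byte one. A sketch of just that distinction (hypothetical helper; rewriting and byte-order details elided):

#include <cstdint>
#include <cstring>

uint32_t cache_index_at_bcp_sketch(const uint8_t* bcp, bool giant_index) {
  uint32_t index = 0;
  if (giant_index) {
    std::memcpy(&index, bcp + 1, 4);  // invokedynamic: 4-byte index after the opcode
  } else {
    uint16_t u2 = 0;
    std::memcpy(&u2, bcp + 1, 2);     // other invokes: 2-byte index
    index = u2;
  }
  return index;
}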

@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1963,19 +1963,30 @@ void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constrain
// ----------------------------------------------------------------------------
void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);
// Depends on cpCacheOop layout!
const int shift_count = (1 + byte_no)*BitsPerByte;
Label resolved;
__ get_cache_and_index_at_bcp(Rcache, index, 1);
__ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::indices_offset(), Lbyte_code);
__ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
if (is_invokedynamic) {
// We are resolved if the f1 field contains a non-null CallSite object.
__ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::f1_offset(), Lbyte_code);
__ tst(Lbyte_code);
__ br(Assembler::notEqual, false, Assembler::pt, resolved);
__ delayed()->set((int)bytecode(), O1);
} else {
__ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::indices_offset(), Lbyte_code);
__ srl( Lbyte_code, shift_count, Lbyte_code );
__ and3( Lbyte_code, 0xFF, Lbyte_code );
__ cmp( Lbyte_code, (int)bytecode());
__ br( Assembler::equal, false, Assembler::pt, resolved);
__ delayed()->set((int)bytecode(), O1);
__ srl( Lbyte_code, shift_count, Lbyte_code );
__ and3( Lbyte_code, 0xFF, Lbyte_code );
__ cmp( Lbyte_code, (int)bytecode());
__ br( Assembler::equal, false, Assembler::pt, resolved);
__ delayed()->set((int)bytecode(), O1);
}
address entry;
switch (bytecode()) {
@ -1987,12 +1998,13 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
case Bytecodes::_invokespecial : // fall through
case Bytecodes::_invokestatic : // fall through
case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
default : ShouldNotReachHere(); break;
}
// first time invocation - must resolve first
__ call_VM(noreg, entry, O1);
// Update registers with resolved info
__ get_cache_and_index_at_bcp(Rcache, index, 1);
__ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
__ bind(resolved);
}
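Restating the two resolution tests above as a C++ sketch (struct and field names are illustrative, not the real cpCacheOop layout): an invokedynamic entry counts as resolved once f1 holds a non-null CallSite, while the other invokes compare the bytecode cached in the indices word.

#include <cstdint>

struct CacheEntrySketch {   // illustrative layout only
  void*    f1;              // CallSite oop once an invokedynamic entry is resolved
  uint32_t indices;         // cached bytecodes for the other invokes
};

bool is_resolved_sketch(const CacheEntrySketch* e, int bytecode,
                        bool is_invokedynamic, int shift_count) {
  if (is_invokedynamic)
    return e->f1 != nullptr;                                          // tst + br notEqual
  return ((e->indices >> shift_count) & 0xFF) == (uint32_t)bytecode;  // srl, and3, cmp
}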
@ -3130,7 +3142,42 @@ void TemplateTable::invokedynamic(int byte_no) {
return;
}
__ stop("invokedynamic NYI");//6815692//
// G5: CallSite object (f1)
// XX: unused (f2)
// G3: receiver address
// XX: flags (unused)
Register G5_callsite = G5_method;
Register Rscratch = G3_scratch;
Register Rtemp = G1_scratch;
Register Rret = Lscratch;
load_invoke_cp_cache_entry(byte_no, G5_callsite, noreg, Rret, false);
__ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
__ verify_oop(G5_callsite);
// profile this call
__ profile_call(O4);
// get return address
AddressLiteral table(Interpreter::return_5_addrs_by_index_table());
__ set(table, Rtemp);
__ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
// Make sure we don't need to mask Rret for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits();
__ sll(Rret, LogBytesPerWord, Rret);
__ ld_ptr(Rtemp, Rret, Rret); // get return address
__ ld_ptr(G5_callsite, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle);
__ null_check(G3_method_handle);
// Adjust Rret first so Llast_SP can be the same as Rret
__ add(Rret, -frame::pc_return_offset, O7);
__ add(Lesp, BytesPerWord, Gargs); // set up parameter pointer
__ jump_to_method_handle_entry(G3_method_handle, Rtemp, /* emit_delayed_nop */ false);
// Record SP so we can remove any stack space allocated by the adapter transition
__ delayed()->mov(SP, Llast_SP);
}
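The srl/sll/ld_ptr sequence on Rret above amounts to an indexed table load: the flags word's top bits give the result TosState, which selects the interpreter return entry. A sketch with assumed names (tos_bits_shift standing in for ConstantPoolCacheEntry::tosBits):

void* select_return_entry_sketch(void* const return_table[],
                                 unsigned flags, unsigned tos_bits_shift) {
  unsigned tos_state = flags >> tos_bits_shift;  // srl: extract the return type
  return return_table[tos_state];                // sll + ld_ptr: scale and load
}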

@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -6492,24 +6492,19 @@ int MacroAssembler::load_unsigned_short(Register dst, Address src) {
}
void MacroAssembler::load_sized_value(Register dst, Address src,
int size_in_bytes, bool is_signed) {
switch (size_in_bytes ^ (is_signed ? -1 : 0)) {
size_t size_in_bytes, bool is_signed) {
switch (size_in_bytes) {
#ifndef _LP64
// For case 8, the caller is responsible for manually loading
// the second word into another register.
case ~8: // fall through:
case 8: movl( dst, src ); break;
case 8: movl(dst, src); break;
#else
case ~8: // fall through:
case 8: movq( dst, src ); break;
case 8: movq(dst, src); break;
#endif
case ~4: // fall through:
case 4: movl( dst, src ); break;
case ~2: load_signed_short( dst, src ); break;
case 2: load_unsigned_short( dst, src ); break;
case ~1: load_signed_byte( dst, src ); break;
case 1: load_unsigned_byte( dst, src ); break;
default: ShouldNotReachHere();
case 4: movl(dst, src); break;
case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
default: ShouldNotReachHere();
}
}
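For reference, the deleted dispatch leaned on the identity x ^ -1 == ~x, so signed requests selected the ~size cases; the replacement switches on size and branches on signedness. A behavioral sketch under those assumptions, with plain memory loads standing in for the mov/load_* instructions (note that case 4 zero-extends on LP64, matching movl):

#include <cstdint>
#include <cstring>

int64_t load_sized_value_sketch(const void* src, size_t size_in_bytes, bool is_signed) {
  switch (size_in_bytes) {
    case 8: { int64_t  v; std::memcpy(&v, src, 8); return v; }
    case 4: { uint32_t v; std::memcpy(&v, src, 4); return v; }  // movl: zero-extends on LP64
    case 2: { if (is_signed) { int16_t v; std::memcpy(&v, src, 2); return v; }
              uint16_t v; std::memcpy(&v, src, 2); return v; }
    case 1: { if (is_signed) { int8_t  v; std::memcpy(&v, src, 1); return v; }
              uint8_t  v; std::memcpy(&v, src, 1); return v; }
    default: return 0;  // ShouldNotReachHere() in the real code
  }
}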
@ -7706,6 +7701,7 @@ void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_re
// method handle's MethodType. This macro hides the distinction.
void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
Register temp_reg) {
assert_different_registers(vmslots_reg, mh_reg, temp_reg);
if (UseCompressedOops) unimplemented(); // field accesses must decode
// load mh.type.form.vmslots
if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) {
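The mh.type.form.vmslots chain this hunk loads is two dependent oop loads followed by an int field; sketched with assumed struct names (not the real oop layout, and ignoring compressed oops, which the code above explicitly punts on):

struct MethodTypeFormSketch { int vmslots; };                 // assumed layout
struct MethodTypeSketch     { MethodTypeFormSketch* form; };
struct MethodHandleSketch   { MethodTypeSketch* type; };

int load_method_handle_vmslots_sketch(const MethodHandleSketch* mh) {
  return mh->type->form->vmslots;  // two pointer loads, then the int field
}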

@ -1,5 +1,5 @@
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1511,7 +1511,7 @@ class MacroAssembler: public Assembler {
void extend_sign(Register hi, Register lo);
// Loading values by size and signed-ness
void load_sized_value(Register dst, Address src, int size_in_bytes, bool is_signed);
void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed);
// Support for inc/dec with optimal instruction selection depending on value

@ -127,7 +127,8 @@ void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
int arg_mask,
Register rax_argslot,
Register rbx_temp, Register rdx_temp) {
Register rbx_temp, Register rdx_temp, Register temp3_reg) {
assert(temp3_reg == noreg, "temp3 not required");
assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
(!arg_slots.is_register() ? rsp : arg_slots.as_register()));
@ -185,7 +186,8 @@ void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
Register rax_argslot,
Register rbx_temp, Register rdx_temp) {
Register rbx_temp, Register rdx_temp, Register temp3_reg) {
assert(temp3_reg == noreg, "temp3 not required");
assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
(!arg_slots.is_register() ? rsp : arg_slots.as_register()));

@ -430,12 +430,12 @@ class MethodHandles: AllStatic {
RegisterOrConstant arg_slots,
int arg_mask,
Register argslot_reg,
Register temp_reg, Register temp2_reg);
Register temp_reg, Register temp2_reg, Register temp3_reg = noreg);
static void remove_arg_slots(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
Register argslot_reg,
Register temp_reg, Register temp2_reg);
Register temp_reg, Register temp2_reg, Register temp3_reg = noreg);
};
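The defaulted temp3_reg keeps one shared declaration across platforms: the SPARC implementations consume the third temp, while the x86 ones assert it away. A sketch of the pattern with simplified types (names assumed):

typedef int RegisterSketch;             // simplified stand-in for Register
const RegisterSketch noreg_sketch = -1;

// Shared declaration: callers that don't pass temp3 get noreg.
void remove_arg_slots_sketch(RegisterSketch argslot, RegisterSketch temp,
                             RegisterSketch temp2,
                             RegisterSketch temp3 = noreg_sketch);

// x86-style definition: the extra temp is unused, so it must stay noreg.
void remove_arg_slots_sketch(RegisterSketch argslot, RegisterSketch temp,
                             RegisterSketch temp2, RegisterSketch temp3) {
  // assert(temp3 == noreg, "temp3 not required") in the real code
}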