8296477: Foreign linker implementation update following JEP 434

Co-authored-by: Jorn Vernee <jvernee@openjdk.org>
Co-authored-by: Nick Gasson <ngasson@openjdk.org>
Co-authored-by: Per Minborg <pminborg@openjdk.org>
Reviewed-by: rehn, mcimadamore, vlivanov
Jorn Vernee 2022-12-05 14:47:12 +00:00
parent 73baadceb6
commit 0452c39fec
72 changed files with 2584 additions and 933 deletions

View File

@ -42,13 +42,14 @@ class DowncallStubGenerator : public StubCodeGenerator {
BasicType _ret_bt;
const ABIDescriptor& _abi;
const GrowableArray<VMReg>& _input_registers;
const GrowableArray<VMReg>& _output_registers;
const GrowableArray<VMStorage>& _input_registers;
const GrowableArray<VMStorage>& _output_registers;
bool _needs_return_buffer;
int _captured_state_mask;
int _frame_complete;
int _framesize;
int _frame_size_slots;
OopMapSet* _oop_maps;
public:
DowncallStubGenerator(CodeBuffer* buffer,
@ -56,9 +57,10 @@ public:
int num_args,
BasicType ret_bt,
const ABIDescriptor& abi,
const GrowableArray<VMReg>& input_registers,
const GrowableArray<VMReg>& output_registers,
bool needs_return_buffer)
const GrowableArray<VMStorage>& input_registers,
const GrowableArray<VMStorage>& output_registers,
bool needs_return_buffer,
int captured_state_mask)
: StubCodeGenerator(buffer, PrintMethodHandleStubs),
_signature(signature),
_num_args(num_args),
@ -67,8 +69,9 @@ public:
_input_registers(input_registers),
_output_registers(output_registers),
_needs_return_buffer(needs_return_buffer),
_captured_state_mask(captured_state_mask),
_frame_complete(0),
_framesize(0),
_frame_size_slots(0),
_oop_maps(NULL) {
}
@ -79,7 +82,7 @@ public:
}
int framesize() const {
return (_framesize >> (LogBytesPerWord - LogBytesPerInt));
return (_frame_size_slots >> (LogBytesPerWord - LogBytesPerInt));
}
OopMapSet* oop_maps() const {
@ -93,12 +96,15 @@ RuntimeStub* DowncallLinker::make_downcall_stub(BasicType* signature,
int num_args,
BasicType ret_bt,
const ABIDescriptor& abi,
const GrowableArray<VMReg>& input_registers,
const GrowableArray<VMReg>& output_registers,
bool needs_return_buffer) {
int locs_size = 64;
const GrowableArray<VMStorage>& input_registers,
const GrowableArray<VMStorage>& output_registers,
bool needs_return_buffer,
int captured_state_mask) {
int locs_size = 64;
CodeBuffer code("nep_invoker_blob", native_invoker_code_size, locs_size);
DowncallStubGenerator g(&code, signature, num_args, ret_bt, abi, input_registers, output_registers, needs_return_buffer);
DowncallStubGenerator g(&code, signature, num_args, ret_bt, abi,
input_registers, output_registers,
needs_return_buffer, captured_state_mask);
g.generate();
code.log_section_sizes("nep_invoker_blob");
@ -137,10 +143,10 @@ void DowncallStubGenerator::generate() {
Register tmp1 = r9;
Register tmp2 = r10;
Register shuffle_reg = r19;
VMStorage shuffle_reg = as_VMStorage(r19);
JavaCallingConvention in_conv;
NativeCallingConvention out_conv(_input_registers);
ArgumentShuffle arg_shuffle(_signature, _num_args, _signature, _num_args, &in_conv, &out_conv, shuffle_reg->as_VMReg());
ArgumentShuffle arg_shuffle(_signature, _num_args, _signature, _num_args, &in_conv, &out_conv, shuffle_reg);
#ifndef PRODUCT
LogTarget(Trace, foreign, downcall) lt;
@ -152,32 +158,36 @@ void DowncallStubGenerator::generate() {
#endif
int allocated_frame_size = 0;
if (_needs_return_buffer) {
allocated_frame_size += 8; // for address spill
}
allocated_frame_size += arg_shuffle.out_arg_stack_slots() << LogBytesPerInt;
assert(_abi._shadow_space_bytes == 0, "not expecting shadow space on AArch64");
allocated_frame_size += arg_shuffle.out_arg_bytes();
int ret_buf_addr_sp_offset = -1;
if (_needs_return_buffer) {
// in sync with the above
ret_buf_addr_sp_offset = allocated_frame_size - 8;
}
bool should_save_return_value = !_needs_return_buffer;
RegSpiller out_reg_spiller(_output_registers);
int spill_offset = -1;
if (!_needs_return_buffer) {
if (should_save_return_value) {
spill_offset = 0;
// spill area can be shared with the above, so we take the max of the 2
// spill area can be shared with shadow space and out args,
// since they are only used before the call,
// and spill area is only used after.
allocated_frame_size = out_reg_spiller.spill_size_bytes() > allocated_frame_size
? out_reg_spiller.spill_size_bytes()
: allocated_frame_size;
}
_framesize = align_up(framesize
+ (allocated_frame_size >> LogBytesPerInt), 4);
assert(is_even(_framesize/2), "sp not 16-byte aligned");
StubLocations locs;
locs.set(StubLocations::TARGET_ADDRESS, _abi._scratch1);
if (_needs_return_buffer) {
locs.set_frame_data(StubLocations::RETURN_BUFFER, allocated_frame_size);
allocated_frame_size += BytesPerWord; // for address spill
}
if (_captured_state_mask != 0) {
locs.set_frame_data(StubLocations::CAPTURED_STATE_BUFFER, allocated_frame_size);
allocated_frame_size += BytesPerWord;
}
_frame_size_slots = align_up(framesize + (allocated_frame_size >> LogBytesPerInt), 4);
assert(is_even(_frame_size_slots/2), "sp not 16-byte aligned");
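The renamed _frame_size_slots makes the unit explicit: the generator counts the frame in 32-bit stack slots, aligns it to 4 slots so sp stays 16-byte aligned, and framesize() converts slots to 64-bit words for the RuntimeStub. A standalone sketch of that arithmetic follows, using LP64 HotSpot constants; the "+ 4" fixed part is an assumed placeholder for the saved fp/lr slots, not the exact value used by the stub.

// Standalone sketch of the slot bookkeeping above (not HotSpot code).
// Constants mirror LP64 HotSpot: a stack slot is 4 bytes, a word is 8 bytes.
#include <cassert>

int main() {
  const int LogBytesPerInt  = 2;               // 4-byte stack slot
  const int LogBytesPerWord = 3;               // 8-byte machine word
  int allocated_frame_size  = 40;              // bytes for out args, buffers, ...
  int frame_size_slots      = (allocated_frame_size >> LogBytesPerInt) + 4; // + assumed fixed part

  // align_up(x, 4) in slots is the same as 16-byte alignment in bytes
  frame_size_slots = (frame_size_slots + 3) & ~3;
  assert(frame_size_slots % 4 == 0);           // sp stays 16-byte aligned

  // framesize() converts 32-bit slots to 64-bit words for the RuntimeStub
  int frame_size_words = frame_size_slots >> (LogBytesPerWord - LogBytesPerInt);
  assert(frame_size_words * 2 == frame_size_slots); // two slots per word
  return 0;
}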
_oop_maps = new OopMapSet();
address start = __ pc();
@ -185,13 +195,13 @@ void DowncallStubGenerator::generate() {
__ enter();
// lr and fp are already in place
__ sub(sp, rfp, ((unsigned)_framesize-4) << LogBytesPerInt); // prolog
__ sub(sp, rfp, ((unsigned)_frame_size_slots-4) << LogBytesPerInt); // prolog
_frame_complete = __ pc() - start;
address the_pc = __ pc();
__ set_last_Java_frame(sp, rfp, the_pc, tmp1);
OopMap* map = new OopMap(_framesize, 0);
OopMap* map = new OopMap(_frame_size_slots, 0);
_oop_maps->add_gc_map(the_pc - start, map);
// State transition
@ -200,27 +210,22 @@ void DowncallStubGenerator::generate() {
__ stlrw(tmp1, tmp2);
__ block_comment("{ argument shuffle");
arg_shuffle.generate(_masm, shuffle_reg->as_VMReg(), 0, _abi._shadow_space_bytes);
if (_needs_return_buffer) {
assert(ret_buf_addr_sp_offset != -1, "no return buffer addr spill");
__ str(_abi._ret_buf_addr_reg, Address(sp, ret_buf_addr_sp_offset));
}
arg_shuffle.generate(_masm, shuffle_reg, 0, _abi._shadow_space_bytes, locs);
__ block_comment("} argument shuffle");
__ blr(_abi._target_addr_reg);
__ blr(as_Register(locs.get(StubLocations::TARGET_ADDRESS)));
// this call is assumed not to have killed rthread
if (_needs_return_buffer) {
assert(ret_buf_addr_sp_offset != -1, "no return buffer addr spill");
__ ldr(tmp1, Address(sp, ret_buf_addr_sp_offset));
__ ldr(tmp1, Address(sp, locs.data_offset(StubLocations::RETURN_BUFFER)));
int offset = 0;
for (int i = 0; i < _output_registers.length(); i++) {
VMReg reg = _output_registers.at(i);
if (reg->is_Register()) {
__ str(reg->as_Register(), Address(tmp1, offset));
VMStorage reg = _output_registers.at(i);
if (reg.type() == StorageType::INTEGER) {
__ str(as_Register(reg), Address(tmp1, offset));
offset += 8;
} else if(reg->is_FloatRegister()) {
__ strd(reg->as_FloatRegister(), Address(tmp1, offset));
} else if (reg.type() == StorageType::VECTOR) {
__ strd(as_FloatRegister(reg), Address(tmp1, offset));
offset += 16;
} else {
ShouldNotReachHere();
@ -228,6 +233,28 @@ void DowncallStubGenerator::generate() {
}
}
//////////////////////////////////////////////////////////////////////////////
if (_captured_state_mask != 0) {
__ block_comment("{ save thread local");
if (should_save_return_value) {
out_reg_spiller.generate_spill(_masm, spill_offset);
}
__ ldr(c_rarg0, Address(sp, locs.data_offset(StubLocations::CAPTURED_STATE_BUFFER)));
__ movw(c_rarg1, _captured_state_mask);
__ rt_call(CAST_FROM_FN_PTR(address, DowncallLinker::capture_state), tmp1);
if (should_save_return_value) {
out_reg_spiller.generate_fill(_masm, spill_offset);
}
__ block_comment("} save thread local");
}
//////////////////////////////////////////////////////////////////////////////
__ mov(tmp1, _thread_in_native_trans);
__ strw(tmp1, Address(rthread, JavaThread::thread_state_offset()));
@ -272,7 +299,7 @@ void DowncallStubGenerator::generate() {
__ block_comment("{ L_safepoint_poll_slow_path");
__ bind(L_safepoint_poll_slow_path);
if (!_needs_return_buffer) {
if (should_save_return_value) {
// Need to save the native result registers around any runtime calls.
out_reg_spiller.generate_spill(_masm, spill_offset);
}
@ -282,7 +309,7 @@ void DowncallStubGenerator::generate() {
__ lea(tmp1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
__ blr(tmp1);
if (!_needs_return_buffer) {
if (should_save_return_value) {
out_reg_spiller.generate_fill(_masm, spill_offset);
}
@ -294,13 +321,13 @@ void DowncallStubGenerator::generate() {
__ block_comment("{ L_reguard");
__ bind(L_reguard);
if (!_needs_return_buffer) {
if (should_save_return_value) {
out_reg_spiller.generate_spill(_masm, spill_offset);
}
__ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), tmp1);
if (!_needs_return_buffer) {
if (should_save_return_value) {
out_reg_spiller.generate_fill(_masm, spill_offset);
}
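The block above is the VM side of JEP 434's captured call state: when _captured_state_mask is non-zero, the stub calls DowncallLinker::capture_state(buffer, mask) immediately after the downcall, spilling the return registers around the call when no return buffer is in use. Below is a standalone sketch of what such a capture helper could look like; the bit assignment and buffer layout are assumptions for illustration, not the actual HotSpot implementation.

// Illustrative sketch only: a capture_state(buffer, mask)-style helper that
// copies selected thread-local values into a caller-provided buffer.
// Bit values and layout are assumed for the example.
#include <cerrno>
#include <cstdint>

enum CaptureBit : int32_t {
  CAPTURE_ERRNO = 1 << 0,          // assumed bit; the real masks come from the Java side
};

extern "C" void capture_state_sketch(int32_t* buffer, int32_t mask) {
  int slot = 0;
  if (mask & CAPTURE_ERRNO) {
    buffer[slot++] = errno;        // must run before anything else clobbers errno
  }
  // other capturable state (e.g. GetLastError() on Windows) would follow the same pattern
}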

View File

@ -30,6 +30,7 @@
#include "oops/oopCast.inline.hpp"
#include "prims/foreignGlobals.hpp"
#include "prims/foreignGlobals.inline.hpp"
#include "prims/vmstorage.hpp"
#include "utilities/formatBuffer.hpp"
bool ABIDescriptor::is_volatile_reg(Register reg) const {
@ -42,112 +43,183 @@ bool ABIDescriptor::is_volatile_reg(FloatRegister reg) const {
|| _vector_additional_volatile_registers.contains(reg);
}
static constexpr int INTEGER_TYPE = 0;
static constexpr int VECTOR_TYPE = 1;
const ABIDescriptor ForeignGlobals::parse_abi_descriptor(jobject jabi) {
oop abi_oop = JNIHandles::resolve_non_null(jabi);
ABIDescriptor abi;
constexpr Register (*to_Register)(int) = as_Register;
objArrayOop inputStorage = jdk_internal_foreign_abi_ABIDescriptor::inputStorage(abi_oop);
parse_register_array(inputStorage, INTEGER_TYPE, abi._integer_argument_registers, to_Register);
parse_register_array(inputStorage, VECTOR_TYPE, abi._vector_argument_registers, as_FloatRegister);
parse_register_array(inputStorage, StorageType::INTEGER, abi._integer_argument_registers, as_Register);
parse_register_array(inputStorage, StorageType::VECTOR, abi._vector_argument_registers, as_FloatRegister);
objArrayOop outputStorage = jdk_internal_foreign_abi_ABIDescriptor::outputStorage(abi_oop);
parse_register_array(outputStorage, INTEGER_TYPE, abi._integer_return_registers, to_Register);
parse_register_array(outputStorage, VECTOR_TYPE, abi._vector_return_registers, as_FloatRegister);
parse_register_array(outputStorage, StorageType::INTEGER, abi._integer_return_registers, as_Register);
parse_register_array(outputStorage, StorageType::VECTOR, abi._vector_return_registers, as_FloatRegister);
objArrayOop volatileStorage = jdk_internal_foreign_abi_ABIDescriptor::volatileStorage(abi_oop);
parse_register_array(volatileStorage, INTEGER_TYPE, abi._integer_additional_volatile_registers, to_Register);
parse_register_array(volatileStorage, VECTOR_TYPE, abi._vector_additional_volatile_registers, as_FloatRegister);
parse_register_array(volatileStorage, StorageType::INTEGER, abi._integer_additional_volatile_registers, as_Register);
parse_register_array(volatileStorage, StorageType::VECTOR, abi._vector_additional_volatile_registers, as_FloatRegister);
abi._stack_alignment_bytes = jdk_internal_foreign_abi_ABIDescriptor::stackAlignment(abi_oop);
abi._shadow_space_bytes = jdk_internal_foreign_abi_ABIDescriptor::shadowSpace(abi_oop);
abi._target_addr_reg = parse_vmstorage(jdk_internal_foreign_abi_ABIDescriptor::targetAddrStorage(abi_oop))->as_Register();
abi._ret_buf_addr_reg = parse_vmstorage(jdk_internal_foreign_abi_ABIDescriptor::retBufAddrStorage(abi_oop))->as_Register();
abi._scratch1 = parse_vmstorage(jdk_internal_foreign_abi_ABIDescriptor::scratch1(abi_oop));
abi._scratch2 = parse_vmstorage(jdk_internal_foreign_abi_ABIDescriptor::scratch2(abi_oop));
return abi;
}
enum class RegType {
INTEGER = 0,
VECTOR = 1,
STACK = 3
};
VMReg ForeignGlobals::vmstorage_to_vmreg(int type, int index) {
switch(static_cast<RegType>(type)) {
case RegType::INTEGER: return ::as_Register(index)->as_VMReg();
case RegType::VECTOR: return ::as_FloatRegister(index)->as_VMReg();
case RegType::STACK: return VMRegImpl::stack2reg(index LP64_ONLY(* 2));
}
return VMRegImpl::Bad();
}
int RegSpiller::pd_reg_size(VMReg reg) {
if (reg->is_Register()) {
int RegSpiller::pd_reg_size(VMStorage reg) {
if (reg.type() == StorageType::INTEGER) {
return 8;
} else if (reg->is_FloatRegister()) {
} else if (reg.type() == StorageType::VECTOR) {
return 16; // Always spill/unspill Q registers
}
return 0; // stack and BAD
}
void RegSpiller::pd_store_reg(MacroAssembler* masm, int offset, VMReg reg) {
if (reg->is_Register()) {
masm->spill(reg->as_Register(), true, offset);
} else if (reg->is_FloatRegister()) {
masm->spill(reg->as_FloatRegister(), masm->Q, offset);
void RegSpiller::pd_store_reg(MacroAssembler* masm, int offset, VMStorage reg) {
if (reg.type() == StorageType::INTEGER) {
masm->spill(as_Register(reg), true, offset);
} else if (reg.type() == StorageType::VECTOR) {
masm->spill(as_FloatRegister(reg), masm->Q, offset);
} else {
// stack and BAD
}
}
void RegSpiller::pd_load_reg(MacroAssembler* masm, int offset, VMReg reg) {
if (reg->is_Register()) {
masm->unspill(reg->as_Register(), true, offset);
} else if (reg->is_FloatRegister()) {
masm->unspill(reg->as_FloatRegister(), masm->Q, offset);
void RegSpiller::pd_load_reg(MacroAssembler* masm, int offset, VMStorage reg) {
if (reg.type() == StorageType::INTEGER) {
masm->unspill(as_Register(reg), true, offset);
} else if (reg.type() == StorageType::VECTOR) {
masm->unspill(as_FloatRegister(reg), masm->Q, offset);
} else {
// stack and BAD
}
}
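pd_reg_size() gives the per-register spill cost: 8 bytes for an INTEGER register, 16 for a VECTOR one (Q registers are always spilled whole), and 0 for stack or invalid entries. Presumably the shared RegSpiller sums these to size its spill area; a small sketch of that accounting under that assumption, with mock types rather than HotSpot's:

// Sketch of the spill-area sizing implied by pd_reg_size() above (not HotSpot code).
#include <cassert>
#include <vector>

enum class StorageType { INTEGER, VECTOR, STACK, INVALID };

static int reg_size(StorageType t) {
  switch (t) {
    case StorageType::INTEGER: return 8;   // 64-bit GP register
    case StorageType::VECTOR:  return 16;  // whole Q register
    default:                   return 0;   // stack and invalid take no spill space
  }
}

int main() {
  std::vector<StorageType> ret_regs = { StorageType::INTEGER, StorageType::VECTOR };
  int spill_bytes = 0;
  for (StorageType t : ret_regs) spill_bytes += reg_size(t);
  assert(spill_bytes == 24);               // e.g. r0 (8) + v0 (16)
  return 0;
}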
void ArgumentShuffle::pd_generate(MacroAssembler* masm, VMReg tmp, int in_stk_bias, int out_stk_bias) const {
assert(in_stk_bias == 0 && out_stk_bias == 0, "bias not implemented");
Register tmp_reg = tmp->as_Register();
static constexpr int RFP_BIAS = 16; // skip old rfp and lr
static void move_reg64(MacroAssembler* masm, int out_stk_bias,
Register from_reg, VMStorage to_reg) {
int out_bias = 0;
switch (to_reg.type()) {
case StorageType::INTEGER:
assert(to_reg.segment_mask() == REG64_MASK, "only moves to 64-bit registers supported");
masm->mov(as_Register(to_reg), from_reg);
break;
case StorageType::STACK:
out_bias = out_stk_bias;
case StorageType::FRAME_DATA: {
Address dest(sp, to_reg.offset() + out_bias);
switch (to_reg.stack_size()) {
case 8: masm->str (from_reg, dest); break;
case 4: masm->strw(from_reg, dest); break;
case 2: masm->strh(from_reg, dest); break;
case 1: masm->strb(from_reg, dest); break;
default: ShouldNotReachHere();
}
} break;
default: ShouldNotReachHere();
}
}
static void move_stack(MacroAssembler* masm, Register tmp_reg, int in_stk_bias, int out_stk_bias,
VMStorage from_reg, VMStorage to_reg) {
Address from_addr(rfp, RFP_BIAS + from_reg.offset() + in_stk_bias);
int out_bias = 0;
switch (to_reg.type()) {
case StorageType::INTEGER:
assert(to_reg.segment_mask() == REG64_MASK, "only moves to 64-bit registers supported");
switch (from_reg.stack_size()) {
case 8: masm->ldr (as_Register(to_reg), from_addr); break;
case 4: masm->ldrw(as_Register(to_reg), from_addr); break;
case 2: masm->ldrh(as_Register(to_reg), from_addr); break;
case 1: masm->ldrb(as_Register(to_reg), from_addr); break;
default: ShouldNotReachHere();
}
break;
case StorageType::VECTOR:
assert(to_reg.segment_mask() == V128_MASK, "only moves to v128 registers supported");
switch (from_reg.stack_size()) {
case 8:
masm->ldrd(as_FloatRegister(to_reg), from_addr);
break;
case 4:
masm->ldrs(as_FloatRegister(to_reg), from_addr);
break;
default: ShouldNotReachHere();
}
break;
case StorageType::STACK:
out_bias = out_stk_bias;
case StorageType::FRAME_DATA: {
switch (from_reg.stack_size()) {
case 8: masm->ldr (tmp_reg, from_addr); break;
case 4: masm->ldrw(tmp_reg, from_addr); break;
case 2: masm->ldrh(tmp_reg, from_addr); break;
case 1: masm->ldrb(tmp_reg, from_addr); break;
default: ShouldNotReachHere();
}
Address dest(sp, to_reg.offset() + out_bias);
switch (to_reg.stack_size()) {
case 8: masm->str (tmp_reg, dest); break;
case 4: masm->strw(tmp_reg, dest); break;
case 2: masm->strh(tmp_reg, dest); break;
case 1: masm->strb(tmp_reg, dest); break;
default: ShouldNotReachHere();
}
} break;
default: ShouldNotReachHere();
}
}
static void move_v128(MacroAssembler* masm, int out_stk_bias,
FloatRegister from_reg, VMStorage to_reg) {
switch (to_reg.type()) {
case StorageType::VECTOR:
assert(to_reg.segment_mask() == V128_MASK, "only moves to v128 registers supported");
masm->fmovd(as_FloatRegister(to_reg), from_reg);
break;
case StorageType::STACK: {
Address dest(sp, to_reg.offset() + out_stk_bias);
switch (to_reg.stack_size()) {
case 8: masm->strd(from_reg, dest); break;
case 4: masm->strs(from_reg, dest); break;
default: ShouldNotReachHere();
}
} break;
default: ShouldNotReachHere();
}
}
void ArgumentShuffle::pd_generate(MacroAssembler* masm, VMStorage tmp, int in_stk_bias, int out_stk_bias, const StubLocations& locs) const {
Register tmp_reg = as_Register(tmp);
for (int i = 0; i < _moves.length(); i++) {
Move move = _moves.at(i);
BasicType arg_bt = move.bt;
VMRegPair from_vmreg = move.from;
VMRegPair to_vmreg = move.to;
VMStorage from_reg = move.from;
VMStorage to_reg = move.to;
masm->block_comment(err_msg("bt=%s", null_safe_string(type2name(arg_bt))));
switch (arg_bt) {
case T_BOOLEAN:
case T_BYTE:
case T_SHORT:
case T_CHAR:
case T_INT:
masm->move32_64(from_vmreg, to_vmreg, tmp_reg);
// replace any placeholders
if (from_reg.type() == StorageType::PLACEHOLDER) {
from_reg = locs.get(from_reg);
}
if (to_reg.type() == StorageType::PLACEHOLDER) {
to_reg = locs.get(to_reg);
}
switch (from_reg.type()) {
case StorageType::INTEGER:
assert(from_reg.segment_mask() == REG64_MASK, "only 64-bit register supported");
move_reg64(masm, out_stk_bias, as_Register(from_reg), to_reg);
break;
case T_FLOAT:
masm->float_move(from_vmreg, to_vmreg, tmp_reg);
case StorageType::VECTOR:
assert(from_reg.segment_mask() == V128_MASK, "only v128 register supported");
move_v128(masm, out_stk_bias, as_FloatRegister(from_reg), to_reg);
break;
case T_DOUBLE:
masm->double_move(from_vmreg, to_vmreg, tmp_reg);
case StorageType::STACK:
move_stack(masm, tmp_reg, in_stk_bias, out_stk_bias, from_reg, to_reg);
break;
case T_LONG :
masm->long_move(from_vmreg, to_vmreg, tmp_reg);
break;
default:
fatal("found in upcall args: %s", type2name(arg_bt));
default: ShouldNotReachHere();
}
}
}
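Note the deliberate fall-through from StorageType::STACK into StorageType::FRAME_DATA in the move helpers above: both are sp-relative stores, but only real out-args get out_stk_bias added, while FRAME_DATA slots (the stub's own bookkeeping area, such as the return-buffer address) use their raw offset. A compact sketch of that offset selection, with mock types standing in for the HotSpot ones:

// Sketch of the STACK/FRAME_DATA offset handling (mock types, not HotSpot code).
#include <cassert>

enum class StorageType { INTEGER, VECTOR, STACK, FRAME_DATA };

struct Slot { StorageType type; int offset; };

// Returns the sp-relative byte offset a store should use.
static int dest_offset(const Slot& to, int out_stk_bias) {
  int bias = 0;
  switch (to.type) {
    case StorageType::STACK:
      bias = out_stk_bias;   // only real out-args are biased
      [[fallthrough]];
    case StorageType::FRAME_DATA:
      return to.offset + bias;
    default:
      assert(false && "register destinations are handled separately");
      return -1;
  }
}

int main() {
  assert(dest_offset({StorageType::STACK,      16}, 32) == 48);
  assert(dest_offset({StorageType::FRAME_DATA, 16}, 32) == 16);
  return 0;
}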

View File

@ -42,8 +42,8 @@ struct ABIDescriptor {
int32_t _stack_alignment_bytes;
int32_t _shadow_space_bytes;
Register _target_addr_reg;
Register _ret_buf_addr_reg;
VMStorage _scratch1;
VMStorage _scratch2;
bool is_volatile_reg(Register reg) const;
bool is_volatile_reg(FloatRegister reg) const;

View File

@ -52,9 +52,9 @@ class Register {
public:
// accessors
int raw_encoding() const { return this - first(); }
int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); }
bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; }
constexpr int raw_encoding() const { return this - first(); }
constexpr int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); }
constexpr bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; }
// derived registers, offsets, and addresses
inline Register successor() const;
@ -71,7 +71,7 @@ class Register {
int operator==(const Register r) const { return _encoding == r._encoding; }
int operator!=(const Register r) const { return _encoding != r._encoding; }
const RegisterImpl* operator->() const { return RegisterImpl::first() + _encoding; }
constexpr const RegisterImpl* operator->() const { return RegisterImpl::first() + _encoding; }
};
extern Register::RegisterImpl all_RegisterImpls[Register::number_of_declared_registers + 1] INTERNAL_VISIBILITY;
@ -175,9 +175,9 @@ class FloatRegister {
public:
// accessors
int raw_encoding() const { return this - first(); }
int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); }
bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; }
constexpr int raw_encoding() const { return this - first(); }
constexpr int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); }
constexpr bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; }
// derived registers, offsets, and addresses
inline FloatRegister successor() const;
@ -192,7 +192,7 @@ class FloatRegister {
int operator==(const FloatRegister r) const { return _encoding == r._encoding; }
int operator!=(const FloatRegister r) const { return _encoding != r._encoding; }
const FloatRegisterImpl* operator->() const { return FloatRegisterImpl::first() + _encoding; }
constexpr const FloatRegisterImpl* operator->() const { return FloatRegisterImpl::first() + _encoding; }
};
extern FloatRegister::FloatRegisterImpl all_FloatRegisterImpls[FloatRegister::number_of_registers + 1] INTERNAL_VISIBILITY;

View File

@ -128,9 +128,10 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
Register shuffle_reg = r19;
JavaCallingConvention out_conv;
NativeCallingConvention in_conv(call_regs._arg_regs);
ArgumentShuffle arg_shuffle(in_sig_bt, total_in_args, out_sig_bt, total_out_args, &in_conv, &out_conv, shuffle_reg->as_VMReg());
int stack_slots = SharedRuntime::out_preserve_stack_slots() + arg_shuffle.out_arg_stack_slots();
int out_arg_area = align_up(stack_slots * VMRegImpl::stack_slot_size, StackAlignmentInBytes);
ArgumentShuffle arg_shuffle(in_sig_bt, total_in_args, out_sig_bt, total_out_args, &in_conv, &out_conv, as_VMStorage(shuffle_reg));
int preserved_bytes = SharedRuntime::out_preserve_stack_slots() * VMRegImpl::stack_slot_size;
int stack_bytes = preserved_bytes + arg_shuffle.out_arg_bytes();
int out_arg_area = align_up(stack_bytes , StackAlignmentInBytes);
#ifndef PRODUCT
LogTarget(Trace, foreign, upcall) lt;
@ -158,10 +159,14 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
int frame_data_offset = reg_save_area_offset + reg_save_area_size;
int frame_bottom_offset = frame_data_offset + sizeof(UpcallStub::FrameData);
StubLocations locs;
int ret_buf_offset = -1;
if (needs_return_buffer) {
ret_buf_offset = frame_bottom_offset;
frame_bottom_offset += ret_buf_size;
// use a free register for shuffling code to pick up return
// buffer address from
locs.set(StubLocations::RETURN_BUFFER, abi._scratch1);
}
int frame_size = frame_bottom_offset;
@ -218,9 +223,9 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
arg_spilller.generate_fill(_masm, arg_save_area_offset);
if (needs_return_buffer) {
assert(ret_buf_offset != -1, "no return buffer allocated");
__ lea(abi._ret_buf_addr_reg, Address(sp, ret_buf_offset));
__ lea(as_Register(locs.get(StubLocations::RETURN_BUFFER)), Address(sp, ret_buf_offset));
}
arg_shuffle.generate(_masm, shuffle_reg->as_VMReg(), abi._shadow_space_bytes, 0);
arg_shuffle.generate(_masm, as_VMStorage(shuffle_reg), abi._shadow_space_bytes, 0, locs);
__ block_comment("} argument shuffle");
__ block_comment("{ receiver ");
@ -239,7 +244,7 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
if (!needs_return_buffer) {
#ifdef ASSERT
if (call_regs._ret_regs.length() == 1) { // 0 or 1
VMReg j_expected_result_reg;
VMStorage j_expected_result_reg;
switch (ret_type) {
case T_BOOLEAN:
case T_BYTE:
@ -247,19 +252,18 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
case T_CHAR:
case T_INT:
case T_LONG:
j_expected_result_reg = r0->as_VMReg();
j_expected_result_reg = as_VMStorage(r0);
break;
case T_FLOAT:
case T_DOUBLE:
j_expected_result_reg = v0->as_VMReg();
j_expected_result_reg = as_VMStorage(v0);
break;
default:
fatal("unexpected return type: %s", type2name(ret_type));
}
// No need to move for now, since CallArranger can pick a return type
// that goes in the same reg for both CCs. But, at least assert they are the same
assert(call_regs._ret_regs.at(0) == j_expected_result_reg,
"unexpected result register: %s != %s", call_regs._ret_regs.at(0)->name(), j_expected_result_reg->name());
assert(call_regs._ret_regs.at(0) == j_expected_result_reg, "unexpected result register");
}
#endif
} else {
@ -267,12 +271,12 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
__ lea(rscratch1, Address(sp, ret_buf_offset));
int offset = 0;
for (int i = 0; i < call_regs._ret_regs.length(); i++) {
VMReg reg = call_regs._ret_regs.at(i);
if (reg->is_Register()) {
__ ldr(reg->as_Register(), Address(rscratch1, offset));
VMStorage reg = call_regs._ret_regs.at(i);
if (reg.type() == StorageType::INTEGER) {
__ ldr(as_Register(reg), Address(rscratch1, offset));
offset += 8;
} else if (reg->is_FloatRegister()) {
__ ldrd(reg->as_FloatRegister(), Address(rscratch1, offset));
} else if (reg.type() == StorageType::VECTOR) {
__ ldrd(as_FloatRegister(reg), Address(rscratch1, offset));
offset += 16; // needs to match VECTOR_REG_SIZE in AArch64Architecture (Java)
} else {
ShouldNotReachHere();
@ -328,9 +332,13 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
receiver,
in_ByteSize(frame_data_offset));
if (TraceOptimizedUpcallStubs) {
blob->print_on(tty);
#ifndef PRODUCT
if (lt.is_enabled()) {
ResourceMark rm;
LogStream ls(lt);
blob->print_on(&ls);
}
#endif
return blob->code_begin();
}

View File

@ -0,0 +1,86 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef CPU_AARCH64_VMSTORAGE_AARCH64_INLINE_HPP
#define CPU_AARCH64_VMSTORAGE_AARCH64_INLINE_HPP
#include <cstdint>
#include "asm/register.hpp"
// keep in sync with jdk/internal/foreign/abi/aarch64/AArch64Architecture
enum class StorageType : int8_t {
INTEGER = 0,
VECTOR = 1,
STACK = 2,
PLACEHOLDER = 3,
// special locations used only by native code
FRAME_DATA = PLACEHOLDER + 1,
INVALID = -1
};
// need to define this before constructing VMStorage (below)
constexpr inline bool VMStorage::is_reg(StorageType type) {
return type == StorageType::INTEGER || type == StorageType::VECTOR;
}
constexpr inline StorageType VMStorage::stack_type() { return StorageType::STACK; }
constexpr inline StorageType VMStorage::placeholder_type() { return StorageType::PLACEHOLDER; }
constexpr inline StorageType VMStorage::frame_data_type() { return StorageType::FRAME_DATA; }
constexpr uint16_t REG64_MASK = 0b0000000000000001;
constexpr uint16_t V128_MASK = 0b0000000000000001;
inline Register as_Register(VMStorage vms) {
assert(vms.type() == StorageType::INTEGER, "not the right type");
return ::as_Register(vms.index());
}
inline FloatRegister as_FloatRegister(VMStorage vms) {
assert(vms.type() == StorageType::VECTOR, "not the right type");
return ::as_FloatRegister(vms.index());
}
constexpr inline VMStorage as_VMStorage(Register reg) {
return VMStorage::reg_storage(StorageType::INTEGER, REG64_MASK, reg->encoding());
}
constexpr inline VMStorage as_VMStorage(FloatRegister reg) {
return VMStorage::reg_storage(StorageType::VECTOR, V128_MASK, reg->encoding());
}
inline VMStorage as_VMStorage(VMReg reg) {
if (reg->is_Register()) {
return as_VMStorage(reg->as_Register());
} else if (reg->is_FloatRegister()) {
return as_VMStorage(reg->as_FloatRegister());
} else if (reg->is_stack()) {
return VMStorage::stack_storage(reg);
} else if (!reg->is_valid()) {
return VMStorage::invalid();
}
ShouldNotReachHere();
return VMStorage::invalid();
}
#endif // CPU_AARCH64_VMSTORAGE_AARCH64_INLINE_HPP
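The header above only layers the AArch64 conversions on top of the shared VMStorage value type declared in prims/vmstorage.hpp, which is not part of this excerpt. Roughly, a VMStorage packs a StorageType, a segment mask (REG64_MASK / V128_MASK here), and a register index or stack offset; the mock below assumes that shape purely to show how as_VMStorage and as_Register round-trip, and its field layout is an illustration, not the real class.

// Mock of the VMStorage idea (assumed shape; the real class lives in prims/vmstorage.hpp).
#include <cassert>
#include <cstdint>

enum class StorageType : int8_t { INTEGER = 0, VECTOR = 1, STACK = 2, INVALID = -1 };

constexpr uint16_t REG64_MASK = 0b1;

struct MockVMStorage {
  StorageType type;
  uint16_t    segment_mask;
  uint32_t    index_or_offset;   // register encoding, or stack offset in bytes
};

// Analogue of as_VMStorage(Register) from the header above.
constexpr MockVMStorage as_storage_int(uint32_t reg_encoding) {
  return { StorageType::INTEGER, REG64_MASK, reg_encoding };
}

// Analogue of as_Register(VMStorage): only meaningful for INTEGER storage.
constexpr uint32_t as_reg_encoding(MockVMStorage s) {
  return s.type == StorageType::INTEGER ? s.index_or_offset : ~0u;
}

int main() {
  MockVMStorage r19 = as_storage_int(19);   // x19 on AArch64
  assert(as_reg_encoding(r19) == 19);       // round-trips to the same encoding
  assert(r19.segment_mask == REG64_MASK);
  return 0;
}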

View File

@ -1,4 +1,5 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -29,9 +30,10 @@ RuntimeStub* DowncallLinker::make_downcall_stub(BasicType* signature,
int num_args,
BasicType ret_bt,
const ABIDescriptor& abi,
const GrowableArray<VMReg>& input_registers,
const GrowableArray<VMReg>& output_registers,
bool needs_return_buffer) {
const GrowableArray<VMStorage>& input_registers,
const GrowableArray<VMStorage>& output_registers,
bool needs_return_buffer,
int captured_state_mask) {
Unimplemented();
return nullptr;
}

View File

@ -1,4 +1,5 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -33,24 +34,19 @@ const ABIDescriptor ForeignGlobals::parse_abi_descriptor(jobject jabi) {
return {};
}
VMReg ForeignGlobals::vmstorage_to_vmreg(int type, int index) {
Unimplemented();
return VMRegImpl::Bad();
}
int RegSpiller::pd_reg_size(VMReg reg) {
int RegSpiller::pd_reg_size(VMStorage reg) {
Unimplemented();
return -1;
}
void RegSpiller::pd_store_reg(MacroAssembler* masm, int offset, VMReg reg) {
void RegSpiller::pd_store_reg(MacroAssembler* masm, int offset, VMStorage reg) {
Unimplemented();
}
void RegSpiller::pd_load_reg(MacroAssembler* masm, int offset, VMReg reg) {
void RegSpiller::pd_load_reg(MacroAssembler* masm, int offset, VMStorage reg) {
Unimplemented();
}
void ArgumentShuffle::pd_generate(MacroAssembler* masm, VMReg tmp, int in_stk_bias, int out_stk_bias) const {
void ArgumentShuffle::pd_generate(MacroAssembler* masm, VMStorage tmp, int in_stk_bias, int out_stk_bias, const StubLocations& locs) const {
Unimplemented();
}

View File

@ -0,0 +1,52 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef CPU_ARM_VMSTORAGE_ARM_INLINE_HPP
#define CPU_ARM_VMSTORAGE_ARM_INLINE_HPP
#include <cstdint>
#include "asm/register.hpp"
enum class StorageType : int8_t {
STACK = 0,
PLACEHOLDER = 1,
// special locations used only by native code
FRAME_DATA = PLACEHOLDER + 1,
INVALID = -1
};
// need to define this before constructing VMStorage (below)
constexpr inline bool VMStorage::is_reg(StorageType type) {
return false;
}
constexpr inline StorageType VMStorage::stack_type() { return StorageType::STACK; }
constexpr inline StorageType VMStorage::placeholder_type() { return StorageType::PLACEHOLDER; }
constexpr inline StorageType VMStorage::frame_data_type() { return StorageType::FRAME_DATA; }
inline VMStorage as_VMStorage(VMReg reg) {
ShouldNotReachHere();
return VMStorage::invalid();
}
#endif // CPU_ARM_VMSTORAGE_ARM_INLINE_HPP

View File

@ -30,9 +30,10 @@ RuntimeStub* DowncallLinker::make_downcall_stub(BasicType* signature,
int num_args,
BasicType ret_bt,
const ABIDescriptor& abi,
const GrowableArray<VMReg>& input_registers,
const GrowableArray<VMReg>& output_registers,
bool needs_return_buffer) {
const GrowableArray<VMStorage>& input_registers,
const GrowableArray<VMStorage>& output_registers,
bool needs_return_buffer,
int captured_state_mask) {
Unimplemented();
return nullptr;
}

View File

@ -35,24 +35,19 @@ const ABIDescriptor ForeignGlobals::parse_abi_descriptor(jobject jabi) {
return {};
}
VMReg ForeignGlobals::vmstorage_to_vmreg(int type, int index) {
Unimplemented();
return VMRegImpl::Bad();
}
int RegSpiller::pd_reg_size(VMReg reg) {
int RegSpiller::pd_reg_size(VMStorage reg) {
Unimplemented();
return -1;
}
void RegSpiller::pd_store_reg(MacroAssembler* masm, int offset, VMReg reg) {
void RegSpiller::pd_store_reg(MacroAssembler* masm, int offset, VMStorage reg) {
Unimplemented();
}
void RegSpiller::pd_load_reg(MacroAssembler* masm, int offset, VMReg reg) {
void RegSpiller::pd_load_reg(MacroAssembler* masm, int offset, VMStorage reg) {
Unimplemented();
}
void ArgumentShuffle::pd_generate(MacroAssembler* masm, VMReg tmp, int in_stk_bias, int out_stk_bias) const {
void ArgumentShuffle::pd_generate(MacroAssembler* masm, VMStorage tmp, int in_stk_bias, int out_stk_bias, const StubLocations& locs) const {
Unimplemented();
}

View File

@ -0,0 +1,52 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef CPU_PPC_VMSTORAGE_PPC_INLINE_HPP
#define CPU_PPC_VMSTORAGE_PPC_INLINE_HPP
#include <cstdint>
#include "asm/register.hpp"
enum class StorageType : int8_t {
STACK = 0,
PLACEHOLDER = 1,
// special locations used only by native code
FRAME_DATA = PLACEHOLDER + 1,
INVALID = -1
};
// need to define this before constructing VMStorage (below)
constexpr inline bool VMStorage::is_reg(StorageType type) {
return false;
}
constexpr inline StorageType VMStorage::stack_type() { return StorageType::STACK; }
constexpr inline StorageType VMStorage::placeholder_type() { return StorageType::PLACEHOLDER; }
constexpr inline StorageType VMStorage::frame_data_type() { return StorageType::FRAME_DATA; }
inline VMStorage as_VMStorage(VMReg reg) {
ShouldNotReachHere();
return VMStorage::invalid();
}
#endif // CPU_PPC_VMSTORAGE_PPC_INLINE_HPP

View File

@ -31,9 +31,10 @@ RuntimeStub* DowncallLinker::make_downcall_stub(BasicType* signature,
int num_args,
BasicType ret_bt,
const ABIDescriptor& abi,
const GrowableArray<VMReg>& input_registers,
const GrowableArray<VMReg>& output_registers,
bool needs_return_buffer) {
const GrowableArray<VMStorage>& input_registers,
const GrowableArray<VMStorage>& output_registers,
bool needs_return_buffer,
int captured_state_mask) {
Unimplemented();
return nullptr;
}

View File

@ -35,25 +35,19 @@ const ABIDescriptor ForeignGlobals::parse_abi_descriptor(jobject jabi) {
return {};
}
VMReg ForeignGlobals::vmstorage_to_vmreg(int type, int index) {
Unimplemented();
return VMRegImpl::Bad();
}
int RegSpiller::pd_reg_size(VMReg reg) {
int RegSpiller::pd_reg_size(VMStorage reg) {
Unimplemented();
return -1;
}
void RegSpiller::pd_store_reg(MacroAssembler* masm, int offset, VMReg reg) {
void RegSpiller::pd_store_reg(MacroAssembler* masm, int offset, VMStorage reg) {
Unimplemented();
}
void RegSpiller::pd_load_reg(MacroAssembler* masm, int offset, VMReg reg) {
void RegSpiller::pd_load_reg(MacroAssembler* masm, int offset, VMStorage reg) {
Unimplemented();
}
void ArgumentShuffle::pd_generate(MacroAssembler* masm, VMReg tmp, int in_stk_bias, int out_stk_bias) const {
void ArgumentShuffle::pd_generate(MacroAssembler* masm, VMStorage tmp, int in_stk_bias, int out_stk_bias, const StubLocations& locs) const {
Unimplemented();
}

View File

@ -0,0 +1,52 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef CPU_RISCV_VMSTORAGE_RISCV_INLINE_HPP
#define CPU_RISCV_VMSTORAGE_RISCV_INLINE_HPP
#include <cstdint>
#include "asm/register.hpp"
enum class StorageType : int8_t {
STACK = 0,
PLACEHOLDER = 1,
// special locations used only by native code
FRAME_DATA = PLACEHOLDER + 1,
INVALID = -1
};
// need to define this before constructing VMStorage (below)
constexpr inline bool VMStorage::is_reg(StorageType type) {
return false;
}
constexpr inline StorageType VMStorage::stack_type() { return StorageType::STACK; }
constexpr inline StorageType VMStorage::placeholder_type() { return StorageType::PLACEHOLDER; }
constexpr inline StorageType VMStorage::frame_data_type() { return StorageType::FRAME_DATA; }
inline VMStorage as_VMStorage(VMReg reg) {
ShouldNotReachHere();
return VMStorage::invalid();
}
#endif // CPU_RISCV_VMSTORAGE_RISCV_INLINE_HPP

View File

@ -1,4 +1,5 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -29,9 +30,10 @@ RuntimeStub* DowncallLinker::make_downcall_stub(BasicType* signature,
int num_args,
BasicType ret_bt,
const ABIDescriptor& abi,
const GrowableArray<VMReg>& input_registers,
const GrowableArray<VMReg>& output_registers,
bool needs_return_buffer) {
const GrowableArray<VMStorage>& input_registers,
const GrowableArray<VMStorage>& output_registers,
bool needs_return_buffer,
int captured_state_mask) {
Unimplemented();
return nullptr;
}

View File

@ -1,4 +1,5 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -33,24 +34,19 @@ const ABIDescriptor ForeignGlobals::parse_abi_descriptor(jobject jabi) {
return {};
}
VMReg ForeignGlobals::vmstorage_to_vmreg(int type, int index) {
Unimplemented();
return VMRegImpl::Bad();
}
int RegSpiller::pd_reg_size(VMReg reg) {
int RegSpiller::pd_reg_size(VMStorage reg) {
Unimplemented();
return -1;
}
void RegSpiller::pd_store_reg(MacroAssembler* masm, int offset, VMReg reg) {
void RegSpiller::pd_store_reg(MacroAssembler* masm, int offset, VMStorage reg) {
Unimplemented();
}
void RegSpiller::pd_load_reg(MacroAssembler* masm, int offset, VMReg reg) {
void RegSpiller::pd_load_reg(MacroAssembler* masm, int offset, VMStorage reg) {
Unimplemented();
}
void ArgumentShuffle::pd_generate(MacroAssembler* masm, VMReg tmp, int in_stk_bias, int out_stk_bias) const {
void ArgumentShuffle::pd_generate(MacroAssembler* masm, VMStorage tmp, int in_stk_bias, int out_stk_bias, const StubLocations& locs) const {
Unimplemented();
}

View File

@ -0,0 +1,52 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef CPU_S390_VMSTORAGE_S390_INLINE_HPP
#define CPU_S390_VMSTORAGE_S390_INLINE_HPP
#include <cstdint>
#include "asm/register.hpp"
enum class StorageType : int8_t {
STACK = 0,
PLACEHOLDER = 1,
// special locations used only by native code
FRAME_DATA = PLACEHOLDER + 1,
INVALID = -1
};
// need to define this before constructing VMStorage (below)
constexpr inline bool VMStorage::is_reg(StorageType type) {
return false;
}
constexpr inline StorageType VMStorage::stack_type() { return StorageType::STACK; }
constexpr inline StorageType VMStorage::placeholder_type() { return StorageType::PLACEHOLDER; }
constexpr inline StorageType VMStorage::frame_data_type() { return StorageType::FRAME_DATA; }
inline VMStorage as_VMStorage(VMReg reg) {
ShouldNotReachHere();
return VMStorage::invalid();
}
#endif // CPU_S390_VMSTORAGE_S390_INLINE_HPP

View File

@ -28,9 +28,10 @@ RuntimeStub* DowncallLinker::make_downcall_stub(BasicType* signature,
int num_args,
BasicType ret_bt,
const ABIDescriptor& abi,
const GrowableArray<VMReg>& input_registers,
const GrowableArray<VMReg>& output_registers,
bool needs_return_buffer) {
const GrowableArray<VMStorage>& input_registers,
const GrowableArray<VMStorage>& output_registers,
bool needs_return_buffer,
int captured_state_mask) {
Unimplemented();
return nullptr;
}

View File

@ -41,13 +41,14 @@ class DowncallStubGenerator : public StubCodeGenerator {
BasicType _ret_bt;
const ABIDescriptor& _abi;
const GrowableArray<VMReg>& _input_registers;
const GrowableArray<VMReg>& _output_registers;
const GrowableArray<VMStorage>& _input_registers;
const GrowableArray<VMStorage>& _output_registers;
bool _needs_return_buffer;
int _captured_state_mask;
int _frame_complete;
int _framesize;
int _frame_size_slots;
OopMapSet* _oop_maps;
public:
DowncallStubGenerator(CodeBuffer* buffer,
@ -55,9 +56,10 @@ public:
int num_args,
BasicType ret_bt,
const ABIDescriptor& abi,
const GrowableArray<VMReg>& input_registers,
const GrowableArray<VMReg>& output_registers,
bool needs_return_buffer)
const GrowableArray<VMStorage>& input_registers,
const GrowableArray<VMStorage>& output_registers,
bool needs_return_buffer,
int captured_state_mask)
: StubCodeGenerator(buffer, PrintMethodHandleStubs),
_signature(signature),
_num_args(num_args),
@ -66,8 +68,9 @@ public:
_input_registers(input_registers),
_output_registers(output_registers),
_needs_return_buffer(needs_return_buffer),
_captured_state_mask(captured_state_mask),
_frame_complete(0),
_framesize(0),
_frame_size_slots(0),
_oop_maps(NULL) {
}
@ -77,8 +80,8 @@ public:
return _frame_complete;
}
int framesize() const {
return (_framesize >> (LogBytesPerWord - LogBytesPerInt));
int framesize() const { // frame size in 64-bit words
return (_frame_size_slots >> (LogBytesPerWord - LogBytesPerInt));
}
OopMapSet* oop_maps() const {
@ -92,12 +95,15 @@ RuntimeStub* DowncallLinker::make_downcall_stub(BasicType* signature,
int num_args,
BasicType ret_bt,
const ABIDescriptor& abi,
const GrowableArray<VMReg>& input_registers,
const GrowableArray<VMReg>& output_registers,
bool needs_return_buffer) {
int locs_size = 64;
const GrowableArray<VMStorage>& input_registers,
const GrowableArray<VMStorage>& output_registers,
bool needs_return_buffer,
int captured_state_mask) {
int locs_size = 64;
CodeBuffer code("nep_invoker_blob", native_invoker_code_size, locs_size);
DowncallStubGenerator g(&code, signature, num_args, ret_bt, abi, input_registers, output_registers, needs_return_buffer);
DowncallStubGenerator g(&code, signature, num_args, ret_bt, abi,
input_registers, output_registers,
needs_return_buffer, captured_state_mask);
g.generate();
code.log_section_sizes("nep_invoker_blob");
@ -133,10 +139,10 @@ void DowncallStubGenerator::generate() {
// out arg area (e.g. for stack args)
};
Register shufffle_reg = rbx;
VMStorage shuffle_reg = as_VMStorage(rbx);
JavaCallingConvention in_conv;
NativeCallingConvention out_conv(_input_registers);
ArgumentShuffle arg_shuffle(_signature, _num_args, _signature, _num_args, &in_conv, &out_conv, shufffle_reg->as_VMReg());
ArgumentShuffle arg_shuffle(_signature, _num_args, _signature, _num_args, &in_conv, &out_conv, shuffle_reg);
#ifndef PRODUCT
LogTarget(Trace, foreign, downcall) lt;
@ -149,34 +155,38 @@ void DowncallStubGenerator::generate() {
// in bytes
int allocated_frame_size = 0;
if (_needs_return_buffer) {
allocated_frame_size += 8; // store address
}
allocated_frame_size += arg_shuffle.out_arg_stack_slots() << LogBytesPerInt;
allocated_frame_size += _abi._shadow_space_bytes;
allocated_frame_size += arg_shuffle.out_arg_bytes();
int ret_buf_addr_rsp_offset = -1;
if (_needs_return_buffer) {
// the above
ret_buf_addr_rsp_offset = allocated_frame_size - 8;
}
// when we don't use a return buffer we need to spill the return value around our slowpath calls
// when we use a return buffer case this SHOULD be unused.
// when we don't use a return buffer we need to spill the return value around our slow path calls
bool should_save_return_value = !_needs_return_buffer;
RegSpiller out_reg_spiller(_output_registers);
int spill_rsp_offset = -1;
if (!_needs_return_buffer) {
if (should_save_return_value) {
spill_rsp_offset = 0;
// spill area can be shared with the above, so we take the max of the 2
// spill area can be shared with shadow space and out args,
// since they are only used before the call,
// and spill area is only used after.
allocated_frame_size = out_reg_spiller.spill_size_bytes() > allocated_frame_size
? out_reg_spiller.spill_size_bytes()
: allocated_frame_size;
}
StubLocations locs;
locs.set(StubLocations::TARGET_ADDRESS, _abi._scratch1);
if (_needs_return_buffer) {
locs.set_frame_data(StubLocations::RETURN_BUFFER, allocated_frame_size);
allocated_frame_size += BytesPerWord;
}
if (_captured_state_mask != 0) {
locs.set_frame_data(StubLocations::CAPTURED_STATE_BUFFER, allocated_frame_size);
allocated_frame_size += BytesPerWord;
}
allocated_frame_size = align_up(allocated_frame_size, 16);
// _framesize is in 32-bit stack slots:
_framesize += framesize_base + (allocated_frame_size >> LogBytesPerInt);
assert(is_even(_framesize/2), "sp not 16-byte aligned");
_frame_size_slots += framesize_base + (allocated_frame_size >> LogBytesPerInt);
assert(is_even(_frame_size_slots/2), "sp not 16-byte aligned");
_oop_maps = new OopMapSet();
address start = __ pc();
@ -192,7 +202,7 @@ void DowncallStubGenerator::generate() {
__ block_comment("{ thread java2native");
__ set_last_Java_frame(rsp, rbp, (address)the_pc, rscratch1);
OopMap* map = new OopMap(_framesize, 0);
OopMap* map = new OopMap(_frame_size_slots, 0);
_oop_maps->add_gc_map(the_pc - start, map);
// State transition
@ -200,28 +210,22 @@ void DowncallStubGenerator::generate() {
__ block_comment("} thread java2native");
__ block_comment("{ argument shuffle");
arg_shuffle.generate(_masm, shufffle_reg->as_VMReg(), 0, _abi._shadow_space_bytes);
if (_needs_return_buffer) {
// spill our return buffer address
assert(ret_buf_addr_rsp_offset != -1, "no return buffer addr spill");
__ movptr(Address(rsp, ret_buf_addr_rsp_offset), _abi._ret_buf_addr_reg);
}
arg_shuffle.generate(_masm, shuffle_reg, 0, _abi._shadow_space_bytes, locs);
__ block_comment("} argument shuffle");
__ call(_abi._target_addr_reg);
__ call(as_Register(locs.get(StubLocations::TARGET_ADDRESS)));
// this call is assumed not to have killed r15_thread
if (_needs_return_buffer) {
assert(ret_buf_addr_rsp_offset != -1, "no return buffer addr spill");
__ movptr(rscratch1, Address(rsp, ret_buf_addr_rsp_offset));
__ movptr(rscratch1, Address(rsp, locs.data_offset(StubLocations::RETURN_BUFFER)));
int offset = 0;
for (int i = 0; i < _output_registers.length(); i++) {
VMReg reg = _output_registers.at(i);
if (reg->is_Register()) {
__ movptr(Address(rscratch1, offset), reg->as_Register());
VMStorage reg = _output_registers.at(i);
if (reg.type() == StorageType::INTEGER) {
__ movptr(Address(rscratch1, offset), as_Register(reg));
offset += 8;
} else if (reg->is_XMMRegister()) {
__ movdqu(Address(rscratch1, offset), reg->as_XMMRegister());
} else if (reg.type() == StorageType::VECTOR) {
__ movdqu(Address(rscratch1, offset), as_XMMRegister(reg));
offset += 16;
} else {
ShouldNotReachHere();
@ -229,6 +233,34 @@ void DowncallStubGenerator::generate() {
}
}
//////////////////////////////////////////////////////////////////////////////
if (_captured_state_mask != 0) {
__ block_comment("{ save thread local");
__ vzeroupper();
if (should_save_return_value) {
out_reg_spiller.generate_spill(_masm, spill_rsp_offset);
}
__ movptr(c_rarg0, Address(rsp, locs.data_offset(StubLocations::CAPTURED_STATE_BUFFER)));
__ movl(c_rarg1, _captured_state_mask);
__ mov(r12, rsp); // remember sp
__ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
__ andptr(rsp, -16); // align stack as required by ABI
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, DowncallLinker::capture_state)));
__ mov(rsp, r12); // restore sp
__ reinit_heapbase();
if (should_save_return_value) {
out_reg_spiller.generate_fill(_masm, spill_rsp_offset);
}
__ block_comment("} save thread local");
}
//////////////////////////////////////////////////////////////////////////////
__ block_comment("{ thread native2java");
__ restore_cpu_control_state_after_jni(rscratch1);
@ -272,7 +304,7 @@ void DowncallStubGenerator::generate() {
__ bind(L_safepoint_poll_slow_path);
__ vzeroupper();
if(!_needs_return_buffer) {
if (should_save_return_value) {
out_reg_spiller.generate_spill(_masm, spill_rsp_offset);
}
@ -284,7 +316,7 @@ void DowncallStubGenerator::generate() {
__ mov(rsp, r12); // restore sp
__ reinit_heapbase();
if(!_needs_return_buffer) {
if (should_save_return_value) {
out_reg_spiller.generate_fill(_masm, spill_rsp_offset);
}
@ -297,7 +329,7 @@ void DowncallStubGenerator::generate() {
__ bind(L_reguard);
__ vzeroupper();
if(!_needs_return_buffer) {
if (should_save_return_value) {
out_reg_spiller.generate_spill(_masm, spill_rsp_offset);
}
@ -308,7 +340,7 @@ void DowncallStubGenerator::generate() {
__ mov(rsp, r12); // restore sp
__ reinit_heapbase();
if(!_needs_return_buffer) {
if (should_save_return_value) {
out_reg_spiller.generate_fill(_masm, spill_rsp_offset);
}
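Both the AArch64 and x86 generators now route "special" stub addresses through StubLocations instead of dedicated ABI registers: TARGET_ADDRESS is bound to abi._scratch1, while RETURN_BUFFER and CAPTURED_STATE_BUFFER are bound to frame offsets via set_frame_data() and read back with data_offset(). The shared class itself is not shown in this diff; the sketch below mocks that lookup behaviour under an assumed API shape.

// Mock of the StubLocations lookup idea (assumed API shape, not the HotSpot class).
#include <cassert>
#include <map>

enum LocKey { TARGET_ADDRESS, RETURN_BUFFER, CAPTURED_STATE_BUFFER };

struct MockStubLocations {
  std::map<LocKey, int> reg_or_offset;      // register encoding or frame offset in bytes

  void set(LocKey k, int reg_encoding)      { reg_or_offset[k] = reg_encoding; }
  void set_frame_data(LocKey k, int offset) { reg_or_offset[k] = offset; }
  int  data_offset(LocKey k) const          { return reg_or_offset.at(k); }
};

int main() {
  MockStubLocations locs;
  int allocated_frame_size = 64;                        // bytes used so far
  locs.set(TARGET_ADDRESS, /*scratch reg encoding*/ 10);
  locs.set_frame_data(RETURN_BUFFER, allocated_frame_size);
  allocated_frame_size += 8;                            // BytesPerWord
  locs.set_frame_data(CAPTURED_STATE_BUFFER, allocated_frame_size);
  allocated_frame_size += 8;

  assert(locs.data_offset(RETURN_BUFFER) == 64);
  assert(locs.data_offset(CAPTURED_STATE_BUFFER) == 72);
  return 0;
}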

View File

@ -40,8 +40,8 @@ struct ABIDescriptor {
int32_t _stack_alignment_bytes;
int32_t _shadow_space_bytes;
Register _target_addr_reg;
Register _ret_buf_addr_reg;
VMStorage _scratch1;
VMStorage _scratch2;
bool is_volatile_reg(Register reg) const;
bool is_volatile_reg(XMMRegister reg) const;

View File

@ -33,24 +33,19 @@ const ABIDescriptor ForeignGlobals::parse_abi_descriptor(jobject jabi) {
return {};
}
VMReg ForeignGlobals::vmstorage_to_vmreg(int type, int index) {
Unimplemented();
return VMRegImpl::Bad();
}
int RegSpiller::pd_reg_size(VMReg reg) {
int RegSpiller::pd_reg_size(VMStorage reg) {
Unimplemented();
return -1;
}
void RegSpiller::pd_store_reg(MacroAssembler* masm, int offset, VMReg reg) {
void RegSpiller::pd_store_reg(MacroAssembler* masm, int offset, VMStorage reg) {
Unimplemented();
}
void RegSpiller::pd_load_reg(MacroAssembler* masm, int offset, VMReg reg) {
void RegSpiller::pd_load_reg(MacroAssembler* masm, int offset, VMStorage reg) {
Unimplemented();
}
void ArgumentShuffle::pd_generate(MacroAssembler* masm, VMReg tmp, int in_stk_bias, int out_stk_bias) const {
void ArgumentShuffle::pd_generate(MacroAssembler* masm, VMStorage tmp, int in_stk_bias, int out_stk_bias, const StubLocations& locs) const {
Unimplemented();
}

View File

@ -40,123 +40,154 @@ bool ABIDescriptor::is_volatile_reg(XMMRegister reg) const {
|| _vector_additional_volatile_registers.contains(reg);
}
static constexpr int INTEGER_TYPE = 0;
static constexpr int VECTOR_TYPE = 1;
static constexpr int X87_TYPE = 2;
const ABIDescriptor ForeignGlobals::parse_abi_descriptor(jobject jabi) {
oop abi_oop = JNIHandles::resolve_non_null(jabi);
ABIDescriptor abi;
objArrayOop inputStorage = jdk_internal_foreign_abi_ABIDescriptor::inputStorage(abi_oop);
parse_register_array(inputStorage, INTEGER_TYPE, abi._integer_argument_registers, as_Register);
parse_register_array(inputStorage, VECTOR_TYPE, abi._vector_argument_registers, as_XMMRegister);
parse_register_array(inputStorage, StorageType::INTEGER, abi._integer_argument_registers, as_Register);
parse_register_array(inputStorage, StorageType::VECTOR, abi._vector_argument_registers, as_XMMRegister);
objArrayOop outputStorage = jdk_internal_foreign_abi_ABIDescriptor::outputStorage(abi_oop);
parse_register_array(outputStorage, INTEGER_TYPE, abi._integer_return_registers, as_Register);
parse_register_array(outputStorage, VECTOR_TYPE, abi._vector_return_registers, as_XMMRegister);
objArrayOop subarray = oop_cast<objArrayOop>(outputStorage->obj_at(X87_TYPE));
parse_register_array(outputStorage, StorageType::INTEGER, abi._integer_return_registers, as_Register);
parse_register_array(outputStorage, StorageType::VECTOR, abi._vector_return_registers, as_XMMRegister);
objArrayOop subarray = oop_cast<objArrayOop>(outputStorage->obj_at((int) StorageType::X87));
abi._X87_return_registers_noof = subarray->length();
objArrayOop volatileStorage = jdk_internal_foreign_abi_ABIDescriptor::volatileStorage(abi_oop);
parse_register_array(volatileStorage, INTEGER_TYPE, abi._integer_additional_volatile_registers, as_Register);
parse_register_array(volatileStorage, VECTOR_TYPE, abi._vector_additional_volatile_registers, as_XMMRegister);
parse_register_array(volatileStorage, StorageType::INTEGER, abi._integer_additional_volatile_registers, as_Register);
parse_register_array(volatileStorage, StorageType::VECTOR, abi._vector_additional_volatile_registers, as_XMMRegister);
abi._stack_alignment_bytes = jdk_internal_foreign_abi_ABIDescriptor::stackAlignment(abi_oop);
abi._shadow_space_bytes = jdk_internal_foreign_abi_ABIDescriptor::shadowSpace(abi_oop);
abi._target_addr_reg = parse_vmstorage(jdk_internal_foreign_abi_ABIDescriptor::targetAddrStorage(abi_oop))->as_Register();
abi._ret_buf_addr_reg = parse_vmstorage(jdk_internal_foreign_abi_ABIDescriptor::retBufAddrStorage(abi_oop))->as_Register();
abi._scratch1 = parse_vmstorage(jdk_internal_foreign_abi_ABIDescriptor::scratch1(abi_oop));
abi._scratch2 = parse_vmstorage(jdk_internal_foreign_abi_ABIDescriptor::scratch2(abi_oop));
return abi;
}
enum class RegType {
INTEGER = 0,
VECTOR = 1,
X87 = 2,
STACK = 3
};
VMReg ForeignGlobals::vmstorage_to_vmreg(int type, int index) {
switch(static_cast<RegType>(type)) {
case RegType::INTEGER: return ::as_Register(index)->as_VMReg();
case RegType::VECTOR: return ::as_XMMRegister(index)->as_VMReg();
case RegType::STACK: return VMRegImpl::stack2reg(index LP64_ONLY(* 2)); // numbering on x64 goes per 64-bits
case RegType::X87: break;
}
return VMRegImpl::Bad();
}
int RegSpiller::pd_reg_size(VMReg reg) {
if (reg->is_Register()) {
int RegSpiller::pd_reg_size(VMStorage reg) {
if (reg.type() == StorageType::INTEGER) {
return 8;
} else if (reg->is_XMMRegister()) {
} else if (reg.type() == StorageType::VECTOR) {
return 16;
}
return 0; // stack and BAD
}
void RegSpiller::pd_store_reg(MacroAssembler* masm, int offset, VMReg reg) {
if (reg->is_Register()) {
masm->movptr(Address(rsp, offset), reg->as_Register());
} else if (reg->is_XMMRegister()) {
masm->movdqu(Address(rsp, offset), reg->as_XMMRegister());
void RegSpiller::pd_store_reg(MacroAssembler* masm, int offset, VMStorage reg) {
if (reg.type() == StorageType::INTEGER) {
masm->movptr(Address(rsp, offset), as_Register(reg));
} else if (reg.type() == StorageType::VECTOR) {
masm->movdqu(Address(rsp, offset), as_XMMRegister(reg));
} else {
// stack and BAD
}
}
void RegSpiller::pd_load_reg(MacroAssembler* masm, int offset, VMReg reg) {
if (reg->is_Register()) {
masm->movptr(reg->as_Register(), Address(rsp, offset));
} else if (reg->is_XMMRegister()) {
masm->movdqu(reg->as_XMMRegister(), Address(rsp, offset));
void RegSpiller::pd_load_reg(MacroAssembler* masm, int offset, VMStorage reg) {
if (reg.type() == StorageType::INTEGER) {
masm->movptr(as_Register(reg), Address(rsp, offset));
} else if (reg.type() == StorageType::VECTOR) {
masm->movdqu(as_XMMRegister(reg), Address(rsp, offset));
} else {
// stack and BAD
}
}
void ArgumentShuffle::pd_generate(MacroAssembler* masm, VMReg tmp, int in_stk_bias, int out_stk_bias) const {
Register tmp_reg = tmp->as_Register();
static constexpr int RBP_BIAS = 16; // skip old rbp and return address
static void move_reg64(MacroAssembler* masm, int out_stk_bias,
Register from_reg, VMStorage to_reg) {
int out_bias = 0;
switch (to_reg.type()) {
case StorageType::INTEGER:
assert(to_reg.segment_mask() == REG64_MASK, "only moves to 64-bit registers supported");
masm->movq(as_Register(to_reg), from_reg);
break;
case StorageType::STACK:
out_bias = out_stk_bias;
case StorageType::FRAME_DATA:
assert(to_reg.stack_size() == 8, "only moves with 64-bit targets supported");
masm->movq(Address(rsp, to_reg.offset() + out_bias), from_reg);
break;
default: ShouldNotReachHere();
}
}
static void move_stack64(MacroAssembler* masm, Register tmp_reg, int out_stk_bias,
Address from_address, VMStorage to_reg) {
int out_bias = 0;
switch (to_reg.type()) {
case StorageType::INTEGER:
assert(to_reg.segment_mask() == REG64_MASK, "only moves to 64-bit registers supported");
masm->movq(as_Register(to_reg), from_address);
break;
case StorageType::VECTOR:
assert(to_reg.segment_mask() == XMM_MASK, "only moves to xmm registers supported");
masm->movdqu(as_XMMRegister(to_reg), from_address);
break;
case StorageType::STACK:
out_bias = out_stk_bias;
case StorageType::FRAME_DATA:
assert(to_reg.stack_size() == 8, "only moves with 64-bit targets supported");
masm->movq(tmp_reg, from_address);
masm->movq(Address(rsp, to_reg.offset() + out_bias), tmp_reg);
break;
default: ShouldNotReachHere();
}
}
static void move_xmm(MacroAssembler* masm, int out_stk_bias,
XMMRegister from_reg, VMStorage to_reg) {
switch (to_reg.type()) {
case StorageType::INTEGER: // windows vargarg floats
assert(to_reg.segment_mask() == REG64_MASK, "only moves to 64-bit registers supported");
masm->movq(as_Register(to_reg), from_reg);
break;
case StorageType::VECTOR:
assert(to_reg.segment_mask() == XMM_MASK, "only moves to xmm registers supported");
masm->movdqu(as_XMMRegister(to_reg), from_reg);
break;
case StorageType::STACK:
assert(to_reg.stack_size() == 8, "only moves with 64-bit targets supported");
masm->movq(Address(rsp, to_reg.offset() + out_stk_bias), from_reg);
break;
default: ShouldNotReachHere();
}
}
void ArgumentShuffle::pd_generate(MacroAssembler* masm, VMStorage tmp, int in_stk_bias, int out_stk_bias, const StubLocations& locs) const {
Register tmp_reg = as_Register(tmp);
for (int i = 0; i < _moves.length(); i++) {
Move move = _moves.at(i);
BasicType arg_bt = move.bt;
VMRegPair from_vmreg = move.from;
VMRegPair to_vmreg = move.to;
VMStorage from_reg = move.from;
VMStorage to_reg = move.to;
masm->block_comment(err_msg("bt=%s", null_safe_string(type2name(arg_bt))));
switch (arg_bt) {
case T_BOOLEAN:
case T_BYTE:
case T_SHORT:
case T_CHAR:
case T_INT:
masm->move32_64(from_vmreg, to_vmreg, tmp_reg, in_stk_bias, out_stk_bias);
// replace any placeholders
if (from_reg.type() == StorageType::PLACEHOLDER) {
from_reg = locs.get(from_reg);
}
if (to_reg.type() == StorageType::PLACEHOLDER) {
to_reg = locs.get(to_reg);
}
switch (from_reg.type()) {
case StorageType::INTEGER:
assert(from_reg.segment_mask() == REG64_MASK, "only 64-bit register supported");
move_reg64(masm, out_stk_bias, as_Register(from_reg), to_reg);
break;
case T_FLOAT:
if (to_vmreg.first()->is_Register()) { // Windows vararg call
masm->movq(to_vmreg.first()->as_Register(), from_vmreg.first()->as_XMMRegister());
} else {
masm->float_move(from_vmreg, to_vmreg, tmp_reg, in_stk_bias, out_stk_bias);
}
case StorageType::VECTOR:
assert(from_reg.segment_mask() == XMM_MASK, "only xmm register supported");
move_xmm(masm, out_stk_bias, as_XMMRegister(from_reg), to_reg);
break;
case T_DOUBLE:
if (to_vmreg.first()->is_Register()) { // Windows vararg call
masm->movq(to_vmreg.first()->as_Register(), from_vmreg.first()->as_XMMRegister());
} else {
masm->double_move(from_vmreg, to_vmreg, tmp_reg, in_stk_bias, out_stk_bias);
}
break;
case T_LONG:
masm->long_move(from_vmreg, to_vmreg, tmp_reg, in_stk_bias, out_stk_bias);
break;
default:
fatal("found in upcall args: %s", type2name(arg_bt));
case StorageType::STACK: {
assert(from_reg.stack_size() == 8, "only stack_size 8 supported");
Address from_addr(rbp, RBP_BIAS + from_reg.offset() + in_stk_bias);
move_stack64(masm, tmp_reg, out_stk_bias, from_addr, to_reg);
} break;
default: ShouldNotReachHere();
}
}
}

View File

@ -56,9 +56,9 @@ public:
public:
// accessors
int raw_encoding() const { return this - first(); }
int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); }
bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; }
constexpr int raw_encoding() const { return this - first(); }
constexpr int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); }
constexpr bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; }
bool has_byte_register() const { return 0 <= raw_encoding() && raw_encoding() < number_of_byte_registers; }
// derived registers, offsets, and addresses
@ -74,7 +74,7 @@ public:
int operator==(const Register r) const { return _encoding == r._encoding; }
int operator!=(const Register r) const { return _encoding != r._encoding; }
const RegisterImpl* operator->() const { return RegisterImpl::first() + _encoding; }
constexpr const RegisterImpl* operator->() const { return RegisterImpl::first() + _encoding; }
};
extern Register::RegisterImpl all_RegisterImpls[Register::number_of_registers + 1] INTERNAL_VISIBILITY;
@ -202,9 +202,9 @@ public:
public:
// accessors
int raw_encoding() const { return this - first(); }
int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); }
bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; }
constexpr int raw_encoding() const { return this - first(); }
constexpr int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); }
constexpr bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; }
// derived registers, offsets, and addresses
inline XMMRegister successor() const;
@ -219,7 +219,7 @@ public:
int operator==(const XMMRegister r) const { return _encoding == r._encoding; }
int operator!=(const XMMRegister r) const { return _encoding != r._encoding; }
const XMMRegisterImpl* operator->() const { return XMMRegisterImpl::first() + _encoding; }
constexpr const XMMRegisterImpl* operator->() const { return XMMRegisterImpl::first() + _encoding; }
// Actually available XMM registers for use, depending on actual CPU capabilities and flags.
static int available_xmm_registers() {

View File

@ -175,12 +175,13 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
const CallRegs call_regs = ForeignGlobals::parse_call_regs(jconv);
CodeBuffer buffer("upcall_stub", /* code_size = */ 2048, /* locs_size = */ 1024);
Register shuffle_reg = rbx;
VMStorage shuffle_reg = as_VMStorage(rbx);
JavaCallingConvention out_conv;
NativeCallingConvention in_conv(call_regs._arg_regs);
ArgumentShuffle arg_shuffle(in_sig_bt, total_in_args, out_sig_bt, total_out_args, &in_conv, &out_conv, shuffle_reg->as_VMReg());
int stack_slots = SharedRuntime::out_preserve_stack_slots() + arg_shuffle.out_arg_stack_slots();
int out_arg_area = align_up(stack_slots * VMRegImpl::stack_slot_size, StackAlignmentInBytes);
ArgumentShuffle arg_shuffle(in_sig_bt, total_in_args, out_sig_bt, total_out_args, &in_conv, &out_conv, shuffle_reg);
int preserved_bytes = SharedRuntime::out_preserve_stack_slots() * VMRegImpl::stack_slot_size;
int stack_bytes = preserved_bytes + arg_shuffle.out_arg_bytes();
int out_arg_area = align_up(stack_bytes , StackAlignmentInBytes);
#ifndef PRODUCT
LogTarget(Trace, foreign, upcall) lt;
@ -208,10 +209,14 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
int frame_data_offset = reg_save_area_offset + reg_save_area_size;
int frame_bottom_offset = frame_data_offset + sizeof(UpcallStub::FrameData);
StubLocations locs;
int ret_buf_offset = -1;
if (needs_return_buffer) {
ret_buf_offset = frame_bottom_offset;
frame_bottom_offset += ret_buf_size;
// use a free register for shuffling code to pick up return
// buffer address from
locs.set(StubLocations::RETURN_BUFFER, abi._scratch1);
}
int frame_size = frame_bottom_offset;
@ -273,9 +278,9 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
arg_spilller.generate_fill(_masm, arg_save_area_offset);
if (needs_return_buffer) {
assert(ret_buf_offset != -1, "no return buffer allocated");
__ lea(abi._ret_buf_addr_reg, Address(rsp, ret_buf_offset));
__ lea(as_Register(locs.get(StubLocations::RETURN_BUFFER)), Address(rsp, ret_buf_offset));
}
arg_shuffle.generate(_masm, shuffle_reg->as_VMReg(), abi._shadow_space_bytes, 0);
arg_shuffle.generate(_masm, shuffle_reg, abi._shadow_space_bytes, 0, locs);
__ block_comment("} argument shuffle");
__ block_comment("{ receiver ");
@ -293,7 +298,7 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
if (!needs_return_buffer) {
#ifdef ASSERT
if (call_regs._ret_regs.length() == 1) { // 0 or 1
VMReg j_expected_result_reg;
VMStorage j_expected_result_reg;
switch (ret_type) {
case T_BOOLEAN:
case T_BYTE:
@ -301,19 +306,18 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
case T_CHAR:
case T_INT:
case T_LONG:
j_expected_result_reg = rax->as_VMReg();
j_expected_result_reg = as_VMStorage(rax);
break;
case T_FLOAT:
case T_DOUBLE:
j_expected_result_reg = xmm0->as_VMReg();
j_expected_result_reg = as_VMStorage(xmm0);
break;
default:
fatal("unexpected return type: %s", type2name(ret_type));
}
// No need to move for now, since CallArranger can pick a return type
// that goes in the same reg for both CCs. But, at least assert they are the same
assert(call_regs._ret_regs.at(0) == j_expected_result_reg,
"unexpected result register: %s != %s", call_regs._ret_regs.at(0)->name(), j_expected_result_reg->name());
assert(call_regs._ret_regs.at(0) == j_expected_result_reg, "unexpected result register");
}
#endif
} else {
@ -321,12 +325,12 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
__ lea(rscratch1, Address(rsp, ret_buf_offset));
int offset = 0;
for (int i = 0; i < call_regs._ret_regs.length(); i++) {
VMReg reg = call_regs._ret_regs.at(i);
if (reg->is_Register()) {
__ movptr(reg->as_Register(), Address(rscratch1, offset));
VMStorage reg = call_regs._ret_regs.at(i);
if (reg.type() == StorageType::INTEGER) {
__ movptr(as_Register(reg), Address(rscratch1, offset));
offset += 8;
} else if (reg->is_XMMRegister()) {
__ movdqu(reg->as_XMMRegister(), Address(rscratch1, offset));
} else if (reg.type() == StorageType::VECTOR) {
__ movdqu(as_XMMRegister(reg), Address(rscratch1, offset));
offset += 16;
} else {
ShouldNotReachHere();
@ -389,9 +393,13 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
receiver,
in_ByteSize(frame_data_offset));
if (TraceOptimizedUpcallStubs) {
blob->print_on(tty);
#ifndef PRODUCT
if (lt.is_enabled()) {
ResourceMark rm;
LogStream ls(lt);
blob->print_on(&ls);
}
#endif
return blob->code_begin();
}

View File

@ -0,0 +1,100 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef CPU_X86_VMSTORAGE_X86_INLINE_HPP
#define CPU_X86_VMSTORAGE_X86_INLINE_HPP
#include <cstdint>
#include "asm/register.hpp"
#include "code/vmreg.inline.hpp"
// keep in sync with jdk/internal/foreign/abi/x64/X86_64Architecture
enum class StorageType : int8_t {
INTEGER = 0,
VECTOR = 1,
X87 = 2,
STACK = 3,
PLACEHOLDER = 4,
// special locations used only by native code
FRAME_DATA = PLACEHOLDER + 1,
INVALID = -1
};
// need to define this before constructing VMStorage (below)
constexpr inline bool VMStorage::is_reg(StorageType type) {
return type == StorageType::INTEGER || type == StorageType::VECTOR || type == StorageType::X87;
}
constexpr inline StorageType VMStorage::stack_type() { return StorageType::STACK; }
constexpr inline StorageType VMStorage::placeholder_type() { return StorageType::PLACEHOLDER; }
constexpr inline StorageType VMStorage::frame_data_type() { return StorageType::FRAME_DATA; }
constexpr uint16_t REG64_MASK = 0b0000000000001111;
constexpr uint16_t XMM_MASK = 0b0000000000000001;
inline Register as_Register(VMStorage vms) {
assert(vms.type() == StorageType::INTEGER, "not the right type");
return ::as_Register(vms.index());
}
inline XMMRegister as_XMMRegister(VMStorage vms) {
assert(vms.type() == StorageType::VECTOR, "not the right type");
return ::as_XMMRegister(vms.index());
}
inline VMReg as_VMReg(VMStorage vms) {
switch (vms.type()) {
case StorageType::INTEGER: return as_Register(vms)->as_VMReg();
case StorageType::VECTOR: return as_XMMRegister(vms)->as_VMReg();
case StorageType::STACK: {
assert((vms.index() % VMRegImpl::stack_slot_size) == 0, "can not represent as VMReg");
return VMRegImpl::stack2reg(vms.index() / VMRegImpl::stack_slot_size);
}
default: ShouldNotReachHere(); return VMRegImpl::Bad();
}
}
constexpr inline VMStorage as_VMStorage(Register reg) {
return VMStorage::reg_storage(StorageType::INTEGER, REG64_MASK, reg->encoding());
}
constexpr inline VMStorage as_VMStorage(XMMRegister reg) {
return VMStorage::reg_storage(StorageType::VECTOR, XMM_MASK, reg->encoding());
}
inline VMStorage as_VMStorage(VMReg reg) {
if (reg->is_Register()) {
return as_VMStorage(reg->as_Register());
} else if (reg->is_XMMRegister()) {
return as_VMStorage(reg->as_XMMRegister());
} else if (reg->is_stack()) {
return VMStorage::stack_storage(reg);
} else if (!reg->is_valid()) {
return VMStorage::invalid();
}
ShouldNotReachHere();
return VMStorage::invalid();
}
#endif // CPU_X86_VMSTORAGE_X86_INLINE_HPP
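A minimal sketch, assuming the x86 conversions above (not part of the patch): a register can be round-tripped through VMStorage, since as_VMStorage records the encoding that as_Register/as_XMMRegister read back.
// Illustrative only, assuming the x86 definitions in this header.
void vmstorage_round_trip_example() {
  VMStorage s1 = as_VMStorage(rax);              // INTEGER storage, REG64_MASK, encoding of rax
  assert(s1.is_reg() && as_Register(s1) == rax, "round trip");
  VMStorage s2 = as_VMStorage(xmm0);             // VECTOR storage, XMM_MASK, encoding of xmm0
  assert(s2.is_reg() && as_XMMRegister(s2) == xmm0, "round trip");
}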

View File

@ -28,9 +28,10 @@ RuntimeStub* DowncallLinker::make_downcall_stub(BasicType* signature,
int num_args,
BasicType ret_bt,
const ABIDescriptor& abi,
const GrowableArray<VMReg>& input_registers,
const GrowableArray<VMReg>& output_registers,
bool needs_return_buffer) {
const GrowableArray<VMStorage>& input_registers,
const GrowableArray<VMStorage>& output_registers,
bool needs_return_buffer,
int captured_state_mask) {
Unimplemented();
return nullptr;
}

View File

@ -33,24 +33,19 @@ const ABIDescriptor ForeignGlobals::parse_abi_descriptor(jobject jabi) {
return {};
}
VMReg ForeignGlobals::vmstorage_to_vmreg(int type, int index) {
Unimplemented();
return VMRegImpl::Bad();
}
int RegSpiller::pd_reg_size(VMReg reg) {
int RegSpiller::pd_reg_size(VMStorage reg) {
Unimplemented();
return -1;
}
void RegSpiller::pd_store_reg(MacroAssembler* masm, int offset, VMReg reg) {
void RegSpiller::pd_store_reg(MacroAssembler* masm, int offset, VMStorage reg) {
Unimplemented();
}
void RegSpiller::pd_load_reg(MacroAssembler* masm, int offset, VMReg reg) {
void RegSpiller::pd_load_reg(MacroAssembler* masm, int offset, VMStorage reg) {
Unimplemented();
}
void ArgumentShuffle::pd_generate(MacroAssembler* masm, VMReg tmp, int in_stk_bias, int out_stk_bias) const {
void ArgumentShuffle::pd_generate(MacroAssembler* masm, VMStorage tmp, int in_stk_bias, int out_stk_bias, const StubLocations& locs) const {
Unimplemented();
}

View File

@ -0,0 +1,53 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef CPU_ZERO_VMSTORAGE_ZERO_INLINE_HPP
#define CPU_ZERO_VMSTORAGE_ZERO_INLINE_HPP
#include <cstdint>
#include "asm/register.hpp"
enum class StorageType : int8_t {
STACK = 0,
PLACEHOLDER = 1,
// special locations used only by native code
FRAME_DATA = PLACEHOLDER + 1,
INVALID = -1
};
// need to define this before constructing VMStorage (below)
constexpr inline bool VMStorage::is_reg(StorageType type) {
return false;
}
constexpr inline StorageType VMStorage::stack_type() { return StorageType::STACK; }
constexpr inline StorageType VMStorage::placeholder_type() { return StorageType::PLACEHOLDER; }
constexpr inline StorageType VMStorage::frame_data_type() { return StorageType::FRAME_DATA; }
inline VMStorage as_VMStorage(VMReg reg) {
ShouldNotReachHere();
return VMStorage::invalid();
}
#endif // CPU_ZERO_VMSTORAGE_ZERO_INLINE_HPP

View File

@ -4131,17 +4131,17 @@ int jdk_internal_foreign_abi_ABIDescriptor::_outputStorage_offset;
int jdk_internal_foreign_abi_ABIDescriptor::_volatileStorage_offset;
int jdk_internal_foreign_abi_ABIDescriptor::_stackAlignment_offset;
int jdk_internal_foreign_abi_ABIDescriptor::_shadowSpace_offset;
int jdk_internal_foreign_abi_ABIDescriptor::_targetAddrStorage_offset;
int jdk_internal_foreign_abi_ABIDescriptor::_retBufAddrStorage_offset;
int jdk_internal_foreign_abi_ABIDescriptor::_scratch1_offset;
int jdk_internal_foreign_abi_ABIDescriptor::_scratch2_offset;
#define ABIDescriptor_FIELDS_DO(macro) \
macro(_inputStorage_offset, k, "inputStorage", jdk_internal_foreign_abi_VMStorage_array_array_signature, false); \
macro(_outputStorage_offset, k, "outputStorage", jdk_internal_foreign_abi_VMStorage_array_array_signature, false); \
macro(_volatileStorage_offset, k, "volatileStorage", jdk_internal_foreign_abi_VMStorage_array_array_signature, false); \
macro(_stackAlignment_offset, k, "stackAlignment", int_signature, false); \
macro(_shadowSpace_offset, k, "shadowSpace", int_signature, false); \
macro(_targetAddrStorage_offset, k, "targetAddrStorage", jdk_internal_foreign_abi_VMStorage_signature, false); \
macro(_retBufAddrStorage_offset, k, "retBufAddrStorage", jdk_internal_foreign_abi_VMStorage_signature, false);
macro(_inputStorage_offset, k, "inputStorage", jdk_internal_foreign_abi_VMStorage_array_array_signature, false); \
macro(_outputStorage_offset, k, "outputStorage", jdk_internal_foreign_abi_VMStorage_array_array_signature, false); \
macro(_volatileStorage_offset, k, "volatileStorage", jdk_internal_foreign_abi_VMStorage_array_array_signature, false); \
macro(_stackAlignment_offset, k, "stackAlignment", int_signature, false); \
macro(_shadowSpace_offset, k, "shadowSpace", int_signature, false); \
macro(_scratch1_offset, k, "scratch1", jdk_internal_foreign_abi_VMStorage_signature, false); \
macro(_scratch2_offset, k, "scratch2", jdk_internal_foreign_abi_VMStorage_signature, false);
bool jdk_internal_foreign_abi_ABIDescriptor::is_instance(oop obj) {
return obj != NULL && is_subclass(obj->klass());
@ -4178,22 +4178,24 @@ jint jdk_internal_foreign_abi_ABIDescriptor::shadowSpace(oop entry) {
return entry->int_field(_shadowSpace_offset);
}
oop jdk_internal_foreign_abi_ABIDescriptor::targetAddrStorage(oop entry) {
return entry->obj_field(_targetAddrStorage_offset);
oop jdk_internal_foreign_abi_ABIDescriptor::scratch1(oop entry) {
return entry->obj_field(_scratch1_offset);
}
oop jdk_internal_foreign_abi_ABIDescriptor::retBufAddrStorage(oop entry) {
return entry->obj_field(_retBufAddrStorage_offset);
oop jdk_internal_foreign_abi_ABIDescriptor::scratch2(oop entry) {
return entry->obj_field(_scratch2_offset);
}
int jdk_internal_foreign_abi_VMStorage::_type_offset;
int jdk_internal_foreign_abi_VMStorage::_index_offset;
int jdk_internal_foreign_abi_VMStorage::_indexOrOffset_offset;
int jdk_internal_foreign_abi_VMStorage::_segmentMaskOrSize_offset;
int jdk_internal_foreign_abi_VMStorage::_debugName_offset;
#define VMStorage_FIELDS_DO(macro) \
macro(_type_offset, k, "type", int_signature, false); \
macro(_index_offset, k, "index", int_signature, false); \
macro(_debugName_offset, k, "debugName", string_signature, false); \
macro(_type_offset, k, "type", byte_signature, false); \
macro(_indexOrOffset_offset, k, "indexOrOffset", int_signature, false); \
macro(_segmentMaskOrSize_offset, k, "segmentMaskOrSize", short_signature, false); \
macro(_debugName_offset, k, "debugName", string_signature, false); \
bool jdk_internal_foreign_abi_VMStorage::is_instance(oop obj) {
return obj != NULL && is_subclass(obj->klass());
@ -4210,12 +4212,16 @@ void jdk_internal_foreign_abi_VMStorage::serialize_offsets(SerializeClosure* f)
}
#endif
jint jdk_internal_foreign_abi_VMStorage::type(oop entry) {
return entry->int_field(_type_offset);
jbyte jdk_internal_foreign_abi_VMStorage::type(oop entry) {
return entry->byte_field(_type_offset);
}
jint jdk_internal_foreign_abi_VMStorage::index(oop entry) {
return entry->int_field(_index_offset);
jint jdk_internal_foreign_abi_VMStorage::index_or_offset(oop entry) {
return entry->int_field(_indexOrOffset_offset);
}
jshort jdk_internal_foreign_abi_VMStorage::segment_mask_or_size(oop entry) {
return entry->short_field(_segmentMaskOrSize_offset);
}
oop jdk_internal_foreign_abi_VMStorage::debugName(oop entry) {

View File

@ -1121,8 +1121,8 @@ class jdk_internal_foreign_abi_ABIDescriptor: AllStatic {
static int _volatileStorage_offset;
static int _stackAlignment_offset;
static int _shadowSpace_offset;
static int _targetAddrStorage_offset;
static int _retBufAddrStorage_offset;
static int _scratch1_offset;
static int _scratch2_offset;
static void compute_offsets();
@ -1135,8 +1135,8 @@ class jdk_internal_foreign_abi_ABIDescriptor: AllStatic {
static objArrayOop volatileStorage(oop entry);
static jint stackAlignment(oop entry);
static jint shadowSpace(oop entry);
static oop targetAddrStorage(oop entry);
static oop retBufAddrStorage(oop entry);
static oop scratch1(oop entry);
static oop scratch2(oop entry);
// Testers
static bool is_subclass(Klass* klass) {
@ -1151,7 +1151,8 @@ class jdk_internal_foreign_abi_VMStorage: AllStatic {
private:
static int _type_offset;
static int _index_offset;
static int _indexOrOffset_offset;
static int _segmentMaskOrSize_offset;
static int _debugName_offset;
static void compute_offsets();
@ -1160,9 +1161,10 @@ class jdk_internal_foreign_abi_VMStorage: AllStatic {
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// Accessors
static jint type(oop entry);
static jint index(oop entry);
static oop debugName(oop entry);
static jbyte type(oop entry);
static jint index_or_offset(oop entry);
static jshort segment_mask_or_size(oop entry);
static oop debugName(oop entry);
// Testers
static bool is_subclass(Klass* klass) {

View File

@ -792,6 +792,7 @@ void UpcallStub::verify() {
void UpcallStub::print_on(outputStream* st) const {
RuntimeBlob::print_on(st);
print_value_on(st);
Disassembler::decode((RuntimeBlob*)this, st);
}
void UpcallStub::print_value_on(outputStream* st) const {

View File

@ -109,7 +109,7 @@ protected:
// that range. There is a similar range(s) on returns
// which we don't detect.
int _data_offset; // offset to where data region begins
int _frame_size; // size of stack frame
int _frame_size; // size of stack frame in words (NOT slots; on x64 these are 64-bit words)
bool _caller_must_gc_arguments;

View File

@ -0,0 +1,54 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "downcallLinker.hpp"
#include <cerrno>
#ifdef _WIN64
#include <Windows.h>
#include <Winsock2.h>
#endif
void DowncallLinker::capture_state(int32_t* value_ptr, int captured_state_mask) {
// keep in sync with jdk.internal.foreign.abi.CapturableState
enum PreservableValues {
NONE = 0,
GET_LAST_ERROR = 1,
WSA_GET_LAST_ERROR = 1 << 1,
ERRNO = 1 << 2
};
#ifdef _WIN64
if (captured_state_mask & GET_LAST_ERROR) {
*value_ptr = GetLastError();
value_ptr++;
}
if (captured_state_mask & WSA_GET_LAST_ERROR) {
*value_ptr = WSAGetLastError();
value_ptr++;
}
#endif
if (captured_state_mask & ERRNO) {
*value_ptr = errno;
}
}
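Values are written four bytes apiece, in ascending mask-bit order, and the pointer advances only for bits that are set. A standalone sketch of a reader that mirrors that layout (the helper below is an assumption for illustration, not part of the patch):
#include <cstdint>
// Illustrative only: locate the slot of 'wanted_bit' in a buffer filled by
// capture_state(), assuming 4-byte slots written in ascending mask-bit order.
static int32_t read_captured_value(const int32_t* buf, int captured_state_mask, int wanted_bit) {
  int slot = 0;
  for (int bit = 1; bit < wanted_bit; bit <<= 1) {
    if (captured_state_mask & bit) slot++;  // earlier captured values occupy preceding slots
  }
  return buf[slot];
}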

View File

@ -34,9 +34,12 @@ public:
int num_args,
BasicType ret_bt,
const ABIDescriptor& abi,
const GrowableArray<VMReg>& input_registers,
const GrowableArray<VMReg>& output_registers,
bool needs_return_buffer);
const GrowableArray<VMStorage>& input_registers,
const GrowableArray<VMStorage>& output_registers,
bool needs_return_buffer,
int captured_state_mask);
static void capture_state(int32_t* value_ptr, int captured_state_mask);
};
#endif // SHARE_VM_PRIMS_DOWNCALLLINKER_HPP

View File

@ -28,6 +28,39 @@
#include "prims/foreignGlobals.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
StubLocations::StubLocations() {
for (uint32_t i = 0; i < LOCATION_LIMIT; i++) {
_locs[i] = VMStorage::invalid();
}
}
void StubLocations::set(uint32_t loc, VMStorage storage) {
assert(loc < LOCATION_LIMIT, "oob");
_locs[loc] = storage;
}
void StubLocations::set_frame_data(uint32_t loc, int offset) {
set(loc, VMStorage(StorageType::FRAME_DATA, 8, offset));
}
VMStorage StubLocations::get(uint32_t loc) const {
assert(loc < LOCATION_LIMIT, "oob");
VMStorage storage = _locs[loc];
assert(storage.is_valid(), "not set");
return storage;
}
VMStorage StubLocations::get(VMStorage placeholder) const {
assert(placeholder.type() == StorageType::PLACEHOLDER, "must be");
return get(placeholder.index());
}
int StubLocations::data_offset(uint32_t loc) const {
VMStorage storage = get(loc);
assert(storage.type() == StorageType::FRAME_DATA, "must be");
return storage.offset();
}
#define FOREIGN_ABI "jdk/internal/foreign/abi/"
const CallRegs ForeignGlobals::parse_call_regs(jobject jconv) {
@ -49,13 +82,15 @@ const CallRegs ForeignGlobals::parse_call_regs(jobject jconv) {
return result;
}
VMReg ForeignGlobals::parse_vmstorage(oop storage) {
jint index = jdk_internal_foreign_abi_VMStorage::index(storage);
jint type = jdk_internal_foreign_abi_VMStorage::type(storage);
return vmstorage_to_vmreg(type, index);
VMStorage ForeignGlobals::parse_vmstorage(oop storage) {
jbyte type = jdk_internal_foreign_abi_VMStorage::type(storage);
jshort segment_mask_or_size = jdk_internal_foreign_abi_VMStorage::segment_mask_or_size(storage);
jint index_or_offset = jdk_internal_foreign_abi_VMStorage::index_or_offset(storage);
return VMStorage(static_cast<StorageType>(type), segment_mask_or_size, index_or_offset);
}
int RegSpiller::compute_spill_area(const GrowableArray<VMReg>& regs) {
int RegSpiller::compute_spill_area(const GrowableArray<VMStorage>& regs) {
int result_size = 0;
for (int i = 0; i < regs.length(); i++) {
result_size += pd_reg_size(regs.at(i));
@ -67,7 +102,7 @@ void RegSpiller::generate(MacroAssembler* masm, int rsp_offset, bool spill) cons
assert(rsp_offset != -1, "rsp_offset should be set");
int offset = rsp_offset;
for (int i = 0; i < _regs.length(); i++) {
VMReg reg = _regs.at(i);
VMStorage reg = _regs.at(i);
if (spill) {
pd_store_reg(masm, offset, reg);
} else {
@ -81,27 +116,23 @@ void ArgumentShuffle::print_on(outputStream* os) const {
os->print_cr("Argument shuffle {");
for (int i = 0; i < _moves.length(); i++) {
Move move = _moves.at(i);
BasicType arg_bt = move.bt;
VMRegPair from_vmreg = move.from;
VMRegPair to_vmreg = move.to;
BasicType arg_bt = move.bt;
VMStorage from_reg = move.from;
VMStorage to_reg = move.to;
os->print("Move a %s from (", null_safe_string(type2name(arg_bt)));
from_vmreg.first()->print_on(os);
os->print(",");
from_vmreg.second()->print_on(os);
os->print(") to (");
to_vmreg.first()->print_on(os);
os->print(",");
to_vmreg.second()->print_on(os);
os->print_cr(")");
os->print("Move a %s from ", null_safe_string(type2name(arg_bt)));
from_reg.print_on(os);
os->print(" to ");
to_reg.print_on(os);
os->print_cr("");
}
os->print_cr("Stack argument slots: %d", _out_arg_stack_slots);
os->print_cr("Stack argument bytes: %d", _out_arg_bytes);
os->print_cr("}");
}
int NativeCallingConvention::calling_convention(BasicType* sig_bt, VMRegPair* out_regs, int num_args) const {
int NativeCallingConvention::calling_convention(const BasicType* sig_bt, VMStorage* out_regs, int num_args) const {
int src_pos = 0;
int stk_slots = 0;
uint32_t max_stack_offset = 0;
for (int i = 0; i < num_args; i++) {
switch (sig_bt[i]) {
case T_BOOLEAN:
@ -110,53 +141,66 @@ int NativeCallingConvention::calling_convention(BasicType* sig_bt, VMRegPair* ou
case T_SHORT:
case T_INT:
case T_FLOAT: {
assert(src_pos < _input_regs.length(), "oob");
VMReg reg = _input_regs.at(src_pos++);
out_regs[i].set1(reg);
if (reg->is_stack())
stk_slots += 2;
VMStorage reg = _input_regs.at(src_pos++);
out_regs[i] = reg;
if (reg.is_stack())
max_stack_offset = MAX2(max_stack_offset, reg.offset() + reg.stack_size());
break;
}
case T_LONG:
case T_DOUBLE: {
assert((i + 1) < num_args && sig_bt[i + 1] == T_VOID, "expecting half");
assert(src_pos < _input_regs.length(), "oob");
VMReg reg = _input_regs.at(src_pos++);
out_regs[i].set2(reg);
if (reg->is_stack())
stk_slots += 2;
VMStorage reg = _input_regs.at(src_pos++);
out_regs[i] = reg;
if (reg.is_stack())
max_stack_offset = MAX2(max_stack_offset, reg.offset() + reg.stack_size());
break;
}
case T_VOID: // Halves of longs and doubles
assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
out_regs[i].set_bad();
out_regs[i] = VMStorage::invalid();
break;
default:
ShouldNotReachHere();
break;
}
}
return stk_slots;
return align_up(max_stack_offset, 8);
}
int JavaCallingConvention::calling_convention(const BasicType* sig_bt, VMStorage* regs, int num_args) const {
VMRegPair* vm_regs = NEW_RESOURCE_ARRAY(VMRegPair, num_args);
int slots = SharedRuntime::java_calling_convention(sig_bt, vm_regs, num_args);
for (int i = 0; i < num_args; i++) {
VMRegPair pair = vm_regs[i];
// note, we ignore second here. Signature should consist of register-size values. So there should be
// no need for multi-register pairs.
//assert(!pair.first()->is_valid() || pair.is_single_reg(), "must be: %s");
regs[i] = as_VMStorage(pair.first());
}
return slots << LogBytesPerInt;
}
class ComputeMoveOrder: public StackObj {
class MoveOperation: public ResourceObj {
friend class ComputeMoveOrder;
private:
VMRegPair _src;
VMRegPair _dst;
bool _processed;
MoveOperation* _next;
MoveOperation* _prev;
BasicType _bt;
VMStorage _src;
VMStorage _dst;
bool _processed;
MoveOperation* _next;
MoveOperation* _prev;
BasicType _bt;
static int get_id(VMRegPair r) {
return r.first()->value();
static int get_id(VMStorage r) {
assert((r.index_or_offset() & 0xFF000000) == 0, "index or offset too large");
// assuming mask and size doesn't matter for now
return ((int) r.type()) | (r.index_or_offset() << 8);
}
public:
MoveOperation(VMRegPair src, VMRegPair dst, BasicType bt)
: _src(src), _dst(dst), _processed(false), _next(NULL), _prev(NULL), _bt(bt) {}
MoveOperation(VMStorage src, VMStorage dst, BasicType bt):
_src(src), _dst(dst), _processed(false), _next(NULL), _prev(NULL), _bt(bt) {}
int src_id() const { return get_id(_src); }
int dst_id() const { return get_id(_dst); }
@ -166,7 +210,7 @@ class ComputeMoveOrder: public StackObj {
bool is_processed() const { return _processed; }
// insert
void break_cycle(VMRegPair temp_register) {
void break_cycle(VMStorage temp_register) {
// create a new store following the last store
// to move from the temp_register to the original
MoveOperation* new_store = new MoveOperation(temp_register, _dst, _bt);
@ -200,16 +244,17 @@ class ComputeMoveOrder: public StackObj {
private:
int _total_in_args;
const VMRegPair* _in_regs;
const VMStorage* _in_regs;
int _total_out_args;
const VMRegPair* _out_regs;
const VMStorage* _out_regs;
const BasicType* _in_sig_bt;
VMRegPair _tmp_vmreg;
VMStorage _tmp_vmreg;
GrowableArray<MoveOperation*> _edges;
GrowableArray<Move> _moves;
ComputeMoveOrder(int total_in_args, const VMRegPair* in_regs, int total_out_args, VMRegPair* out_regs,
const BasicType* in_sig_bt, VMRegPair tmp_vmreg) :
public:
ComputeMoveOrder(int total_in_args, const VMStorage* in_regs, int total_out_args, VMStorage* out_regs,
const BasicType* in_sig_bt, VMStorage tmp_vmreg) :
_total_in_args(total_in_args),
_in_regs(in_regs),
_total_out_args(total_out_args),
@ -232,16 +277,16 @@ class ComputeMoveOrder: public StackObj {
for (int in_idx = _total_in_args - 1, out_idx = _total_out_args - 1; in_idx >= 0; in_idx--, out_idx--) {
BasicType bt = _in_sig_bt[in_idx];
assert(bt != T_ARRAY, "array not expected");
VMRegPair in_reg = _in_regs[in_idx];
VMRegPair out_reg = _out_regs[out_idx];
VMStorage in_reg = _in_regs[in_idx];
VMStorage out_reg = _out_regs[out_idx];
if (out_reg.first()->is_stack()) {
if (out_reg.is_stack()) {
// Move operations where the dest is the stack can all be
// scheduled first since they can't interfere with the other moves.
// The input and output stack spaces are distinct from each other.
Move move{bt, in_reg, out_reg};
_moves.push(move);
} else if (in_reg.first() == out_reg.first()
} else if (in_reg == out_reg
|| bt == T_VOID) {
// 1. Can skip non-stack identity moves.
//
@ -259,8 +304,9 @@ class ComputeMoveOrder: public StackObj {
// Walk the edges breaking cycles between moves. The result list
// can be walked in order to produce the proper set of loads
void compute_store_order(VMRegPair temp_register) {
void compute_store_order(VMStorage temp_register) {
// Record which moves kill which values
// FIXME should be a map
GrowableArray<MoveOperation*> killer; // essentially a map of register id -> MoveOperation*
for (int i = 0; i < _edges.length(); i++) {
MoveOperation* s = _edges.at(i);
@ -304,9 +350,9 @@ class ComputeMoveOrder: public StackObj {
}
public:
static GrowableArray<Move> compute_move_order(int total_in_args, const VMRegPair* in_regs,
int total_out_args, VMRegPair* out_regs,
const BasicType* in_sig_bt, VMRegPair tmp_vmreg) {
static GrowableArray<Move> compute_move_order(int total_in_args, const VMStorage* in_regs,
int total_out_args, VMStorage* out_regs,
const BasicType* in_sig_bt, VMStorage tmp_vmreg) {
ComputeMoveOrder cmo(total_in_args, in_regs, total_out_args, out_regs, in_sig_bt, tmp_vmreg);
cmo.compute();
return cmo._moves;
@ -320,24 +366,15 @@ ArgumentShuffle::ArgumentShuffle(
int num_out_args,
const CallingConventionClosure* input_conv,
const CallingConventionClosure* output_conv,
VMReg shuffle_temp) {
VMStorage shuffle_temp) {
VMRegPair* in_regs = NEW_RESOURCE_ARRAY(VMRegPair, num_in_args);
VMStorage* in_regs = NEW_RESOURCE_ARRAY(VMStorage, num_in_args);
input_conv->calling_convention(in_sig_bt, in_regs, num_in_args);
VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, num_out_args);
_out_arg_stack_slots = output_conv->calling_convention(out_sig_bt, out_regs, num_out_args);
VMStorage* out_regs = NEW_RESOURCE_ARRAY(VMStorage, num_out_args);
_out_arg_bytes = output_conv->calling_convention(out_sig_bt, out_regs, num_out_args);
VMRegPair tmp_vmreg;
tmp_vmreg.set2(shuffle_temp);
// Compute a valid move order, using tmp_vmreg to break any cycles.
// Note that ComputeMoveOrder ignores the upper half of our VMRegPairs.
// We are not moving Java values here, only register-sized values,
// so we shouldn't have to worry about the upper half any ways.
// This should work fine on 32-bit as well, since we would only be
// moving 32-bit sized values (i.e. low-level MH shouldn't take any double/long).
_moves = ComputeMoveOrder::compute_move_order(num_in_args, in_regs,
num_out_args, out_regs,
in_sig_bt, tmp_vmreg);
in_sig_bt, shuffle_temp);
}

View File

@ -26,60 +26,80 @@
#include "code/vmreg.hpp"
#include "oops/oopsHierarchy.hpp"
#include "prims/vmstorage.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#include CPU_HEADER(foreignGlobals)
// needs to match StubLocations in Java code.
// placeholder locations to be filled in by
// the stub generation code
class StubLocations {
public:
enum Location : uint32_t {
TARGET_ADDRESS,
RETURN_BUFFER,
CAPTURED_STATE_BUFFER,
LOCATION_LIMIT
};
private:
VMStorage _locs[LOCATION_LIMIT];
public:
StubLocations();
void set(uint32_t loc, VMStorage storage);
void set_frame_data(uint32_t loc, int offset);
VMStorage get(uint32_t loc) const;
VMStorage get(VMStorage placeholder) const;
int data_offset(uint32_t loc) const;
};
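A hedged sketch of the intended use (the register choice and the x86 as_VMStorage helper are assumptions, not part of the patch): the stub generator assigns a concrete storage to a location, and a PLACEHOLDER VMStorage whose index names that location resolves to it.
// Illustrative only: assign and later resolve a stub location.
StubLocations locs;
locs.set(StubLocations::RETURN_BUFFER, as_VMStorage(rbx));               // any free register
VMStorage ph(StorageType::PLACEHOLDER, 0, StubLocations::RETURN_BUFFER); // placeholder naming the location
VMStorage resolved = locs.get(ph);                                       // same storage as as_VMStorage(rbx)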
class CallingConventionClosure {
public:
virtual int calling_convention(BasicType* sig_bt, VMRegPair* regs, int num_args) const = 0;
virtual int calling_convention(const BasicType* sig_bt, VMStorage* regs, int num_args) const = 0;
};
struct CallRegs {
GrowableArray<VMReg> _arg_regs;
GrowableArray<VMReg> _ret_regs;
GrowableArray<VMStorage> _arg_regs;
GrowableArray<VMStorage> _ret_regs;
CallRegs(int num_args, int num_rets)
: _arg_regs(num_args), _ret_regs(num_rets) {}
};
class ForeignGlobals {
private:
template<typename T, typename Func>
static void parse_register_array(objArrayOop jarray, int type_index, GrowableArray<T>& array, Func converter);
template<typename T>
static void parse_register_array(objArrayOop jarray, StorageType type_index, GrowableArray<T>& array, T (*converter)(int));
public:
static const ABIDescriptor parse_abi_descriptor(jobject jabi);
static const CallRegs parse_call_regs(jobject jconv);
static VMReg vmstorage_to_vmreg(int type, int index);
static VMReg parse_vmstorage(oop storage);
static VMStorage parse_vmstorage(oop storage);
};
class JavaCallingConvention : public CallingConventionClosure {
public:
int calling_convention(BasicType* sig_bt, VMRegPair* regs, int num_args) const override {
return SharedRuntime::java_calling_convention(sig_bt, regs, num_args);
}
int calling_convention(const BasicType* sig_bt, VMStorage* regs, int num_args) const override;
};
class NativeCallingConvention : public CallingConventionClosure {
GrowableArray<VMReg> _input_regs;
GrowableArray<VMStorage> _input_regs;
public:
NativeCallingConvention(const GrowableArray<VMReg>& input_regs)
NativeCallingConvention(const GrowableArray<VMStorage>& input_regs)
: _input_regs(input_regs) {}
int calling_convention(BasicType* sig_bt, VMRegPair* out_regs, int num_args) const override;
int calling_convention(const BasicType* sig_bt, VMStorage* out_regs, int num_args) const override;
};
class RegSpiller {
GrowableArray<VMReg> _regs;
GrowableArray<VMStorage> _regs;
int _spill_size_bytes;
public:
RegSpiller(const GrowableArray<VMReg>& regs) : _regs(regs), _spill_size_bytes(compute_spill_area(regs)) {
RegSpiller(const GrowableArray<VMStorage>& regs) : _regs(regs), _spill_size_bytes(compute_spill_area(regs)) {
}
int spill_size_bytes() const { return _spill_size_bytes; }
@ -87,39 +107,39 @@ public:
void generate_fill(MacroAssembler* masm, int rsp_offset) const { return generate(masm, rsp_offset, false); }
private:
static int compute_spill_area(const GrowableArray<VMReg>& regs);
static int compute_spill_area(const GrowableArray<VMStorage>& regs);
void generate(MacroAssembler* masm, int rsp_offset, bool is_spill) const;
static int pd_reg_size(VMReg reg);
static void pd_store_reg(MacroAssembler* masm, int offset, VMReg reg);
static void pd_load_reg(MacroAssembler* masm, int offset, VMReg reg);
static int pd_reg_size(VMStorage reg);
static void pd_store_reg(MacroAssembler* masm, int offset, VMStorage reg);
static void pd_load_reg(MacroAssembler* masm, int offset, VMStorage reg);
};
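A hedged sketch of the sizing, assuming the x86 pd_reg_size values from earlier in this patch (8 bytes per integer register, 16 per XMM register); the chosen registers are arbitrary.
// Illustrative only: the spill area is the sum of the per-register sizes.
GrowableArray<VMStorage> regs;
regs.push(as_VMStorage(rax));   // 8 bytes on x86
regs.push(as_VMStorage(xmm0));  // 16 bytes on x86
RegSpiller spiller(regs);
assert(spiller.spill_size_bytes() == 24, "8 + 16");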
struct Move {
BasicType bt;
VMRegPair from;
VMRegPair to;
VMStorage from;
VMStorage to;
};
class ArgumentShuffle {
private:
GrowableArray<Move> _moves;
int _out_arg_stack_slots;
int _out_arg_bytes;
public:
ArgumentShuffle(
BasicType* in_sig_bt, int num_in_args,
BasicType* out_sig_bt, int num_out_args,
const CallingConventionClosure* input_conv, const CallingConventionClosure* output_conv,
VMReg shuffle_temp);
VMStorage shuffle_temp);
int out_arg_stack_slots() const { return _out_arg_stack_slots; }
void generate(MacroAssembler* masm, VMReg tmp, int in_stk_bias, int out_stk_bias) const {
pd_generate(masm, tmp, in_stk_bias, out_stk_bias);
int out_arg_bytes() const { return _out_arg_bytes; }
void generate(MacroAssembler* masm, VMStorage tmp, int in_stk_bias, int out_stk_bias, const StubLocations& locs) const {
pd_generate(masm, tmp, in_stk_bias, out_stk_bias, locs);
}
void print_on(outputStream* os) const;
private:
void pd_generate(MacroAssembler* masm, VMReg tmp, int in_stk_bias, int out_stk_bias) const;
void pd_generate(MacroAssembler* masm, VMStorage tmp, int in_stk_bias, int out_stk_bias, const StubLocations& locs) const;
};
#endif // SHARE_PRIMS_FOREIGN_GLOBALS

View File

@ -31,13 +31,13 @@
#include "oops/objArrayOop.hpp"
#include "oops/oopCast.inline.hpp"
template<typename T, typename Func>
void ForeignGlobals::parse_register_array(objArrayOop jarray, int type_index, GrowableArray<T>& array, Func converter) {
objArrayOop subarray = oop_cast<objArrayOop>(jarray->obj_at(type_index));
template<typename T>
void ForeignGlobals::parse_register_array(objArrayOop jarray, StorageType type_index, GrowableArray<T>& array, T (*converter)(int)) {
objArrayOop subarray = oop_cast<objArrayOop>(jarray->obj_at((int) type_index));
int subarray_length = subarray->length();
for (int i = 0; i < subarray_length; i++) {
oop storage = subarray->obj_at(i);
jint index = jdk_internal_foreign_abi_VMStorage::index(storage);
jint index = jdk_internal_foreign_abi_VMStorage::index_or_offset(storage);
array.push(converter(index));
}
}

View File

@ -36,7 +36,8 @@
#include "runtime/jniHandles.inline.hpp"
JNI_ENTRY(jlong, NEP_makeDowncallStub(JNIEnv* env, jclass _unused, jobject method_type, jobject jabi,
jobjectArray arg_moves, jobjectArray ret_moves, jboolean needs_return_buffer))
jobjectArray arg_moves, jobjectArray ret_moves,
jboolean needs_return_buffer, jint captured_state_mask))
ResourceMark rm;
const ABIDescriptor abi = ForeignGlobals::parse_abi_descriptor(jabi);
@ -47,7 +48,7 @@ JNI_ENTRY(jlong, NEP_makeDowncallStub(JNIEnv* env, jclass _unused, jobject metho
int pslots = java_lang_invoke_MethodType::ptype_slot_count(type);
BasicType* basic_type = NEW_RESOURCE_ARRAY(BasicType, pslots);
GrowableArray<VMReg> input_regs(pcount);
GrowableArray<VMStorage> input_regs(pcount);
for (int i = 0, bt_idx = 0; i < pcount; i++) {
oop type_oop = java_lang_invoke_MethodType::ptype(type, i);
assert(java_lang_Class::is_primitive(type_oop), "Only primitives expected");
@ -65,7 +66,7 @@ JNI_ENTRY(jlong, NEP_makeDowncallStub(JNIEnv* env, jclass _unused, jobject metho
jint outs = ret_moves_oop->length();
GrowableArray<VMReg> output_regs(outs);
GrowableArray<VMStorage> output_regs(outs);
oop type_oop = java_lang_invoke_MethodType::rtype(type);
BasicType ret_bt = java_lang_Class::primitive_type(type_oop);
for (int i = 0; i < outs; i++) {
@ -74,8 +75,9 @@ JNI_ENTRY(jlong, NEP_makeDowncallStub(JNIEnv* env, jclass _unused, jobject metho
output_regs.push(ForeignGlobals::parse_vmstorage(ret_moves_oop->obj_at(i)));
}
return (jlong) DowncallLinker::make_downcall_stub(
basic_type, pslots, ret_bt, abi, input_regs, output_regs, needs_return_buffer)->code_begin();
return (jlong) DowncallLinker::make_downcall_stub(basic_type, pslots, ret_bt, abi,
input_regs, output_regs,
needs_return_buffer, captured_state_mask)->code_begin();
JNI_END
JNI_ENTRY(jboolean, NEP_freeDowncallStub(JNIEnv* env, jclass _unused, jlong invoker))
@ -95,7 +97,7 @@ JNI_END
#define VM_STORAGE_ARR "[Ljdk/internal/foreign/abi/VMStorage;"
static JNINativeMethod NEP_methods[] = {
{CC "makeDowncallStub", CC "(" METHOD_TYPE ABI_DESC VM_STORAGE_ARR VM_STORAGE_ARR "Z)J", FN_PTR(NEP_makeDowncallStub)},
{CC "makeDowncallStub", CC "(" METHOD_TYPE ABI_DESC VM_STORAGE_ARR VM_STORAGE_ARR "ZI)J", FN_PTR(NEP_makeDowncallStub)},
{CC "freeDowncallStub0", CC "(J)Z", FN_PTR(NEP_freeDowncallStub)},
};

View File

@ -0,0 +1,30 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "prims/vmstorage.hpp"
void VMStorage::print_on(outputStream* os) const {
os->print("{type=%d, index=%d, %s=%d}", static_cast<int8_t>(_type), _index_or_offset,
is_stack() ? "size" : "segment_mask", _segment_mask_or_size);
}

View File

@ -0,0 +1,100 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_PRIMS_VMSTORAGE_HPP
#define SHARE_PRIMS_VMSTORAGE_HPP
#include <cstdint>
#include "code/vmreg.hpp"
#include "utilities/debug.hpp"
#include "utilities/ostream.hpp"
enum class StorageType : int8_t; // defined in arch specific headers
class VMStorage {
public:
constexpr static StorageType INVALID_TYPE = static_cast<StorageType>(-1);
private:
StorageType _type;
// 1 byte of padding
uint16_t _segment_mask_or_size;
uint32_t _index_or_offset; // stack offset in bytes for stack storage
friend bool operator==(const VMStorage& a, const VMStorage& b);
constexpr inline static bool is_reg(StorageType type);
constexpr inline static StorageType stack_type();
constexpr inline static StorageType placeholder_type();
constexpr inline static StorageType frame_data_type();
public:
constexpr VMStorage() : _type(INVALID_TYPE), _segment_mask_or_size(0), _index_or_offset(0) {};
constexpr VMStorage(StorageType type, uint16_t segment_mask_or_size, uint32_t index_or_offset)
: _type(type), _segment_mask_or_size(segment_mask_or_size), _index_or_offset(index_or_offset) {};
constexpr static VMStorage reg_storage(StorageType type, uint16_t segment_mask, uint32_t index) {
assert(is_reg(type), "must be reg");
return VMStorage(type, segment_mask, index);
}
constexpr static VMStorage stack_storage(uint16_t size, uint32_t offset) {
return VMStorage(stack_type(), size, offset);
}
static VMStorage stack_storage(VMReg reg) {
return stack_storage(BytesPerWord, checked_cast<uint16_t>(reg->reg2stack() * VMRegImpl::stack_slot_size));
}
constexpr static VMStorage invalid() {
VMStorage result;
result._type = INVALID_TYPE;
return result;
}
StorageType type() const { return _type; }
// type specific accessors to make calling code more readable
uint16_t segment_mask() const { assert(is_reg(), "must be reg"); return _segment_mask_or_size; }
uint16_t stack_size() const { assert(is_stack() || is_frame_data(), "must be"); return _segment_mask_or_size; }
uint32_t index() const { assert(is_reg() || is_placeholder(), "must be"); return _index_or_offset; }
uint32_t offset() const { assert(is_stack() || is_frame_data(), "must be"); return _index_or_offset; }
uint32_t index_or_offset() const { assert(is_valid(), "must be valid"); return _index_or_offset; }
bool is_valid() const { return _type != INVALID_TYPE; }
bool is_reg() const { return is_reg(_type); }
bool is_stack() const { return _type == stack_type(); }
bool is_placeholder() const { return _type == placeholder_type(); }
bool is_frame_data() const { return _type == frame_data_type(); }
void print_on(outputStream* os) const;
};
inline bool operator==(const VMStorage& a, const VMStorage& b) {
return a._type == b._type
&& a._index_or_offset == b._index_or_offset
&& a._segment_mask_or_size == b._segment_mask_or_size;
}
#include CPU_HEADER(vmstorage)
#endif // SHARE_PRIMS_VMSTORAGE_HPP
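A minimal sketch of the type-guarded accessors (the values are arbitrary, not part of the patch): stack storage carries a byte offset and a size, and the register-only accessors assert if used on it.
// Illustrative only.
VMStorage slot = VMStorage::stack_storage(/* size */ 8, /* offset */ 16);
assert(slot.is_stack(), "stack storage");
assert(slot.offset() == 16 && slot.stack_size() == 8, "byte-based offset and size");
// slot.segment_mask() or slot.index() would assert here, since those are register/placeholder accessors.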

View File

@ -1987,9 +1987,6 @@ const int ObjectAlignmentInBytes = 8;
false AARCH64_ONLY(DEBUG_ONLY(||true)), \
"Mark all threads after a safepoint, and clear on a modify " \
"fence. Add cleanliness checks.") \
\
develop(bool, TraceOptimizedUpcallStubs, false, \
"Trace optimized upcall stub generation") \
// end of RUNTIME_FLAGS

View File

@ -27,12 +27,17 @@ package java.lang.foreign;
import jdk.internal.foreign.abi.AbstractLinker;
import jdk.internal.foreign.abi.LinkerOptions;
import jdk.internal.foreign.abi.CapturableState;
import jdk.internal.foreign.abi.SharedUtils;
import jdk.internal.javac.PreviewFeature;
import jdk.internal.reflect.CallerSensitive;
import jdk.internal.reflect.Reflection;
import java.lang.invoke.MethodHandle;
import java.util.Arrays;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
* A linker provides access to foreign functions from Java code, and access to Java code from foreign functions.
@ -282,7 +287,8 @@ public sealed interface Linker permits AbstractLinker {
*/
@PreviewFeature(feature=PreviewFeature.Feature.FOREIGN)
sealed interface Option
permits LinkerOptions.FirstVariadicArg {
permits LinkerOptions.LinkerOptionImpl,
Option.CaptureCallState {
/**
* {@return a linker option used to denote the index of the first variadic argument layout in a
@ -292,5 +298,71 @@ public sealed interface Linker permits AbstractLinker {
static Option firstVariadicArg(int index) {
return new LinkerOptions.FirstVariadicArg(index);
}
/**
* {@return a linker option used to save portions of the execution state immediately after
* calling a foreign function associated with a downcall method handle,
* before it can be overwritten by the Java runtime, or read through conventional means}
* <p>
* A downcall method handle linked with this option will feature an additional {@link MemorySegment}
* parameter directly following the target address parameter. This memory segment must be a
* native segment into which the captured state is written.
*
* @param capturedState the names of the values to save.
* @see CaptureCallState#supported()
*/
static CaptureCallState captureCallState(String... capturedState) {
Set<CapturableState> set = Stream.of(capturedState)
.map(CapturableState::forName)
.collect(Collectors.toSet());
return new LinkerOptions.CaptureCallStateImpl(set);
}
/**
* A linker option for saving portions of the execution state immediately
* after calling a foreign function associated with a downcall method handle,
* before it can be overwritten by the runtime, or read through conventional means.
* <p>
* Execution state is captured by a downcall method handle on invocation, by writing it
* to a native segment provided by the user to the downcall method handle.
* <p>
* The native segment should have the layout {@linkplain CaptureCallState#layout associated}
* with the particular {@code CaptureCallState} instance used to link the downcall handle.
* <p>
* Captured state can be retrieved from this native segment by constructing var handles
* from the {@linkplain #layout layout} associated with the {@code CaptureCallState} instance.
* <p>
* The following example demonstrates the use of this linker option:
* {@snippet lang = "java":
* MemorySegment targetAddress = ...
* CaptureCallState ccs = Linker.Option.captureCallState("errno");
* MethodHandle handle = Linker.nativeLinker().downcallHandle(targetAddress, FunctionDescriptor.ofVoid(), ccs);
*
* VarHandle errnoHandle = ccs.layout().varHandle(PathElement.groupElement("errno"));
* try (Arena arena = Arena.openConfined()) {
* MemorySegment capturedState = arena.allocate(ccs.layout());
* handle.invoke(capturedState);
* int errno = (int) errnoHandle.get(capturedState);
* // use errno
* }
* }
*/
sealed interface CaptureCallState extends Option
permits LinkerOptions.CaptureCallStateImpl {
/**
* {@return a struct layout that represents the layout of the native segment passed
* to a downcall handle linked with this {@code CaptureCallState} instance}
*/
StructLayout layout();
/**
* {@return the names of the state that can be captured by this implementation}
*/
static Set<String> supported() {
return Arrays.stream(CapturableState.values())
.map(CapturableState::stateName)
.collect(Collectors.toSet());
}
}
}
}

View File

@ -40,20 +40,29 @@ public class ABIDescriptor {
final int stackAlignment;
final int shadowSpace;
final VMStorage scratch1;
final VMStorage scratch2;
final VMStorage targetAddrStorage;
final VMStorage retBufAddrStorage;
final VMStorage capturedStateStorage;
public ABIDescriptor(Architecture arch, VMStorage[][] inputStorage, VMStorage[][] outputStorage,
VMStorage[][] volatileStorage, int stackAlignment, int shadowSpace,
VMStorage targetAddrStorage, VMStorage retBufAddrStorage) {
VMStorage scratch1, VMStorage scratch2,
VMStorage targetAddrStorage, VMStorage retBufAddrStorage,
VMStorage capturedStateStorage) {
this.arch = arch;
this.inputStorage = inputStorage;
this.outputStorage = outputStorage;
this.volatileStorage = volatileStorage;
this.stackAlignment = stackAlignment;
this.shadowSpace = shadowSpace;
this.scratch1 = scratch1;
this.scratch2 = scratch2;
this.targetAddrStorage = targetAddrStorage;
this.retBufAddrStorage = retBufAddrStorage;
this.capturedStateStorage = capturedStateStorage;
}
public VMStorage targetAddrStorage() {
@ -63,4 +72,8 @@ public class ABIDescriptor {
public VMStorage retBufAddrStorage() {
return retBufAddrStorage;
}
public VMStorage capturedStateStorage() {
return capturedStateStorage;
}
}

View File

@ -53,7 +53,7 @@ public abstract sealed class AbstractLinker implements Linker permits LinuxAArch
Objects.requireNonNull(function);
Objects.requireNonNull(options);
checkHasNaturalAlignment(function);
LinkerOptions optionSet = LinkerOptions.of(options);
LinkerOptions optionSet = LinkerOptions.forDowncall(function, options);
return DOWNCALL_CACHE.get(new LinkRequest(function, optionSet), linkRequest -> {
FunctionDescriptor fd = linkRequest.descriptor();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,5 +27,4 @@ package jdk.internal.foreign.abi;
public interface Architecture {
boolean isStackType(int cls);
int typeSize(int cls);
int stackType();
}

View File

@ -42,9 +42,12 @@ public class CallingSequence {
private final List<Binding> returnBindings;
private final List<List<Binding>> argumentBindings;
private final LinkerOptions linkerOptions;
public CallingSequence(boolean forUpcall, MethodType callerMethodType, MethodType calleeMethodType, FunctionDescriptor desc,
boolean needsReturnBuffer, long returnBufferSize, long allocationSize,
List<List<Binding>> argumentBindings, List<Binding> returnBindings) {
List<List<Binding>> argumentBindings, List<Binding> returnBindings,
LinkerOptions linkerOptions) {
this.forUpcall = forUpcall;
this.callerMethodType = callerMethodType;
this.calleeMethodType = calleeMethodType;
@ -54,6 +57,7 @@ public class CallingSequence {
this.allocationSize = allocationSize;
this.returnBindings = returnBindings;
this.argumentBindings = argumentBindings;
this.linkerOptions = linkerOptions;
}
/**
@ -181,6 +185,16 @@ public class CallingSequence {
return !returnBindings.isEmpty();
}
public int capturedStateMask() {
return linkerOptions.capturedCallState()
.mapToInt(CapturableState::mask)
.reduce(0, (a, b) -> a | b);
}
public int numLeadingParams() {
return 2 + (linkerOptions.hasCapturedCallState() ? 1 : 0); // 2 for addr, allocator
}
public String asString() {
StringBuilder sb = new StringBuilder();

View File

@ -47,6 +47,7 @@ public class CallingSequenceBuilder {
GetPropertyAction.privilegedGetProperty("java.lang.foreign.VERIFY_BINDINGS", "true"));
private final ABIDescriptor abi;
private final LinkerOptions linkerOptions;
private final boolean forUpcall;
private final List<List<Binding>> inputBindings = new ArrayList<>();
@ -55,9 +56,10 @@ public class CallingSequenceBuilder {
private MethodType mt = MethodType.methodType(void.class);
private FunctionDescriptor desc = FunctionDescriptor.ofVoid();
public CallingSequenceBuilder(ABIDescriptor abi, boolean forUpcall) {
public CallingSequenceBuilder(ABIDescriptor abi, boolean forUpcall, LinkerOptions linkerOptions) {
this.abi = abi;
this.forUpcall = forUpcall;
this.linkerOptions = linkerOptions;
}
public final CallingSequenceBuilder addArgumentBindings(Class<?> carrier, MemoryLayout layout,
@ -95,6 +97,11 @@ public class CallingSequenceBuilder {
MethodType callerMethodType;
MethodType calleeMethodType;
if (!forUpcall) {
if (linkerOptions.hasCapturedCallState()) {
addArgumentBinding(0, MemorySegment.class, ValueLayout.ADDRESS, List.of(
Binding.unboxAddress(),
Binding.vmStore(abi.capturedStateStorage(), long.class)));
}
addArgumentBinding(0, MemorySegment.class, ValueLayout.ADDRESS, List.of(
Binding.unboxAddress(),
Binding.vmStore(abi.targetAddrStorage(), long.class)));
@ -117,7 +124,7 @@ public class CallingSequenceBuilder {
calleeMethodType = mt;
}
return new CallingSequence(forUpcall, callerMethodType, calleeMethodType, desc, needsReturnBuffer,
returnBufferSize, allocationSize, inputBindings, outputBindings);
returnBufferSize, allocationSize, inputBindings, outputBindings, linkerOptions);
}
private MethodType computeCallerTypeForUpcall() {

View File

@ -0,0 +1,70 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.internal.foreign.abi;
import java.lang.foreign.ValueLayout;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static java.lang.foreign.ValueLayout.JAVA_INT;
public enum CapturableState {
GET_LAST_ERROR ("GetLastError", JAVA_INT, 1 << 0),
WSA_GET_LAST_ERROR("WSAGetLastError", JAVA_INT, 1 << 1),
ERRNO ("errno", JAVA_INT, 1 << 2);
private final String stateName;
private final ValueLayout layout;
private final int mask;
CapturableState(String stateName, ValueLayout layout, int mask) {
this.stateName = stateName;
this.layout = layout.withName(stateName);
this.mask = mask;
}
public static CapturableState forName(String name) {
return Stream.of(values())
.filter(stl -> stl.stateName().equals(name))
.findAny()
.orElseThrow(() -> new IllegalArgumentException(
"Unknown name: " + name +", must be one of: "
+ Stream.of(CapturableState.values())
.map(CapturableState::stateName)
.collect(Collectors.joining(", "))));
}
public String stateName() {
return stateName;
}
public ValueLayout layout() {
return layout;
}
public int mask() {
return mask;
}
}
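The per-state bit masks above are what CallingSequence.capturedStateMask() ORs together before the mask is handed to the downcall stub. A small sketch of that folding (illustrative only, and assuming access to the jdk.internal.foreign.abi package):

import java.util.EnumSet;
import java.util.Set;
import jdk.internal.foreign.abi.CapturableState;

class MaskFolding {
    // Mirrors capturedStateMask(): ERRNO (1 << 2) together with GET_LAST_ERROR (1 << 0)
    // folds to 0b101 == 5.
    static int capturedStateMask(Set<CapturableState> saved) {
        return saved.stream()
                .mapToInt(CapturableState::mask)
                .reduce(0, (a, b) -> a | b);
    }

    public static void main(String[] args) {
        System.out.println(capturedStateMask(
                EnumSet.of(CapturableState.ERRNO, CapturableState.GET_LAST_ERROR))); // prints 5
    }
}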

View File

@ -85,7 +85,8 @@ public class DowncallLinker {
toStorageArray(argMoves),
toStorageArray(retMoves),
leafType,
callingSequence.needsReturnBuffer()
callingSequence.needsReturnBuffer(),
callingSequence.capturedStateMask()
);
MethodHandle handle = JLIA.nativeMethodHandle(nep);

View File

@ -24,28 +24,36 @@
*/
package jdk.internal.foreign.abi;
import java.lang.foreign.FunctionDescriptor;
import java.lang.foreign.Linker;
import java.lang.foreign.MemoryLayout;
import java.lang.foreign.StructLayout;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Stream;
public class LinkerOptions {
private static final LinkerOptions EMPTY = LinkerOptions.of();
private final Map<Class<?>, Linker.Option> optionsMap;
private static final LinkerOptions EMPTY = new LinkerOptions(Map.of());
private final Map<Class<?>, LinkerOptionImpl> optionsMap;
private LinkerOptions(Map<Class<?>, Linker.Option> optionsMap) {
private LinkerOptions(Map<Class<?>, LinkerOptionImpl> optionsMap) {
this.optionsMap = optionsMap;
}
public static LinkerOptions of(Linker.Option... options) {
Map<Class<?>, Linker.Option> optionMap = new HashMap<>();
public static LinkerOptions forDowncall(FunctionDescriptor desc, Linker.Option... options) {
Map<Class<?>, LinkerOptionImpl> optionMap = new HashMap<>();
for (Linker.Option option : options) {
if (optionMap.containsKey(option.getClass())) {
throw new IllegalArgumentException("Duplicate option: " + option);
}
optionMap.put(option.getClass(), option);
LinkerOptionImpl opImpl = (LinkerOptionImpl) option;
opImpl.validateForDowncall(desc);
optionMap.put(option.getClass(), opImpl);
}
return new LinkerOptions(optionMap);
@ -64,6 +72,15 @@ public class LinkerOptions {
return fva != null && argIndex >= fva.index();
}
public boolean hasCapturedCallState() {
return getOption(CaptureCallStateImpl.class) != null;
}
public Stream<CapturableState> capturedCallState() {
CaptureCallStateImpl stl = getOption(CaptureCallStateImpl.class);
return stl == null ? Stream.empty() : stl.saved().stream();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
@ -76,5 +93,39 @@ public class LinkerOptions {
return Objects.hash(optionsMap);
}
public record FirstVariadicArg(int index) implements Linker.Option { }
public sealed interface LinkerOptionImpl extends Linker.Option
permits FirstVariadicArg,
CaptureCallStateImpl {
default void validateForDowncall(FunctionDescriptor descriptor) {
throw new IllegalArgumentException("Not supported for downcall: " + this);
}
}
public record FirstVariadicArg(int index) implements LinkerOptionImpl {
@Override
public void validateForDowncall(FunctionDescriptor descriptor) {
if (index < 0 || index > descriptor.argumentLayouts().size()) {
throw new IllegalArgumentException("Index '" + index + "' not in bounds for descriptor: " + descriptor);
}
}
}
public record CaptureCallStateImpl(Set<CapturableState> saved) implements LinkerOptionImpl, Linker.Option.CaptureCallState {
@Override
public void validateForDowncall(FunctionDescriptor descriptor) {
// done during construction
}
@Override
public StructLayout layout() {
return MemoryLayout.structLayout(
saved.stream()
.sorted(Comparator.comparingInt(CapturableState::ordinal))
.map(CapturableState::layout)
.toArray(MemoryLayout[]::new)
);
}
}
}
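Note the ordering choice in CaptureCallStateImpl.layout(): members are sorted by enum ordinal, not by the order in which names were passed to captureCallState. A sketch of the layout that would result for captureCallState("errno", "GetLastError"), mirroring the code above (illustrative only):

import java.lang.foreign.MemoryLayout;
import java.lang.foreign.StructLayout;
import static java.lang.foreign.ValueLayout.JAVA_INT;

class CaptureLayoutSketch {
    // GET_LAST_ERROR (ordinal 0) precedes ERRNO (ordinal 2) regardless of request order,
    // so var handle lookups should go by member name rather than by argument position.
    static StructLayout errnoAndLastError() {
        return MemoryLayout.structLayout(
                JAVA_INT.withName("GetLastError"),
                JAVA_INT.withName("errno"));
    }
}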

View File

@ -47,7 +47,7 @@ public class NativeEntryPoint {
private static final SoftReferenceCache<CacheKey, NativeEntryPoint> NEP_CACHE = new SoftReferenceCache<>();
private record CacheKey(MethodType methodType, ABIDescriptor abi,
List<VMStorage> argMoves, List<VMStorage> retMoves,
boolean needsReturnBuffer) {}
boolean needsReturnBuffer, int capturedStateMask) {}
private NativeEntryPoint(MethodType methodType, long downcallStubAddress) {
this.methodType = methodType;
@ -56,26 +56,38 @@ public class NativeEntryPoint {
public static NativeEntryPoint make(ABIDescriptor abi,
VMStorage[] argMoves, VMStorage[] returnMoves,
MethodType methodType, boolean needsReturnBuffer) {
MethodType methodType,
boolean needsReturnBuffer,
int capturedStateMask) {
if (returnMoves.length > 1 != needsReturnBuffer) {
throw new IllegalArgumentException("Multiple register return, but needsReturnBuffer was false");
throw new AssertionError("Multiple register return, but needsReturnBuffer was false");
}
checkType(methodType, needsReturnBuffer, capturedStateMask);
assert (methodType.parameterType(0) == long.class) : "Address expected";
assert (!needsReturnBuffer || methodType.parameterType(1) == long.class) : "return buffer address expected";
CacheKey key = new CacheKey(methodType, abi, Arrays.asList(argMoves), Arrays.asList(returnMoves), needsReturnBuffer);
CacheKey key = new CacheKey(methodType, abi, Arrays.asList(argMoves), Arrays.asList(returnMoves), needsReturnBuffer, capturedStateMask);
return NEP_CACHE.get(key, k -> {
long downcallStub = makeDowncallStub(methodType, abi, argMoves, returnMoves, needsReturnBuffer);
long downcallStub = makeDowncallStub(methodType, abi, argMoves, returnMoves, needsReturnBuffer, capturedStateMask);
NativeEntryPoint nep = new NativeEntryPoint(methodType, downcallStub);
CLEANER.register(nep, () -> freeDowncallStub(downcallStub));
return nep;
});
}
private static void checkType(MethodType methodType, boolean needsReturnBuffer, int savedValueMask) {
if (methodType.parameterType(0) != long.class) {
throw new AssertionError("Address expected as first param: " + methodType);
}
int checkIdx = 1;
if ((needsReturnBuffer && methodType.parameterType(checkIdx++) != long.class)
|| (savedValueMask != 0 && methodType.parameterType(checkIdx) != long.class)) {
throw new AssertionError("return buffer and/or preserved value address expected: " + methodType);
}
}
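checkType() above encodes the shape of the leaf method type: the target address is always the first long parameter, followed by an optional return-buffer address and an optional captured-state address, then the real argument moves. A hedged sketch of a type it would accept for a downcall that needs a return buffer and captures call state (the trailing int argument is illustrative):

import java.lang.invoke.MethodType;

class LeafTypeSketch {
    // (target address, return-buffer address, captured-state address, int argument) -> void
    static final MethodType EXAMPLE =
            MethodType.methodType(void.class, long.class, long.class, long.class, int.class);
}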
private static native long makeDowncallStub(MethodType methodType, ABIDescriptor abi,
VMStorage[] encArgMoves, VMStorage[] encRetMoves,
boolean needsReturnBuffer);
boolean needsReturnBuffer,
int capturedStateMask);
private static native boolean freeDowncallStub0(long downcallStub);
private static void freeDowncallStub(long downcallStub) {

View File

@ -108,17 +108,18 @@ public final class SharedUtils {
* @param cDesc the function descriptor of the native function (with actual return layout)
* @return the adapted handle
*/
public static MethodHandle adaptDowncallForIMR(MethodHandle handle, FunctionDescriptor cDesc) {
public static MethodHandle adaptDowncallForIMR(MethodHandle handle, FunctionDescriptor cDesc, CallingSequence sequence) {
if (handle.type().returnType() != void.class)
throw new IllegalArgumentException("return expected to be void for in memory returns: " + handle.type());
if (handle.type().parameterType(2) != MemorySegment.class)
int imrAddrIdx = sequence.numLeadingParams();
if (handle.type().parameterType(imrAddrIdx) != MemorySegment.class)
throw new IllegalArgumentException("MemorySegment expected as third param: " + handle.type());
if (cDesc.returnLayout().isEmpty())
throw new IllegalArgumentException("Return layout needed: " + cDesc);
MethodHandle ret = identity(MemorySegment.class); // (MemorySegment) MemorySegment
handle = collectArguments(ret, 1, handle); // (MemorySegment, MemorySegment, SegmentAllocator, MemorySegment, ...) MemorySegment
handle = mergeArguments(handle, 0, 3); // (MemorySegment, MemorySegment, SegmentAllocator, ...) MemorySegment
handle = mergeArguments(handle, 0, 1 + imrAddrIdx); // (MemorySegment, MemorySegment, SegmentAllocator, ...) MemorySegment
handle = collectArguments(handle, 0, insertArguments(MH_ALLOC_BUFFER, 1, cDesc.returnLayout().get())); // (SegmentAllocator, MemorySegment, SegmentAllocator, ...) MemorySegment
handle = mergeArguments(handle, 0, 2); // (SegmentAllocator, MemorySegment, ...) MemorySegment
handle = swapArguments(handle, 0, 1); // (MemorySegment, SegmentAllocator, ...) MemorySegment

View File

@ -0,0 +1,36 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.internal.foreign.abi;
// must keep in sync with StubLocations in VM code
public enum StubLocations {
TARGET_ADDRESS,
RETURN_BUFFER,
CAPTURED_STATE_BUFFER;
public VMStorage storage(byte type) {
return new VMStorage(type, (short) 8, ordinal());
}
}

View File

@ -24,52 +24,23 @@
*/
package jdk.internal.foreign.abi;
import java.util.Objects;
/**
*
* @param type the type of storage, e.g. stack, or which register type (GP, FP, vector)
* @param segmentMaskOrSize the (on stack) size in bytes when type = stack, a register mask otherwise,
* the register mask indicates which segments of a register are used.
* @param indexOrOffset the index is either a register number within a type, or
* a stack offset in bytes if type = stack.
* (a particular platform might add a bias to this in generated code)
* @param debugName the debug name
*/
public record VMStorage(byte type,
short segmentMaskOrSize,
int indexOrOffset,
String debugName) {
public class VMStorage {
private final int type;
private final int index;
private final String debugName;
public VMStorage(int type, int index, String debugName) {
this.type = type;
this.index = index;
this.debugName = debugName;
public VMStorage(byte type, short segmentMaskOrSize, int indexOrOffset) {
this(type, segmentMaskOrSize, indexOrOffset, "Stack@" + indexOrOffset);
}
public int type() {
return type;
}
public int index() {
return index;
}
public String name() {
return debugName;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
VMStorage vmStorage = (VMStorage) o;
return type == vmStorage.type &&
index == vmStorage.index;
}
@Override
public int hashCode() {
return Objects.hash(type, index);
}
@Override
public String toString() {
return "VMStorage{" +
"type=" + type +
", index=" + index +
", debugName='" + debugName + '\'' +
'}';
}
}
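With the record shape above, the meaning of segmentMaskOrSize and indexOrOffset flips depending on type. Two illustrative instances (values chosen for the example, using the x64 type constants shown below; assumes access to the internal package):

import jdk.internal.foreign.abi.VMStorage;

class VMStorageExamples {
    // An 8-byte stack slot at byte offset 16 (x64 StorageType.STACK == 3):
    // segmentMaskOrSize carries the slot size, indexOrOffset the byte offset.
    static final VMStorage STACK_SLOT = new VMStorage((byte) 3, (short) 8, 16);

    // A placeholder later rebound by the downcall stub to a concrete location
    // (x64 StorageType.PLACEHOLDER == 4, index 0 == StubLocations.TARGET_ADDRESS.ordinal()).
    static final VMStorage TARGET_PLACEHOLDER = new VMStorage((byte) 4, (short) 8, 0);
}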

View File

@ -27,118 +27,118 @@ package jdk.internal.foreign.abi.aarch64;
import jdk.internal.foreign.abi.ABIDescriptor;
import jdk.internal.foreign.abi.Architecture;
import jdk.internal.foreign.abi.StubLocations;
import jdk.internal.foreign.abi.VMStorage;
public class AArch64Architecture implements Architecture {
public static final Architecture INSTANCE = new AArch64Architecture();
private static final short REG64_MASK = 0b0000_0000_0000_0001;
private static final short V128_MASK = 0b0000_0000_0000_0001;
private static final int INTEGER_REG_SIZE = 8;
private static final int VECTOR_REG_SIZE = 16;
private static final int STACK_SLOT_SIZE = 8;
@Override
public boolean isStackType(int cls) {
return cls == StorageClasses.STACK;
return cls == StorageType.STACK;
}
@Override
public int typeSize(int cls) {
switch (cls) {
case StorageClasses.INTEGER: return INTEGER_REG_SIZE;
case StorageClasses.VECTOR: return VECTOR_REG_SIZE;
case StorageClasses.STACK: return STACK_SLOT_SIZE;
case StorageType.INTEGER: return INTEGER_REG_SIZE;
case StorageType.VECTOR: return VECTOR_REG_SIZE;
// STACK is deliberately omitted
}
throw new IllegalArgumentException("Invalid Storage Class: " + cls);
}
@Override
public int stackType() {
return StorageClasses.STACK;
public interface StorageType {
byte INTEGER = 0;
byte VECTOR = 1;
byte STACK = 2;
byte PLACEHOLDER = 3;
}
public interface StorageClasses {
int INTEGER = 0;
int VECTOR = 1;
int STACK = 3;
public static class Regs { // break circular dependency
public static final VMStorage r0 = integerRegister(0);
public static final VMStorage r1 = integerRegister(1);
public static final VMStorage r2 = integerRegister(2);
public static final VMStorage r3 = integerRegister(3);
public static final VMStorage r4 = integerRegister(4);
public static final VMStorage r5 = integerRegister(5);
public static final VMStorage r6 = integerRegister(6);
public static final VMStorage r7 = integerRegister(7);
public static final VMStorage r8 = integerRegister(8);
public static final VMStorage r9 = integerRegister(9);
public static final VMStorage r10 = integerRegister(10);
public static final VMStorage r11 = integerRegister(11);
public static final VMStorage r12 = integerRegister(12);
public static final VMStorage r13 = integerRegister(13);
public static final VMStorage r14 = integerRegister(14);
public static final VMStorage r15 = integerRegister(15);
public static final VMStorage r16 = integerRegister(16);
public static final VMStorage r17 = integerRegister(17);
public static final VMStorage r18 = integerRegister(18);
public static final VMStorage r19 = integerRegister(19);
public static final VMStorage r20 = integerRegister(20);
public static final VMStorage r21 = integerRegister(21);
public static final VMStorage r22 = integerRegister(22);
public static final VMStorage r23 = integerRegister(23);
public static final VMStorage r24 = integerRegister(24);
public static final VMStorage r25 = integerRegister(25);
public static final VMStorage r26 = integerRegister(26);
public static final VMStorage r27 = integerRegister(27);
public static final VMStorage r28 = integerRegister(28);
public static final VMStorage r29 = integerRegister(29);
public static final VMStorage r30 = integerRegister(30);
public static final VMStorage r31 = integerRegister(31);
public static final VMStorage v0 = vectorRegister(0);
public static final VMStorage v1 = vectorRegister(1);
public static final VMStorage v2 = vectorRegister(2);
public static final VMStorage v3 = vectorRegister(3);
public static final VMStorage v4 = vectorRegister(4);
public static final VMStorage v5 = vectorRegister(5);
public static final VMStorage v6 = vectorRegister(6);
public static final VMStorage v7 = vectorRegister(7);
public static final VMStorage v8 = vectorRegister(8);
public static final VMStorage v9 = vectorRegister(9);
public static final VMStorage v10 = vectorRegister(10);
public static final VMStorage v11 = vectorRegister(11);
public static final VMStorage v12 = vectorRegister(12);
public static final VMStorage v13 = vectorRegister(13);
public static final VMStorage v14 = vectorRegister(14);
public static final VMStorage v15 = vectorRegister(15);
public static final VMStorage v16 = vectorRegister(16);
public static final VMStorage v17 = vectorRegister(17);
public static final VMStorage v18 = vectorRegister(18);
public static final VMStorage v19 = vectorRegister(19);
public static final VMStorage v20 = vectorRegister(20);
public static final VMStorage v21 = vectorRegister(21);
public static final VMStorage v22 = vectorRegister(22);
public static final VMStorage v23 = vectorRegister(23);
public static final VMStorage v24 = vectorRegister(24);
public static final VMStorage v25 = vectorRegister(25);
public static final VMStorage v26 = vectorRegister(26);
public static final VMStorage v27 = vectorRegister(27);
public static final VMStorage v28 = vectorRegister(28);
public static final VMStorage v29 = vectorRegister(29);
public static final VMStorage v30 = vectorRegister(30);
public static final VMStorage v31 = vectorRegister(31);
}
public static final VMStorage r0 = integerRegister(0);
public static final VMStorage r1 = integerRegister(1);
public static final VMStorage r2 = integerRegister(2);
public static final VMStorage r3 = integerRegister(3);
public static final VMStorage r4 = integerRegister(4);
public static final VMStorage r5 = integerRegister(5);
public static final VMStorage r6 = integerRegister(6);
public static final VMStorage r7 = integerRegister(7);
public static final VMStorage r8 = integerRegister(8);
public static final VMStorage r9 = integerRegister(9);
public static final VMStorage r10 = integerRegister(10);
public static final VMStorage r11 = integerRegister(11);
public static final VMStorage r12 = integerRegister(12);
public static final VMStorage r13 = integerRegister(13);
public static final VMStorage r14 = integerRegister(14);
public static final VMStorage r15 = integerRegister(15);
public static final VMStorage r16 = integerRegister(16);
public static final VMStorage r17 = integerRegister(17);
public static final VMStorage r18 = integerRegister(18);
public static final VMStorage r19 = integerRegister(19);
public static final VMStorage r20 = integerRegister(20);
public static final VMStorage r21 = integerRegister(21);
public static final VMStorage r22 = integerRegister(22);
public static final VMStorage r23 = integerRegister(23);
public static final VMStorage r24 = integerRegister(24);
public static final VMStorage r25 = integerRegister(25);
public static final VMStorage r26 = integerRegister(26);
public static final VMStorage r27 = integerRegister(27);
public static final VMStorage r28 = integerRegister(28);
public static final VMStorage r29 = integerRegister(29);
public static final VMStorage r30 = integerRegister(30);
public static final VMStorage r31 = integerRegister(31);
public static final VMStorage v0 = vectorRegister(0);
public static final VMStorage v1 = vectorRegister(1);
public static final VMStorage v2 = vectorRegister(2);
public static final VMStorage v3 = vectorRegister(3);
public static final VMStorage v4 = vectorRegister(4);
public static final VMStorage v5 = vectorRegister(5);
public static final VMStorage v6 = vectorRegister(6);
public static final VMStorage v7 = vectorRegister(7);
public static final VMStorage v8 = vectorRegister(8);
public static final VMStorage v9 = vectorRegister(9);
public static final VMStorage v10 = vectorRegister(10);
public static final VMStorage v11 = vectorRegister(11);
public static final VMStorage v12 = vectorRegister(12);
public static final VMStorage v13 = vectorRegister(13);
public static final VMStorage v14 = vectorRegister(14);
public static final VMStorage v15 = vectorRegister(15);
public static final VMStorage v16 = vectorRegister(16);
public static final VMStorage v17 = vectorRegister(17);
public static final VMStorage v18 = vectorRegister(18);
public static final VMStorage v19 = vectorRegister(19);
public static final VMStorage v20 = vectorRegister(20);
public static final VMStorage v21 = vectorRegister(21);
public static final VMStorage v22 = vectorRegister(22);
public static final VMStorage v23 = vectorRegister(23);
public static final VMStorage v24 = vectorRegister(24);
public static final VMStorage v25 = vectorRegister(25);
public static final VMStorage v26 = vectorRegister(26);
public static final VMStorage v27 = vectorRegister(27);
public static final VMStorage v28 = vectorRegister(28);
public static final VMStorage v29 = vectorRegister(29);
public static final VMStorage v30 = vectorRegister(30);
public static final VMStorage v31 = vectorRegister(31);
private static VMStorage integerRegister(int index) {
return new VMStorage(StorageClasses.INTEGER, index, "r" + index);
return new VMStorage(StorageType.INTEGER, REG64_MASK, index, "r" + index);
}
private static VMStorage vectorRegister(int index) {
return new VMStorage(StorageClasses.VECTOR, index, "v" + index);
return new VMStorage(StorageType.VECTOR, V128_MASK, index, "v" + index);
}
public static VMStorage stackStorage(int index) {
return new VMStorage(StorageClasses.STACK, index, "Stack@" + index);
public static VMStorage stackStorage(short size, int byteOffset) {
return new VMStorage(StorageType.STACK, size, byteOffset);
}
public static ABIDescriptor abiFor(VMStorage[] inputIntRegs,
@ -149,7 +149,7 @@ public class AArch64Architecture implements Architecture {
VMStorage[] volatileVectorRegs,
int stackAlignment,
int shadowSpace,
VMStorage targetAddrStorage, VMStorage retBufAddrStorage) {
VMStorage scratch1, VMStorage scratch2) {
return new ABIDescriptor(
INSTANCE,
new VMStorage[][] {
@ -166,7 +166,10 @@ public class AArch64Architecture implements Architecture {
},
stackAlignment,
shadowSpace,
targetAddrStorage, retBufAddrStorage);
scratch1, scratch2,
StubLocations.TARGET_ADDRESS.storage(StorageType.PLACEHOLDER),
StubLocations.RETURN_BUFFER.storage(StorageType.PLACEHOLDER),
StubLocations.CAPTURED_STATE_BUFFER.storage(StorageType.PLACEHOLDER));
}
}

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2021, Arm Limited. All rights reserved.
* Copyright (c) 2019, 2022, Arm Limited. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -50,6 +50,7 @@ import java.util.Optional;
import static jdk.internal.foreign.PlatformLayouts.*;
import static jdk.internal.foreign.abi.aarch64.AArch64Architecture.*;
import static jdk.internal.foreign.abi.aarch64.AArch64Architecture.Regs.*;
/**
* For the AArch64 C ABI specifically, this class uses CallingSequenceBuilder
@ -84,12 +85,11 @@ public abstract class CallArranger {
new VMStorage[] { r0, r1 },
new VMStorage[] { v0, v1, v2, v3 },
new VMStorage[] { r9, r10, r11, r12, r13, r14, r15 },
new VMStorage[] { v16, v17, v18, v19, v20, v21, v22, v23, v25,
new VMStorage[] { v16, v17, v18, v19, v20, v21, v22, v23, v24, v25,
v26, v27, v28, v29, v30, v31 },
16, // Stack is always 16 byte aligned on AArch64
0, // No shadow space
r9, // target addr reg
r10 // return buffer addr reg
r9, r10 // scratch 1 & 2
);
public record Bindings(CallingSequence callingSequence,
@ -119,7 +119,7 @@ public abstract class CallArranger {
}
public Bindings getBindings(MethodType mt, FunctionDescriptor cDesc, boolean forUpcall, LinkerOptions options) {
CallingSequenceBuilder csb = new CallingSequenceBuilder(C, forUpcall);
CallingSequenceBuilder csb = new CallingSequenceBuilder(C, forUpcall, options);
BindingCalculator argCalc = forUpcall ? new BoxBindingCalculator(true) : new UnboxBindingCalculator(true);
BindingCalculator retCalc = forUpcall ? new UnboxBindingCalculator(false) : new BoxBindingCalculator(false);
@ -152,7 +152,7 @@ public abstract class CallArranger {
MethodHandle handle = new DowncallLinker(C, bindings.callingSequence).getBoundMethodHandle();
if (bindings.isInMemoryReturn) {
handle = SharedUtils.adaptDowncallForIMR(handle, cDesc);
handle = SharedUtils.adaptDowncallForIMR(handle, cDesc, bindings.callingSequence);
}
return handle;
@ -186,32 +186,28 @@ public abstract class CallArranger {
this.forArguments = forArguments;
}
void alignStack(long alignment) {
stackOffset = Utils.alignUp(stackOffset, alignment);
}
VMStorage stackAlloc(long size, long alignment) {
assert forArguments : "no stack returns";
// Implementation limit: each arg must take up at least an 8 byte stack slot (on the Java side)
// There is currently no way to address stack offsets that are not multiples of 8 bytes
// The VM can only address multiple-of-4-bytes offsets, which is also not good enough for some ABIs
// see JDK-8283462 and related issues
long stackSlotAlignment = Math.max(alignment, STACK_SLOT_SIZE);
long alignedStackOffset = Utils.alignUp(stackOffset, stackSlotAlignment);
// macos-aarch64 ABI potentially requires addressing stack offsets that are not multiples of 8 bytes
// Reject such call types here, to prevent undefined behavior down the line
// Reject if the above stack-slot-aligned offset does not match the offset the ABI really wants
// Except for variadic arguments, which _are_ passed at 8-byte-aligned offsets
if (requiresSubSlotStackPacking() && alignedStackOffset != Utils.alignUp(stackOffset, alignment)
&& !forVarArgs) // varargs are given a pass on all aarch64 ABIs
throw new UnsupportedOperationException("Call type not supported on this platform");
long alignedStackOffset = Utils.alignUp(stackOffset, alignment);
stackOffset = alignedStackOffset;
short encodedSize = (short) size;
assert (encodedSize & 0xFFFF) == size;
VMStorage storage =
stackStorage((int)(stackOffset / STACK_SLOT_SIZE));
stackOffset += size;
AArch64Architecture.stackStorage(encodedSize, (int)alignedStackOffset);
stackOffset = alignedStackOffset + size;
return storage;
}
VMStorage stackAlloc(MemoryLayout layout) {
return stackAlloc(layout.byteSize(), layout.byteAlignment());
long stackSlotAlignment = requiresSubSlotStackPacking() && !forVarArgs
? layout.byteAlignment()
: Math.max(layout.byteAlignment(), STACK_SLOT_SIZE);
return stackAlloc(layout.byteSize(), stackSlotAlignment);
}
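A worked example of the reworked stackAlloc logic above, for arguments that do spill to the stack (offsets illustrative, derived from the code): two consecutive C short arguments (byteSize 2, byteAlignment 2) land at byte offsets 0 and 2 on an ABI with sub-slot packing (macos-aarch64, non-variadic), but at 0 and 8 on the other AArch64 ABIs, which round each argument up to an 8-byte slot.

class StackPackingTrace {
    static final long STACK_SLOT_SIZE = 8;

    // Mirrors stackAlloc(long size, long alignment): align the running offset,
    // hand out the slot, then advance by the argument's size.
    static long[] offsetsFor(long size, long alignment, int count) {
        long[] offsets = new long[count];
        long stackOffset = 0;
        for (int i = 0; i < count; i++) {
            long aligned = alignUp(stackOffset, alignment);
            offsets[i] = aligned;
            stackOffset = aligned + size;
        }
        return offsets;
    }

    static long alignUp(long v, long align) {
        return (v + align - 1) & -align; // align must be a power of two
    }

    public static void main(String[] args) {
        // sub-slot packing: alignment == layout alignment -> [0, 2]
        System.out.println(java.util.Arrays.toString(offsetsFor(2, 2, 2)));
        // default: alignment == max(2, STACK_SLOT_SIZE) -> [0, 8]
        System.out.println(java.util.Arrays.toString(offsetsFor(2, Math.max(2, STACK_SLOT_SIZE), 2)));
    }
}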
VMStorage[] regAlloc(int type, int count) {
@ -244,11 +240,31 @@ public abstract class CallArranger {
return storage[0];
}
VMStorage[] nextStorageForHFA(GroupLayout group) {
final int nFields = group.memberLayouts().size();
VMStorage[] regs = regAlloc(StorageType.VECTOR, nFields);
if (regs == null && requiresSubSlotStackPacking() && !forVarArgs) {
// For the ABI variants that pack arguments spilled to the
// stack, HFA arguments are spilled as if their individual
// fields had been allocated separately rather than as if the
// struct had been spilled as a whole.
VMStorage[] slots = new VMStorage[nFields];
for (int i = 0; i < nFields; i++) {
slots[i] = stackAlloc(group.memberLayouts().get(i));
}
return slots;
} else {
return regs;
}
}
void adjustForVarArgs() {
// This system passes all variadic parameters on the stack. Ensure
// no further arguments are allocated to registers.
nRegs[StorageClasses.INTEGER] = MAX_REGISTER_ARGUMENTS;
nRegs[StorageClasses.VECTOR] = MAX_REGISTER_ARGUMENTS;
nRegs[StorageType.INTEGER] = MAX_REGISTER_ARGUMENTS;
nRegs[StorageType.VECTOR] = MAX_REGISTER_ARGUMENTS;
forVarArgs = true;
}
}
@ -279,6 +295,12 @@ public abstract class CallArranger {
.vmStore(storage, type);
offset += STACK_SLOT_SIZE;
}
if (requiresSubSlotStackPacking()) {
// Pad to the next stack slot boundary instead of packing
// additional arguments into the unused space.
storageCalculator.alignStack(STACK_SLOT_SIZE);
}
}
protected void spillStructBox(Binding.Builder bindings, MemoryLayout layout) {
@ -298,6 +320,12 @@ public abstract class CallArranger {
.bufferStore(offset, type);
offset += STACK_SLOT_SIZE;
}
if (requiresSubSlotStackPacking()) {
// Pad to the next stack slot boundary instead of packing
// additional arguments into the unused space.
storageCalculator.alignStack(STACK_SLOT_SIZE);
}
}
abstract List<Binding> getBindings(Class<?> carrier, MemoryLayout layout);
@ -326,14 +354,14 @@ public abstract class CallArranger {
case STRUCT_REGISTER: {
assert carrier == MemorySegment.class;
VMStorage[] regs = storageCalculator.regAlloc(
StorageClasses.INTEGER, layout);
StorageType.INTEGER, layout);
if (regs != null) {
int regIndex = 0;
long offset = 0;
while (offset < layout.byteSize()) {
final long copy = Math.min(layout.byteSize() - offset, 8);
VMStorage storage = regs[regIndex++];
boolean useFloat = storage.type() == StorageClasses.VECTOR;
boolean useFloat = storage.type() == StorageType.VECTOR;
Class<?> type = SharedUtils.primitiveCarrierForSize(copy, useFloat);
if (offset + copy < layout.byteSize()) {
bindings.dup();
@ -352,21 +380,20 @@ public abstract class CallArranger {
bindings.copy(layout)
.unboxAddress();
VMStorage storage = storageCalculator.nextStorage(
StorageClasses.INTEGER, AArch64.C_POINTER);
StorageType.INTEGER, AArch64.C_POINTER);
bindings.vmStore(storage, long.class);
break;
}
case STRUCT_HFA: {
assert carrier == MemorySegment.class;
GroupLayout group = (GroupLayout)layout;
VMStorage[] regs = storageCalculator.regAlloc(
StorageClasses.VECTOR, group.memberLayouts().size());
VMStorage[] regs = storageCalculator.nextStorageForHFA(group);
if (regs != null) {
long offset = 0;
for (int i = 0; i < group.memberLayouts().size(); i++) {
VMStorage storage = regs[i];
final long size = group.memberLayouts().get(i).byteSize();
boolean useFloat = storage.type() == StorageClasses.VECTOR;
boolean useFloat = storage.type() == StorageType.VECTOR;
Class<?> type = SharedUtils.primitiveCarrierForSize(size, useFloat);
if (i + 1 < group.memberLayouts().size()) {
bindings.dup();
@ -383,19 +410,19 @@ public abstract class CallArranger {
case POINTER: {
bindings.unboxAddress();
VMStorage storage =
storageCalculator.nextStorage(StorageClasses.INTEGER, layout);
storageCalculator.nextStorage(StorageType.INTEGER, layout);
bindings.vmStore(storage, long.class);
break;
}
case INTEGER: {
VMStorage storage =
storageCalculator.nextStorage(StorageClasses.INTEGER, layout);
storageCalculator.nextStorage(StorageType.INTEGER, layout);
bindings.vmStore(storage, carrier);
break;
}
case FLOAT: {
VMStorage storage =
storageCalculator.nextStorage(StorageClasses.VECTOR, layout);
storageCalculator.nextStorage(StorageType.VECTOR, layout);
bindings.vmStore(storage, carrier);
break;
}
@ -428,7 +455,7 @@ public abstract class CallArranger {
assert carrier == MemorySegment.class;
bindings.allocate(layout);
VMStorage[] regs = storageCalculator.regAlloc(
StorageClasses.INTEGER, layout);
StorageType.INTEGER, layout);
if (regs != null) {
int regIndex = 0;
long offset = 0;
@ -436,7 +463,7 @@ public abstract class CallArranger {
final long copy = Math.min(layout.byteSize() - offset, 8);
VMStorage storage = regs[regIndex++];
bindings.dup();
boolean useFloat = storage.type() == StorageClasses.VECTOR;
boolean useFloat = storage.type() == StorageType.VECTOR;
Class<?> type = SharedUtils.primitiveCarrierForSize(copy, useFloat);
bindings.vmLoad(storage, type)
.bufferStore(offset, type);
@ -449,7 +476,7 @@ public abstract class CallArranger {
case STRUCT_REFERENCE -> {
assert carrier == MemorySegment.class;
VMStorage storage = storageCalculator.nextStorage(
StorageClasses.INTEGER, AArch64.C_POINTER);
StorageType.INTEGER, AArch64.C_POINTER);
bindings.vmLoad(storage, long.class)
.boxAddress(layout);
}
@ -457,14 +484,13 @@ public abstract class CallArranger {
assert carrier == MemorySegment.class;
bindings.allocate(layout);
GroupLayout group = (GroupLayout) layout;
VMStorage[] regs = storageCalculator.regAlloc(
StorageClasses.VECTOR, group.memberLayouts().size());
VMStorage[] regs = storageCalculator.nextStorageForHFA(group);
if (regs != null) {
long offset = 0;
for (int i = 0; i < group.memberLayouts().size(); i++) {
VMStorage storage = regs[i];
final long size = group.memberLayouts().get(i).byteSize();
boolean useFloat = storage.type() == StorageClasses.VECTOR;
boolean useFloat = storage.type() == StorageType.VECTOR;
Class<?> type = SharedUtils.primitiveCarrierForSize(size, useFloat);
bindings.dup()
.vmLoad(storage, type)
@ -477,18 +503,18 @@ public abstract class CallArranger {
}
case POINTER -> {
VMStorage storage =
storageCalculator.nextStorage(StorageClasses.INTEGER, layout);
storageCalculator.nextStorage(StorageType.INTEGER, layout);
bindings.vmLoad(storage, long.class)
.boxAddressRaw(Utils.pointeeSize(layout));
}
case INTEGER -> {
VMStorage storage =
storageCalculator.nextStorage(StorageClasses.INTEGER, layout);
storageCalculator.nextStorage(StorageType.INTEGER, layout);
bindings.vmLoad(storage, carrier);
}
case FLOAT -> {
VMStorage storage =
storageCalculator.nextStorage(StorageClasses.VECTOR, layout);
storageCalculator.nextStorage(StorageType.VECTOR, layout);
bindings.vmLoad(storage, carrier);
}
default -> throw new UnsupportedOperationException("Unhandled class " + argumentClass);

View File

@ -26,116 +26,126 @@ package jdk.internal.foreign.abi.x64;
import jdk.internal.foreign.abi.ABIDescriptor;
import jdk.internal.foreign.abi.Architecture;
import jdk.internal.foreign.abi.StubLocations;
import jdk.internal.foreign.abi.VMStorage;
import java.util.stream.IntStream;
public class X86_64Architecture implements Architecture {
public static final Architecture INSTANCE = new X86_64Architecture();
private static final short REG8_H_MASK = 0b0000_0000_0000_0010;
private static final short REG8_L_MASK = 0b0000_0000_0000_0001;
private static final short REG16_MASK = 0b0000_0000_0000_0011;
private static final short REG32_MASK = 0b0000_0000_0000_0111;
private static final short REG64_MASK = 0b0000_0000_0000_1111;
private static final short XMM_MASK = 0b0000_0000_0000_0001;
private static final short YMM_MASK = 0b0000_0000_0000_0011;
private static final short ZMM_MASK = 0b0000_0000_0000_0111;
private static final short STP_MASK = 0b0000_0000_0000_0001;
private static final int INTEGER_REG_SIZE = 8; // bytes
private static final int VECTOR_REG_SIZE = 16; // size of XMM register
private static final int X87_REG_SIZE = 16;
private static final int STACK_SLOT_SIZE = 8;
@Override
public boolean isStackType(int cls) {
return cls == StorageClasses.STACK;
return cls == StorageType.STACK;
}
@Override
public int typeSize(int cls) {
switch (cls) {
case StorageClasses.INTEGER: return INTEGER_REG_SIZE;
case StorageClasses.VECTOR: return VECTOR_REG_SIZE;
case StorageClasses.X87: return X87_REG_SIZE;
case StorageClasses.STACK: return STACK_SLOT_SIZE;
case StorageType.INTEGER: return INTEGER_REG_SIZE;
case StorageType.VECTOR: return VECTOR_REG_SIZE;
case StorageType.X87: return X87_REG_SIZE;
// STACK is deliberately omitted
}
throw new IllegalArgumentException("Invalid Storage Class: " +cls);
}
@Override
public int stackType() {
return StorageClasses.STACK;
// must keep in sync with StorageType in VM code
public interface StorageType {
byte INTEGER = 0;
byte VECTOR = 1;
byte X87 = 2;
byte STACK = 3;
byte PLACEHOLDER = 4;
}
public interface StorageClasses {
int INTEGER = 0;
int VECTOR = 1;
int X87 = 2;
int STACK = 3;
public static class Regs { // break circular dependency
public static final VMStorage rax = integerRegister(0, "rax");
public static final VMStorage rcx = integerRegister(1, "rcx");
public static final VMStorage rdx = integerRegister(2, "rdx");
public static final VMStorage rbx = integerRegister(3, "rbx");
public static final VMStorage rsp = integerRegister(4, "rsp");
public static final VMStorage rbp = integerRegister(5, "rbp");
public static final VMStorage rsi = integerRegister(6, "rsi");
public static final VMStorage rdi = integerRegister(7, "rdi");
public static final VMStorage r8 = integerRegister(8, "r8");
public static final VMStorage r9 = integerRegister(9, "r9");
public static final VMStorage r10 = integerRegister(10, "r10");
public static final VMStorage r11 = integerRegister(11, "r11");
public static final VMStorage r12 = integerRegister(12, "r12");
public static final VMStorage r13 = integerRegister(13, "r13");
public static final VMStorage r14 = integerRegister(14, "r14");
public static final VMStorage r15 = integerRegister(15, "r15");
public static final VMStorage xmm0 = vectorRegister(0, "xmm0");
public static final VMStorage xmm1 = vectorRegister(1, "xmm1");
public static final VMStorage xmm2 = vectorRegister(2, "xmm2");
public static final VMStorage xmm3 = vectorRegister(3, "xmm3");
public static final VMStorage xmm4 = vectorRegister(4, "xmm4");
public static final VMStorage xmm5 = vectorRegister(5, "xmm5");
public static final VMStorage xmm6 = vectorRegister(6, "xmm6");
public static final VMStorage xmm7 = vectorRegister(7, "xmm7");
public static final VMStorage xmm8 = vectorRegister(8, "xmm8");
public static final VMStorage xmm9 = vectorRegister(9, "xmm9");
public static final VMStorage xmm10 = vectorRegister(10, "xmm10");
public static final VMStorage xmm11 = vectorRegister(11, "xmm11");
public static final VMStorage xmm12 = vectorRegister(12, "xmm12");
public static final VMStorage xmm13 = vectorRegister(13, "xmm13");
public static final VMStorage xmm14 = vectorRegister(14, "xmm14");
public static final VMStorage xmm15 = vectorRegister(15, "xmm15");
public static final VMStorage xmm16 = vectorRegister(16, "xmm16");
public static final VMStorage xmm17 = vectorRegister(17, "xmm17");
public static final VMStorage xmm18 = vectorRegister(18, "xmm18");
public static final VMStorage xmm19 = vectorRegister(19, "xmm19");
public static final VMStorage xmm20 = vectorRegister(20, "xmm20");
public static final VMStorage xmm21 = vectorRegister(21, "xmm21");
public static final VMStorage xmm22 = vectorRegister(22, "xmm22");
public static final VMStorage xmm23 = vectorRegister(23, "xmm23");
public static final VMStorage xmm24 = vectorRegister(24, "xmm24");
public static final VMStorage xmm25 = vectorRegister(25, "xmm25");
public static final VMStorage xmm26 = vectorRegister(26, "xmm26");
public static final VMStorage xmm27 = vectorRegister(27, "xmm27");
public static final VMStorage xmm28 = vectorRegister(28, "xmm28");
public static final VMStorage xmm29 = vectorRegister(29, "xmm29");
public static final VMStorage xmm30 = vectorRegister(30, "xmm30");
public static final VMStorage xmm31 = vectorRegister(31, "xmm31");
}
public static final VMStorage rax = integerRegister(0, "rax");
public static final VMStorage rcx = integerRegister(1, "rcx");
public static final VMStorage rdx = integerRegister(2, "rdx");
public static final VMStorage rbx = integerRegister(3, "rbx");
public static final VMStorage rsp = integerRegister(4, "rsp");
public static final VMStorage rbp = integerRegister(5, "rbp");
public static final VMStorage rsi = integerRegister(6, "rsi");
public static final VMStorage rdi = integerRegister(7, "rdi");
public static final VMStorage r8 = integerRegister(8, "r8");
public static final VMStorage r9 = integerRegister(9, "r9");
public static final VMStorage r10 = integerRegister(10, "r10");
public static final VMStorage r11 = integerRegister(11, "r11");
public static final VMStorage r12 = integerRegister(12, "r12");
public static final VMStorage r13 = integerRegister(13, "r13");
public static final VMStorage r14 = integerRegister(14, "r14");
public static final VMStorage r15 = integerRegister(15, "r15");
public static final VMStorage xmm0 = vectorRegister(0, "xmm0");
public static final VMStorage xmm1 = vectorRegister(1, "xmm1");
public static final VMStorage xmm2 = vectorRegister(2, "xmm2");
public static final VMStorage xmm3 = vectorRegister(3, "xmm3");
public static final VMStorage xmm4 = vectorRegister(4, "xmm4");
public static final VMStorage xmm5 = vectorRegister(5, "xmm5");
public static final VMStorage xmm6 = vectorRegister(6, "xmm6");
public static final VMStorage xmm7 = vectorRegister(7, "xmm7");
public static final VMStorage xmm8 = vectorRegister(8, "xmm8");
public static final VMStorage xmm9 = vectorRegister(9, "xmm9");
public static final VMStorage xmm10 = vectorRegister(10, "xmm10");
public static final VMStorage xmm11 = vectorRegister(11, "xmm11");
public static final VMStorage xmm12 = vectorRegister(12, "xmm12");
public static final VMStorage xmm13 = vectorRegister(13, "xmm13");
public static final VMStorage xmm14 = vectorRegister(14, "xmm14");
public static final VMStorage xmm15 = vectorRegister(15, "xmm15");
public static final VMStorage xmm16 = vectorRegister(16, "xmm16");
public static final VMStorage xmm17 = vectorRegister(17, "xmm17");
public static final VMStorage xmm18 = vectorRegister(18, "xmm18");
public static final VMStorage xmm19 = vectorRegister(19, "xmm19");
public static final VMStorage xmm20 = vectorRegister(20, "xmm20");
public static final VMStorage xmm21 = vectorRegister(21, "xmm21");
public static final VMStorage xmm22 = vectorRegister(22, "xmm22");
public static final VMStorage xmm23 = vectorRegister(23, "xmm23");
public static final VMStorage xmm24 = vectorRegister(24, "xmm24");
public static final VMStorage xmm25 = vectorRegister(25, "xmm25");
public static final VMStorage xmm26 = vectorRegister(26, "xmm26");
public static final VMStorage xmm27 = vectorRegister(27, "xmm27");
public static final VMStorage xmm28 = vectorRegister(28, "xmm28");
public static final VMStorage xmm29 = vectorRegister(29, "xmm29");
public static final VMStorage xmm30 = vectorRegister(30, "xmm30");
public static final VMStorage xmm31 = vectorRegister(31, "xmm31");
private static VMStorage integerRegister(int index, String debugName) {
return new VMStorage(StorageClasses.INTEGER, index, debugName);
return new VMStorage(StorageType.INTEGER, REG64_MASK, index, debugName);
}
private static VMStorage vectorRegister(int index, String debugName) {
return new VMStorage(StorageClasses.VECTOR, index, debugName);
return new VMStorage(StorageType.VECTOR, XMM_MASK, index, debugName);
}
public static VMStorage stackStorage(int index) {
return new VMStorage(StorageClasses.STACK, index, "Stack@" + index);
public static VMStorage stackStorage(short size, int byteOffset) {
return new VMStorage(StorageType.STACK, size, byteOffset);
}
public static VMStorage x87Storage(int index) {
return new VMStorage(StorageClasses.X87, index, "X87(" + index + ")");
return new VMStorage(StorageType.X87, STP_MASK, index, "X87(" + index + ")");
}
public static ABIDescriptor abiFor(VMStorage[] inputIntRegs, VMStorage[] inputVectorRegs, VMStorage[] outputIntRegs,
VMStorage[] outputVectorRegs, int numX87Outputs, VMStorage[] volatileIntRegs,
VMStorage[] volatileVectorRegs, int stackAlignment, int shadowSpace,
VMStorage targetAddrStorage, VMStorage retBufAddrStorage) {
VMStorage scratch1, VMStorage scratch2) {
return new ABIDescriptor(
INSTANCE,
new VMStorage[][] {
@ -153,7 +163,10 @@ public class X86_64Architecture implements Architecture {
},
stackAlignment,
shadowSpace,
targetAddrStorage, retBufAddrStorage);
scratch1, scratch2,
StubLocations.TARGET_ADDRESS.storage(StorageType.PLACEHOLDER),
StubLocations.RETURN_BUFFER.storage(StorageType.PLACEHOLDER),
StubLocations.CAPTURED_STATE_BUFFER.storage(StorageType.PLACEHOLDER));
}
}
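The new register masks in X86_64Architecture encode which parts of a register a value occupies: each bit marks one segment, so the wider masks are unions of the narrower ones. A small sketch of that relationship (constant values copied from the class above, only the interpretation is an assumption):

class RegisterMaskSketch {
    static final short REG8_L_MASK = 0b0000_0000_0000_0001;
    static final short REG8_H_MASK = 0b0000_0000_0000_0010;
    static final short REG16_MASK  = 0b0000_0000_0000_0011;
    static final short REG32_MASK  = 0b0000_0000_0000_0111;
    static final short REG64_MASK  = 0b0000_0000_0000_1111;

    public static void main(String[] args) {
        // A 16-bit use covers both 8-bit segments; 32- and 64-bit uses add one segment each.
        assert REG16_MASK == (REG8_L_MASK | REG8_H_MASK);
        assert REG32_MASK == (REG16_MASK | 0b0100);
        assert REG64_MASK == (REG32_MASK | 0b1000);
    }
}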

View File

@ -31,9 +31,11 @@ import jdk.internal.foreign.abi.Binding;
import jdk.internal.foreign.abi.CallingSequence;
import jdk.internal.foreign.abi.CallingSequenceBuilder;
import jdk.internal.foreign.abi.DowncallLinker;
import jdk.internal.foreign.abi.LinkerOptions;
import jdk.internal.foreign.abi.SharedUtils;
import jdk.internal.foreign.abi.UpcallLinker;
import jdk.internal.foreign.abi.VMStorage;
import jdk.internal.foreign.abi.x64.X86_64Architecture;
import java.lang.foreign.SegmentScope;
import java.lang.foreign.FunctionDescriptor;
@ -49,6 +51,7 @@ import java.util.Optional;
import static jdk.internal.foreign.PlatformLayouts.SysV;
import static jdk.internal.foreign.abi.Binding.vmStore;
import static jdk.internal.foreign.abi.x64.X86_64Architecture.*;
import static jdk.internal.foreign.abi.x64.X86_64Architecture.Regs.*;
/**
* For the SysV x64 C ABI specifically, this class uses CallingSequenceBuilder
@ -57,9 +60,11 @@ import static jdk.internal.foreign.abi.x64.X86_64Architecture.*;
* This includes taking care of synthetic arguments like pointers to return buffers for 'in-memory' returns.
*/
public class CallArranger {
public static final int MAX_INTEGER_ARGUMENT_REGISTERS = 6;
public static final int MAX_VECTOR_ARGUMENT_REGISTERS = 8;
private static final ABIDescriptor CSysV = abiFor(
private static final int STACK_SLOT_SIZE = 8;
private static final int MAX_INTEGER_ARGUMENT_REGISTERS = 6;
private static final int MAX_VECTOR_ARGUMENT_REGISTERS = 8;
private static final ABIDescriptor CSysV = X86_64Architecture.abiFor(
new VMStorage[] { rdi, rsi, rdx, rcx, r8, r9, rax },
new VMStorage[] { xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7 },
new VMStorage[] { rax, rdx },
@ -69,8 +74,7 @@ public class CallArranger {
new VMStorage[] { xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15 },
16,
0, //no shadow space
r10, // target addr reg
r11 // ret buf addr reg
r10, r11 // scratch 1 & 2
);
public record Bindings(
@ -80,7 +84,11 @@ public class CallArranger {
}
public static Bindings getBindings(MethodType mt, FunctionDescriptor cDesc, boolean forUpcall) {
CallingSequenceBuilder csb = new CallingSequenceBuilder(CSysV, forUpcall);
return getBindings(mt, cDesc, forUpcall, LinkerOptions.empty());
}
public static Bindings getBindings(MethodType mt, FunctionDescriptor cDesc, boolean forUpcall, LinkerOptions options) {
CallingSequenceBuilder csb = new CallingSequenceBuilder(CSysV, forUpcall, options);
BindingCalculator argCalc = forUpcall ? new BoxBindingCalculator(true) : new UnboxBindingCalculator(true);
BindingCalculator retCalc = forUpcall ? new UnboxBindingCalculator(false) : new BoxBindingCalculator(false);
@ -111,14 +119,14 @@ public class CallArranger {
return new Bindings(csb.build(), returnInMemory, argCalc.storageCalculator.nVectorReg);
}
public static MethodHandle arrangeDowncall(MethodType mt, FunctionDescriptor cDesc) {
Bindings bindings = getBindings(mt, cDesc, false);
public static MethodHandle arrangeDowncall(MethodType mt, FunctionDescriptor cDesc, LinkerOptions options) {
Bindings bindings = getBindings(mt, cDesc, false, options);
MethodHandle handle = new DowncallLinker(CSysV, bindings.callingSequence).getBoundMethodHandle();
handle = MethodHandles.insertArguments(handle, handle.type().parameterCount() - 1, bindings.nVectorArgs);
if (bindings.isInMemoryReturn) {
handle = SharedUtils.adaptDowncallForIMR(handle, cDesc);
handle = SharedUtils.adaptDowncallForIMR(handle, cDesc, bindings.callingSequence);
}
return handle;
@ -153,15 +161,15 @@ public class CallArranger {
}
private int maxRegisterArguments(int type) {
return type == StorageClasses.INTEGER ?
return type == StorageType.INTEGER ?
MAX_INTEGER_ARGUMENT_REGISTERS :
MAX_VECTOR_ARGUMENT_REGISTERS;
}
VMStorage stackAlloc() {
assert forArguments : "no stack returns";
VMStorage storage = stackStorage((int)stackOffset);
stackOffset++;
VMStorage storage = X86_64Architecture.stackStorage((short) STACK_SLOT_SIZE, (int)stackOffset);
stackOffset += STACK_SLOT_SIZE;
return storage;
}
@ -199,23 +207,23 @@ public class CallArranger {
VMStorage[] storage = new VMStorage[(int)(nIntegerReg + nVectorReg)];
for (int i = 0 ; i < typeClass.classes.size() ; i++) {
boolean sse = typeClass.classes.get(i) == ArgumentClassImpl.SSE;
storage[i] = nextStorage(sse ? StorageClasses.VECTOR : StorageClasses.INTEGER);
storage[i] = nextStorage(sse ? StorageType.VECTOR : StorageType.INTEGER);
}
return storage;
}
int registerCount(int type) {
return switch (type) {
case StorageClasses.INTEGER -> nIntegerReg;
case StorageClasses.VECTOR -> nVectorReg;
case StorageType.INTEGER -> nIntegerReg;
case StorageType.VECTOR -> nVectorReg;
default -> throw new IllegalStateException();
};
}
void incrementRegisterCount(int type) {
switch (type) {
case StorageClasses.INTEGER -> nIntegerReg++;
case StorageClasses.VECTOR -> nVectorReg++;
case StorageType.INTEGER -> nIntegerReg++;
case StorageType.VECTOR -> nVectorReg++;
default -> throw new IllegalStateException();
}
}
@ -253,7 +261,7 @@ public class CallArranger {
if (offset + copy < layout.byteSize()) {
bindings.dup();
}
boolean useFloat = storage.type() == StorageClasses.VECTOR;
boolean useFloat = storage.type() == StorageType.VECTOR;
Class<?> type = SharedUtils.primitiveCarrierForSize(copy, useFloat);
bindings.bufferLoad(offset, type)
.vmStore(storage, type);
@ -262,15 +270,15 @@ public class CallArranger {
}
case POINTER -> {
bindings.unboxAddress();
VMStorage storage = storageCalculator.nextStorage(StorageClasses.INTEGER);
VMStorage storage = storageCalculator.nextStorage(StorageType.INTEGER);
bindings.vmStore(storage, long.class);
}
case INTEGER -> {
VMStorage storage = storageCalculator.nextStorage(StorageClasses.INTEGER);
VMStorage storage = storageCalculator.nextStorage(StorageType.INTEGER);
bindings.vmStore(storage, carrier);
}
case FLOAT -> {
VMStorage storage = storageCalculator.nextStorage(StorageClasses.VECTOR);
VMStorage storage = storageCalculator.nextStorage(StorageType.VECTOR);
bindings.vmStore(storage, carrier);
}
default -> throw new UnsupportedOperationException("Unhandled class " + argumentClass);
@ -300,7 +308,7 @@ public class CallArranger {
final long copy = Math.min(layout.byteSize() - offset, 8);
VMStorage storage = regs[regIndex++];
bindings.dup();
boolean useFloat = storage.type() == StorageClasses.VECTOR;
boolean useFloat = storage.type() == StorageType.VECTOR;
Class<?> type = SharedUtils.primitiveCarrierForSize(copy, useFloat);
bindings.vmLoad(storage, type)
.bufferStore(offset, type);
@ -308,16 +316,16 @@ public class CallArranger {
}
}
case POINTER -> {
VMStorage storage = storageCalculator.nextStorage(StorageClasses.INTEGER);
VMStorage storage = storageCalculator.nextStorage(StorageType.INTEGER);
bindings.vmLoad(storage, long.class)
.boxAddressRaw(Utils.pointeeSize(layout));
}
case INTEGER -> {
VMStorage storage = storageCalculator.nextStorage(StorageClasses.INTEGER);
VMStorage storage = storageCalculator.nextStorage(StorageType.INTEGER);
bindings.vmLoad(storage, carrier);
}
case FLOAT -> {
VMStorage storage = storageCalculator.nextStorage(StorageClasses.VECTOR);
VMStorage storage = storageCalculator.nextStorage(StorageType.VECTOR);
bindings.vmLoad(storage, carrier);
}
default -> throw new UnsupportedOperationException("Unhandled class " + argumentClass);

View File

@ -54,7 +54,7 @@ public final class SysVx64Linker extends AbstractLinker {
}
@Override
protected MethodHandle arrangeDowncall(MethodType inferredMethodType, FunctionDescriptor function, LinkerOptions options) {
return CallArranger.arrangeDowncall(inferredMethodType, function);
return CallArranger.arrangeDowncall(inferredMethodType, function, options);
}
@Override

View File

@ -48,6 +48,7 @@ import java.util.Optional;
import static jdk.internal.foreign.PlatformLayouts.Win64;
import static jdk.internal.foreign.abi.x64.X86_64Architecture.*;
import static jdk.internal.foreign.abi.x64.X86_64Architecture.Regs.*;
/**
* For the Windows x64 C ABI specifically, this class uses CallingSequenceBuilder
@ -69,8 +70,7 @@ public class CallArranger {
new VMStorage[] { xmm4, xmm5 },
16,
32,
r10, // target addr reg
r11 // ret buf addr reg
r10, r11 // scratch 1 & 2
);
public record Bindings(
@ -84,7 +84,7 @@ public class CallArranger {
public static Bindings getBindings(MethodType mt, FunctionDescriptor cDesc, boolean forUpcall, LinkerOptions options) {
class CallingSequenceBuilderHelper {
final CallingSequenceBuilder csb = new CallingSequenceBuilder(CWindows, forUpcall);
final CallingSequenceBuilder csb = new CallingSequenceBuilder(CWindows, forUpcall, options);
final BindingCalculator argCalc =
forUpcall ? new BoxBindingCalculator(true) : new UnboxBindingCalculator(true);
final BindingCalculator retCalc =
@ -125,7 +125,7 @@ public class CallArranger {
MethodHandle handle = new DowncallLinker(CWindows, bindings.callingSequence).getBoundMethodHandle();
if (bindings.isInMemoryReturn) {
handle = SharedUtils.adaptDowncallForIMR(handle, cDesc);
handle = SharedUtils.adaptDowncallForIMR(handle, cDesc, bindings.callingSequence);
}
return handle;
@ -164,7 +164,7 @@ public class CallArranger {
// stack
assert stackOffset == Utils.alignUp(stackOffset, STACK_SLOT_SIZE); // should always be aligned
VMStorage storage = X86_64Architecture.stackStorage((int) (stackOffset / STACK_SLOT_SIZE));
VMStorage storage = X86_64Architecture.stackStorage((short) STACK_SLOT_SIZE, (int) stackOffset);
stackOffset += STACK_SLOT_SIZE;
return storage;
}
@ -176,7 +176,7 @@ public class CallArranger {
public VMStorage extraVarargsStorage() {
assert forArguments;
return CWindows.inputStorage[StorageClasses.INTEGER][nRegs - 1];
return CWindows.inputStorage[StorageType.INTEGER][nRegs - 1];
}
}
@ -198,7 +198,7 @@ public class CallArranger {
switch (argumentClass) {
case STRUCT_REGISTER: {
assert carrier == MemorySegment.class;
VMStorage storage = storageCalculator.nextStorage(StorageClasses.INTEGER);
VMStorage storage = storageCalculator.nextStorage(StorageType.INTEGER);
Class<?> type = SharedUtils.primitiveCarrierForSize(layout.byteSize(), false);
bindings.bufferLoad(0, type)
.vmStore(storage, type);
@ -208,28 +208,28 @@ public class CallArranger {
assert carrier == MemorySegment.class;
bindings.copy(layout)
.unboxAddress();
VMStorage storage = storageCalculator.nextStorage(StorageClasses.INTEGER);
VMStorage storage = storageCalculator.nextStorage(StorageType.INTEGER);
bindings.vmStore(storage, long.class);
break;
}
case POINTER: {
bindings.unboxAddress();
VMStorage storage = storageCalculator.nextStorage(StorageClasses.INTEGER);
VMStorage storage = storageCalculator.nextStorage(StorageType.INTEGER);
bindings.vmStore(storage, long.class);
break;
}
case INTEGER: {
VMStorage storage = storageCalculator.nextStorage(StorageClasses.INTEGER);
VMStorage storage = storageCalculator.nextStorage(StorageType.INTEGER);
bindings.vmStore(storage, carrier);
break;
}
case FLOAT: {
VMStorage storage = storageCalculator.nextStorage(StorageClasses.VECTOR);
VMStorage storage = storageCalculator.nextStorage(StorageType.VECTOR);
bindings.vmStore(storage, carrier);
break;
}
case VARARG_FLOAT: {
VMStorage storage = storageCalculator.nextStorage(StorageClasses.VECTOR);
VMStorage storage = storageCalculator.nextStorage(StorageType.VECTOR);
if (!INSTANCE.isStackType(storage.type())) { // need extra for register arg
VMStorage extraStorage = storageCalculator.extraVarargsStorage();
bindings.dup()
@ -262,7 +262,7 @@ public class CallArranger {
assert carrier == MemorySegment.class;
bindings.allocate(layout)
.dup();
VMStorage storage = storageCalculator.nextStorage(StorageClasses.INTEGER);
VMStorage storage = storageCalculator.nextStorage(StorageType.INTEGER);
Class<?> type = SharedUtils.primitiveCarrierForSize(layout.byteSize(), false);
bindings.vmLoad(storage, type)
.bufferStore(0, type);
@ -270,24 +270,24 @@ public class CallArranger {
}
case STRUCT_REFERENCE: {
assert carrier == MemorySegment.class;
VMStorage storage = storageCalculator.nextStorage(StorageClasses.INTEGER);
VMStorage storage = storageCalculator.nextStorage(StorageType.INTEGER);
bindings.vmLoad(storage, long.class)
.boxAddress(layout);
break;
}
case POINTER: {
VMStorage storage = storageCalculator.nextStorage(StorageClasses.INTEGER);
VMStorage storage = storageCalculator.nextStorage(StorageType.INTEGER);
bindings.vmLoad(storage, long.class)
.boxAddressRaw(Utils.pointeeSize(layout));
break;
}
case INTEGER: {
VMStorage storage = storageCalculator.nextStorage(StorageClasses.INTEGER);
VMStorage storage = storageCalculator.nextStorage(StorageType.INTEGER);
bindings.vmLoad(storage, carrier);
break;
}
case FLOAT: {
VMStorage storage = storageCalculator.nextStorage(StorageClasses.VECTOR);
VMStorage storage = storageCalculator.nextStorage(StorageType.VECTOR);
bindings.vmLoad(storage, carrier);
break;
}

View File

@ -474,8 +474,8 @@ java/beans/XMLEncoder/Test6570354.java 8015593 macosx-all
# jdk_foreign
java/foreign/TestUpcallStack.java 8275584 macosx-aarch64
java/foreign/TestDowncallStack.java 8275584 macosx-aarch64
java/foreign/callarranger/TestAarch64CallArranger.java generic-x86
java/foreign/TestLargeSegmentCopy.java generic-x86
############################################################################

View File

@ -34,7 +34,7 @@ import java.lang.foreign.*;
import static java.lang.foreign.ValueLayout.*;
import static org.testng.Assert.*;
public class MemoryLayoutPrincipalTotalityTest extends NativeTestHelper {
public class MemoryLayoutPrincipalTotalityTest {
// The tests in this class are mostly there to ensure compile-time pattern-matching totality.
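(Editor's sketch, for orientation only and not part of this change: assuming the preview API's sealed MemoryLayout hierarchy, whose principal subtypes are ValueLayout, GroupLayout, SequenceLayout and PaddingLayout, the kind of switch whose compile-time totality this test guards looks roughly as follows.)
import java.lang.foreign.*;
static String describe(MemoryLayout layout) {
    // No default branch: this compiles only if the sealed hierarchy is
    // fully covered, which is the totality property the test exercises.
    return switch (layout) {
        case ValueLayout v    -> "value layout, carrier " + v.carrier();
        case GroupLayout g    -> "group layout with " + g.memberLayouts().size() + " members";
        case SequenceLayout s -> "sequence of " + s.elementLayout();
        case PaddingLayout p  -> "padding of " + p.byteSize() + " bytes";
    };
}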

View File

@ -35,7 +35,7 @@ import java.nio.ByteOrder;
import static java.lang.foreign.ValueLayout.*;
import static org.testng.Assert.*;
public class MemoryLayoutTypeRetentionTest extends NativeTestHelper {
public class MemoryLayoutTypeRetentionTest {
// These tests check both compile-time and runtime properties.
// withName() et al. should return the same type as the original object.
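(Editor's sketch, for orientation only and not part of this change: the type-retention property described above means, for example, that no cast is needed after withName().)
import java.lang.foreign.ValueLayout;
import static java.lang.foreign.ValueLayout.JAVA_INT;
// withName() returns ValueLayout.OfInt rather than a plain MemoryLayout,
// so the specialized type is retained at compile time.
ValueLayout.OfInt named = JAVA_INT.withName("x");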

View File

@ -25,6 +25,7 @@
/*
* @test
* @enablePreview
* @requires sun.arch.data.model == "64"
* @bug 8292851
* @run testng/othervm -Xmx4G TestLargeSegmentCopy
*/

View File

@ -24,9 +24,11 @@
/*
* @test
* @enablePreview
* @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64"
* @run testng TestLinker
*/
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import java.lang.foreign.FunctionDescriptor;
@ -47,4 +49,27 @@ public class TestLinker extends NativeTestHelper {
assertNotSame(mh1, mh2);
}
@DataProvider
public static Object[][] invalidIndexCases() {
return new Object[][]{
{ -1, },
{ 42, },
};
}
@Test(dataProvider = "invalidIndexCases",
expectedExceptions = IllegalArgumentException.class,
expectedExceptionsMessageRegExp = ".*not in bounds for descriptor.*")
public void testInvalidOption(int invalidIndex) {
Linker.Option option = Linker.Option.firstVariadicArg(invalidIndex);
FunctionDescriptor desc = FunctionDescriptor.ofVoid();
Linker.nativeLinker().downcallHandle(desc, option); // throws
}
@Test(expectedExceptions = IllegalArgumentException.class,
expectedExceptionsMessageRegExp = ".*Unknown name.*")
public void testInvalidPreservedValueName() {
Linker.Option.captureCallState("foo"); // throws
}
}
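(Editor's sketch, for orientation only and not part of this change: a minimal illustration of passing Linker.Option.firstVariadicArg through the descriptor-only downcallHandle overload exercised above, assuming a C function such as printf with one fixed parameter; the function address is then supplied as the leading argument when the handle is invoked.)
import java.lang.foreign.*;
import java.lang.invoke.MethodHandle;
Linker linker = Linker.nativeLinker();
// int printf(const char *fmt, ...): the format pointer is fixed, the rest is variadic.
FunctionDescriptor desc = FunctionDescriptor.of(ValueLayout.JAVA_INT,
        ValueLayout.ADDRESS, ValueLayout.JAVA_INT);
// Argument layouts from index 1 onward are treated as variadic.
MethodHandle printf = linker.downcallHandle(desc, Linker.Option.firstVariadicArg(1));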

View File

@ -32,16 +32,17 @@ import static org.testng.Assert.assertEquals;
public class CallArrangerTestBase {
public static void checkArgumentBindings(CallingSequence callingSequence, Binding[][] argumentBindings) {
assertEquals(callingSequence.argumentBindingsCount(), argumentBindings.length);
assertEquals(callingSequence.argumentBindingsCount(), argumentBindings.length,
callingSequence.asString() + " != " + Arrays.deepToString(argumentBindings));
for (int i = 0; i < callingSequence.argumentBindingsCount(); i++) {
List<Binding> actual = callingSequence.argumentBindings(i);
Binding[] expected = argumentBindings[i];
assertEquals(actual, Arrays.asList(expected));
assertEquals(actual, Arrays.asList(expected), "bindings at: " + i + ": " + actual + " != " + Arrays.toString(expected));
}
}
public static void checkReturnBindings(CallingSequence callingSequence, Binding[] returnBindings) {
assertEquals(callingSequence.returnBindings(), Arrays.asList(returnBindings));
assertEquals(callingSequence.returnBindings(), Arrays.asList(returnBindings), callingSequence.returnBindings() + " != " + Arrays.toString(returnBindings));
}
}

View File

@ -25,6 +25,7 @@
/*
* @test
* @enablePreview
* @requires sun.arch.data.model == "64"
* @modules java.base/jdk.internal.foreign
* java.base/jdk.internal.foreign.abi
* java.base/jdk.internal.foreign.abi.aarch64
@ -34,10 +35,13 @@
import java.lang.foreign.FunctionDescriptor;
import java.lang.foreign.MemoryLayout;
import java.lang.foreign.StructLayout;
import java.lang.foreign.MemorySegment;
import jdk.internal.foreign.abi.Binding;
import jdk.internal.foreign.abi.CallingSequence;
import jdk.internal.foreign.abi.LinkerOptions;
import jdk.internal.foreign.abi.StubLocations;
import jdk.internal.foreign.abi.VMStorage;
import jdk.internal.foreign.abi.aarch64.CallArranger;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
@ -49,6 +53,7 @@ import static java.lang.foreign.ValueLayout.ADDRESS;
import static jdk.internal.foreign.PlatformLayouts.AArch64.*;
import static jdk.internal.foreign.abi.Binding.*;
import static jdk.internal.foreign.abi.aarch64.AArch64Architecture.*;
import static jdk.internal.foreign.abi.aarch64.AArch64Architecture.Regs.*;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
@ -56,6 +61,9 @@ import static org.testng.Assert.assertTrue;
public class TestAarch64CallArranger extends CallArrangerTestBase {
private static final VMStorage TARGET_ADDRESS_STORAGE = StubLocations.TARGET_ADDRESS.storage(StorageType.PLACEHOLDER);
private static final VMStorage RETURN_BUFFER_STORAGE = StubLocations.RETURN_BUFFER.storage(StorageType.PLACEHOLDER);
@Test
public void testEmpty() {
MethodType mt = MethodType.methodType(void.class);
@ -68,7 +76,7 @@ public class TestAarch64CallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r9, long.class) }
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) }
});
checkReturnBindings(callingSequence, new Binding[]{});
@ -92,7 +100,7 @@ public class TestAarch64CallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r9, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(r0, int.class) },
{ vmStore(r1, int.class) },
{ vmStore(r2, int.class) },
@ -101,8 +109,8 @@ public class TestAarch64CallArranger extends CallArrangerTestBase {
{ vmStore(r5, int.class) },
{ vmStore(r6, int.class) },
{ vmStore(r7, int.class) },
{ vmStore(stackStorage(0), int.class) },
{ vmStore(stackStorage(1), int.class) },
{ vmStore(stackStorage((short) 4, 0), int.class) },
{ vmStore(stackStorage((short) 4, 8), int.class) },
});
checkReturnBindings(callingSequence, new Binding[]{});
@ -122,7 +130,7 @@ public class TestAarch64CallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r9, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(r0, int.class) },
{ vmStore(r1, int.class) },
{ vmStore(v0, float.class) },
@ -144,7 +152,7 @@ public class TestAarch64CallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r9, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
expectedBindings
});
@ -204,7 +212,7 @@ public class TestAarch64CallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r9, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{
copy(struct1),
unboxAddress(),
@ -235,7 +243,7 @@ public class TestAarch64CallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), FunctionDescriptor.ofVoid(ADDRESS, C_POINTER));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r9, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{
unboxAddress(),
vmStore(r8, long.class)
@ -259,8 +267,8 @@ public class TestAarch64CallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) },
{ unboxAddress(), vmStore(r9, long.class) }
{ unboxAddress(), vmStore(RETURN_BUFFER_STORAGE, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) }
});
checkReturnBindings(callingSequence, new Binding[]{
@ -288,8 +296,8 @@ public class TestAarch64CallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) },
{ unboxAddress(), vmStore(r9, long.class) },
{ unboxAddress(), vmStore(RETURN_BUFFER_STORAGE, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(v0, float.class) },
{ vmStore(r0, int.class) },
{
@ -326,7 +334,7 @@ public class TestAarch64CallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r9, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{
dup(),
bufferLoad(0, float.class),
@ -350,9 +358,9 @@ public class TestAarch64CallArranger extends CallArrangerTestBase {
{
dup(),
bufferLoad(0, long.class),
vmStore(stackStorage(0), long.class),
vmStore(stackStorage((short) 8, 0), long.class),
bufferLoad(8, int.class),
vmStore(stackStorage(1), int.class),
vmStore(stackStorage((short) 4, 8), int.class),
}
});
@ -380,7 +388,7 @@ public class TestAarch64CallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r9, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ copy(struct), unboxAddress(), vmStore(r0, long.class) },
{ copy(struct), unboxAddress(), vmStore(r1, long.class) },
{ vmStore(r2, int.class) },
@ -389,8 +397,8 @@ public class TestAarch64CallArranger extends CallArrangerTestBase {
{ vmStore(r5, int.class) },
{ vmStore(r6, int.class) },
{ vmStore(r7, int.class) },
{ copy(struct), unboxAddress(), vmStore(stackStorage(0), long.class) },
{ vmStore(stackStorage(1), int.class) },
{ copy(struct), unboxAddress(), vmStore(stackStorage((short) 8, 0), long.class) },
{ vmStore(stackStorage((short) 4, 8), int.class) },
});
checkReturnBindings(callingSequence, new Binding[]{});
@ -401,7 +409,7 @@ public class TestAarch64CallArranger extends CallArrangerTestBase {
MethodType mt = MethodType.methodType(void.class, int.class, int.class, float.class);
FunctionDescriptor fd = FunctionDescriptor.ofVoid(C_INT, C_INT, C_FLOAT);
FunctionDescriptor fdExpected = FunctionDescriptor.ofVoid(ADDRESS, C_INT, C_INT, C_FLOAT);
CallArranger.Bindings bindings = CallArranger.LINUX.getBindings(mt, fd, false, LinkerOptions.of(firstVariadicArg(1)));
CallArranger.Bindings bindings = CallArranger.LINUX.getBindings(mt, fd, false, LinkerOptions.forDowncall(fd, firstVariadicArg(1)));
assertFalse(bindings.isInMemoryReturn());
CallingSequence callingSequence = bindings.callingSequence();
@ -410,7 +418,7 @@ public class TestAarch64CallArranger extends CallArrangerTestBase {
// This is identical to the non-variadic calling sequence
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r9, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(r0, int.class) },
{ vmStore(r1, int.class) },
{ vmStore(v0, float.class) },
@ -424,7 +432,7 @@ public class TestAarch64CallArranger extends CallArrangerTestBase {
MethodType mt = MethodType.methodType(void.class, int.class, int.class, float.class);
FunctionDescriptor fd = FunctionDescriptor.ofVoid(C_INT, C_INT, C_FLOAT);
FunctionDescriptor fdExpected = FunctionDescriptor.ofVoid(ADDRESS, C_INT, C_INT, C_FLOAT);
CallArranger.Bindings bindings = CallArranger.MACOS.getBindings(mt, fd, false, LinkerOptions.of(firstVariadicArg(1)));
CallArranger.Bindings bindings = CallArranger.MACOS.getBindings(mt, fd, false, LinkerOptions.forDowncall(fd, firstVariadicArg(1)));
assertFalse(bindings.isInMemoryReturn());
CallingSequence callingSequence = bindings.callingSequence();
@ -433,10 +441,207 @@ public class TestAarch64CallArranger extends CallArrangerTestBase {
// The two variadic arguments should be allocated on the stack
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r9, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(r0, int.class) },
{ vmStore(stackStorage(0), int.class) },
{ vmStore(stackStorage(1), float.class) },
{ vmStore(stackStorage((short) 4, 0), int.class) },
{ vmStore(stackStorage((short) 4, 8), float.class) },
});
checkReturnBindings(callingSequence, new Binding[]{});
}
@Test
public void testMacArgsOnStack() {
MethodType mt = MethodType.methodType(void.class,
int.class, int.class, int.class, int.class,
int.class, int.class, int.class, int.class,
int.class, int.class, short.class, byte.class);
FunctionDescriptor fd = FunctionDescriptor.ofVoid(
C_INT, C_INT, C_INT, C_INT,
C_INT, C_INT, C_INT, C_INT,
C_INT, C_INT, C_SHORT, C_CHAR);
CallArranger.Bindings bindings = CallArranger.MACOS.getBindings(mt, fd, false);
assertFalse(bindings.isInMemoryReturn());
CallingSequence callingSequence = bindings.callingSequence();
assertEquals(callingSequence.callerMethodType(), mt.insertParameterTypes(0, MemorySegment.class));
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(r0, int.class) },
{ vmStore(r1, int.class) },
{ vmStore(r2, int.class) },
{ vmStore(r3, int.class) },
{ vmStore(r4, int.class) },
{ vmStore(r5, int.class) },
{ vmStore(r6, int.class) },
{ vmStore(r7, int.class) },
{ vmStore(stackStorage((short) 4, 0), int.class) },
{ vmStore(stackStorage((short) 4, 4), int.class) },
{ cast(short.class, int.class), vmStore(stackStorage((short) 2, 8), int.class) },
{ cast(byte.class, int.class), vmStore(stackStorage((short) 1, 10), int.class) },
});
checkReturnBindings(callingSequence, new Binding[]{});
}
@Test
public void testMacArgsOnStack2() {
StructLayout struct = MemoryLayout.structLayout(
C_FLOAT,
C_FLOAT
);
MethodType mt = MethodType.methodType(void.class,
long.class, long.class, long.class, long.class,
long.class, long.class, long.class, long.class,
double.class, double.class, double.class, double.class,
double.class, double.class, double.class, double.class,
int.class, MemorySegment.class);
FunctionDescriptor fd = FunctionDescriptor.ofVoid(
C_LONG_LONG, C_LONG_LONG, C_LONG_LONG, C_LONG_LONG,
C_LONG_LONG, C_LONG_LONG, C_LONG_LONG, C_LONG_LONG,
C_DOUBLE, C_DOUBLE, C_DOUBLE, C_DOUBLE,
C_DOUBLE, C_DOUBLE, C_DOUBLE, C_DOUBLE,
C_INT, struct);
CallArranger.Bindings bindings = CallArranger.MACOS.getBindings(mt, fd, false);
assertFalse(bindings.isInMemoryReturn());
CallingSequence callingSequence = bindings.callingSequence();
assertEquals(callingSequence.callerMethodType(), mt.insertParameterTypes(0, MemorySegment.class));
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(r0, long.class) },
{ vmStore(r1, long.class) },
{ vmStore(r2, long.class) },
{ vmStore(r3, long.class) },
{ vmStore(r4, long.class) },
{ vmStore(r5, long.class) },
{ vmStore(r6, long.class) },
{ vmStore(r7, long.class) },
{ vmStore(v0, double.class) },
{ vmStore(v1, double.class) },
{ vmStore(v2, double.class) },
{ vmStore(v3, double.class) },
{ vmStore(v4, double.class) },
{ vmStore(v5, double.class) },
{ vmStore(v6, double.class) },
{ vmStore(v7, double.class) },
{ vmStore(stackStorage((short) 4, 0), int.class) },
{
dup(),
bufferLoad(0, int.class),
vmStore(stackStorage((short) 4, 4), int.class),
bufferLoad(4, int.class),
vmStore(stackStorage((short) 4, 8), int.class),
}
});
checkReturnBindings(callingSequence, new Binding[]{});
}
@Test
public void testMacArgsOnStack3() {
StructLayout struct = MemoryLayout.structLayout(
C_POINTER,
C_POINTER
);
MethodType mt = MethodType.methodType(void.class,
long.class, long.class, long.class, long.class,
long.class, long.class, long.class, long.class,
double.class, double.class, double.class, double.class,
double.class, double.class, double.class, double.class,
MemorySegment.class, float.class);
FunctionDescriptor fd = FunctionDescriptor.ofVoid(
C_LONG_LONG, C_LONG_LONG, C_LONG_LONG, C_LONG_LONG,
C_LONG_LONG, C_LONG_LONG, C_LONG_LONG, C_LONG_LONG,
C_DOUBLE, C_DOUBLE, C_DOUBLE, C_DOUBLE,
C_DOUBLE, C_DOUBLE, C_DOUBLE, C_DOUBLE,
struct, C_FLOAT);
CallArranger.Bindings bindings = CallArranger.MACOS.getBindings(mt, fd, false);
assertFalse(bindings.isInMemoryReturn());
CallingSequence callingSequence = bindings.callingSequence();
assertEquals(callingSequence.callerMethodType(), mt.insertParameterTypes(0, MemorySegment.class));
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(r0, long.class) },
{ vmStore(r1, long.class) },
{ vmStore(r2, long.class) },
{ vmStore(r3, long.class) },
{ vmStore(r4, long.class) },
{ vmStore(r5, long.class) },
{ vmStore(r6, long.class) },
{ vmStore(r7, long.class) },
{ vmStore(v0, double.class) },
{ vmStore(v1, double.class) },
{ vmStore(v2, double.class) },
{ vmStore(v3, double.class) },
{ vmStore(v4, double.class) },
{ vmStore(v5, double.class) },
{ vmStore(v6, double.class) },
{ vmStore(v7, double.class) },
{ dup(),
bufferLoad(0, long.class), vmStore(stackStorage((short) 8, 0), long.class),
bufferLoad(8, long.class), vmStore(stackStorage((short) 8, 8), long.class) },
{ vmStore(stackStorage((short) 4, 16), float.class) },
});
checkReturnBindings(callingSequence, new Binding[]{});
}
@Test
public void testMacArgsOnStack4() {
StructLayout struct = MemoryLayout.structLayout(
C_INT,
C_INT,
C_POINTER
);
MethodType mt = MethodType.methodType(void.class,
long.class, long.class, long.class, long.class,
long.class, long.class, long.class, long.class,
double.class, double.class, double.class, double.class,
double.class, double.class, double.class, double.class,
float.class, MemorySegment.class);
FunctionDescriptor fd = FunctionDescriptor.ofVoid(
C_LONG_LONG, C_LONG_LONG, C_LONG_LONG, C_LONG_LONG,
C_LONG_LONG, C_LONG_LONG, C_LONG_LONG, C_LONG_LONG,
C_DOUBLE, C_DOUBLE, C_DOUBLE, C_DOUBLE,
C_DOUBLE, C_DOUBLE, C_DOUBLE, C_DOUBLE,
C_FLOAT, struct);
CallArranger.Bindings bindings = CallArranger.MACOS.getBindings(mt, fd, false);
assertFalse(bindings.isInMemoryReturn());
CallingSequence callingSequence = bindings.callingSequence();
assertEquals(callingSequence.callerMethodType(), mt.insertParameterTypes(0, MemorySegment.class));
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(r0, long.class) },
{ vmStore(r1, long.class) },
{ vmStore(r2, long.class) },
{ vmStore(r3, long.class) },
{ vmStore(r4, long.class) },
{ vmStore(r5, long.class) },
{ vmStore(r6, long.class) },
{ vmStore(r7, long.class) },
{ vmStore(v0, double.class) },
{ vmStore(v1, double.class) },
{ vmStore(v2, double.class) },
{ vmStore(v3, double.class) },
{ vmStore(v4, double.class) },
{ vmStore(v5, double.class) },
{ vmStore(v6, double.class) },
{ vmStore(v7, double.class) },
{ vmStore(stackStorage((short) 4, 0), float.class) },
{ dup(),
bufferLoad(0, long.class), vmStore(stackStorage((short) 8, 8), long.class),
bufferLoad(8, long.class), vmStore(stackStorage((short) 8, 16), long.class) },
});
checkReturnBindings(callingSequence, new Binding[]{});

View File

@ -38,6 +38,8 @@ import java.lang.foreign.MemoryLayout;
import java.lang.foreign.MemorySegment;
import jdk.internal.foreign.abi.Binding;
import jdk.internal.foreign.abi.CallingSequence;
import jdk.internal.foreign.abi.StubLocations;
import jdk.internal.foreign.abi.VMStorage;
import jdk.internal.foreign.abi.x64.sysv.CallArranger;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
@ -48,6 +50,7 @@ import static java.lang.foreign.ValueLayout.ADDRESS;
import static jdk.internal.foreign.PlatformLayouts.SysV.*;
import static jdk.internal.foreign.abi.Binding.*;
import static jdk.internal.foreign.abi.x64.X86_64Architecture.*;
import static jdk.internal.foreign.abi.x64.X86_64Architecture.Regs.*;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
@ -55,6 +58,10 @@ import static org.testng.Assert.assertTrue;
public class TestSysVCallArranger extends CallArrangerTestBase {
private static final short STACK_SLOT_SIZE = 8;
private static final VMStorage TARGET_ADDRESS_STORAGE = StubLocations.TARGET_ADDRESS.storage(StorageType.PLACEHOLDER);
private static final VMStorage RETURN_BUFFER_STORAGE = StubLocations.RETURN_BUFFER.storage(StorageType.PLACEHOLDER);
@Test
public void testEmpty() {
MethodType mt = MethodType.methodType(void.class);
@ -67,7 +74,7 @@ public class TestSysVCallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.appendArgumentLayouts(C_LONG).insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(rax, long.class) }
});
@ -95,7 +102,7 @@ public class TestSysVCallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.appendArgumentLayouts(C_LONG).insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ dup(), bufferLoad(0, long.class), vmStore(rdi, long.class),
bufferLoad(8, int.class), vmStore(rsi, int.class)},
{ vmStore(rax, long.class) },
@ -126,7 +133,7 @@ public class TestSysVCallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.appendArgumentLayouts(C_LONG).insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ dup(), bufferLoad(0, long.class), vmStore(rdi, long.class),
bufferLoad(8, long.class), vmStore(rsi, long.class)},
{ vmStore(rax, long.class) },
@ -156,9 +163,9 @@ public class TestSysVCallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.appendArgumentLayouts(C_LONG).insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) },
{ dup(), bufferLoad(0, long.class), vmStore(stackStorage(0), long.class),
bufferLoad(8, long.class), vmStore(stackStorage(1), long.class)},
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ dup(), bufferLoad(0, long.class), vmStore(stackStorage(STACK_SLOT_SIZE, 0), long.class),
bufferLoad(8, long.class), vmStore(stackStorage(STACK_SLOT_SIZE, 8), long.class)},
{ vmStore(rax, long.class) },
});
@ -186,9 +193,9 @@ public class TestSysVCallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.appendArgumentLayouts(C_LONG).insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) },
{ dup(), bufferLoad(0, long.class), vmStore(stackStorage(0), long.class),
bufferLoad(8, int.class), vmStore(stackStorage(1), int.class)},
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ dup(), bufferLoad(0, long.class), vmStore(stackStorage(STACK_SLOT_SIZE, 0), long.class),
bufferLoad(8, int.class), vmStore(stackStorage(STACK_SLOT_SIZE, 8), int.class)},
{ vmStore(rax, long.class) },
});
@ -211,7 +218,7 @@ public class TestSysVCallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.appendArgumentLayouts(C_LONG).insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(rdi, int.class) },
{ vmStore(rsi, int.class) },
{ vmStore(rdx, int.class) },
@ -242,7 +249,7 @@ public class TestSysVCallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.appendArgumentLayouts(C_LONG).insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(xmm0, double.class) },
{ vmStore(xmm1, double.class) },
{ vmStore(xmm2, double.class) },
@ -277,15 +284,15 @@ public class TestSysVCallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.appendArgumentLayouts(C_LONG).insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(rdi, long.class) },
{ vmStore(rsi, long.class) },
{ vmStore(rdx, long.class) },
{ vmStore(rcx, long.class) },
{ vmStore(r8, long.class) },
{ vmStore(r9, long.class) },
{ vmStore(stackStorage(0), long.class) },
{ vmStore(stackStorage(1), long.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 0), long.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 8), long.class) },
{ vmStore(xmm0, float.class) },
{ vmStore(xmm1, float.class) },
{ vmStore(xmm2, float.class) },
@ -294,8 +301,8 @@ public class TestSysVCallArranger extends CallArrangerTestBase {
{ vmStore(xmm5, float.class) },
{ vmStore(xmm6, float.class) },
{ vmStore(xmm7, float.class) },
{ vmStore(stackStorage(2), float.class) },
{ vmStore(stackStorage(3), float.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 16), float.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 24), float.class) },
{ vmStore(rax, long.class) },
});
@ -334,7 +341,7 @@ public class TestSysVCallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.appendArgumentLayouts(C_LONG).insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(rdi, int.class) },
{ vmStore(rsi, int.class) },
{
@ -347,8 +354,8 @@ public class TestSysVCallArranger extends CallArrangerTestBase {
{ vmStore(xmm1, double.class) },
{ vmStore(xmm2, double.class) },
{ vmStore(r9, int.class) },
{ vmStore(stackStorage(0), int.class) },
{ vmStore(stackStorage(1), int.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 0), int.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 8), int.class) },
{ vmStore(rax, long.class) },
});
@ -377,7 +384,7 @@ public class TestSysVCallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.appendArgumentLayouts(C_LONG).insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ unboxAddress(), vmStore(rdi, long.class) },
{ vmStore(rax, long.class) },
});
@ -399,7 +406,7 @@ public class TestSysVCallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.appendArgumentLayouts(C_LONG).insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
expectedBindings,
{ vmStore(rax, long.class) },
});
@ -425,20 +432,20 @@ public class TestSysVCallArranger extends CallArrangerTestBase {
},
{ MemoryLayout.structLayout(C_LONG, C_LONG, C_LONG), new Binding[]{
dup(),
bufferLoad(0, long.class), vmStore(stackStorage(0), long.class),
bufferLoad(0, long.class), vmStore(stackStorage(STACK_SLOT_SIZE, 0), long.class),
dup(),
bufferLoad(8, long.class), vmStore(stackStorage(1), long.class),
bufferLoad(16, long.class), vmStore(stackStorage(2), long.class)
bufferLoad(8, long.class), vmStore(stackStorage(STACK_SLOT_SIZE, 8), long.class),
bufferLoad(16, long.class), vmStore(stackStorage(STACK_SLOT_SIZE, 16), long.class)
}
},
{ MemoryLayout.structLayout(C_LONG, C_LONG, C_LONG, C_LONG), new Binding[]{
dup(),
bufferLoad(0, long.class), vmStore(stackStorage(0), long.class),
bufferLoad(0, long.class), vmStore(stackStorage(STACK_SLOT_SIZE, 0), long.class),
dup(),
bufferLoad(8, long.class), vmStore(stackStorage(1), long.class),
bufferLoad(8, long.class), vmStore(stackStorage(STACK_SLOT_SIZE, 8), long.class),
dup(),
bufferLoad(16, long.class), vmStore(stackStorage(2), long.class),
bufferLoad(24, long.class), vmStore(stackStorage(3), long.class)
bufferLoad(16, long.class), vmStore(stackStorage(STACK_SLOT_SIZE, 16), long.class),
bufferLoad(24, long.class), vmStore(stackStorage(STACK_SLOT_SIZE, 24), long.class)
}
},
};
@ -458,8 +465,8 @@ public class TestSysVCallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.appendArgumentLayouts(C_LONG).insertArgumentLayouts(0, ADDRESS, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r11, long.class) },
{ unboxAddress(), vmStore(r10, long.class) },
{ unboxAddress(), vmStore(RETURN_BUFFER_STORAGE, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(rax, long.class) }
});
@ -490,7 +497,7 @@ public class TestSysVCallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), FunctionDescriptor.ofVoid(ADDRESS, C_POINTER, C_LONG));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ unboxAddress(), vmStore(rdi, long.class) },
{ vmStore(rax, long.class) }
});

View File

@ -39,6 +39,8 @@ import java.lang.foreign.MemorySegment;
import jdk.internal.foreign.abi.Binding;
import jdk.internal.foreign.abi.CallingSequence;
import jdk.internal.foreign.abi.LinkerOptions;
import jdk.internal.foreign.abi.StubLocations;
import jdk.internal.foreign.abi.VMStorage;
import jdk.internal.foreign.abi.x64.windows.CallArranger;
import org.testng.annotations.Test;
@ -50,11 +52,15 @@ import static jdk.internal.foreign.PlatformLayouts.Win64.*;
import static jdk.internal.foreign.abi.Binding.*;
import static jdk.internal.foreign.abi.Binding.copy;
import static jdk.internal.foreign.abi.x64.X86_64Architecture.*;
import static jdk.internal.foreign.abi.x64.X86_64Architecture.Regs.*;
import static org.testng.Assert.*;
public class TestWindowsCallArranger extends CallArrangerTestBase {
private static final short STACK_SLOT_SIZE = 8;
private static final VMStorage TARGET_ADDRESS_STORAGE = StubLocations.TARGET_ADDRESS.storage(StorageType.PLACEHOLDER);
@Test
public void testEmpty() {
MethodType mt = MethodType.methodType(void.class);
@ -67,7 +73,7 @@ public class TestWindowsCallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) }
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) }
});
checkReturnBindings(callingSequence, new Binding[]{});
}
@ -84,7 +90,7 @@ public class TestWindowsCallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(rcx, int.class) },
{ vmStore(rdx, int.class) },
{ vmStore(r8, int.class) },
@ -106,7 +112,7 @@ public class TestWindowsCallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(xmm0, double.class) },
{ vmStore(xmm1, double.class) },
{ vmStore(xmm2, double.class) },
@ -130,15 +136,15 @@ public class TestWindowsCallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(rcx, long.class) },
{ vmStore(rdx, long.class) },
{ vmStore(xmm2, float.class) },
{ vmStore(xmm3, float.class) },
{ vmStore(stackStorage(0), long.class) },
{ vmStore(stackStorage(1), long.class) },
{ vmStore(stackStorage(2), float.class) },
{ vmStore(stackStorage(3), float.class) }
{ vmStore(stackStorage(STACK_SLOT_SIZE, 0), long.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 8), long.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 16), float.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 24), float.class) }
});
checkReturnBindings(callingSequence, new Binding[]{});
@ -161,7 +167,7 @@ public class TestWindowsCallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(rcx, int.class) },
{ vmStore(rdx, int.class) },
{
@ -170,13 +176,13 @@ public class TestWindowsCallArranger extends CallArrangerTestBase {
vmStore(r8, long.class)
},
{ vmStore(r9, int.class) },
{ vmStore(stackStorage(0), int.class) },
{ vmStore(stackStorage(1), double.class) },
{ vmStore(stackStorage(2), double.class) },
{ vmStore(stackStorage(3), double.class) },
{ vmStore(stackStorage(4), int.class) },
{ vmStore(stackStorage(5), int.class) },
{ vmStore(stackStorage(6), int.class) }
{ vmStore(stackStorage(STACK_SLOT_SIZE, 0), int.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 8), double.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 16), double.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 24), double.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 32), int.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 40), int.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 48), int.class) }
});
checkReturnBindings(callingSequence, new Binding[]{});
@ -190,7 +196,7 @@ public class TestWindowsCallArranger extends CallArrangerTestBase {
C_INT, C_DOUBLE, C_INT, C_DOUBLE, C_DOUBLE);
FunctionDescriptor fdExpected = FunctionDescriptor.ofVoid(
ADDRESS, C_INT, C_DOUBLE, C_INT, C_DOUBLE, C_DOUBLE);
CallArranger.Bindings bindings = CallArranger.getBindings(mt, fd, false, LinkerOptions.of(firstVariadicArg(2)));
CallArranger.Bindings bindings = CallArranger.getBindings(mt, fd, false, LinkerOptions.forDowncall(fd, firstVariadicArg(2)));
assertFalse(bindings.isInMemoryReturn());
CallingSequence callingSequence = bindings.callingSequence();
@ -198,12 +204,12 @@ public class TestWindowsCallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fdExpected);
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(rcx, int.class) },
{ vmStore(xmm1, double.class) },
{ vmStore(r8, int.class) },
{ dup(), vmStore(r9, double.class), vmStore(xmm3, double.class) },
{ vmStore(stackStorage(0), double.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 0), double.class) },
});
checkReturnBindings(callingSequence, new Binding[]{});
@ -232,7 +238,7 @@ public class TestWindowsCallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ bufferLoad(0, long.class), vmStore(rcx, long.class) }
});
@ -262,7 +268,7 @@ public class TestWindowsCallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{
copy(struct),
unboxAddress(),
@ -293,7 +299,7 @@ public class TestWindowsCallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ unboxAddress(), vmStore(rcx, long.class) }
});
@ -314,7 +320,7 @@ public class TestWindowsCallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
});
checkReturnBindings(callingSequence,
@ -338,7 +344,7 @@ public class TestWindowsCallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), FunctionDescriptor.ofVoid(ADDRESS, C_POINTER));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ unboxAddress(), vmStore(rcx, long.class) }
});
@ -367,23 +373,23 @@ public class TestWindowsCallArranger extends CallArrangerTestBase {
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(r10, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ copy(struct), unboxAddress(), vmStore(rcx, long.class) },
{ vmStore(rdx, int.class) },
{ vmStore(xmm2, double.class) },
{ unboxAddress(), vmStore(r9, long.class) },
{ copy(struct), unboxAddress(), vmStore(stackStorage(0), long.class) },
{ vmStore(stackStorage(1), int.class) },
{ vmStore(stackStorage(2), double.class) },
{ unboxAddress(), vmStore(stackStorage(3), long.class) },
{ copy(struct), unboxAddress(), vmStore(stackStorage(4), long.class) },
{ vmStore(stackStorage(5), int.class) },
{ vmStore(stackStorage(6), double.class) },
{ unboxAddress(), vmStore(stackStorage(7), long.class) },
{ copy(struct), unboxAddress(), vmStore(stackStorage(8), long.class) },
{ vmStore(stackStorage(9), int.class) },
{ vmStore(stackStorage(10), double.class) },
{ unboxAddress(), vmStore(stackStorage(11), long.class) },
{ copy(struct), unboxAddress(), vmStore(stackStorage(STACK_SLOT_SIZE, 0), long.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 8), int.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 16), double.class) },
{ unboxAddress(), vmStore(stackStorage(STACK_SLOT_SIZE, 24), long.class) },
{ copy(struct), unboxAddress(), vmStore(stackStorage(STACK_SLOT_SIZE, 32), long.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 40), int.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 48), double.class) },
{ unboxAddress(), vmStore(stackStorage(STACK_SLOT_SIZE, 56), long.class) },
{ copy(struct), unboxAddress(), vmStore(stackStorage(STACK_SLOT_SIZE, 64), long.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 72), int.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 80), double.class) },
{ unboxAddress(), vmStore(stackStorage(STACK_SLOT_SIZE, 88), long.class) },
});
checkReturnBindings(callingSequence, new Binding[]{});

View File

@ -0,0 +1,131 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @enablePreview
* @library ../ /test/lib
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @run testng/othervm --enable-native-access=ALL-UNNAMED TestCaptureCallState
*/
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import java.lang.foreign.Arena;
import java.lang.foreign.FunctionDescriptor;
import java.lang.foreign.Linker;
import java.lang.foreign.MemoryLayout;
import java.lang.foreign.MemorySegment;
import java.lang.foreign.StructLayout;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.VarHandle;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;
import static java.lang.foreign.MemoryLayout.PathElement.groupElement;
import static java.lang.foreign.ValueLayout.JAVA_DOUBLE;
import static java.lang.foreign.ValueLayout.JAVA_INT;
import static java.lang.foreign.ValueLayout.JAVA_LONG;
import static org.testng.Assert.assertEquals;
public class TestCaptureCallState extends NativeTestHelper {
static {
System.loadLibrary("CaptureCallState");
if (IS_WINDOWS) {
String system32 = System.getenv("SystemRoot") + "\\system32";
System.load(system32 + "\\Kernel32.dll");
System.load(system32 + "\\Ws2_32.dll");
}
}
private record SaveValuesCase(String nativeTarget, FunctionDescriptor nativeDesc, String threadLocalName, Consumer<Object> resultCheck) {}
@Test(dataProvider = "cases")
public void testSavedThreadLocal(SaveValuesCase testCase) throws Throwable {
Linker.Option.CaptureCallState stl = Linker.Option.captureCallState(testCase.threadLocalName());
MethodHandle handle = downcallHandle(testCase.nativeTarget(), testCase.nativeDesc(), stl);
VarHandle errnoHandle = stl.layout().varHandle(groupElement(testCase.threadLocalName()));
try (Arena arena = Arena.openConfined()) {
MemorySegment saveSeg = arena.allocate(stl.layout());
int testValue = 42;
boolean needsAllocator = testCase.nativeDesc().returnLayout().map(StructLayout.class::isInstance).orElse(false);
Object result = needsAllocator
? handle.invoke(arena, saveSeg, testValue)
: handle.invoke(saveSeg, testValue);
testCase.resultCheck().accept(result);
int savedErrno = (int) errnoHandle.get(saveSeg);
assertEquals(savedErrno, testValue);
}
}
@DataProvider
public static Object[][] cases() {
List<SaveValuesCase> cases = new ArrayList<>();
cases.add(new SaveValuesCase("set_errno_V", FunctionDescriptor.ofVoid(JAVA_INT), "errno", o -> {}));
cases.add(new SaveValuesCase("set_errno_I", FunctionDescriptor.of(JAVA_INT, JAVA_INT), "errno", o -> assertEquals((int) o, 42)));
cases.add(new SaveValuesCase("set_errno_D", FunctionDescriptor.of(JAVA_DOUBLE, JAVA_INT), "errno", o -> assertEquals((double) o, 42.0)));
cases.add(structCase("SL", Map.of(JAVA_LONG.withName("x"), 42L)));
cases.add(structCase("SLL", Map.of(JAVA_LONG.withName("x"), 42L,
JAVA_LONG.withName("y"), 42L)));
cases.add(structCase("SLLL", Map.of(JAVA_LONG.withName("x"), 42L,
JAVA_LONG.withName("y"), 42L,
JAVA_LONG.withName("z"), 42L)));
cases.add(structCase("SD", Map.of(JAVA_DOUBLE.withName("x"), 42D)));
cases.add(structCase("SDD", Map.of(JAVA_DOUBLE.withName("x"), 42D,
JAVA_DOUBLE.withName("y"), 42D)));
cases.add(structCase("SDDD", Map.of(JAVA_DOUBLE.withName("x"), 42D,
JAVA_DOUBLE.withName("y"), 42D,
JAVA_DOUBLE.withName("z"), 42D)));
if (IS_WINDOWS) {
cases.add(new SaveValuesCase("SetLastError", FunctionDescriptor.ofVoid(JAVA_INT), "GetLastError", o -> {}));
cases.add(new SaveValuesCase("WSASetLastError", FunctionDescriptor.ofVoid(JAVA_INT), "WSAGetLastError", o -> {}));
}
return cases.stream().map(tc -> new Object[] {tc}).toArray(Object[][]::new);
}
static SaveValuesCase structCase(String name, Map<MemoryLayout, Object> fields) {
StructLayout layout = MemoryLayout.structLayout(fields.keySet().toArray(MemoryLayout[]::new));
Consumer<Object> check = o -> {};
for (var field : fields.entrySet()) {
MemoryLayout fieldLayout = field.getKey();
VarHandle fieldHandle = layout.varHandle(MemoryLayout.PathElement.groupElement(fieldLayout.name().get()));
Object value = field.getValue();
check = check.andThen(o -> assertEquals(fieldHandle.get(o), value));
}
return new SaveValuesCase("set_errno_" + name, FunctionDescriptor.of(layout, JAVA_INT), "errno", check);
}
}

View File

@ -0,0 +1,122 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include <errno.h>
#ifdef _WIN64
#define EXPORT __declspec(dllexport)
#else
#define EXPORT
#endif
EXPORT void set_errno_V(int value) {
errno = value;
}
EXPORT int set_errno_I(int value) {
errno = value;
return 42;
}
EXPORT double set_errno_D(int value) {
errno = value;
return 42.0;
}
struct SL {
long long x;
};
EXPORT struct SL set_errno_SL(int value) {
errno = value;
struct SL s;
s.x = 42;
return s;
}
struct SLL {
long long x;
long long y;
};
EXPORT struct SLL set_errno_SLL(int value) {
errno = value;
struct SLL s;
s.x = 42;
s.y = 42;
return s;
}
struct SLLL {
long long x;
long long y;
long long z;
};
EXPORT struct SLLL set_errno_SLLL(int value) {
errno = value;
struct SLLL s;
s.x = 42;
s.y = 42;
s.z = 42;
return s;
}
struct SD {
double x;
};
EXPORT struct SD set_errno_SD(int value) {
errno = value;
struct SD s;
s.x = 42.0;
return s;
}
struct SDD {
double x;
double y;
};
EXPORT struct SDD set_errno_SDD(int value) {
errno = value;
struct SDD s;
s.x = 42.0;
s.y = 42.0;
return s;
}
struct SDDD {
double x;
double y;
double z;
};
EXPORT struct SDDD set_errno_SDDD(int value) {
errno = value;
struct SDDD s;
s.x = 42.0;
s.y = 42.0;
s.z = 42.0;
return s;
}