8293841: RISC-V: Implementation of Foreign Function & Memory API (Preview)

Co-authored-by: Weikai He <weikai@isrc.iscas.ac.cn>
Co-authored-by: Fei Yang <fyang@openjdk.org>
Reviewed-by: jvernee, fyang, shade, yadongwang
Feilong Jiang 2023-01-19 01:33:35 +00:00 committed by Fei Yang
parent 8e3036cf74
commit 24cdcd4c70
64 changed files with 2844 additions and 125 deletions
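For context, downcalls linked through the preview FFM API look like the sketch below from the Java side. This is a minimal, hypothetical example assuming the JDK 20 preview API shape (Linker, SymbolLookup, Arena; run with --enable-preview); it is illustrative only and not part of this change:

import java.lang.foreign.*;
import java.lang.invoke.MethodHandle;

public class StrlenExample {
    public static void main(String[] args) throws Throwable {
        Linker linker = Linker.nativeLinker();
        // Link a downcall handle for strlen(const char*) from the default (libc) lookup.
        MethodHandle strlen = linker.downcallHandle(
                linker.defaultLookup().find("strlen").orElseThrow(),
                FunctionDescriptor.of(ValueLayout.JAVA_LONG, ValueLayout.ADDRESS));
        try (Arena arena = Arena.openConfined()) {
            MemorySegment str = arena.allocateUtf8String("hello, riscv64");
            long len = (long) strlen.invokeExact(str);
            System.out.println(len); // prints 14
        }
    }
}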

View File

@ -1,6 +1,7 @@
/*
* Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* Copyright (c) 2022, Institute of Software, Chinese Academy of Sciences. All rights reserved.
* Copyright (c) 2022, Institute of Software, Chinese Academy of Sciences.
* All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,8 +24,74 @@
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "prims/downcallLinker.hpp"
#include "utilities/debug.hpp"
#include "runtime/globals.hpp"
#include "runtime/stubCodeGenerator.hpp"
#define __ _masm->
class DowncallStubGenerator : public StubCodeGenerator {
BasicType* _signature;
int _num_args;
BasicType _ret_bt;
const ABIDescriptor& _abi;
const GrowableArray<VMStorage>& _input_registers;
const GrowableArray<VMStorage>& _output_registers;
bool _needs_return_buffer;
int _captured_state_mask;
int _frame_complete;
int _frame_size_slots;
OopMapSet* _oop_maps;
public:
DowncallStubGenerator(CodeBuffer* buffer,
BasicType* signature,
int num_args,
BasicType ret_bt,
const ABIDescriptor& abi,
const GrowableArray<VMStorage>& input_registers,
const GrowableArray<VMStorage>& output_registers,
bool needs_return_buffer,
int captured_state_mask)
: StubCodeGenerator(buffer, PrintMethodHandleStubs),
_signature(signature),
_num_args(num_args),
_ret_bt(ret_bt),
_abi(abi),
_input_registers(input_registers),
_output_registers(output_registers),
_needs_return_buffer(needs_return_buffer),
_captured_state_mask(captured_state_mask),
_frame_complete(0),
_frame_size_slots(0),
_oop_maps(NULL) {
}
void generate();
int frame_complete() const {
return _frame_complete;
}
int framesize() const {
return (_frame_size_slots >> (LogBytesPerWord - LogBytesPerInt));
}
OopMapSet* oop_maps() const {
return _oop_maps;
}
};
static const int native_invoker_code_size = 1024;
RuntimeStub* DowncallLinker::make_downcall_stub(BasicType* signature,
int num_args,
@ -35,6 +101,236 @@ RuntimeStub* DowncallLinker::make_downcall_stub(BasicType* signature,
const GrowableArray<VMStorage>& output_registers,
bool needs_return_buffer,
int captured_state_mask) {
Unimplemented();
return nullptr;
int locs_size = 64;
CodeBuffer code("nep_invoker_blob", native_invoker_code_size, locs_size);
DowncallStubGenerator g(&code, signature, num_args, ret_bt, abi,
input_registers, output_registers,
needs_return_buffer, captured_state_mask);
g.generate();
code.log_section_sizes("nep_invoker_blob");
RuntimeStub* stub =
RuntimeStub::new_runtime_stub("nep_invoker_blob",
&code,
g.frame_complete(),
g.framesize(),
g.oop_maps(), false);
#ifndef PRODUCT
LogTarget(Trace, foreign, downcall) lt;
if (lt.is_enabled()) {
ResourceMark rm;
LogStream ls(lt);
stub->print_on(&ls);
}
#endif
return stub;
}
void DowncallStubGenerator::generate() {
enum layout {
fp_off,
fp_off2,
ra_off,
ra_off2,
framesize // inclusive of return address
// The following are also computed dynamically:
// spill area for return value
// out arg area (e.g. for stack args)
};
VMStorage shuffle_reg = as_VMStorage(x9);
JavaCallingConvention in_conv;
NativeCallingConvention out_conv(_input_registers);
ArgumentShuffle arg_shuffle(_signature, _num_args, _signature, _num_args, &in_conv, &out_conv, shuffle_reg);
#ifndef PRODUCT
LogTarget(Trace, foreign, downcall) lt;
if (lt.is_enabled()) {
ResourceMark rm;
LogStream ls(lt);
arg_shuffle.print_on(&ls);
}
#endif
int allocated_frame_size = 0;
assert(_abi._shadow_space_bytes == 0, "not expecting shadow space on RISCV64");
allocated_frame_size += arg_shuffle.out_arg_bytes();
bool should_save_return_value = !_needs_return_buffer;
RegSpiller out_reg_spiller(_output_registers);
int spill_offset = -1;
if (should_save_return_value) {
spill_offset = 0;
// spill area can be shared with shadow space and out args,
// since they are only used before the call,
// and spill area is only used after.
allocated_frame_size = out_reg_spiller.spill_size_bytes() > allocated_frame_size
? out_reg_spiller.spill_size_bytes()
: allocated_frame_size;
}
StubLocations locs;
locs.set(StubLocations::TARGET_ADDRESS, _abi._scratch1);
if (_needs_return_buffer) {
locs.set_frame_data(StubLocations::RETURN_BUFFER, allocated_frame_size);
allocated_frame_size += BytesPerWord; // for address spill
}
if (_captured_state_mask != 0) {
locs.set_frame_data(StubLocations::CAPTURED_STATE_BUFFER, allocated_frame_size);
allocated_frame_size += BytesPerWord;
}
allocated_frame_size = align_up(allocated_frame_size, 16);
// _frame_size_slots is in 32-bit stack slots:
_frame_size_slots += framesize + (allocated_frame_size >> LogBytesPerInt);
assert(is_even(_frame_size_slots / 2), "sp not 16-byte aligned");
_oop_maps = new OopMapSet();
address start = __ pc();
__ enter();
// ra and fp are already in place
__ sub(sp, sp, allocated_frame_size); // prolog
_frame_complete = __ pc() - start; // frame build complete.
__ block_comment("{ thread java2native");
address the_pc = __ pc();
__ set_last_Java_frame(sp, fp, the_pc, t0);
OopMap* map = new OopMap(_frame_size_slots, 0);
_oop_maps->add_gc_map(the_pc - start, map);
// State transition
__ mv(t0, _thread_in_native);
__ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
__ sw(t0, Address(xthread, JavaThread::thread_state_offset()));
__ block_comment("} thread java2native");
__ block_comment("{ argument shuffle");
arg_shuffle.generate(_masm, shuffle_reg, 0, _abi._shadow_space_bytes, locs);
__ block_comment("} argument shuffle");
__ jalr(as_Register(locs.get(StubLocations::TARGET_ADDRESS)));
// this call is assumed not to have killed xthread
if (_needs_return_buffer) {
// When a return buffer is used, copy the contents of the return registers into the
// return buffer; the operations created by BoxBindingCalculator then read the values from it.
__ ld(t0, Address(sp, locs.data_offset(StubLocations::RETURN_BUFFER)));
int offset = 0;
for (int i = 0; i < _output_registers.length(); i++) {
VMStorage reg = _output_registers.at(i);
if (reg.type() == StorageType::INTEGER) {
__ sd(as_Register(reg), Address(t0, offset));
offset += 8;
} else if (reg.type() == StorageType::FLOAT) {
__ fsd(as_FloatRegister(reg), Address(t0, offset));
offset += 8;
} else {
ShouldNotReachHere();
}
}
}
//////////////////////////////////////////////////////////////////////////////
if (_captured_state_mask != 0) {
__ block_comment("{ save thread local");
if (should_save_return_value) {
out_reg_spiller.generate_spill(_masm, spill_offset);
}
__ ld(c_rarg0, Address(sp, locs.data_offset(StubLocations::CAPTURED_STATE_BUFFER)));
__ mv(c_rarg1, _captured_state_mask);
__ rt_call(CAST_FROM_FN_PTR(address, DowncallLinker::capture_state));
if (should_save_return_value) {
out_reg_spiller.generate_fill(_masm, spill_offset);
}
__ block_comment("} save thread local");
}
//////////////////////////////////////////////////////////////////////////////
__ block_comment("{ thread native2java");
__ mv(t0, _thread_in_native_trans);
__ sw(t0, Address(xthread, JavaThread::thread_state_offset()));
// Force this write out before the read below
__ membar(MacroAssembler::AnyAny);
Label L_after_safepoint_poll;
Label L_safepoint_poll_slow_path;
__ safepoint_poll(L_safepoint_poll_slow_path, true /* at_return */, true /* acquire */, false /* in_nmethod */);
__ lwu(t0, Address(xthread, JavaThread::suspend_flags_offset()));
__ bnez(t0, L_safepoint_poll_slow_path);
__ bind(L_after_safepoint_poll);
__ mv(t0, _thread_in_Java);
__ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
__ sw(t0, Address(xthread, JavaThread::thread_state_offset()));
__ block_comment("reguard stack check");
Label L_reguard;
Label L_after_reguard;
__ lbu(t0, Address(xthread, JavaThread::stack_guard_state_offset()));
__ mv(t1, StackOverflow::stack_guard_yellow_reserved_disabled);
__ beq(t0, t1, L_reguard);
__ bind(L_after_reguard);
__ reset_last_Java_frame(true);
__ block_comment("} thread native2java");
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret();
//////////////////////////////////////////////////////////////////////////////
__ block_comment("{ L_safepoint_poll_slow_path");
__ bind(L_safepoint_poll_slow_path);
if (should_save_return_value) {
// Need to save the native result registers around any runtime calls.
out_reg_spiller.generate_spill(_masm, spill_offset);
}
__ mv(c_rarg0, xthread);
assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
__ rt_call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
if (should_save_return_value) {
out_reg_spiller.generate_fill(_masm, spill_offset);
}
__ j(L_after_safepoint_poll);
__ block_comment("} L_safepoint_poll_slow_path");
//////////////////////////////////////////////////////////////////////////////
__ block_comment("{ L_reguard");
__ bind(L_reguard);
if (should_save_return_value) {
// Need to save the native result registers around any runtime calls.
out_reg_spiller.generate_spill(_masm, spill_offset);
}
__ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
if (should_save_return_value) {
out_reg_spiller.generate_fill(_masm, spill_offset);
}
__ j(L_after_reguard);
__ block_comment("} L_reguard");
//////////////////////////////////////////////////////////////////////////////
__ flush();
}

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,30 +24,169 @@
*/
#include "precompiled.hpp"
#include "code/vmreg.hpp"
#include "code/vmreg.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "oops/oopCast.inline.hpp"
#include "prims/foreignGlobals.hpp"
#include "utilities/debug.hpp"
#include "prims/foreignGlobals.inline.hpp"
#include "prims/vmstorage.hpp"
#include "utilities/formatBuffer.hpp"
class MacroAssembler;
bool ABIDescriptor::is_volatile_reg(Register reg) const {
return _integer_argument_registers.contains(reg)
|| _integer_additional_volatile_registers.contains(reg);
}
bool ABIDescriptor::is_volatile_reg(FloatRegister reg) const {
return _float_argument_registers.contains(reg)
|| _float_additional_volatile_registers.contains(reg);
}
const ABIDescriptor ForeignGlobals::parse_abi_descriptor(jobject jabi) {
ShouldNotCallThis();
return {};
oop abi_oop = JNIHandles::resolve_non_null(jabi);
ABIDescriptor abi;
objArrayOop inputStorage = jdk_internal_foreign_abi_ABIDescriptor::inputStorage(abi_oop);
parse_register_array(inputStorage, StorageType::INTEGER, abi._integer_argument_registers, as_Register);
parse_register_array(inputStorage, StorageType::FLOAT, abi._float_argument_registers, as_FloatRegister);
objArrayOop outputStorage = jdk_internal_foreign_abi_ABIDescriptor::outputStorage(abi_oop);
parse_register_array(outputStorage, StorageType::INTEGER, abi._integer_return_registers, as_Register);
parse_register_array(outputStorage, StorageType::FLOAT, abi._float_return_registers, as_FloatRegister);
objArrayOop volatileStorage = jdk_internal_foreign_abi_ABIDescriptor::volatileStorage(abi_oop);
parse_register_array(volatileStorage, StorageType::INTEGER, abi._integer_additional_volatile_registers, as_Register);
parse_register_array(volatileStorage, StorageType::FLOAT, abi._float_additional_volatile_registers, as_FloatRegister);
abi._stack_alignment_bytes = jdk_internal_foreign_abi_ABIDescriptor::stackAlignment(abi_oop);
abi._shadow_space_bytes = jdk_internal_foreign_abi_ABIDescriptor::shadowSpace(abi_oop);
abi._scratch1 = parse_vmstorage(jdk_internal_foreign_abi_ABIDescriptor::scratch1(abi_oop));
abi._scratch2 = parse_vmstorage(jdk_internal_foreign_abi_ABIDescriptor::scratch2(abi_oop));
return abi;
}
int RegSpiller::pd_reg_size(VMStorage reg) {
Unimplemented();
return -1;
if (reg.type() == StorageType::INTEGER || reg.type() == StorageType::FLOAT) {
return 8;
}
return 0; // stack and BAD
}
void RegSpiller::pd_store_reg(MacroAssembler* masm, int offset, VMStorage reg) {
Unimplemented();
if (reg.type() == StorageType::INTEGER) {
masm->sd(as_Register(reg), Address(sp, offset));
} else if (reg.type() == StorageType::FLOAT) {
masm->fsd(as_FloatRegister(reg), Address(sp, offset));
} else {
// stack and BAD
}
}
void RegSpiller::pd_load_reg(MacroAssembler* masm, int offset, VMStorage reg) {
Unimplemented();
if (reg.type() == StorageType::INTEGER) {
masm->ld(as_Register(reg), Address(sp, offset));
} else if (reg.type() == StorageType::FLOAT) {
masm->fld(as_FloatRegister(reg), Address(sp, offset));
} else {
// stack and BAD
}
}
static constexpr int FP_BIAS = 0; // sender_sp_offset is 0 on RISCV
static void move_reg64(MacroAssembler* masm, int out_stk_bias,
Register from_reg, VMStorage to_reg) {
int out_bias = 0;
switch (to_reg.type()) {
case StorageType::INTEGER:
assert(to_reg.segment_mask() == REG64_MASK, "only moves to 64-bit integer registers supported");
masm->mv(as_Register(to_reg), from_reg);
break;
case StorageType::STACK:
out_bias = out_stk_bias;
case StorageType::FRAME_DATA: {
Address dest(sp, to_reg.offset() + out_bias);
masm->sd(from_reg, dest);
} break;
default: ShouldNotReachHere();
}
}
static void move_stack(MacroAssembler* masm, Register tmp_reg, int in_stk_bias, int out_stk_bias,
VMStorage from_reg, VMStorage to_reg) {
Address from_addr(fp, FP_BIAS + from_reg.offset() + in_stk_bias);
int out_bias = 0;
switch (to_reg.type()) {
case StorageType::INTEGER:
assert(to_reg.segment_mask() == REG64_MASK, "only moves to 64-bit integer registers supported");
masm->ld(as_Register(to_reg), from_addr);
break;
case StorageType::FLOAT:
assert(to_reg.segment_mask() == FP_MASK, "only moves to floating-point registers supported");
masm->fld(as_FloatRegister(to_reg), from_addr);
break;
case StorageType::STACK:
out_bias = out_stk_bias;
case StorageType::FRAME_DATA: {
masm->ld(tmp_reg, from_addr);
Address dest(sp, to_reg.offset() + out_bias);
masm->sd(tmp_reg, dest);
} break;
default: ShouldNotReachHere();
}
}
static void move_fp(MacroAssembler* masm, int out_stk_bias,
FloatRegister from_reg, VMStorage to_reg) {
switch (to_reg.type()) {
case StorageType::INTEGER:
assert(to_reg.segment_mask() == REG64_MASK, "only moves to 64-bit integer registers supported");
masm->fmv_x_d(as_Register(to_reg), from_reg);
break;
case StorageType::FLOAT:
assert(to_reg.segment_mask() == FP_MASK, "only moves to floating-point registers supported");
masm->fmv_d(as_FloatRegister(to_reg), from_reg);
break;
case StorageType::STACK: {
Address dest(sp, to_reg.offset() + out_stk_bias);
masm->fsd(from_reg, dest);
} break;
default: ShouldNotReachHere();
}
}
void ArgumentShuffle::pd_generate(MacroAssembler* masm, VMStorage tmp, int in_stk_bias, int out_stk_bias, const StubLocations& locs) const {
Unimplemented();
Register tmp_reg = as_Register(tmp);
for (int i = 0; i < _moves.length(); i++) {
Move move = _moves.at(i);
VMStorage from_reg = move.from;
VMStorage to_reg = move.to;
// replace any placeholders
if (from_reg.type() == StorageType::PLACEHOLDER) {
from_reg = locs.get(from_reg);
}
if (to_reg.type() == StorageType::PLACEHOLDER) {
to_reg = locs.get(to_reg);
}
switch (from_reg.type()) {
case StorageType::INTEGER:
assert(from_reg.segment_mask() == REG64_MASK, "only 64-bit integer register supported");
move_reg64(masm, out_stk_bias, as_Register(from_reg), to_reg);
break;
case StorageType::FLOAT:
assert(from_reg.segment_mask() == FP_MASK, "only floating-point register supported");
move_fp(masm, out_stk_bias, as_FloatRegister(from_reg), to_reg);
break;
case StorageType::STACK:
move_stack(masm, tmp_reg, in_stk_bias, out_stk_bias, from_reg, to_reg);
break;
default: ShouldNotReachHere();
}
}
}

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,6 +26,27 @@
#ifndef CPU_RISCV_FOREIGN_GLOBALS_RISCV_HPP
#define CPU_RISCV_FOREIGN_GLOBALS_RISCV_HPP
class ABIDescriptor {};
#include "asm/macroAssembler.hpp"
#include "utilities/growableArray.hpp"
struct ABIDescriptor {
GrowableArray<Register> _integer_argument_registers;
GrowableArray<Register> _integer_return_registers;
GrowableArray<FloatRegister> _float_argument_registers;
GrowableArray<FloatRegister> _float_return_registers;
GrowableArray<Register> _integer_additional_volatile_registers;
GrowableArray<FloatRegister> _float_additional_volatile_registers;
int32_t _stack_alignment_bytes;
int32_t _shadow_space_bytes;
VMStorage _scratch1;
VMStorage _scratch2;
bool is_volatile_reg(Register reg) const;
bool is_volatile_reg(FloatRegister reg) const;
};
#endif // CPU_RISCV_FOREIGN_GLOBALS_RISCV_HPP

View File

@ -1,7 +1,7 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -346,18 +346,35 @@ frame frame::sender_for_entry_frame(RegisterMap* map) const {
}
UpcallStub::FrameData* UpcallStub::frame_data_for_frame(const frame& frame) const {
ShouldNotCallThis();
return nullptr;
assert(frame.is_upcall_stub_frame(), "wrong frame");
// need unextended_sp here, since normal sp is wrong for interpreter callees
return reinterpret_cast<UpcallStub::FrameData*>(
reinterpret_cast<address>(frame.unextended_sp()) + in_bytes(_frame_data_offset));
}
bool frame::upcall_stub_frame_is_first() const {
ShouldNotCallThis();
return false;
assert(is_upcall_stub_frame(), "must be optimized entry frame");
UpcallStub* blob = _cb->as_upcall_stub();
JavaFrameAnchor* jfa = blob->jfa_for_frame(*this);
return jfa->last_Java_sp() == NULL;
}
frame frame::sender_for_upcall_stub_frame(RegisterMap* map) const {
ShouldNotCallThis();
return {};
assert(map != NULL, "map must be set");
UpcallStub* blob = _cb->as_upcall_stub();
// Java frame called from C; skip all C frames and return top C
// frame of that chunk as the sender
JavaFrameAnchor* jfa = blob->jfa_for_frame(*this);
assert(!upcall_stub_frame_is_first(), "must have a frame anchor to go back to");
assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
// Since we are walking the stack now this nested anchor is obviously walkable
// even if it wasn't when it was stacked.
jfa->make_walkable();
map->clear();
assert(map->include_argument_oops(), "should be set by clear");
frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
return fr;
}
//------------------------------------------------------------------------------

View File

@ -1,7 +1,7 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -384,7 +384,9 @@ frame frame::sender_raw(RegisterMap* map) const {
if (is_entry_frame()) {
return sender_for_entry_frame(map);
}
if (is_upcall_stub_frame()) {
return sender_for_upcall_stub_frame(map);
}
if (is_interpreted_frame()) {
return sender_for_interpreter_frame(map);
}

View File

@ -1,7 +1,7 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -909,7 +909,7 @@ public:
if (is_offset_in_range(adr.offset(), 12)) { \
Assembler::NAME(Rs, adr.base(), adr.offset()); \
} else { \
int32_t offset= 0; \
int32_t offset = 0; \
assert_different_registers(Rs, temp); \
baseOffset32(temp, adr, offset); \
Assembler::NAME(Rs, temp, offset); \
@ -1314,7 +1314,6 @@ public:
VMRegPair dst,
bool is_receiver,
int* receiver_offset);
void rt_call(address dest, Register tmp = t0);
void call(const address dest, Register temp = t0) {

View File

@ -1,7 +1,7 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -255,7 +255,16 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
void MethodHandles::jump_to_native_invoker(MacroAssembler* _masm, Register nep_reg, Register temp_target) {
BLOCK_COMMENT("jump_to_native_invoker {");
__ stop("Should not reach here");
assert_different_registers(nep_reg, temp_target);
assert(nep_reg != noreg, "required register");
// Load the invoker, as NEP -> .invoker
__ verify_oop(nep_reg);
__ access_load_at(T_ADDRESS, IN_HEAP, temp_target,
Address(nep_reg, NONZERO(jdk_internal_foreign_abi_NativeEntryPoint::downcall_stub_address_offset_in_bytes())),
noreg, noreg);
__ jr(temp_target);
BLOCK_COMMENT("} jump_to_native_invoker");
}
@ -270,7 +279,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
Register temp2 = x28;
Register temp3 = x29;
if (for_compiler_entry) {
assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : j_rarg0), "only valid assignment");
assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic || iid == vmIntrinsics::_linkToNative ? noreg : j_rarg0), "only valid assignment");
assert_different_registers(temp1, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7);
assert_different_registers(temp2, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7);
assert_different_registers(temp3, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7);

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -70,9 +70,9 @@ class Register {
public:
// accessors
int raw_encoding() const { return this - first(); }
int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); }
bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; }
constexpr int raw_encoding() const { return this - first(); }
constexpr int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); }
constexpr bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; }
// for rvc
int compressed_raw_encoding() const {
@ -104,7 +104,7 @@ class Register {
int operator==(const Register r) const { return _encoding == r._encoding; }
int operator!=(const Register r) const { return _encoding != r._encoding; }
const RegisterImpl* operator->() const { return RegisterImpl::first() + _encoding; }
constexpr const RegisterImpl* operator->() const { return RegisterImpl::first() + _encoding; }
};
extern Register::RegisterImpl all_RegisterImpls[Register::number_of_registers + 1] INTERNAL_VISIBILITY;
@ -187,9 +187,9 @@ class FloatRegister {
public:
// accessors
int raw_encoding() const { return this - first(); }
int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); }
bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; }
constexpr int raw_encoding() const { return this - first(); }
constexpr int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); }
constexpr bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; }
// for rvc
int compressed_raw_encoding() const {
@ -219,7 +219,7 @@ class FloatRegister {
int operator==(const FloatRegister r) const { return _encoding == r._encoding; }
int operator!=(const FloatRegister r) const { return _encoding != r._encoding; }
const FloatRegisterImpl* operator->() const { return FloatRegisterImpl::first() + _encoding; }
constexpr const FloatRegisterImpl* operator->() const { return FloatRegisterImpl::first() + _encoding; }
};
extern FloatRegister::FloatRegisterImpl all_FloatRegisterImpls[FloatRegister::number_of_registers + 1] INTERNAL_VISIBILITY;
@ -297,9 +297,9 @@ class VectorRegister {
public:
// accessors
int raw_encoding() const { return this - first(); }
int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); }
bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; }
constexpr int raw_encoding() const { return this - first(); }
constexpr int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); }
constexpr bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; }
// derived registers, offsets, and addresses
inline VectorRegister successor() const;
@ -314,7 +314,7 @@ class VectorRegister {
int operator==(const VectorRegister r) const { return _encoding == r._encoding; }
int operator!=(const VectorRegister r) const { return _encoding != r._encoding; }
const VectorRegisterImpl* operator->() const { return VectorRegisterImpl::first() + _encoding; }
constexpr const VectorRegisterImpl* operator->() const { return VectorRegisterImpl::first() + _encoding; }
};
extern VectorRegister::VectorRegisterImpl all_VectorRegisterImpls[VectorRegister::number_of_registers + 1] INTERNAL_VISIBILITY;

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,8 +24,95 @@
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "prims/upcallLinker.hpp"
#include "utilities/debug.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"
#include "vmreg_riscv.inline.hpp"
#define __ _masm->
// for callee saved regs, according to the caller's ABI
static int compute_reg_save_area_size(const ABIDescriptor& abi) {
int size = 0;
for (int i = 0; i < Register::number_of_registers; i++) {
Register reg = as_Register(i);
if (reg == fp || reg == sp) continue; // saved/restored by prologue/epilogue
if (!abi.is_volatile_reg(reg)) {
size += 8; // bytes
}
}
for (int i = 0; i < FloatRegister::number_of_registers; i++) {
FloatRegister reg = as_FloatRegister(i);
if (!abi.is_volatile_reg(reg)) {
size += 8; // bytes
}
}
return size;
}
static void preserve_callee_saved_registers(MacroAssembler* _masm, const ABIDescriptor& abi, int reg_save_area_offset) {
// 1. iterate all registers in the architecture
// - check if they are volatile or not for the given abi
// - if NOT, we need to save it here
int offset = reg_save_area_offset;
__ block_comment("{ preserve_callee_saved_regs ");
for (int i = 0; i < Register::number_of_registers; i++) {
Register reg = as_Register(i);
if (reg == fp || reg == sp) continue; // saved/restored by prologue/epilogue
if (!abi.is_volatile_reg(reg)) {
__ sd(reg, Address(sp, offset));
offset += 8;
}
}
for (int i = 0; i < FloatRegister::number_of_registers; i++) {
FloatRegister reg = as_FloatRegister(i);
if (!abi.is_volatile_reg(reg)) {
__ fsd(reg, Address(sp, offset));
offset += 8;
}
}
__ block_comment("} preserve_callee_saved_regs ");
}
static void restore_callee_saved_registers(MacroAssembler* _masm, const ABIDescriptor& abi, int reg_save_area_offset) {
// 1. iterate all registers in the architecture
// - check if they are volatile or not for the given abi
// - if NOT, we need to restore it here
int offset = reg_save_area_offset;
__ block_comment("{ restore_callee_saved_regs ");
for (int i = 0; i < Register::number_of_registers; i++) {
Register reg = as_Register(i);
if (reg == fp || reg == sp) continue; // saved/restored by prologue/epilogue
if (!abi.is_volatile_reg(reg)) {
__ ld(reg, Address(sp, offset));
offset += 8;
}
}
for (int i = 0; i < FloatRegister::number_of_registers; i++) {
FloatRegister reg = as_FloatRegister(i);
if (!abi.is_volatile_reg(reg)) {
__ fld(reg, Address(sp, offset));
offset += 8;
}
}
__ block_comment("} restore_callee_saved_regs ");
}
address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
BasicType* in_sig_bt, int total_in_args,
@ -33,6 +120,242 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
BasicType ret_type,
jobject jabi, jobject jconv,
bool needs_return_buffer, int ret_buf_size) {
ShouldNotCallThis();
return nullptr;
ResourceMark rm;
const ABIDescriptor abi = ForeignGlobals::parse_abi_descriptor(jabi);
const CallRegs call_regs = ForeignGlobals::parse_call_regs(jconv);
CodeBuffer buffer("upcall_stub", /* code_size = */ 2048, /* locs_size = */ 1024);
Register shuffle_reg = x9;
JavaCallingConvention out_conv;
NativeCallingConvention in_conv(call_regs._arg_regs);
ArgumentShuffle arg_shuffle(in_sig_bt, total_in_args, out_sig_bt, total_out_args, &in_conv, &out_conv, as_VMStorage(shuffle_reg));
int preserved_bytes = SharedRuntime::out_preserve_stack_slots() * VMRegImpl::stack_slot_size;
int stack_bytes = preserved_bytes + arg_shuffle.out_arg_bytes();
int out_arg_area = align_up(stack_bytes, StackAlignmentInBytes);
#ifndef PRODUCT
LogTarget(Trace, foreign, upcall) lt;
if (lt.is_enabled()) {
ResourceMark rm;
LogStream ls(lt);
arg_shuffle.print_on(&ls);
}
#endif
// out_arg_area (for stack arguments) doubles as shadow space for native calls.
// make sure it is big enough.
if (out_arg_area < frame::arg_reg_save_area_bytes) {
out_arg_area = frame::arg_reg_save_area_bytes;
}
int reg_save_area_size = compute_reg_save_area_size(abi);
RegSpiller arg_spiller(call_regs._arg_regs);
RegSpiller result_spiller(call_regs._ret_regs);
int shuffle_area_offset = 0;
int res_save_area_offset = shuffle_area_offset + out_arg_area;
int arg_save_area_offset = res_save_area_offset + result_spiller.spill_size_bytes();
int reg_save_area_offset = arg_save_area_offset + arg_spiller.spill_size_bytes();
int frame_data_offset = reg_save_area_offset + reg_save_area_size;
int frame_bottom_offset = frame_data_offset + sizeof(UpcallStub::FrameData);
StubLocations locs;
int ret_buf_offset = -1;
if (needs_return_buffer) {
ret_buf_offset = frame_bottom_offset;
frame_bottom_offset += ret_buf_size;
// use a free register from which the shuffling code can pick up the
// return buffer address
locs.set(StubLocations::RETURN_BUFFER, abi._scratch1);
}
int frame_size = frame_bottom_offset;
frame_size = align_up(frame_size, StackAlignmentInBytes);
// The space we have allocated will look like:
//
//
// FP-> | |
// |---------------------| = frame_bottom_offset = frame_size
// | (optional) |
// | ret_buf |
// |---------------------| = ret_buf_offset
// | |
// | FrameData |
// |---------------------| = frame_data_offset
// | |
// | reg_save_area |
// |---------------------| = reg_save_area_offset
// | |
// | arg_save_area |
// |---------------------| = arg_save_area_offset
// | |
// | res_save_area |
// |---------------------| = res_save_area_offset
// | |
// SP-> | out_arg_area | needs to be at end for shadow space
//
//
//////////////////////////////////////////////////////////////////////////////
MacroAssembler* _masm = new MacroAssembler(&buffer);
address start = __ pc();
__ enter(); // set up frame
assert((abi._stack_alignment_bytes % 16) == 0, "must be 16 byte aligned");
// allocate frame (frame_size is also aligned, so stack is still aligned)
__ sub(sp, sp, frame_size);
// we always have to spill the args since we need to do a call to get the thread
// (and maybe attach it). So store those registers temporarily.
arg_spiller.generate_spill(_masm, arg_save_area_offset);
preserve_callee_saved_registers(_masm, abi, reg_save_area_offset);
__ block_comment("{ on_entry");
__ la(c_rarg0, Address(sp, frame_data_offset));
__ rt_call(CAST_FROM_FN_PTR(address, UpcallLinker::on_entry));
__ mv(xthread, x10);
__ reinit_heapbase();
__ block_comment("} on_entry");
__ block_comment("{ argument shuffle");
arg_spiller.generate_fill(_masm, arg_save_area_offset);
if (needs_return_buffer) {
assert(ret_buf_offset != -1, "no return buffer allocated");
// According to the RISC-V ISA spec, when multiple floating-point precisions are supported,
// valid values of narrower n-bit types, n < FLEN, are represented in the lower n bits of an
// FLEN-bit NaN value, in a process termed NaN-boxing. The upper bits of a valid NaN-boxed
// value must be all 1s. Any operation that writes a narrower result to an f register must
// write all 1s to the uppermost FLEN - n bits to yield a legal NaN-boxed value. We make use
// of this by initializing all bits of the return buffer with 1s, so that we can always
// transfer a returned floating-point value from the return buffer into a register with a
// single fld, without knowing the current type of the value.
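// For example, the single-precision value 1.0f (bit pattern 0x3F800000) is NaN-boxed in an
// FLEN=64 f register as 0xFFFFFFFF_3F800000.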
__ mv(t1, -1L);
int offset = 0;
for (int i = 0; i < ret_buf_size / 8; i++) {
__ sd(t1, Address(sp, ret_buf_offset + offset));
offset += 8;
}
for (int i = 0; i < ret_buf_size % 8; i++) {
__ sb(t1, Address(sp, ret_buf_offset + offset));
offset += 1;
}
__ la(as_Register(locs.get(StubLocations::RETURN_BUFFER)), Address(sp, ret_buf_offset));
}
arg_shuffle.generate(_masm, as_VMStorage(shuffle_reg), abi._shadow_space_bytes, 0, locs);
__ block_comment("} argument shuffle");
__ block_comment("{ receiver ");
__ movptr(shuffle_reg, (intptr_t) receiver);
__ resolve_jobject(shuffle_reg, t0, t1);
__ mv(j_rarg0, shuffle_reg);
__ block_comment("} receiver ");
__ mov_metadata(xmethod, entry);
__ sd(xmethod, Address(xthread, JavaThread::callee_target_offset())); // just in case callee is deoptimized
__ ld(t0, Address(xmethod, Method::from_compiled_offset()));
__ jalr(t0);
// return value shuffle
if (!needs_return_buffer) {
#ifdef ASSERT
if (call_regs._ret_regs.length() == 1) { // 0 or 1
VMStorage j_expected_result_reg;
switch (ret_type) {
case T_BOOLEAN:
case T_BYTE:
case T_SHORT:
case T_CHAR:
case T_INT:
case T_LONG:
j_expected_result_reg = as_VMStorage(x10);
break;
case T_FLOAT:
case T_DOUBLE:
j_expected_result_reg = as_VMStorage(f10);
break;
default:
fatal("unexpected return type: %s", type2name(ret_type));
}
// No need to move for now, since CallArranger can pick a return type
// that goes in the same reg for both CCs. But, at least assert they are the same
assert(call_regs._ret_regs.at(0) == j_expected_result_reg, "unexpected result register");
}
#endif
} else {
assert(ret_buf_offset != -1, "no return buffer allocated");
__ la(t0, Address(sp, ret_buf_offset));
int offset = 0;
for (int i = 0; i < call_regs._ret_regs.length(); i++) {
VMStorage reg = call_regs._ret_regs.at(i);
if (reg.type() == StorageType::INTEGER) {
__ ld(as_Register(reg), Address(t0, offset));
} else if (reg.type() == StorageType::FLOAT) {
__ fld(as_FloatRegister(reg), Address(t0, offset));
} else {
ShouldNotReachHere();
}
offset += 8;
}
}
result_spiller.generate_spill(_masm, res_save_area_offset);
__ block_comment("{ on_exit");
__ la(c_rarg0, Address(sp, frame_data_offset));
// stack already aligned
__ rt_call(CAST_FROM_FN_PTR(address, UpcallLinker::on_exit));
__ block_comment("} on_exit");
restore_callee_saved_registers(_masm, abi, reg_save_area_offset);
result_spiller.generate_fill(_masm, res_save_area_offset);
__ leave();
__ ret();
//////////////////////////////////////////////////////////////////////////////
__ block_comment("{ exception handler");
intptr_t exception_handler_offset = __ pc() - start;
// Native caller has no idea how to handle exceptions,
// so we just crash here. Up to callee to catch exceptions.
__ verify_oop(x10); // exception oop is returned in a0
__ rt_call(CAST_FROM_FN_PTR(address, UpcallLinker::handle_uncaught_exception));
__ should_not_reach_here();
__ block_comment("} exception handler");
__ flush();
#ifndef PRODUCT
stringStream ss;
ss.print("upcall_stub_%s", entry->signature()->as_C_string());
const char *name = _masm->code_string(ss.as_string());
#else // PRODUCT
const char* name = "upcall_stub";
#endif // PRODUCT
UpcallStub* blob
= UpcallStub::create(name,
&buffer,
exception_handler_offset,
receiver,
in_ByteSize(frame_data_offset));
#ifndef PRODUCT
if (lt.is_enabled()) {
ResourceMark rm;
LogStream ls(lt);
blob->print_on(&ls);
}
#endif
return blob->code_begin();
}

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2006, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,6 +26,7 @@
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "code/vmreg.hpp"
#include "vmreg_riscv.inline.hpp"
void VMRegImpl::set_regName() {
int i = 0;

View File

@ -28,9 +28,12 @@
#include "asm/register.hpp"
// keep in sync with jdk/internal/foreign/abi/riscv64/RISCV64Architecture
enum class StorageType : int8_t {
STACK = 0,
PLACEHOLDER = 1,
INTEGER = 0,
FLOAT = 1,
STACK = 2,
PLACEHOLDER = 3,
// special locations used only by native code
FRAME_DATA = PLACEHOLDER + 1,
INVALID = -1
@ -38,13 +41,44 @@ enum class StorageType : int8_t {
// need to define this before constructing VMStorage (below)
constexpr inline bool VMStorage::is_reg(StorageType type) {
return false;
return type == StorageType::INTEGER || type == StorageType::FLOAT;
}
constexpr inline StorageType VMStorage::stack_type() { return StorageType::STACK; }
constexpr inline StorageType VMStorage::placeholder_type() { return StorageType::PLACEHOLDER; }
constexpr inline StorageType VMStorage::frame_data_type() { return StorageType::FRAME_DATA; }
constexpr uint16_t REG64_MASK = 0b0000000000000001;
constexpr uint16_t FP_MASK = 0b0000000000000001;
inline Register as_Register(VMStorage vms) {
assert(vms.type() == StorageType::INTEGER, "not the right type");
return ::as_Register(vms.index());
}
inline FloatRegister as_FloatRegister(VMStorage vms) {
assert(vms.type() == StorageType::FLOAT, "not the right type");
return ::as_FloatRegister(vms.index());
}
constexpr inline VMStorage as_VMStorage(Register reg) {
return VMStorage::reg_storage(StorageType::INTEGER, REG64_MASK, reg->encoding());
}
constexpr inline VMStorage as_VMStorage(FloatRegister reg) {
return VMStorage::reg_storage(StorageType::FLOAT, FP_MASK, reg->encoding());
}
inline VMStorage as_VMStorage(VMReg reg) {
if (reg->is_Register()) {
return as_VMStorage(reg->as_Register());
} else if (reg->is_FloatRegister()) {
return as_VMStorage(reg->as_FloatRegister());
} else if (reg->is_stack()) {
return VMStorage::stack_storage(reg);
} else if (!reg->is_valid()) {
return VMStorage::invalid();
}
ShouldNotReachHere();
return VMStorage::invalid();
}

View File

@ -32,6 +32,7 @@ import java.util.function.Consumer;
import jdk.internal.foreign.abi.SharedUtils;
import jdk.internal.foreign.abi.aarch64.linux.LinuxAArch64VaList;
import jdk.internal.foreign.abi.aarch64.macos.MacOsAArch64VaList;
import jdk.internal.foreign.abi.riscv64.linux.LinuxRISCV64VaList;
import jdk.internal.foreign.abi.x64.sysv.SysVVaList;
import jdk.internal.foreign.abi.x64.windows.WinVaList;
import jdk.internal.javac.PreviewFeature;
@ -104,7 +105,7 @@ import jdk.internal.reflect.Reflection;
* @since 19
*/
@PreviewFeature(feature=PreviewFeature.Feature.FOREIGN)
public sealed interface VaList permits WinVaList, SysVVaList, LinuxAArch64VaList, MacOsAArch64VaList, SharedUtils.EmptyVaList {
public sealed interface VaList permits WinVaList, SysVVaList, LinuxAArch64VaList, MacOsAArch64VaList, LinuxRISCV64VaList, SharedUtils.EmptyVaList {
/**
* Reads the next value as an {@code int} and advances this variable argument list's position. The behavior of this
@ -299,7 +300,7 @@ public sealed interface VaList permits WinVaList, SysVVaList, LinuxAArch64VaList
* @since 19
*/
@PreviewFeature(feature=PreviewFeature.Feature.FOREIGN)
sealed interface Builder permits WinVaList.Builder, SysVVaList.Builder, LinuxAArch64VaList.Builder, MacOsAArch64VaList.Builder {
sealed interface Builder permits WinVaList.Builder, SysVVaList.Builder, LinuxAArch64VaList.Builder, MacOsAArch64VaList.Builder, LinuxRISCV64VaList.Builder {
/**
* Writes an {@code int} value to the variable argument list being constructed.

View File

@ -32,7 +32,8 @@ public enum CABI {
SYS_V,
WIN_64,
LINUX_AARCH_64,
MAC_OS_AARCH_64;
MAC_OS_AARCH_64,
LINUX_RISCV_64;
private static final CABI ABI;
private static final String ARCH;
@ -58,6 +59,13 @@ public enum CABI {
// The Linux ABI follows the standard AAPCS ABI
ABI = LINUX_AARCH_64;
}
} else if (ARCH.equals("riscv64")) {
if (OS.startsWith("Linux")) {
ABI = LINUX_RISCV_64;
} else {
// unsupported
ABI = null;
}
} else {
// unsupported
ABI = null;

View File

@ -210,4 +210,60 @@ public final class PlatformLayouts {
*/
public static final ValueLayout.OfAddress C_VA_LIST = AArch64.C_POINTER;
}
public static final class RISCV64 {
private RISCV64() {
// just the one
}
/**
* The {@code bool} native type.
*/
public static final ValueLayout.OfBoolean C_BOOL = ValueLayout.JAVA_BOOLEAN;
/**
* The {@code char} native type.
*/
public static final ValueLayout.OfByte C_CHAR = ValueLayout.JAVA_BYTE;
/**
* The {@code short} native type.
*/
public static final ValueLayout.OfShort C_SHORT = ValueLayout.JAVA_SHORT.withBitAlignment(16);
/**
* The {@code int} native type.
*/
public static final ValueLayout.OfInt C_INT = ValueLayout.JAVA_INT.withBitAlignment(32);
/**
* The {@code long} native type.
*/
public static final ValueLayout.OfLong C_LONG = ValueLayout.JAVA_LONG.withBitAlignment(64);
/**
* The {@code long long} native type.
*/
public static final ValueLayout.OfLong C_LONG_LONG = ValueLayout.JAVA_LONG.withBitAlignment(64);
/**
* The {@code float} native type.
*/
public static final ValueLayout.OfFloat C_FLOAT = ValueLayout.JAVA_FLOAT.withBitAlignment(32);
/**
* The {@code double} native type.
*/
public static final ValueLayout.OfDouble C_DOUBLE = ValueLayout.JAVA_DOUBLE.withBitAlignment(64);
/**
* The {@code T*} native type.
*/
public static final ValueLayout.OfAddress C_POINTER = ValueLayout.ADDRESS.withBitAlignment(64).asUnbounded();
/**
* The {@code va_list} native type, as it is passed to a function.
*/
public static final ValueLayout.OfAddress C_VA_LIST = RISCV64.C_POINTER;
}
}

View File

@ -58,7 +58,7 @@ public final class SystemLookup implements SymbolLookup {
private static SymbolLookup makeSystemLookup() {
try {
return switch (CABI.current()) {
case SYS_V, LINUX_AARCH_64, MAC_OS_AARCH_64 -> libLookup(libs -> libs.load(jdkLibraryPath("syslookup")));
case SYS_V, LINUX_AARCH_64, MAC_OS_AARCH_64, LINUX_RISCV_64 -> libLookup(libs -> libs.load(jdkLibraryPath("syslookup")));
case WIN_64 -> makeWindowsLookup(); // out of line to workaround javac crash
};
} catch (Throwable ex) {
@ -119,7 +119,7 @@ public final class SystemLookup implements SymbolLookup {
private static Path jdkLibraryPath(String name) {
Path javahome = Path.of(GetPropertyAction.privilegedGetProperty("java.home"));
String lib = switch (CABI.current()) {
case SYS_V, LINUX_AARCH_64, MAC_OS_AARCH_64 -> "lib";
case SYS_V, LINUX_AARCH_64, MAC_OS_AARCH_64, LINUX_RISCV_64 -> "lib";
case WIN_64 -> "bin";
};
String libname = System.mapLibraryName(name);
@ -193,8 +193,7 @@ public final class SystemLookup implements SymbolLookup {
wscanf_s,
// time
gmtime
;
gmtime;
static WindowsFallbackSymbols valueOfOrNull(String name) {
try {

View File

@ -27,6 +27,7 @@ package jdk.internal.foreign.abi;
import jdk.internal.foreign.SystemLookup;
import jdk.internal.foreign.abi.aarch64.linux.LinuxAArch64Linker;
import jdk.internal.foreign.abi.aarch64.macos.MacOsAArch64Linker;
import jdk.internal.foreign.abi.riscv64.linux.LinuxRISCV64Linker;
import jdk.internal.foreign.abi.x64.sysv.SysVx64Linker;
import jdk.internal.foreign.abi.x64.windows.Windowsx64Linker;
import jdk.internal.foreign.layout.AbstractLayout;
@ -43,7 +44,7 @@ import java.lang.invoke.MethodType;
import java.util.Objects;
public abstract sealed class AbstractLinker implements Linker permits LinuxAArch64Linker, MacOsAArch64Linker,
SysVx64Linker, Windowsx64Linker {
SysVx64Linker, Windowsx64Linker, LinuxRISCV64Linker {
private record LinkRequest(FunctionDescriptor descriptor, LinkerOptions options) {}
private final SoftReferenceCache<LinkRequest, MethodHandle> DOWNCALL_CACHE = new SoftReferenceCache<>();

View File

@ -30,6 +30,7 @@ import jdk.internal.access.SharedSecrets;
import jdk.internal.foreign.CABI;
import jdk.internal.foreign.abi.aarch64.linux.LinuxAArch64Linker;
import jdk.internal.foreign.abi.aarch64.macos.MacOsAArch64Linker;
import jdk.internal.foreign.abi.riscv64.linux.LinuxRISCV64Linker;
import jdk.internal.foreign.abi.x64.sysv.SysVx64Linker;
import jdk.internal.foreign.abi.x64.windows.Windowsx64Linker;
import jdk.internal.vm.annotation.ForceInline;
@ -183,6 +184,7 @@ public final class SharedUtils {
case SYS_V -> SysVx64Linker.getInstance();
case LINUX_AARCH_64 -> LinuxAArch64Linker.getInstance();
case MAC_OS_AARCH_64 -> MacOsAArch64Linker.getInstance();
case LINUX_RISCV_64 -> LinuxRISCV64Linker.getInstance();
};
}
@ -294,6 +296,7 @@ public final class SharedUtils {
case SYS_V -> SysVx64Linker.newVaList(actions, scope);
case LINUX_AARCH_64 -> LinuxAArch64Linker.newVaList(actions, scope);
case MAC_OS_AARCH_64 -> MacOsAArch64Linker.newVaList(actions, scope);
case LINUX_RISCV_64 -> LinuxRISCV64Linker.newVaList(actions, scope);
};
}
@ -303,6 +306,7 @@ public final class SharedUtils {
case SYS_V -> SysVx64Linker.newVaListOfAddress(address, scope);
case LINUX_AARCH_64 -> LinuxAArch64Linker.newVaListOfAddress(address, scope);
case MAC_OS_AARCH_64 -> MacOsAArch64Linker.newVaListOfAddress(address, scope);
case LINUX_RISCV_64 -> LinuxRISCV64Linker.newVaListOfAddress(address, scope);
};
}
@ -312,6 +316,7 @@ public final class SharedUtils {
case SYS_V -> SysVx64Linker.emptyVaList();
case LINUX_AARCH_64 -> LinuxAArch64Linker.emptyVaList();
case MAC_OS_AARCH_64 -> MacOsAArch64Linker.emptyVaList();
case LINUX_RISCV_64 -> LinuxRISCV64Linker.emptyVaList();
};
}

View File

@ -0,0 +1,177 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, Institute of Software, Chinese Academy of Sciences.
* All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package jdk.internal.foreign.abi.riscv64;
import jdk.internal.foreign.abi.ABIDescriptor;
import jdk.internal.foreign.abi.Architecture;
import jdk.internal.foreign.abi.StubLocations;
import jdk.internal.foreign.abi.VMStorage;
import jdk.internal.foreign.abi.riscv64.linux.TypeClass;
public class RISCV64Architecture implements Architecture {
public static final Architecture INSTANCE = new RISCV64Architecture();
private static final short REG64_MASK = 0b0000_0000_0000_0001;
private static final short FP_MASK = 0b0000_0000_0000_0001;
private static final int INTEGER_REG_SIZE = 8; // bytes
private static final int FLOAT_REG_SIZE = 8;
@Override
public boolean isStackType(int cls) {
return cls == StorageType.STACK;
}
@Override
public int typeSize(int cls) {
switch (cls) {
case StorageType.INTEGER: return INTEGER_REG_SIZE;
case StorageType.FLOAT: return FLOAT_REG_SIZE;
// STACK is deliberately omitted
}
throw new IllegalArgumentException("Invalid Storage Class: " + cls);
}
public interface StorageType {
byte INTEGER = 0;
byte FLOAT = 1;
byte STACK = 2;
byte PLACEHOLDER = 3;
}
public static class Regs { // break circular dependency
public static final VMStorage x0 = integerRegister(0, "zr");
public static final VMStorage x1 = integerRegister(1, "ra");
public static final VMStorage x2 = integerRegister(2, "sp");
public static final VMStorage x3 = integerRegister(3, "gp");
public static final VMStorage x4 = integerRegister(4, "tp");
public static final VMStorage x5 = integerRegister(5, "t0");
public static final VMStorage x6 = integerRegister(6, "t1");
public static final VMStorage x7 = integerRegister(7, "t2");
public static final VMStorage x8 = integerRegister(8, "s0/fp");
public static final VMStorage x9 = integerRegister(9, "s1");
public static final VMStorage x10 = integerRegister(10, "a0");
public static final VMStorage x11 = integerRegister(11, "a1");
public static final VMStorage x12 = integerRegister(12, "a2");
public static final VMStorage x13 = integerRegister(13, "a3");
public static final VMStorage x14 = integerRegister(14, "a4");
public static final VMStorage x15 = integerRegister(15, "a5");
public static final VMStorage x16 = integerRegister(16, "a6");
public static final VMStorage x17 = integerRegister(17, "a7");
public static final VMStorage x18 = integerRegister(18, "s2");
public static final VMStorage x19 = integerRegister(19, "s3");
public static final VMStorage x20 = integerRegister(20, "s4");
public static final VMStorage x21 = integerRegister(21, "s5");
public static final VMStorage x22 = integerRegister(22, "s6");
public static final VMStorage x23 = integerRegister(23, "s7");
public static final VMStorage x24 = integerRegister(24, "s8");
public static final VMStorage x25 = integerRegister(25, "s9");
public static final VMStorage x26 = integerRegister(26, "s10");
public static final VMStorage x27 = integerRegister(27, "s11");
public static final VMStorage x28 = integerRegister(28, "t3");
public static final VMStorage x29 = integerRegister(29, "t4");
public static final VMStorage x30 = integerRegister(30, "t5");
public static final VMStorage x31 = integerRegister(31, "t6");
public static final VMStorage f0 = floatRegister(0, "ft0");
public static final VMStorage f1 = floatRegister(1, "ft1");
public static final VMStorage f2 = floatRegister(2, "ft2");
public static final VMStorage f3 = floatRegister(3, "ft3");
public static final VMStorage f4 = floatRegister(4, "ft4");
public static final VMStorage f5 = floatRegister(5, "ft5");
public static final VMStorage f6 = floatRegister(6, "ft6");
public static final VMStorage f7 = floatRegister(7, "ft7");
public static final VMStorage f8 = floatRegister(8, "fs0");
public static final VMStorage f9 = floatRegister(9, "fs1");
public static final VMStorage f10 = floatRegister(10, "fa0");
public static final VMStorage f11 = floatRegister(11, "fa1");
public static final VMStorage f12 = floatRegister(12, "fa2");
public static final VMStorage f13 = floatRegister(13, "fa3");
public static final VMStorage f14 = floatRegister(14, "fa4");
public static final VMStorage f15 = floatRegister(15, "fa5");
public static final VMStorage f16 = floatRegister(16, "fa6");
public static final VMStorage f17 = floatRegister(17, "fa7");
public static final VMStorage f18 = floatRegister(18, "fs2");
public static final VMStorage f19 = floatRegister(19, "fs3");
public static final VMStorage f20 = floatRegister(20, "fs4");
public static final VMStorage f21 = floatRegister(21, "fs5");
public static final VMStorage f22 = floatRegister(22, "fs6");
public static final VMStorage f23 = floatRegister(23, "fs7");
public static final VMStorage f24 = floatRegister(24, "fs8");
public static final VMStorage f25 = floatRegister(25, "fs9");
public static final VMStorage f26 = floatRegister(26, "fs10");
public static final VMStorage f27 = floatRegister(27, "fs11");
public static final VMStorage f28 = floatRegister(28, "ft8");
public static final VMStorage f29 = floatRegister(29, "ft9");
public static final VMStorage f30 = floatRegister(30, "ft10");
public static final VMStorage f31 = floatRegister(31, "ft11");
}
private static VMStorage integerRegister(int index, String debugName) {
return new VMStorage(StorageType.INTEGER, REG64_MASK, index, debugName);
}
private static VMStorage floatRegister(int index, String debugName) {
return new VMStorage(StorageType.FLOAT, FP_MASK, index, debugName);
}
public static VMStorage stackStorage(short size, int byteOffset) {
return new VMStorage(StorageType.STACK, size, byteOffset);
}
public static ABIDescriptor abiFor(VMStorage[] inputIntRegs,
VMStorage[] inputFloatRegs,
VMStorage[] outputIntRegs,
VMStorage[] outputFloatRegs,
VMStorage[] volatileIntRegs,
VMStorage[] volatileFloatRegs,
int stackAlignment,
int shadowSpace,
VMStorage scratch1, VMStorage scratch2) {
return new ABIDescriptor(
INSTANCE,
new VMStorage[][]{
inputIntRegs,
inputFloatRegs,
},
new VMStorage[][]{
outputIntRegs,
outputFloatRegs,
},
new VMStorage[][]{
volatileIntRegs,
volatileFloatRegs,
},
stackAlignment,
shadowSpace,
scratch1, scratch2,
StubLocations.TARGET_ADDRESS.storage(StorageType.PLACEHOLDER),
StubLocations.RETURN_BUFFER.storage(StorageType.PLACEHOLDER),
StubLocations.CAPTURED_STATE_BUFFER.storage(StorageType.PLACEHOLDER));
}
}

View File

@ -0,0 +1,473 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, Institute of Software, Chinese Academy of Sciences.
* All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package jdk.internal.foreign.abi.riscv64.linux;
import java.lang.foreign.FunctionDescriptor;
import java.lang.foreign.GroupLayout;
import java.lang.foreign.MemoryLayout;
import java.lang.foreign.MemorySegment;
import jdk.internal.foreign.abi.ABIDescriptor;
import jdk.internal.foreign.abi.Binding;
import jdk.internal.foreign.abi.CallingSequence;
import jdk.internal.foreign.abi.CallingSequenceBuilder;
import jdk.internal.foreign.abi.DowncallLinker;
import jdk.internal.foreign.abi.LinkerOptions;
import jdk.internal.foreign.abi.UpcallLinker;
import jdk.internal.foreign.abi.SharedUtils;
import jdk.internal.foreign.abi.VMStorage;
import jdk.internal.foreign.Utils;
import java.lang.foreign.SegmentScope;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodType;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import static jdk.internal.foreign.abi.riscv64.linux.TypeClass.*;
import static jdk.internal.foreign.abi.riscv64.RISCV64Architecture.*;
import static jdk.internal.foreign.abi.riscv64.RISCV64Architecture.Regs.*;
import static jdk.internal.foreign.PlatformLayouts.*;
/**
* For the RISCV64 C ABI specifically, this class uses CallingSequenceBuilder
* to translate a C FunctionDescriptor into a CallingSequence, which can then be turned into a MethodHandle.
*
* This includes taking care of synthetic arguments like pointers to return buffers for 'in-memory' returns.
*/
public class LinuxRISCV64CallArranger {
private static final int STACK_SLOT_SIZE = 8;
public static final int MAX_REGISTER_ARGUMENTS = 8;
private static final ABIDescriptor CLinux = abiFor(
new VMStorage[]{x10, x11, x12, x13, x14, x15, x16, x17},
new VMStorage[]{f10, f11, f12, f13, f14, f15, f16, f17},
new VMStorage[]{x10, x11},
new VMStorage[]{f10, f11},
new VMStorage[]{x5, x6, x7, x28, x29, x30, x31},
new VMStorage[]{f0, f1, f2, f3, f4, f5, f6, f7, f28, f29, f30, f31},
16, // stackAlignment
0, // no shadow space
x28, x29 // scratch 1 & 2
);
public record Bindings(CallingSequence callingSequence,
boolean isInMemoryReturn) {
}
public static Bindings getBindings(MethodType mt, FunctionDescriptor cDesc, boolean forUpcall) {
return getBindings(mt, cDesc, forUpcall, LinkerOptions.empty());
}
public static Bindings getBindings(MethodType mt, FunctionDescriptor cDesc, boolean forUpcall, LinkerOptions options) {
CallingSequenceBuilder csb = new CallingSequenceBuilder(CLinux, forUpcall, options);
BindingCalculator argCalc = forUpcall ? new BoxBindingCalculator(true) : new UnboxBindingCalculator(true);
BindingCalculator retCalc = forUpcall ? new UnboxBindingCalculator(false) : new BoxBindingCalculator(false);
boolean returnInMemory = isInMemoryReturn(cDesc.returnLayout());
if (returnInMemory) {
Class<?> carrier = MemorySegment.class;
MemoryLayout layout = RISCV64.C_POINTER;
csb.addArgumentBindings(carrier, layout, argCalc.getBindings(carrier, layout, false));
} else if (cDesc.returnLayout().isPresent()) {
Class<?> carrier = mt.returnType();
MemoryLayout layout = cDesc.returnLayout().get();
csb.setReturnBindings(carrier, layout, retCalc.getBindings(carrier, layout, false));
}
for (int i = 0; i < mt.parameterCount(); i++) {
Class<?> carrier = mt.parameterType(i);
MemoryLayout layout = cDesc.argumentLayouts().get(i);
boolean isVar = options.isVarargsIndex(i);
csb.addArgumentBindings(carrier, layout, argCalc.getBindings(carrier, layout, isVar));
}
return new Bindings(csb.build(), returnInMemory);
}
public static MethodHandle arrangeDowncall(MethodType mt, FunctionDescriptor cDesc, LinkerOptions options) {
Bindings bindings = getBindings(mt, cDesc, false, options);
MethodHandle handle = new DowncallLinker(CLinux, bindings.callingSequence).getBoundMethodHandle();
if (bindings.isInMemoryReturn) {
handle = SharedUtils.adaptDowncallForIMR(handle, cDesc, bindings.callingSequence);
}
return handle;
}
public static MemorySegment arrangeUpcall(MethodHandle target, MethodType mt, FunctionDescriptor cDesc, SegmentScope scope) {
Bindings bindings = getBindings(mt, cDesc, true);
if (bindings.isInMemoryReturn) {
target = SharedUtils.adaptUpcallForIMR(target, true /* drop return, since we don't have bindings for it */);
}
return UpcallLinker.make(CLinux, target, bindings.callingSequence, scope);
}
private static boolean isInMemoryReturn(Optional<MemoryLayout> returnLayout) {
return returnLayout
.filter(GroupLayout.class::isInstance)
.filter(g -> TypeClass.classifyLayout(g) == TypeClass.STRUCT_REFERENCE)
.isPresent();
}
static class StorageCalculator {
private final boolean forArguments;
// Index of the next available register for each storage class: 0 = integer, 1 = float.
private final int IntegerRegIdx = 0;
private final int FloatRegIdx = 1;
private final int[] nRegs = {0, 0};
private long stackOffset = 0;
public StorageCalculator(boolean forArguments) {
this.forArguments = forArguments;
}
// Aggregates or scalars passed on the stack are aligned to the greater of
// the type alignment and XLEN bits, but never more than the stack alignment.
void alignStack(long alignment) {
alignment = Utils.alignUp(Math.min(Math.max(alignment, STACK_SLOT_SIZE), 16), STACK_SLOT_SIZE);
stackOffset = Utils.alignUp(stackOffset, alignment);
}
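// Worked example: with STACK_SLOT_SIZE = 8, a requested alignment of 4 is clamped up to 8
// and a requested alignment of 32 is clamped down to 16, so the effective stack alignment
// always falls in the range [8, 16] bytes.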
VMStorage stackAlloc() {
assert forArguments : "no stack returns";
VMStorage storage = stackStorage((short) STACK_SLOT_SIZE, (int) stackOffset);
stackOffset += STACK_SLOT_SIZE;
return storage;
}
Optional<VMStorage> regAlloc(int storageClass) {
if (nRegs[storageClass] < MAX_REGISTER_ARGUMENTS) {
VMStorage[] source = (forArguments ? CLinux.inputStorage : CLinux.outputStorage)[storageClass];
Optional<VMStorage> result = Optional.of(source[nRegs[storageClass]]);
nRegs[storageClass] += 1;
return result;
}
return Optional.empty();
}
VMStorage getStorage(int storageClass) {
Optional<VMStorage> storage = regAlloc(storageClass);
if (storage.isPresent()) {
return storage.get();
}
// If storageClass is StorageType.FLOAT, and no floating-point register is available,
// try to allocate an integer register.
if (storageClass == StorageType.FLOAT) {
storage = regAlloc(StorageType.INTEGER);
if (storage.isPresent()) {
return storage.get();
}
}
return stackAlloc();
}
VMStorage[] getStorages(MemoryLayout layout, boolean isVariadicArg) {
int regCnt = (int) SharedUtils.alignUp(layout.byteSize(), 8) / 8;
if (isVariadicArg && layout.byteAlignment() == 16 && layout.byteSize() <= 16) {
alignStorage();
// Two registers or stack slots will be allocated, even if layout.byteSize() <= 8 bytes.
regCnt = 2;
}
VMStorage[] storages = new VMStorage[regCnt];
for (int i = 0; i < regCnt; i++) {
// Use the integer calling convention.
storages[i] = getStorage(StorageType.INTEGER);
}
return storages;
}
boolean regsAvailable(int integerRegs, int floatRegs) {
return nRegs[IntegerRegIdx] + integerRegs <= MAX_REGISTER_ARGUMENTS &&
nRegs[FloatRegIdx] + floatRegs <= MAX_REGISTER_ARGUMENTS;
}
// Variadic arguments with 2 * XLEN-bit alignment and size at most 2 * XLEN bits
// are passed in an aligned register pair (i.e., the first register in the pair
// is even-numbered), or on the stack by value if none is available.
// After a variadic argument has been passed on the stack, all future arguments
// will also be passed on the stack.
void alignStorage() {
if (nRegs[IntegerRegIdx] + 2 <= MAX_REGISTER_ARGUMENTS) {
nRegs[IntegerRegIdx] = (nRegs[IntegerRegIdx] + 1) & -2;
} else {
nRegs[IntegerRegIdx] = MAX_REGISTER_ARGUMENTS;
stackOffset = Utils.alignUp(stackOffset, 16);
}
}
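// Worked example: if 3 integer registers are already in use, (3 + 1) & -2 == 4, so one
// register is skipped and the aligned pair starts at a4/a5. If 7 are already in use, the
// last register is abandoned and the argument goes to a 16-byte-aligned stack slot instead.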
@Override
public String toString() {
String nReg = "iReg: " + nRegs[IntegerRegIdx] + ", fReg: " + nRegs[FloatRegIdx];
String stack = ", stackOffset: " + stackOffset;
return "{" + nReg + stack + "}";
}
}
abstract static class BindingCalculator {
protected final StorageCalculator storageCalculator;
@Override
public String toString() {
return storageCalculator.toString();
}
protected BindingCalculator(boolean forArguments) {
this.storageCalculator = new LinuxRISCV64CallArranger.StorageCalculator(forArguments);
}
abstract List<Binding> getBindings(Class<?> carrier, MemoryLayout layout, boolean isVariadicArg);
// When handling the variadic part, the integer calling convention should be used.
static final Map<TypeClass, TypeClass> conventionConverterMap =
Map.ofEntries(Map.entry(FLOAT, INTEGER),
Map.entry(STRUCT_REGISTER_F, STRUCT_REGISTER_X),
Map.entry(STRUCT_REGISTER_XF, STRUCT_REGISTER_X));
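// For example, a variadic double is reclassified FLOAT -> INTEGER and is therefore passed
// in an integer argument register (or on the stack) rather than a floating-point register.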
}
static class UnboxBindingCalculator extends BindingCalculator {
boolean forArguments;
UnboxBindingCalculator(boolean forArguments) {
super(forArguments);
this.forArguments = forArguments;
}
@Override
List<Binding> getBindings(Class<?> carrier, MemoryLayout layout, boolean isVariadicArg) {
TypeClass typeClass = TypeClass.classifyLayout(layout);
if (isVariadicArg) {
typeClass = BindingCalculator.conventionConverterMap.getOrDefault(typeClass, typeClass);
}
return getBindings(carrier, layout, typeClass, isVariadicArg);
}
List<Binding> getBindings(Class<?> carrier, MemoryLayout layout, TypeClass argumentClass, boolean isVariadicArg) {
Binding.Builder bindings = Binding.builder();
switch (argumentClass) {
case INTEGER -> {
VMStorage storage = storageCalculator.getStorage(StorageType.INTEGER);
bindings.vmStore(storage, carrier);
}
case FLOAT -> {
VMStorage storage = storageCalculator.getStorage(StorageType.FLOAT);
bindings.vmStore(storage, carrier);
}
case POINTER -> {
bindings.unboxAddress();
VMStorage storage = storageCalculator.getStorage(StorageType.INTEGER);
bindings.vmStore(storage, long.class);
}
case STRUCT_REGISTER_X -> {
assert carrier == MemorySegment.class;
// When no register is available, the struct will be passed on the stack.
// The stack must be aligned before allocation.
if (!storageCalculator.regsAvailable(1, 0)) {
storageCalculator.alignStack(layout.byteAlignment());
}
VMStorage[] locations = storageCalculator.getStorages(layout, isVariadicArg);
int locIndex = 0;
long offset = 0;
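// For example, a 12-byte struct is copied as an 8-byte chunk followed by a 4-byte chunk,
// each loaded from the buffer and stored into its own register or stack slot.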
while (offset < layout.byteSize()) {
final long copy = Math.min(layout.byteSize() - offset, 8);
VMStorage storage = locations[locIndex++];
Class<?> type = SharedUtils.primitiveCarrierForSize(copy, false);
if (offset + copy < layout.byteSize()) {
bindings.dup();
}
bindings.bufferLoad(offset, type)
.vmStore(storage, type);
offset += copy;
}
}
case STRUCT_REGISTER_F -> {
assert carrier == MemorySegment.class;
List<FlattenedFieldDesc> descs = getFlattenedFields((GroupLayout) layout);
if (storageCalculator.regsAvailable(0, descs.size())) {
for (int i = 0; i < descs.size(); i++) {
FlattenedFieldDesc desc = descs.get(i);
Class<?> type = desc.layout().carrier();
VMStorage storage = storageCalculator.getStorage(StorageType.FLOAT);
if (i < descs.size() - 1) {
bindings.dup();
}
bindings.bufferLoad(desc.offset(), type)
.vmStore(storage, type);
}
} else {
// If there are not enough registers available, fall back to the integer calling convention.
return getBindings(carrier, layout, STRUCT_REGISTER_X, isVariadicArg);
}
}
case STRUCT_REGISTER_XF -> {
assert carrier == MemorySegment.class;
if (storageCalculator.regsAvailable(1, 1)) {
List<FlattenedFieldDesc> descs = getFlattenedFields((GroupLayout) layout);
for (int i = 0; i < 2; i++) {
FlattenedFieldDesc desc = descs.get(i);
int storageClass;
if (desc.typeClass() == INTEGER) {
storageClass = StorageType.INTEGER;
} else {
storageClass = StorageType.FLOAT;
}
VMStorage storage = storageCalculator.getStorage(storageClass);
Class<?> type = desc.layout().carrier();
if (i < 1) {
bindings.dup();
}
bindings.bufferLoad(desc.offset(), type)
.vmStore(storage, type);
}
} else {
return getBindings(carrier, layout, STRUCT_REGISTER_X, isVariadicArg);
}
}
case STRUCT_REFERENCE -> {
assert carrier == MemorySegment.class;
bindings.copy(layout)
.unboxAddress();
VMStorage storage = storageCalculator.getStorage(StorageType.INTEGER);
bindings.vmStore(storage, long.class);
}
default -> throw new UnsupportedOperationException("Unhandled class " + argumentClass);
}
return bindings.build();
}
}
static class BoxBindingCalculator extends BindingCalculator {
BoxBindingCalculator(boolean forArguments) {
super(forArguments);
}
@Override
List<Binding> getBindings(Class<?> carrier, MemoryLayout layout, boolean isVariadicArg) {
TypeClass typeClass = TypeClass.classifyLayout(layout);
if (isVariadicArg) {
typeClass = BindingCalculator.conventionConverterMap.getOrDefault(typeClass, typeClass);
}
return getBindings(carrier, layout, typeClass, isVariadicArg);
}
List<Binding> getBindings(Class<?> carrier, MemoryLayout layout, TypeClass argumentClass, boolean isVariadicArg) {
Binding.Builder bindings = Binding.builder();
switch (argumentClass) {
case INTEGER -> {
VMStorage storage = storageCalculator.getStorage(StorageType.INTEGER);
bindings.vmLoad(storage, carrier);
}
case FLOAT -> {
VMStorage storage = storageCalculator.getStorage(StorageType.FLOAT);
bindings.vmLoad(storage, carrier);
}
case POINTER -> {
VMStorage storage = storageCalculator.getStorage(StorageType.INTEGER);
bindings.vmLoad(storage, long.class)
.boxAddressRaw(Utils.pointeeSize(layout));
}
case STRUCT_REGISTER_X -> {
assert carrier == MemorySegment.class;
// When no register is available, the struct will be passed on the stack.
// The stack must be aligned before allocation.
if (!storageCalculator.regsAvailable(1, 0)) {
storageCalculator.alignStack(layout.byteAlignment());
}
bindings.allocate(layout);
VMStorage[] locations = storageCalculator.getStorages(layout, isVariadicArg);
int locIndex = 0;
long offset = 0;
while (offset < layout.byteSize()) {
final long copy = Math.min(layout.byteSize() - offset, 8);
VMStorage storage = locations[locIndex++];
Class<?> type = SharedUtils.primitiveCarrierForSize(copy, false);
bindings.dup().vmLoad(storage, type)
.bufferStore(offset, type);
offset += copy;
}
}
case STRUCT_REGISTER_F -> {
assert carrier == MemorySegment.class;
bindings.allocate(layout);
List<FlattenedFieldDesc> descs = getFlattenedFields((GroupLayout) layout);
if (storageCalculator.regsAvailable(0, descs.size())) {
for (FlattenedFieldDesc desc : descs) {
Class<?> type = desc.layout().carrier();
VMStorage storage = storageCalculator.getStorage(StorageType.FLOAT);
bindings.dup()
.vmLoad(storage, type)
.bufferStore(desc.offset(), type);
}
} else {
return getBindings(carrier, layout, STRUCT_REGISTER_X, isVariadicArg);
}
}
case STRUCT_REGISTER_XF -> {
assert carrier == MemorySegment.class;
bindings.allocate(layout);
if (storageCalculator.regsAvailable(1, 1)) {
List<FlattenedFieldDesc> descs = getFlattenedFields((GroupLayout) layout);
for (int i = 0; i < 2; i++) {
FlattenedFieldDesc desc = descs.get(i);
int storageClass;
if (desc.typeClass() == INTEGER) {
storageClass = StorageType.INTEGER;
} else {
storageClass = StorageType.FLOAT;
}
VMStorage storage = storageCalculator.getStorage(storageClass);
Class<?> type = desc.layout().carrier();
bindings.dup()
.vmLoad(storage, type)
.bufferStore(desc.offset(), type);
}
} else {
return getBindings(carrier, layout, STRUCT_REGISTER_X, isVariadicArg);
}
}
case STRUCT_REFERENCE -> {
assert carrier == MemorySegment.class;
VMStorage storage = storageCalculator.getStorage(StorageType.INTEGER);
bindings.vmLoad(storage, long.class)
.boxAddress(layout);
}
default -> throw new UnsupportedOperationException("Unhandled class " + argumentClass);
}
return bindings.build();
}
}
}
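The arranger above is only reached indirectly, through Linker.nativeLinker(), which resolves to LinuxRISCV64Linker on riscv64/Linux (next file). A minimal downcall sketch, assuming the preview API shape used throughout this patch (Linker, Arena, ValueLayout) and that strlen is resolvable through the default lookup; the class name StrlenExample is illustrative only:
import java.lang.foreign.Arena;
import java.lang.foreign.FunctionDescriptor;
import java.lang.foreign.Linker;
import java.lang.foreign.MemorySegment;
import java.lang.foreign.ValueLayout;
import java.lang.invoke.MethodHandle;
class StrlenExample {
    public static void main(String[] args) throws Throwable {
        Linker linker = Linker.nativeLinker();  // LinuxRISCV64Linker on riscv64/Linux
        MethodHandle strlen = linker.downcallHandle(
                linker.defaultLookup().find("strlen").orElseThrow(),
                FunctionDescriptor.of(ValueLayout.JAVA_LONG, ValueLayout.ADDRESS));
        try (Arena arena = Arena.openConfined()) {
            MemorySegment str = arena.allocateUtf8String("hello");
            long len = (long) strlen.invokeExact(str);  // 5
            System.out.println(len);
        }
    }
}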

View File

@ -0,0 +1,77 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, Institute of Software, Chinese Academy of Sciences.
* All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package jdk.internal.foreign.abi.riscv64.linux;
import jdk.internal.foreign.abi.AbstractLinker;
import jdk.internal.foreign.abi.LinkerOptions;
import java.lang.foreign.SegmentScope;
import java.lang.foreign.FunctionDescriptor;
import java.lang.foreign.MemorySegment;
import java.lang.foreign.VaList;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodType;
import java.util.function.Consumer;
public final class LinuxRISCV64Linker extends AbstractLinker {
public static LinuxRISCV64Linker getInstance() {
final class Holder {
private static final LinuxRISCV64Linker INSTANCE = new LinuxRISCV64Linker();
}
return Holder.INSTANCE;
}
private LinuxRISCV64Linker() {
// Ensure there is only one instance
}
@Override
protected MethodHandle arrangeDowncall(MethodType inferredMethodType, FunctionDescriptor function, LinkerOptions options) {
return LinuxRISCV64CallArranger.arrangeDowncall(inferredMethodType, function, options);
}
@Override
protected MemorySegment arrangeUpcall(MethodHandle target, MethodType targetType, FunctionDescriptor function, SegmentScope scope) {
return LinuxRISCV64CallArranger.arrangeUpcall(target, targetType, function, scope);
}
public static VaList newVaList(Consumer<VaList.Builder> actions, SegmentScope scope) {
LinuxRISCV64VaList.Builder builder = LinuxRISCV64VaList.builder(scope);
actions.accept(builder);
return builder.build();
}
public static VaList newVaListOfAddress(long address, SegmentScope scope) {
return LinuxRISCV64VaList.ofAddress(address, scope);
}
public static VaList emptyVaList() {
return LinuxRISCV64VaList.empty();
}
}

View File

@ -0,0 +1,302 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, Institute of Software, Chinese Academy of Sciences.
* All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package jdk.internal.foreign.abi.riscv64.linux;
import java.lang.foreign.GroupLayout;
import java.lang.foreign.MemoryLayout;
import java.lang.foreign.MemorySegment;
import java.lang.foreign.SegmentScope;
import java.lang.foreign.SegmentAllocator;
import java.lang.foreign.ValueLayout;
import java.lang.foreign.VaList;
import jdk.internal.foreign.abi.SharedUtils;
import jdk.internal.foreign.MemorySessionImpl;
import jdk.internal.foreign.Utils;
import java.lang.invoke.VarHandle;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import static java.lang.foreign.ValueLayout.ADDRESS;
import static jdk.internal.foreign.abi.SharedUtils.SimpleVaArg;
import static jdk.internal.foreign.abi.SharedUtils.THROWING_ALLOCATOR;
/**
 * Standard va_list implementation as defined by the RISC-V ABI document and used on Linux.
* In the base integer calling convention, variadic arguments are passed in the same
* manner as named arguments, with one exception. Variadic arguments with 2 * XLEN-bit
* alignment and size at most 2 * XLEN bits are passed in an aligned register pair
* (i.e., the first register in the pair is even-numbered), or on the stack by value
* if none is available. After a variadic argument has been passed on the stack, all
* future arguments will also be passed on the stack (i.e. the last argument register
* may be left unused due to the aligned register pair rule).
*/
public non-sealed class LinuxRISCV64VaList implements VaList {
// The va_list type is void* on RISCV64.
// See https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#cc-type-representations
private final MemorySegment segment;
private long offset;
private static final long STACK_SLOT_SIZE = 8;
private static final VaList EMPTY
= new SharedUtils.EmptyVaList(MemorySegment.NULL);
public static VaList empty() {
return EMPTY;
}
public LinuxRISCV64VaList(MemorySegment segment, long offset) {
this.segment = segment;
this.offset = offset;
}
private static LinuxRISCV64VaList readFromAddress(long address, SegmentScope scope) {
MemorySegment segment = MemorySegment.ofAddress(address, Long.MAX_VALUE, scope); // size unknown
return new LinuxRISCV64VaList(segment, 0);
}
@Override
public int nextVarg(ValueLayout.OfInt layout) {
return (int) read(layout);
}
@Override
public long nextVarg(ValueLayout.OfLong layout) {
return (long) read(layout);
}
@Override
public double nextVarg(ValueLayout.OfDouble layout) {
return (double) read(layout);
}
@Override
public MemorySegment nextVarg(ValueLayout.OfAddress layout) {
return (MemorySegment) read(layout);
}
@Override
public MemorySegment nextVarg(GroupLayout layout, SegmentAllocator allocator) {
Objects.requireNonNull(allocator);
return (MemorySegment) read(layout, allocator);
}
private Object read(MemoryLayout layout) {
return read(layout, THROWING_ALLOCATOR);
}
private Object read(MemoryLayout layout, SegmentAllocator allocator) {
Objects.requireNonNull(layout);
TypeClass typeClass = TypeClass.classifyLayout(layout);
preAlignStack(layout);
return switch (typeClass) {
case INTEGER, FLOAT, POINTER -> {
checkStackElement(layout);
VarHandle reader = layout.varHandle();
MemorySegment slice = segment.asSlice(offset, layout.byteSize());
Object res = reader.get(slice);
postAlignStack(layout);
yield res;
}
case STRUCT_REGISTER_X, STRUCT_REGISTER_F, STRUCT_REGISTER_XF -> {
checkStackElement(layout);
// The struct's bytes are stored by value in the va_list area; copy them into a freshly allocated segment.
MemorySegment slice = segment.asSlice(offset, layout.byteSize());
MemorySegment seg = allocator.allocate(layout);
seg.copyFrom(slice);
postAlignStack(layout);
yield seg;
}
case STRUCT_REFERENCE -> {
checkStackElement(ADDRESS);
VarHandle addrReader = ADDRESS.varHandle();
MemorySegment slice = segment.asSlice(offset, ADDRESS.byteSize());
MemorySegment addr = (MemorySegment) addrReader.get(slice);
MemorySegment seg = allocator.allocate(layout);
seg.copyFrom(MemorySegment.ofAddress(addr.address(), layout.byteSize(), segment.scope()));
postAlignStack(ADDRESS);
yield seg;
}
};
}
private void checkStackElement(MemoryLayout layout) {
if (Utils.alignUp(layout.byteSize(), STACK_SLOT_SIZE) > segment.byteSize()) {
throw SharedUtils.newVaListNSEE(layout);
}
}
private void preAlignStack(MemoryLayout layout) {
if (layout.byteSize() <= 16 && layout.byteAlignment() == 16) {
offset = Utils.alignUp(offset, 16);
} else {
offset = Utils.alignUp(offset, STACK_SLOT_SIZE);
}
}
private void postAlignStack(MemoryLayout layout) {
if (layout.byteSize() <= 16 && layout.byteAlignment() == 16) {
offset += 16;
} else {
offset += Utils.alignUp(layout.byteSize(), STACK_SLOT_SIZE);
}
}
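// Worked example: reading a C_INT aligns the offset to the 8-byte stack slot and then
// advances it by 8; reading a 16-byte struct with 16-byte alignment first rounds the
// offset up to a multiple of 16 and then advances it by exactly 16.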
@Override
public void skip(MemoryLayout... layouts) {
Objects.requireNonNull(layouts);
((MemorySessionImpl) segment.scope()).checkValidState();
for (MemoryLayout layout : layouts) {
Objects.requireNonNull(layout);
preAlignStack(layout);
postAlignStack(layout);
}
}
static LinuxRISCV64VaList.Builder builder(SegmentScope scope) {
return new LinuxRISCV64VaList.Builder(scope);
}
public static VaList ofAddress(long address, SegmentScope scope) {
return readFromAddress(address, scope);
}
@Override
public VaList copy() {
MemorySessionImpl sessionImpl = (MemorySessionImpl) segment.scope();
sessionImpl.checkValidState();
return new LinuxRISCV64VaList(segment, offset);
}
@Override
public MemorySegment segment() {
// make sure that returned segment cannot be accessed
return segment.asSlice(0, 0);
}
public long address() {
return segment.address() + offset;
}
@Override
public String toString() {
return "LinuxRISCV64VaList{" + "seg: " + address() + ", " + "offset: " + offset + '}';
}
public static non-sealed class Builder implements VaList.Builder {
private final SegmentScope scope;
private final List<SimpleVaArg> stackArgs = new ArrayList<>();
Builder(SegmentScope scope) {
this.scope = scope;
}
@Override
public Builder addVarg(ValueLayout.OfInt layout, int value) {
return arg(layout, value);
}
@Override
public Builder addVarg(ValueLayout.OfLong layout, long value) {
return arg(layout, value);
}
@Override
public Builder addVarg(ValueLayout.OfDouble layout, double value) {
return arg(layout, value);
}
@Override
public Builder addVarg(ValueLayout.OfAddress layout, MemorySegment value) {
return arg(layout, value);
}
@Override
public Builder addVarg(GroupLayout layout, MemorySegment value) {
return arg(layout, value);
}
private Builder arg(MemoryLayout layout, Object value) {
Objects.requireNonNull(layout);
Objects.requireNonNull(value);
stackArgs.add(new SimpleVaArg(layout, value));
return this;
}
boolean isEmpty() {
return stackArgs.isEmpty();
}
public VaList build() {
if (isEmpty()) {
return EMPTY;
}
long stackArgsSize = 0;
for (SimpleVaArg arg : stackArgs) {
MemoryLayout layout = arg.layout;
long elementSize = TypeClass.classifyLayout(layout) == TypeClass.STRUCT_REFERENCE ?
ADDRESS.byteSize() : layout.byteSize();
// Arguments with 2 * XLEN-bit alignment and size at most 2 * XLEN bits
// are stored in memory aligned to 2 * XLEN bits (XLEN is 64 for RISCV64).
if (layout.byteSize() <= 16 && layout.byteAlignment() == 16) {
stackArgsSize = Utils.alignUp(stackArgsSize, 16);
elementSize = 16;
}
stackArgsSize += Utils.alignUp(elementSize, STACK_SLOT_SIZE);
}
MemorySegment argsSegment = MemorySegment.allocateNative(stackArgsSize, 16, scope);
MemorySegment writeCursor = argsSegment;
for (SimpleVaArg arg : stackArgs) {
MemoryLayout layout;
Object value = arg.value;
if (TypeClass.classifyLayout(arg.layout) == TypeClass.STRUCT_REFERENCE) {
layout = ADDRESS;
} else {
layout = arg.layout;
}
long alignedSize = Utils.alignUp(layout.byteSize(), STACK_SLOT_SIZE);
if (layout.byteSize() <= 16 && layout.byteAlignment() == 16) {
writeCursor = Utils.alignUp(writeCursor, 16);
alignedSize = 16;
}
if (layout instanceof GroupLayout) {
writeCursor.copyFrom((MemorySegment) value);
} else {
VarHandle writer = layout.varHandle();
writer.set(writeCursor, value);
}
writeCursor = writeCursor.asSlice(alignedSize);
}
return new LinuxRISCV64VaList(argsSegment, 0L);
}
}
}
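A minimal usage sketch for the builder and reader above, assuming C_INT and C_DOUBLE are the RISCV64 constants from jdk.internal.foreign.PlatformLayouts and that SegmentScope.auto() is an acceptable lifetime for the backing segment:
// Sketch under the assumptions stated above.
VaList list = LinuxRISCV64Linker.newVaList(b -> b
        .addVarg(C_INT, 42)
        .addVarg(C_DOUBLE, 3.14), SegmentScope.auto());
int i = list.nextVarg(C_INT);        // reads 42, advances by one 8-byte slot
double d = list.nextVarg(C_DOUBLE);  // reads 3.14 from the next slot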

View File

@ -0,0 +1,215 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, Institute of Software, Chinese Academy of Sciences.
* All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package jdk.internal.foreign.abi.riscv64.linux;
import java.lang.foreign.GroupLayout;
import java.lang.foreign.MemoryLayout;
import java.lang.foreign.MemorySegment;
import java.lang.foreign.PaddingLayout;
import java.lang.foreign.SequenceLayout;
import java.lang.foreign.UnionLayout;
import java.lang.foreign.ValueLayout;
import java.util.ArrayList;
import java.util.List;
public enum TypeClass {
/*
* STRUCT_REFERENCE: Aggregates larger than 2 * XLEN bits are passed by reference and are replaced
* in the argument list with the address. The address will be passed in a register if at least
* one register is available, otherwise it will be passed on the stack.
*
* STRUCT_REGISTER_F: A struct containing just one floating-point real is passed as though it were
* a standalone floating-point real. A struct containing two floating-point reals is passed in two
* floating-point registers, if neither real is more than ABI_FLEN bits wide and at least two
* floating-point argument registers are available. (The registers need not be an aligned pair.)
* Otherwise, it is passed according to the integer calling convention.
*
* STRUCT_REGISTER_XF: A struct containing one floating-point real and one integer (or bitfield), in either
* order, is passed in a floating-point register and an integer register, provided the floating-point real
* is no more than ABI_FLEN bits wide and the integer is no more than XLEN bits wide, and at least one
* floating-point argument register and at least one integer argument register is available. If the struct
* is not passed in this manner, then it is passed according to the integer calling convention.
*
* STRUCT_REGISTER_X: Aggregates whose total size is no more than XLEN bits are passed in a register, with the
* fields laid out as though they were passed in memory. If no register is available, the aggregate is
* passed on the stack. Aggregates whose total size is no more than 2 * XLEN bits are passed in a pair of
* registers; if only one register is available, the first XLEN bits are passed in a register and the
* remaining bits are passed on the stack. If no registers are available, the aggregate is passed on the stack.
*
* See https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc
* */
INTEGER,
FLOAT,
POINTER,
STRUCT_REFERENCE,
STRUCT_REGISTER_F,
STRUCT_REGISTER_XF,
STRUCT_REGISTER_X;
private static final int MAX_AGGREGATE_REGS_SIZE = 2;
/*
 * A struct is flattened while classifying. That is, struct{struct{int, double}} is treated the
 * same as struct{int, double}, and struct{int[2]} is treated the same as struct{int, int}.
* */
private static record FieldCounter(long integerCnt, long floatCnt, long pointerCnt) {
static final FieldCounter EMPTY = new FieldCounter(0, 0, 0);
static final FieldCounter SINGLE_INTEGER = new FieldCounter(1, 0, 0);
static final FieldCounter SINGLE_FLOAT = new FieldCounter(0, 1, 0);
static final FieldCounter SINGLE_POINTER = new FieldCounter(0, 0, 1);
static FieldCounter flatten(MemoryLayout layout) {
if (layout instanceof ValueLayout valueLayout) {
return switch (classifyValueType(valueLayout)) {
case INTEGER -> FieldCounter.SINGLE_INTEGER;
case FLOAT -> FieldCounter.SINGLE_FLOAT;
case POINTER -> FieldCounter.SINGLE_POINTER;
default -> throw new IllegalStateException("Should not reach here.");
};
} else if (layout instanceof GroupLayout groupLayout) {
FieldCounter currCounter = FieldCounter.EMPTY;
for (MemoryLayout memberLayout : groupLayout.memberLayouts()) {
if (memberLayout instanceof PaddingLayout) {
continue;
}
currCounter = currCounter.add(flatten(memberLayout));
}
return currCounter;
} else if (layout instanceof SequenceLayout sequenceLayout) {
long elementCount = sequenceLayout.elementCount();
if (elementCount == 0) {
return FieldCounter.EMPTY;
}
return flatten(sequenceLayout.elementLayout()).mul(elementCount);
} else {
throw new IllegalStateException("Cannot get here: " + layout);
}
}
FieldCounter mul(long m) {
return new FieldCounter(integerCnt * m,
floatCnt * m,
pointerCnt * m);
}
FieldCounter add(FieldCounter other) {
return new FieldCounter(integerCnt + other.integerCnt,
floatCnt + other.floatCnt,
pointerCnt + other.pointerCnt);
}
}
public static record FlattenedFieldDesc(TypeClass typeClass, long offset, ValueLayout layout) {
}
private static List<FlattenedFieldDesc> getFlattenedFieldsInner(long offset, MemoryLayout layout) {
if (layout instanceof ValueLayout valueLayout) {
TypeClass typeClass = classifyValueType(valueLayout);
return List.of(switch (typeClass) {
case INTEGER, FLOAT -> new FlattenedFieldDesc(typeClass, offset, valueLayout);
default -> throw new IllegalStateException("Should not reach here.");
});
} else if (layout instanceof GroupLayout groupLayout) {
List<FlattenedFieldDesc> fields = new ArrayList<>();
for (MemoryLayout memberLayout : groupLayout.memberLayouts()) {
if (memberLayout instanceof PaddingLayout) {
offset += memberLayout.byteSize();
continue;
}
fields.addAll(getFlattenedFieldsInner(offset, memberLayout));
offset += memberLayout.byteSize();
}
return fields;
} else if (layout instanceof SequenceLayout sequenceLayout) {
List<FlattenedFieldDesc> fields = new ArrayList<>();
MemoryLayout elementLayout = sequenceLayout.elementLayout();
for (long i = 0; i < sequenceLayout.elementCount(); i++) {
fields.addAll(getFlattenedFieldsInner(offset, elementLayout));
offset += elementLayout.byteSize();
}
return fields;
} else {
throw new IllegalStateException("Cannot get here: " + layout);
}
}
public static List<FlattenedFieldDesc> getFlattenedFields(GroupLayout layout) {
return getFlattenedFieldsInner(0, layout);
}
// ValueLayout will be classified by its carrier type.
private static TypeClass classifyValueType(ValueLayout type) {
Class<?> carrier = type.carrier();
if (carrier == boolean.class || carrier == byte.class || carrier == char.class ||
carrier == short.class || carrier == int.class || carrier == long.class) {
return INTEGER;
} else if (carrier == float.class || carrier == double.class) {
return FLOAT;
} else if (carrier == MemorySegment.class) {
return POINTER;
} else {
throw new IllegalStateException("Cannot get here: " + carrier.getName());
}
}
private static boolean isRegisterAggregate(MemoryLayout type) {
return type.bitSize() <= MAX_AGGREGATE_REGS_SIZE * 64;
}
private static TypeClass classifyStructType(GroupLayout layout) {
if (layout instanceof UnionLayout) {
return isRegisterAggregate(layout) ? STRUCT_REGISTER_X : STRUCT_REFERENCE;
}
if (!isRegisterAggregate(layout)) {
return STRUCT_REFERENCE;
}
// classify struct by its fields.
FieldCounter counter = FieldCounter.flatten(layout);
if (counter.integerCnt == 0 && counter.pointerCnt == 0 &&
(counter.floatCnt == 1 || counter.floatCnt == 2)) {
return STRUCT_REGISTER_F;
} else if (counter.integerCnt == 1 && counter.floatCnt == 1 &&
counter.pointerCnt == 0) {
return STRUCT_REGISTER_XF;
} else {
return STRUCT_REGISTER_X;
}
}
public static TypeClass classifyLayout(MemoryLayout type) {
if (type instanceof ValueLayout vt) {
return classifyValueType(vt);
} else if (type instanceof GroupLayout gt) {
return classifyStructType(gt);
} else {
throw new IllegalArgumentException("Unsupported layout: " + type);
}
}
}
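A few illustrative classifications, shown as a sketch; the layouts are built with the standard java.lang.foreign.MemoryLayout and ValueLayout factories:
// Expected results follow from the classification rules documented above.
TypeClass.classifyLayout(ValueLayout.JAVA_INT);                      // INTEGER
TypeClass.classifyLayout(MemoryLayout.structLayout(
        ValueLayout.JAVA_DOUBLE, ValueLayout.JAVA_DOUBLE));          // STRUCT_REGISTER_F (two FP fields, 16 bytes)
TypeClass.classifyLayout(MemoryLayout.structLayout(
        ValueLayout.JAVA_INT, ValueLayout.JAVA_FLOAT));              // STRUCT_REGISTER_XF (one integer + one FP field)
TypeClass.classifyLayout(MemoryLayout.structLayout(
        ValueLayout.JAVA_LONG, ValueLayout.JAVA_LONG,
        ValueLayout.JAVA_LONG));                                     // STRUCT_REFERENCE (24 bytes > 2 * XLEN)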

View File

@ -40,7 +40,7 @@ import static org.testng.Assert.*;
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @run testng/othervm --enable-native-access=ALL-UNNAMED LibraryLookupTest
*/
public class LibraryLookupTest {

View File

@ -24,7 +24,7 @@
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @run testng/othervm --enable-native-access=ALL-UNNAMED SafeFunctionAccessTest
*/

View File

@ -24,7 +24,7 @@
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @run testng/othervm --enable-native-access=ALL-UNNAMED StdLibTest
*/

View File

@ -24,7 +24,7 @@
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @run testng/othervm --enable-native-access=ALL-UNNAMED TestClassLoaderFindNative
*/

View File

@ -24,7 +24,7 @@
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper TestDowncallBase
*
* @run testng/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-VerifyDependencies

View File

@ -24,7 +24,7 @@
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper TestDowncallBase
*
* @run testng/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-VerifyDependencies

View File

@ -25,7 +25,7 @@
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @run testng/othervm --enable-native-access=ALL-UNNAMED TestFunctionDescriptor
*/

View File

@ -25,7 +25,7 @@
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @run testng/othervm --enable-native-access=ALL-UNNAMED TestHeapAlignment
*/

View File

@ -25,7 +25,7 @@
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @run testng/othervm --enable-native-access=ALL-UNNAMED TestIllegalLink
*/

View File

@ -24,7 +24,7 @@
/*
* @test
* @enablePreview
* @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64"
* @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
* @run testng/othervm
* -Djdk.internal.foreign.ProgrammableInvoker.USE_SPEC=true
* --enable-native-access=ALL-UNNAMED

View File

@ -67,6 +67,7 @@ public class TestLayoutEquality {
addLayoutConstants(testValues, PlatformLayouts.SysV.class);
addLayoutConstants(testValues, PlatformLayouts.Win64.class);
addLayoutConstants(testValues, PlatformLayouts.AArch64.class);
addLayoutConstants(testValues, PlatformLayouts.RISCV64.class);
return testValues.stream().map(e -> new Object[]{ e }).toArray(Object[][]::new);
}

View File

@ -24,7 +24,7 @@
/*
* @test
* @enablePreview
* @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64"
* @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
* @run testng TestLinker
*/

View File

@ -33,7 +33,7 @@
/* @test id=UpcallHighArity-FF
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper TestUpcallHighArity
*
* @run testng/othervm/native/manual
@ -45,7 +45,7 @@
/* @test id=UpcallHighArity-TF
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper TestUpcallHighArity
*
* @run testng/othervm/native/manual
@ -57,7 +57,7 @@
/* @test id=UpcallHighArity-FT
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper TestUpcallHighArity
*
* @run testng/othervm/native/manual
@ -69,7 +69,7 @@
/* @test id=UpcallHighArity-TT
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper TestUpcallHighArity
*
* @run testng/othervm/native/manual
@ -81,7 +81,7 @@
/* @test id=DowncallScope-F
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper TestDowncallBase
*
* @run testng/othervm/native/manual
@ -92,7 +92,7 @@
/* @test id=DowncallScope-T
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper TestDowncallBase
*
* @run testng/othervm/native/manual
@ -103,7 +103,7 @@
/* @test id=DowncallStack-F
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper TestDowncallBase
*
* @run testng/othervm/native/manual
@ -114,7 +114,7 @@
/* @test id=DowncallStack-T
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper TestDowncallBase
*
* @run testng/othervm/native/manual
@ -125,7 +125,7 @@
/* @test id=UpcallScope-FF
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper TestUpcallBase
*
* @run testng/othervm/native/manual
@ -137,7 +137,7 @@
/* @test id=UpcallScope-TF
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper TestUpcallBase
*
* @run testng/othervm/native/manual
@ -149,7 +149,7 @@
/* @test id=UpcallScope-FT
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper TestUpcallBase
*
* @run testng/othervm/native/manual
@ -161,7 +161,7 @@
/* @test id=UpcallScope-TT
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper TestUpcallBase
*
* @run testng/othervm/native/manual
@ -173,7 +173,7 @@
/* @test id=UpcallAsync-FF
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper TestUpcallBase
*
* @run testng/othervm/native/manual
@ -185,7 +185,7 @@
/* @test id=UpcallAsync-TF
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper TestUpcallBase
*
* @run testng/othervm/native/manual
@ -197,7 +197,7 @@
/* @test id=UpcallAsync-FT
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper TestUpcallBase
*
* @run testng/othervm/native/manual
@ -209,7 +209,7 @@
/* @test id=UpcallAsync-TT
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper TestUpcallBase
*
* @run testng/othervm/native/manual
@ -221,7 +221,7 @@
/* @test id=UpcallStack-FF
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper TestUpcallBase
*
* @run testng/othervm/native/manual
@ -233,7 +233,7 @@
/* @test id=UpcallStack-TF
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper TestUpcallBase
*
* @run testng/othervm/native/manual
@ -245,7 +245,7 @@
/* @test id=UpcallStack-FT
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper TestUpcallBase
*
* @run testng/othervm/native/manual
@ -257,7 +257,7 @@
/* @test id=UpcallStack-TT
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper TestUpcallBase
*
* @run testng/othervm/native/manual
@ -270,7 +270,7 @@
/*
* @test id=VarArgs
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper
*
* @run testng/othervm/native/manual

View File

@ -24,7 +24,7 @@
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @run testng/othervm
* --enable-native-access=ALL-UNNAMED
* TestNULLAddress

View File

@ -25,7 +25,7 @@
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @run testng/othervm --enable-native-access=ALL-UNNAMED TestNative
*/

View File

@ -24,7 +24,7 @@
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @modules java.base/jdk.internal.ref
* @run testng/othervm
* --enable-native-access=ALL-UNNAMED

View File

@ -24,7 +24,7 @@
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @run testng/othervm --enable-native-access=ALL-UNNAMED TestScopedOperations
*/

View File

@ -24,7 +24,7 @@
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @run testng/othervm -Xmx4G -XX:MaxDirectMemorySize=1M --enable-native-access=ALL-UNNAMED TestSegments
*/

View File

@ -31,7 +31,7 @@ import static org.testng.Assert.*;
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @run testng TestStringEncoding
*/

View File

@ -24,7 +24,7 @@
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @requires !vm.musl
* @build NativeTestHelper CallGeneratorHelper TestUpcallBase
*

View File

@ -24,7 +24,7 @@
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @library /test/lib
* @build ThrowingUpcall TestUpcallException
*

View File

@ -25,7 +25,7 @@
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper TestUpcallHighArity
*
* @run testng/othervm/native

View File

@ -24,7 +24,7 @@
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper TestUpcallBase
*
* @run testng/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-VerifyDependencies

View File

@ -24,7 +24,7 @@
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build NativeTestHelper CallGeneratorHelper TestUpcallBase
*
* @run testng/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-VerifyDependencies

View File

@ -25,7 +25,7 @@
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
*
* @run testng/othervm/native
* --enable-native-access=ALL-UNNAMED

View File

@ -25,7 +25,7 @@
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @run testng/othervm --enable-native-access=ALL-UNNAMED -Dgenerator.sample.factor=17 TestVarArgs
*/

View File

@ -0,0 +1,543 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, Institute of Software, Chinese Academy of Sciences.
* All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/*
* @test
* @enablePreview
* @requires sun.arch.data.model == "64"
* @modules java.base/jdk.internal.foreign
* java.base/jdk.internal.foreign.abi
* java.base/jdk.internal.foreign.abi.riscv64
* java.base/jdk.internal.foreign.abi.riscv64.linux
* @build CallArrangerTestBase
* @run testng TestRISCV64CallArranger
*/
import java.lang.foreign.FunctionDescriptor;
import java.lang.foreign.MemoryLayout;
import java.lang.foreign.MemorySegment;
import java.lang.foreign.StructLayout;
import jdk.internal.foreign.abi.Binding;
import jdk.internal.foreign.abi.CallingSequence;
import jdk.internal.foreign.abi.LinkerOptions;
import jdk.internal.foreign.abi.riscv64.linux.LinuxRISCV64CallArranger;
import jdk.internal.foreign.abi.StubLocations;
import jdk.internal.foreign.abi.VMStorage;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import java.lang.invoke.MethodType;
import static java.lang.foreign.Linker.Option.firstVariadicArg;
import static java.lang.foreign.ValueLayout.ADDRESS;
import static jdk.internal.foreign.PlatformLayouts.RISCV64.*;
import static jdk.internal.foreign.abi.Binding.*;
import static jdk.internal.foreign.abi.riscv64.RISCV64Architecture.*;
import static jdk.internal.foreign.abi.riscv64.RISCV64Architecture.Regs.*;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertTrue;
public class TestRISCV64CallArranger extends CallArrangerTestBase {
private static final short STACK_SLOT_SIZE = 8;
private static final VMStorage TARGET_ADDRESS_STORAGE = StubLocations.TARGET_ADDRESS.storage(StorageType.PLACEHOLDER);
private static final VMStorage RETURN_BUFFER_STORAGE = StubLocations.RETURN_BUFFER.storage(StorageType.PLACEHOLDER);
@Test
public void testEmpty() {
MethodType mt = MethodType.methodType(void.class);
FunctionDescriptor fd = FunctionDescriptor.ofVoid();
LinuxRISCV64CallArranger.Bindings bindings = LinuxRISCV64CallArranger.getBindings(mt, fd, false);
assertFalse(bindings.isInMemoryReturn());
CallingSequence callingSequence = bindings.callingSequence();
assertEquals(callingSequence.callerMethodType(), mt.insertParameterTypes(0, MemorySegment.class));
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) }
});
checkReturnBindings(callingSequence, new Binding[]{});
}
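// Integer arguments are passed in the eight argument registers x10-x17 (a0-a7); sub-int
// values are widened to int, and any remaining arguments spill to 8-byte stack slots.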
@Test
public void testInteger() {
MethodType mt = MethodType.methodType(void.class,
byte.class, short.class, int.class, int.class,
int.class, int.class, long.class, int.class,
int.class, byte.class);
FunctionDescriptor fd = FunctionDescriptor.ofVoid(
C_CHAR, C_SHORT, C_INT, C_INT,
C_INT, C_INT, C_LONG, C_INT,
C_INT, C_CHAR);
LinuxRISCV64CallArranger.Bindings bindings = LinuxRISCV64CallArranger.getBindings(mt, fd, false);
assertFalse(bindings.isInMemoryReturn());
CallingSequence callingSequence = bindings.callingSequence();
assertEquals(callingSequence.callerMethodType(), mt.insertParameterTypes(0, MemorySegment.class));
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ cast(byte.class, int.class), vmStore(x10, int.class) },
{ cast(short.class, int.class), vmStore(x11, int.class) },
{ vmStore(x12, int.class) },
{ vmStore(x13, int.class) },
{ vmStore(x14, int.class) },
{ vmStore(x15, int.class) },
{ vmStore(x16, long.class) },
{ vmStore(x17, int.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 0), int.class) },
{ cast(byte.class, int.class), vmStore(stackStorage(STACK_SLOT_SIZE, 8), int.class) }
});
checkReturnBindings(callingSequence, new Binding[]{});
}
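// Integer and floating-point arguments draw from separate register sequences:
// the ints go to x10/x11 and the floats to f10/f11 (fa0/fa1).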
@Test
public void testTwoIntTwoFloat() {
MethodType mt = MethodType.methodType(void.class, int.class, int.class, float.class, float.class);
FunctionDescriptor fd = FunctionDescriptor.ofVoid(C_INT, C_INT, C_FLOAT, C_FLOAT);
LinuxRISCV64CallArranger.Bindings bindings = LinuxRISCV64CallArranger.getBindings(mt, fd, false);
assertFalse(bindings.isInMemoryReturn());
CallingSequence callingSequence = bindings.callingSequence();
assertEquals(callingSequence.callerMethodType(), mt.insertParameterTypes(0, MemorySegment.class));
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(x10, int.class) },
{ vmStore(x11, int.class) },
{ vmStore(f10, float.class) },
{ vmStore(f11, float.class) }
});
checkReturnBindings(callingSequence, new Binding[]{});
}
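// Struct arguments of at most 16 bytes (2 * XLEN) are passed in up to two registers, using
// floating-point registers where the layout allows; larger structs are passed by reference
// to a copy. The structs() data provider below covers the individual layouts.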
@Test(dataProvider = "structs")
public void testStruct(MemoryLayout struct, Binding[] expectedBindings) {
MethodType mt = MethodType.methodType(void.class, MemorySegment.class);
FunctionDescriptor fd = FunctionDescriptor.ofVoid(struct);
LinuxRISCV64CallArranger.Bindings bindings = LinuxRISCV64CallArranger.getBindings(mt, fd, false);
assertFalse(bindings.isInMemoryReturn());
CallingSequence callingSequence = bindings.callingSequence();
assertEquals(callingSequence.callerMethodType(), mt.insertParameterTypes(0, MemorySegment.class));
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
expectedBindings
});
checkReturnBindings(callingSequence, new Binding[]{});
}
@DataProvider
public static Object[][] structs() {
MemoryLayout struct1 = MemoryLayout.structLayout(C_INT, C_INT, C_DOUBLE, C_INT);
return new Object[][]{
// struct s { void* a; double c; };
{
MemoryLayout.structLayout(C_POINTER, C_DOUBLE),
new Binding[]{
dup(),
bufferLoad(0, long.class), vmStore(x10, long.class),
bufferLoad(8, long.class), vmStore(x11, long.class)
}
},
// struct s { int32_t a, b; double c; };
{ MemoryLayout.structLayout(C_INT, C_INT, C_DOUBLE),
new Binding[]{
dup(),
// s.a & s.b
bufferLoad(0, long.class), vmStore(x10, long.class),
// s.c
bufferLoad(8, long.class), vmStore(x11, long.class)
}
},
// struct s { int32_t a, b; double c; int32_t d; };
{ struct1,
new Binding[]{
copy(struct1),
unboxAddress(),
vmStore(x10, long.class)
}
},
// struct s { int32_t a[1]; float b[1]; };
{ MemoryLayout.structLayout(MemoryLayout.sequenceLayout(1, C_INT),
MemoryLayout.sequenceLayout(1, C_FLOAT)),
new Binding[]{
dup(),
// s.a[0]
bufferLoad(0, int.class), vmStore(x10, int.class),
// s.b[0]
bufferLoad(4, float.class), vmStore(f10, float.class)
}
},
// struct s { float a; /* padding */ double b; };
{ MemoryLayout.structLayout(C_FLOAT, MemoryLayout.paddingLayout(32), C_DOUBLE),
new Binding[]{
dup(),
// s.a
bufferLoad(0, float.class), vmStore(f10, float.class),
// s.b
bufferLoad(8, double.class), vmStore(f11, double.class),
}
},
// struct __attribute__((__packed__)) s { float a; double b; };
{ MemoryLayout.structLayout(C_FLOAT, C_DOUBLE),
new Binding[]{
dup(),
// s.a
bufferLoad(0, float.class), vmStore(f10, float.class),
// s.b
bufferLoad(4, double.class), vmStore(f11, double.class),
}
},
// struct s { float a; float b __attribute__ ((aligned (8))); };
{ MemoryLayout.structLayout(C_FLOAT, MemoryLayout.paddingLayout(32),
C_FLOAT, MemoryLayout.paddingLayout(32)),
new Binding[]{
dup(),
// s.a
bufferLoad(0, float.class), vmStore(f10, float.class),
// s.b
bufferLoad(8, float.class), vmStore(f11, float.class),
}
}
};
}
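// A struct consisting of exactly two floats is passed and returned in floating-point
// registers; on the Java side the return value goes through an intermediate return buffer.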
@Test
public void testStructFA1() {
MemoryLayout fa = MemoryLayout.structLayout(C_FLOAT, C_FLOAT);
MethodType mt = MethodType.methodType(MemorySegment.class, float.class, int.class, MemorySegment.class);
FunctionDescriptor fd = FunctionDescriptor.of(fa, C_FLOAT, C_INT, fa);
LinuxRISCV64CallArranger.Bindings bindings = LinuxRISCV64CallArranger.getBindings(mt, fd, false);
assertFalse(bindings.isInMemoryReturn());
CallingSequence callingSequence = bindings.callingSequence();
assertEquals(callingSequence.callerMethodType(), mt.insertParameterTypes(0, MemorySegment.class, MemorySegment.class));
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(RETURN_BUFFER_STORAGE, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(f10, float.class) },
{ vmStore(x10, int.class) },
{
dup(),
bufferLoad(0, float.class),
vmStore(f11, float.class),
bufferLoad(4, float.class),
vmStore(f12, float.class)
}
});
checkReturnBindings(callingSequence, new Binding[]{
allocate(fa),
dup(),
vmLoad(f10, float.class),
bufferStore(0, float.class),
dup(),
vmLoad(f11, float.class),
bufferStore(4, float.class)
});
}
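// Same shape as testStructFA1, but with a float field and a double field: both are still
// passed and returned in floating-point registers.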
@Test
public void testStructFA2() {
MemoryLayout fa = MemoryLayout.structLayout(C_FLOAT, C_DOUBLE);
MethodType mt = MethodType.methodType(MemorySegment.class, float.class, int.class, MemorySegment.class);
FunctionDescriptor fd = FunctionDescriptor.of(fa, C_FLOAT, C_INT, fa);
LinuxRISCV64CallArranger.Bindings bindings = LinuxRISCV64CallArranger.getBindings(mt, fd, false);
assertFalse(bindings.isInMemoryReturn());
CallingSequence callingSequence = bindings.callingSequence();
assertEquals(callingSequence.callerMethodType(), mt.insertParameterTypes(0, MemorySegment.class, MemorySegment.class));
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(RETURN_BUFFER_STORAGE, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(f10, float.class) },
{ vmStore(x10, int.class) },
{
dup(),
bufferLoad(0, float.class),
vmStore(f11, float.class),
bufferLoad(4, double.class),
vmStore(f12, double.class)
}
});
checkReturnBindings(callingSequence, new Binding[]{
allocate(fa),
dup(),
vmLoad(f10, float.class),
bufferStore(0, float.class),
dup(),
vmLoad(f11, double.class),
bufferStore(4, double.class)
});
}
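// With f10-f16 consumed by the seven float arguments, too few FP registers remain for the
// two-float struct, so it falls back to the integer convention and is passed packed in x10.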
@Test
public void spillFloatingPointStruct() {
MemoryLayout struct = MemoryLayout.structLayout(C_FLOAT, C_FLOAT);
// void f(float, float, float, float, float, float, float, struct)
MethodType mt = MethodType.methodType(void.class, float.class, float.class,
float.class, float.class, float.class,
float.class, float.class, MemorySegment.class);
FunctionDescriptor fd = FunctionDescriptor.ofVoid(C_FLOAT, C_FLOAT, C_FLOAT, C_FLOAT,
C_FLOAT, C_FLOAT, C_FLOAT, struct);
LinuxRISCV64CallArranger.Bindings bindings = LinuxRISCV64CallArranger.getBindings(mt, fd, false);
assertFalse(bindings.isInMemoryReturn());
CallingSequence callingSequence = bindings.callingSequence();
assertEquals(callingSequence.callerMethodType(), mt.insertParameterTypes(0, MemorySegment.class));
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(f10, float.class) },
{ vmStore(f11, float.class) },
{ vmStore(f12, float.class) },
{ vmStore(f13, float.class) },
{ vmStore(f14, float.class) },
{ vmStore(f15, float.class) },
{ vmStore(f16, float.class) },
{
bufferLoad(0, long.class),
vmStore(x10, long.class)
}
});
checkReturnBindings(callingSequence, new Binding[]{});
}
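// Each struct { int; float; } argument is split between an integer register and a
// floating-point register.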
@Test
public void testStructBoth() {
MemoryLayout struct = MemoryLayout.structLayout(C_INT, C_FLOAT);
MethodType mt = MethodType.methodType(void.class, MemorySegment.class, MemorySegment.class, MemorySegment.class);
FunctionDescriptor fd = FunctionDescriptor.ofVoid(struct, struct, struct);
LinuxRISCV64CallArranger.Bindings bindings = LinuxRISCV64CallArranger.getBindings(mt, fd, false);
assertFalse(bindings.isInMemoryReturn());
CallingSequence callingSequence = bindings.callingSequence();
assertEquals(callingSequence.callerMethodType(), mt.insertParameterTypes(0, MemorySegment.class));
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{
dup(),
bufferLoad(0, int.class),
vmStore(x10, int.class),
bufferLoad(4, float.class),
vmStore(f10, float.class)
},
{
dup(),
bufferLoad(0, int.class),
vmStore(x11, int.class),
bufferLoad(4, float.class),
vmStore(f11, float.class)
},
{
dup(),
bufferLoad(0, int.class),
vmStore(x12, int.class),
bufferLoad(4, float.class),
vmStore(f12, float.class)
}
});
checkReturnBindings(callingSequence, new Binding[]{});
}
@Test
public void testStructStackSpill() {
// A large (> 16 byte) struct argument that is spilled to the
// stack should be passed as a pointer to a copy and occupy one
// stack slot.
MemoryLayout struct = MemoryLayout.structLayout(C_INT, C_INT, C_DOUBLE, C_INT);
MethodType mt = MethodType.methodType(
void.class, MemorySegment.class, MemorySegment.class, int.class, int.class,
int.class, int.class, int.class, int.class, MemorySegment.class, int.class);
FunctionDescriptor fd = FunctionDescriptor.ofVoid(
struct, struct, C_INT, C_INT, C_INT, C_INT, C_INT, C_INT, struct, C_INT);
LinuxRISCV64CallArranger.Bindings bindings = LinuxRISCV64CallArranger.getBindings(mt, fd, false);
assertFalse(bindings.isInMemoryReturn());
CallingSequence callingSequence = bindings.callingSequence();
assertEquals(callingSequence.callerMethodType(), mt.insertParameterTypes(0, MemorySegment.class));
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ copy(struct), unboxAddress(), vmStore(x10, long.class) },
{ copy(struct), unboxAddress(), vmStore(x11, long.class) },
{ vmStore(x12, int.class) },
{ vmStore(x13, int.class) },
{ vmStore(x14, int.class) },
{ vmStore(x15, int.class) },
{ vmStore(x16, int.class) },
{ vmStore(x17, int.class) },
{ copy(struct), unboxAddress(), vmStore(stackStorage(STACK_SLOT_SIZE, 0), long.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 8), int.class) }
});
checkReturnBindings(callingSequence, new Binding[]{});
}
@Test
public void testVarArgsInRegs() {
MethodType mt = MethodType.methodType(void.class, int.class, int.class, float.class);
FunctionDescriptor fd = FunctionDescriptor.ofVoid(C_INT, C_INT, C_FLOAT);
FunctionDescriptor fdExpected = FunctionDescriptor.ofVoid(ADDRESS, C_INT, C_INT, C_FLOAT);
LinuxRISCV64CallArranger.Bindings bindings = LinuxRISCV64CallArranger.getBindings(mt, fd, false, LinkerOptions.forDowncall(fd, firstVariadicArg(1)));
assertFalse(bindings.isInMemoryReturn());
CallingSequence callingSequence = bindings.callingSequence();
assertEquals(callingSequence.callerMethodType(), mt.insertParameterTypes(0, MemorySegment.class));
assertEquals(callingSequence.functionDesc(), fdExpected);
// Unlike the non-variadic case, the variadic float is passed in integer register x12,
// since variadic arguments follow the integer calling convention on RISC-V
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(x10, int.class) },
{ vmStore(x11, int.class) },
{ vmStore(x12, float.class) }
});
checkReturnBindings(callingSequence, new Binding[]{});
}
@Test
public void testVarArgsLong() {
MethodType mt = MethodType.methodType(void.class, int.class, int.class, int.class, double.class,
double.class, long.class, long.class, int.class,
double.class, double.class, long.class);
FunctionDescriptor fd = FunctionDescriptor.ofVoid(C_INT, C_INT, C_INT, C_DOUBLE, C_DOUBLE,
C_LONG, C_LONG, C_INT, C_DOUBLE,
C_DOUBLE, C_LONG);
FunctionDescriptor fdExpected = FunctionDescriptor.ofVoid(ADDRESS, C_INT, C_INT, C_INT, C_DOUBLE,
C_DOUBLE, C_LONG, C_LONG, C_INT,
C_DOUBLE, C_DOUBLE, C_LONG);
LinuxRISCV64CallArranger.Bindings bindings = LinuxRISCV64CallArranger.getBindings(mt, fd, false, LinkerOptions.forDowncall(fd, firstVariadicArg(1)));
assertFalse(bindings.isInMemoryReturn());
CallingSequence callingSequence = bindings.callingSequence();
assertEquals(callingSequence.callerMethodType(), mt.insertParameterTypes(0, MemorySegment.class));
assertEquals(callingSequence.functionDesc(), fdExpected);
// Variadic arguments follow the integer calling convention: the doubles are passed in
// integer registers, and the remaining arguments spill to the stack once x10-x17 are used up
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ vmStore(x10, int.class) },
{ vmStore(x11, int.class) },
{ vmStore(x12, int.class) },
{ vmStore(x13, double.class) },
{ vmStore(x14, double.class) },
{ vmStore(x15, long.class) },
{ vmStore(x16, long.class) },
{ vmStore(x17, int.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 0), double.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 8), double.class) },
{ vmStore(stackStorage(STACK_SLOT_SIZE, 16), long.class) }
});
checkReturnBindings(callingSequence, new Binding[]{});
}
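// A struct return value larger than 16 bytes is returned in memory: the caller passes the
// address of the return buffer as a hidden first argument in x10.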
@Test
public void testReturnStruct1() {
MemoryLayout struct = MemoryLayout.structLayout(C_LONG, C_LONG, C_FLOAT);
MethodType mt = MethodType.methodType(MemorySegment.class, int.class, int.class, float.class);
FunctionDescriptor fd = FunctionDescriptor.of(struct, C_INT, C_INT, C_FLOAT);
LinuxRISCV64CallArranger.Bindings bindings = LinuxRISCV64CallArranger.getBindings(mt, fd, false);
assertTrue(bindings.isInMemoryReturn());
CallingSequence callingSequence = bindings.callingSequence();
assertEquals(callingSequence.callerMethodType(),
MethodType.methodType(void.class, MemorySegment.class, MemorySegment.class,
int.class, int.class, float.class));
assertEquals(callingSequence.functionDesc(),
FunctionDescriptor.ofVoid(ADDRESS, C_POINTER, C_INT, C_INT, C_FLOAT));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) },
{ unboxAddress(), vmStore(x10, long.class) },
{ vmStore(x11, int.class) },
{ vmStore(x12, int.class) },
{ vmStore(f10, float.class) }
});
checkReturnBindings(callingSequence, new Binding[]{});
}
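// A 16-byte struct is small enough to be returned in the register pair x10/x11; the binding
// reassembles it into a MemorySegment through a return buffer.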
@Test
public void testReturnStruct2() {
MemoryLayout struct = MemoryLayout.structLayout(C_LONG, C_LONG);
MethodType mt = MethodType.methodType(MemorySegment.class);
FunctionDescriptor fd = FunctionDescriptor.of(struct);
LinuxRISCV64CallArranger.Bindings bindings = LinuxRISCV64CallArranger.getBindings(mt, fd, false);
assertFalse(bindings.isInMemoryReturn());
CallingSequence callingSequence = bindings.callingSequence();
assertEquals(callingSequence.callerMethodType(), mt.insertParameterTypes(0, MemorySegment.class, MemorySegment.class));
assertEquals(callingSequence.functionDesc(), fd.insertArgumentLayouts(0, ADDRESS, ADDRESS));
checkArgumentBindings(callingSequence, new Binding[][]{
{ unboxAddress(), vmStore(RETURN_BUFFER_STORAGE, long.class) },
{ unboxAddress(), vmStore(TARGET_ADDRESS_STORAGE, long.class) }
});
checkReturnBindings(callingSequence, new Binding[]{
allocate(struct),
dup(),
vmLoad(x10, long.class),
bufferStore(0, long.class),
dup(),
vmLoad(x11, long.class),
bufferStore(8, long.class)
});
}
}

View File

@@ -25,7 +25,7 @@
* @test
* @enablePreview
* @library ../ /test/lib
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @run testng/othervm --enable-native-access=ALL-UNNAMED TestCaptureCallState
*/

View File

@@ -25,7 +25,7 @@
* @test
* @enablePreview
* @library ../ /test/lib
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @run testng/othervm --enable-native-access=ALL-UNNAMED TestDontRelease
*/

View File

@@ -24,7 +24,7 @@
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @requires !vm.musl
*
* @library /test/lib

View File

@@ -24,7 +24,7 @@
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @requires !vm.musl
*
* @library /test/lib

View File

@@ -24,7 +24,7 @@
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @build invoker_module/* lookup_module/*
* @run testng/othervm --enable-native-access=invoker_module
* lookup_module/handle.lookup.MethodHandleLookup

View File

@@ -24,7 +24,7 @@
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @compile lookup/Lookup.java
* @compile invoker/Invoker.java
* @run main/othervm --enable-native-access=ALL-UNNAMED TestLoaderLookup

View File

@@ -30,7 +30,7 @@ import static org.testng.Assert.*;
/*
* @test
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @run testng/othervm TestLoaderLookupJNI
*/
public class TestLoaderLookupJNI {

View File

@@ -25,7 +25,7 @@
* @test
* @enablePreview
* @library ../
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @run testng/othervm
* --enable-native-access=ALL-UNNAMED
* -Xbatch

View File

@@ -25,7 +25,7 @@
* @test
* @enablePreview
* @library ../ /test/lib
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @run testng/othervm --enable-native-access=ALL-UNNAMED TestPassHeapSegment
*/

View File

@@ -24,7 +24,7 @@
/*
* @test id=default_gc
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @library /test/lib
* @library ../
* @build jdk.test.whitebox.WhiteBox
@@ -42,7 +42,7 @@
/*
* @test id=zgc
* @enablePreview
* @requires (((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64")
* @requires (((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64")
* @requires vm.gc.Z
* @library /test/lib
* @library ../
@@ -61,7 +61,7 @@
/*
* @test id=shenandoah
* @enablePreview
* @requires (((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64")
* @requires (((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64")
* @requires vm.gc.Shenandoah
* @library /test/lib
* @library ../

View File

@@ -24,7 +24,7 @@
/*
* @test id=default_gc
* @enablePreview
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @library /test/lib
* @library ../
* @build jdk.test.whitebox.WhiteBox
@@ -42,7 +42,7 @@
/*
* @test id=zgc
* @enablePreview
* @requires (((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64")
* @requires (((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64")
* @requires vm.gc.Z
* @library /test/lib
* @library ../
@@ -61,7 +61,7 @@
/*
* @test id=shenandoah
* @enablePreview
* @requires (((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64")
* @requires (((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64")
* @requires vm.gc.Shenandoah
* @library /test/lib
* @library ../

View File

@@ -25,7 +25,7 @@
* @test id=default_gc
* @enablePreview
* @bug 8277602
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @library /test/lib
* @library ../
* @build jdk.test.whitebox.WhiteBox

View File

@@ -26,7 +26,7 @@
* @test
* @enablePreview
* @library ../
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64" | os.arch == "riscv64"
* @modules java.base/jdk.internal.foreign
* java.base/jdk.internal.foreign.abi
* java.base/jdk.internal.foreign.abi.x64
@@ -36,6 +36,8 @@
* java.base/jdk.internal.foreign.abi.aarch64.linux
* java.base/jdk.internal.foreign.abi.aarch64.macos
* java.base/jdk.internal.foreign.abi.aarch64.windows
* java.base/jdk.internal.foreign.abi.riscv64
* java.base/jdk.internal.foreign.abi.riscv64.linux
* @run testng/othervm --enable-native-access=ALL-UNNAMED VaListTest
*/
@@ -44,6 +46,7 @@ import java.lang.foreign.SegmentScope;
import java.lang.foreign.VaList;
import jdk.internal.foreign.abi.aarch64.linux.LinuxAArch64Linker;
import jdk.internal.foreign.abi.aarch64.macos.MacOsAArch64Linker;
import jdk.internal.foreign.abi.riscv64.linux.LinuxRISCV64Linker;
import jdk.internal.foreign.abi.x64.sysv.SysVx64Linker;
import jdk.internal.foreign.abi.x64.windows.Windowsx64Linker;
import org.testng.annotations.DataProvider;
@@ -134,6 +137,8 @@ public class VaListTest extends NativeTestHelper {
= actions -> LinuxAArch64Linker.newVaList(actions, SegmentScope.auto());
private static final Function<Consumer<VaList.Builder>, VaList> macAArch64VaListFactory
= actions -> MacOsAArch64Linker.newVaList(actions, SegmentScope.auto());
private static final Function<Consumer<VaList.Builder>, VaList> linuxRISCV64VaListFactory
= actions -> LinuxRISCV64Linker.newVaList(actions, SegmentScope.auto());
private static final Function<Consumer<VaList.Builder>, VaList> platformVaListFactory
= (builder) -> VaList.make(builder, SegmentScope.auto());
@@ -145,6 +150,8 @@ public class VaListTest extends NativeTestHelper {
= LinuxAArch64Linker::newVaList;
private static final BiFunction<Consumer<VaList.Builder>, SegmentScope, VaList> macAArch64VaListScopedFactory
= MacOsAArch64Linker::newVaList;
private static final BiFunction<Consumer<VaList.Builder>, SegmentScope, VaList> linuxRISCV64VaListScopedFactory
= LinuxRISCV64Linker::newVaList;
private static final BiFunction<Consumer<VaList.Builder>, SegmentScope, VaList> platformVaListScopedFactory
= VaList::make;
@@ -160,6 +167,7 @@ public class VaListTest extends NativeTestHelper {
{ sysvVaListFactory, sumIntsJavaFact.apply(SysV.C_INT), SysV.C_INT },
{ linuxAArch64VaListFactory, sumIntsJavaFact.apply(AArch64.C_INT), AArch64.C_INT },
{ macAArch64VaListFactory, sumIntsJavaFact.apply(AArch64.C_INT), AArch64.C_INT },
{ linuxRISCV64VaListFactory, sumIntsJavaFact.apply(RISCV64.C_INT), RISCV64.C_INT },
{ platformVaListFactory, sumIntsNative, C_INT },
};
}
@@ -188,6 +196,7 @@ public class VaListTest extends NativeTestHelper {
{ sysvVaListFactory, sumDoublesJavaFact.apply(SysV.C_DOUBLE), SysV.C_DOUBLE },
{ linuxAArch64VaListFactory, sumDoublesJavaFact.apply(AArch64.C_DOUBLE), AArch64.C_DOUBLE },
{ macAArch64VaListFactory, sumDoublesJavaFact.apply(AArch64.C_DOUBLE), AArch64.C_DOUBLE },
{ linuxRISCV64VaListFactory, sumDoublesJavaFact.apply(RISCV64.C_DOUBLE), RISCV64.C_DOUBLE },
{ platformVaListFactory, sumDoublesNative, C_DOUBLE },
};
}
@@ -218,6 +227,7 @@ public class VaListTest extends NativeTestHelper {
{ sysvVaListFactory, getIntJavaFact.apply(SysV.C_POINTER), SysV.C_POINTER },
{ linuxAArch64VaListFactory, getIntJavaFact.apply(AArch64.C_POINTER), AArch64.C_POINTER },
{ macAArch64VaListFactory, getIntJavaFact.apply(AArch64.C_POINTER), AArch64.C_POINTER },
{ linuxRISCV64VaListFactory, getIntJavaFact.apply(RISCV64.C_POINTER), RISCV64.C_POINTER },
{ platformVaListFactory, getIntNative, C_POINTER },
};
}
@@ -273,6 +283,7 @@ public class VaListTest extends NativeTestHelper {
argsFact.apply(sysvVaListFactory, SysV.C_INT, sumStructJavaFact),
argsFact.apply(linuxAArch64VaListFactory, AArch64.C_INT, sumStructJavaFact),
argsFact.apply(macAArch64VaListFactory, AArch64.C_INT, sumStructJavaFact),
argsFact.apply(linuxRISCV64VaListFactory, RISCV64.C_INT, sumStructJavaFact),
argsFact.apply(platformVaListFactory, C_INT, sumStructNativeFact),
};
}
@@ -326,6 +337,7 @@ public class VaListTest extends NativeTestHelper {
argsFact.apply(sysvVaListFactory, SysV.C_LONG_LONG, sumStructJavaFact),
argsFact.apply(linuxAArch64VaListFactory, AArch64.C_LONG_LONG, sumStructJavaFact),
argsFact.apply(macAArch64VaListFactory, AArch64.C_LONG_LONG, sumStructJavaFact),
argsFact.apply(linuxRISCV64VaListFactory, RISCV64.C_LONG_LONG, sumStructJavaFact),
argsFact.apply(platformVaListFactory, C_LONG_LONG, sumStructNativeFact),
};
}
@@ -379,6 +391,7 @@ public class VaListTest extends NativeTestHelper {
argsFact.apply(sysvVaListFactory, SysV.C_FLOAT, sumStructJavaFact),
argsFact.apply(linuxAArch64VaListFactory, AArch64.C_FLOAT, sumStructJavaFact),
argsFact.apply(macAArch64VaListFactory, AArch64.C_FLOAT, sumStructJavaFact),
argsFact.apply(linuxRISCV64VaListFactory, RISCV64.C_FLOAT, sumStructJavaFact),
argsFact.apply(platformVaListFactory, C_FLOAT, sumStructNativeFact),
};
}
@@ -441,6 +454,7 @@ public class VaListTest extends NativeTestHelper {
argsFact.apply(sysvVaListFactory, SysV.C_LONG_LONG, sumStructJavaFact),
argsFact.apply(linuxAArch64VaListFactory, AArch64.C_LONG_LONG, sumStructJavaFact),
argsFact.apply(macAArch64VaListFactory, AArch64.C_LONG_LONG, sumStructJavaFact),
argsFact.apply(linuxRISCV64VaListFactory, RISCV64.C_LONG_LONG, sumStructJavaFact),
argsFact.apply(platformVaListFactory, C_LONG_LONG, sumStructNativeFact),
};
}
@@ -495,6 +509,7 @@ public class VaListTest extends NativeTestHelper {
{ sysvVaListFactory, sumStackJavaFact.apply(SysV.C_LONG_LONG, SysV.C_DOUBLE), SysV.C_LONG_LONG, SysV.C_DOUBLE },
{ linuxAArch64VaListFactory, sumStackJavaFact.apply(AArch64.C_LONG_LONG, AArch64.C_DOUBLE), AArch64.C_LONG_LONG, AArch64.C_DOUBLE },
{ macAArch64VaListFactory, sumStackJavaFact.apply(AArch64.C_LONG_LONG, AArch64.C_DOUBLE), AArch64.C_LONG_LONG, AArch64.C_DOUBLE },
{ linuxRISCV64VaListFactory, sumStackJavaFact.apply(RISCV64.C_LONG_LONG, RISCV64.C_DOUBLE), RISCV64.C_LONG_LONG, RISCV64.C_DOUBLE },
{ platformVaListFactory, sumStackNative, C_LONG_LONG, C_DOUBLE },
};
}
@@ -549,6 +564,8 @@ public class VaListTest extends NativeTestHelper {
{ linuxAArch64VaListFactory.apply(b -> {}) },
{ MacOsAArch64Linker.emptyVaList() },
{ macAArch64VaListFactory.apply(b -> {}) },
{ LinuxRISCV64Linker.emptyVaList() },
{ linuxRISCV64VaListFactory.apply(b -> {}) },
};
}
@@ -564,6 +581,7 @@ public class VaListTest extends NativeTestHelper {
{ sysvVaListScopedFactory, sumIntsJavaFact.apply(SysV.C_INT), SysV.C_INT },
{ linuxAArch64VaListScopedFactory, sumIntsJavaFact.apply(AArch64.C_INT), AArch64.C_INT },
{ macAArch64VaListScopedFactory, sumIntsJavaFact.apply(AArch64.C_INT), AArch64.C_INT },
{ linuxRISCV64VaListScopedFactory, sumIntsJavaFact.apply(RISCV64.C_INT), RISCV64.C_INT },
{ platformVaListScopedFactory, sumIntsNative, C_INT },
};
}
@@ -612,6 +630,7 @@ public class VaListTest extends NativeTestHelper {
{ sysvVaListScopedFactory, SysV.C_INT },
{ linuxAArch64VaListScopedFactory, AArch64.C_INT },
{ macAArch64VaListScopedFactory, AArch64.C_INT },
{ linuxRISCV64VaListScopedFactory, RISCV64.C_INT },
};
}

View File

@@ -24,7 +24,7 @@
/*
* @test
* @enablePreview
* @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64"
* @requires os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch=="riscv64"
* @library ../
* @run testng/othervm
* --enable-native-access=ALL-UNNAMED