8264774: Implementation of Foreign Function and Memory API (Incubator)

Co-authored-by: Paul Sandoz <psandoz@openjdk.org>
Co-authored-by: Jorn Vernee <jvernee@openjdk.org>
Co-authored-by: Vladimir Ivanov <vlivanov@openjdk.org>
Co-authored-by: Athijegannathan Sundararajan <sundar@openjdk.org>
Co-authored-by: Chris Hegarty <chegar@openjdk.org>
Reviewed-by: psandoz, chegar, mchung, vlivanov
Maurizio Cimadamore 2021-06-02 10:53:06 +00:00
parent 71425ddfb4
commit a223189b06
219 changed files with 10936 additions and 5695 deletions


@@ -84,3 +84,8 @@ const BufferLayout ForeignGlobals::parse_buffer_layout_impl(jobject jlayout) con
return layout;
}
const CallRegs ForeignGlobals::parse_call_regs_impl(jobject jconv) const {
ShouldNotCallThis();
return {};
}


@@ -359,6 +359,16 @@ frame frame::sender_for_entry_frame(RegisterMap* map) const {
return fr;
}
JavaFrameAnchor* OptimizedEntryBlob::jfa_for_frame(const frame& frame) const {
ShouldNotCallThis();
return nullptr;
}
frame frame::sender_for_optimized_entry_frame(RegisterMap* map) const {
ShouldNotCallThis();
return {};
}
//------------------------------------------------------------------------------
// frame::verify_deopt_original_pc
//


@@ -878,7 +878,7 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
// 64-bit items (AArch64 ABI) even though Java would only store
// 32 bits for a parameter. On 32-bit it will simply be 32 bits,
// so this routine will do 32->32 on 32-bit and 32->64 on 64-bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
void SharedRuntime::move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
if (src.first()->is_stack()) {
if (dst.first()->is_stack()) {
// stack to stack
@@ -979,7 +979,7 @@ static void object_move(MacroAssembler* masm,
}
// A float arg may have to do float reg int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
void SharedRuntime::float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
assert(src.first()->is_stack() && dst.first()->is_stack() ||
src.first()->is_reg() && dst.first()->is_reg(), "Unexpected error");
if (src.first()->is_stack()) {
@@ -998,7 +998,7 @@ static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
}
// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
void SharedRuntime::long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
if (src.first()->is_stack()) {
if (dst.first()->is_stack()) {
// stack to stack
@@ -1022,7 +1022,7 @@ static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
void SharedRuntime::double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
assert(src.first()->is_stack() && dst.first()->is_stack() ||
src.first()->is_reg() && dst.first()->is_reg(), "Unexpected error");
if (src.first()->is_stack()) {
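These four move helpers (move32_64, float_move, long_move, double_move) are promoted from file-local statics to SharedRuntime members so the new upcall stub generator further down can reuse them. As a standalone illustration of the 32->64 widening the comment above describes (plain C++, not HotSpot code):

#include <cstdint>
#include <cstdio>

int main() {
  // An int argument occupies a full 64-bit slot in the native ABI, and the
  // move sign-extends it, so -1 stays -1 rather than becoming 4294967295.
  int32_t v = -1;
  int64_t widened = (int64_t) v; // the widening move32_64 performs on 64-bit
  std::printf("%lld\n", (long long) widened);
  return 0;
}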


@@ -99,3 +99,12 @@ address ProgrammableUpcallHandler::generate_upcall_stub(jobject rec, jobject jab
return blob->code_begin();
}
address ProgrammableUpcallHandler::generate_optimized_upcall_stub(jobject mh, Method* entry, jobject jabi, jobject jconv) {
ShouldNotCallThis();
return nullptr;
}
bool ProgrammableUpcallHandler::supports_optimized_upcalls() {
return false;
}


@@ -87,3 +87,32 @@ const BufferLayout ForeignGlobals::parse_buffer_layout_impl(jobject jlayout) con
return layout;
}
const CallRegs ForeignGlobals::parse_call_regs_impl(jobject jconv) const {
oop conv_oop = JNIHandles::resolve_non_null(jconv);
objArrayOop arg_regs_oop = cast<objArrayOop>(conv_oop->obj_field(CallConvOffsets.arg_regs_offset));
objArrayOop ret_regs_oop = cast<objArrayOop>(conv_oop->obj_field(CallConvOffsets.ret_regs_offset));
CallRegs result;
result._args_length = arg_regs_oop->length();
result._arg_regs = NEW_RESOURCE_ARRAY(VMReg, result._args_length);
result._rets_length = ret_regs_oop->length();
result._ret_regs = NEW_RESOURCE_ARRAY(VMReg, result._rets_length);
for (int i = 0; i < result._args_length; i++) {
oop storage = arg_regs_oop->obj_at(i);
jint index = storage->int_field(VMS.index_offset);
jint type = storage->int_field(VMS.type_offset);
result._arg_regs[i] = VMRegImpl::vmStorageToVMReg(type, index);
}
for (int i = 0; i < result._rets_length; i++) {
oop storage = ret_regs_oop->obj_at(i);
jint index = storage->int_field(VMS.index_offset);
jint type = storage->int_field(VMS.type_offset);
result._ret_regs[i] = VMRegImpl::vmStorageToVMReg(type, index);
}
return result;
}
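Each VMStorage object carries a (type, index) pair that vmStorageToVMReg decodes into a VMReg. The decoder is per-platform; a minimal sketch of its x86_64 shape, assuming the type encodings 0 = integer register, 1 = vector register, 3 = stack slot (illustrative, not verbatim source):

VMReg VMRegImpl::vmStorageToVMReg(int type, int index) {
  switch (type) {
    case 0: return ::as_Register(index)->as_VMReg();    // integer register
    case 1: return ::as_XMMRegister(index)->as_VMReg(); // vector register
    case 3: return VMRegImpl::stack2reg(index * 2);     // 2 VMReg slots per 8-byte stack word
    default: return VMRegImpl::Bad();
  }
}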


@@ -102,6 +102,8 @@ bool frame::safe_for_sender(JavaThread *thread) {
if (is_entry_frame()) {
// an entry frame must have a valid fp.
return fp_safe && is_entry_frame_valid(thread);
} else if (is_optimized_entry_frame()) {
return fp_safe;
}
intptr_t* sender_sp = NULL;
@@ -199,6 +201,8 @@ bool frame::safe_for_sender(JavaThread *thread) {
address jcw = (address)sender.entry_frame_call_wrapper();
return thread->is_in_stack_range_excl(jcw, (address)sender.fp());
} else if (sender_blob->is_optimized_entry_blob()) {
return false;
}
CompiledMethod* nm = sender_blob->as_compiled_method_or_null();
@@ -349,6 +353,32 @@ frame frame::sender_for_entry_frame(RegisterMap* map) const {
return fr;
}
JavaFrameAnchor* OptimizedEntryBlob::jfa_for_frame(const frame& frame) const {
// need unextended_sp here, since normal sp is wrong for interpreter callees
return reinterpret_cast<JavaFrameAnchor*>(reinterpret_cast<char*>(frame.unextended_sp()) + in_bytes(jfa_sp_offset()));
}
frame frame::sender_for_optimized_entry_frame(RegisterMap* map) const {
assert(map != NULL, "map must be set");
OptimizedEntryBlob* blob = _cb->as_optimized_entry_blob();
// Java frame called from C; skip all C frames and return top C
// frame of that chunk as the sender
JavaFrameAnchor* jfa = blob->jfa_for_frame(*this);
assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
// Since we are walking the stack now this nested anchor is obviously walkable
// even if it wasn't when it was stacked.
if (!jfa->walkable()) {
// Capture _last_Java_pc (if needed) and mark anchor walkable.
jfa->capture_last_Java_pc();
}
map->clear();
assert(map->include_argument_oops(), "should be set by clear");
vmassert(jfa->last_Java_pc() != NULL, "not walkable");
frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
return fr;
}
//------------------------------------------------------------------------------
// frame::verify_deopt_original_pc
//
@@ -478,8 +508,9 @@ frame frame::sender_raw(RegisterMap* map) const {
// update it accordingly
map->set_include_argument_oops(false);
if (is_entry_frame()) return sender_for_entry_frame(map);
if (is_interpreted_frame()) return sender_for_interpreter_frame(map);
if (is_entry_frame()) return sender_for_entry_frame(map);
if (is_optimized_entry_frame()) return sender_for_optimized_entry_frame(map);
if (is_interpreted_frame()) return sender_for_interpreter_frame(map);
assert(_cb == CodeCache::find_blob(pc()),"Must be the same");
if (_cb != NULL) {
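Note that the anchor is made walkable lazily: the stub stores only sp and fp on the fast path, and the pc is recovered from the stack when a walk actually happens. A sketch of what capture_last_Java_pc amounts to on x86, assuming the return pc sits one word below the saved sp (illustrative):

void JavaFrameAnchor::capture_last_Java_pc() {
  assert(_last_Java_sp != NULL, "no last frame set");
  assert(_last_Java_pc == NULL, "already walkable");
  _last_Java_pc = (address) _last_Java_sp[-1]; // return pc pushed by the call
}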


@@ -70,8 +70,6 @@ public:
address last_Java_pc(void) { return _last_Java_pc; }
private:
static ByteSize last_Java_fp_offset() { return byte_offset_of(JavaFrameAnchor, _last_Java_fp); }
public:


@@ -1124,7 +1124,7 @@ static void object_move(MacroAssembler* masm,
}
// A float arg may have to do float reg int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
void SharedRuntime::float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
// Because of the calling convention we know that src is either a stack location
@@ -1142,7 +1142,7 @@ static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
}
// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
void SharedRuntime::long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
// The only legal possibility for a long_move VMRegPair is:
// 1: two stack slots (possibly unaligned)
@@ -1161,7 +1161,7 @@ static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
}
// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
void SharedRuntime::double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
// The only legal possibilities for a double_move VMRegPair are:
// The painful thing here is that like long_move a VMRegPair might be


@@ -1167,7 +1167,7 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
// 64-bit items (x86_32/64 ABI) even though Java would only store
// 32 bits for a parameter. On 32-bit it will simply be 32 bits,
// so this routine will do 32->32 on 32-bit and 32->64 on 64-bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
void SharedRuntime::move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
if (src.first()->is_stack()) {
if (dst.first()->is_stack()) {
// stack to stack
@@ -1285,7 +1285,7 @@ static void object_move(MacroAssembler* masm,
}
// A float arg may have to do float reg int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
void SharedRuntime::float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
// The calling conventions assure us that each VMRegPair is either
@@ -1314,7 +1314,7 @@ static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
}
// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
void SharedRuntime::long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
// The calling conventions assure us that each VMRegPair is either
// all really one physical register or adjacent stack slots.
@@ -1339,7 +1339,7 @@ static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
}
// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
void SharedRuntime::double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
// The calling conventions assure us that each VMRegPair is either
// all really one physical register or adjacent stack slots.
@@ -1448,13 +1448,13 @@ static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType
// load the length relative to the body.
__ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
arrayOopDesc::base_offset_in_bytes(in_elem_type)));
move32_64(masm, tmp, length_arg);
SharedRuntime::move32_64(masm, tmp, length_arg);
__ jmpb(done);
__ bind(is_null);
// Pass zeros
__ xorptr(tmp_reg, tmp_reg);
move_ptr(masm, tmp, body_arg);
move32_64(masm, tmp, length_arg);
SharedRuntime::move32_64(masm, tmp, length_arg);
__ bind(done);
__ block_comment("} unpack_array_argument");
@@ -1541,8 +1541,8 @@ class ComputeMoveOrder: public StackObj {
GrowableArray<MoveOperation*> edges;
public:
ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) {
ComputeMoveOrder(int total_in_args, const VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
const BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) {
// Move operations where the dest is the stack can all be
// scheduled first since they can't interfere with the other moves.
for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
@@ -4087,3 +4087,13 @@ void OptoRuntime::generate_exception_blob() {
_exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
}
#endif // COMPILER2
void SharedRuntime::compute_move_order(const BasicType* in_sig_bt,
int total_in_args, const VMRegPair* in_regs,
int total_out_args, VMRegPair* out_regs,
GrowableArray<int>& arg_order,
VMRegPair tmp_vmreg) {
ComputeMoveOrder order(total_in_args, in_regs,
total_out_args, out_regs,
in_sig_bt, arg_order, tmp_vmreg);
}
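ComputeMoveOrder is needed because register-to-register argument shuffles can form cycles (a plain swap is the smallest case), and no sequential ordering satisfies a cycle until one value is parked in tmp_vmreg. A standalone model of the idea (plain C++; deliberately simplified, not the HotSpot class):

#include <cstdio>
#include <vector>

struct Move { int from; int to; };

// Emit moves so no source is overwritten before it is read; break cycles
// by routing one endpoint through the temp location.
static std::vector<Move> order_moves(std::vector<Move> pending, int tmp) {
  std::vector<Move> ordered;
  while (!pending.empty()) {
    bool progress = false;
    for (size_t i = 0; i < pending.size(); i++) {
      bool dest_still_read = false;
      for (size_t j = 0; j < pending.size(); j++) {
        if (j != i && pending[j].from == pending[i].to) dest_still_read = true;
      }
      if (!dest_still_read) { // safe: nothing still reads this destination
        ordered.push_back(pending[i]);
        pending.erase(pending.begin() + i);
        progress = true;
        break;
      }
    }
    if (!progress) { // every remaining destination is still live: a cycle
      Move m = pending.back();
      pending.pop_back();
      ordered.push_back({m.from, tmp}); // park one value in the temp
      pending.push_back({tmp, m.to});   // finish that move from the temp
    }
  }
  return ordered;
}

int main() {
  // Swap r0 <-> r1, with r99 standing in for tmp_vmreg.
  for (Move m : order_moves({{0, 1}, {1, 0}}, 99)) {
    std::printf("move r%d -> r%d\n", m.from, m.to);
  }
  return 0;
}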


@@ -28,3 +28,12 @@ address ProgrammableUpcallHandler::generate_upcall_stub(jobject rec, jobject jab
Unimplemented();
return nullptr;
}
address ProgrammableUpcallHandler::generate_optimized_upcall_stub(jobject mh, Method* entry, jobject jabi, jobject jconv) {
ShouldNotCallThis();
return nullptr;
}
bool ProgrammableUpcallHandler::supports_optimized_upcalls() {
return false;
}


@@ -24,8 +24,17 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/codeBlob.hpp"
#include "code/codeBlob.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/disassembler.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "prims/universalUpcallHandler.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"
#define __ _masm->
@@ -141,3 +150,701 @@ address ProgrammableUpcallHandler::generate_upcall_stub(jobject rec, jobject jab
return blob->code_begin();
}
struct ArgMove {
BasicType bt;
VMRegPair from;
VMRegPair to;
bool is_identity() const {
return from.first() == to.first() && from.second() == to.second();
}
};
static GrowableArray<ArgMove> compute_argument_shuffle(Method* entry, int& out_arg_size_bytes, const CallRegs& conv, BasicType& ret_type) {
assert(entry->is_static(), "");
// Fill in the signature array, for the calling-convention call.
const int total_out_args = entry->size_of_parameters();
assert(total_out_args > 0, "receiver arg ");
BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_out_args);
VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_out_args);
{
int i = 0;
SignatureStream ss(entry->signature());
for (; !ss.at_return_type(); ss.next()) {
out_sig_bt[i++] = ss.type(); // Collect remaining bits of signature
if (ss.type() == T_LONG || ss.type() == T_DOUBLE)
out_sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
}
assert(i == total_out_args, "");
ret_type = ss.type();
}
int out_arg_slots = SharedRuntime::java_calling_convention(out_sig_bt, out_regs, total_out_args);
const int total_in_args = total_out_args - 1; // skip receiver
BasicType* in_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
VMRegPair* in_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_in_args);
for (int i = 0; i < total_in_args ; i++ ) {
in_sig_bt[i] = out_sig_bt[i+1]; // skip receiver
}
// Now figure out where the args must be stored and how much stack space they require.
conv.calling_convention(in_sig_bt, in_regs, total_in_args);
GrowableArray<int> arg_order(2 * total_in_args);
VMRegPair tmp_vmreg;
tmp_vmreg.set2(rbx->as_VMReg());
// Compute a valid move order, using tmp_vmreg to break any cycles
SharedRuntime::compute_move_order(in_sig_bt,
total_in_args, in_regs,
total_out_args, out_regs,
arg_order,
tmp_vmreg);
GrowableArray<ArgMove> arg_order_vmreg(total_in_args); // conservative
#ifdef ASSERT
bool reg_destroyed[RegisterImpl::number_of_registers];
bool freg_destroyed[XMMRegisterImpl::number_of_registers];
for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
reg_destroyed[r] = false;
}
for ( int f = 0 ; f < XMMRegisterImpl::number_of_registers ; f++ ) {
freg_destroyed[f] = false;
}
#endif // ASSERT
for (int i = 0; i < arg_order.length(); i += 2) {
int in_arg = arg_order.at(i);
int out_arg = arg_order.at(i + 1);
assert(in_arg != -1 || out_arg != -1, "");
BasicType arg_bt = (in_arg != -1 ? in_sig_bt[in_arg] : out_sig_bt[out_arg]);
switch (arg_bt) {
case T_BOOLEAN:
case T_BYTE:
case T_SHORT:
case T_CHAR:
case T_INT:
case T_FLOAT:
break; // process
case T_LONG:
case T_DOUBLE:
assert(in_arg == -1 || (in_arg + 1 < total_in_args && in_sig_bt[in_arg + 1] == T_VOID), "bad arg list: %d", in_arg);
assert(out_arg == -1 || (out_arg + 1 < total_out_args && out_sig_bt[out_arg + 1] == T_VOID), "bad arg list: %d", out_arg);
break; // process
case T_VOID:
continue; // skip
default:
fatal("found in upcall args: %s", type2name(arg_bt));
}
ArgMove move;
move.bt = arg_bt;
move.from = (in_arg != -1 ? in_regs[in_arg] : tmp_vmreg);
move.to = (out_arg != -1 ? out_regs[out_arg] : tmp_vmreg);
if (move.is_identity()) {
continue; // useless move
}
#ifdef ASSERT
if (in_arg != -1) {
if (in_regs[in_arg].first()->is_Register()) {
assert(!reg_destroyed[in_regs[in_arg].first()->as_Register()->encoding()], "destroyed reg!");
} else if (in_regs[in_arg].first()->is_XMMRegister()) {
assert(!freg_destroyed[in_regs[in_arg].first()->as_XMMRegister()->encoding()], "destroyed reg!");
}
}
if (out_arg != -1) {
if (out_regs[out_arg].first()->is_Register()) {
reg_destroyed[out_regs[out_arg].first()->as_Register()->encoding()] = true;
} else if (out_regs[out_arg].first()->is_XMMRegister()) {
freg_destroyed[out_regs[out_arg].first()->as_XMMRegister()->encoding()] = true;
}
}
#endif /* ASSERT */
arg_order_vmreg.push(move);
}
int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
out_arg_size_bytes = align_up(stack_slots * VMRegImpl::stack_slot_size, StackAlignmentInBytes);
return arg_order_vmreg;
}
static const char* null_safe_string(const char* str) {
return str == nullptr ? "NULL" : str;
}
#ifdef ASSERT
static void print_arg_moves(const GrowableArray<ArgMove>& arg_moves, Method* entry) {
LogTarget(Trace, foreign) lt;
if (lt.is_enabled()) {
ResourceMark rm;
LogStream ls(lt);
ls.print_cr("Argument shuffle for %s {", entry->name_and_sig_as_C_string());
for (int i = 0; i < arg_moves.length(); i++) {
ArgMove arg_mv = arg_moves.at(i);
BasicType arg_bt = arg_mv.bt;
VMRegPair from_vmreg = arg_mv.from;
VMRegPair to_vmreg = arg_mv.to;
ls.print("Move a %s from (", null_safe_string(type2name(arg_bt)));
from_vmreg.first()->print_on(&ls);
ls.print(",");
from_vmreg.second()->print_on(&ls);
ls.print(") to ");
to_vmreg.first()->print_on(&ls);
ls.print(",");
to_vmreg.second()->print_on(&ls);
ls.print_cr(")");
}
ls.print_cr("}");
}
}
#endif
void save_java_frame_anchor(MacroAssembler* _masm, ByteSize store_offset, Register thread) {
__ block_comment("{ save_java_frame_anchor ");
// upcall->jfa._last_Java_fp = _thread->_anchor._last_Java_fp;
__ movptr(rscratch1, Address(thread, JavaThread::last_Java_fp_offset()));
__ movptr(Address(rsp, store_offset + JavaFrameAnchor::last_Java_fp_offset()), rscratch1);
// upcall->jfa._last_Java_pc = _thread->_anchor._last_Java_pc;
__ movptr(rscratch1, Address(thread, JavaThread::last_Java_pc_offset()));
__ movptr(Address(rsp, store_offset + JavaFrameAnchor::last_Java_pc_offset()), rscratch1);
// upcall->jfa._last_Java_sp = _thread->_anchor._last_Java_sp;
__ movptr(rscratch1, Address(thread, JavaThread::last_Java_sp_offset()));
__ movptr(Address(rsp, store_offset + JavaFrameAnchor::last_Java_sp_offset()), rscratch1);
__ block_comment("} save_java_frame_anchor ");
}
void restore_java_frame_anchor(MacroAssembler* _masm, ByteSize load_offset, Register thread) {
__ block_comment("{ restore_java_frame_anchor ");
// thread->_last_Java_sp = NULL
__ movptr(Address(thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
// ThreadStateTransition::transition_from_java(_thread, _thread_in_vm);
// __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
__ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
//_thread->frame_anchor()->copy(&_anchor);
// _thread->_last_Java_fp = upcall->_last_Java_fp;
// _thread->_last_Java_pc = upcall->_last_Java_pc;
// _thread->_last_Java_sp = upcall->_last_Java_sp;
__ movptr(rscratch1, Address(rsp, load_offset + JavaFrameAnchor::last_Java_fp_offset()));
__ movptr(Address(thread, JavaThread::last_Java_fp_offset()), rscratch1);
__ movptr(rscratch1, Address(rsp, load_offset + JavaFrameAnchor::last_Java_pc_offset()));
__ movptr(Address(thread, JavaThread::last_Java_pc_offset()), rscratch1);
__ movptr(rscratch1, Address(rsp, load_offset + JavaFrameAnchor::last_Java_sp_offset()));
__ movptr(Address(thread, JavaThread::last_Java_sp_offset()), rscratch1);
__ block_comment("} restore_java_frame_anchor ");
}
static void save_native_arguments(MacroAssembler* _masm, const CallRegs& conv, int arg_save_area_offset) {
__ block_comment("{ save_native_args ");
int store_offset = arg_save_area_offset;
for (int i = 0; i < conv._args_length; i++) {
VMReg reg = conv._arg_regs[i];
if (reg->is_Register()) {
__ movptr(Address(rsp, store_offset), reg->as_Register());
store_offset += 8;
} else if (reg->is_XMMRegister()) {
// Java API doesn't support vector args
__ movdqu(Address(rsp, store_offset), reg->as_XMMRegister());
store_offset += 16;
}
// do nothing for stack
}
__ block_comment("} save_native_args ");
}
static void restore_native_arguments(MacroAssembler* _masm, const CallRegs& conv, int arg_save_area_offset) {
__ block_comment("{ restore_native_args ");
int load_offset = arg_save_area_offset;
for (int i = 0; i < conv._args_length; i++) {
VMReg reg = conv._arg_regs[i];
if (reg->is_Register()) {
__ movptr(reg->as_Register(), Address(rsp, load_offset));
load_offset += 8;
} else if (reg->is_XMMRegister()) {
// Java API doesn't support vector args
__ movdqu(reg->as_XMMRegister(), Address(rsp, load_offset));
load_offset += 16;
}
// do nothing for stack
}
__ block_comment("} restore_native_args ");
}
static bool is_valid_XMM(XMMRegister reg) {
return reg->is_valid() && (UseAVX >= 3 || (reg->encoding() < 16)); // why is this not covered by is_valid()?
}
// for callee saved regs, according to the caller's ABI
static int compute_reg_save_area_size(const ABIDescriptor& abi) {
int size = 0;
for (Register reg = as_Register(0); reg->is_valid(); reg = reg->successor()) {
if (reg == rbp || reg == rsp) continue; // saved/restored by prologue/epilogue
if (!abi.is_volatile_reg(reg)) {
size += 8; // bytes
}
}
for (XMMRegister reg = as_XMMRegister(0); is_valid_XMM(reg); reg = reg->successor()) {
if (!abi.is_volatile_reg(reg)) {
if (UseAVX >= 3) {
size += 64; // bytes
} else if (UseAVX >= 1) {
size += 32;
} else {
size += 16;
}
}
}
#ifndef _WIN64
// for mxcsr
size += 8;
#endif
return size;
}
static int compute_arg_save_area_size(const CallRegs& conv) {
int result_size = 0;
for (int i = 0; i < conv._args_length; i++) {
VMReg reg = conv._arg_regs[i];
if (reg->is_Register()) {
result_size += 8;
} else if (reg->is_XMMRegister()) {
// Java API doesn't support vector args
result_size += 16;
}
// do nothing for stack
}
return result_size;
}
constexpr int MXCSR_MASK = 0xFFC0; // Mask out any pending exceptions
static void preserve_callee_saved_registers(MacroAssembler* _masm, const ABIDescriptor& abi, int reg_save_area_offset) {
// 1. iterate all registers in the architecture
// - check if they are volatile or not for the given abi
// - if NOT, we need to save it here
// 2. save mxcsr on non-windows platforms
int offset = reg_save_area_offset;
__ block_comment("{ preserve_callee_saved_regs ");
for (Register reg = as_Register(0); reg->is_valid(); reg = reg->successor()) {
if (reg == rbp || reg == rsp) continue; // saved/restored by prologue/epilogue
if (!abi.is_volatile_reg(reg)) {
__ movptr(Address(rsp, offset), reg);
offset += 8;
}
}
for (XMMRegister reg = as_XMMRegister(0); is_valid_XMM(reg); reg = reg->successor()) {
if (!abi.is_volatile_reg(reg)) {
if (UseAVX >= 3) {
__ evmovdqul(Address(rsp, offset), reg, Assembler::AVX_512bit);
offset += 64;
} else if (UseAVX >= 1) {
__ vmovdqu(Address(rsp, offset), reg);
offset += 32;
} else {
__ movdqu(Address(rsp, offset), reg);
offset += 16;
}
}
}
#ifndef _WIN64
{
const Address mxcsr_save(rsp, offset);
Label skip_ldmx;
__ stmxcsr(mxcsr_save);
__ movl(rax, mxcsr_save);
__ andl(rax, MXCSR_MASK); // Only check control and mask bits
ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
__ cmp32(rax, mxcsr_std);
__ jcc(Assembler::equal, skip_ldmx);
__ ldmxcsr(mxcsr_std);
__ bind(skip_ldmx);
}
#endif
__ block_comment("} preserve_callee_saved_regs ");
}
static void restore_callee_saved_registers(MacroAssembler* _masm, const ABIDescriptor& abi, int reg_save_area_offset) {
// 1. iterate all registers in the architecture
// - check if they are volatile or not for the given abi
// - if NOT, we need to restore it here
// 2. restore mxcsr on non-windows platforms
int offset = reg_save_area_offset;
__ block_comment("{ restore_callee_saved_regs ");
for (Register reg = as_Register(0); reg->is_valid(); reg = reg->successor()) {
if (reg == rbp || reg == rsp) continue; // saved/restored by prologue/epilogue
if (!abi.is_volatile_reg(reg)) {
__ movptr(reg, Address(rsp, offset));
offset += 8;
}
}
for (XMMRegister reg = as_XMMRegister(0); is_valid_XMM(reg); reg = reg->successor()) {
if (!abi.is_volatile_reg(reg)) {
if (UseAVX >= 3) {
__ evmovdqul(reg, Address(rsp, offset), Assembler::AVX_512bit);
offset += 64;
} else if (UseAVX >= 1) {
__ vmovdqu(reg, Address(rsp, offset));
offset += 32;
} else {
__ movdqu(reg, Address(rsp, offset));
offset += 16;
}
}
}
#ifndef _WIN64
const Address mxcsr_save(rsp, offset);
__ ldmxcsr(mxcsr_save);
#endif
__ block_comment("} restore_callee_saved_regs ");
}
static void shuffle_arguments(MacroAssembler* _masm, const GrowableArray<ArgMove>& arg_moves) {
for (int i = 0; i < arg_moves.length(); i++) {
ArgMove arg_mv = arg_moves.at(i);
BasicType arg_bt = arg_mv.bt;
VMRegPair from_vmreg = arg_mv.from;
VMRegPair to_vmreg = arg_mv.to;
assert(
!((from_vmreg.first()->is_Register() && to_vmreg.first()->is_XMMRegister())
|| (from_vmreg.first()->is_XMMRegister() && to_vmreg.first()->is_Register())),
"move between gp and fp reg not supported");
__ block_comment(err_msg("bt=%s", null_safe_string(type2name(arg_bt))));
switch (arg_bt) {
case T_BOOLEAN:
case T_BYTE:
case T_SHORT:
case T_CHAR:
case T_INT:
SharedRuntime::move32_64(_masm, from_vmreg, to_vmreg);
break;
case T_FLOAT:
SharedRuntime::float_move(_masm, from_vmreg, to_vmreg);
break;
case T_DOUBLE:
SharedRuntime::double_move(_masm, from_vmreg, to_vmreg);
break;
case T_LONG :
SharedRuntime::long_move(_masm, from_vmreg, to_vmreg);
break;
default:
fatal("found in upcall args: %s", type2name(arg_bt));
}
}
}
struct AuxiliarySaves {
JavaFrameAnchor jfa;
uintptr_t thread;
bool should_detach;
};
address ProgrammableUpcallHandler::generate_optimized_upcall_stub(jobject receiver, Method* entry, jobject jabi, jobject jconv) {
ResourceMark rm;
const ABIDescriptor abi = ForeignGlobals::parse_abi_descriptor(jabi);
const CallRegs conv = ForeignGlobals::parse_call_regs(jconv);
assert(conv._rets_length <= 1, "no multi reg returns");
CodeBuffer buffer("upcall_stub_linkToNative", /* code_size = */ 1024, /* locs_size = */ 1024);
int register_size = sizeof(uintptr_t);
int buffer_alignment = xmm_reg_size;
int out_arg_area = -1;
BasicType ret_type;
GrowableArray<ArgMove> arg_moves = compute_argument_shuffle(entry, out_arg_area, conv, ret_type);
assert(out_arg_area != -1, "Should have been set");
DEBUG_ONLY(print_arg_moves(arg_moves, entry);)
// out_arg_area (for stack arguments) doubles as shadow space for native calls.
// make sure it is big enough.
if (out_arg_area < frame::arg_reg_save_area_bytes) {
out_arg_area = frame::arg_reg_save_area_bytes;
}
int reg_save_area_size = compute_reg_save_area_size(abi);
int arg_save_area_size = compute_arg_save_area_size(conv);
// To spill receiver during deopt
int deopt_spill_size = 1 * BytesPerWord;
int shuffle_area_offset = 0;
int deopt_spill_offset = shuffle_area_offset + out_arg_area;
int arg_save_area_offset = deopt_spill_offset + deopt_spill_size;
int reg_save_area_offset = arg_save_area_offset + arg_save_area_size;
int auxiliary_saves_offset = reg_save_area_offset + reg_save_area_size;
int frame_bottom_offset = auxiliary_saves_offset + sizeof(AuxiliarySaves);
ByteSize jfa_offset = in_ByteSize(auxiliary_saves_offset) + byte_offset_of(AuxiliarySaves, jfa);
ByteSize thread_offset = in_ByteSize(auxiliary_saves_offset) + byte_offset_of(AuxiliarySaves, thread);
ByteSize should_detach_offset = in_ByteSize(auxiliary_saves_offset) + byte_offset_of(AuxiliarySaves, should_detach);
int frame_size = frame_bottom_offset;
frame_size = align_up(frame_size, StackAlignmentInBytes);
// OK, the space we have allocated will look like:
//
//
// FP-> | |
// |---------------------| = frame_bottom_offset = frame_size
// | |
// | AuxiliarySaves |
// |---------------------| = auxiliary_saves_offset
// | |
// | reg_save_area |
// |---------------------| = reg_save_area_offset
// | |
// | arg_save_area |
// |---------------------| = arg_save_area_offset
// | |
// | deopt_spill |
// |---------------------| = deopt_spill_offset
// | |
// SP-> | out_arg_area | needs to be at end for shadow space
//
//
//////////////////////////////////////////////////////////////////////////////
MacroAssembler* _masm = new MacroAssembler(&buffer);
Label call_return;
address start = __ pc();
__ enter(); // set up frame
if ((abi._stack_alignment_bytes % 16) != 0) {
// stack alignment of caller is not a multiple of 16
__ andptr(rsp, -StackAlignmentInBytes); // align stack
}
// allocate frame (frame_size is also aligned, so stack is still aligned)
__ subptr(rsp, frame_size);
// we have to always spill args since we need to do a call to get the thread
// (and maybe attach it).
save_native_arguments(_masm, conv, arg_save_area_offset);
preserve_callee_saved_registers(_masm, abi, reg_save_area_offset);
__ block_comment("{ get_thread");
__ vzeroupper();
__ lea(c_rarg0, Address(rsp, should_detach_offset));
// stack already aligned
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ProgrammableUpcallHandler::maybe_attach_and_get_thread)));
__ movptr(r15_thread, rax);
__ reinit_heapbase();
__ movptr(Address(rsp, thread_offset), r15_thread);
__ block_comment("} get_thread");
// TODO:
// We expect not to be coming from JNI code, but we might be.
// We should figure out what our stance is on supporting that and then maybe add
// some more handling here for:
// - handle blocks
// - check for active exceptions (and emit an error)
__ block_comment("{ safepoint poll");
__ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
if (os::is_MP()) {
__ membar(Assembler::Membar_mask_bits(
Assembler::LoadLoad | Assembler::StoreLoad |
Assembler::LoadStore | Assembler::StoreStore));
}
// check for safepoint operation in progress and/or pending suspend requests
Label L_after_safepoint_poll;
Label L_safepoint_poll_slow_path;
__ safepoint_poll(L_safepoint_poll_slow_path, r15_thread, false /* at_return */, false /* in_nmethod */);
__ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
__ jcc(Assembler::notEqual, L_safepoint_poll_slow_path);
__ bind(L_after_safepoint_poll);
__ block_comment("} safepoint poll");
// change thread state
__ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
__ block_comment("{ reguard stack check");
Label L_reguard;
Label L_after_reguard;
__ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
__ jcc(Assembler::equal, L_reguard);
__ bind(L_after_reguard);
__ block_comment("} reguard stack check");
__ block_comment("{ argument shuffle");
// TODO merge these somehow
restore_native_arguments(_masm, conv, arg_save_area_offset);
shuffle_arguments(_masm, arg_moves);
__ block_comment("} argument shuffle");
__ block_comment("{ receiver ");
__ movptr(rscratch1, (intptr_t)receiver);
__ resolve_jobject(rscratch1, r15_thread, rscratch2);
__ movptr(j_rarg0, rscratch1);
__ block_comment("} receiver ");
__ mov_metadata(rbx, entry);
__ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx); // just in case callee is deoptimized
__ reinit_heapbase();
save_java_frame_anchor(_masm, jfa_offset, r15_thread);
__ reset_last_Java_frame(r15_thread, true);
__ call(Address(rbx, Method::from_compiled_offset()));
#ifdef ASSERT
if (conv._rets_length == 1) { // 0 or 1
VMReg j_expected_result_reg;
switch (ret_type) {
case T_BOOLEAN:
case T_BYTE:
case T_SHORT:
case T_CHAR:
case T_INT:
case T_LONG:
j_expected_result_reg = rax->as_VMReg();
break;
case T_FLOAT:
case T_DOUBLE:
j_expected_result_reg = xmm0->as_VMReg();
break;
default:
fatal("unexpected return type: %s", type2name(ret_type));
}
// No need to move for now, since CallArranger can pick a return type
// that goes in the same reg for both CCs. But, at least assert they are the same
assert(conv._ret_regs[0] == j_expected_result_reg,
"unexpected result register: %s != %s", conv._ret_regs[0]->name(), j_expected_result_reg->name());
}
#endif
__ bind(call_return);
// also sets last Java frame
__ movptr(r15_thread, Address(rsp, thread_offset));
// TODO corrupted thread pointer causes havoc. Can we verify it here?
restore_java_frame_anchor(_masm, jfa_offset, r15_thread); // also transitions to native state
__ block_comment("{ maybe_detach_thread");
Label L_after_detach;
__ cmpb(Address(rsp, should_detach_offset), 0);
__ jcc(Assembler::equal, L_after_detach);
__ vzeroupper();
__ mov(c_rarg0, r15_thread);
// stack already aligned
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ProgrammableUpcallHandler::detach_thread)));
__ reinit_heapbase();
__ bind(L_after_detach);
__ block_comment("} maybe_detach_thread");
restore_callee_saved_registers(_masm, abi, reg_save_area_offset);
__ leave();
__ ret(0);
//////////////////////////////////////////////////////////////////////////////
__ block_comment("{ L_safepoint_poll_slow_path");
__ bind(L_safepoint_poll_slow_path);
__ vzeroupper();
__ mov(c_rarg0, r15_thread);
// stack already aligned
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
__ reinit_heapbase();
__ jmp(L_after_safepoint_poll);
__ block_comment("} L_safepoint_poll_slow_path");
//////////////////////////////////////////////////////////////////////////////
__ block_comment("{ L_reguard");
__ bind(L_reguard);
__ vzeroupper();
// stack already aligned
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
__ reinit_heapbase();
__ jmp(L_after_reguard);
__ block_comment("} L_reguard");
//////////////////////////////////////////////////////////////////////////////
__ block_comment("{ exception handler");
intptr_t exception_handler_offset = __ pc() - start;
// TODO: this is always the same, can we bypass and call handle_uncaught_exception directly?
// native caller has no idea how to handle exceptions
// we just crash here. Up to callee to catch exceptions.
__ verify_oop(rax);
__ vzeroupper();
__ mov(c_rarg0, rax);
__ andptr(rsp, -StackAlignmentInBytes); // align stack as required by ABI
__ subptr(rsp, frame::arg_reg_save_area_bytes); // windows (not really needed)
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ProgrammableUpcallHandler::handle_uncaught_exception)));
__ should_not_reach_here();
__ block_comment("} exception handler");
_masm->flush();
#ifndef PRODUCT
stringStream ss;
ss.print("optimized_upcall_stub_%s", entry->signature()->as_C_string());
const char* name = _masm->code_string(ss.as_string());
#else // PRODUCT
const char* name = "optimized_upcall_stub";
#endif // PRODUCT
OptimizedEntryBlob* blob = OptimizedEntryBlob::create(name, &buffer, exception_handler_offset, receiver, jfa_offset);
if (TraceOptimizedUpcallStubs) {
blob->print_on(tty);
Disassembler::decode(blob, tty);
}
return blob->code_begin();
}
bool ProgrammableUpcallHandler::supports_optimized_upcalls() {
return true;
}
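The safepoint-poll block emitted above is the standard native-to-Java transition, written out by hand in assembly: publish _thread_in_native_trans, issue a full fence so a concurrent safepoint request cannot be missed, poll, and only then enter _thread_in_Java. A standalone model of that protocol (plain C++, not HotSpot code):

#include <atomic>
#include <cstdio>

enum ThreadState { thread_in_native, thread_in_native_trans, thread_in_Java };

std::atomic<ThreadState> state{thread_in_native};
std::atomic<bool> safepoint_pending{false};

// Stand-in for JavaThread::check_special_condition_for_native_trans:
// cooperate with the safepoint before entering Java.
void native_trans_slow_path() {
  std::printf("slow path: blocking for safepoint\n");
  safepoint_pending.store(false);
}

void transition_native_to_java() {
  state.store(thread_in_native_trans);
  // The StoreLoad barrier: the state store must be visible before we read
  // the poll word, or the VM could miss this thread (this is the membar
  // the stub emits under os::is_MP()).
  std::atomic_thread_fence(std::memory_order_seq_cst);
  if (safepoint_pending.load()) {
    native_trans_slow_path();
  }
  state.store(thread_in_Java);
}

int main() {
  safepoint_pending.store(true);
  transition_native_to_java();
  std::printf("now thread_in_Java\n");
  return 0;
}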


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -60,11 +60,6 @@ ciNativeEntryPoint::ciNativeEntryPoint(instanceHandle h_i) : ciInstance(h_i), _n
_ret_moves = getVMRegArray(CURRENT_ENV->get_object(jdk_internal_invoke_NativeEntryPoint::returnMoves(get_oop()))->as_array());
}
address ciNativeEntryPoint::entry_point() const {
VM_ENTRY_MARK;
return jdk_internal_invoke_NativeEntryPoint::addr(get_oop());
}
jint ciNativeEntryPoint::shadow_space() const {
VM_ENTRY_MARK;
return jdk_internal_invoke_NativeEntryPoint::shadow_space(get_oop());


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,7 +44,6 @@ public:
// What kind of ciObject is this?
bool is_native_entry_point() const { return true; }
address entry_point() const;
jint shadow_space() const;
VMReg* argMoves() const;
VMReg* returnMoves() const;


@@ -3890,7 +3890,6 @@ bool java_lang_invoke_LambdaForm::is_instance(oop obj) {
return obj != NULL && is_subclass(obj->klass());
}
int jdk_internal_invoke_NativeEntryPoint::_addr_offset;
int jdk_internal_invoke_NativeEntryPoint::_shadow_space_offset;
int jdk_internal_invoke_NativeEntryPoint::_argMoves_offset;
int jdk_internal_invoke_NativeEntryPoint::_returnMoves_offset;
@@ -3899,7 +3898,6 @@ int jdk_internal_invoke_NativeEntryPoint::_method_type_offset;
int jdk_internal_invoke_NativeEntryPoint::_name_offset;
#define NEP_FIELDS_DO(macro) \
macro(_addr_offset, k, "addr", long_signature, false); \
macro(_shadow_space_offset, k, "shadowSpace", int_signature, false); \
macro(_argMoves_offset, k, "argMoves", long_array_signature, false); \
macro(_returnMoves_offset, k, "returnMoves", long_array_signature, false); \
@@ -3922,10 +3920,6 @@ void jdk_internal_invoke_NativeEntryPoint::serialize_offsets(SerializeClosure* f
}
#endif
address jdk_internal_invoke_NativeEntryPoint::addr(oop entry) {
return (address)entry->long_field(_addr_offset);
}
jint jdk_internal_invoke_NativeEntryPoint::shadow_space(oop entry) {
return entry->int_field(_shadow_space_offset);
}


@@ -1043,7 +1043,6 @@ class jdk_internal_invoke_NativeEntryPoint: AllStatic {
friend class JavaClasses;
private:
static int _addr_offset; // type is jlong
static int _shadow_space_offset;
static int _argMoves_offset;
static int _returnMoves_offset;
@@ -1057,7 +1056,6 @@ class jdk_internal_invoke_NativeEntryPoint: AllStatic {
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// Accessors
static address addr(oop entry);
static jint shadow_space(oop entry);
static oop argMoves(oop entry);
static oop returnMoves(oop entry);
@@ -1073,7 +1071,6 @@ class jdk_internal_invoke_NativeEntryPoint: AllStatic {
static bool is_instance(oop obj);
// Accessors for code generation:
static int addr_offset_in_bytes() { return _addr_offset; }
static int shadow_space_offset_in_bytes() { return _shadow_space_offset; }
static int argMoves_offset_in_bytes() { return _argMoves_offset; }
static int returnMoves_offset_in_bytes() { return _returnMoves_offset; }


@@ -346,7 +346,7 @@
template(DEFAULT_CONTEXT_name, "DEFAULT_CONTEXT") \
NOT_LP64( do_alias(intptr_signature, int_signature) ) \
LP64_ONLY( do_alias(intptr_signature, long_signature) ) \
/* Panama Support */ \
/* Foreign API Support */ \
template(jdk_internal_invoke_NativeEntryPoint, "jdk/internal/invoke/NativeEntryPoint") \
template(jdk_internal_invoke_NativeEntryPoint_signature, "Ljdk/internal/invoke/NativeEntryPoint;") \
template(jdk_incubator_foreign_MemoryAccess, "jdk/incubator/foreign/MemoryAccess") \


@@ -709,3 +709,30 @@ void SingletonBlob::print_value_on(outputStream* st) const {
void DeoptimizationBlob::print_value_on(outputStream* st) const {
st->print_cr("Deoptimization (frame not available)");
}
// Implementation of OptimizedEntryBlob
OptimizedEntryBlob::OptimizedEntryBlob(const char* name, int size, CodeBuffer* cb, intptr_t exception_handler_offset,
jobject receiver, ByteSize jfa_sp_offset) :
BufferBlob(name, size, cb),
_exception_handler_offset(exception_handler_offset),
_receiver(receiver),
_jfa_sp_offset(jfa_sp_offset) {
CodeCache::commit(this);
}
OptimizedEntryBlob* OptimizedEntryBlob::create(const char* name, CodeBuffer* cb, intptr_t exception_handler_offset,
jobject receiver, ByteSize jfa_sp_offset) {
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
OptimizedEntryBlob* blob = nullptr;
unsigned int size = CodeBlob::allocation_size(cb, sizeof(OptimizedEntryBlob));
{
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
blob = new (size) OptimizedEntryBlob(name, size, cb, exception_handler_offset, receiver, jfa_sp_offset);
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
return blob;
}


@@ -58,6 +58,7 @@ struct CodeBlobType {
// AdapterBlob : Used to hold C2I/I2C adapters
// VtableBlob : Used for holding vtable chunks
// MethodHandlesAdapterBlob : Used to hold MethodHandles adapters
// OptimizedEntryBlob : Used for upcalls from native code
// RuntimeStub : Call to VM runtime methods
// SingletonBlob : Super-class for all blobs that exist in only one instance
// DeoptimizationBlob : Used for deoptimization
@@ -75,6 +76,8 @@ struct CodeBlobType {
class CodeBlobLayout;
class OptimizedEntryBlob; // for as_optimized_entry_blob()
class JavaFrameAnchor; // for OptimizedEntryBlob::jfa_for_frame
class CodeBlob {
friend class VMStructs;
@@ -136,6 +139,7 @@ public:
virtual bool is_vtable_blob() const { return false; }
virtual bool is_method_handles_adapter_blob() const { return false; }
virtual bool is_compiled() const { return false; }
virtual bool is_optimized_entry_blob() const { return false; }
inline bool is_compiled_by_c1() const { return _type == compiler_c1; };
inline bool is_compiled_by_c2() const { return _type == compiler_c2; };
@@ -149,6 +153,7 @@ public:
CompiledMethod* as_compiled_method_or_null() { return is_compiled() ? (CompiledMethod*) this : NULL; }
CompiledMethod* as_compiled_method() { assert(is_compiled(), "must be compiled"); return (CompiledMethod*) this; }
CodeBlob* as_codeblob_or_null() const { return (CodeBlob*) this; }
OptimizedEntryBlob* as_optimized_entry_blob() const { assert(is_optimized_entry_blob(), "must be entry blob"); return (OptimizedEntryBlob*) this; }
// Boundaries
address header_begin() const { return (address) this; }
@@ -379,6 +384,7 @@ class BufferBlob: public RuntimeBlob {
friend class AdapterBlob;
friend class VtableBlob;
friend class MethodHandlesAdapterBlob;
friend class OptimizedEntryBlob;
friend class WhiteBox;
private:
@@ -718,4 +724,33 @@ class SafepointBlob: public SingletonBlob {
bool is_safepoint_stub() const { return true; }
};
//----------------------------------------------------------------------------------------------------
// For optimized upcall stubs
class OptimizedEntryBlob: public BufferBlob {
private:
intptr_t _exception_handler_offset;
jobject _receiver;
ByteSize _jfa_sp_offset;
OptimizedEntryBlob(const char* name, int size, CodeBuffer* cb, intptr_t exception_handler_offset,
jobject receiver, ByteSize jfa_sp_offset);
public:
// Creation
static OptimizedEntryBlob* create(const char* name, CodeBuffer* cb,
intptr_t exception_handler_offset, jobject receiver,
ByteSize jfa_sp_offset);
address exception_handler() { return code_begin() + _exception_handler_offset; }
jobject receiver() { return _receiver; }
ByteSize jfa_sp_offset() const { return _jfa_sp_offset; }
// defined in frame_ARCH.cpp
JavaFrameAnchor* jfa_for_frame(const frame& frame) const;
// Typing
virtual bool is_optimized_entry_blob() const override { return true; }
};
#endif // SHARE_CODE_CODEBLOB_HPP
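Callers are expected to use the usual CodeBlob typing pattern; a short sketch built only from the declarations above (fr and pc are assumed to be in scope):

CodeBlob* cb = CodeCache::find_blob(pc);
if (cb != NULL && cb->is_optimized_entry_blob()) {
  OptimizedEntryBlob* oeb = cb->as_optimized_entry_blob();
  // The anchor stored in the stub frame tells the stack walker where the
  // last Java frame is (see frame::sender_for_optimized_entry_frame above).
  JavaFrameAnchor* jfa = oeb->jfa_for_frame(fr);
}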


@@ -73,6 +73,7 @@
LOG_TAG(exceptions) \
LOG_TAG(exit) \
LOG_TAG(fingerprint) \
DEBUG_ONLY(LOG_TAG(foreign)) \
LOG_TAG(free) \
LOG_TAG(freelist) \
LOG_TAG(gc) \
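Since the tag is wrapped in DEBUG_ONLY, the argument-shuffle tracing shown earlier is available only in debug builds; there it can be enabled with the usual unified-logging switch, for example:

java -Xlog:foreign=trace ...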


@@ -418,8 +418,9 @@ class LateInlineMHCallGenerator : public LateInlineCallGenerator {
bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
// Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
bool allow_inline = C->inlining_incrementally();
CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, _input_not_const);
assert(!_input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place
bool input_not_const = true;
CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place
if (cg != NULL) {
assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
@@ -1054,10 +1055,11 @@ CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* c
class NativeCallGenerator : public CallGenerator {
private:
address _call_addr;
ciNativeEntryPoint* _nep;
public:
NativeCallGenerator(ciMethod* m, ciNativeEntryPoint* nep)
: CallGenerator(m), _nep(nep) {}
NativeCallGenerator(ciMethod* m, address call_addr, ciNativeEntryPoint* nep)
: CallGenerator(m), _call_addr(call_addr), _nep(nep) {}
virtual JVMState* generate(JVMState* jvms);
};
@@ -1065,13 +1067,12 @@ public:
JVMState* NativeCallGenerator::generate(JVMState* jvms) {
GraphKit kit(jvms);
Node* call = kit.make_native_call(tf(), method()->arg_size(), _nep); // -fallback, - nep
Node* call = kit.make_native_call(_call_addr, tf(), method()->arg_size(), _nep); // -fallback, - nep
if (call == NULL) return NULL;
kit.C->print_inlining_update(this);
address addr = _nep->entry_point();
if (kit.C->log() != NULL) {
kit.C->log()->elem("l2n_intrinsification_success bci='%d' entry_point='" INTPTR_FORMAT "'", jvms->bci(), p2i(addr));
kit.C->log()->elem("l2n_intrinsification_success bci='%d' entry_point='" INTPTR_FORMAT "'", jvms->bci(), p2i(_call_addr));
}
return kit.transfer_exceptions_into_jvms();
@@ -1204,12 +1205,16 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
case vmIntrinsics::_linkToNative:
{
Node* nep = kit.argument(callee->arg_size() - 1);
if (nep->Opcode() == Op_ConP) {
Node* addr_n = kit.argument(1); // target address
Node* nep_n = kit.argument(callee->arg_size() - 1); // NativeEntryPoint
// This check needs to be kept in sync with the one in CallStaticJavaNode::Ideal
if (addr_n->Opcode() == Op_ConL && nep_n->Opcode() == Op_ConP) {
input_not_const = false;
const TypeOopPtr* oop_ptr = nep->bottom_type()->is_oopptr();
ciNativeEntryPoint* nep = oop_ptr->const_oop()->as_native_entry_point();
return new NativeCallGenerator(callee, nep);
const TypeLong* addr_t = addr_n->bottom_type()->is_long();
const TypeOopPtr* nep_t = nep_n->bottom_type()->is_oopptr();
address addr = (address) addr_t->get_con();
ciNativeEntryPoint* nep = nep_t->const_oop()->as_native_entry_point();
return new NativeCallGenerator(callee, addr, nep);
} else {
print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
"NativeEntryPoint not constant");


@@ -1066,6 +1066,12 @@ Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
phase->C->prepend_late_inline(cg);
set_generator(NULL);
}
} else if (iid == vmIntrinsics::_linkToNative) {
if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP /* NEP */
&& in(TypeFunc::Parms + 1)->Opcode() == Op_ConL /* address */) {
phase->C->prepend_late_inline(cg);
set_generator(NULL);
}
} else {
assert(callee->has_member_arg(), "wrong type of call?");
if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {


@@ -2566,8 +2566,13 @@ Node* GraphKit::sign_extend_short(Node* in) {
}
//-----------------------------make_native_call-------------------------------
Node* GraphKit::make_native_call(const TypeFunc* call_type, uint nargs, ciNativeEntryPoint* nep) {
uint n_filtered_args = nargs - 2; // -fallback, -nep;
Node* GraphKit::make_native_call(address call_addr, const TypeFunc* call_type, uint nargs, ciNativeEntryPoint* nep) {
// Select just the actual call args to pass on
// [MethodHandle fallback, long addr, HALF addr, ... args , NativeEntryPoint nep]
// | |
// V V
// [ ... args ]
uint n_filtered_args = nargs - 4; // -fallback, -addr (2), -nep;
ResourceMark rm;
Node** argument_nodes = NEW_RESOURCE_ARRAY(Node*, n_filtered_args);
const Type** arg_types = TypeTuple::fields(n_filtered_args);
@@ -2577,7 +2582,7 @@ Node* GraphKit::make_native_call(const TypeFunc* call_type, uint nargs, ciNative
{
for (uint vm_arg_pos = 0, java_arg_read_pos = 0;
vm_arg_pos < n_filtered_args; vm_arg_pos++) {
uint vm_unfiltered_arg_pos = vm_arg_pos + 1; // +1 to skip fallback handle argument
uint vm_unfiltered_arg_pos = vm_arg_pos + 3; // +3 to skip fallback handle argument and addr (2 since long)
Node* node = argument(vm_unfiltered_arg_pos);
const Type* type = call_type->domain()->field_at(TypeFunc::Parms + vm_unfiltered_arg_pos);
VMReg reg = type == Type::HALF
@@ -2613,7 +2618,6 @@ Node* GraphKit::make_native_call(const TypeFunc* call_type, uint nargs, ciNative
TypeTuple::make(TypeFunc::Parms + n_returns, ret_types)
);
address call_addr = nep->entry_point();
if (nep->need_transition()) {
RuntimeStub* invoker = SharedRuntime::make_native_invoker(call_addr,
nep->shadow_space(),
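With the target address now passed as an explicit long argument, the Java-level signature carries four bookkeeping slots that must be skipped when collecting the real native arguments; a standalone model of the window arithmetic above (plain C++):

#include <cstdio>

int main() {
  // Java-level layout: [fallback MH, addr lo, addr hi, ...args..., nep]
  const char* java_args[] = { "fallback", "addr", "addr(half)", "a", "b", "nep" };
  unsigned nargs = 6;
  unsigned n_filtered_args = nargs - 4;  // -fallback, -addr (2 slots), -nep
  for (unsigned i = 0; i < n_filtered_args; i++) {
    std::printf("native arg: %s\n", java_args[i + 3]); // +3 skips fallback and addr
  }
  return 0;
}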


@@ -801,7 +801,7 @@ class GraphKit : public Phase {
Node* sign_extend_byte(Node* in);
Node* sign_extend_short(Node* in);
Node* make_native_call(const TypeFunc* call_type, uint nargs, ciNativeEntryPoint* nep);
Node* make_native_call(address call_addr, const TypeFunc* call_type, uint nargs, ciNativeEntryPoint* nep);
enum { // flag values for make_runtime_call
RC_NO_FP = 1, // CallLeafNoFPNode


@@ -880,7 +880,7 @@ uint PhaseCFG::sched_call(Block* block, uint node_cnt, Node_List& worklist, Grow
save_policy = _matcher._register_save_policy;
break;
case Op_CallNative:
// We use the c reg save policy here since Panama
// We use the c reg save policy here since Foreign Linker
// only supports the C ABI currently.
// TODO compute actual save policy based on nep->abi
save_policy = _matcher._c_reg_save_policy;


@@ -59,23 +59,28 @@ const BufferLayout ForeignGlobals::parse_buffer_layout(jobject jlayout) {
return instance().parse_buffer_layout_impl(jlayout);
}
const CallRegs ForeignGlobals::parse_call_regs(jobject jconv) {
return instance().parse_call_regs_impl(jconv);
}
ForeignGlobals::ForeignGlobals() {
JavaThread* current_thread = JavaThread::current();
ResourceMark rm(current_thread);
// ABIDescriptor
InstanceKlass* k_ABI = find_InstanceKlass(FOREIGN_ABI "ABIDescriptor", current_thread);
const char* strVMSArray = "[[L" FOREIGN_ABI "VMStorage;";
Symbol* symVMSArray = SymbolTable::new_symbol(strVMSArray, (int)strlen(strVMSArray));
ABI.inputStorage_offset = field_offset(k_ABI, "inputStorage", symVMSArray);
ABI.outputStorage_offset = field_offset(k_ABI, "outputStorage", symVMSArray);
ABI.volatileStorage_offset = field_offset(k_ABI, "volatileStorage", symVMSArray);
const char* strVMSArrayArray = "[[L" FOREIGN_ABI "VMStorage;";
Symbol* symVMSArrayArray = SymbolTable::new_symbol(strVMSArrayArray);
ABI.inputStorage_offset = field_offset(k_ABI, "inputStorage", symVMSArrayArray);
ABI.outputStorage_offset = field_offset(k_ABI, "outputStorage", symVMSArrayArray);
ABI.volatileStorage_offset = field_offset(k_ABI, "volatileStorage", symVMSArrayArray);
ABI.stackAlignment_offset = field_offset(k_ABI, "stackAlignment", vmSymbols::int_signature());
ABI.shadowSpace_offset = field_offset(k_ABI, "shadowSpace", vmSymbols::int_signature());
// VMStorage
InstanceKlass* k_VMS = find_InstanceKlass(FOREIGN_ABI "VMStorage", current_thread);
VMS.index_offset = field_offset(k_VMS, "index", vmSymbols::int_signature());
VMS.type_offset = field_offset(k_VMS, "type", vmSymbols::int_signature());
// BufferLayout
InstanceKlass* k_BL = find_InstanceKlass(FOREIGN_ABI "BufferLayout", current_thread);
@@ -85,4 +90,41 @@ ForeignGlobals::ForeignGlobals() {
BL.stack_args_offset = field_offset(k_BL, "stack_args", vmSymbols::long_signature());
BL.input_type_offsets_offset = field_offset(k_BL, "input_type_offsets", vmSymbols::long_array_signature());
BL.output_type_offsets_offset = field_offset(k_BL, "output_type_offsets", vmSymbols::long_array_signature());
// CallRegs
const char* strVMSArray = "[L" FOREIGN_ABI "VMStorage;";
Symbol* symVMSArray = SymbolTable::new_symbol(strVMSArray);
InstanceKlass* k_CC = find_InstanceKlass(FOREIGN_ABI "ProgrammableUpcallHandler$CallRegs", current_thread);
CallConvOffsets.arg_regs_offset = field_offset(k_CC, "argRegs", symVMSArray);
CallConvOffsets.ret_regs_offset = field_offset(k_CC, "retRegs", symVMSArray);
}
void CallRegs::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
int src_pos = 0;
for (uint i = 0; i < argcnt; i++) {
switch (sig_bt[i]) {
case T_BOOLEAN:
case T_CHAR:
case T_BYTE:
case T_SHORT:
case T_INT:
case T_FLOAT:
assert(src_pos < _args_length, "oob");
parm_regs[i].set1(_arg_regs[src_pos++]);
break;
case T_LONG:
case T_DOUBLE:
assert((i + 1) < argcnt && sig_bt[i + 1] == T_VOID, "expecting half");
assert(src_pos < _args_length, "oob");
parm_regs[i].set2(_arg_regs[src_pos++]);
break;
case T_VOID: // Halves of longs and doubles
assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
parm_regs[i].set_bad();
break;
default:
ShouldNotReachHere();
break;
}
}
}
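The T_VOID entries exist because longs and doubles occupy two slots in the flattened signature while consuming only one entry from the register array; a standalone model of that accounting (plain C++, not HotSpot types):

#include <cassert>
#include <cstdio>
#include <vector>

enum BasicType { T_INT, T_LONG, T_VOID };

// Count how many registers a flattened signature consumes: the T_VOID
// "upper half" slots of longs take no register of their own.
static int regs_consumed(const std::vector<BasicType>& sig) {
  int src_pos = 0;
  for (size_t i = 0; i < sig.size(); i++) {
    switch (sig[i]) {
      case T_INT:
        src_pos++;
        break;
      case T_LONG:
        assert(i + 1 < sig.size() && sig[i + 1] == T_VOID); // expecting half
        src_pos++;
        break;
      case T_VOID:
        break; // half of the preceding long: no register
    }
  }
  return src_pos;
}

int main() {
  // (int, long) flattens to { T_INT, T_LONG, T_VOID }: two registers.
  std::printf("%d\n", regs_consumed({T_INT, T_LONG, T_VOID}));
  return 0;
}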


@@ -24,12 +24,23 @@
#ifndef SHARE_PRIMS_FOREIGN_GLOBALS
#define SHARE_PRIMS_FOREIGN_GLOBALS
#include "code/vmreg.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#include CPU_HEADER(foreign_globals)
struct CallRegs {
VMReg* _arg_regs;
int _args_length;
VMReg* _ret_regs;
int _rets_length;
void calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const;
};
class ForeignGlobals {
private:
struct {
@@ -42,6 +53,7 @@ private:
struct {
int index_offset;
int type_offset;
} VMS;
struct {
@@ -53,6 +65,11 @@ private:
int output_type_offsets_offset;
} BL;
struct {
int arg_regs_offset;
int ret_regs_offset;
} CallConvOffsets;
ForeignGlobals();
static const ForeignGlobals& instance();
@@ -65,9 +82,11 @@ private:
const ABIDescriptor parse_abi_descriptor_impl(jobject jabi) const;
const BufferLayout parse_buffer_layout_impl(jobject jlayout) const;
const CallRegs parse_call_regs_impl(jobject jconv) const;
public:
static const ABIDescriptor parse_abi_descriptor(jobject jabi);
static const BufferLayout parse_buffer_layout(jobject jlayout);
static const CallRegs parse_call_regs(jobject jconv);
};
#endif // SHARE_PRIMS_FOREIGN_GLOBALS


@@ -37,7 +37,8 @@ static JNINativeMethod NEP_methods[] = {
{CC "vmStorageToVMReg", CC "(II)J", FN_PTR(NEP_vmStorageToVMReg)},
};
JNI_LEAF(void, JVM_RegisterNativeEntryPointMethods(JNIEnv *env, jclass NEP_class))
JNI_ENTRY(void, JVM_RegisterNativeEntryPointMethods(JNIEnv *env, jclass NEP_class))
ThreadToNativeFromVM ttnfv(thread);
int status = env->RegisterNatives(NEP_class, NEP_methods, sizeof(NEP_methods)/sizeof(JNINativeMethod));
guarantee(status == JNI_OK && !env->ExceptionOccurred(),
"register jdk.internal.invoke.NativeEntryPoint natives");


@@ -59,7 +59,8 @@ static JNINativeMethod PI_methods[] = {
{CC "generateAdapter", CC "(" FOREIGN_ABI "/ABIDescriptor;" FOREIGN_ABI "/BufferLayout;" ")J", FN_PTR(PI_generateAdapter)}
};
JNI_LEAF(void, JVM_RegisterProgrammableInvokerMethods(JNIEnv *env, jclass PI_class))
JNI_ENTRY(void, JVM_RegisterProgrammableInvokerMethods(JNIEnv *env, jclass PI_class))
ThreadToNativeFromVM ttnfv(thread);
int status = env->RegisterNatives(PI_class, PI_methods, sizeof(PI_methods)/sizeof(JNINativeMethod));
guarantee(status == JNI_OK && !env->ExceptionOccurred(),
"register jdk.internal.foreign.abi.programmable.ProgrammableInvoker natives");


@@ -22,8 +22,10 @@
*/
#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "compiler/compilationPolicy.hpp"
#include "memory/resourceArea.hpp"
#include "prims/universalUpcallHandler.hpp"
#include "runtime/interfaceSupport.inline.hpp"
@@ -49,17 +51,29 @@ void ProgrammableUpcallHandler::upcall_helper(JavaThread* thread, jobject rec, a
JavaCalls::call_static(&result, upcall_method.klass, upcall_method.name, upcall_method.sig, &args, CATCH);
}
void ProgrammableUpcallHandler::attach_thread_and_do_upcall(jobject rec, address buff) {
Thread* ProgrammableUpcallHandler::maybe_attach_and_get_thread(bool* should_detach) {
Thread* thread = Thread::current_or_null();
bool should_detach = false;
if (thread == nullptr) {
JavaVM_ *vm = (JavaVM *)(&main_vm);
JNIEnv* p_env = nullptr; // unused
jint result = vm->functions->AttachCurrentThread(vm, (void**) &p_env, nullptr);
guarantee(result == JNI_OK, "Could not attach thread for upcall. JNI error code: %d", result);
should_detach = true;
*should_detach = true;
thread = Thread::current();
} else {
*should_detach = false;
}
return thread;
}
void ProgrammableUpcallHandler::detach_thread(Thread* thread) {
JavaVM_ *vm = (JavaVM *)(&main_vm);
vm->functions->DetachCurrentThread(vm);
}
void ProgrammableUpcallHandler::attach_thread_and_do_upcall(jobject rec, address buff) {
bool should_detach = false;
Thread* thread = maybe_attach_and_get_thread(&should_detach);
{
MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, thread));
@ -67,8 +81,7 @@ void ProgrammableUpcallHandler::attach_thread_and_do_upcall(jobject rec, address
}
if (should_detach) {
JavaVM_ *vm = (JavaVM *)(&main_vm);
vm->functions->DetachCurrentThread(vm);
detach_thread(thread);
}
}
@ -86,30 +99,59 @@ ProgrammableUpcallHandler::ProgrammableUpcallHandler() {
upcall_method.klass = k;
upcall_method.name = SymbolTable::new_symbol("invoke");
upcall_method.sig = SymbolTable::new_symbol("(L" FOREIGN_ABI "ProgrammableUpcallHandler;J)V");
upcall_method.sig = SymbolTable::new_symbol("(Ljava/lang/invoke/MethodHandle;J)V");
assert(upcall_method.klass->lookup_method(upcall_method.name, upcall_method.sig) != nullptr,
"Could not find upcall method: %s.%s%s", upcall_method.klass->external_name(),
upcall_method.name->as_C_string(), upcall_method.sig->as_C_string());
}
JNI_ENTRY(jlong, PUH_AllocateUpcallStub(JNIEnv *env, jobject rec, jobject abi, jobject buffer_layout))
void ProgrammableUpcallHandler::handle_uncaught_exception(oop exception) {
// Based on CATCH macro
tty->print_cr("Uncaught exception:");
exception->print();
ShouldNotReachHere();
}
JVM_ENTRY(jlong, PUH_AllocateUpcallStub(JNIEnv *env, jclass unused, jobject rec, jobject abi, jobject buffer_layout))
Handle receiver(THREAD, JNIHandles::resolve(rec));
jobject global_rec = JNIHandles::make_global(receiver);
return (jlong) ProgrammableUpcallHandler::generate_upcall_stub(global_rec, abi, buffer_layout);
JNI_END
JVM_ENTRY(jlong, PUH_AllocateOptimizedUpcallStub(JNIEnv *env, jclass unused, jobject mh, jobject abi, jobject conv))
Handle mh_h(THREAD, JNIHandles::resolve(mh));
jobject mh_j = JNIHandles::make_global(mh_h);
oop lform = java_lang_invoke_MethodHandle::form(mh_h());
oop vmentry = java_lang_invoke_LambdaForm::vmentry(lform);
Method* entry = java_lang_invoke_MemberName::vmtarget(vmentry);
const methodHandle mh_entry(THREAD, entry);
assert(entry->method_holder()->is_initialized(), "no clinit barrier");
CompilationPolicy::compile_if_required(mh_entry, CHECK_0);
return (jlong) ProgrammableUpcallHandler::generate_optimized_upcall_stub(mh_j, entry, abi, conv);
JVM_END
JVM_ENTRY(jboolean, PUH_SupportsOptimizedUpcalls(JNIEnv *env, jclass unused))
return (jboolean) ProgrammableUpcallHandler::supports_optimized_upcalls();
JVM_END
#define CC (char*) /*cast a literal from (const char*)*/
#define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f)
static JNINativeMethod PUH_methods[] = {
{CC "allocateUpcallStub", CC "(L" FOREIGN_ABI "ABIDescriptor;L" FOREIGN_ABI "BufferLayout;" ")J", FN_PTR(PUH_AllocateUpcallStub)},
{CC "allocateUpcallStub", CC "(" "Ljava/lang/invoke/MethodHandle;" "L" FOREIGN_ABI "ABIDescriptor;" "L" FOREIGN_ABI "BufferLayout;" ")J", FN_PTR(PUH_AllocateUpcallStub)},
{CC "allocateOptimizedUpcallStub", CC "(" "Ljava/lang/invoke/MethodHandle;" "L" FOREIGN_ABI "ABIDescriptor;" "L" FOREIGN_ABI "ProgrammableUpcallHandler$CallRegs;" ")J", FN_PTR(PUH_AllocateOptimizedUpcallStub)},
{CC "supportsOptimizedUpcalls", CC "()Z", FN_PTR(PUH_SupportsOptimizedUpcalls)},
};
/**
* This one function is exported, used by NativeLookup.
*/
JNI_LEAF(void, JVM_RegisterProgrammableUpcallHandlerMethods(JNIEnv *env, jclass PUH_class))
JNI_ENTRY(void, JVM_RegisterProgrammableUpcallHandlerMethods(JNIEnv *env, jclass PUH_class))
ThreadToNativeFromVM ttnfv(thread);
int status = env->RegisterNatives(PUH_class, PUH_methods, sizeof(PUH_methods)/sizeof(JNINativeMethod));
guarantee(status == JNI_OK && !env->ExceptionOccurred(),
"register jdk.internal.foreign.abi.ProgrammableUpcallHandler natives");

View File

@ -45,8 +45,14 @@ private:
static void upcall_helper(JavaThread* thread, jobject rec, address buff);
static void attach_thread_and_do_upcall(jobject rec, address buff);
static void handle_uncaught_exception(oop exception);
static Thread* maybe_attach_and_get_thread(bool* should_detach);
static void detach_thread(Thread* thread);
public:
static address generate_optimized_upcall_stub(jobject mh, Method* entry, jobject jabi, jobject jconv);
static address generate_upcall_stub(jobject rec, jobject abi, jobject buffer_layout);
static bool supports_optimized_upcalls();
};
#endif // SHARE_VM_PRIMS_UNIVERSALUPCALLHANDLER_HPP

View File

@ -36,8 +36,14 @@ JVM_ENTRY(static jboolean, UH_FreeUpcallStub0(JNIEnv *env, jobject _unused, jlon
return false;
}
//free global JNI handle
jobject* rec_ptr = (jobject*)(void*)cb -> content_begin();
JNIHandles::destroy_global(*rec_ptr);
jobject handle = NULL;
if (cb->is_optimized_entry_blob()) {
handle = ((OptimizedEntryBlob*)cb)->receiver();
} else {
jobject* handle_ptr = (jobject*)(void*)cb->content_begin();
handle = *handle_ptr;
}
JNIHandles::destroy_global(handle);
//free code blob
CodeCache::free(cb);
return true;

View File

@ -2300,13 +2300,13 @@ WB_ENTRY(void, WB_CheckThreadObjOfTerminatingThread(JNIEnv* env, jobject wb, job
}
WB_END
WB_ENTRY(void, WB_VerifyFrames(JNIEnv* env, jobject wb, jboolean log))
WB_ENTRY(void, WB_VerifyFrames(JNIEnv* env, jobject wb, jboolean log, jboolean update_map))
intx tty_token = -1;
if (log) {
tty_token = ttyLocker::hold_tty();
tty->print_cr("[WhiteBox::VerifyFrames] Walking Frames");
}
for (StackFrameStream fst(JavaThread::current(), true, true); !fst.is_done(); fst.next()) {
for (StackFrameStream fst(JavaThread::current(), update_map, true); !fst.is_done(); fst.next()) {
frame* current_frame = fst.current();
if (log) {
current_frame->print_value();
@ -2566,7 +2566,7 @@ static JNINativeMethod methods[] = {
{CC"handshakeWalkStack", CC"(Ljava/lang/Thread;Z)I", (void*)&WB_HandshakeWalkStack },
{CC"asyncHandshakeWalkStack", CC"(Ljava/lang/Thread;)V", (void*)&WB_AsyncHandshakeWalkStack },
{CC"checkThreadObjOfTerminatingThread", CC"(Ljava/lang/Thread;)V", (void*)&WB_CheckThreadObjOfTerminatingThread },
{CC"verifyFrames", CC"(Z)V", (void*)&WB_VerifyFrames },
{CC"verifyFrames", CC"(ZZ)V", (void*)&WB_VerifyFrames },
{CC"addCompilerDirective", CC"(Ljava/lang/String;)I",
(void*)&WB_AddCompilerDirective },
{CC"removeCompilerDirective", CC"(I)V", (void*)&WB_RemoveCompilerDirective },

View File

@ -309,6 +309,8 @@ bool needs_module_property_warning = false;
#define PATH_LEN 4
#define UPGRADE_PATH "upgrade.path"
#define UPGRADE_PATH_LEN 12
#define ENABLE_NATIVE_ACCESS "enable.native.access"
#define ENABLE_NATIVE_ACCESS_LEN 20
void Arguments::add_init_library(const char* name, char* options) {
_libraryList.add(new AgentLibrary(name, options, false, NULL));
@ -347,7 +349,8 @@ bool Arguments::is_internal_module_property(const char* property) {
matches_property_suffix(property_suffix, ADDMODS, ADDMODS_LEN) ||
matches_property_suffix(property_suffix, LIMITMODS, LIMITMODS_LEN) ||
matches_property_suffix(property_suffix, PATH, PATH_LEN) ||
matches_property_suffix(property_suffix, UPGRADE_PATH, UPGRADE_PATH_LEN)) {
matches_property_suffix(property_suffix, UPGRADE_PATH, UPGRADE_PATH_LEN) ||
matches_property_suffix(property_suffix, ENABLE_NATIVE_ACCESS, ENABLE_NATIVE_ACCESS_LEN)) {
return true;
}
}
@ -1964,6 +1967,7 @@ unsigned int addexports_count = 0;
unsigned int addopens_count = 0;
unsigned int addmods_count = 0;
unsigned int patch_mod_count = 0;
unsigned int enable_native_access_count = 0;
// Check the consistency of vm_init_args
bool Arguments::check_vm_args_consistency() {
@ -2406,6 +2410,10 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m
if (!create_numbered_module_property("jdk.module.addmods", tail, addmods_count++)) {
return JNI_ENOMEM;
}
} else if (match_option(option, "--enable-native-access=", &tail)) {
if (!create_numbered_module_property("jdk.module.enable.native.access", tail, enable_native_access_count++)) {
return JNI_ENOMEM;
}
} else if (match_option(option, "--limit-modules=", &tail)) {
if (!create_module_property("jdk.module.limitmods", tail, InternalProperty)) {
return JNI_ENOMEM;

View File

@ -158,7 +158,7 @@ void frame::set_pc(address newpc ) {
}
#endif // ASSERT
// Unsafe to use the is_deoptimzed tester after changing pc
// Unsafe to use the is_deoptimized tester after changing pc
_deopt_state = unknown;
_pc = newpc;
_cb = CodeCache::find_blob_unsafe(_pc);
@ -1067,6 +1067,10 @@ void frame::oops_do_internal(OopClosure* f, CodeBlobClosure* cf, const RegisterM
oops_interpreted_do(f, map, use_interpreter_oop_map_cache);
} else if (is_entry_frame()) {
oops_entry_do(f, map);
} else if (is_optimized_entry_frame()) {
// Nothing to do
// receiver is a global ref
// handle block is for JNI
} else if (CodeCache::contains(pc())) {
oops_code_blob_do(f, cf, map, derived_mode);
} else {
@ -1105,7 +1109,9 @@ void frame::verify(const RegisterMap* map) const {
#if COMPILER2_OR_JVMCI
assert(DerivedPointerTable::is_empty(), "must be empty before verify");
#endif
oops_do_internal(&VerifyOopClosure::verify_oop, NULL, map, false, DerivedPointerIterationMode::_ignore);
if (map->update_map()) { // The map has to be up-to-date for the current frame
oops_do_internal(&VerifyOopClosure::verify_oop, NULL, map, false, DerivedPointerIterationMode::_ignore);
}
}

View File

@ -138,6 +138,7 @@ class frame {
bool is_compiled_frame() const;
bool is_safepoint_blob_frame() const;
bool is_deoptimized_frame() const;
bool is_optimized_entry_frame() const;
// testers
bool is_first_frame() const; // oldest frame? (has no sender)
@ -172,6 +173,7 @@ class frame {
frame sender_for_entry_frame(RegisterMap* map) const;
frame sender_for_interpreter_frame(RegisterMap* map) const;
frame sender_for_native_frame(RegisterMap* map) const;
frame sender_for_optimized_entry_frame(RegisterMap* map) const;
bool is_entry_frame_valid(JavaThread* thread) const;

View File

@ -53,6 +53,10 @@ inline bool frame::is_first_frame() const {
return is_entry_frame() && entry_frame_is_first();
}
inline bool frame::is_optimized_entry_frame() const {
return _cb != NULL && _cb->is_optimized_entry_blob();
}
inline address frame::oopmapreg_to_location(VMReg reg, const RegisterMap* reg_map) const {
if(reg->is_reg()) {
// If it is passed in a register, it got spilled in the stub frame.

View File

@ -2080,6 +2080,9 @@ const intx ObjectAlignmentInBytes = 8;
false AARCH64_ONLY(DEBUG_ONLY(||true)), \
"Mark all threads after a safepoint, and clear on a modify " \
"fence. Add cleanliness checks.") \
\
develop(bool, TraceOptimizedUpcallStubs, false, \
"Trace optimized upcall stub generation") \
// end of RUNTIME_FLAGS

View File

@ -764,7 +764,6 @@ static Handle new_type(Symbol* signature, Klass* k, TRAPS) {
return Handle(THREAD, nt);
}
oop Reflection::new_method(const methodHandle& method, bool for_constant_pool_access, TRAPS) {
// Allow sun.reflect.ConstantPool to refer to <clinit> methods as java.lang.reflect.Methods.
assert(!method()->is_initializer() ||

View File

@ -512,6 +512,9 @@ address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* curr
// JavaCallWrapper::~JavaCallWrapper
return StubRoutines::catch_exception_entry();
}
if (blob != NULL && blob->is_optimized_entry_blob()) {
return ((OptimizedEntryBlob*)blob)->exception_handler();
}
// Interpreted code
if (Interpreter::contains(return_address)) {
// The deferred StackWatermarkSet::after_unwind check will be performed in
@ -1439,7 +1442,7 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread*
frame stub_frame = current->last_frame();
assert(stub_frame.is_runtime_frame(), "sanity check");
frame caller_frame = stub_frame.sender(&reg_map);
assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame");
assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_optimized_entry_frame(), "unexpected frame");
#endif /* ASSERT */
methodHandle callee_method;
@ -1471,7 +1474,8 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current)
frame caller_frame = stub_frame.sender(&reg_map);
if (caller_frame.is_interpreted_frame() ||
caller_frame.is_entry_frame()) {
caller_frame.is_entry_frame() ||
caller_frame.is_optimized_entry_frame()) {
Method* callee = current->callee_target();
guarantee(callee != NULL && callee->is_method(), "bad handshake");
current->set_vm_result_2(callee);

View File

@ -462,6 +462,11 @@ class SharedRuntime: AllStatic {
static void save_native_result(MacroAssembler *_masm, BasicType ret_type, int frame_slots);
static void restore_native_result(MacroAssembler *_masm, BasicType ret_type, int frame_slots);
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst);
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst);
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst);
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst);
// Generate a native wrapper for a given method. The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
@ -513,6 +518,12 @@ class SharedRuntime: AllStatic {
const GrowableArray<VMReg>& output_registers);
#endif
static void compute_move_order(const BasicType* in_sig_bt,
int total_in_args, const VMRegPair* in_regs,
int total_out_args, VMRegPair* out_regs,
GrowableArray<int>& arg_order,
VMRegPair tmp_vmreg);
#ifndef PRODUCT
// Collect and print inline cache miss statistics

View File

@ -109,6 +109,8 @@ public final class Module implements AnnotatedElement {
// the module descriptor
private final ModuleDescriptor descriptor;
// true, if this module allows restricted native access
private volatile boolean enableNativeAccess;
/**
* Creates a new named Module. The resulting Module will be defined to the
@ -133,6 +135,10 @@ public final class Module implements AnnotatedElement {
String loc = Objects.toString(uri, null);
Object[] packages = descriptor.packages().toArray();
defineModule0(this, isOpen, vs, loc, packages);
if (loader == null || loader == ClassLoaders.platformClassLoader()) {
// boot/builtin modules are always native
implAddEnableNativeAccess();
}
}
@ -244,6 +250,30 @@ public final class Module implements AnnotatedElement {
return null;
}
/**
* Update this module to allow access to restricted methods.
*/
Module implAddEnableNativeAccess() {
enableNativeAccess = true;
return this;
}
/**
* Update all unnamed modules to allow access to restricted methods.
*/
static void implAddEnableNativeAccessAllUnnamed() {
ALL_UNNAMED_MODULE.enableNativeAccess = true;
}
/**
* Returns true if module m can access restricted methods.
*/
boolean implIsEnableNativeAccess() {
return isNamed() ?
enableNativeAccess :
ALL_UNNAMED_MODULE.enableNativeAccess;
}
// --
// special Module to mean "all unnamed modules"

View File

@ -62,7 +62,6 @@ import java.util.Set;
import java.util.function.Supplier;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Stream;
import jdk.internal.misc.Unsafe;
import jdk.internal.util.StaticProperty;
import jdk.internal.module.ModuleBootstrap;
@ -2274,6 +2273,15 @@ public final class System {
public boolean isReflectivelyOpened(Module m, String pn, Module other) {
return m.isReflectivelyOpened(pn, other);
}
public Module addEnableNativeAccess(Module m) {
return m.implAddEnableNativeAccess();
}
public void addEnableNativeAccessAllUnnamed() {
Module.implAddEnableNativeAccessAllUnnamed();
}
public boolean isEnableNativeAccess(Module m) {
return m.implIsEnableNativeAccess();
}
public ServicesCatalog getServicesCatalog(ModuleLayer layer) {
return layer.getServicesCatalog();
}

View File

@ -1467,6 +1467,11 @@ abstract class MethodHandleImpl {
return GenerateJLIClassesHelper.generateHolderClasses(traces);
}
@Override
public void ensureCustomized(MethodHandle mh) {
mh.customize();
}
@Override
public VarHandle memoryAccessVarHandle(Class<?> carrier, boolean skipAlignmentMaskCheck, long alignmentMask,
ByteOrder order) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -58,7 +58,7 @@ import static java.lang.invoke.MethodHandleStatics.newInternalError;
throw new IllegalArgumentException("Type must only contain primitives: " + type);
if (type != fallback.type())
throw new IllegalArgumentException("Type of fallback must match");
throw new IllegalArgumentException("Type of fallback must match: " + type + " != " + fallback.type());
LambdaForm lform = preparedLambdaForm(type);
return new NativeMethodHandle(type, lform, fallback, nep);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,6 +30,7 @@ import jdk.internal.access.SharedSecrets;
import jdk.internal.access.foreign.MemorySegmentProxy;
import jdk.internal.access.foreign.UnmapperProxy;
import jdk.internal.misc.ScopedMemoryAccess;
import jdk.internal.misc.ScopedMemoryAccess.Scope;
import jdk.internal.misc.Unsafe;
import jdk.internal.misc.VM.BufferPool;
import jdk.internal.vm.annotation.ForceInline;
@ -821,6 +822,18 @@ public abstract class Buffer {
return buffer.segment;
}
@Override
public Scope.Handle acquireScope(Buffer buffer, boolean async) {
var scope = buffer.scope();
if (scope == null) {
return null;
}
if (async && scope.ownerThread() != null) {
throw new IllegalStateException("Confined scope not supported");
}
return scope.acquire();
}
@Override
public void force(FileDescriptor fd, long address, boolean isSync, long offset, long size) {
MappedMemoryUtils.force(fd, address, isSync, offset, size);

View File

@ -256,6 +256,21 @@ public interface JavaLangAccess {
*/
boolean isReflectivelyOpened(Module module, String pn, Module other);
/**
* Updates module m to allow access to restricted methods.
*/
Module addEnableNativeAccess(Module m);
/**
* Updates all unnamed modules to allow access to restricted methods.
*/
void addEnableNativeAccessAllUnnamed();
/**
* Returns true if module m can access restricted methods.
*/
boolean isEnableNativeAccess(Module m);
/**
* Returns the ServicesCatalog for the given Layer.
*/

View File

@ -132,4 +132,11 @@ public interface JavaLangInvokeAccess {
* @return the native method handle
*/
MethodHandle nativeMethodHandle(NativeEntryPoint nep, MethodHandle fallback);
/**
* Ensure given method handle is customized
*
* @param mh the method handle
*/
void ensureCustomized(MethodHandle mh);
}
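The hunk above only shows the new accessor; a minimal sketch of the assumed call site (not part of this hunk, variable name hypothetical) where the upcall machinery would force customization of its target handle before generating a stub for it:

// Assumed call site: force customization of the upcall target so the
// generated stub binds against the handle's final form.
JavaLangInvokeAccess jlia = SharedSecrets.getJavaLangInvokeAccess();
jlia.ensureCustomized(upcallTarget); // 'upcallTarget' is a hypothetical MethodHandle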

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@ package jdk.internal.access;
import jdk.internal.access.foreign.MemorySegmentProxy;
import jdk.internal.access.foreign.UnmapperProxy;
import jdk.internal.misc.ScopedMemoryAccess.Scope;
import jdk.internal.misc.VM.BufferPool;
import java.io.FileDescriptor;
@ -85,6 +86,14 @@ public interface JavaNioAccess {
*/
MemorySegmentProxy bufferSegment(Buffer buffer);
/**
* Used by I/O operations to make a buffer's resource scope non-closeable
* (for the duration of the I/O operation) by acquiring a new resource
* scope handle. Null is returned if the buffer has no scope, or
* acquiring is not required to guarantee safety.
*/
Scope.Handle acquireScope(Buffer buffer, boolean async);
/**
* Used by {@code jdk.internal.foreign.MappedMemorySegmentImpl} and byte buffer var handle views.
*/

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,8 +37,6 @@ public class NativeEntryPoint {
registerNatives();
}
private final long addr;
private final int shadowSpace;
// encoded as VMRegImpl*
@ -49,9 +47,8 @@ public class NativeEntryPoint {
private final MethodType methodType; // C2 sees erased version (byte -> int), so need this explicitly
private final String name;
private NativeEntryPoint(long addr, int shadowSpace, long[] argMoves, long[] returnMoves,
private NativeEntryPoint(int shadowSpace, long[] argMoves, long[] returnMoves,
boolean needTransition, MethodType methodType, String name) {
this.addr = addr;
this.shadowSpace = shadowSpace;
this.argMoves = Objects.requireNonNull(argMoves);
this.returnMoves = Objects.requireNonNull(returnMoves);
@ -60,14 +57,15 @@ public class NativeEntryPoint {
this.name = name;
}
public static NativeEntryPoint make(long addr, String name, ABIDescriptorProxy abi, VMStorageProxy[] argMoves, VMStorageProxy[] returnMoves,
public static NativeEntryPoint make(String name, ABIDescriptorProxy abi,
VMStorageProxy[] argMoves, VMStorageProxy[] returnMoves,
boolean needTransition, MethodType methodType) {
if (returnMoves.length > 1) {
throw new IllegalArgumentException("Multiple register return not supported");
}
return new NativeEntryPoint(
addr, abi.shadowSpaceBytes(), encodeVMStorages(argMoves), encodeVMStorages(returnMoves), needTransition, methodType, name);
return new NativeEntryPoint(abi.shadowSpaceBytes(), encodeVMStorages(argMoves), encodeVMStorages(returnMoves),
needTransition, methodType, name);
}
private static long[] encodeVMStorages(VMStorageProxy[] moves) {

View File

@ -62,7 +62,9 @@ public class BootLoader {
private static final String JAVA_HOME = StaticProperty.javaHome();
static {
UNNAMED_MODULE = SharedSecrets.getJavaLangAccess().defineUnnamedModule(null);
JavaLangAccess jla = SharedSecrets.getJavaLangAccess();
UNNAMED_MODULE = jla.defineUnnamedModule(null);
jla.addEnableNativeAccess(UNNAMED_MODULE);
setBootLoaderUnnamedModule0(UNNAMED_MODULE);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -71,7 +71,7 @@ import jdk.internal.vm.annotation.ForceInline;
*/
public class ScopedMemoryAccess {
private static Unsafe UNSAFE = Unsafe.getUnsafe();
private static final Unsafe UNSAFE = Unsafe.getUnsafe();
private static native void registerNatives();
static {
@ -97,10 +97,21 @@ public class ScopedMemoryAccess {
* which embodies the temporal checks associated with a given memory region.
*/
public interface Scope {
interface Handle {
Scope scope();
}
void checkValidState();
Thread ownerThread();
boolean isImplicit();
Handle acquire();
void release(Handle handle);
/**
* Error thrown when memory access fails because the memory has already been released.
* Note: for performance reasons, this exception is never created by client; instead a shared instance
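For illustration, a minimal sketch of the pinning protocol these new members enable (method and variable names hypothetical): acquire a handle before a blocking operation on memory governed by the scope, and release it afterwards so the scope becomes closeable again.

// Sketch: pin a scope around an operation that touches its memory.
static void withPinnedScope(ScopedMemoryAccess.Scope scope, Runnable operation) {
    ScopedMemoryAccess.Scope.Handle handle = scope.acquire(); // scope now non-closeable
    try {
        operation.run(); // e.g. a blocking read from the scope's memory
    } finally {
        scope.release(handle); // un-pin: close() may proceed again
    }
}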

View File

@ -38,6 +38,7 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
@ -264,7 +265,9 @@ public final class ModuleBootstrap {
if (baseUri == null)
throw new InternalError(JAVA_BASE + " does not have a location");
BootLoader.loadModule(base);
Modules.defineModule(null, base.descriptor(), baseUri);
Module baseModule = Modules.defineModule(null, base.descriptor(), baseUri);
JLA.addEnableNativeAccess(baseModule);
// Step 2a: Scan all modules when --validate-modules specified
@ -455,6 +458,9 @@ public final class ModuleBootstrap {
addExtraReads(bootLayer);
boolean extraExportsOrOpens = addExtraExportsAndOpens(bootLayer);
// add enable native access
addEnableNativeAccess(bootLayer);
Counters.add("jdk.module.boot.7.adjustModulesTime");
// save module finders for later use
@ -766,6 +772,47 @@ public final class ModuleBootstrap {
}
}
/**
* Process the --enable-native-access option to grant access to restricted methods to selected modules.
*/
private static void addEnableNativeAccess(ModuleLayer layer) {
for (String name : decodeEnableNativeAccess()) {
if (name.equals("ALL-UNNAMED")) {
JLA.addEnableNativeAccessAllUnnamed();
} else {
Optional<Module> module = layer.findModule(name);
if (module.isPresent()) {
JLA.addEnableNativeAccess(module.get());
} else {
warnUnknownModule(ENABLE_NATIVE_ACCESS, name);
}
}
}
}
/**
* Returns the set of module names specified by --enable-native-access options.
*/
private static Set<String> decodeEnableNativeAccess() {
String prefix = "jdk.module.enable.native.access.";
int index = 0;
// the system property is removed after decoding
String value = getAndRemoveProperty(prefix + index);
Set<String> modules = new HashSet<>();
if (value == null) {
return modules;
}
while (value != null) {
for (String s : value.split(",")) {
if (!s.isEmpty())
modules.add(s);
}
index++;
value = getAndRemoveProperty(prefix + index);
}
return modules;
}
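As an illustration of the encoding this method undoes (module names hypothetical), a command line maps onto numbered internal system properties as follows:

// java --enable-native-access=jdk.incubator.foreign --enable-native-access=ALL-UNNAMED ...
//
// is staged by argument parsing as numbered internal system properties:
//
//   jdk.module.enable.native.access.0 = "jdk.incubator.foreign"
//   jdk.module.enable.native.access.1 = "ALL-UNNAMED"
//
// decodeEnableNativeAccess() consumes index 0, 1, ... until a property is
// missing, splitting each value on ',' into individual module names.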
/**
* Decodes the values of --add-reads, --add-exports, --add-opens or
* --patch-modules options that are encoded in system properties.
@ -889,7 +936,7 @@ public final class ModuleBootstrap {
private static final String ADD_OPENS = "--add-opens";
private static final String ADD_READS = "--add-reads";
private static final String PATCH_MODULE = "--patch-module";
private static final String ENABLE_NATIVE_ACCESS = "--enable-native-access";
/*
* Returns the command-line option name that corresponds to the specified

View File

@ -106,6 +106,13 @@ public class Reflection {
}
}
public static void ensureNativeAccess(Class<?> currentClass) {
Module module = currentClass.getModule();
if (!SharedSecrets.getJavaLangAccess().isEnableNativeAccess(module)) {
throw new IllegalCallerException("Illegal native access from: " + module);
}
}
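A restricted entry point is then expected to guard itself with this check; a minimal sketch of the pattern (method name hypothetical, mirroring the CLinker.getInstance change later in this commit):

// Hypothetical restricted API method guarded by the new check.
@CallerSensitive
public static void restrictedOperation() {
    // Throws IllegalCallerException unless the caller's module was granted
    // native access via --enable-native-access.
    Reflection.ensureNativeAccess(Reflection.getCallerClass());
    // ... proceed with the restricted work ...
}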
/**
* Verify access to a member and return {@code true} if it is granted.
*

View File

@ -212,7 +212,8 @@ module java.base {
jdk.jartool,
jdk.jfr,
jdk.jlink,
jdk.jpackage;
jdk.jpackage,
jdk.incubator.foreign;
exports jdk.internal.perf to
java.management,
jdk.management.agent,
@ -229,7 +230,8 @@ module java.base {
java.sql.rowset,
jdk.dynalink,
jdk.internal.vm.ci,
jdk.unsupported;
jdk.unsupported,
jdk.incubator.foreign;
exports jdk.internal.vm to
jdk.internal.jvmstat,
jdk.management.agent;

View File

@ -60,6 +60,9 @@ java.launcher.opt.footer = \
\ root modules to resolve in addition to the initial module.\n\
\ <module name> can also be ALL-DEFAULT, ALL-SYSTEM,\n\
\ ALL-MODULE-PATH.\n\
\ --enable-native-access <module name>[,<module name>...]\n\
\ modules that are permitted to perform restricted native operations.\n\
\ <module name> can also be ALL-UNNAMED.\n\
\ --list-modules\n\
\ list observable modules and exit\n\
\ -d <module name>\n\

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,10 @@ package sun.nio.ch;
import java.io.FileDescriptor;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Objects;
import jdk.internal.access.JavaNioAccess;
import jdk.internal.access.SharedSecrets;
import jdk.internal.misc.ScopedMemoryAccess.Scope;
/**
* File-descriptor based I/O utilities that are shared by NIO classes.
@ -47,15 +50,30 @@ public class IOUtil {
NativeDispatcher nd)
throws IOException
{
return write(fd, src, position, false, -1, nd);
return write(fd, src, position, false, false, -1, nd);
}
static int write(FileDescriptor fd, ByteBuffer src, long position,
boolean async, NativeDispatcher nd)
throws IOException
{
return write(fd, src, position, false, async, -1, nd);
}
static int write(FileDescriptor fd, ByteBuffer src, long position,
boolean directIO, int alignment, NativeDispatcher nd)
throws IOException
{
return write(fd, src, position, directIO, false, alignment, nd);
}
static int write(FileDescriptor fd, ByteBuffer src, long position,
boolean directIO, boolean async, int alignment,
NativeDispatcher nd)
throws IOException
{
if (src instanceof DirectBuffer) {
return writeFromNativeBuffer(fd, src, position, directIO, alignment, nd);
return writeFromNativeBuffer(fd, src, position, directIO, async, alignment, nd);
}
// Substitute a native buffer
@ -76,7 +94,7 @@ public class IOUtil {
// Do not update src until we see how many bytes were written
src.position(pos);
int n = writeFromNativeBuffer(fd, bb, position, directIO, alignment, nd);
int n = writeFromNativeBuffer(fd, bb, position, directIO, async, alignment, nd);
if (n > 0) {
// now update src
src.position(pos + n);
@ -89,7 +107,8 @@ public class IOUtil {
private static int writeFromNativeBuffer(FileDescriptor fd, ByteBuffer bb,
long position, boolean directIO,
int alignment, NativeDispatcher nd)
boolean async, int alignment,
NativeDispatcher nd)
throws IOException
{
int pos = bb.position();
@ -105,46 +124,62 @@ public class IOUtil {
int written = 0;
if (rem == 0)
return 0;
if (position != -1) {
written = nd.pwrite(fd,
((DirectBuffer)bb).address() + pos,
rem, position);
} else {
written = nd.write(fd, ((DirectBuffer)bb).address() + pos, rem);
var handle = acquireScope(bb, async);
try {
if (position != -1) {
written = nd.pwrite(fd, bufferAddress(bb) + pos, rem, position);
} else {
written = nd.write(fd, bufferAddress(bb) + pos, rem);
}
} finally {
releaseScope(handle);
}
if (written > 0)
bb.position(pos + written);
return written;
}
static long write(FileDescriptor fd, ByteBuffer[] bufs, NativeDispatcher nd)
static long write(FileDescriptor fd, ByteBuffer[] bufs, boolean async,
NativeDispatcher nd)
throws IOException
{
return write(fd, bufs, 0, bufs.length, false, -1, nd);
return write(fd, bufs, 0, bufs.length, false, async, -1, nd);
}
static long write(FileDescriptor fd, ByteBuffer[] bufs, int offset, int length,
NativeDispatcher nd)
throws IOException
{
return write(fd, bufs, offset, length, false, -1, nd);
return write(fd, bufs, offset, length, false, false, -1, nd);
}
static long write(FileDescriptor fd, ByteBuffer[] bufs, int offset, int length,
boolean directIO, int alignment, NativeDispatcher nd)
boolean direct, int alignment, NativeDispatcher nd)
throws IOException
{
return write(fd, bufs, offset, length, direct, false, alignment, nd);
}
static long write(FileDescriptor fd, ByteBuffer[] bufs, int offset, int length,
boolean directIO, boolean async,
int alignment, NativeDispatcher nd)
throws IOException
{
IOVecWrapper vec = IOVecWrapper.get(length);
boolean completed = false;
int iov_len = 0;
Runnable handleReleasers = null;
try {
// Iterate over buffers to populate native iovec array.
int count = offset + length;
int i = offset;
while (i < count && iov_len < IOV_MAX) {
ByteBuffer buf = bufs[i];
var h = acquireScope(buf, async);
if (h != null) {
handleReleasers = LinkedRunnable.of(Releaser.of(h), handleReleasers);
}
int pos = buf.position();
int lim = buf.limit();
assert (pos <= lim);
@ -170,7 +205,7 @@ public class IOUtil {
pos = shadow.position();
}
vec.putBase(iov_len, ((DirectBuffer)buf).address() + pos);
vec.putBase(iov_len, bufferAddress(buf) + pos);
vec.putLen(iov_len, rem);
iov_len++;
}
@ -203,6 +238,7 @@ public class IOUtil {
return bytesWritten;
} finally {
releaseScopes(handleReleasers);
// if an error occurred then clear refs to buffers and return any shadow
// buffers to cache
if (!completed) {
@ -220,17 +256,32 @@ public class IOUtil {
NativeDispatcher nd)
throws IOException
{
return read(fd, dst, position, false, -1, nd);
return read(fd, dst, position, false, false, -1, nd);
}
static int read(FileDescriptor fd, ByteBuffer dst, long position,
boolean async, NativeDispatcher nd)
throws IOException
{
return read(fd, dst, position, false, async, -1, nd);
}
static int read(FileDescriptor fd, ByteBuffer dst, long position,
boolean directIO, int alignment, NativeDispatcher nd)
throws IOException
{
return read(fd, dst, position, directIO, false, alignment, nd);
}
static int read(FileDescriptor fd, ByteBuffer dst, long position,
boolean directIO, boolean async,
int alignment, NativeDispatcher nd)
throws IOException
{
if (dst.isReadOnly())
throw new IllegalArgumentException("Read-only buffer");
if (dst instanceof DirectBuffer)
return readIntoNativeBuffer(fd, dst, position, directIO, alignment, nd);
return readIntoNativeBuffer(fd, dst, position, directIO, async, alignment, nd);
// Substitute a native buffer
ByteBuffer bb;
@ -242,7 +293,7 @@ public class IOUtil {
bb = Util.getTemporaryDirectBuffer(rem);
}
try {
int n = readIntoNativeBuffer(fd, bb, position, directIO, alignment,nd);
int n = readIntoNativeBuffer(fd, bb, position, directIO, async, alignment, nd);
bb.flip();
if (n > 0)
dst.put(bb);
@ -254,7 +305,8 @@ public class IOUtil {
private static int readIntoNativeBuffer(FileDescriptor fd, ByteBuffer bb,
long position, boolean directIO,
int alignment, NativeDispatcher nd)
boolean async, int alignment,
NativeDispatcher nd)
throws IOException
{
int pos = bb.position();
@ -270,10 +322,15 @@ public class IOUtil {
if (rem == 0)
return 0;
int n = 0;
if (position != -1) {
n = nd.pread(fd, ((DirectBuffer)bb).address() + pos, rem, position);
} else {
n = nd.read(fd, ((DirectBuffer)bb).address() + pos, rem);
var handle = acquireScope(bb, async);
try {
if (position != -1) {
n = nd.pread(fd, bufferAddress(bb) + pos, rem, position);
} else {
n = nd.read(fd, bufferAddress(bb) + pos, rem);
}
} finally {
releaseScope(handle);
}
if (n > 0)
bb.position(pos + n);
@ -283,26 +340,43 @@ public class IOUtil {
static long read(FileDescriptor fd, ByteBuffer[] bufs, NativeDispatcher nd)
throws IOException
{
return read(fd, bufs, 0, bufs.length, false, -1, nd);
return read(fd, bufs, 0, bufs.length, false, false, -1, nd);
}
static long read(FileDescriptor fd, ByteBuffer[] bufs, boolean async,
NativeDispatcher nd)
throws IOException
{
return read(fd, bufs, 0, bufs.length, false, async, -1, nd);
}
static long read(FileDescriptor fd, ByteBuffer[] bufs, int offset, int length,
NativeDispatcher nd)
throws IOException
{
return read(fd, bufs, offset, length, false, -1, nd);
return read(fd, bufs, offset, length, false, false, -1, nd);
}
static long read(FileDescriptor fd, ByteBuffer[] bufs, int offset, int length,
boolean directIO, int alignment, NativeDispatcher nd)
throws IOException
{
return read(fd, bufs, offset, length, directIO, false, alignment, nd);
}
static long read(FileDescriptor fd, ByteBuffer[] bufs, int offset, int length,
boolean directIO, boolean async,
int alignment, NativeDispatcher nd)
throws IOException
{
IOVecWrapper vec = IOVecWrapper.get(length);
boolean completed = false;
int iov_len = 0;
Runnable handleReleasers = null;
try {
// Iterate over buffers to populate native iovec array.
int count = offset + length;
int i = offset;
@ -310,6 +384,10 @@ public class IOUtil {
ByteBuffer buf = bufs[i];
if (buf.isReadOnly())
throw new IllegalArgumentException("Read-only buffer");
var h = acquireScope(buf, async);
if (h != null) {
handleReleasers = LinkedRunnable.of(Releaser.of(h), handleReleasers);
}
int pos = buf.position();
int lim = buf.limit();
assert (pos <= lim);
@ -334,7 +412,7 @@ public class IOUtil {
pos = shadow.position();
}
vec.putBase(iov_len, ((DirectBuffer)buf).address() + pos);
vec.putBase(iov_len, bufferAddress(buf) + pos);
vec.putLen(iov_len, rem);
iov_len++;
}
@ -371,6 +449,7 @@ public class IOUtil {
return bytesRead;
} finally {
releaseScopes(handleReleasers);
// if an error occurred then clear refs to buffers and return any shadow
// buffers to cache
if (!completed) {
@ -384,6 +463,83 @@ public class IOUtil {
}
}
private static final JavaNioAccess NIO_ACCESS = SharedSecrets.getJavaNioAccess();
static Scope.Handle acquireScope(ByteBuffer bb, boolean async) {
return NIO_ACCESS.acquireScope(bb, async);
}
private static void releaseScope(Scope.Handle handle) {
if (handle == null)
return;
try {
handle.scope().release(handle);
} catch (Exception e) {
throw new IllegalStateException(e);
}
}
static Runnable acquireScopes(ByteBuffer[] buffers) {
return acquireScopes(null, buffers);
}
static Runnable acquireScopes(ByteBuffer buf, ByteBuffer[] buffers) {
if (buffers == null) {
assert buf != null;
return IOUtil.Releaser.ofNullable(IOUtil.acquireScope(buf, true));
} else {
assert buf == null;
Runnable handleReleasers = null;
for (var b : buffers) {
var h = IOUtil.acquireScope(b, true);
if (h != null) {
handleReleasers = IOUtil.LinkedRunnable.of(IOUtil.Releaser.of(h), handleReleasers);
}
}
return handleReleasers;
}
}
static void releaseScopes(Runnable releasers) {
if (releasers != null)
releasers.run();
}
static record LinkedRunnable(Runnable node, Runnable next)
implements Runnable
{
LinkedRunnable {
Objects.requireNonNull(node);
}
@Override
public void run() {
try {
node.run();
} finally {
if (next != null)
next.run();
}
}
static LinkedRunnable of(Runnable first, Runnable second) {
return new LinkedRunnable(first, second);
}
}
static record Releaser(Scope.Handle handle) implements Runnable {
Releaser { Objects.requireNonNull(handle); }
@Override public void run() { releaseScope(handle); }
static Runnable of(Scope.Handle handle) { return new Releaser(handle); }
static Runnable ofNullable(Scope.Handle handle) {
if (handle == null)
return () -> { };
return new Releaser(handle);
}
}
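Taken together, these helpers give the asynchronous channel implementations a simple bracket pattern; a minimal sketch (buffer array and I/O body hypothetical, usable only within sun.nio.ch since the helpers are package-private):

// Sketch: pin the scopes of all buffers for the lifetime of an async I/O,
// then release every acquired handle once the operation completes.
static void asyncIo(ByteBuffer[] bufs) {
    Runnable releasers = IOUtil.acquireScopes(bufs); // null if nothing was pinned
    try {
        // ... initiate the operation; the buffers' memory cannot be freed meanwhile ...
    } finally {
        IOUtil.releaseScopes(releasers); // no-op when releasers == null
    }
}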
static long bufferAddress(ByteBuffer buf) {
return NIO_ACCESS.getBufferAddress(buf);
}
public static FileDescriptor newFD(int i) {
FileDescriptor fd = new FileDescriptor();
setfdVal(fd, i);

View File

@ -599,6 +599,7 @@ IsModuleOption(const char* name) {
JLI_StrCmp(name, "-p") == 0 ||
JLI_StrCmp(name, "--upgrade-module-path") == 0 ||
JLI_StrCmp(name, "--add-modules") == 0 ||
JLI_StrCmp(name, "--enable-native-access") == 0 ||
JLI_StrCmp(name, "--limit-modules") == 0 ||
JLI_StrCmp(name, "--add-exports") == 0 ||
JLI_StrCmp(name, "--add-opens") == 0 ||
@ -611,6 +612,7 @@ IsLongFormModuleOption(const char* name) {
return JLI_StrCCmp(name, "--module-path=") == 0 ||
JLI_StrCCmp(name, "--upgrade-module-path=") == 0 ||
JLI_StrCCmp(name, "--add-modules=") == 0 ||
JLI_StrCCmp(name, "--enable-native-access=") == 0 ||
JLI_StrCCmp(name, "--limit-modules=") == 0 ||
JLI_StrCCmp(name, "--add-exports=") == 0 ||
JLI_StrCCmp(name, "--add-reads=") == 0 ||

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -76,6 +76,7 @@ class UnixAsynchronousSocketChannelImpl
private boolean isScatteringRead;
private ByteBuffer readBuffer;
private ByteBuffer[] readBuffers;
private Runnable readScopeHandleReleasers;
private CompletionHandler<Number,Object> readHandler;
private Object readAttachment;
private PendingFuture<Number,Object> readFuture;
@ -86,6 +87,7 @@ class UnixAsynchronousSocketChannelImpl
private boolean isGatheringWrite;
private ByteBuffer writeBuffer;
private ByteBuffer[] writeBuffers;
private Runnable writeScopeHandleReleasers;
private CompletionHandler<Number,Object> writeHandler;
private Object writeAttachment;
private PendingFuture<Number,Object> writeFuture;
@ -392,9 +394,9 @@ class UnixAsynchronousSocketChannelImpl
begin();
if (scattering) {
n = (int)IOUtil.read(fd, readBuffers, nd);
n = (int)IOUtil.read(fd, readBuffers, true, nd);
} else {
n = IOUtil.read(fd, readBuffer, -1, nd);
n = IOUtil.read(fd, readBuffer, -1, true, nd);
}
if (n == IOStatus.UNAVAILABLE) {
// spurious wakeup, is this possible?
@ -409,6 +411,7 @@ class UnixAsynchronousSocketChannelImpl
this.readBuffers = null;
this.readAttachment = null;
this.readHandler = null;
IOUtil.releaseScopes(readScopeHandleReleasers);
// allow another read to be initiated
enableReading();
@ -516,9 +519,9 @@ class UnixAsynchronousSocketChannelImpl
if (attemptRead) {
if (isScatteringRead) {
n = (int)IOUtil.read(fd, dsts, nd);
n = (int)IOUtil.read(fd, dsts, true, nd);
} else {
n = IOUtil.read(fd, dst, -1, nd);
n = IOUtil.read(fd, dst, -1, true, nd);
}
}
@ -526,6 +529,7 @@ class UnixAsynchronousSocketChannelImpl
PendingFuture<V,A> result = null;
synchronized (updateLock) {
this.isScatteringRead = isScatteringRead;
this.readScopeHandleReleasers = IOUtil.acquireScopes(dst, dsts);
this.readBuffer = dst;
this.readBuffers = dsts;
if (handler == null) {
@ -592,9 +596,9 @@ class UnixAsynchronousSocketChannelImpl
begin();
if (gathering) {
n = (int)IOUtil.write(fd, writeBuffers, nd);
n = (int)IOUtil.write(fd, writeBuffers, true, nd);
} else {
n = IOUtil.write(fd, writeBuffer, -1, nd);
n = IOUtil.write(fd, writeBuffer, -1, true, nd);
}
if (n == IOStatus.UNAVAILABLE) {
// spurious wakeup, is this possible?
@ -609,6 +613,7 @@ class UnixAsynchronousSocketChannelImpl
this.writeBuffers = null;
this.writeAttachment = null;
this.writeHandler = null;
IOUtil.releaseScopes(writeScopeHandleReleasers);
// allow another write to be initiated
enableWriting();
@ -702,9 +707,9 @@ class UnixAsynchronousSocketChannelImpl
if (attemptWrite) {
if (isGatheringWrite) {
n = (int)IOUtil.write(fd, srcs, nd);
n = (int)IOUtil.write(fd, srcs, true, nd);
} else {
n = IOUtil.write(fd, src, -1, nd);
n = IOUtil.write(fd, src, -1, true, nd);
}
}
@ -712,6 +717,7 @@ class UnixAsynchronousSocketChannelImpl
PendingFuture<V,A> result = null;
synchronized (updateLock) {
this.isGatheringWrite = isGatheringWrite;
this.writeScopeHandleReleasers = IOUtil.acquireScopes(src, srcs);
this.writeBuffer = src;
this.writeBuffers = srcs;
if (handler == null) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -400,6 +400,7 @@ class WindowsAsynchronousSocketChannelImpl
// set by run method
private ByteBuffer[] shadow;
private Runnable scopeHandleReleasers;
ReadTask(ByteBuffer[] bufs,
boolean scatteringRead,
@ -416,6 +417,7 @@ class WindowsAsynchronousSocketChannelImpl
* it substitutes non-direct buffers with direct buffers.
*/
void prepareBuffers() {
scopeHandleReleasers = IOUtil.acquireScopes(bufs);
shadow = new ByteBuffer[numBufs];
long address = readBufferArray;
for (int i=0; i<numBufs; i++) {
@ -429,10 +431,10 @@ class WindowsAsynchronousSocketChannelImpl
// substitute with direct buffer
ByteBuffer bb = Util.getTemporaryDirectBuffer(rem);
shadow[i] = bb;
a = ((DirectBuffer)bb).address();
a = IOUtil.bufferAddress(bb);
} else {
shadow[i] = dst;
a = ((DirectBuffer)dst).address() + pos;
a = IOUtil.bufferAddress(dst) + pos;
}
unsafe.putAddress(address + OFFSETOF_BUF, a);
unsafe.putInt(address + OFFSETOF_LEN, rem);
@ -490,6 +492,7 @@ class WindowsAsynchronousSocketChannelImpl
Util.releaseTemporaryDirectBuffer(shadow[i]);
}
}
IOUtil.releaseScopes(scopeHandleReleasers);
}
@Override
@ -671,6 +674,7 @@ class WindowsAsynchronousSocketChannelImpl
// set by run method
private ByteBuffer[] shadow;
private Runnable scopeHandleReleasers;
WriteTask(ByteBuffer[] bufs,
boolean gatheringWrite,
@ -687,6 +691,7 @@ class WindowsAsynchronousSocketChannelImpl
* it substitutes non-direct buffers with direct buffers.
*/
void prepareBuffers() {
scopeHandleReleasers = IOUtil.acquireScopes(bufs);
shadow = new ByteBuffer[numBufs];
long address = writeBufferArray;
for (int i=0; i<numBufs; i++) {
@ -703,10 +708,10 @@ class WindowsAsynchronousSocketChannelImpl
bb.flip();
src.position(pos); // leave heap buffer untouched for now
shadow[i] = bb;
a = ((DirectBuffer)bb).address();
a = IOUtil.bufferAddress(bb);
} else {
shadow[i] = src;
a = ((DirectBuffer)src).address() + pos;
a = IOUtil.bufferAddress(src) + pos;
}
unsafe.putAddress(address + OFFSETOF_BUF, a);
unsafe.putInt(address + OFFSETOF_LEN, rem);
@ -754,6 +759,7 @@ class WindowsAsynchronousSocketChannelImpl
Util.releaseTemporaryDirectBuffer(shadow[i]);
}
}
IOUtil.releaseScopes(scopeHandleReleasers);
}
@Override

View File

@ -48,7 +48,7 @@ import static java.lang.constant.ConstantDescs.BSM_INVOKE;
import static java.lang.constant.ConstantDescs.CD_String;
import static java.lang.constant.ConstantDescs.CD_long;
abstract class AbstractLayout implements MemoryLayout {
abstract non-sealed class AbstractLayout implements MemoryLayout {
private final OptionalLong size;
final long alignment;
@ -214,22 +214,22 @@ abstract class AbstractLayout implements MemoryLayout {
static final ConstantDesc LITTLE_ENDIAN = DynamicConstantDesc.ofNamed(BSM_GET_STATIC_FINAL, "LITTLE_ENDIAN", CD_BYTEORDER, CD_BYTEORDER);
static final MethodHandleDesc MH_PADDING = MethodHandleDesc.ofMethod(DirectMethodHandleDesc.Kind.INTERFACE_STATIC, CD_MEMORY_LAYOUT, "ofPaddingBits",
static final MethodHandleDesc MH_PADDING = MethodHandleDesc.ofMethod(DirectMethodHandleDesc.Kind.INTERFACE_STATIC, CD_MEMORY_LAYOUT, "paddingLayout",
MethodTypeDesc.of(CD_MEMORY_LAYOUT, CD_long));
static final MethodHandleDesc MH_VALUE = MethodHandleDesc.ofMethod(DirectMethodHandleDesc.Kind.INTERFACE_STATIC, CD_MEMORY_LAYOUT, "ofValueBits",
static final MethodHandleDesc MH_VALUE = MethodHandleDesc.ofMethod(DirectMethodHandleDesc.Kind.INTERFACE_STATIC, CD_MEMORY_LAYOUT, "valueLayout",
MethodTypeDesc.of(CD_VALUE_LAYOUT, CD_long, CD_BYTEORDER));
static final MethodHandleDesc MH_SIZED_SEQUENCE = MethodHandleDesc.ofMethod(DirectMethodHandleDesc.Kind.INTERFACE_STATIC, CD_MEMORY_LAYOUT, "ofSequence",
static final MethodHandleDesc MH_SIZED_SEQUENCE = MethodHandleDesc.ofMethod(DirectMethodHandleDesc.Kind.INTERFACE_STATIC, CD_MEMORY_LAYOUT, "sequenceLayout",
MethodTypeDesc.of(CD_SEQUENCE_LAYOUT, CD_long, CD_MEMORY_LAYOUT));
static final MethodHandleDesc MH_UNSIZED_SEQUENCE = MethodHandleDesc.ofMethod(DirectMethodHandleDesc.Kind.INTERFACE_STATIC, CD_MEMORY_LAYOUT, "ofSequence",
static final MethodHandleDesc MH_UNSIZED_SEQUENCE = MethodHandleDesc.ofMethod(DirectMethodHandleDesc.Kind.INTERFACE_STATIC, CD_MEMORY_LAYOUT, "sequenceLayout",
MethodTypeDesc.of(CD_SEQUENCE_LAYOUT, CD_MEMORY_LAYOUT));
static final MethodHandleDesc MH_STRUCT = MethodHandleDesc.ofMethod(DirectMethodHandleDesc.Kind.INTERFACE_STATIC, CD_MEMORY_LAYOUT, "ofStruct",
static final MethodHandleDesc MH_STRUCT = MethodHandleDesc.ofMethod(DirectMethodHandleDesc.Kind.INTERFACE_STATIC, CD_MEMORY_LAYOUT, "structLayout",
MethodTypeDesc.of(CD_GROUP_LAYOUT, CD_MEMORY_LAYOUT.arrayType()));
static final MethodHandleDesc MH_UNION = MethodHandleDesc.ofMethod(DirectMethodHandleDesc.Kind.INTERFACE_STATIC, CD_MEMORY_LAYOUT, "ofUnion",
static final MethodHandleDesc MH_UNION = MethodHandleDesc.ofMethod(DirectMethodHandleDesc.Kind.INTERFACE_STATIC, CD_MEMORY_LAYOUT, "unionLayout",
MethodTypeDesc.of(CD_GROUP_LAYOUT, CD_MEMORY_LAYOUT.arrayType()));
static final MethodHandleDesc MH_VOID_FUNCTION = MethodHandleDesc.ofMethod(DirectMethodHandleDesc.Kind.STATIC, CD_FUNCTION_DESC, "ofVoid",
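These descriptors track the layout factory renames in this change (ofPaddingBits becomes paddingLayout, ofValueBits becomes valueLayout, ofSequence becomes sequenceLayout, ofStruct becomes structLayout, ofUnion becomes unionLayout); for example, a struct layout is now declared as follows (field names hypothetical):

// New-style factory names, as renamed by this change (old names in comments).
GroupLayout point = MemoryLayout.structLayout(                               // was ofStruct
        MemoryLayout.valueLayout(32, ByteOrder.nativeOrder()).withName("x"), // was ofValueBits
        MemoryLayout.valueLayout(32, ByteOrder.nativeOrder()).withName("y"));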

View File

@ -28,12 +28,7 @@ package jdk.incubator.foreign;
/**
* Represents a type which is <em>addressable</em>. An addressable type is one which can be projected down to
* a memory address instance (see {@link #address()}). Examples of addressable types are {@link MemorySegment},
* {@link MemoryAddress}, {@link LibraryLookup.Symbol} and {@link CLinker.VaList}.
*
* @apiNote In the future, if the Java language permits, {@link Addressable}
* may become a {@code sealed} interface, which would prohibit subclassing except by
* explicitly permitted types, such as {@link MemorySegment}, {@link MemoryAddress}, {@link LibraryLookup.Symbol}
* and {@link CLinker.VaList}.
* {@link MemoryAddress} and {@link CLinker.VaList}.
*
* @implSpec
* Implementations of this interface are <a href="{@docRoot}/java.base/java/lang/doc-files/ValueBased.html">value-based</a>.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,10 +25,15 @@
*/
package jdk.incubator.foreign;
import jdk.internal.foreign.AbstractCLinker;
import jdk.internal.foreign.NativeMemorySegmentImpl;
import jdk.internal.foreign.PlatformLayouts;
import jdk.internal.foreign.Utils;
import jdk.internal.foreign.abi.SharedUtils;
import jdk.internal.foreign.abi.aarch64.AArch64VaList;
import jdk.internal.foreign.abi.x64.sysv.SysVVaList;
import jdk.internal.foreign.abi.x64.windows.WinVaList;
import jdk.internal.reflect.CallerSensitive;
import jdk.internal.reflect.Reflection;
import java.lang.constant.Constable;
import java.lang.invoke.MethodHandle;
@ -36,7 +41,6 @@ import java.lang.invoke.MethodType;
import java.nio.charset.Charset;
import java.util.Objects;
import java.util.function.Consumer;
import java.util.stream.Stream;
import static jdk.internal.foreign.PlatformLayouts.*;
@ -65,7 +69,8 @@ import static jdk.internal.foreign.PlatformLayouts.*;
* carrier type can be used to match the native {@code va_list} type.
* <p>
* For the linking process to be successful, some requirements must be satisfied; if {@code M} and {@code F} are
* the method type and the function descriptor, respectively, used during the linking process, then it must be that:
* the method type (obtained after dropping any prefix arguments) and the function descriptor, respectively,
* used during the linking process, then it must be that:
* <ul>
* <li>The arity of {@code M} is the same as that of {@code F};</li>
* <li>If the return type of {@code M} is {@code void}, then {@code F} should have no return layout
@ -100,33 +105,37 @@ import static jdk.internal.foreign.PlatformLayouts.*;
* <p> Unless otherwise specified, passing a {@code null} argument, or an array argument containing one or more {@code null}
* elements to a method in this class causes a {@link NullPointerException NullPointerException} to be thrown. </p>
*
* @apiNote In the future, if the Java language permits, {@link CLinker}
* may become a {@code sealed} interface, which would prohibit subclassing except by
* explicitly permitted types.
*
* @implSpec
* Implementations of this interface are immutable, thread-safe and <a href="{@docRoot}/java.base/java/lang/doc-files/ValueBased.html">value-based</a>.
*/
public interface CLinker {
public sealed interface CLinker permits AbstractCLinker {
/**
* Returns the C linker for the current platform.
* <p>
* This method is <em>restricted</em>. Restricted methods are unsafe, and, if used incorrectly, their use might crash
* This method is <a href="package-summary.html#restricted"><em>restricted</em></a>.
* Restricted methods are unsafe, and, if used incorrectly, their use might crash
* the JVM or, worse, silently result in memory corruption. Thus, clients should refrain from depending on
* restricted methods, and use safe and supported functionalities, where possible.
*
* @return a linker for this system.
* @throws IllegalAccessError if the runtime property {@code foreign.restricted} is not set to either
* {@code permit}, {@code warn} or {@code debug} (the default value is set to {@code deny}).
* @throws IllegalCallerException if access to this method occurs from a module {@code M} and the command line option
* {@code --enable-native-access} is either absent, or does not mention the module name {@code M}, or
* {@code ALL-UNNAMED} in case {@code M} is an unnamed module.
*/
@CallerSensitive
static CLinker getInstance() {
Utils.checkRestrictedAccess("CLinker.getInstance");
Reflection.ensureNativeAccess(Reflection.getCallerClass());
return SharedUtils.getSystemLinker();
}
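Putting the new access check and the allocator-aware helpers together, a downcall might look as follows; a minimal sketch which assumes the process was started with --enable-native-access for the caller's module and that strlenAddr was obtained from a library lookup (error handling, and the Throwable thrown by invokeExact, are elided):

// Sketch: calling strlen(3) through the linker. 'strlenAddr' is assumed to
// come from a library lookup; getInstance throws IllegalCallerException if
// the caller's module was not granted native access.
CLinker linker = CLinker.getInstance();
MethodHandle strlen = linker.downcallHandle(strlenAddr,
        MethodType.methodType(long.class, MemoryAddress.class),
        FunctionDescriptor.of(CLinker.C_LONG, CLinker.C_POINTER));
try (ResourceScope scope = ResourceScope.newConfinedScope()) {
    MemorySegment cString = CLinker.toCString("hello", SegmentAllocator.ofScope(scope));
    long len = (long) strlen.invokeExact(cString.address());
}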
/**
* Obtain a foreign method handle, with given type, which can be used to call a
* target foreign function at a given address and featuring a given function descriptor.
* Obtains a foreign method handle, with the given type and featuring the given function descriptor,
* which can be used to call a target foreign function at the given address.
* <p>
* If the provided method type's return type is {@code MemorySegment}, then the resulting method handle features
* an additional prefix parameter, of type {@link SegmentAllocator}, which will be used by the linker runtime
* to allocate structs returned by-value.
*
* @see LibraryLookup#lookup(String)
*
@ -139,20 +148,58 @@ public interface CLinker {
MethodHandle downcallHandle(Addressable symbol, MethodType type, FunctionDescriptor function);
/**
* Allocates a native segment whose base address (see {@link MemorySegment#address}) can be
* passed to other foreign functions (as a function pointer); calling such a function pointer
* from native code will result in the execution of the provided method handle.
* Obtains a foreign method handle, with the given type and featuring the given function descriptor,
* which can be used to call a target foreign function at the given address.
* <p>
* If the provided method type's return type is {@code MemorySegment}, then the provided allocator will be used by
* the linker runtime to allocate structs returned by-value.
*
* <p>The returned segment is <a href=MemorySegment.html#thread-confinement>shared</a>, and it only features
* the {@link MemorySegment#CLOSE} access mode. When the returned segment is closed,
* the corresponding native stub will be deallocated.</p>
* @see LibraryLookup#lookup(String)
*
* @param symbol downcall symbol.
* @param allocator the segment allocator.
* @param type the method type.
* @param function the function descriptor.
* @return the downcall method handle.
* @throws IllegalArgumentException in the case of a method type and function descriptor mismatch.
*/
MethodHandle downcallHandle(Addressable symbol, SegmentAllocator allocator, MethodType type, FunctionDescriptor function);
/**
* Obtains a foreign method handle, with the given type and featuring the given function descriptor, which can be
* used to call a target foreign function at an address.
* The resulting method handle features a prefix parameter (as the first parameter) corresponding to the address, of
* type {@link Addressable}.
* <p>
* If the provided method type's return type is {@code MemorySegment}, then the resulting method handle features an
* additional prefix parameter (inserted immediately after the address parameter), of type {@link SegmentAllocator},
* which will be used by the linker runtime to allocate structs returned by-value.
*
* @see LibraryLookup#lookup(String)
*
* @param type the method type.
* @param function the function descriptor.
* @return the downcall method handle.
* @throws IllegalArgumentException in the case of a method type and function descriptor mismatch.
*/
MethodHandle downcallHandle(MethodType type, FunctionDescriptor function);
/**
* Allocates a native stub with given scope which can be passed to other foreign functions (as a function pointer);
* calling such a function pointer from native code will result in the execution of the provided method handle.
*
* <p>The returned memory address is associated with the provided scope. When such scope is closed,
* the corresponding native stub will be deallocated.
*
* @param target the target method handle.
* @param function the function descriptor.
* @param scope the upcall stub scope.
* @return the native stub segment.
* @throws IllegalArgumentException if the target's method type and the function descriptor mismatch.
* @throws IllegalStateException if {@code scope} has been already closed, or if access occurs from a thread other
* than the thread owning {@code scope}.
*/
MemorySegment upcallStub(MethodHandle target, FunctionDescriptor function);
MemoryAddress upcallStub(MethodHandle target, FunctionDescriptor function, ResourceScope scope);
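For the upcall direction, a minimal sketch of the new scope-based signature, reusing the 'linker' instance from the earlier sketch (the Callbacks class and its callback method are hypothetical; checked exceptions elided):

// Sketch: expose a static void callback as a C function pointer; the stub
// stays valid exactly as long as the scope it is associated with.
MethodHandle target = MethodHandles.lookup()
        .findStatic(Callbacks.class, "callback", MethodType.methodType(void.class));
try (ResourceScope scope = ResourceScope.newConfinedScope()) {
    MemoryAddress fp = linker.upcallStub(target, FunctionDescriptor.ofVoid(), scope);
    // ... hand 'fp' to native code; it must not be invoked after the scope closes ...
}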
/**
* The layout for the {@code char} C type
@ -205,8 +252,8 @@ public interface CLinker {
}
/**
* Converts a Java string into a null-terminated C string, using the
* platform's default charset, storing the result into a new native memory segment.
* Converts a Java string into a null-terminated C string, using the platform's default charset,
* storing the result into a native memory segment allocated using the provided allocator.
* <p>
* This method always replaces malformed-input and unmappable-character
* sequences with this charset's default replacement byte array. The
@ -214,35 +261,18 @@ public interface CLinker {
* control over the encoding process is required.
*
* @param str the Java string to be converted into a C string.
* @param allocator the allocator to be used for the native segment allocation.
* @return a new native memory segment containing the converted C string.
*/
static MemorySegment toCString(String str) {
static MemorySegment toCString(String str, SegmentAllocator allocator) {
Objects.requireNonNull(str);
return toCString(str.getBytes());
}
/**
* Converts a Java string into a null-terminated C string, using the given {@link java.nio.charset.Charset charset},
* storing the result into a new native memory segment.
* <p>
* This method always replaces malformed-input and unmappable-character
* sequences with this charset's default replacement byte array. The
* {@link java.nio.charset.CharsetEncoder} class should be used when more
* control over the encoding process is required.
*
* @param str the Java string to be converted into a C string.
* @param charset The {@link java.nio.charset.Charset} to be used to compute the contents of the C string.
* @return a new native memory segment containing the converted C string.
*/
static MemorySegment toCString(String str, Charset charset) {
Objects.requireNonNull(str);
Objects.requireNonNull(charset);
return toCString(str.getBytes(charset));
Objects.requireNonNull(allocator);
return toCString(str.getBytes(), allocator);
}
/**
* Converts a Java string into a null-terminated C string, using the platform's default charset,
* storing the result into a native memory segment allocated using the provided scope.
* storing the result into a native memory segment associated with the provided resource scope.
* <p>
* This method always replaces malformed-input and unmappable-character
* sequences with this charset's default replacement byte array. The
@ -250,18 +280,18 @@ public interface CLinker {
* control over the encoding process is required.
*
* @param str the Java string to be converted into a C string.
* @param scope the scope to be used for the native segment allocation.
* @param scope the resource scope to be associated with the returned segment.
* @return a new native memory segment containing the converted C string.
* @throws IllegalStateException if {@code scope} has already been closed, or if access occurs from a thread other
* than the thread owning {@code scope}.
*/
static MemorySegment toCString(String str, NativeScope scope) {
Objects.requireNonNull(str);
Objects.requireNonNull(scope);
return toCString(str.getBytes(), scope);
static MemorySegment toCString(String str, ResourceScope scope) {
return toCString(str, SegmentAllocator.ofScope(scope));
}
/**
* Converts a Java string into a null-terminated C string, using the given {@link java.nio.charset.Charset charset},
* storing the result into a new native memory segment allocated using the provided scope.
* Converts a Java string into a null-terminated C string, using the given {@linkplain java.nio.charset.Charset charset},
* storing the result into a native memory segment allocated using the provided allocator.
* <p>
* This method always replaces malformed-input and unmappable-character
* sequences with this charset's default replacement byte array. The
@ -270,14 +300,34 @@ public interface CLinker {
*
* @param str the Java string to be converted into a C string.
* @param charset The {@link java.nio.charset.Charset} to be used to compute the contents of the C string.
* @param scope the scope to be used for the native segment allocation.
* @param allocator the allocator to be used for the native segment allocation.
* @return a new native memory segment containing the converted C string.
*/
static MemorySegment toCString(String str, Charset charset, NativeScope scope) {
static MemorySegment toCString(String str, Charset charset, SegmentAllocator allocator) {
Objects.requireNonNull(str);
Objects.requireNonNull(charset);
Objects.requireNonNull(scope);
return toCString(str.getBytes(charset), scope);
Objects.requireNonNull(allocator);
return toCString(str.getBytes(charset), allocator);
}
/**
* Converts a Java string into a null-terminated C string, using the given {@linkplain java.nio.charset.Charset charset},
* storing the result into a native memory segment associated with the provided resource scope.
* <p>
* This method always replaces malformed-input and unmappable-character
* sequences with this charset's default replacement byte array. The
* {@link java.nio.charset.CharsetEncoder} class should be used when more
* control over the encoding process is required.
*
* @param str the Java string to be converted into a C string.
* @param charset The {@link java.nio.charset.Charset} to be used to compute the contents of the C string.
* @param scope the resource scope to be associated with the returned segment.
* @return a new native memory segment containing the converted C string.
* @throws IllegalStateException if {@code scope} has already been closed, or if access occurs from a thread other
* than the thread owning {@code scope}.
*/
static MemorySegment toCString(String str, Charset charset, ResourceScope scope) {
return toCString(str, charset, SegmentAllocator.ofScope(scope));
}
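
A round-trip sketch combining these factories with toJavaString (defined further below); toJavaString is restricted, so the calling module must be granted native access:

    try (ResourceScope scope = ResourceScope.newConfinedScope()) {
        MemorySegment cStr = CLinker.toCString("hello", StandardCharsets.UTF_8, scope);
        String s = CLinker.toJavaString(cStr.address(), StandardCharsets.UTF_8); // "hello"
    }
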
/**
@ -288,37 +338,49 @@ public interface CLinker {
* java.nio.charset.CharsetDecoder} class should be used when more control
* over the decoding process is required.
* <p>
* This method is <em>restricted</em>. Restricted methods are unsafe, and, if used incorrectly, their use might crash
* This method is <a href="package-summary.html#restricted"><em>restricted</em></a>.
* Restricted methods are unsafe, and, if used incorrectly, their use might crash
* the JVM or, worse, silently result in memory corruption. Thus, clients should refrain from depending on
* restricted methods, and use safe and supported functionalities, where possible.
*
* @param addr the address at which the string is stored.
* @return a Java string with the contents of the null-terminated C string at given address.
* @throws IllegalArgumentException if the size of the native string is greater than the largest string supported by the platform.
* @throws IllegalCallerException if access to this method occurs from a module {@code M} and the command line option
* {@code --enable-native-access} is either absent, or does not mention the module name {@code M}, or
* {@code ALL-UNNAMED} in case {@code M} is an unnamed module.
*/
static String toJavaStringRestricted(MemoryAddress addr) {
Utils.checkRestrictedAccess("CLinker.toJavaStringRestricted");
@CallerSensitive
static String toJavaString(MemoryAddress addr) {
Reflection.ensureNativeAccess(Reflection.getCallerClass());
Objects.requireNonNull(addr);
return SharedUtils.toJavaStringInternal(NativeMemorySegmentImpl.EVERYTHING, addr.toRawLongValue(), Charset.defaultCharset());
}
/**
* Converts a null-terminated C string stored at given address into a Java string, using the given {@link java.nio.charset.Charset charset}.
* Converts a null-terminated C string stored at given address into a Java string, using the given {@linkplain java.nio.charset.Charset charset}.
* <p>
* This method always replaces malformed-input and unmappable-character
* sequences with this charset's default replacement string. The {@link
* java.nio.charset.CharsetDecoder} class should be used when more control
* over the decoding process is required.
* <p>
* This method is <em>restricted</em>. Restricted methods are unsafe, and, if used incorrectly, their use might crash
* This method is <a href="package-summary.html#restricted"><em>restricted</em></a>.
* Restricted methods are unsafe, and, if used incorrectly, their use might crash
* the JVM or, worse, silently result in memory corruption. Thus, clients should refrain from depending on
* restricted methods, and use safe and supported functionalities, where possible.
*
* @param addr the address at which the string is stored.
* @param charset The {@link java.nio.charset.Charset} to be used to compute the contents of the Java string.
* @return a Java string with the contents of the null-terminated C string at given address.
* @throws IllegalArgumentException if the size of the native string is greater than the largest string supported by the platform.
* @throws IllegalCallerException if access to this method occurs from a module {@code M} and the command line option
* {@code --enable-native-access} is either absent, or does not mention the module name {@code M}, or
* {@code ALL-UNNAMED} in case {@code M} is an unnamed module.
*/
static String toJavaStringRestricted(MemoryAddress addr, Charset charset) {
Utils.checkRestrictedAccess("CLinker.toJavaStringRestricted");
@CallerSensitive
static String toJavaString(MemoryAddress addr, Charset charset) {
Reflection.ensureNativeAccess(Reflection.getCallerClass());
Objects.requireNonNull(addr);
Objects.requireNonNull(charset);
return SharedUtils.toJavaStringInternal(NativeMemorySegmentImpl.EVERYTHING, addr.toRawLongValue(), charset);
@ -343,7 +405,7 @@ public interface CLinker {
}
/**
* Converts a null-terminated C string stored at given address into a Java string, using the given {@link java.nio.charset.Charset charset}.
* Converts a null-terminated C string stored at given address into a Java string, using the given {@linkplain java.nio.charset.Charset charset}.
* <p>
* This method always replaces malformed-input and unmappable-character
* sequences with this charset's default replacement string. The {@link
@ -368,14 +430,8 @@ public interface CLinker {
MemoryAccess.setByteAtOffset(addr, bytes.length, (byte)0);
}
private static MemorySegment toCString(byte[] bytes) {
MemorySegment segment = MemorySegment.allocateNative(bytes.length + 1, 1L);
copy(segment, bytes);
return segment;
}
private static MemorySegment toCString(byte[] bytes, NativeScope scope) {
MemorySegment addr = scope.allocate(bytes.length + 1, 1L);
private static MemorySegment toCString(byte[] bytes, SegmentAllocator allocator) {
MemorySegment addr = allocator.allocate(bytes.length + 1, 1L);
copy(addr, bytes);
return addr;
}
@ -383,16 +439,21 @@ public interface CLinker {
/**
* Allocates memory of given size using malloc.
* <p>
* This method is <em>restricted</em>. Restricted methods are unsafe, and, if used incorrectly, their use might crash
* This method is <a href="package-summary.html#restricted"><em>restricted</em></a>.
* Restricted methods are unsafe, and, if used incorrectly, their use might crash
* the JVM or, worse, silently result in memory corruption. Thus, clients should refrain from depending on
* restricted methods, and use safe and supported functionalities, where possible.
*
* @param size memory size to be allocated
* @return the memory address of the allocated memory
* @throws OutOfMemoryError if malloc could not allocate the required amount of native memory.
* @throws IllegalCallerException if access to this method occurs from a module {@code M} and the command line option
* {@code --enable-native-access} is either absent, or does not mention the module name {@code M}, or
* {@code ALL-UNNAMED} in case {@code M} is an unnamed module.
*/
static MemoryAddress allocateMemoryRestricted(long size) {
Utils.checkRestrictedAccess("CLinker.allocateMemoryRestricted");
@CallerSensitive
static MemoryAddress allocateMemory(long size) {
Reflection.ensureNativeAccess(Reflection.getCallerClass());
MemoryAddress addr = SharedUtils.allocateMemoryInternal(size);
if (addr.equals(MemoryAddress.NULL)) {
throw new OutOfMemoryError();
@ -404,14 +465,19 @@ public interface CLinker {
/**
* Frees the memory pointed by the given memory address.
* <p>
* This method is <em>restricted</em>. Restricted methods are unsafe, and, if used incorrectly, their use might crash
* This method is <a href="package-summary.html#restricted"><em>restricted</em></a>.
* Restricted methods are unsafe, and, if used incorrectly, their use might crash
* the JVM or, worse, silently result in memory corruption. Thus, clients should refrain from depending on
* restricted methods, and use safe and supported functionalities, where possible.
*
* @param addr memory address of the native memory to be freed
* @throws IllegalCallerException if access to this method occurs from a module {@code M} and the command line option
* {@code --enable-native-access} is either absent, or does not mention the module name {@code M}, or
* {@code ALL-UNNAMED} in case {@code M} is an unnamed module.
*/
static void freeMemoryRestricted(MemoryAddress addr) {
Utils.checkRestrictedAccess("CLinker.freeMemoryRestricted");
@CallerSensitive
static void freeMemory(MemoryAddress addr) {
Reflection.ensureNativeAccess(Reflection.getCallerClass());
Objects.requireNonNull(addr);
SharedUtils.freeMemoryInternal(addr);
}
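
Because no resource scope manages the address returned by allocateMemory, nothing frees it automatically; callers pair it with freeMemory, typically in try/finally. A sketch (asSegment, used to obtain a dereferenceable view, is restricted as well):

    MemoryAddress addr = CLinker.allocateMemory(64);
    try {
        MemorySegment seg = addr.asSegment(64, ResourceScope.globalScope());
        MemoryAccess.setLong(seg, 42L);   // write through the wrapping segment
    } finally {
        CLinker.freeMemory(addr);
    }
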
@ -431,21 +497,16 @@ public interface CLinker {
*
* <p> Unless otherwise specified, passing a {@code null} argument, or an array argument containing one or more {@code null}
* elements to a method in this class causes a {@link NullPointerException NullPointerException} to be thrown. </p>
*
* @apiNote In the future, if the Java language permits, {@link VaList}
* may become a {@code sealed} interface, which would prohibit subclassing except by
* explicitly permitted types.
*
*/
interface VaList extends Addressable, AutoCloseable {
sealed interface VaList extends Addressable permits WinVaList, SysVVaList, AArch64VaList, SharedUtils.EmptyVaList {
/**
* Reads the next value as an {@code int} and advances this va list's position.
*
* @param layout the layout of the value
* @return the value read as an {@code int}
* @throws IllegalStateException if the C {@code va_list} associated with this instance is no longer valid
* (see {@link #close()}).
* @throws IllegalStateException if the resource scope associated with this instance has been closed
* (see {@link #scope()}).
* @throws IllegalArgumentException if the given memory layout is not compatible with {@code int}
*/
int vargAsInt(MemoryLayout layout);
@ -455,8 +516,8 @@ public interface CLinker {
*
* @param layout the layout of the value
* @return the value read as a {@code long}
* @throws IllegalStateException if the C {@code va_list} associated with this instance is no longer valid
* (see {@link #close()}).
* @throws IllegalStateException if the resource scope associated with this instance has been closed
* (see {@link #scope()}).
* @throws IllegalArgumentException if the given memory layout is not compatible with {@code long}
*/
long vargAsLong(MemoryLayout layout);
@ -466,8 +527,8 @@ public interface CLinker {
*
* @param layout the layout of the value
* @return the value read as a {@code double}
* @throws IllegalStateException if the C {@code va_list} associated with this instance is no longer valid
* (see {@link #close()}).
* @throws IllegalStateException if the resource scope associated with this instance has been closed
* (see {@link #scope()}).
* @throws IllegalArgumentException if the given memory layout is not compatible with {@code double}
*/
double vargAsDouble(MemoryLayout layout);
@ -477,8 +538,8 @@ public interface CLinker {
*
* @param layout the layout of the value
* @return the value read as a {@code MemoryAddress}
* @throws IllegalStateException if the C {@code va_list} associated with this instance is no longer valid
* (see {@link #close()}).
* @throws IllegalStateException if the resource scope associated with this instance has been closed
* (see {@link #scope()}).
* @throws IllegalArgumentException if the given memory layout is not compatible with {@code MemoryAddress}
*/
MemoryAddress vargAsAddress(MemoryLayout layout);
@ -486,102 +547,70 @@ public interface CLinker {
/**
* Reads the next value as a {@code MemorySegment}, and advances this va list's position.
* <p>
* The memory segment returned by this method will be allocated using
* {@link MemorySegment#allocateNative(long, long)}, and will have to be closed separately.
* The memory segment returned by this method will be allocated using the given {@link SegmentAllocator}.
*
* @param layout the layout of the value
* @param allocator the allocator to be used for the native segment allocation
* @return the value read as a {@code MemorySegment}
* @throws IllegalStateException if the C {@code va_list} associated with this instance is no longer valid
* (see {@link #close()}).
* @throws IllegalStateException if the resource scope associated with this instance has been closed
* (see {@link #scope()}).
* @throws IllegalArgumentException if the given memory layout is not compatible with {@code MemorySegment}
*/
MemorySegment vargAsSegment(MemoryLayout layout);
MemorySegment vargAsSegment(MemoryLayout layout, SegmentAllocator allocator);
/**
* Reads the next value as a {@code MemorySegment}, and advances this va list's position.
* <p>
* The memory segment returned by this method will be allocated using the given {@code NativeScope}.
* The memory segment returned by this method will be associated with the given {@link ResourceScope}.
*
* @param layout the layout of the value
* @param scope the scope to allocate the segment in
* @param scope the resource scope to be associated with the returned segment
* @return the value read as a {@code MemorySegment}
* @throws IllegalStateException if the C {@code va_list} associated with this instance is no longer valid
* (see {@link #close()}).
* @throws IllegalStateException if the resource scope associated with this instance has been closed
* (see {@link #scope()}).
* @throws IllegalArgumentException if the given memory layout is not compatible with {@code MemorySegment}
* @throws IllegalStateException if {@code scope} has already been closed, or if access occurs from a thread other
* than the thread owning {@code scope}.
*/
MemorySegment vargAsSegment(MemoryLayout layout, NativeScope scope);
MemorySegment vargAsSegment(MemoryLayout layout, ResourceScope scope);
/**
* Skips a number of elements with the given memory layouts, and advances this va list's position.
*
* @param layouts the layouts of the values to be skipped
* @throws IllegalStateException if the C {@code va_list} associated with this instance is no longer valid
* (see {@link #close()}).
* @throws IllegalStateException if the resource scope associated with this instance has been closed
* (see {@link #scope()}).
*/
void skip(MemoryLayout... layouts);
/**
* A predicate used to check if the memory associated with the C {@code va_list} modelled
* by this instance is still valid to use.
*
* @return true, if the memory associated with the C {@code va_list} modelled by this instance is still valid
* @see #close()
* Returns the resource scope associated with this instance.
* @return the resource scope associated with this instance.
*/
boolean isAlive();
/**
* Releases the underlying C {@code va_list} modelled by this instance, and any native memory that is attached
* to this va list that holds its elements (see {@link VaList#make(Consumer)}).
* <p>
* After calling this method, {@link #isAlive()} will return {@code false} and further attempts to read values
* from this va list will result in an exception.
*
* @see #isAlive()
*/
void close();
ResourceScope scope();
/**
* Copies this C {@code va_list} at its current position. Copying is useful to traverse the va list's elements
* starting from the current position, without affecting the state of the original va list, essentially
* allowing the elements to be traversed multiple times.
* <p>
* If this method needs to allocate native memory for the copy, it will use
* {@link MemorySegment#allocateNative(long, long)} to do so. {@link #close()} will have to be called on the
* returned va list instance to release the allocated memory.
* Any native resource required by the execution of this method will be allocated in the resource scope
* associated with this instance (see {@link #scope()}).
* <p>
* This method only copies the va list cursor itself and not the memory that may be attached to the
* va list which holds its elements. That means that if this va list was created with the
* {@link #make(Consumer)} method, closing this va list will also release the native memory that holds its
* {@link #make(Consumer, ResourceScope)} method, closing this va list will also release the native memory that holds its
* elements, making the copy unusable.
*
* @return a copy of this C {@code va_list}.
* @throws IllegalStateException if the C {@code va_list} associated with this instance is no longer valid
* (see {@link #close()}).
* @throws IllegalStateException if the resource scope associated with this instance has been closed
* (see {@link #scope()}).
*/
VaList copy();
/**
* Copies this C {@code va_list} at its current position. Copying is useful to traverse the va list's elements
* starting from the current position, without affecting the state of the original va list, essentially
* allowing the elements to be traversed multiple times.
* <p>
* If this method needs to allocate native memory for the copy, it will use
* the given {@code NativeScope} to do so.
* <p>
* This method only copies the va list cursor itself and not the memory that may be attached to the
* va list which holds its elements. That means that if this va list was created with the
* {@link #make(Consumer)} method, closing this va list will also release the native memory that holds its
* elements, making the copy unusable.
*
* @param scope the scope to allocate the copy in
* @return a copy of this C {@code va_list}.
* @throws IllegalStateException if the C {@code va_list} associated with this instance is no longer valid
* (see {@link #close()}).
*/
VaList copy(NativeScope scope);
/**
* Returns the memory address of the C {@code va_list} associated with this instance.
* The returned memory address is associated with same resource scope as that associated with this instance.
*
* @return the memory address of the C {@code va_list} associated with this instance.
*/
@ -589,51 +618,58 @@ public interface CLinker {
MemoryAddress address();
/**
* Constructs a new {@code VaList} instance out of a memory address pointing to an existing C {@code va_list}.
* Constructs a new {@code VaList} instance out of a memory address pointing to an existing C {@code va_list},
* backed by the {@linkplain ResourceScope#globalScope() global} resource scope.
* <p>
* This method is <em>restricted</em>. Restricted methods are unsafe, and, if used incorrectly, their use might crash
* This method is <a href="package-summary.html#restricted"><em>restricted</em></a>.
* Restricted methods are unsafe, and, if used incorrectly, their use might crash
* the JVM or, worse, silently result in memory corruption. Thus, clients should refrain from depending on
* restricted methods, and use safe and supported functionalities, where possible.
*
* @param address a memory address pointing to an existing C {@code va_list}.
* @return a new {@code VaList} instance backed by the C {@code va_list} at {@code address}.
* @throws IllegalCallerException if access to this method occurs from a module {@code M} and the command line option
* {@code --enable-native-access} is either absent, or does not mention the module name {@code M}, or
* {@code ALL-UNNAMED} in case {@code M} is an unnamed module.
*/
static VaList ofAddressRestricted(MemoryAddress address) {
Utils.checkRestrictedAccess("VaList.ofAddressRestricted");
Objects.requireNonNull(address);
return SharedUtils.newVaListOfAddress(address);
@CallerSensitive
static VaList ofAddress(MemoryAddress address) {
Reflection.ensureNativeAccess(Reflection.getCallerClass());
return SharedUtils.newVaListOfAddress(address, ResourceScope.globalScope());
}
/**
* Constructs a new {@code VaList} using a builder (see {@link Builder}).
* Constructs a new {@code VaList} instance out of a memory address pointing to an existing C {@code va_list},
* with given resource scope.
* <p>
* If this method needs to allocate native memory for the va list, it will use
* {@link MemorySegment#allocateNative(long, long)} to do so.
* <p>
* This method will allocate native memory to hold the elements in the va list. This memory
* will be 'attached' to the returned va list instance, and will be released when {@link VaList#close()}
* is called.
* <p>
* Note that when there are no elements added to the created va list,
* this method will return the same as {@link #empty()}.
* This method is <a href="package-summary.html#restricted"><em>restricted</em></a>.
* Restricted methods are unsafe, and, if used incorrectly, their use might crash
* the JVM or, worse, silently result in memory corruption. Thus, clients should refrain from depending on
* restricted methods, and use safe and supported functionalities, where possible.
*
* @param actions a consumer for a builder (see {@link Builder}) which can be used to specify the elements
* of the underlying C {@code va_list}.
* @return a new {@code VaList} instance backed by a fresh C {@code va_list}.
* @param address a memory address pointing to an existing C {@code va_list}.
* @param scope the resource scope to be associated with the returned {@code VaList} instance.
* @return a new {@code VaList} instance backed by the C {@code va_list} at {@code address}.
* @throws IllegalStateException if {@code scope} has already been closed, or if access occurs from a thread other
* than the thread owning {@code scope}.
* @throws IllegalCallerException if access to this method occurs from a module {@code M} and the command line option
* {@code --enable-native-access} is either absent, or does not mention the module name {@code M}, or
* {@code ALL-UNNAMED} in case {@code M} is an unnamed module.
*/
static VaList make(Consumer<Builder> actions) {
Objects.requireNonNull(actions);
return SharedUtils.newVaList(actions, MemorySegment::allocateNative);
@CallerSensitive
static VaList ofAddress(MemoryAddress address, ResourceScope scope) {
Reflection.ensureNativeAccess(Reflection.getCallerClass());
Objects.requireNonNull(address);
Objects.requireNonNull(scope);
return SharedUtils.newVaListOfAddress(address, scope);
}
/**
* Constructs a new {@code VaList} using a builder (see {@link Builder}).
* Constructs a new {@code VaList} using a builder (see {@link Builder}), associated with a given
* {@linkplain ResourceScope resource scope}.
* <p>
* If this method needs to allocate native memory for the va list, it will use
* the given {@code NativeScope} to do so.
* <p>
* This method will allocate native memory to hold the elements in the va list. This memory
* will be managed by the given {@code NativeScope}, and will be released when the scope is closed.
* If this method needs to allocate native memory, such memory will be managed by the given
* {@linkplain ResourceScope resource scope}, and will be released when the resource scope is {@linkplain ResourceScope#close closed}.
* <p>
* Note that when there are no elements added to the created va list,
* this method will return the same as {@link #empty()}.
@ -642,11 +678,13 @@ public interface CLinker {
* of the underlying C {@code va_list}.
* @param scope the scope to be used for the va list allocation.
* @return a new {@code VaList} instance backed by a fresh C {@code va_list}.
* @throws IllegalStateException if {@code scope} has already been closed, or if access occurs from a thread other
* than the thread owning {@code scope}.
*/
static VaList make(Consumer<Builder> actions, NativeScope scope) {
static VaList make(Consumer<Builder> actions, ResourceScope scope) {
Objects.requireNonNull(actions);
Objects.requireNonNull(scope);
return SharedUtils.newVaList(actions, SharedUtils.Allocator.ofScope(scope));
return SharedUtils.newVaList(actions, scope);
}
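
A sketch of building and consuming a va list; the builder method names (vargFromInt, vargFromDouble) and the C_INT/C_DOUBLE layouts are assumptions drawn from the wider API, since the Builder methods are elided from this hunk:

    try (ResourceScope scope = ResourceScope.newConfinedScope()) {
        VaList list = VaList.make(b -> b
                .vargFromInt(CLinker.C_INT, 42)
                .vargFromDouble(CLinker.C_DOUBLE, 3.0), scope);
        int i = list.vargAsInt(CLinker.C_INT);           // 42, advances the position
        double d = list.vargAsDouble(CLinker.C_DOUBLE);  // 3.0
    }
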
/**
@ -665,13 +703,8 @@ public interface CLinker {
*
* <p> Unless otherwise specified, passing a {@code null} argument, or an array argument containing one or more {@code null}
* elements to a method in this class causes a {@link NullPointerException NullPointerException} to be thrown. </p>
*
* @apiNote In the future, if the Java language permits, {@link Builder}
* may become a {@code sealed} interface, which would prohibit subclassing except by
* explicitly permitted types.
*
*/
interface Builder {
sealed interface Builder permits WinVaList.Builder, SysVVaList.Builder, AArch64VaList.Builder {
/**
* Adds a native value represented as an {@code int} to the C {@code va_list} being constructed.
@ -729,7 +762,7 @@ public interface CLinker {
* A C type kind. Each kind corresponds to a particular C language builtin type, and can be attached to
* {@link ValueLayout} instances using the {@link MemoryLayout#withAttribute(String, Constable)} method in order
* to obtain a layout which can be classified accordingly by {@link CLinker#downcallHandle(Addressable, MethodType, FunctionDescriptor)}
* and {@link CLinker#upcallStub(MethodHandle, FunctionDescriptor)}.
* and {@link CLinker#upcallStub(MethodHandle, FunctionDescriptor, ResourceScope)}.
*/
enum TypeKind {
/**
@ -806,6 +839,6 @@ public interface CLinker {
TypeKind kind = (TypeKind) layout.attribute(TypeKind.ATTR_NAME).orElse(null);
* }</pre></blockquote>
*/
public final static String ATTR_NAME = "abi/kind";
public static final String ATTR_NAME = "abi/kind";
}
}
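
For illustration, a value layout can be classified by attaching a type kind through the withAttribute mechanism described above; the INT constant is an assumption, as the enum constants are elided from this hunk:

    MemoryLayout cInt = MemoryLayouts.JAVA_INT
            .withAttribute(CLinker.TypeKind.ATTR_NAME, CLinker.TypeKind.INT);
    // cInt can now appear in a FunctionDescriptor and be classified by the linker
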

View File

@ -42,8 +42,8 @@ import java.util.stream.Collectors;
/**
* A group layout is used to combine together multiple <em>member layouts</em>. There are two ways in which member layouts
* can be combined: if member layouts are laid out one after the other, the resulting group layout is said to be a <em>struct</em>
* (see {@link MemoryLayout#ofStruct(MemoryLayout...)}); conversely, if all member layouts are laid out at the same starting offset,
* the resulting group layout is said to be a <em>union</em> (see {@link MemoryLayout#ofUnion(MemoryLayout...)}).
* (see {@link MemoryLayout#structLayout(MemoryLayout...)}); conversely, if all member layouts are laid out at the same starting offset,
* the resulting group layout is said to be a <em>union</em> (see {@link MemoryLayout#unionLayout(MemoryLayout...)}).
* <p>
* This is a <a href="{@docRoot}/java.base/java/lang/doc-files/ValueBased.html">value-based</a>
* class; programmers should treat instances that are
@ -58,7 +58,7 @@ import java.util.stream.Collectors;
* @implSpec
* This class is immutable and thread-safe.
*/
public final class GroupLayout extends AbstractLayout {
public final class GroupLayout extends AbstractLayout implements MemoryLayout {
/**
* The group kind.
@ -118,8 +118,8 @@ public final class GroupLayout extends AbstractLayout {
* Returns the member layouts associated with this group.
*
* @apiNote the order in which member layouts are returned is the same order in which member layouts have
* been passed to one of the group layout factory methods (see {@link MemoryLayout#ofStruct(MemoryLayout...)},
* {@link MemoryLayout#ofUnion(MemoryLayout...)}).
* been passed to one of the group layout factory methods (see {@link MemoryLayout#structLayout(MemoryLayout...)},
* {@link MemoryLayout#unionLayout(MemoryLayout...)}).
*
* @return the member layouts associated with this group.
*/
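
For illustration, the renamed factories in use (withName is assumed from the MemoryLayout API):

    GroupLayout point = MemoryLayout.structLayout(     // members laid out sequentially
            MemoryLayouts.JAVA_INT.withName("x"),
            MemoryLayouts.JAVA_INT.withName("y"));
    GroupLayout intOrFloat = MemoryLayout.unionLayout( // members share offset zero
            MemoryLayouts.JAVA_INT.withName("i"),
            MemoryLayouts.JAVA_FLOAT.withName("f"));
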

View File

@ -26,6 +26,8 @@
package jdk.incubator.foreign;
import jdk.internal.foreign.LibrariesHelper;
import jdk.internal.reflect.CallerSensitive;
import jdk.internal.reflect.Reflection;
import java.io.File;
import java.lang.invoke.MethodType;
@ -37,23 +39,22 @@ import java.util.Optional;
* A native library lookup. Exposes a lookup operation for searching symbols, see {@link LibraryLookup#lookup(String)}.
* A given native library remains loaded as long as there is at least one <em>live</em> library lookup instance referring
* to it.
* All symbol instances (see {@link LibraryLookup.Symbol}) generated by a given library lookup object contain a strong reference
* to said lookup object, therefore preventing library unloading; in turn method handle instances obtained from
* {@link CLinker#downcallHandle(Addressable, MethodType, FunctionDescriptor)}) also maintain a strong reference
* to the addressable parameter used for their construction. This means that there is always a strong reachability chain
* from a native method handle to a lookup object (the one that was used to lookup the native library symbol the method handle
* refers to); this is useful to prevent situations where a native library is unloaded in the middle of a native call.
* <p><a id = "var-symbols"></a></p>
* In cases where a client wants to create a memory segment out of a lookup symbol, the client might want to attach the
* lookup symbol to the newly created segment, so that the symbol will be kept reachable as long as the memory segment
* is reachable; this can be achieved by creating the segment using the {@link MemoryAddress#asSegmentRestricted(long, Runnable, Object)}
* restricted segment factory, as follows:
* All instances generated by a given library lookup object contain a strong reference to said lookup object,
* therefore preventing library unloading. For {@linkplain #lookup(String, MemoryLayout) memory segments} obtained from a library lookup object,
* this means that clients can safely dereference memory associated with lookup symbols, as follows:
* <pre>{@code
LibraryLookup defaultLookup = LibraryLookup.defaultLookup();
LibraryLookup.Symbol errno = defaultLookup.lookup("errno");
MemorySegment errnoSegment = errno.address().asRestrictedSegment(4, errno);
* LibraryLookup defaultLookup = LibraryLookup.ofDefault();
* MemorySegment errnoSegment = defaultLookup.lookup("errno", MemoryLayouts.JAVA_INT).get();
* int errno = MemoryAccess.getInt(errnoSegment);
* }</pre>
* <p>
* For {@linkplain #lookup(String) memory addresses} obtained from a library lookup object,
* since {@linkplain CLinker#downcallHandle(Addressable, MethodType, FunctionDescriptor) native method handles}
* also maintain a strong reference to the addressable parameter used for their construction, there is
* always a strong reachability chain from a native method handle to a lookup object (the one that was used to lookup
* the native library symbol the method handle refers to). This is useful to prevent situations where a native library
* is unloaded in the middle of a native call.
* <p>
* To allow for a library to be unloaded, a client will have to discard any strong references it
* maintains, directly, or indirectly to a lookup object associated with given library.
*
@ -63,45 +64,42 @@ MemorySegment errnoSegment = errno.address().asRestrictedSegment(4, errno);
public interface LibraryLookup {
/**
* A symbol retrieved during a library lookup. A lookup symbol has a <em>name</em> and can be projected
* into a memory address (see {@link #name()} and {@link #address()}, respectively).
* Looks up a symbol with given name in this library. The returned memory address maintains a strong reference to this lookup object.
*
* @apiNote In the future, if the Java language permits, {@link Symbol}
* may become a {@code sealed} interface, which would prohibit subclassing except by
* explicitly permitted types.
*
* @implSpec
* Implementations of this interface are immutable, thread-safe and <a href="{@docRoot}/java.base/java/lang/doc-files/ValueBased.html">value-based</a>.
* @param name the symbol name.
* @return the memory address associated with the library symbol (if any).
*/
interface Symbol extends Addressable {
/**
* The name of this lookup symbol.
* @return the name of this lookup symbol.
*/
String name();
/**
* The memory address of this lookup symbol. If the memory associated with this symbol needs to be dereferenced,
* clients can obtain a segment from this symbol's address using the {@link MemoryAddress#asSegmentRestricted(long, Runnable, Object)},
* and making sure that the created segment maintains a <a href="LibraryLookup.html#var-symbols">strong reference</a> to this symbol, to prevent library unloading.
* @return the memory address of this lookup symbol.
*/
@Override
MemoryAddress address();
}
Optional<MemoryAddress> lookup(String name);
/**
* Looks up a symbol with given name in this library. The returned symbol maintains a strong reference to this lookup object.
* Looks up a symbol with given name in this library. The returned memory segment has a size that matches that of
* the specified layout, and maintains a strong reference to this lookup object. This method can be useful
* to look up global variable symbols in a foreign library.
*
* @param name the symbol name.
* @return the library symbol (if any).
* @param layout the layout to be associated with the library symbol.
* @return the memory segment associated with the library symbol (if any).
* @throws IllegalArgumentException if the address associated with the lookup symbol does not match the
* {@linkplain MemoryLayout#byteAlignment() alignment constraints} in {@code layout}.
*/
Optional<Symbol> lookup(String name);
Optional<MemorySegment> lookup(String name, MemoryLayout layout);
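
A sketch pairing the two lookup flavors, assuming the calling module has native access: an address result typically feeds a downcall handle, while a layout-carrying result can be dereferenced directly:

    LibraryLookup lookup = LibraryLookup.ofDefault();   // restricted
    Optional<MemoryAddress> strlen = lookup.lookup("strlen");
    Optional<MemorySegment> errno = lookup.lookup("errno", MemoryLayouts.JAVA_INT);
    errno.ifPresent(seg -> System.out.println(MemoryAccess.getInt(seg)));
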
/**
* Obtains a default library lookup object.
* <p>
* This method is <a href="package-summary.html#restricted"><em>restricted</em></a>.
* Restricted methods are unsafe, and, if used incorrectly, their use might crash
* the JVM or, worse, silently result in memory corruption. Thus, clients should refrain from depending on
* restricted methods, and use safe and supported functionalities, where possible.
*
* @return the default library lookup object.
* @throws IllegalCallerException if access to this method occurs from a module {@code M} and the command line option
* {@code --enable-native-access} is either absent, or does not mention the module name {@code M}, or
* {@code ALL-UNNAMED} in case {@code M} is an unnamed module.
*/
@CallerSensitive
static LibraryLookup ofDefault() {
Reflection.ensureNativeAccess(Reflection.getCallerClass());
SecurityManager security = System.getSecurityManager();
if (security != null) {
security.checkPermission(new RuntimePermission("java.foreign.getDefaultLibrary"));
@ -111,12 +109,23 @@ public interface LibraryLookup {
/**
* Obtains a library lookup object corresponding to a library identified by a given path.
* <p>
* This method is <a href="package-summary.html#restricted"><em>restricted</em></a>.
* Restricted methods are unsafe, and, if used incorrectly, their use might crash
* the JVM or, worse, silently result in memory corruption. Thus, clients should refrain from depending on
* restricted methods, and use safe and supported functionalities, where possible.
*
* @param path the library absolute path.
* @return a library lookup object for given path.
* @throws IllegalArgumentException if the specified path does not correspond to an absolute path,
* e.g. if {@code !path.isAbsolute()}.
* @throws IllegalCallerException if access to this method occurs from a module {@code M} and the command line option
* {@code --enable-native-access} is either absent, or does not mention the module name {@code M}, or
* {@code ALL-UNNAMED} in case {@code M} is an unnamed module.
*/
@CallerSensitive
static LibraryLookup ofPath(Path path) {
Reflection.ensureNativeAccess(Reflection.getCallerClass());
Objects.requireNonNull(path);
if (!path.isAbsolute()) {
throw new IllegalArgumentException("Not an absolute path: " + path.toString());
@ -134,10 +143,21 @@ public interface LibraryLookup {
* is decorated according to the platform conventions (e.g. on Linux, the {@code lib} prefix is added,
* as well as the {@code .so} extension); the resulting name is then looked up in the standard native
* library path (which can be overridden by setting the <code>java.library.path</code> property).
* <p>
* This method is <a href="package-summary.html#restricted"><em>restricted</em></a>.
* Restricted methods are unsafe, and, if used incorrectly, their use might crash
* the JVM or, worse, silently result in memory corruption. Thus, clients should refrain from depending on
* restricted methods, and use safe and supported functionalities, where possible.
*
* @param libName the library name.
* @return a library lookup object for given library name.
* @throws IllegalCallerException if access to this method occurs from a module {@code M} and the command line option
* {@code --enable-native-access} is either absent, or does not mention the module name {@code M}, or
* {@code ALL-UNNAMED} in case {@code M} is an unnamed module.
*/
@CallerSensitive
static LibraryLookup ofLibrary(String libName) {
Reflection.ensureNativeAccess(Reflection.getCallerClass());
Objects.requireNonNull(libName);
SecurityManager security = System.getSecurityManager();
if (security != null) {

View File

@ -1,167 +0,0 @@
/*
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.incubator.foreign;
import jdk.internal.foreign.MappedMemorySegmentImpl;
import java.io.UncheckedIOException;
import java.nio.MappedByteBuffer;
import java.util.Objects;
/**
* This class provides capabilities to manipulate mapped memory segments, such as {@link #force(MemorySegment)},
* and {@link #load(MemorySegment)}. The methods in these class are suitable replacements for some of the
* functionality in the {@link java.nio.MappedByteBuffer} class. Note that, while it is possible to map a segment
* into a byte buffer (see {@link MemorySegment#asByteBuffer()}), and call e.g. {@link MappedByteBuffer#force()} that way,
* this can only be done when the source segment is small enough, due to the size limitation inherent to the
* ByteBuffer API.
* <p>
* Clients requiring sophisticated, low-level control over mapped memory segments, should consider writing
* custom mapped memory segment factories; using JNI, e.g. on Linux, it is possible to call {@code mmap}
* with the desired parameters; the returned address can be easily wrapped into a memory segment, using
* {@link MemoryAddress#ofLong(long)} and {@link MemoryAddress#asSegmentRestricted(long, Runnable, Object)}.
*
* <p> Unless otherwise specified, passing a {@code null} argument, or an array argument containing one or more {@code null}
* elements to a method in this class causes a {@link NullPointerException NullPointerException} to be thrown. </p>
*
* @implNote
* The behavior of some of the methods in this class (see {@link #load(MemorySegment)}, {@link #unload(MemorySegment)} and
* {@link #isLoaded(MemorySegment)}) is highly platform-dependent; as a result, calling these methods might
* be a no-op on certain platforms.
*/
public final class MappedMemorySegments {
private MappedMemorySegments() {
// no thanks
}
/**
* Tells whether or not the contents of the given segment are resident in physical
* memory.
*
* <p> A return value of {@code true} implies that it is highly likely
* that all of the data in the given segment is resident in physical memory and
* may therefore be accessed without incurring any virtual-memory page
* faults or I/O operations. A return value of {@code false} does not
* necessarily imply that the segment's content is not resident in physical
* memory.
*
* <p> The returned value is a hint, rather than a guarantee, because the
* underlying operating system may have paged out some of the segment's data
* by the time that an invocation of this method returns. </p>
*
* @param segment the segment whose contents are to be tested.
* @return {@code true} if it is likely that the contents of the given segment
* are resident in physical memory
*
* @throws IllegalStateException if the given segment is not alive, or if the given segment is confined
* and this method is called from a thread other than the segment's owner thread.
* @throws UnsupportedOperationException if the given segment is not a mapped memory segment, e.g. if
* {@code segment.isMapped() == false}.
*/
public static boolean isLoaded(MemorySegment segment) {
return toMappedSegment(segment).isLoaded();
}
/**
* Loads the contents of the given segment into physical memory.
*
* <p> This method makes a best effort to ensure that, when it returns,
* the contents of the given segment are resident in physical memory. Invoking this
* method may cause some number of page faults and I/O operations to
* occur. </p>
*
* @param segment the segment whose contents are to be loaded.
*
* @throws IllegalStateException if the given segment is not alive, or if the given segment is confined
* and this method is called from a thread other than the segment's owner thread.
* @throws UnsupportedOperationException if the given segment is not a mapped memory segment, e.g. if
* {@code segment.isMapped() == false}.
*/
public static void load(MemorySegment segment) {
toMappedSegment(segment).load();
}
/**
* Unloads the contents of the given segment from physical memory.
*
* <p> This method makes a best effort to ensure that the contents of the given segment are
* no longer resident in physical memory. Accessing this segment's contents
* after invoking this method may cause some number of page faults and I/O operations to
* occur (as this segment's contents might need to be paged back in). </p>
*
* @param segment the segment whose contents are to be unloaded.
*
* @throws IllegalStateException if the given segment is not alive, or if the given segment is confined
* and this method is called from a thread other than the segment's owner thread.
* @throws UnsupportedOperationException if the given segment is not a mapped memory segment, e.g. if
* {@code segment.isMapped() == false}.
*/
public static void unload(MemorySegment segment) {
toMappedSegment(segment).unload();
}
/**
* Forces any changes made to the contents of the given segment to be written to the
* storage device described by the mapped segment's file descriptor.
*
* <p> If this mapping's file descriptor resides on a local storage
* device then when this method returns it is guaranteed that all changes
* made to the segment since it was created, or since this method was last
* invoked, will have been written to that device.
*
* <p> If this mapping's file descriptor does not reside on a local device then no such guarantee
* is made.
*
* <p> If the given segment was not mapped in read/write mode ({@link
* java.nio.channels.FileChannel.MapMode#READ_WRITE}) then
* invoking this method may have no effect. In particular, the
* method has no effect for segments mapped in read-only or private
* mapping modes. This method may or may not have an effect for
* implementation-specific mapping modes.
* </p>
*
* @param segment the segment whose contents are to be written to the storage device described by the
* segment's file descriptor.
*
* @throws IllegalStateException if the given segment is not alive, or if the given segment is confined
* and this method is called from a thread other than the segment's owner thread.
* @throws UnsupportedOperationException if the given segment is not a mapped memory segment, e.g. if
* {@code segment.isMapped() == false}.
* @throws UncheckedIOException if there is an I/O error writing the contents of the segment to the associated storage device
*/
public static void force(MemorySegment segment) {
toMappedSegment(segment).force();
}
static MappedMemorySegmentImpl toMappedSegment(MemorySegment segment) {
Objects.requireNonNull(segment);
if (segment instanceof MappedMemorySegmentImpl) {
return (MappedMemorySegmentImpl)segment;
} else {
throw new UnsupportedOperationException("Not a mapped memory segment");
}
}
}

View File

@ -26,19 +26,28 @@
package jdk.incubator.foreign;
import jdk.internal.foreign.AbstractMemorySegmentImpl;
import jdk.internal.foreign.MemoryAddressImpl;
import jdk.internal.foreign.NativeMemorySegmentImpl;
import jdk.internal.foreign.Utils;
import jdk.internal.ref.CleanerFactory;
import jdk.internal.reflect.CallerSensitive;
import java.lang.ref.Cleaner;
/**
* A memory address models a reference into a memory location. Memory addresses are typically obtained using the
* {@link MemorySegment#address()} method, and can refer to either off-heap or on-heap memory.
* {@link MemorySegment#address()} method, and can refer to either off-heap or on-heap memory. Off-heap memory
* addresses are referred to as <em>native</em> memory addresses (see {@link #isNative()}). Native memory addresses
* allow clients to obtain a raw memory address (expressed as a long value) which can then be used e.g. when interacting
* with native code.
* <p>
* Given an address, it is possible to compute its offset relative to a given segment, which can be useful
* when performing memory dereference operations using a memory access var handle (see {@link MemoryHandles}).
* <p>
* A memory address is associated with a {@linkplain ResourceScope resource scope}; the resource scope determines the
* lifecycle of the memory address, and whether the address can be used from multiple threads. Memory addresses
* obtained from {@linkplain #ofLong(long) numeric values}, or from native code, are associated with the
* {@linkplain ResourceScope#globalScope() global resource scope}. Memory addresses obtained from segments
* are associated with the same scope as the segment from which they have been obtained.
* <p>
* All implementations of this interface must be <a href="{@docRoot}/java.base/java/lang/doc-files/ValueBased.html">value-based</a>;
* programmers should treat instances that are {@linkplain #equals(Object) equal} as interchangeable and should not
* use instances for synchronization, or unpredictable behavior may occur. For example, in a future release,
@ -49,14 +58,10 @@ import java.lang.ref.Cleaner;
* <p> Unless otherwise specified, passing a {@code null} argument, or an array argument containing one or more {@code null}
* elements to a method in this class causes a {@link NullPointerException NullPointerException} to be thrown. </p>
*
* @apiNote In the future, if the Java language permits, {@link MemoryAddress}
* may become a {@code sealed} interface, which would prohibit subclassing except by
* explicitly permitted types.
*
* @implSpec
* Implementations of this interface are immutable, thread-safe and <a href="{@docRoot}/java.base/java/lang/doc-files/ValueBased.html">value-based</a>.
*/
public interface MemoryAddress extends Addressable {
public sealed interface MemoryAddress extends Addressable permits MemoryAddressImpl {
@Override
default MemoryAddress address() {
@ -70,9 +75,15 @@ public interface MemoryAddress extends Addressable {
*/
MemoryAddress addOffset(long offset);
/**
* Returns the resource scope associated with this memory address.
* @return the resource scope associated with this memory address.
*/
ResourceScope scope();
/**
* Returns the offset of this memory address into the given segment. More specifically, if both the segment's
* base address and this address are off-heap addresses, the result is computed as
* base address and this address are native addresses, the result is computed as
* {@code this.toRawLongValue() - segment.address().toRawLongValue()}. Otherwise, if both addresses are in the form
* {@code (B, O1)}, {@code (B, O2)}, where {@code B} is the same base heap object and {@code O1}, {@code O2}
* are byte offsets (relative to the base object) associated with this address and the segment's base address,
@ -86,82 +97,94 @@ public interface MemoryAddress extends Addressable {
* @return the offset of this memory address into the given segment.
* @param segment the segment relative to which this address offset should be computed
* @throws IllegalArgumentException if {@code segment} is not compatible with this address; this can happen, for instance,
* when {@code segment} models a heap memory region, while this address models an off-heap memory address.
* when {@code segment} models a heap memory region, while this address is a {@linkplain #isNative() native} address.
*/
long segmentOffset(MemorySegment segment);
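
A small sketch of the offset computation; allocateNative(long, ResourceScope) is assumed from the wider MemorySegment API:

    try (ResourceScope scope = ResourceScope.newConfinedScope()) {
        MemorySegment segment = MemorySegment.allocateNative(100, scope);
        MemoryAddress addr = segment.address().addOffset(16);
        long offset = addr.segmentOffset(segment);   // 16
    }
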
/**
* Returns a new confined native memory segment with given size, and whose base address is this address; the returned segment has its own temporal
* bounds, and can therefore be closed. This method can be useful when interacting with custom native memory sources (e.g. custom allocators),
* where an address to some underlying memory region is typically obtained from native code (often as a plain {@code long} value).
* <p>
* The returned segment will feature all <a href="#access-modes">access modes</a>
* (see {@link MemorySegment#ALL_ACCESS}), and its confinement thread is the current thread (see {@link Thread#currentThread()}).
* Returns a new native memory segment with given size and resource scope (replacing the scope already associated
* with this address), and whose base address is this address. This method can be useful when interacting with custom
* native memory sources (e.g. custom allocators), where an address to some
* underlying memory region is typically obtained from native code (often as a plain {@code long} value).
* The returned segment is not read-only (see {@link MemorySegment#isReadOnly()}), and is associated with the
* provided resource scope.
* <p>
* Clients should ensure that the address and bounds refer to a valid region of memory that is accessible for reading and,
* if appropriate, writing; an attempt to access an invalid memory location from Java code will either return an arbitrary value,
* have no visible effect, or cause an unspecified exception to be thrown.
* <p>
* Calling {@link MemorySegment#close()} on the returned segment will <em>not</em> result in releasing any
* memory resources which might implicitly be associated with the segment. This method is equivalent to the following code:
* This method is equivalent to the following code:
* <pre>{@code
asSegmentRestricted(byteSize, null, null);
asSegment(byteSize, null, scope);
* }</pre>
* This method is <em>restricted</em>. Restricted methods are unsafe, and, if used incorrectly, their use might crash
* <p>
* This method is <a href="package-summary.html#restricted"><em>restricted</em></a>.
* Restricted methods are unsafe, and, if used incorrectly, their use might crash
* the JVM or, worse, silently result in memory corruption. Thus, clients should refrain from depending on
* restricted methods, and use safe and supported functionalities, where possible.
*
* @param bytesSize the desired size.
* @return a new confined native memory segment with given base address and size.
* @param scope the native segment scope.
* @return a new native memory segment with given base address, size and scope.
* @throws IllegalArgumentException if {@code bytesSize <= 0}.
* @throws UnsupportedOperationException if this address is a heap address.
* @throws IllegalAccessError if the runtime property {@code foreign.restricted} is not set to either
* {@code permit}, {@code warn} or {@code debug} (the default value is set to {@code deny}).
* @throws IllegalStateException if either the scope associated with this address or the provided scope
* has already been closed, or if access occurs from a thread other than the thread owning either
* scope.
* @throws UnsupportedOperationException if this address is not a {@linkplain #isNative() native} address.
* @throws IllegalCallerException if access to this method occurs from a module {@code M} and the command line option
* {@code --enable-native-access} is either absent, or does not mention the module name {@code M}, or
* {@code ALL-UNNAMED} in case {@code M} is an unnamed module.
*/
default MemorySegment asSegmentRestricted(long bytesSize) {
return asSegmentRestricted(bytesSize, null, null);
}
@CallerSensitive
MemorySegment asSegment(long bytesSize, ResourceScope scope);
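
A sketch of wrapping a raw pointer obtained from native code; the 0x1234L value is a stand-in for a real native address, and the call requires native access:

    long raw = 0x1234L;                        // stand-in for a pointer from native code
    MemoryAddress base = MemoryAddress.ofLong(raw);
    try (ResourceScope scope = ResourceScope.newConfinedScope()) {
        MemorySegment seg = base.asSegment(64, scope);   // restricted
        int first = MemoryAccess.getInt(seg);            // dereference within the new bounds
    }
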
/**
* Returns a new confined native memory segment with given size, and whose base address is this address; the returned segment has its own temporal
* bounds, and can therefore be closed. This method can be useful when interacting with custom native memory sources (e.g. custom allocators),
* where an address to some underlying memory region is typically obtained from native code (often as a plain {@code long} value).
* <p>
* The returned segment will feature all <a href="#access-modes">access modes</a>
* (see {@link MemorySegment#ALL_ACCESS}), and its confinement thread is the current thread (see {@link Thread#currentThread()}).
* Moreover, the returned segment will keep a strong reference to the supplied attachment object (if any), which can
* be useful in cases where the lifecycle of the segment is dependent on that of some other external resource.
* Returns a new native memory segment with given size and resource scope (replacing the scope already associated
* with this address), and whose base address is this address. This method can be useful when interacting with custom
* native memory sources (e.g. custom allocators), where an address to some
* underlying memory region is typically obtained from native code (often as a plain {@code long} value).
* The returned segment is associated with the provided resource scope.
* <p>
* Clients should ensure that the address and bounds refer to a valid region of memory that is accessible for reading and,
* if appropriate, writing; an attempt to access an invalid memory location from Java code will either return an arbitrary value,
* have no visible effect, or cause an unspecified exception to be thrown.
* <p>
* Calling {@link MemorySegment#close()} on the returned segment will <em>not</em> result in releasing any
* memory resources which might implicitly be associated with the segment, but will result in calling the
* provided cleanup action (if any).
* Calling {@link ResourceScope#close()} on the scope associated with the returned segment will result in calling
* the provided cleanup action (if any).
* <p>
* Both the cleanup action and the attachment object (if any) will be preserved under terminal operations such as
* {@link MemorySegment#handoff(Thread)}, {@link MemorySegment#share()} and {@link MemorySegment#registerCleaner(Cleaner)}.
* <p>
* This method is <em>restricted</em>. Restricted methods are unsafe, and, if used incorrectly, their use might crash
* This method is <a href="package-summary.html#restricted"><em>restricted</em></a>.
* Restricted methods are unsafe, and, if used incorrectly, their use might crash
* the JVM or, worse, silently result in memory corruption. Thus, clients should refrain from depending on
* restricted methods, and use safe and supported functionalities, where possible.
*
* @param bytesSize the desired size.
* @param cleanupAction the cleanup action; can be {@code null}.
* @param attachment an attachment object that will be kept strongly reachable by the returned segment; can be {@code null}.
* @return a new confined native memory segment with given base address and size.
* @param scope the native segment scope.
* @return a new native memory segment with given base address, size and scope.
* @throws IllegalArgumentException if {@code bytesSize <= 0}.
* @throws UnsupportedOperationException if this address is a heap address.
* @throws IllegalAccessError if the runtime property {@code foreign.restricted} is not set to either
* {@code permit}, {@code warn} or {@code debug} (the default value is set to {@code deny}).
* @throws IllegalStateException if either the scope associated with this address or the provided scope
* have already been closed, or if access occurs from a thread other than the thread owning either
* scope.
* @throws UnsupportedOperationException if this address is not a {@linkplain #isNative() native} address.
* @throws IllegalCallerException if access to this method occurs from a module {@code M} and the command line option
* {@code --enable-native-access} is either absent, or does not mention the module name {@code M}, or
* {@code ALL-UNNAMED} in case {@code M} is an unnamed module.
*/
MemorySegment asSegmentRestricted(long bytesSize, Runnable cleanupAction, Object attachment);
@CallerSensitive
MemorySegment asSegment(long bytesSize, Runnable cleanupAction, ResourceScope scope);
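
A sketch of the cleanup-action variant ({@code free} is a hypothetical native deallocation routine, matching however the memory behind {@code addr} was allocated):

MemorySegment segment = addr.asSegment(100, () -> free(addr), scope);
// closing the scope releases the segment and runs the cleanup action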
/**
* Returns the raw long value associated with this memory address.
* @return The raw long value associated with this memory address.
* @throws UnsupportedOperationException if this memory address is a heap address.
* Is this an off-heap memory address?
* @return true, if this is an off-heap memory address.
*/
boolean isNative();
/**
* Returns the raw long value associated with this native memory address.
* @return The raw long value associated with this native memory address.
* @throws UnsupportedOperationException if this memory address is not a {@linkplain #isNative() native} address.
* @throws IllegalStateException if the scope associated with this address has already been closed,
* or if access occurs from a thread other than the thread owning that scope.
*/
long toRawLongValue();
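
As a small sketch, the two queries above are typically used together ({@code segment} here is any memory segment):

MemoryAddress addr = segment.address();
if (addr.isNative()) {
    long raw = addr.toRawLongValue(); // only native addresses have a raw long value
}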
@ -169,11 +192,10 @@ public interface MemoryAddress extends Addressable {
* Compares the specified object with this address for equality. Returns {@code true} if and only if the specified
* object is also an address, and it refers to the same memory location as this address.
*
* @apiNote two addresses might be considered equal even though their associated segments differ. This
* can happen, for instance, if the segment associated with one address is a <em>slice</em>
* (see {@link MemorySegment#asSlice(long, long)}) of the segment associated with the other address. Moreover,
* two addresses might be considered equal despite differences in the temporal bounds associated with their
* corresponding segments.
* @apiNote two addresses might be considered equal even though their associated resource scopes differ. This
* can happen, for instance, if the same memory address is used to create memory segments with different
* scopes (using {@link #asSegment(long, ResourceScope)}), and the base address of the resulting segments is
* then compared.
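* For example (a sketch, where {@code addr} is a native address and {@code scope1}, {@code scope2} are distinct scopes):
* <blockquote><pre>{@code
MemorySegment s1 = addr.asSegment(10, scope1);
MemorySegment s2 = addr.asSegment(10, scope2);
s1.address().equals(s2.address()); // true, despite the different scopes
* }</pre></blockquote>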
*
* @param that the object to be compared for equality with this address.
* @return {@code true} if the specified object is equal to this address.
@ -189,12 +211,14 @@ public interface MemoryAddress extends Addressable {
int hashCode();
/**
* The off-heap memory address instance modelling the {@code NULL} address.
* The native memory address instance modelling the {@code NULL} address, associated
* with the {@linkplain ResourceScope#globalScope() global} resource scope.
*/
MemoryAddress NULL = new MemoryAddressImpl(null, 0L);
/**
* Obtain an off-heap memory address instance from given long address.
* Obtain a native memory address instance from given long address. The returned address is associated
* with the {@linkplain ResourceScope#globalScope() global} resource scope.
* @param value the long address.
* @return the new memory address instance.
*/

View File

@ -44,16 +44,16 @@ import java.util.Objects;
* (see {@link MemoryHandles#varHandle(Class, ByteOrder)},
* {@link MemoryHandles#varHandle(Class, long, ByteOrder)}). This determines the variable type
* (all primitive types but {@code void} and {@code boolean} are supported), as well as the alignment constraint and the
* byte order associated to a memory access var handle. The resulting memory access var handle can then be combined in various ways
* byte order associated with a memory access var handle. The resulting memory access var handle can then be combined in various ways
* to emulate different addressing modes. The var handles created by this class feature a <em>mandatory</em> coordinate type
* (of type {@link MemorySegment}), and one {@code long} coordinate type, which represents the offset, in bytes, relative
* to the segment, at which dereference should occur.
* <p>
* As an example, consider the memory layout expressed by a {@link GroupLayout} instance constructed as follows:
* <blockquote><pre>{@code
GroupLayout seq = MemoryLayout.ofStruct(
MemoryLayout.ofPaddingBits(32),
MemoryLayout.ofValueBits(32, ByteOrder.BIG_ENDIAN).withName("value")
GroupLayout seq = MemoryLayout.structLayout(
MemoryLayout.paddingLayout(32),
MemoryLayout.valueLayout(32, ByteOrder.BIG_ENDIAN).withName("value")
);
* }</pre></blockquote>
* To access the member layout named {@code value}, we can construct a memory access var handle as follows:
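* One possible construction (a sketch: it combines the {@code varHandle} factory above with the
* {@code insertCoordinates} combinator shown below, so that the fixed offset of the {@code value} member
* (4 bytes, skipping the padding) is pre-bound):
* <blockquote><pre>{@code
VarHandle handle = MemoryHandles.varHandle(int.class, ByteOrder.BIG_ENDIAN); //(MemorySegment, long) -> int
* }</pre></blockquote>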
@ -103,7 +103,7 @@ handle = MemoryHandles.insertCoordinates(handle, 1, 4); //(MemorySegment) -> int
*/
public final class MemoryHandles {
private final static JavaLangInvokeAccess JLI = SharedSecrets.getJavaLangInvokeAccess();
private static final JavaLangInvokeAccess JLI = SharedSecrets.getJavaLangInvokeAccess();
private MemoryHandles() {
//sorry, just the one!
@ -254,7 +254,7 @@ public final class MemoryHandles {
* Java {@code int} to avoid dealing with negative values, which would be
* the case if modeled as a Java {@code short}. This is illustrated in the following example:
* <blockquote><pre>{@code
MemorySegment segment = MemorySegment.allocateNative(2);
MemorySegment segment = MemorySegment.allocateNative(2, ResourceScope.newImplicitScope());
VarHandle SHORT_VH = MemoryLayouts.JAVA_SHORT.varHandle(short.class);
VarHandle INT_VH = MemoryHandles.asUnsigned(SHORT_VH, int.class);
SHORT_VH.set(segment, (short)-1);

View File

@ -49,7 +49,7 @@ import java.util.stream.Stream;
* A memory layout can be used to describe the contents of a memory segment in a <em>language neutral</em> fashion.
* There are two leaves in the layout hierarchy, <em>value layouts</em>, which are used to represent values of given size and kind (see
* {@link ValueLayout}) and <em>padding layouts</em> which are used, as the name suggests, to represent a portion of a memory
* segment whose contents should be ignored, and which are primarily present for alignment reasons (see {@link MemoryLayout#ofPaddingBits(long)}).
* segment whose contents should be ignored, and which are primarily present for alignment reasons (see {@link MemoryLayout#paddingLayout(long)}).
* Some common value layout constants are defined in the {@link MemoryLayouts} class.
* <p>
* More complex layouts can be derived from simpler ones: a <em>sequence layout</em> denotes a repetition of one or more
@ -68,11 +68,11 @@ import java.util.stream.Stream;
* The above declaration can be modelled using a layout object, as follows:
*
* <blockquote><pre>{@code
SequenceLayout taggedValues = MemoryLayout.ofSequence(5,
MemoryLayout.ofStruct(
MemoryLayout.ofValueBits(8, ByteOrder.nativeOrder()).withName("kind"),
MemoryLayout.ofPaddingBits(24),
MemoryLayout.ofValueBits(32, ByteOrder.nativeOrder()).withName("value")
SequenceLayout taggedValues = MemoryLayout.sequenceLayout(5,
MemoryLayout.structLayout(
MemoryLayout.valueLayout(8, ByteOrder.nativeOrder()).withName("kind"),
MemoryLayout.paddingLayout(24),
MemoryLayout.valueLayout(32, ByteOrder.nativeOrder()).withName("value")
)
).withName("TaggedValues");
* }</pre></blockquote>
@ -144,17 +144,17 @@ MemoryLayout value = taggedValues.select(PathElement.sequenceElement(),
*
* And, we can also replace the layout named {@code value} with another layout, as follows:
* <blockquote><pre>{@code
MemoryLayout taggedValuesWithHole = taggedValues.map(l -> MemoryLayout.ofPadding(32),
MemoryLayout taggedValuesWithHole = taggedValues.map(l -> MemoryLayout.paddingLayout(32),
PathElement.sequenceElement(), PathElement.groupElement("value"));
* }</pre></blockquote>
*
* That is, the above declaration is identical to the following, more verbose one:
* <blockquote><pre>{@code
MemoryLayout taggedValuesWithHole = MemoryLayout.ofSequence(5,
MemoryLayout.ofStruct(
MemoryLayout.ofValueBits(8, ByteOrder.nativeOrder()).withName("kind").
MemoryLayout.ofPaddingBits(32),
MemoryLayout.ofPaddingBits(32)
MemoryLayout taggedValuesWithHole = MemoryLayout.sequenceLayout(5,
MemoryLayout.structLayout(
MemoryLayout.valueLayout(8, ByteOrder.nativeOrder()).withName("kind"),
MemoryLayout.paddingLayout(32),
MemoryLayout.paddingLayout(32)
));
* }</pre></blockquote>
*
@ -191,17 +191,13 @@ long offset2 = (long) offsetHandle.invokeExact(2L); // 16
*
* Layouts can be optionally associated with one or more <em>attributes</em>. A layout attribute forms a <em>name/value</em>
* pair, where the name is a {@link String} and the value is a {@link Constable}. The most common form of layout attribute
* is the <em>layout name</em> (see {@link #LAYOUT_NAME}), a custom name that can be associated to memory layouts and that can be referred to when
* is the <em>layout name</em> (see {@link #LAYOUT_NAME}), a custom name that can be associated with memory layouts and that can be referred to when
* constructing <a href="MemoryLayout.html#layout-paths"><em>layout paths</em></a>.
*
* @apiNote In the future, if the Java language permits, {@link MemoryLayout}
* may become a {@code sealed} interface, which would prohibit subclassing except by
* explicitly permitted types.
*
* @implSpec
* Implementations of this interface are immutable, thread-safe and <a href="{@docRoot}/java.base/java/lang/doc-files/ValueBased.html">value-based</a>.
*/
public interface MemoryLayout extends Constable {
public sealed interface MemoryLayout extends Constable permits AbstractLayout, SequenceLayout, GroupLayout, PaddingLayout, ValueLayout {
/**
* Returns an {@link Optional} containing the nominal descriptor for this
@ -218,7 +214,7 @@ public interface MemoryLayout extends Constable {
* Does this layout have a specified size? A layout does not have a specified size if it is (or contains) a sequence layout whose
* size is unspecified (see {@link SequenceLayout#elementCount()}).
*
* Value layouts (see {@link ValueLayout}) and padding layouts (see {@link MemoryLayout#ofPaddingBits(long)})
* Value layouts (see {@link ValueLayout}) and padding layouts (see {@link MemoryLayout#paddingLayout(long)})
* <em>always</em> have a specified size, therefore this method always returns {@code true} in these cases.
*
* @return {@code true}, if this layout has a specified size.
@ -267,7 +263,7 @@ public interface MemoryLayout extends Constable {
* }</pre></blockquote>
*
* @param name the layout name.
* @return a new layout which is the same as this layout, except for the <em>name</em> associated to it.
* @return a new layout which is the same as this layout, except for the <em>name</em> associated with it.
* @see MemoryLayout#name()
*/
MemoryLayout withName(String name);
@ -316,7 +312,7 @@ public interface MemoryLayout extends Constable {
* Creates a new layout which features the desired alignment constraint.
*
* @param bitAlignment the layout alignment constraint, expressed in bits.
* @return a new layout which is the same as this layout, except for the alignment constraint associated to it.
* @return a new layout which is the same as this layout, except for the alignment constraint associated with it.
* @throws IllegalArgumentException if {@code bitAlignment} is not a power of two, or if it's less than 8.
*/
MemoryLayout withBitAlignment(long bitAlignment);
@ -357,6 +353,8 @@ public interface MemoryLayout extends Constable {
* layout path contains one or more path elements that select multiple sequence element indices
* (see {@link PathElement#sequenceElement()} and {@link PathElement#sequenceElement(long, long)}).
* @throws UnsupportedOperationException if one of the layouts traversed by the layout path has unspecified size.
* @throws NullPointerException if either {@code elements == null}, or if any of the elements
* in {@code elements} is {@code null}.
*/
default long bitOffset(PathElement... elements) {
return computePathOp(LayoutPath.rootPath(this, MemoryLayout::bitSize), LayoutPath::offset,
@ -380,8 +378,9 @@ public interface MemoryLayout extends Constable {
* }</pre></blockquote>
*
* where {@code x_1}, {@code x_2}, ... {@code x_n} are <em>dynamic</em> values provided as {@code long}
* arguments, whereas {@code c_1}, {@code c_2}, ... {@code c_m} and {@code s_0}, {@code s_1}, ... {@code s_n} are
* <em>static</em> stride constants which are derived from the layout path.
* arguments, whereas {@code c_1}, {@code c_2}, ... {@code c_m} are <em>static</em> offset constants
* and {@code s_0}, {@code s_1}, ... {@code s_n} are <em>static</em> stride constants which are derived from
* the layout path.
*
* @param elements the layout path elements.
* @return a method handle that can be used to compute the bit offset of the layout element
@ -406,6 +405,8 @@ public interface MemoryLayout extends Constable {
* (see {@link PathElement#sequenceElement()} and {@link PathElement#sequenceElement(long, long)}).
* @throws UnsupportedOperationException if one of the layouts traversed by the layout path has unspecified size,
* or if {@code bitOffset(elements)} is not a multiple of 8.
* @throws NullPointerException if either {@code elements == null}, or if any of the elements
* in {@code elements} is {@code null}.
*/
default long byteOffset(PathElement... elements) {
return Utils.bitsToBytesOrThrow(bitOffset(elements), Utils.bitsToBytesThrowOffset);
@ -429,8 +430,9 @@ public interface MemoryLayout extends Constable {
* }</pre></blockquote>
*
* where {@code x_1}, {@code x_2}, ... {@code x_n} are <em>dynamic</em> values provided as {@code long}
* arguments, whereas {@code c_1}, {@code c_2}, ... {@code c_m} and {@code s_0}, {@code s_1}, ... {@code s_n} are
* <em>static</em> stride constants which are derived from the layout path.
* arguments, whereas {@code c_1}, {@code c_2}, ... {@code c_m} are <em>static</em> offset constants
* and {@code s_0}, {@code s_1}, ... {@code s_n} are <em>static</em> stride constants which are derived from
* the layout path.
*
* <p>The method handle will throw an {@link UnsupportedOperationException} if the computed
* offset in bits is not a multiple of 8.
@ -466,9 +468,10 @@ public interface MemoryLayout extends Constable {
offset = c_1 + c_2 + ... + c_m + (x_1 * s_1) + (x_2 * s_2) + ... + (x_n * s_n)
* }</pre></blockquote>
*
* where {@code x_1}, {@code x_2}, ... {@code x_n} are <em>dynamic</em> values provided as optional {@code long}
* access coordinates, whereas {@code c_1}, {@code c_2}, ... {@code c_m} and {@code s_0}, {@code s_1}, ... {@code s_n} are
* <em>static</em> stride constants which are derived from the layout path.
* where {@code x_1}, {@code x_2}, ... {@code x_n} are <em>dynamic</em> values provided as {@code long}
* arguments, whereas {@code c_1}, {@code c_2}, ... {@code c_m} are <em>static</em> offset constants
* and {@code s_0}, {@code s_1}, ... {@code s_n} are <em>static</em> stride constants which are derived from
* the layout path.
*
* @apiNote the resulting var handle will feature an additional {@code long} access coordinate for every
* unspecified sequence access component contained in this layout path. Moreover, the resulting var handle
@ -489,6 +492,50 @@ public interface MemoryLayout extends Constable {
Set.of(), elements);
}
/**
* Creates a method handle which, given a memory segment, returns a {@linkplain MemorySegment#asSlice(long,long) slice}
* corresponding to the layout selected by a given layout path, where the path is considered rooted in this layout.
*
* <p>The returned method handle has a return type of {@code MemorySegment}, features a {@code MemorySegment}
* parameter as the leading parameter representing the segment to be sliced, and features as many trailing {@code long}
* parameter types as there are free dimensions in the provided layout path (see {@link PathElement#sequenceElement()}),
* where the order of the parameters corresponds to the order of the path elements.
* The returned method handle can be used to create a slice similar to using {@link MemorySegment#asSlice(long, long)},
* but where the offset argument is dynamically computed based on indices specified when invoking the method handle.
*
* <p>The offset of the returned segment is computed as follows:
*
* <blockquote><pre>{@code
bitOffset = c_1 + c_2 + ... + c_m + (x_1 * s_1) + (x_2 * s_2) + ... + (x_n * s_n)
offset = bitOffset / 8
* }</pre></blockquote>
*
* where {@code x_1}, {@code x_2}, ... {@code x_n} are <em>dynamic</em> values provided as {@code long}
* arguments, whereas {@code c_1}, {@code c_2}, ... {@code c_m} are <em>static</em> offset constants
* and {@code s_0}, {@code s_1}, ... {@code s_n} are <em>static</em> stride constants which are derived from
* the layout path.
*
* <p>After the offset is computed, the returned segment is created as if by calling:
* <blockquote><pre>{@code
segment.asSlice(offset, layout.byteSize());
* }</pre></blockquote>
*
* where {@code segment} is the segment to be sliced, and where {@code layout} is the layout selected by the given
* layout path, as per {@link MemoryLayout#select(PathElement...)}.
*
* <p>The method handle will throw an {@link UnsupportedOperationException} if the computed
* offset in bits is not a multiple of 8.
*
* @param elements the layout path elements.
* @return a method handle which can be used to create a slice of the selected layout element, given a segment.
* @throws UnsupportedOperationException if the size of the selected layout in bits is not a multiple of 8.
*/
default MethodHandle sliceHandle(PathElement... elements) {
return computePathOp(LayoutPath.rootPath(this, MemoryLayout::bitSize), LayoutPath::sliceHandle,
Set.of(), elements);
}
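
For illustration, a minimal sketch of {@code sliceHandle} in action (a sequence of five 32-bit ints; the handle invocation declares {@code Throwable}, omitted here for brevity):

SequenceLayout seq = MemoryLayout.sequenceLayout(5, MemoryLayouts.JAVA_INT);
MethodHandle slicer = seq.sliceHandle(PathElement.sequenceElement());
MemorySegment segment = MemorySegment.allocateNative(seq.byteSize(), ResourceScope.newImplicitScope());
MemorySegment third = (MemorySegment) slicer.invokeExact(segment, 2L); // 4-byte slice at offset 8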
/**
* Selects the layout from a path rooted in this layout.
*
@ -535,7 +582,7 @@ public interface MemoryLayout extends Constable {
}
/**
* Is this a padding layout (e.g. a layout created from {@link #ofPaddingBits(long)}) ?
* Is this a padding layout (e.g. a layout created from {@link #paddingLayout(long)})?
* @return true, if this layout is a padding layout.
*/
boolean isPadding();
@ -554,14 +601,10 @@ public interface MemoryLayout extends Constable {
* <p> Unless otherwise specified, passing a {@code null} argument, or an array argument containing one or more {@code null}
* elements to a method in this class causes a {@link NullPointerException NullPointerException} to be thrown. </p>
*
* @apiNote In the future, if the Java language permits, {@link PathElement}
* may become a {@code sealed} interface, which would prohibit subclassing except by
* explicitly permitted types.
*
* @implSpec
* Implementations of this interface are immutable and thread-safe.
*/
interface PathElement {
sealed interface PathElement permits LayoutPath.PathElementImpl {
/**
* Returns a path element which selects a member layout with given name from a given group layout.
@ -679,7 +722,7 @@ E * (S + I * F)
* @return the new selector layout.
* @throws IllegalArgumentException if {@code size <= 0}.
*/
static MemoryLayout ofPaddingBits(long size) {
static MemoryLayout paddingLayout(long size) {
AbstractLayout.checkSize(size);
return new PaddingLayout(size);
}
@ -692,7 +735,7 @@ E * (S + I * F)
* @return a new value layout.
* @throws IllegalArgumentException if {@code size <= 0}.
*/
static ValueLayout ofValueBits(long size, ByteOrder order) {
static ValueLayout valueLayout(long size, ByteOrder order) {
Objects.requireNonNull(order);
AbstractLayout.checkSize(size);
return new ValueLayout(order, size);
@ -706,7 +749,7 @@ E * (S + I * F)
* @return the new sequence layout with given element layout and size.
* @throws IllegalArgumentException if {@code elementCount < 0}.
*/
static SequenceLayout ofSequence(long elementCount, MemoryLayout elementLayout) {
static SequenceLayout sequenceLayout(long elementCount, MemoryLayout elementLayout) {
AbstractLayout.checkSize(elementCount, true);
OptionalLong size = OptionalLong.of(elementCount);
return new SequenceLayout(size, Objects.requireNonNull(elementLayout));
@ -718,7 +761,7 @@ E * (S + I * F)
* @param elementLayout the element layout of the sequence layout.
* @return the new sequence layout with given element layout.
*/
static SequenceLayout ofSequence(MemoryLayout elementLayout) {
static SequenceLayout sequenceLayout(MemoryLayout elementLayout) {
return new SequenceLayout(OptionalLong.empty(), Objects.requireNonNull(elementLayout));
}
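
As a quick sketch of the renamed factories composing together (the {@code MemoryLayouts} constants are used for brevity):

SequenceLayout matrix = MemoryLayout.sequenceLayout(10,
    MemoryLayout.sequenceLayout(10, MemoryLayouts.JAVA_INT));
GroupLayout number = MemoryLayout.unionLayout(
    MemoryLayouts.JAVA_INT.withName("i"),
    MemoryLayouts.JAVA_FLOAT.withName("f"));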
@ -728,7 +771,7 @@ E * (S + I * F)
* @param elements The member layouts of the <em>struct</em> group layout.
* @return a new <em>struct</em> group layout with given member layouts.
*/
static GroupLayout ofStruct(MemoryLayout... elements) {
static GroupLayout structLayout(MemoryLayout... elements) {
Objects.requireNonNull(elements);
return new GroupLayout(GroupLayout.Kind.STRUCT,
Stream.of(elements)
@ -742,7 +785,7 @@ E * (S + I * F)
* @param elements The member layouts of the <em>union</em> layout.
* @return a new <em>union</em> group layout with given member layouts.
*/
static GroupLayout ofUnion(MemoryLayout... elements) {
static GroupLayout unionLayout(MemoryLayout... elements) {
Objects.requireNonNull(elements);
return new GroupLayout(GroupLayout.Kind.UNION,
Stream.of(elements)

View File

@ -46,87 +46,87 @@ public final class MemoryLayouts {
/**
* A value layout constant with size of one byte, and byte order set to {@link ByteOrder#LITTLE_ENDIAN}.
*/
public static final ValueLayout BITS_8_LE = MemoryLayout.ofValueBits(8, ByteOrder.LITTLE_ENDIAN);
public static final ValueLayout BITS_8_LE = MemoryLayout.valueLayout(8, ByteOrder.LITTLE_ENDIAN);
/**
* A value layout constant with size of two bytes, and byte order set to {@link ByteOrder#LITTLE_ENDIAN}.
*/
public static final ValueLayout BITS_16_LE = MemoryLayout.ofValueBits(16, ByteOrder.LITTLE_ENDIAN);
public static final ValueLayout BITS_16_LE = MemoryLayout.valueLayout(16, ByteOrder.LITTLE_ENDIAN);
/**
* A value layout constant with size of four bytes, and byte order set to {@link ByteOrder#LITTLE_ENDIAN}.
*/
public static final ValueLayout BITS_32_LE = MemoryLayout.ofValueBits(32, ByteOrder.LITTLE_ENDIAN);
public static final ValueLayout BITS_32_LE = MemoryLayout.valueLayout(32, ByteOrder.LITTLE_ENDIAN);
/**
* A value layout constant with size of eight bytes, and byte order set to {@link ByteOrder#LITTLE_ENDIAN}.
*/
public static final ValueLayout BITS_64_LE = MemoryLayout.ofValueBits(64, ByteOrder.LITTLE_ENDIAN);
public static final ValueLayout BITS_64_LE = MemoryLayout.valueLayout(64, ByteOrder.LITTLE_ENDIAN);
/**
* A value layout constant with size of one byte, and byte order set to {@link ByteOrder#BIG_ENDIAN}.
*/
public static final ValueLayout BITS_8_BE = MemoryLayout.ofValueBits(8, ByteOrder.BIG_ENDIAN);
public static final ValueLayout BITS_8_BE = MemoryLayout.valueLayout(8, ByteOrder.BIG_ENDIAN);
/**
* A value layout constant with size of two bytes, and byte order set to {@link ByteOrder#BIG_ENDIAN}.
*/
public static final ValueLayout BITS_16_BE = MemoryLayout.ofValueBits(16, ByteOrder.BIG_ENDIAN);
public static final ValueLayout BITS_16_BE = MemoryLayout.valueLayout(16, ByteOrder.BIG_ENDIAN);
/**
* A value layout constant with size of four bytes, and byte order set to {@link ByteOrder#BIG_ENDIAN}.
*/
public static final ValueLayout BITS_32_BE = MemoryLayout.ofValueBits(32, ByteOrder.BIG_ENDIAN);
public static final ValueLayout BITS_32_BE = MemoryLayout.valueLayout(32, ByteOrder.BIG_ENDIAN);
/**
* A value layout constant with size of eight bytes, and byte order set to {@link ByteOrder#BIG_ENDIAN}.
*/
public static final ValueLayout BITS_64_BE = MemoryLayout.ofValueBits(64, ByteOrder.BIG_ENDIAN);
public static final ValueLayout BITS_64_BE = MemoryLayout.valueLayout(64, ByteOrder.BIG_ENDIAN);
/**
* A padding layout constant with size of one byte.
*/
public static final MemoryLayout PAD_8 = MemoryLayout.ofPaddingBits(8);
public static final MemoryLayout PAD_8 = MemoryLayout.paddingLayout(8);
/**
* A padding layout constant with size of two bytes.
*/
public static final MemoryLayout PAD_16 = MemoryLayout.ofPaddingBits(16);
public static final MemoryLayout PAD_16 = MemoryLayout.paddingLayout(16);
/**
* A padding layout constant with size of four bytes.
*/
public static final MemoryLayout PAD_32 = MemoryLayout.ofPaddingBits(32);
public static final MemoryLayout PAD_32 = MemoryLayout.paddingLayout(32);
/**
* A padding layout constant with size of eight bytes.
*/
public static final MemoryLayout PAD_64 = MemoryLayout.ofPaddingBits(64);
public static final MemoryLayout PAD_64 = MemoryLayout.paddingLayout(64);
/**
* A value layout constant whose size is the same as that of a machine address (e.g. {@code size_t}), and byte order set to {@link ByteOrder#nativeOrder()}.
*/
public static final ValueLayout ADDRESS = MemoryLayout.ofValueBits(Unsafe.ADDRESS_SIZE * 8, ByteOrder.nativeOrder());
public static final ValueLayout ADDRESS = MemoryLayout.valueLayout(Unsafe.ADDRESS_SIZE * 8, ByteOrder.nativeOrder());
/**
* A value layout constant whose size is the same as that of a Java {@code byte}, and byte order set to {@link ByteOrder#nativeOrder()}.
*/
public static final ValueLayout JAVA_BYTE = MemoryLayout.ofValueBits(8, ByteOrder.nativeOrder());
public static final ValueLayout JAVA_BYTE = MemoryLayout.valueLayout(8, ByteOrder.nativeOrder());
/**
* A value layout constant whose size is the same as that of a Java {@code char}, and byte order set to {@link ByteOrder#nativeOrder()}.
*/
public static final ValueLayout JAVA_CHAR = MemoryLayout.ofValueBits(16, ByteOrder.nativeOrder());
public static final ValueLayout JAVA_CHAR = MemoryLayout.valueLayout(16, ByteOrder.nativeOrder());
/**
* A value layout constant whose size is the same as that of a Java {@code short}, and byte order set to {@link ByteOrder#nativeOrder()}.
*/
public static final ValueLayout JAVA_SHORT = MemoryLayout.ofValueBits(16, ByteOrder.nativeOrder());
public static final ValueLayout JAVA_SHORT = MemoryLayout.valueLayout(16, ByteOrder.nativeOrder());
/**
* A value layout constant whose size is the same as that of a Java {@code int}, and byte order set to {@link ByteOrder#nativeOrder()}.
*/
public static final ValueLayout JAVA_INT = MemoryLayout.ofValueBits(32, ByteOrder.nativeOrder());
public static final ValueLayout JAVA_INT = MemoryLayout.valueLayout(32, ByteOrder.nativeOrder());
/**
* A value layout constant whose size is the same as that of a Java {@code long}, and byte order set to {@link ByteOrder#nativeOrder()}.
@ -136,13 +136,13 @@ public final class MemoryLayouts {
MemoryLayouts.JAVA_LONG.byteAlignment() == MemoryLayouts.ADDRESS.byteSize();
* }</pre></blockquote>
*/
public static final ValueLayout JAVA_LONG = MemoryLayout.ofValueBits(64, ByteOrder.nativeOrder())
public static final ValueLayout JAVA_LONG = MemoryLayout.valueLayout(64, ByteOrder.nativeOrder())
.withBitAlignment(ADDRESS.bitSize());
/**
* A value layout constant whose size is the same as that of a Java {@code float}, and byte order set to {@link ByteOrder#nativeOrder()}.
*/
public static final ValueLayout JAVA_FLOAT = MemoryLayout.ofValueBits(32, ByteOrder.nativeOrder());
public static final ValueLayout JAVA_FLOAT = MemoryLayout.valueLayout(32, ByteOrder.nativeOrder());
/**
* A value layout constant whose size is the same as that of a Java {@code double}, and byte order set to {@link ByteOrder#nativeOrder()}.
@ -152,6 +152,6 @@ public final class MemoryLayouts {
MemoryLayouts.JAVA_DOUBLE.byteAlignment() == MemoryLayouts.ADDRESS.byteSize();
* }</pre></blockquote>
*/
public static final ValueLayout JAVA_DOUBLE = MemoryLayout.ofValueBits(64, ByteOrder.nativeOrder())
public static final ValueLayout JAVA_DOUBLE = MemoryLayout.valueLayout(64, ByteOrder.nativeOrder())
.withBitAlignment(ADDRESS.bitSize());
}
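
A short usage sketch pairing one of these constants with a memory access var handle (an implicit scope is assumed):

VarHandle intHandle = MemoryLayouts.JAVA_INT.varHandle(int.class);
MemorySegment segment = MemorySegment.allocateNative(MemoryLayouts.JAVA_INT.byteSize(), ResourceScope.newImplicitScope());
intHandle.set(segment, 42);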

View File

@ -1,472 +0,0 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package jdk.incubator.foreign;
import jdk.internal.foreign.AbstractMemorySegmentImpl;
import jdk.internal.foreign.AbstractNativeScope;
import jdk.internal.foreign.Utils;
import java.lang.invoke.VarHandle;
import java.lang.reflect.Array;
import java.nio.ByteOrder;
import java.util.Objects;
import java.util.OptionalLong;
import java.util.function.Function;
import java.util.stream.Stream;
/**
* A native scope is an abstraction which provides shared temporal bounds for one or more allocations, backed
* by off-heap memory. Native scopes can be either <em>bounded</em> or <em>unbounded</em>, depending on whether the size
* of the native scope is known statically. If an application knows beforehand how much memory it needs to allocate,
* then using a <em>bounded</em> native scope will typically provide better performance than independently allocating the memory
* for each value (e.g. using {@link MemorySegment#allocateNative(long)}), or using an <em>unbounded</em> native scope.
* For this reason, using a bounded native scope is recommended in cases where programs might need to emulate native stack allocation.
* <p>
* Allocation scopes are thread-confined (see {@link #ownerThread()}); as such, the resulting {@link MemorySegment} instances
* returned by the native scope will be backed by memory segments confined by the same owner thread as the native scope's
* owner thread.
* <p>
* To allow for more usability, it is possible for a native scope to reclaim ownership of an existing memory segment
* (see {@link MemorySegment#handoff(NativeScope)}). This might be useful to allow one or more segments which were independently
* created to share the same life-cycle as a given native scope - which in turns enables a client to group all memory
* allocation and usage under a single <em>try-with-resources block</em>.
*
* <p> Unless otherwise specified, passing a {@code null} argument, or an array argument containing one or more {@code null}
* elements to a method in this class causes a {@link NullPointerException NullPointerException} to be thrown. </p>
*
* @apiNote In the future, if the Java language permits, {@link NativeScope}
* may become a {@code sealed} interface, which would prohibit subclassing except by
* explicitly permitted types.
*/
public interface NativeScope extends AutoCloseable {
/**
* If this native scope is bounded, returns the size, in bytes, of this native scope.
* @return the size, in bytes, of this native scope (if available).
*/
OptionalLong byteSize();
/**
* The thread owning this native scope.
* @return the thread owning this native scope.
*/
Thread ownerThread();
/**
* Returns the number of allocated bytes in this native scope.
* @return the number of allocated bytes in this native scope.
*/
long allocatedBytes();
/**
* Allocate a block of memory in this native scope with given layout and initialize it with given byte value.
* The segment returned by this method is associated with a segment which cannot be closed. Moreover, the returned
* segment must conform to the layout alignment constraints.
* @param layout the layout of the block of memory to be allocated.
* @param value the value to be set on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws OutOfMemoryError if there is not enough space left in this native scope, that is, if this is a
* bounded allocation scope, and {@code byteSize().getAsLong() - allocatedBytes() < layout.byteSize()}.
* @throws IllegalArgumentException if {@code layout.byteSize()} does not conform to the size of a byte value.
*/
default MemorySegment allocate(ValueLayout layout, byte value) {
Objects.requireNonNull(layout);
VarHandle handle = layout.varHandle(byte.class);
MemorySegment addr = allocate(layout);
handle.set(addr, value);
return addr;
}
/**
* Allocate a block of memory in this native scope with given layout and initialize it with given char value.
* The segment returned by this method is associated with a segment which cannot be closed. Moreover, the returned
* segment must conform to the layout alignment constraints.
* @param layout the layout of the block of memory to be allocated.
* @param value the value to be set on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws OutOfMemoryError if there is not enough space left in this native scope, that is, if this is a
* bounded allocation scope, and {@code byteSize().getAsLong() - allocatedBytes() < layout.byteSize()}.
* @throws IllegalArgumentException if {@code layout.byteSize()} does not conform to the size of a char value.
*/
default MemorySegment allocate(ValueLayout layout, char value) {
Objects.requireNonNull(layout);
VarHandle handle = layout.varHandle(char.class);
MemorySegment addr = allocate(layout);
handle.set(addr, value);
return addr;
}
/**
* Allocate a block of memory in this native scope with given layout and initialize it with given short value.
* The segment returned by this method is associated with a segment which cannot be closed. Moreover, the returned
* segment must conform to the layout alignment constraints.
* @param layout the layout of the block of memory to be allocated.
* @param value the value to be set on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws OutOfMemoryError if there is not enough space left in this native scope, that is, if this is a
* bounded allocation scope, and {@code byteSize().getAsLong() - allocatedBytes() < layout.byteSize()}.
* @throws IllegalArgumentException if {@code layout.byteSize()} does not conform to the size of a short value.
*/
default MemorySegment allocate(ValueLayout layout, short value) {
Objects.requireNonNull(layout);
VarHandle handle = layout.varHandle(short.class);
MemorySegment addr = allocate(layout);
handle.set(addr, value);
return addr;
}
/**
* Allocate a block of memory in this native scope with given layout and initialize it with given int value.
* The segment returned by this method is associated with a segment which cannot be closed. Moreover, the returned
* segment must conform to the layout alignment constraints.
* @param layout the layout of the block of memory to be allocated.
* @param value the value to be set on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws OutOfMemoryError if there is not enough space left in this native scope, that is, if this is a
* bounded allocation scope, and {@code byteSize().getAsLong() - allocatedBytes() < layout.byteSize()}.
* @throws IllegalArgumentException if {@code layout.byteSize()} does not conform to the size of an int value.
*/
default MemorySegment allocate(ValueLayout layout, int value) {
Objects.requireNonNull(layout);
VarHandle handle = layout.varHandle(int.class);
MemorySegment addr = allocate(layout);
handle.set(addr, value);
return addr;
}
/**
* Allocate a block of memory in this native scope with given layout and initialize it with given float value.
* The segment returned by this method is associated with a segment which cannot be closed. Moreover, the returned
* segment must conform to the layout alignment constraints.
* @param layout the layout of the block of memory to be allocated.
* @param value the value to be set on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws OutOfMemoryError if there is not enough space left in this native scope, that is, if this is a
* bounded allocation scope, and {@code byteSize().getAsLong() - allocatedBytes() < layout.byteSize()}.
* @throws IllegalArgumentException if {@code layout.byteSize()} does not conform to the size of a float value.
*/
default MemorySegment allocate(ValueLayout layout, float value) {
Objects.requireNonNull(layout);
VarHandle handle = layout.varHandle(float.class);
MemorySegment addr = allocate(layout);
handle.set(addr, value);
return addr;
}
/**
* Allocate a block of memory in this native scope with given layout and initialize it with given long value.
* The segment returned by this method is associated with a segment which cannot be closed. Moreover, the returned
* segment must conform to the layout alignment constraints.
* @param layout the layout of the block of memory to be allocated.
* @param value the value to be set on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws OutOfMemoryError if there is not enough space left in this native scope, that is, if this is a
* bounded allocation scope, and {@code byteSize().getAsLong() - allocatedBytes() < layout.byteSize()}.
* @throws IllegalArgumentException if {@code layout.byteSize()} does not conform to the size of a long value.
*/
default MemorySegment allocate(ValueLayout layout, long value) {
Objects.requireNonNull(layout);
VarHandle handle = layout.varHandle(long.class);
MemorySegment addr = allocate(layout);
handle.set(addr, value);
return addr;
}
/**
* Allocate a block of memory in this native scope with given layout and initialize it with given double value.
* The segment returned by this method is associated with a segment which cannot be closed. Moreover, the returned
* segment must conform to the layout alignment constraints.
* @param layout the layout of the block of memory to be allocated.
* @param value the value to be set on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws OutOfMemoryError if there is not enough space left in this native scope, that is, if this is a
* bounded allocation scope, and {@code byteSize().getAsLong() - allocatedBytes() < layout.byteSize()}.
* @throws IllegalArgumentException if {@code layout.byteSize()} does not conform to the size of a double value.
*/
default MemorySegment allocate(ValueLayout layout, double value) {
Objects.requireNonNull(layout);
VarHandle handle = layout.varHandle(double.class);
MemorySegment addr = allocate(layout);
handle.set(addr, value);
return addr;
}
/**
* Allocate a block of memory in this native scope with given layout and initialize it with given address value
* (expressed as an {@link Addressable} instance).
* The address value might be narrowed according to the platform address size (see {@link MemoryLayouts#ADDRESS}).
* The segment returned by this method cannot be closed. Moreover, the returned
* segment must conform to the layout alignment constraints.
* @param layout the layout of the block of memory to be allocated.
* @param value the value to be set on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws OutOfMemoryError if there is not enough space left in this native scope, that is, if this is a
* bounded allocation scope, and {@code byteSize().getAsLong() - allocatedBytes() < layout.byteSize()}.
* @throws IllegalArgumentException if {@code layout.byteSize() != MemoryLayouts.ADDRESS.byteSize()}.
*/
default MemorySegment allocate(ValueLayout layout, Addressable value) {
Objects.requireNonNull(value);
Objects.requireNonNull(layout);
if (MemoryLayouts.ADDRESS.byteSize() != layout.byteSize()) {
throw new IllegalArgumentException("Layout size mismatch - " + layout.byteSize() + " != " + MemoryLayouts.ADDRESS.byteSize());
}
switch ((int)layout.byteSize()) {
case 4: return allocate(layout, (int)value.address().toRawLongValue());
case 8: return allocate(layout, value.address().toRawLongValue());
default: throw new UnsupportedOperationException("Unsupported pointer size"); // should not get here
}
}
/**
* Allocate a block of memory in this native scope with given layout and initialize it with given byte array.
* The segment returned by this method is associated with a segment which cannot be closed. Moreover, the returned
* segment must conform to the layout alignment constraints.
* @param elementLayout the element layout of the array to be allocated.
* @param array the array to be copied on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws OutOfMemoryError if there is not enough space left in this native scope, that is, if this is a
* bounded allocation scope, and {@code byteSize().getAsLong() - allocatedBytes() < (elementLayout.byteSize() * array.length)}.
* @throws IllegalArgumentException if {@code elementLayout.byteSize()} does not conform to the size of a byte value.
*/
default MemorySegment allocateArray(ValueLayout elementLayout, byte[] array) {
return copyArrayWithSwapIfNeeded(array, elementLayout, MemorySegment::ofArray);
}
/**
* Allocate a block of memory in this native scope with given layout and initialize it with given short array.
* The segment returned by this method is associated with a segment which cannot be closed. Moreover, the returned
* segment must conform to the layout alignment constraints.
* @param elementLayout the element layout of the array to be allocated.
* @param array the array to be copied on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws OutOfMemoryError if there is not enough space left in this native scope, that is, if this is a
* bounded allocation scope, and {@code byteSize().getAsLong() - allocatedBytes() < (elementLayout.byteSize() * array.length)}.
* @throws IllegalArgumentException if {@code elementLayout.byteSize()} does not conform to the size of a short value.
*/
default MemorySegment allocateArray(ValueLayout elementLayout, short[] array) {
return copyArrayWithSwapIfNeeded(array, elementLayout, MemorySegment::ofArray);
}
/**
* Allocate a block of memory in this native scope with given layout and initialize it with given char array.
* The segment returned by this method is associated with a segment which cannot be closed. Moreover, the returned
* segment must conform to the layout alignment constraints.
* @param elementLayout the element layout of the array to be allocated.
* @param array the array to be copied on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws OutOfMemoryError if there is not enough space left in this native scope, that is, if this is a
* bounded allocation scope, and {@code byteSize().getAsLong() - allocatedBytes() < (elementLayout.byteSize() * array.length)}.
* @throws IllegalArgumentException if {@code elementLayout.byteSize()} does not conform to the size of a char value.
*/
default MemorySegment allocateArray(ValueLayout elementLayout, char[] array) {
return copyArrayWithSwapIfNeeded(array, elementLayout, MemorySegment::ofArray);
}
/**
* Allocate a block of memory in this native scope with given layout and initialize it with given int array.
* The segment returned by this method is associated with a segment which cannot be closed. Moreover, the returned
* segment must conform to the layout alignment constraints.
* @param elementLayout the element layout of the array to be allocated.
* @param array the array to be copied on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws OutOfMemoryError if there is not enough space left in this native scope, that is, if this is a
* bounded allocation scope, and {@code byteSize().getAsLong() - allocatedBytes() < (elementLayout.byteSize() * array.length)}.
* @throws IllegalArgumentException if {@code elementLayout.byteSize()} does not conform to the size of an int value.
*/
default MemorySegment allocateArray(ValueLayout elementLayout, int[] array) {
return copyArrayWithSwapIfNeeded(array, elementLayout, MemorySegment::ofArray);
}
/**
* Allocate a block of memory in this native scope with given layout and initialize it with given float array.
* The segment returned by this method is associated with a segment which cannot be closed. Moreover, the returned
* segment must conform to the layout alignment constraints.
* @param elementLayout the element layout of the array to be allocated.
* @param array the array to be copied on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws OutOfMemoryError if there is not enough space left in this native scope, that is, if this is a
* bounded allocation scope, and {@code byteSize().getAsLong() - allocatedBytes() < (elementLayout.byteSize() * array.length)}.
* @throws IllegalArgumentException if {@code elementLayout.byteSize()} does not conform to the size of a float value.
*/
default MemorySegment allocateArray(ValueLayout elementLayout, float[] array) {
return copyArrayWithSwapIfNeeded(array, elementLayout, MemorySegment::ofArray);
}
/**
* Allocate a block of memory in this native scope with given layout and initialize it with given long array.
* The segment returned by this method is associated with a segment which cannot be closed. Moreover, the returned
* segment must conform to the layout alignment constraints.
* @param elementLayout the element layout of the array to be allocated.
* @param array the array to be copied on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws OutOfMemoryError if there is not enough space left in this native scope, that is, if this is a
* bounded allocation scope, and {@code byteSize().getAsLong() - allocatedBytes() < (elementLayout.byteSize() * array.length)}.
* @throws IllegalArgumentException if {@code elementLayout.byteSize()} does not conform to the size of a long value.
*/
default MemorySegment allocateArray(ValueLayout elementLayout, long[] array) {
return copyArrayWithSwapIfNeeded(array, elementLayout, MemorySegment::ofArray);
}
/**
* Allocate a block of memory in this native scope with given layout and initialize it with given double array.
* The segment returned by this method is associated with a segment which cannot be closed. Moreover, the returned
* segment must conform to the layout alignment constraints.
* @param elementLayout the element layout of the array to be allocated.
* @param array the array to be copied on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws OutOfMemoryError if there is not enough space left in this native scope, that is, if this is a
* bounded allocation scope, and {@code byteSize().getAsLong() - allocatedBytes() < (elementLayout.byteSize() * array.length)}.
* @throws IllegalArgumentException if {@code elementLayout.byteSize()} does not conform to the size of a double value.
*/
default MemorySegment allocateArray(ValueLayout elementLayout, double[] array) {
return copyArrayWithSwapIfNeeded(array, elementLayout, MemorySegment::ofArray);
}
/**
* Allocate a block of memory in this native scope with given layout and initialize it with given address array.
* The address value of each array element might be narrowed according to the platform address size (see {@link MemoryLayouts#ADDRESS}).
* The segment returned by this method is associated with a segment which cannot be closed. Moreover, the returned
* segment must conform to the layout alignment constraints.
* @param elementLayout the element layout of the array to be allocated.
* @param array the array to be copied on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws OutOfMemoryError if there is not enough space left in this native scope, that is, if this is a
* bounded allocation scope, and {@code byteSize().getAsLong() - allocatedBytes() < (elementLayout.byteSize() * array.length)}.
* @throws IllegalArgumentException if {@code layout.byteSize() != MemoryLayouts.ADDRESS.byteSize()}.
*/
default MemorySegment allocateArray(ValueLayout elementLayout, Addressable[] array) {
Objects.requireNonNull(elementLayout);
Objects.requireNonNull(array);
Stream.of(array).forEach(Objects::requireNonNull);
if (MemoryLayouts.ADDRESS.byteSize() != elementLayout.byteSize()) {
throw new IllegalArgumentException("Layout size mismatch - " + elementLayout.byteSize() + " != " + MemoryLayouts.ADDRESS.byteSize());
}
switch ((int)elementLayout.byteSize()) {
case 4: return copyArrayWithSwapIfNeeded(Stream.of(array)
.mapToInt(a -> (int)a.address().toRawLongValue()).toArray(),
elementLayout, MemorySegment::ofArray);
case 8: return copyArrayWithSwapIfNeeded(Stream.of(array)
.mapToLong(a -> a.address().toRawLongValue()).toArray(),
elementLayout, MemorySegment::ofArray);
default: throw new UnsupportedOperationException("Unsupported pointer size"); // should not get here
}
}
private <Z> MemorySegment copyArrayWithSwapIfNeeded(Z array, ValueLayout elementLayout,
Function<Z, MemorySegment> heapSegmentFactory) {
Objects.requireNonNull(array);
Objects.requireNonNull(elementLayout);
Utils.checkPrimitiveCarrierCompat(array.getClass().componentType(), elementLayout);
MemorySegment addr = allocate(MemoryLayout.ofSequence(Array.getLength(array), elementLayout));
if (elementLayout.byteSize() == 1 || (elementLayout.order() == ByteOrder.nativeOrder())) {
addr.copyFrom(heapSegmentFactory.apply(array));
} else {
((AbstractMemorySegmentImpl)addr).copyFromSwap(heapSegmentFactory.apply(array), elementLayout.byteSize());
}
return addr;
}
/**
* Allocate a block of memory in this native scope with given layout. The segment returned by this method is
* associated with a segment which cannot be closed. Moreover, the returned segment must conform to the layout alignment constraints.
* @param layout the layout of the block of memory to be allocated.
* @return a segment for the newly allocated memory block.
* @throws OutOfMemoryError if there is not enough space left in this native scope, that is, if this is a
* bounded allocation scope, and {@code byteSize().getAsLong() - allocatedBytes() < layout.byteSize()}.
*/
default MemorySegment allocate(MemoryLayout layout) {
Objects.requireNonNull(layout);
return allocate(layout.byteSize(), layout.byteAlignment());
}
/**
* Allocate a block of memory corresponding to an array with given element layout and size.
* The segment returned by this method is associated with a segment which cannot be closed.
* Moreover, the returned segment must conform to the layout alignment constraints. This is equivalent to the
* following code:
* <pre>{@code
allocate(MemoryLayout.ofSequence(count, elementLayout));
* }</pre>
* @param elementLayout the array element layout.
* @param count the array element count.
* @return a segment for the newly allocated memory block.
* @throws OutOfMemoryError if there is not enough space left in this native scope, that is, if this is a
* bounded allocation scope, and {@code byteSize().getAsLong() - allocatedBytes() < (elementLayout.byteSize() * count)}.
*/
default MemorySegment allocateArray(MemoryLayout elementLayout, long count) {
Objects.requireNonNull(elementLayout);
return allocate(MemoryLayout.ofSequence(count, elementLayout));
}
/**
* Allocate a block of memory in this native scope with given size. The segment returned by this method is
* associated with a segment which cannot be closed. Moreover, the returned segment must be aligned to {@code size}.
* @param bytesSize the size (in bytes) of the block of memory to be allocated.
* @return a segment for the newly allocated memory block.
* @throws OutOfMemoryError if there is not enough space left in this native scope, that is, if
* {@code limit() - size() < bytesSize}.
*/
default MemorySegment allocate(long bytesSize) {
return allocate(bytesSize, bytesSize);
}
/**
* Allocate a block of memory in this native scope with given size and alignment constraint.
* The segment returned by this method is associated with a segment which cannot be closed. Moreover,
* the returned segment must be aligned to {@code alignment}.
* @param bytesSize the size (in bytes) of the block of memory to be allocated.
* @param bytesAlignment the alignment (in bytes) of the block of memory to be allocated.
* @return a segment for the newly allocated memory block.
* @throws OutOfMemoryError if there is not enough space left in this native scope, that is, if
* {@code limit() - size() < bytesSize}.
*/
MemorySegment allocate(long bytesSize, long bytesAlignment);
/**
* Close this native scope; calling this method will render any segment obtained through this native scope
* unusable and might release any backing memory resources associated with this native scope.
*/
@Override
void close();
/**
* Creates a new bounded native scope, backed by off-heap memory.
* @param size the size of the native scope.
* @return a new bounded native scope, with given size (in bytes).
*/
static NativeScope boundedScope(long size) {
return new AbstractNativeScope.BoundedNativeScope(size);
}
/**
* Creates a new unbounded native scope, backed by off-heap memory.
* @return a new unbounded native scope.
*/
static NativeScope unboundedScope() {
return new AbstractNativeScope.UnboundedNativeScope();
}
}

View File

@ -0,0 +1,285 @@
/*
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.incubator.foreign;
import jdk.internal.foreign.ResourceScopeImpl;
import java.lang.invoke.MethodHandle;
import java.lang.ref.Cleaner;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.util.Objects;
import java.util.Spliterator;
/**
* A resource scope manages the lifecycle of one or more resources. Resources (e.g. {@link MemorySegment}) associated
* with a resource scope can only be accessed while the resource scope is <em>alive</em> (see {@link #isAlive()}),
* and by the thread associated with the resource scope (if any).
*
* <h2>Explicit resource scopes</h2>
*
* Resource scopes obtained from {@link #newConfinedScope()} and {@link #newSharedScope()} support <em>deterministic deallocation</em>;
* we call these resource scopes <em>explicit scopes</em>. Explicit resource scopes can be closed explicitly (see {@link ResourceScope#close()}).
* When a resource scope is closed, it is no longer <em>alive</em> (see {@link #isAlive()}), and subsequent operations on
* resources associated with that scope (e.g. attempting to access a {@link MemorySegment} instance) will fail with {@link IllegalStateException}.
* <p>
* Closing a resource scope will cause all the cleanup actions associated with that scope (see {@link #addCloseAction(Runnable)}) to be called.
* Moreover, closing a resource scope might trigger the release of the underlying memory resources associated with that scope; for instance:
* <ul>
* <li>closing the scope associated with a native memory segment results in <em>freeing</em> the native memory associated with it
* (see {@link MemorySegment#allocateNative(long, ResourceScope)}, or {@link SegmentAllocator#arenaAllocator(ResourceScope)})</li>
* <li>closing the scope associated with a mapped memory segment results in the backing memory-mapped file being unmapped
* (see {@link MemorySegment#mapFile(Path, long, long, FileChannel.MapMode, ResourceScope)})</li>
* <li>closing the scope associated with an upcall stub results in releasing the stub
* (see {@link CLinker#upcallStub(MethodHandle, FunctionDescriptor, ResourceScope)})</li>
* </ul>
* <p>
* Sometimes, explicit scopes can be associated with a {@link Cleaner} instance (see {@link #newConfinedScope(Cleaner)} and
* {@link #newSharedScope(Cleaner)}). We call these resource scopes <em>managed</em> resource scopes. A managed resource scope
* is closed automatically once the scope instance becomes <a href="../../../java/lang/ref/package.html#reachability">unreachable</a>.
* <p>
* Managed scopes can be useful to allow for predictable, deterministic resource deallocation, while still preventing accidental native memory leaks.
* If a managed resource scope is closed explicitly, no further action will be taken when the scope becomes unreachable;
* that is, cleanup actions (see {@link #addCloseAction(Runnable)}) associated with a resource scope, whether managed or not,
* are called <em>exactly once</em>.
*
* <h2>Implicit resource scopes</h2>
*
* Resource scopes obtained from {@link #newImplicitScope()} cannot be closed explicitly. We call these resource scopes
* <em>implicit scopes</em>. Calling {@link #close()} on an implicit resource scope always results in an exception.
* Resources associated with implicit scopes are released once the scope instance becomes
* <a href="../../../java/lang/ref/package.html#reachability">unreachable</a>.
* <p>
* An important implicit resource scope is the so-called {@linkplain #globalScope() global scope}; the global scope is
* an implicit scope that is guaranteed to never become <a href="../../../java/lang/ref/package.html#reachability">unreachable</a>.
* As a result, the global scope will never attempt to release resources associated with it. Such resources must, where
* needed, be managed independently by clients.
*
* <h2><a id = "thread-confinement">Thread confinement</a></h2>
*
* Resource scopes can be further divided into two categories: <em>thread-confined</em> resource scopes, and <em>shared</em>
* resource scopes.
* <p>
* Confined resource scopes (see {@link #newConfinedScope()}), support strong thread-confinement guarantees. Upon creation,
* they are assigned an <em>owner thread</em>, typically the thread which initiated the creation operation (see {@link #ownerThread()}).
* After creating a confined resource scope, only the owner thread will be allowed to directly manipulate the resources
* associated with this resource scope. Any attempt to perform resource access from a thread other than the
* owner thread will result in a runtime failure.
* <p>
* Shared resource scopes (see {@link #newSharedScope()} and {@link #newImplicitScope()}), on the other hand, have no owner thread;
* as such, resources associated with a shared resource scope can be accessed by multiple threads.
* This might be useful when multiple threads need to access the same resource concurrently (e.g. in the case of parallel processing).
* For instance, a client might obtain a {@link Spliterator} from a shared segment, which can then be used to slice the
* segment and allow multiple threads to work in parallel on disjoint segment slices. The following code can be used to sum
* all int values in a memory segment in parallel:
*
* <blockquote><pre>{@code
SequenceLayout SEQUENCE_LAYOUT = MemoryLayout.sequenceLayout(1024, MemoryLayouts.JAVA_INT);
try (ResourceScope scope = ResourceScope.newSharedScope()) {
MemorySegment segment = MemorySegment.allocateNative(SEQUENCE_LAYOUT, scope);
VarHandle VH_int = SEQUENCE_LAYOUT.elementLayout().varHandle(int.class);
int sum = StreamSupport.stream(segment.spliterator(SEQUENCE_LAYOUT), true)
.mapToInt(s -> (int)VH_int.get(s.address()))
.sum();
}
* }</pre></blockquote>
*
* <p>
* Explicit shared resource scopes, while powerful, must be used with caution: if one or more threads accesses
* a resource associated with a shared scope while the scope is being closed from another thread, an exception might occur on both
* the accessing and the closing threads. Clients should refrain from attempting to close a shared resource scope repeatedly
* (e.g. keep calling {@link #close()} until no exception is thrown). Instead, clients of shared resource scopes
* should always ensure that proper synchronization mechanisms (e.g. using resource scope handles, see below) are put in place
* so that threads closing shared resource scopes can never race against threads accessing resources managed by the same scopes.
*
* <h2>Resource scope handles</h2>
*
* Resource scopes can be made <em>non-closeable</em> by acquiring one or more resource scope <em>handles</em> (see
* {@link #acquire()}). A resource scope handle can be used to make sure that resources associated with a given resource scope
* (either explicit or implicit) cannot be released for a certain period of time - e.g. during a critical region of code
* involving one or more resources associated with the scope. For instance, an explicit resource scope can only be closed
* <em>after</em> all the handles acquired against that scope have been closed (see {@link Handle#close()}).
* This can be useful when clients need to perform a critical operation on a memory segment, during which they have
* to ensure that the segment will not be released; this can be done as follows:
*
* <blockquote><pre>{@code
MemorySegment segment = ...
ResourceScope.Handle segmentHandle = segment.scope().acquire();
try {
<critical operation on segment>
} finally {
segment.scope().release(segmentHandle);
}
* }</pre></blockquote>
*
* Acquiring implicit resource scopes is also possible, but it is often unnecessary: since resources associated with
* an implicit scope will only be released when the scope becomes <a href="../../../java/lang/ref/package.html#reachability">unreachable</a>,
* clients can use e.g. {@link java.lang.ref.Reference#reachabilityFence(Object)} to make sure that resources associated
* with implicit scopes are not released prematurely. That said, the above code snippet works (trivially) for implicit scopes too.
*
* @implSpec
* Implementations of this interface are immutable, thread-safe and <a href="{@docRoot}/java.base/java/lang/doc-files/ValueBased.html">value-based</a>.
*/
public sealed interface ResourceScope extends AutoCloseable permits ResourceScopeImpl {
/**
* Is this resource scope alive?
* @return true, if this resource scope is alive.
* @see ResourceScope#close()
*/
boolean isAlive();
/**
* The thread owning this resource scope.
* @return the thread owning this resource scope, or {@code null} if this resource scope is shared.
*/
Thread ownerThread();
/**
* Is this resource scope an <em>implicit scope</em>?
* @return true if this scope is an <em>implicit scope</em>.
* @see #newImplicitScope()
* @see #globalScope()
*/
boolean isImplicit();
/**
* Closes this resource scope. As a side-effect, if this operation completes without exceptions, this scope will be marked
* as <em>not alive</em>, and subsequent operations on resources associated with this scope will fail with {@link IllegalStateException}.
* Additionally, upon successful closure, all native resources associated with this resource scope will be released.
*
* @apiNote This operation is not idempotent; that is, closing an already closed resource scope <em>always</em> results in an
* exception being thrown. This reflects a deliberate design choice: resource scope state transitions should be
* manifest in the client code; a failure in any of these transitions reveals a bug in the underlying application
* logic.
*
* @throws IllegalStateException if one of the following conditions is met:
* <ul>
* <li>this resource scope is not <em>alive</em></li>
* <li>this resource scope is confined, and this method is called from a thread other than the thread owning this resource scope</li>
* <li>this resource scope is shared and a resource associated with this scope is accessed while this method is called</li>
* <li>one or more handles (see {@link #acquire()}) associated with this resource scope have not been {@linkplain #release(Handle) released}</li>
* </ul>
* @throws UnsupportedOperationException if this resource scope is {@linkplain #isImplicit() implicit}.
*/
void close();
/**
* Add a custom cleanup action which will be executed when the resource scope is closed.
* The order in which custom cleanup actions are invoked once the scope is closed is unspecified.
* @param runnable the custom cleanup action to be associated with this scope.
* @throws IllegalStateException if this scope has already been closed.
*/
void addCloseAction(Runnable runnable);
/**
* Acquires a resource scope handle associated with this resource scope. An explicit resource scope cannot be
* {@linkplain #close() closed} until all the resource scope handles acquired from it have been {@linkplain #release(Handle) released}.
* @return a resource scope handle.
*/
Handle acquire();
/**
* Release the provided resource scope handle. This method is idempotent, that is, releasing the same handle
* multiple times has no effect.
* @param handle the resource scope handle to be released.
* @throws IllegalArgumentException if the provided handle is not associated with this scope.
*/
void release(Handle handle);
/**
* An abstraction modelling a resource scope handle. A resource scope handle is typically {@linkplain #acquire() acquired} by clients
* in order to prevent an explicit resource scope from being closed while executing a certain operation.
* Once obtained, resource scope handles can be {@linkplain #release(Handle) released}; an explicit resource scope can
* be closed only <em>after</em> all the resource scope handles acquired from it have been released.
*/
sealed interface Handle permits ResourceScopeImpl.HandleImpl {
/**
* Returns the resource scope associated with this handle.
* @return the resource scope associated with this handle.
*/
ResourceScope scope();
}
/**
* Create a new confined scope. The resulting scope is closeable, and is not managed by a {@link Cleaner}.
* @return a new confined scope.
*/
static ResourceScope newConfinedScope() {
return ResourceScopeImpl.createConfined(null);
}
/**
* Create a new confined scope managed by a {@link Cleaner}.
* @param cleaner the cleaner to be associated with the returned scope.
* @return a new confined scope, managed by {@code cleaner}.
* @throws NullPointerException if {@code cleaner == null}.
*/
static ResourceScope newConfinedScope(Cleaner cleaner) {
Objects.requireNonNull(cleaner);
return ResourceScopeImpl.createConfined(cleaner);
}
/**
* Create a new shared scope. The resulting scope is closeable, and is not managed by a {@link Cleaner}.
* @return a new shared scope.
*/
static ResourceScope newSharedScope() {
return ResourceScopeImpl.createShared(null);
}
/**
* Create a new shared scope managed by a {@link Cleaner}.
* @param cleaner the cleaner to be associated with the returned scope.
* @return a new shared scope, managed by {@code cleaner}.
* @throws NullPointerException if {@code cleaner == null}.
*/
static ResourceScope newSharedScope(Cleaner cleaner) {
Objects.requireNonNull(cleaner);
return ResourceScopeImpl.createShared(cleaner);
}
/**
* Create a new <em>implicit scope</em>. The implicit scope is a managed, shared, and non-closeable scope which only features
* <a href="ResourceScope.html#implicit-closure"><em>implicit closure</em></a>.
* Since implicit scopes can only be closed implicitly by the garbage collector, it is recommended that implicit
* scopes are only used in cases where deallocation performance is not a critical concern, to avoid unnecessary
* memory pressure.
*
* @return a new implicit scope.
*/
static ResourceScope newImplicitScope() {
return ResourceScopeImpl.createImplicitScope();
}
/**
* Returns an implicit scope which is assumed to be always alive.
* @return the global scope.
*/
static ResourceScope globalScope() {
return ResourceScopeImpl.GLOBAL;
}
}
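
A compact sketch contrasting the scope flavors documented above (hypothetical client code; MemoryAccess is assumed from this same API, and Cleaner is java.lang.ref.Cleaner):

    // Explicit confined scope: deterministic release, single owner thread.
    try (ResourceScope scope = ResourceScope.newConfinedScope()) {
        MemorySegment segment = MemorySegment.allocateNative(100, scope);
        MemoryAccess.setInt(segment, 42);
    } // native memory freed here; later access would throw IllegalStateException

    // Managed shared scope: close() works, but the Cleaner acts as a safety net
    // if the scope becomes unreachable without having been closed.
    ResourceScope managed = ResourceScope.newSharedScope(Cleaner.create());

    // Implicit scope: close() is not supported; memory is reclaimed only once
    // the scope becomes unreachable.
    MemorySegment implicitly = MemorySegment.allocateNative(100, ResourceScope.newImplicitScope());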

View File

@ -0,0 +1,466 @@
/*
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.incubator.foreign;
import jdk.internal.foreign.ArenaAllocator;
import jdk.internal.foreign.AbstractMemorySegmentImpl;
import jdk.internal.foreign.ResourceScopeImpl;
import jdk.internal.foreign.Utils;
import java.lang.invoke.VarHandle;
import java.lang.reflect.Array;
import java.nio.ByteOrder;
import java.util.Objects;
import java.util.function.Function;
import java.util.stream.Stream;
/**
* This interface models a memory allocator. Clients implementing this interface
* must implement the {@link #allocate(long, long)} method. This interface defines several default methods
* which can be useful to create segments from several kinds of Java values such as primitives and arrays.
* This interface can be seen as a thin wrapper around the basic capabilities for creating native segments
* (e.g. {@link MemorySegment#allocateNative(long, long, ResourceScope)}); since {@link SegmentAllocator} is a <em>functional interface</em>,
* clients can easily obtain a native allocator by using either a lambda expression or a method reference.
* <p>
* This interface provides a factory, namely {@link SegmentAllocator#ofScope(ResourceScope)} which can be used to obtain
* a <em>scoped</em> allocator, that is, an allocator which creates segments bound by a given scope. This can be useful
* when working inside a <em>try-with-resources</em> construct:
*
* <blockquote><pre>{@code
try (ResourceScope scope = ResourceScope.newConfinedScope()) {
SegmentAllocator allocator = SegmentAllocator.ofScope(scope);
...
}
* }</pre></blockquote>
*
* In addition, this interface also defines factories for commonly used allocators; for instance {@link #arenaAllocator(ResourceScope)}
* and {@link #arenaAllocator(long, ResourceScope)} are arena-style native allocators. Finally {@link #ofSegment(MemorySegment)}
* returns an allocator which wraps a segment (either on-heap or off-heap) and recycles its content upon each new allocation request.
*/
@FunctionalInterface
public interface SegmentAllocator {
/**
* Allocate a block of memory with given layout and initialize it with given byte value.
* @implSpec the default implementation for this method calls {@code this.allocate(layout)}.
* @param layout the layout of the block of memory to be allocated.
* @param value the value to be set on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws IllegalArgumentException if {@code layout.byteSize()} does not conform to the size of a byte value.
*/
default MemorySegment allocate(ValueLayout layout, byte value) {
Objects.requireNonNull(layout);
VarHandle handle = layout.varHandle(byte.class);
MemorySegment addr = allocate(layout);
handle.set(addr, value);
return addr;
}
/**
* Allocate a block of memory with given layout and initialize it with given char value.
* @implSpec the default implementation for this method calls {@code this.allocate(layout)}.
* @param layout the layout of the block of memory to be allocated.
* @param value the value to be set on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws IllegalArgumentException if {@code layout.byteSize()} does not conform to the size of a char value.
*/
default MemorySegment allocate(ValueLayout layout, char value) {
Objects.requireNonNull(layout);
VarHandle handle = layout.varHandle(char.class);
MemorySegment addr = allocate(layout);
handle.set(addr, value);
return addr;
}
/**
* Allocate a block of memory with given layout and initialize it with given short value.
* @implSpec the default implementation for this method calls {@code this.allocate(layout)}.
* @param layout the layout of the block of memory to be allocated.
* @param value the value to be set on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws IllegalArgumentException if {@code layout.byteSize()} does not conform to the size of a short value.
*/
default MemorySegment allocate(ValueLayout layout, short value) {
Objects.requireNonNull(layout);
VarHandle handle = layout.varHandle(short.class);
MemorySegment addr = allocate(layout);
handle.set(addr, value);
return addr;
}
/**
* Allocate a block of memory with given layout and initialize it with given int value.
* @implSpec the default implementation for this method calls {@code this.allocate(layout)}.
* @param layout the layout of the block of memory to be allocated.
* @param value the value to be set on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws IllegalArgumentException if {@code layout.byteSize()} does not conform to the size of an int value.
*/
default MemorySegment allocate(ValueLayout layout, int value) {
Objects.requireNonNull(layout);
VarHandle handle = layout.varHandle(int.class);
MemorySegment addr = allocate(layout);
handle.set(addr, value);
return addr;
}
/**
* Allocate a block of memory with given layout and initialize it with given float value.
* @implSpec the default implementation for this method calls {@code this.allocate(layout)}.
* @param layout the layout of the block of memory to be allocated.
* @param value the value to be set on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws IllegalArgumentException if {@code layout.byteSize()} does not conform to the size of a float value.
*/
default MemorySegment allocate(ValueLayout layout, float value) {
Objects.requireNonNull(layout);
VarHandle handle = layout.varHandle(float.class);
MemorySegment addr = allocate(layout);
handle.set(addr, value);
return addr;
}
/**
* Allocate a block of memory with given layout and initialize it with given long value.
* @implSpec the default implementation for this method calls {@code this.allocate(layout)}.
* @param layout the layout of the block of memory to be allocated.
* @param value the value to be set on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws IllegalArgumentException if {@code layout.byteSize()} does not conform to the size of a long value.
*/
default MemorySegment allocate(ValueLayout layout, long value) {
Objects.requireNonNull(layout);
VarHandle handle = layout.varHandle(long.class);
MemorySegment addr = allocate(layout);
handle.set(addr, value);
return addr;
}
/**
* Allocate a block of memory with given layout and initialize it with given double value.
* @implSpec the default implementation for this method calls {@code this.allocate(layout)}.
* @param layout the layout of the block of memory to be allocated.
* @param value the value to be set on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws IllegalArgumentException if {@code layout.byteSize()} does not conform to the size of a double value.
*/
default MemorySegment allocate(ValueLayout layout, double value) {
Objects.requireNonNull(layout);
VarHandle handle = layout.varHandle(double.class);
MemorySegment addr = allocate(layout);
handle.set(addr, value);
return addr;
}
/**
* Allocate a block of memory with given layout and initialize it with given address value
* (expressed as an {@link Addressable} instance).
* The address value might be narrowed according to the platform address size (see {@link MemoryLayouts#ADDRESS}).
* @implSpec the default implementation for this method calls {@code this.allocate(layout)}.
* @param layout the layout of the block of memory to be allocated.
* @param value the value to be set on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws IllegalArgumentException if {@code layout.byteSize() != MemoryLayouts.ADDRESS.byteSize()}.
*/
default MemorySegment allocate(ValueLayout layout, Addressable value) {
Objects.requireNonNull(value);
Objects.requireNonNull(layout);
if (MemoryLayouts.ADDRESS.byteSize() != layout.byteSize()) {
throw new IllegalArgumentException("Layout size mismatch - " + layout.byteSize() + " != " + MemoryLayouts.ADDRESS.byteSize());
}
return switch ((int)layout.byteSize()) {
case 4 -> allocate(layout, (int)value.address().toRawLongValue());
case 8 -> allocate(layout, value.address().toRawLongValue());
default -> throw new UnsupportedOperationException("Unsupported pointer size"); // should not get here
};
}
/**
* Allocate a block of memory with given layout and initialize it with given byte array.
* @implSpec the default implementation for this method calls {@code this.allocateArray(elementLayout, array.length)}.
* @param elementLayout the element layout of the array to be allocated.
* @param array the array to be copied on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws IllegalArgumentException if {@code elementLayout.byteSize()} does not conform to the size of a byte value.
*/
default MemorySegment allocateArray(ValueLayout elementLayout, byte[] array) {
return copyArrayWithSwapIfNeeded(array, elementLayout, MemorySegment::ofArray);
}
/**
* Allocate a block of memory with given layout and initialize it with given short array.
* @implSpec the default implementation for this method calls {@code this.allocateArray(elementLayout, array.length)}.
* @param elementLayout the element layout of the array to be allocated.
* @param array the array to be copied on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws IllegalArgumentException if {@code elementLayout.byteSize()} does not conform to the size of a short value.
*/
default MemorySegment allocateArray(ValueLayout elementLayout, short[] array) {
return copyArrayWithSwapIfNeeded(array, elementLayout, MemorySegment::ofArray);
}
/**
* Allocate a block of memory with given layout and initialize it with given char array.
* @implSpec the default implementation for this method calls {@code this.allocateArray(elementLayout, array.length)}.
* @param elementLayout the element layout of the array to be allocated.
* @param array the array to be copied on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws IllegalArgumentException if {@code elementLayout.byteSize()} does not conform to the size of a char value.
*/
default MemorySegment allocateArray(ValueLayout elementLayout, char[] array) {
return copyArrayWithSwapIfNeeded(array, elementLayout, MemorySegment::ofArray);
}
/**
* Allocate a block of memory with given layout and initialize it with given int array.
* @implSpec the default implementation for this method calls {@code this.allocateArray(elementLayout, array.length)}.
* @param elementLayout the element layout of the array to be allocated.
* @param array the array to be copied on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws IllegalArgumentException if {@code elementLayout.byteSize()} does not conform to the size of an int value.
*/
default MemorySegment allocateArray(ValueLayout elementLayout, int[] array) {
return copyArrayWithSwapIfNeeded(array, elementLayout, MemorySegment::ofArray);
}
/**
* Allocate a block of memory with given layout and initialize it with given float array.
* @implSpec the default implementation for this method calls {@code this.allocateArray(elementLayout, array.length)}.
* @param elementLayout the element layout of the array to be allocated.
* @param array the array to be copied on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws IllegalArgumentException if {@code elementLayout.byteSize()} does not conform to the size of a float value.
*/
default MemorySegment allocateArray(ValueLayout elementLayout, float[] array) {
return copyArrayWithSwapIfNeeded(array, elementLayout, MemorySegment::ofArray);
}
/**
* Allocate a block of memory with given layout and initialize it with given long array.
* @implSpec the default implementation for this method calls {@code this.allocateArray(elementLayout, array.length)}.
* @param elementLayout the element layout of the array to be allocated.
* @param array the array to be copied on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws IllegalArgumentException if {@code elementLayout.byteSize()} does not conform to the size of a long value.
*/
default MemorySegment allocateArray(ValueLayout elementLayout, long[] array) {
return copyArrayWithSwapIfNeeded(array, elementLayout, MemorySegment::ofArray);
}
/**
* Allocate a block of memory with given layout and initialize it with given double array.
* @implSpec the default implementation for this method calls {@code this.allocateArray(elementLayout, array.length)}.
* @param elementLayout the element layout of the array to be allocated.
* @param array the array to be copied on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws IllegalArgumentException if {@code elementLayout.byteSize()} does not conform to the size of a double value.
*/
default MemorySegment allocateArray(ValueLayout elementLayout, double[] array) {
return copyArrayWithSwapIfNeeded(array, elementLayout, MemorySegment::ofArray);
}
/**
* Allocate a block of memory with given layout and initialize it with given address array.
* The address value of each array element might be narrowed according to the platform address size (see {@link MemoryLayouts#ADDRESS}).
* @implSpec the default implementation for this method calls {@code this.allocateArray(elementLayout, array.length)}.
* @param elementLayout the element layout of the array to be allocated.
* @param array the array to be copied on the newly allocated memory block.
* @return a segment for the newly allocated memory block.
* @throws IllegalArgumentException if {@code elementLayout.byteSize() != MemoryLayouts.ADDRESS.byteSize()}.
*/
default MemorySegment allocateArray(ValueLayout elementLayout, Addressable[] array) {
Objects.requireNonNull(elementLayout);
Objects.requireNonNull(array);
Stream.of(array).forEach(Objects::requireNonNull);
if (MemoryLayouts.ADDRESS.byteSize() != elementLayout.byteSize()) {
throw new IllegalArgumentException("Layout size mismatch - " + elementLayout.byteSize() + " != " + MemoryLayouts.ADDRESS.byteSize());
}
return switch ((int)elementLayout.byteSize()) {
case 4 -> copyArrayWithSwapIfNeeded(Stream.of(array)
.mapToInt(a -> (int)a.address().toRawLongValue()).toArray(),
elementLayout, MemorySegment::ofArray);
case 8 -> copyArrayWithSwapIfNeeded(Stream.of(array)
.mapToLong(a -> a.address().toRawLongValue()).toArray(),
elementLayout, MemorySegment::ofArray);
default -> throw new UnsupportedOperationException("Unsupported pointer size"); // should not get here
};
}
private <Z> MemorySegment copyArrayWithSwapIfNeeded(Z array, ValueLayout elementLayout,
Function<Z, MemorySegment> heapSegmentFactory) {
Objects.requireNonNull(array);
Objects.requireNonNull(elementLayout);
Utils.checkPrimitiveCarrierCompat(array.getClass().componentType(), elementLayout);
MemorySegment addr = allocate(MemoryLayout.sequenceLayout(Array.getLength(array), elementLayout));
if (elementLayout.byteSize() == 1 || (elementLayout.order() == ByteOrder.nativeOrder())) {
addr.copyFrom(heapSegmentFactory.apply(array));
} else {
((AbstractMemorySegmentImpl)addr).copyFromSwap(heapSegmentFactory.apply(array), elementLayout.byteSize());
}
return addr;
}
/**
* Allocate a block of memory with given layout.
* @implSpec the default implementation for this method calls {@code this.allocate(layout.byteSize(), layout.byteAlignment())}.
* @param layout the layout of the block of memory to be allocated.
* @return a segment for the newly allocated memory block.
*/
default MemorySegment allocate(MemoryLayout layout) {
Objects.requireNonNull(layout);
return allocate(layout.byteSize(), layout.byteAlignment());
}
/**
* Allocate a block of memory corresponding to an array with given element layout and size.
* @implSpec the default implementation for this method calls {@code this.allocate(MemoryLayout.sequenceLayout(count, elementLayout))}.
* @param elementLayout the array element layout.
* @param count the array element count.
* @return a segment for the newly allocated memory block.
*/
default MemorySegment allocateArray(MemoryLayout elementLayout, long count) {
Objects.requireNonNull(elementLayout);
return allocate(MemoryLayout.sequenceLayout(count, elementLayout));
}
/**
* Allocate a block of memory with given size, with default alignment (1-byte aligned).
* @implSpec the default implementation for this method calls {@code this.allocate(bytesSize, 1)}.
* @param bytesSize the size (in bytes) of the block of memory to be allocated.
* @return a segment for the newly allocated memory block.
*/
default MemorySegment allocate(long bytesSize) {
return allocate(bytesSize, 1);
}
/**
* Allocate a block of memory with given size and alignment constraint.
* @param bytesSize the size (in bytes) of the block of memory to be allocated.
* @param bytesAlignment the alignment (in bytes) of the block of memory to be allocated.
* @return a segment for the newly allocated memory block.
*/
MemorySegment allocate(long bytesSize, long bytesAlignment);
/**
* Returns a native arena-based allocator which allocates a single memory segment of given size (using malloc),
* and then responds to allocation requests by returning different slices of that same segment
* (until no further allocation is possible).
* This can be useful when clients want to perform multiple allocation requests while avoiding the cost associated
* with allocating a new off-heap memory region upon each allocation request.
* <p>
* An allocator associated with a <em>shared</em> resource scope is thread-safe and allocation requests may be
* performed concurrently; conversely, if the arena allocator is associated with a <em>confined</em> resource scope,
* allocation requests can only occur from the thread owning the allocator's resource scope.
* <p>
* The returned allocator might throw an {@link OutOfMemoryError} if an incoming allocation request exceeds
* the allocator capacity.
*
* @param size the size (in bytes) of the allocation arena.
* @param scope the scope associated with the segments returned by this allocator.
* @return a new bounded arena-based allocator
* @throws IllegalArgumentException if {@code size <= 0}.
* @throws IllegalStateException if {@code scope} has been already closed, or if access occurs from a thread other
* than the thread owning {@code scope}.
*/
static SegmentAllocator arenaAllocator(long size, ResourceScope scope) {
Objects.requireNonNull(scope);
return scope.ownerThread() == null ?
new ArenaAllocator.BoundedSharedArenaAllocator(scope, size) :
new ArenaAllocator.BoundedArenaAllocator(scope, size);
}
/**
* Returns a native unbounded arena-based allocator.
* <p>
* The returned allocator allocates a memory segment {@code S} of a certain fixed size (using malloc) and then
* responds to allocation requests in one of the following ways:
* <ul>
* <li>if the size of the allocation request is smaller than the size of {@code S}, and {@code S} has a <em>free</em>
* slice {@code S'} which fits that allocation request, return {@code S'}.
* <li>if the size of the allocation request is smaller than the size of {@code S}, and {@code S} has no <em>free</em>
* slices which fit that allocation request, allocate a new segment {@code S'} (using malloc), which has the same size as {@code S},
* and set {@code S = S'}; the allocator then tries to respond to the same allocation request again.
* <li>if the size of the allocation request is bigger than the size of {@code S}, allocate a new segment {@code S'}
* (using malloc), which has a sufficient size to satisfy the allocation request, and return {@code S'}.
* </ul>
* <p>
* This segment allocator can be useful when clients want to perform multiple allocation requests while avoiding the
* cost associated with allocating a new off-heap memory region upon each allocation request.
* <p>
* An allocator associated with a <em>shared</em> resource scope is thread-safe and allocation requests may be
* performed concurrently; conversely, if the arena allocator is associated with a <em>confined</em> resource scope,
* allocation requests can only occur from the thread owning the allocator's resource scope.
* <p>
* The returned allocator might throw an {@link OutOfMemoryError} if an incoming allocation request exceeds
* the system capacity.
*
* @param scope the scope associated with the segments returned by this allocator.
* @return a new unbounded arena-based allocator
* @throws IllegalStateException if {@code scope} has been already closed, or if access occurs from a thread other
* than the thread owning {@code scope}.
*/
static SegmentAllocator arenaAllocator(ResourceScope scope) {
Objects.requireNonNull(scope);
return scope.ownerThread() == null ?
new ArenaAllocator.UnboundedSharedArenaAllocator(scope) :
new ArenaAllocator.UnboundedArenaAllocator(scope);
}
/**
* Returns a segment allocator which responds to allocation requests by recycling a single segment; that is,
* each new allocation request will return a new slice starting at the segment offset {@code 0} (alignment
* constraints are ignored by this allocator). This can be useful to limit allocation requests in case a client
* knows that they have fully processed the contents of the allocated segment before the subsequent allocation request
* takes place.
* <p>
* While the allocator returned by this method is <em>thread-safe</em>, concurrent access on the same recycling
* allocator might cause a thread to overwrite contents written to the underlying segment by a different thread.
*
* @param segment the memory segment to be recycled by the returned allocator.
* @return an allocator which recycles an existing segment upon each new allocation request.
*/
static SegmentAllocator ofSegment(MemorySegment segment) {
Objects.requireNonNull(segment);
return (size, align) -> segment.asSlice(0, size);
}
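// Usage sketch for the recycling allocator above (hypothetical client code):
// every request returns a slice of the same backing segment starting at
// offset 0, so successive allocations overlap and no new memory is obtained.
//
//   MemorySegment buffer = MemorySegment.allocateNative(1024, scope);
//   SegmentAllocator recycling = SegmentAllocator.ofSegment(buffer);
//   MemorySegment s1 = recycling.allocate(64);
//   MemorySegment s2 = recycling.allocate(64); // same 64 bytes as s1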
/**
* Returns a native allocator which responds to allocation requests by allocating new segments
* bound by the given resource scope, using the {@link MemorySegment#allocateNative(long, long, ResourceScope)}
* factory. This code is equivalent (but likely more efficient) to the following:
* <blockquote><pre>{@code
ResourceScope scope = ...
SegmentAllocator scoped = (size, align) -> MemorySegment.allocateNative(size, align, scope);
* }</pre></blockquote>
*
* @param scope the resource scope associated with the segments created by the returned allocator.
* @return an allocator which allocates new memory segments bound by the provided resource scope.
*/
static SegmentAllocator ofScope(ResourceScope scope) {
Objects.requireNonNull(scope);
return (ResourceScopeImpl)scope;
}
}
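
A short sketch of the arena allocators documented above (hypothetical client code): multiple allocation requests are served from one malloc'd region, and everything is freed when the scope closes.

    try (ResourceScope scope = ResourceScope.newConfinedScope()) {
        SegmentAllocator arena = SegmentAllocator.arenaAllocator(1024, scope);
        MemorySegment a = arena.allocate(MemoryLayouts.JAVA_INT, 42);      // one int
        MemorySegment b = arena.allocateArray(MemoryLayouts.JAVA_INT,
                                              new int[] { 1, 2, 3 });      // three ints
        // a custom allocation policy is just a lambda, since the interface is functional
        SegmentAllocator oneOff = (size, align) ->
                MemorySegment.allocateNative(size, align, scope);
    } // the arena's backing region is released here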

View File

@ -32,7 +32,6 @@ import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.stream.LongStream;
/**
* A sequence layout. A sequence layout is used to denote a repetition of a given layout, also called the sequence layout's <em>element layout</em>.
@ -41,16 +40,16 @@ import java.util.stream.LongStream;
* that is equal to the sequence layout's element count. In other words, this layout:
*
* <pre>{@code
MemoryLayout.ofSequence(3, MemoryLayout.ofValueBits(32, ByteOrder.BIG_ENDIAN));
MemoryLayout.sequenceLayout(3, MemoryLayout.valueLayout(32, ByteOrder.BIG_ENDIAN));
* }</pre>
*
* is equivalent to the following layout:
*
* <pre>{@code
MemoryLayout.ofStruct(
MemoryLayout.ofValueBits(32, ByteOrder.BIG_ENDIAN),
MemoryLayout.ofValueBits(32, ByteOrder.BIG_ENDIAN),
MemoryLayout.ofValueBits(32, ByteOrder.BIG_ENDIAN));
MemoryLayout.structLayout(
MemoryLayout.valueLayout(32, ByteOrder.BIG_ENDIAN),
MemoryLayout.valueLayout(32, ByteOrder.BIG_ENDIAN),
MemoryLayout.valueLayout(32, ByteOrder.BIG_ENDIAN));
* }</pre>
*
* <p>
@ -67,7 +66,7 @@ MemoryLayout.ofStruct(
* @implSpec
* This class is immutable and thread-safe.
*/
public final class SequenceLayout extends AbstractLayout {
public final class SequenceLayout extends AbstractLayout implements MemoryLayout {
private final OptionalLong elemCount;
private final MemoryLayout elementLayout;
@ -123,11 +122,11 @@ public final class SequenceLayout extends AbstractLayout {
* <p>
* For instance, given a sequence layout of the kind:
* <pre>{@code
var seq = MemoryLayout.ofSequence(4, MemoryLayout.ofSequence(3, MemoryLayouts.JAVA_INT));
var seq = MemoryLayout.sequenceLayout(4, MemoryLayout.sequenceLayout(3, MemoryLayouts.JAVA_INT));
* }</pre>
* calling {@code seq.reshape(2, 6)} will yield the following sequence layout:
* <pre>{@code
var reshapeSeq = MemoryLayout.ofSequence(2, MemoryLayout.ofSequence(6, MemoryLayouts.JAVA_INT));
var reshapeSeq = MemoryLayout.sequenceLayout(2, MemoryLayout.sequenceLayout(6, MemoryLayouts.JAVA_INT));
* }</pre>
* <p>
* If one of the provided element count is the special value {@code -1}, then the element
@ -187,7 +186,7 @@ public final class SequenceLayout extends AbstractLayout {
MemoryLayout res = flat.elementLayout();
for (int i = elementCounts.length - 1 ; i >= 0 ; i--) {
res = MemoryLayout.ofSequence(elementCounts[i], res);
res = MemoryLayout.sequenceLayout(elementCounts[i], res);
}
return (SequenceLayout)res;
}
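// Worked example for reshape/flatten (hypothetical client code), using the
// factories renamed by this change:
//
//   SequenceLayout seq = MemoryLayout.sequenceLayout(4,
//           MemoryLayout.sequenceLayout(3, MemoryLayouts.JAVA_INT)); // [4 x [3 x int]]
//   SequenceLayout reshaped = seq.reshape(2, 6);                     // [2 x [6 x int]]
//   SequenceLayout flat = seq.flatten();                             // [12 x int]
//   assert seq.byteSize() == flat.byteSize();                        // 48 bytes each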
@ -199,11 +198,11 @@ public final class SequenceLayout extends AbstractLayout {
* be dropped and their element counts will be incorporated into that of the returned sequence layout.
* For instance, given a sequence layout of the kind:
* <pre>{@code
var seq = MemoryLayout.ofSequence(4, MemoryLayout.ofSequence(3, MemoryLayouts.JAVA_INT));
var seq = MemoryLayout.sequenceLayout(4, MemoryLayout.sequenceLayout(3, MemoryLayouts.JAVA_INT));
* }</pre>
* calling {@code seq.flatten()} will yield the following sequence layout:
* <pre>{@code
var flattenedSeq = MemoryLayout.ofSequence(12, MemoryLayouts.JAVA_INT);
var flattenedSeq = MemoryLayout.sequenceLayout(12, MemoryLayouts.JAVA_INT);
* }</pre>
* @return a new sequence layout with the same size as this layout (but, possibly, with different
* element count), whose element layout is not a sequence layout.
@ -221,7 +220,7 @@ public final class SequenceLayout extends AbstractLayout {
count = count * elemSeq.elementCount().orElseThrow(this::badUnboundSequenceLayout);
elemLayout = elemSeq.elementLayout();
}
return MemoryLayout.ofSequence(count, elemLayout);
return MemoryLayout.sequenceLayout(count, elemLayout);
}
private UnsupportedOperationException badUnboundSequenceLayout() {

View File

@ -45,19 +45,15 @@
* ranging from {@code 0} to {@code 9}, we can use the following code:
*
* <pre>{@code
try (MemorySegment segment = MemorySegment.allocateNative(10 * 4)) {
for (int i = 0 ; i < 10 ; i++) {
MemoryAccess.setIntAtIndex(segment, i);
}
MemorySegment segment = MemorySegment.allocateNative(10 * 4, ResourceScope.newImplicitScope());
for (int i = 0 ; i < 10 ; i++) {
MemoryAccess.setIntAtIndex(segment, i, 42);
}
* }</pre>
*
* Here we create a <em>native</em> memory segment, that is, a memory segment backed by
* off-heap memory; the size of the segment is 40 bytes, enough to store 10 values of the primitive type {@code int}.
* The segment is created inside a <em>try-with-resources</em> construct: this idiom ensures that all the memory resources
* associated with the segment will be released at the end of the block, according to the semantics described in
* Section {@jls 14.20.3} of <cite>The Java Language Specification</cite>. Inside the try-with-resources block, we initialize
* the contents of the memory segment using the
* Inside a loop, we then initialize the contents of the memory segment using the
* {@link jdk.incubator.foreign.MemoryAccess#setIntAtIndex(jdk.incubator.foreign.MemorySegment, long, int)} helper method;
* more specifically, if we view the memory segment as a set of 10 adjacent slots,
* {@code s[i]}, where {@code 0 <= i < 10}, where the size of each slot is exactly 4 bytes, the initialization logic above will set each slot
@ -66,16 +62,25 @@ try (MemorySegment segment = MemorySegment.allocateNative(10 * 4)) {
* <h3><a id="deallocation"></a>Deterministic deallocation</h3>
*
* When writing code that manipulates memory segments, especially if backed by memory which resides outside the Java heap, it is
* crucial that the resources associated with a memory segment are released when the segment is no longer in use, by calling the {@link jdk.incubator.foreign.MemorySegment#close()}
* method either explicitly, or implicitly, by relying on try-with-resources construct (as demonstrated in the example above).
* Closing a given memory segment is an <em>atomic</em> operation which can either succeed - and result in the underlying
* memory associated with the segment to be released, or <em>fail</em> with an exception.
* <p>
* The deterministic deallocation model differs significantly from the implicit strategies adopted within other APIs, most
* notably the {@link java.nio.ByteBuffer} API: in that case, when a native byte buffer is created (see {@link java.nio.ByteBuffer#allocateDirect(int)}),
* the underlying memory is not released until the byte buffer reference becomes <em>unreachable</em>. While implicit deallocation
* models such as this can be very convenient - clients do not have to remember to <em>close</em> a direct buffer - such models can also make it
* hard for clients to ensure that the memory associated with a direct buffer has indeed been released.
* often crucial that the resources associated with a memory segment are released when the segment is no longer in use,
* and in a timely fashion. For this reason, there might be cases where waiting for the garbage collector to determine that a segment
* is <a href="../../../java/lang/ref/package.html#reachability">unreachable</a> is not optimal.
* Clients that operate under these assumptions might want to programmatically release the memory associated
* with a memory segment. This can be done, using the {@link jdk.incubator.foreign.ResourceScope} abstraction, as shown below:
*
* <pre>{@code
try (ResourceScope scope = ResourceScope.newConfinedScope()) {
MemorySegment segment = MemorySegment.allocateNative(10 * 4, scope);
for (int i = 0 ; i < 10 ; i++) {
MemoryAccess.setIntAtIndex(segment, i, 42);
}
}
* }</pre>
*
* This example is almost identical to the prior one; this time we first create a so called <em>resource scope</em>,
* which is used to <em>bind</em> the life-cycle of the segment created immediately afterwards. Note the use of the
* <em>try-with-resources</em> construct: this idiom ensures that all the memory resources associated with the segment will be released
* at the end of the block, according to the semantics described in Section {@jls 14.20.3} of <cite>The Java Language Specification</cite>.
*
* <h3><a id="safety"></a>Safety</h3>
*
@ -86,14 +91,9 @@ try (MemorySegment segment = MemorySegment.allocateNative(10 * 4)) {
* Section {@jls 15.10.4} of <cite>The Java Language Specification</cite>.
* <p>
* Since memory segments can be closed (see above), segments are also validated (upon access) to make sure that
* the segment being accessed has not been closed prematurely. We call this guarantee <em>temporal safety</em>. Note that,
* in the general case, guaranteeing temporal safety can be hard, as multiple threads could attempt to access and/or close
* the same memory segment concurrently. The memory access API addresses this problem by imposing strong
* <em>thread-confinement</em> guarantees on memory segments: upon creation, a memory segment is associated with an owner thread,
* which is the only thread that can either access or close the segment.
* <p>
* Together, spatial and temporal safety ensure that each memory access operation either succeeds - and accesses a valid
* memory location - or fails.
* the resource scope associated with the segment being accessed has not been closed prematurely.
* We call this guarantee <em>temporal safety</em>. Together, spatial and temporal safety ensure that each memory access
* operation either succeeds - and accesses a valid memory location - or fails.
*
* <h2>Foreign function access</h2>
* The key abstractions introduced to support foreign function access are {@link jdk.incubator.foreign.LibraryLookup} and {@link jdk.incubator.foreign.CLinker}.
@ -106,15 +106,16 @@ try (MemorySegment segment = MemorySegment.allocateNative(10 * 4)) {
* we can use the following code:
*
* <pre>{@code
MethodHandle strlen = CLinker.getInstance().downcallHandle(
MethodHandle strlen = CLinker.getInstance().downcallHandle(
LibraryLookup.ofDefault().lookup("strlen").get(),
MethodType.methodType(long.class, MemoryAddress.class),
FunctionDescriptor.of(CLinker.C_LONG, CLinker.C_POINTER)
);
);
try (var cString = CLinker.toCString("Hello")) {
long len = strlen.invokeExact(cString.address()) // 5
}
try (var scope = ResourceScope.newConfinedScope()) {
var cString = CLinker.toCString("Hello", scope);
long len = (long)strlen.invokeExact(cString.address()); // 5
}
* }</pre>
*
* Here, we lookup the {@code strlen} symbol in the <em>default</em> library lookup (see {@link jdk.incubator.foreign.LibraryLookup#ofDefault()}).
@ -126,7 +127,7 @@ try (var cString = CLinker.toCString("Hello")) {
* the method handle invocation (here performed using {@link java.lang.invoke.MethodHandle#invokeExact(java.lang.Object...)})
* into a foreign function call, according to the rules specified by the platform C ABI. The {@link jdk.incubator.foreign.CLinker}
* class also provides many useful methods for interacting with native code, such as converting Java strings into
* native strings and viceversa (see {@link jdk.incubator.foreign.CLinker#toCString(java.lang.String)} and
* native strings and vice versa (see {@link jdk.incubator.foreign.CLinker#toCString(java.lang.String, ResourceScope)} and
* {@link jdk.incubator.foreign.CLinker#toJavaString(jdk.incubator.foreign.MemorySegment)}, respectively), as
* demonstrated in the above example.
*
@ -146,43 +147,44 @@ try (var cString = CLinker.toCString("Hello")) {
* the original segment accordingly, as follows:
*
* <pre>{@code
MemorySegment segment = MemorySegment.allocateNative(100);
MemorySegment segment = MemorySegment.allocateNative(100, scope);
...
MemoryAddress addr = ... //obtain address from native code
int x = MemoryAccess.getIntAtOffset(segment, addr.segmentOffset(segment));
* }</pre>
*
* Secondly, if the client does <em>not</em> have a segment which contains a given memory address, it can create one <em>unsafely</em>,
* using the {@link jdk.incubator.foreign.MemoryAddress#asSegmentRestricted(long)} factory. This allows the client to
* using the {@link jdk.incubator.foreign.MemoryAddress#asSegment(long, ResourceScope)} factory. This allows the client to
* inject extra knowledge about spatial bounds which might, for instance, be available in the documentation of the foreign function
* which produced the native address. Here is how an unsafe segment can be created from a native address:
*
* <pre>{@code
ResourceScope scope = ... // initialize a resource scope object
MemoryAddress addr = ... //obtain address from native code
MemorySegment segment = addr.asSegmentRestricted(4); // segment is 4 bytes long
MemorySegment segment = addr.asSegment(4, scope); // segment is 4 bytes long
int x = MemoryAccess.getInt(segment);
* }</pre>
*
* Alternatively, the client can fall back to using the so-called <em>everything</em> segment - that is, a primordial segment
* which covers the entire native heap. This segment can be obtained by calling the {@link jdk.incubator.foreign.MemorySegment#ofNativeRestricted()}
* which covers the entire native heap. This segment can be obtained by calling the {@link jdk.incubator.foreign.MemorySegment#globalNativeSegment()}
* method, so that dereference can happen without the need of creating any additional segment instances:
*
* <pre>{@code
MemoryAddress addr = ... //obtain address from native code
int x = MemoryAccess.getIntAtOffset(MemorySegment.ofNativeRestricted(), addr.toRawLongValue());
int x = MemoryAccess.getIntAtOffset(MemorySegment.globalNativeSegment(), addr.toRawLongValue());
* }</pre>
*
* <h3>Upcalls</h3>
* The {@link jdk.incubator.foreign.CLinker} interface also allows clients to turn an existing method handle (which might point
* to a Java method) into a native memory segment (see {@link jdk.incubator.foreign.MemorySegment}), so that Java code
* to a Java method) into a native memory address (see {@link jdk.incubator.foreign.MemoryAddress}), so that Java code
* can effectively be passed to other foreign functions. For instance, we can write a method that compares two
* integer values, as follows:
*
* <pre>{@code
class IntComparator {
static int intCompare(MemoryAddress addr1, MemoryAddress addr2) {
return MemoryAccess.getIntAtOffset(MemorySegment.ofNativeRestricted(), addr1.toRawLongValue()) -
MemoryAccess.getIntAtOffset(MemorySegment.ofNativeRestricted(), addr2.toRawLongValue());
return MemoryAccess.getIntAtOffset(MemorySegment.globalNativeSegment(), addr1.toRawLongValue()) -
MemoryAccess.getIntAtOffset(MemorySegment.globalNativeSegment(), addr2.toRawLongValue());
}
}
* }</pre>
@ -197,39 +199,38 @@ MethodHandle intCompareHandle = MethodHandles.lookup().findStatic(IntComparator.
MethodType.methodType(int.class, MemoryAddress.class, MemoryAddress.class));
* }</pre>
*
* Now that we have a method handle instance, we can link it into a fresh native memory segment, using the {@link jdk.incubator.foreign.CLinker} interface, as follows:
* Now that we have a method handle instance, we can link it into a fresh native memory address, using the {@link jdk.incubator.foreign.CLinker} interface, as follows:
*
* <pre>{@code
MemorySegment comparFunc = CLinker.getInstance().upcallStub(
ResourceScope scope = ...
MemoryAddress comparFunc = CLinker.getInstance().upcallStub(
intCompareHandle,
FunctionDescriptor.of(C_INT, C_POINTER, C_POINTER)
FunctionDescriptor.of(C_INT, C_POINTER, C_POINTER),
scope
);
* }</pre>
*
* As before, we need to provide a {@link jdk.incubator.foreign.FunctionDescriptor} instance describing the signature
* of the function pointer we want to create; this, coupled with the method handle type, uniquely determines the
* sequence of steps which will allow foreign code to call {@code intCompareHandle} according to the rules specified
* by the platform C ABI.
* by the platform C ABI. The lifecycle of the memory address returned by
* {@link jdk.incubator.foreign.CLinker#upcallStub(java.lang.invoke.MethodHandle, jdk.incubator.foreign.FunctionDescriptor, jdk.incubator.foreign.ResourceScope)}
* is tied to the {@linkplain jdk.incubator.foreign.ResourceScope resource scope} parameter passed to that method.
*
* <a id="restricted"></a>
* <h2>Restricted methods</h2>
* Some methods in this package are considered <em>restricted</em>. Restricted methods are typically used to bind native
* foreign data and/or functions to first-class Java API elements which can then be used directly by client. For instance
* the restricted method {@link jdk.incubator.foreign.MemoryAddress#asSegmentRestricted(long)} can be used to create
* foreign data and/or functions to first-class Java API elements which can then be used directly by clients. For instance
* the restricted method {@link jdk.incubator.foreign.MemoryAddress#asSegment(long, ResourceScope)} can be used to create
* a fresh segment with given spatial bounds out of a native address.
* <p>
* Binding foreign data and/or functions is generally unsafe and, if done incorrectly, can result in VM crashes, or memory corruption when the bound Java API element is accessed.
* For instance, in the case of {@link jdk.incubator.foreign.MemoryAddress#asSegmentRestricted(long)}, if the provided
* For instance, in the case of {@link jdk.incubator.foreign.MemoryAddress#asSegment(long, ResourceScope)}, if the provided
* spatial bounds are incorrect, a client of the segment returned by that method might crash the VM, or corrupt
* memory when attempting to dereference said segment. For these reasons, it is crucial for code that calls a restricted method
* to never pass arguments that might cause incorrect binding of foreign data and/or functions to a Java API.
* <p>
* Access to restricted methods is <em>disabled</em> by default; to enable restricted methods, the JDK property
* {@code foreign.restricted} must be set to a value other than {@code deny}. The possible values for this property are:
* <ul>
* <li>{@code deny}: issues a runtime exception on each restricted call. This is the default value;</li>
* <li>{@code permit}: allows restricted calls;</li>
* <li>{@code warn}: like permit, but also prints a one-line warning on each restricted call;</li>
* <li>{@code debug}: like permit, but also dumps the stack corresponding to any given restricted call.</li>
* </ul>
* Access to restricted methods is <em>disabled</em> by default; to enable restricted methods, the command line option
* {@code --enable-native-access} must mention the name of the caller's module.
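* For example, a launch sketch for an application whose calling module is named {@code com.example.app}
* (a hypothetical module name; code on the class path would use {@code ALL-UNNAMED} instead):
*
* <pre>{@code
java --add-modules jdk.incubator.foreign --enable-native-access=com.example.app com.example.app.Main
* }</pre>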
*/
package jdk.incubator.foreign;

View File

@ -0,0 +1,55 @@
/*
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package jdk.internal.foreign;
import jdk.incubator.foreign.Addressable;
import jdk.incubator.foreign.CLinker;
import jdk.incubator.foreign.FunctionDescriptor;
import jdk.incubator.foreign.MemorySegment;
import jdk.incubator.foreign.SegmentAllocator;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.util.Objects;
public abstract non-sealed class AbstractCLinker implements CLinker {
public final MethodHandle downcallHandle(Addressable symbol, MethodType type, FunctionDescriptor function) {
Objects.requireNonNull(symbol);
return MethodHandles.insertArguments(downcallHandle(type, function), 0, symbol);
}
public final MethodHandle downcallHandle(Addressable symbol, SegmentAllocator allocator, MethodType type, FunctionDescriptor function) {
Objects.requireNonNull(symbol);
Objects.requireNonNull(allocator);
MethodHandle downcall = MethodHandles.insertArguments(downcallHandle(type, function), 0, symbol);
if (type.returnType().equals(MemorySegment.class)) {
downcall = MethodHandles.insertArguments(downcall, 0, allocator);
}
return downcall;
}
}
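A usage sketch for the symbol-bound overload above (illustrative only: strlenAddr and cString are
assumed to have been obtained via a library lookup and an allocated C string, and C_LONG is used
for size_t as on Linux/x64):
MethodHandle strlen = CLinker.getInstance().downcallHandle(
        strlenAddr,                                              // pre-bound symbol
        MethodType.methodType(long.class, MemoryAddress.class),
        FunctionDescriptor.of(CLinker.C_LONG, CLinker.C_POINTER));
long len = (long) strlen.invokeExact(cString.address());         // calls strlen(cString)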

View File

@ -35,43 +35,42 @@ import jdk.internal.util.ArraysSupport;
import jdk.internal.vm.annotation.ForceInline;
import sun.security.action.GetPropertyAction;
import java.io.FileDescriptor;
import java.lang.invoke.VarHandle;
import java.lang.ref.Cleaner;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.IntFunction;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
/**
* This abstract class provides an immutable implementation for the {@code MemorySegment} interface. This class contains information
* about the segment's spatial and temporal bounds; each memory segment implementation is associated with an owner thread which is set at creation time.
* Access to certain sensitive operations on the memory segment will fail with {@code IllegalStateException} if the
* segment is either in an invalid state (e.g. it has already been closed) or if access occurs from a thread other
* than the owner thread. See {@link MemoryScope} for more details on management of temporal bounds. Subclasses
* than the owner thread. See {@link ResourceScopeImpl} for more details on management of temporal bounds. Subclasses
* are defined for each memory segment kind, see {@link NativeMemorySegmentImpl}, {@link HeapMemorySegmentImpl} and
* {@link MappedMemorySegmentImpl}.
*/
public abstract class AbstractMemorySegmentImpl extends MemorySegmentProxy implements MemorySegment {
public abstract non-sealed class AbstractMemorySegmentImpl extends MemorySegmentProxy implements MemorySegment {
private static final ScopedMemoryAccess SCOPED_MEMORY_ACCESS = ScopedMemoryAccess.getScopedMemoryAccess();
private static final boolean enableSmallSegments =
Boolean.parseBoolean(GetPropertyAction.privilegedGetProperty("jdk.incubator.foreign.SmallSegments", "true"));
final static int FIRST_RESERVED_FLAG = 1 << 16; // upper 16 bits are reserved
final static int SMALL = FIRST_RESERVED_FLAG;
final static long NONCE = new Random().nextLong();
static final int READ_ONLY = 1;
static final int SMALL = READ_ONLY << 1;
static final long NONCE = new Random().nextLong();
final static JavaNioAccess nioAccess = SharedSecrets.getJavaNioAccess();
static final JavaNioAccess nioAccess = SharedSecrets.getJavaNioAccess();
final long length;
final int mask;
final MemoryScope scope;
final ResourceScopeImpl scope;
@ForceInline
AbstractMemorySegmentImpl(long length, int mask, MemoryScope scope) {
AbstractMemorySegmentImpl(long length, int mask, ResourceScopeImpl scope) {
this.length = length;
this.mask = mask;
this.scope = scope;
@ -81,14 +80,23 @@ public abstract class AbstractMemorySegmentImpl extends MemorySegmentProxy imple
abstract Object base();
abstract AbstractMemorySegmentImpl dup(long offset, long size, int mask, MemoryScope scope);
abstract AbstractMemorySegmentImpl dup(long offset, long size, int mask, ResourceScopeImpl scope);
abstract ByteBuffer makeByteBuffer();
static int defaultAccessModes(long size) {
return (enableSmallSegments && size < Integer.MAX_VALUE) ?
ALL_ACCESS | SMALL :
ALL_ACCESS;
SMALL : 0;
}
@Override
public AbstractMemorySegmentImpl asReadOnly() {
return dup(0, length, mask | READ_ONLY, scope);
}
@Override
public boolean isReadOnly() {
return isSet(READ_ONLY);
}
@Override
@ -108,14 +116,21 @@ public abstract class AbstractMemorySegmentImpl extends MemorySegmentProxy imple
}
@Override
public Spliterator<MemorySegment> spliterator(SequenceLayout sequenceLayout) {
Objects.requireNonNull(sequenceLayout);
checkValidState();
if (sequenceLayout.byteSize() != byteSize()) {
throw new IllegalArgumentException();
public Spliterator<MemorySegment> spliterator(MemoryLayout elementLayout) {
Objects.requireNonNull(elementLayout);
if (elementLayout.byteSize() == 0) {
throw new IllegalArgumentException("Element layout size cannot be zero");
}
return new SegmentSplitter(sequenceLayout.elementLayout().byteSize(), sequenceLayout.elementCount().getAsLong(),
withAccessModes(accessModes() & ~CLOSE));
if (byteSize() % elementLayout.byteSize() != 0) {
throw new IllegalArgumentException("Segment size is no a multiple of layout size");
}
return new SegmentSplitter(elementLayout.byteSize(), byteSize() / elementLayout.byteSize(),
this);
}
@Override
public Stream<MemorySegment> elements(MemoryLayout elementLayout) {
return StreamSupport.stream(spliterator(elementLayout), false);
}
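// Usage sketch (illustrative only, not part of the original class): the reshaped
// API streams over fixed-size elements; the segment size must be a non-zero
// multiple of the element layout size, or spliterator() above throws.
private static long countElementsExample(MemorySegment segment, MemoryLayout elementLayout) {
    return segment.elements(elementLayout).count();
}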
@Override
@ -185,10 +200,10 @@ public abstract class AbstractMemorySegmentImpl extends MemorySegmentProxy imple
/**
* Mismatch over long lengths.
*/
private static long vectorizedMismatchLargeForBytes(MemoryScope aScope, MemoryScope bScope,
Object a, long aOffset,
Object b, long bOffset,
long length) {
private static long vectorizedMismatchLargeForBytes(ResourceScopeImpl aScope, ResourceScopeImpl bScope,
Object a, long aOffset,
Object b, long bOffset,
long length) {
long off = 0;
long remaining = length;
int i, size;
@ -217,132 +232,63 @@ public abstract class AbstractMemorySegmentImpl extends MemorySegmentProxy imple
@Override
@ForceInline
public final MemoryAddress address() {
checkValidState();
return new MemoryAddressImpl(base(), min());
return new MemoryAddressImpl(this, 0L);
}
@Override
public final ByteBuffer asByteBuffer() {
if (!isSet(READ)) {
throw unsupportedAccessMode(READ);
}
checkArraySize("ByteBuffer", 1);
ByteBuffer _bb = makeByteBuffer();
if (!isSet(WRITE)) {
if (isSet(READ_ONLY)) {
//scope is IMMUTABLE - obtain a RO byte buffer
_bb = _bb.asReadOnlyBuffer();
}
return _bb;
}
@Override
public final int accessModes() {
return mask & ALL_ACCESS;
}
@Override
public final long byteSize() {
return length;
}
@Override
public final boolean isAlive() {
return scope.isAlive();
}
@Override
public Thread ownerThread() {
return scope.ownerThread();
}
@Override
public AbstractMemorySegmentImpl withAccessModes(int accessModes) {
checkAccessModes(accessModes);
if ((~accessModes() & accessModes) != 0) {
throw new IllegalArgumentException("Cannot acquire more access modes");
}
return dup(0, length, (mask & ~ALL_ACCESS) | accessModes, scope);
}
@Override
public boolean hasAccessModes(int accessModes) {
checkAccessModes(accessModes);
return (accessModes() & accessModes) == accessModes;
}
private void checkAccessModes(int accessModes) {
if ((accessModes & ~ALL_ACCESS) != 0) {
throw new IllegalArgumentException("Invalid access modes");
}
}
public MemorySegment handoff(Thread thread) {
Objects.requireNonNull(thread);
checkValidState();
if (!isSet(HANDOFF)) {
throw unsupportedAccessMode(HANDOFF);
}
try {
return dup(0L, length, mask, scope.confineTo(thread));
} finally {
//flush read/writes to segment memory before returning the new segment
VarHandle.fullFence();
}
}
@Override
public MemorySegment share() {
checkValidState();
if (!isSet(SHARE)) {
throw unsupportedAccessMode(SHARE);
}
try {
return dup(0L, length, mask, scope.share());
} finally {
//flush read/writes to segment memory before returning the new segment
VarHandle.fullFence();
}
}
@Override
public MemorySegment handoff(NativeScope scope) {
Objects.requireNonNull(scope);
checkValidState();
if (!isSet(HANDOFF)) {
throw unsupportedAccessMode(HANDOFF);
}
if (!isSet(CLOSE)) {
throw unsupportedAccessMode(CLOSE);
}
MemorySegment dup = handoff(scope.ownerThread());
((AbstractNativeScope)scope).register(dup);
return dup.withAccessModes(accessModes() & (READ | WRITE));
}
@Override
public MemorySegment registerCleaner(Cleaner cleaner) {
Objects.requireNonNull(cleaner);
checkValidState();
if (!isSet(CLOSE)) {
throw unsupportedAccessMode(CLOSE);
}
return dup(0L, length, mask, scope.cleanable(cleaner));
}
@Override
public final void close() {
checkValidState();
if (!isSet(CLOSE)) {
throw unsupportedAccessMode(CLOSE);
}
scope.close();
}
@Override
public boolean isMapped() {
return false;
}
@Override
public boolean isNative() {
return false;
}
@Override
public void load() {
throw new UnsupportedOperationException("Not a mapped segment");
}
@Override
public void unload() {
throw new UnsupportedOperationException("Not a mapped segment");
}
@Override
public boolean isLoaded() {
throw new UnsupportedOperationException("Not a mapped segment");
}
@Override
public void force() {
throw new UnsupportedOperationException("Not a mapped segment");
}
@Override
public final byte[] toByteArray() {
return toArray(byte[].class, 1, byte[]::new, MemorySegment::ofArray);
@ -393,20 +339,13 @@ public abstract class AbstractMemorySegmentImpl extends MemorySegmentProxy imple
@Override
public void checkAccess(long offset, long length, boolean readOnly) {
if (!readOnly && !isSet(WRITE)) {
throw unsupportedAccessMode(WRITE);
} else if (readOnly && !isSet(READ)) {
throw unsupportedAccessMode(READ);
if (!readOnly && isSet(READ_ONLY)) {
throw new UnsupportedOperationException("Attempt to write a read-only segment");
}
checkBounds(offset, length);
}
private void checkAccessAndScope(long offset, long length, boolean readOnly) {
checkValidState();
checkAccess(offset, length, readOnly);
}
private void checkValidState() {
void checkValidState() {
try {
scope.checkValidState();
} catch (ScopedMemoryAccess.Scope.ScopedAccessError ex) {
@ -432,17 +371,19 @@ public abstract class AbstractMemorySegmentImpl extends MemorySegmentProxy imple
private int checkArraySize(String typeName, int elemSize) {
if (length % elemSize != 0) {
throw new UnsupportedOperationException(String.format("Segment size is not a multiple of %d. Size: %d", elemSize, length));
throw new IllegalStateException(String.format("Segment size is not a multiple of %d. Size: %d", elemSize, length));
}
long arraySize = length / elemSize;
if (arraySize > (Integer.MAX_VALUE - 8)) { //conservative check
throw new UnsupportedOperationException(String.format("Segment is too large to wrap as %s. Size: %d", typeName, length));
throw new IllegalStateException(String.format("Segment is too large to wrap as %s. Size: %d", typeName, length));
}
return (int)arraySize;
}
private void checkBounds(long offset, long length) {
if (isSmall()) {
if (isSmall() &&
offset < Integer.MAX_VALUE && length < Integer.MAX_VALUE &&
offset > Integer.MIN_VALUE && length > Integer.MIN_VALUE) {
checkBoundsSmall((int)offset, (int)length);
} else {
if (length < 0 ||
@ -454,7 +395,7 @@ public abstract class AbstractMemorySegmentImpl extends MemorySegmentProxy imple
}
@Override
public MemoryScope scope() {
public ResourceScopeImpl scope() {
return scope;
}
@ -466,31 +407,6 @@ public abstract class AbstractMemorySegmentImpl extends MemorySegmentProxy imple
}
}
UnsupportedOperationException unsupportedAccessMode(int expected) {
return new UnsupportedOperationException((String.format("Required access mode %s ; current access modes: %s",
modeStrings(expected).get(0), modeStrings(mask))));
}
private List<String> modeStrings(int mode) {
List<String> modes = new ArrayList<>();
if ((mode & READ) != 0) {
modes.add("READ");
}
if ((mode & WRITE) != 0) {
modes.add("WRITE");
}
if ((mode & CLOSE) != 0) {
modes.add("CLOSE");
}
if ((mode & SHARE) != 0) {
modes.add("SHARE");
}
if ((mode & HANDOFF) != 0) {
modes.add("HANDOFF");
}
return modes;
}
private IndexOutOfBoundsException outOfBoundException(long offset, long length) {
return new IndexOutOfBoundsException(String.format("Out of bound access on segment %s; new offset = %d; new length = %d",
this, offset, length));
@ -602,20 +518,20 @@ public abstract class AbstractMemorySegmentImpl extends MemorySegmentProxy imple
int size = limit - pos;
AbstractMemorySegmentImpl bufferSegment = (AbstractMemorySegmentImpl)nioAccess.bufferSegment(bb);
final MemoryScope bufferScope;
final ResourceScopeImpl bufferScope;
int modes;
if (bufferSegment != null) {
bufferScope = bufferSegment.scope;
modes = bufferSegment.mask;
} else {
bufferScope = MemoryScope.createConfined(bb, MemoryScope.DUMMY_CLEANUP_ACTION, null);
bufferScope = ResourceScopeImpl.GLOBAL;
modes = defaultAccessModes(size);
}
if (bb.isReadOnly()) {
modes &= ~WRITE;
modes |= READ_ONLY;
}
if (base != null) {
return new HeapMemorySegmentImpl.OfByte(bbAddress + pos, (byte[])base, size, modes, bufferScope);
return new HeapMemorySegmentImpl.OfByte(bbAddress + pos, (byte[])base, size, modes);
} else if (unmapper == null) {
return new NativeMemorySegmentImpl(bbAddress + pos, size, modes, bufferScope);
} else {

View File

@ -1,172 +0,0 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.internal.foreign;
import jdk.incubator.foreign.MemorySegment;
import jdk.incubator.foreign.NativeScope;
import java.util.ArrayList;
import java.util.List;
import java.util.OptionalLong;
public abstract class AbstractNativeScope implements NativeScope {
private final List<MemorySegment> segments = new ArrayList<>();
private final Thread ownerThread;
private static final int SCOPE_MASK = MemorySegment.READ | MemorySegment.WRITE; // no terminal operations allowed
AbstractNativeScope() {
this.ownerThread = Thread.currentThread();
}
@Override
public Thread ownerThread() {
return ownerThread;
}
@Override
public void close() {
segments.forEach(MemorySegment::close);
}
void checkOwnerThread() {
if (Thread.currentThread() != ownerThread()) {
throw new IllegalStateException("Attempt to access scope from different thread");
}
}
MemorySegment newSegment(long size, long align) {
MemorySegment segment = MemorySegment.allocateNative(size, align);
segments.add(segment);
return segment;
}
MemorySegment newSegment(long size) {
return newSegment(size, size);
}
public void register(MemorySegment segment) {
segments.add(segment);
}
public static class UnboundedNativeScope extends AbstractNativeScope {
private static final long BLOCK_SIZE = 4 * 1024;
private static final long MAX_ALLOC_SIZE = BLOCK_SIZE / 2;
private MemorySegment segment;
private long sp = 0L;
private long size = 0L;
@Override
public OptionalLong byteSize() {
return OptionalLong.empty();
}
@Override
public long allocatedBytes() {
return size;
}
public UnboundedNativeScope() {
super();
this.segment = newSegment(BLOCK_SIZE);
}
@Override
public MemorySegment allocate(long bytesSize, long bytesAlignment) {
checkOwnerThread();
if (Utils.alignUp(bytesSize, bytesAlignment) > MAX_ALLOC_SIZE) {
MemorySegment segment = newSegment(bytesSize, bytesAlignment);
return segment.withAccessModes(SCOPE_MASK);
}
// try to slice from current segment first...
MemorySegment slice = trySlice(bytesSize, bytesAlignment);
if (slice == null) {
// ... if that fails, allocate a new segment and slice from there
sp = 0L;
segment = newSegment(BLOCK_SIZE, 1L);
slice = trySlice(bytesSize, bytesAlignment);
if (slice == null) {
// this should not be possible - allocations that do not fit in BLOCK_SIZE should get their own
// standalone segment (see above).
throw new AssertionError("Cannot get here!");
}
}
return slice;
}
private MemorySegment trySlice(long bytesSize, long bytesAlignment) {
long min = segment.address().toRawLongValue();
long start = Utils.alignUp(min + sp, bytesAlignment) - min;
if (segment.byteSize() - start < bytesSize) {
return null;
} else {
MemorySegment slice = segment.asSlice(start, bytesSize)
.withAccessModes(SCOPE_MASK);
sp = start + bytesSize;
size += Utils.alignUp(bytesSize, bytesAlignment);
return slice;
}
}
}
public static class BoundedNativeScope extends AbstractNativeScope {
private final MemorySegment segment;
private long sp = 0L;
@Override
public OptionalLong byteSize() {
return OptionalLong.of(segment.byteSize());
}
@Override
public long allocatedBytes() {
return sp;
}
public BoundedNativeScope(long size) {
super();
this.segment = newSegment(size, 1);
}
@Override
public MemorySegment allocate(long bytesSize, long bytesAlignment) {
checkOwnerThread();
long min = segment.address().toRawLongValue();
long start = Utils.alignUp(min + sp, bytesAlignment) - min;
try {
MemorySegment slice = segment.asSlice(start, bytesSize)
.withAccessModes(SCOPE_MASK);
sp = start + bytesSize;
return slice;
} catch (IndexOutOfBoundsException ex) {
throw new OutOfMemoryError("Not enough space left to allocate");
}
}
}
}

View File

@ -0,0 +1,124 @@
package jdk.internal.foreign;
import jdk.incubator.foreign.MemorySegment;
import jdk.incubator.foreign.SegmentAllocator;
import jdk.incubator.foreign.ResourceScope;
public abstract class ArenaAllocator implements SegmentAllocator {
protected MemorySegment segment;
protected long sp = 0L;
ArenaAllocator(MemorySegment segment) {
this.segment = segment;
}
MemorySegment trySlice(long bytesSize, long bytesAlignment) {
long min = segment.address().toRawLongValue();
long start = Utils.alignUp(min + sp, bytesAlignment) - min;
if (segment.byteSize() - start < bytesSize) {
return null;
} else {
MemorySegment slice = segment.asSlice(start, bytesSize);
sp = start + bytesSize;
return slice;
}
}
void checkConfinementIfNeeded() {
Thread ownerThread = scope().ownerThread();
if (ownerThread != null && ownerThread != Thread.currentThread()) {
throw new IllegalStateException("Attempt to allocate outside confinement thread");
}
}
ResourceScope scope() {
return segment.scope();
}
public static class UnboundedArenaAllocator extends ArenaAllocator {
private static final long DEFAULT_BLOCK_SIZE = 4 * 1024;
public UnboundedArenaAllocator(ResourceScope scope) {
super(MemorySegment.allocateNative(DEFAULT_BLOCK_SIZE, 1, scope));
}
private MemorySegment newSegment(long size, long align) {
return MemorySegment.allocateNative(size, align, segment.scope());
}
@Override
public MemorySegment allocate(long bytesSize, long bytesAlignment) {
checkConfinementIfNeeded();
// try to slice from current segment first...
MemorySegment slice = trySlice(bytesSize, bytesAlignment);
if (slice != null) {
return slice;
} else {
long maxPossibleAllocationSize = bytesSize + bytesAlignment - 1;
if (maxPossibleAllocationSize > DEFAULT_BLOCK_SIZE) {
// too big
return newSegment(bytesSize, bytesAlignment);
} else {
// allocate a new segment and slice from there
sp = 0L;
segment = newSegment(DEFAULT_BLOCK_SIZE, 1L);
return trySlice(bytesSize, bytesAlignment);
}
}
}
}
public static class BoundedArenaAllocator extends ArenaAllocator {
public BoundedArenaAllocator(ResourceScope scope, long size) {
super(MemorySegment.allocateNative(size, 1, scope));
}
@Override
public MemorySegment allocate(long bytesSize, long bytesAlignment) {
checkConfinementIfNeeded();
// try to slice from current segment first...
MemorySegment slice = trySlice(bytesSize, bytesAlignment);
if (slice != null) {
return slice;
} else {
throw new OutOfMemoryError("Not enough space left to allocate");
}
}
}
public static class BoundedSharedArenaAllocator extends BoundedArenaAllocator {
public BoundedSharedArenaAllocator(ResourceScope scope, long size) {
super(scope, size);
}
@Override
public synchronized MemorySegment allocate(long bytesSize, long bytesAlignment) {
return super.allocate(bytesSize, bytesAlignment);
}
}
public static class UnboundedSharedArenaAllocator implements SegmentAllocator {
final ResourceScope scope;
final ThreadLocal<ArenaAllocator> allocators = new ThreadLocal<>() {
@Override
protected ArenaAllocator initialValue() {
return new UnboundedArenaAllocator(scope);
}
};
public UnboundedSharedArenaAllocator(ResourceScope scope) {
this.scope = scope;
}
@Override
public MemorySegment allocate(long bytesSize, long bytesAlignment) {
return allocators.get().allocate(bytesSize, bytesAlignment);
}
}
}
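A usage sketch for these allocators through the public factory (assuming the
SegmentAllocator.arenaAllocator(ResourceScope) factory added by this commit, which is backed by
UnboundedArenaAllocator):
try (ResourceScope scope = ResourceScope.newConfinedScope()) {
    SegmentAllocator allocator = SegmentAllocator.arenaAllocator(scope);
    MemorySegment small = allocator.allocate(16, 8);    // sliced out of the current 4K block
    MemorySegment big   = allocator.allocate(8192, 8);  // exceeds the block size: standalone segment
}   // closing the scope frees all blocks at once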

View File

@ -0,0 +1,133 @@
/*
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.internal.foreign;
import jdk.incubator.foreign.ResourceScope;
import jdk.internal.vm.annotation.ForceInline;
import java.lang.ref.Cleaner;
import java.lang.ref.Reference;
/**
* A confined scope, which features an owner thread. The liveness check features an additional
* confinement check - that is, calling any operation on this scope from a thread other than the
* owner thread will result in an exception. Because of this restriction, checking the liveness bit
* can be performed in plain mode.
*/
final class ConfinedScope extends ResourceScopeImpl {
private boolean closed; // = false
private int lockCount = 0;
private final Thread owner;
public ConfinedScope(Thread owner, Cleaner cleaner) {
super(cleaner, new ConfinedResourceList());
this.owner = owner;
}
@ForceInline
public final void checkValidState() {
if (owner != Thread.currentThread()) {
throw new IllegalStateException("Attempted access outside owning thread");
}
if (closed) {
throw new IllegalStateException("Already closed");
}
}
@Override
public boolean isAlive() {
return !closed;
}
@Override
public HandleImpl acquire() {
checkValidState();
lockCount++;
return new ConfinedHandle();
}
void justClose() {
this.checkValidState();
if (lockCount == 0) {
closed = true;
} else {
throw new IllegalStateException("Scope is acquired by " + lockCount + " locks");
}
}
@Override
public Thread ownerThread() {
return owner;
}
/**
* A confined resource list; no races are possible here.
*/
static final class ConfinedResourceList extends ResourceList {
@Override
void add(ResourceCleanup cleanup) {
if (fst != ResourceCleanup.CLOSED_LIST) {
cleanup.next = fst;
fst = cleanup;
} else {
throw new IllegalStateException("Already closed!");
}
}
@Override
void cleanup() {
if (fst != ResourceCleanup.CLOSED_LIST) {
ResourceCleanup prev = fst;
fst = ResourceCleanup.CLOSED_LIST;
cleanup(prev);
} else {
throw new IllegalStateException("Attempt to cleanup an already closed resource list");
}
}
}
/**
* A confined resource scope handle; no races are possible here.
*/
final class ConfinedHandle implements HandleImpl {
boolean released = false;
@Override
public ResourceScopeImpl scope() {
return ConfinedScope.this;
}
@Override
public void release() {
checkValidState(); // thread check
if (!released) {
released = true;
lockCount--;
}
}
}
}
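A behavioral sketch of confinement through the public API (the worker thread body is illustrative):
ResourceScope scope = ResourceScope.newConfinedScope();   // owner = current thread
MemorySegment segment = MemorySegment.allocateNative(8, scope);
new Thread(() -> {
    try {
        MemoryAccess.getLong(segment);                    // fails the owner check above
    } catch (IllegalStateException expected) {
        // "Attempted access outside owning thread"
    }
}).start();
scope.close();                                            // allowed only on the owner thread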

View File

@ -34,7 +34,6 @@ import jdk.internal.vm.annotation.ForceInline;
import java.nio.ByteBuffer;
import java.util.Objects;
import java.util.function.Supplier;
/**
* Implementation for heap memory segments. A heap memory segment is composed of an offset and
@ -52,8 +51,8 @@ public abstract class HeapMemorySegmentImpl<H> extends AbstractMemorySegmentImpl
final H base;
@ForceInline
HeapMemorySegmentImpl(long offset, H base, long length, int mask, MemoryScope scope) {
super(length, mask, scope);
HeapMemorySegmentImpl(long offset, H base, long length, int mask) {
super(length, mask, ResourceScopeImpl.GLOBAL);
this.offset = offset;
this.base = base;
}
@ -67,7 +66,7 @@ public abstract class HeapMemorySegmentImpl<H> extends AbstractMemorySegmentImpl
}
@Override
abstract HeapMemorySegmentImpl<H> dup(long offset, long size, int mask, MemoryScope scope);
abstract HeapMemorySegmentImpl<H> dup(long offset, long size, int mask, ResourceScopeImpl scope);
@Override
ByteBuffer makeByteBuffer() {
@ -75,20 +74,20 @@ public abstract class HeapMemorySegmentImpl<H> extends AbstractMemorySegmentImpl
throw new UnsupportedOperationException("Not an address to an heap-allocated byte array");
}
JavaNioAccess nioAccess = SharedSecrets.getJavaNioAccess();
return nioAccess.newHeapByteBuffer((byte[]) base(), (int)min() - BYTE_ARR_BASE, (int) byteSize(), this);
return nioAccess.newHeapByteBuffer((byte[]) base(), (int)min() - BYTE_ARR_BASE, (int) byteSize(), null);
}
// factories
public static class OfByte extends HeapMemorySegmentImpl<byte[]> {
OfByte(long offset, byte[] base, long length, int mask, MemoryScope scope) {
super(offset, base, length, mask, scope);
OfByte(long offset, byte[] base, long length, int mask) {
super(offset, base, length, mask);
}
@Override
OfByte dup(long offset, long size, int mask, MemoryScope scope) {
return new OfByte(this.offset + offset, base, size, mask, scope);
OfByte dup(long offset, long size, int mask, ResourceScopeImpl scope) {
return new OfByte(this.offset + offset, base, size, mask);
}
@Override
@ -99,20 +98,19 @@ public abstract class HeapMemorySegmentImpl<H> extends AbstractMemorySegmentImpl
public static MemorySegment fromArray(byte[] arr) {
Objects.requireNonNull(arr);
long byteSize = (long)arr.length * Unsafe.ARRAY_BYTE_INDEX_SCALE;
MemoryScope scope = MemoryScope.createConfined(null, MemoryScope.DUMMY_CLEANUP_ACTION, null);
return new OfByte(Unsafe.ARRAY_BYTE_BASE_OFFSET, arr, byteSize, defaultAccessModes(byteSize), scope);
return new OfByte(Unsafe.ARRAY_BYTE_BASE_OFFSET, arr, byteSize, defaultAccessModes(byteSize));
}
}
public static class OfChar extends HeapMemorySegmentImpl<char[]> {
OfChar(long offset, char[] base, long length, int mask, MemoryScope scope) {
super(offset, base, length, mask, scope);
OfChar(long offset, char[] base, long length, int mask) {
super(offset, base, length, mask);
}
@Override
OfChar dup(long offset, long size, int mask, MemoryScope scope) {
return new OfChar(this.offset + offset, base, size, mask, scope);
OfChar dup(long offset, long size, int mask, ResourceScopeImpl scope) {
return new OfChar(this.offset + offset, base, size, mask);
}
@Override
@ -123,20 +121,19 @@ public abstract class HeapMemorySegmentImpl<H> extends AbstractMemorySegmentImpl
public static MemorySegment fromArray(char[] arr) {
Objects.requireNonNull(arr);
long byteSize = (long)arr.length * Unsafe.ARRAY_CHAR_INDEX_SCALE;
MemoryScope scope = MemoryScope.createConfined(null, MemoryScope.DUMMY_CLEANUP_ACTION, null);
return new OfChar(Unsafe.ARRAY_CHAR_BASE_OFFSET, arr, byteSize, defaultAccessModes(byteSize), scope);
return new OfChar(Unsafe.ARRAY_CHAR_BASE_OFFSET, arr, byteSize, defaultAccessModes(byteSize));
}
}
public static class OfShort extends HeapMemorySegmentImpl<short[]> {
OfShort(long offset, short[] base, long length, int mask, MemoryScope scope) {
super(offset, base, length, mask, scope);
OfShort(long offset, short[] base, long length, int mask) {
super(offset, base, length, mask);
}
@Override
OfShort dup(long offset, long size, int mask, MemoryScope scope) {
return new OfShort(this.offset + offset, base, size, mask, scope);
OfShort dup(long offset, long size, int mask, ResourceScopeImpl scope) {
return new OfShort(this.offset + offset, base, size, mask);
}
@Override
@ -147,20 +144,19 @@ public abstract class HeapMemorySegmentImpl<H> extends AbstractMemorySegmentImpl
public static MemorySegment fromArray(short[] arr) {
Objects.requireNonNull(arr);
long byteSize = (long)arr.length * Unsafe.ARRAY_SHORT_INDEX_SCALE;
MemoryScope scope = MemoryScope.createConfined(null, MemoryScope.DUMMY_CLEANUP_ACTION, null);
return new OfShort(Unsafe.ARRAY_SHORT_BASE_OFFSET, arr, byteSize, defaultAccessModes(byteSize), scope);
return new OfShort(Unsafe.ARRAY_SHORT_BASE_OFFSET, arr, byteSize, defaultAccessModes(byteSize));
}
}
public static class OfInt extends HeapMemorySegmentImpl<int[]> {
OfInt(long offset, int[] base, long length, int mask, MemoryScope scope) {
super(offset, base, length, mask, scope);
OfInt(long offset, int[] base, long length, int mask) {
super(offset, base, length, mask);
}
@Override
OfInt dup(long offset, long size, int mask, MemoryScope scope) {
return new OfInt(this.offset + offset, base, size, mask, scope);
OfInt dup(long offset, long size, int mask, ResourceScopeImpl scope) {
return new OfInt(this.offset + offset, base, size, mask);
}
@Override
@ -171,20 +167,19 @@ public abstract class HeapMemorySegmentImpl<H> extends AbstractMemorySegmentImpl
public static MemorySegment fromArray(int[] arr) {
Objects.requireNonNull(arr);
long byteSize = (long)arr.length * Unsafe.ARRAY_INT_INDEX_SCALE;
MemoryScope scope = MemoryScope.createConfined(null, MemoryScope.DUMMY_CLEANUP_ACTION, null);
return new OfInt(Unsafe.ARRAY_INT_BASE_OFFSET, arr, byteSize, defaultAccessModes(byteSize), scope);
return new OfInt(Unsafe.ARRAY_INT_BASE_OFFSET, arr, byteSize, defaultAccessModes(byteSize));
}
}
public static class OfLong extends HeapMemorySegmentImpl<long[]> {
OfLong(long offset, long[] base, long length, int mask, MemoryScope scope) {
super(offset, base, length, mask, scope);
OfLong(long offset, long[] base, long length, int mask) {
super(offset, base, length, mask);
}
@Override
OfLong dup(long offset, long size, int mask, MemoryScope scope) {
return new OfLong(this.offset + offset, base, size, mask, scope);
OfLong dup(long offset, long size, int mask, ResourceScopeImpl scope) {
return new OfLong(this.offset + offset, base, size, mask);
}
@Override
@ -195,20 +190,19 @@ public abstract class HeapMemorySegmentImpl<H> extends AbstractMemorySegmentImpl
public static MemorySegment fromArray(long[] arr) {
Objects.requireNonNull(arr);
long byteSize = (long)arr.length * Unsafe.ARRAY_LONG_INDEX_SCALE;
MemoryScope scope = MemoryScope.createConfined(null, MemoryScope.DUMMY_CLEANUP_ACTION, null);
return new OfLong(Unsafe.ARRAY_LONG_BASE_OFFSET, arr, byteSize, defaultAccessModes(byteSize), scope);
return new OfLong(Unsafe.ARRAY_LONG_BASE_OFFSET, arr, byteSize, defaultAccessModes(byteSize));
}
}
public static class OfFloat extends HeapMemorySegmentImpl<float[]> {
OfFloat(long offset, float[] base, long length, int mask, MemoryScope scope) {
super(offset, base, length, mask, scope);
OfFloat(long offset, float[] base, long length, int mask) {
super(offset, base, length, mask);
}
@Override
OfFloat dup(long offset, long size, int mask, MemoryScope scope) {
return new OfFloat(this.offset + offset, base, size, mask, scope);
OfFloat dup(long offset, long size, int mask, ResourceScopeImpl scope) {
return new OfFloat(this.offset + offset, base, size, mask);
}
@Override
@ -219,20 +213,19 @@ public abstract class HeapMemorySegmentImpl<H> extends AbstractMemorySegmentImpl
public static MemorySegment fromArray(float[] arr) {
Objects.requireNonNull(arr);
long byteSize = (long)arr.length * Unsafe.ARRAY_FLOAT_INDEX_SCALE;
MemoryScope scope = MemoryScope.createConfined(null, MemoryScope.DUMMY_CLEANUP_ACTION, null);
return new OfFloat(Unsafe.ARRAY_FLOAT_BASE_OFFSET, arr, byteSize, defaultAccessModes(byteSize), scope);
return new OfFloat(Unsafe.ARRAY_FLOAT_BASE_OFFSET, arr, byteSize, defaultAccessModes(byteSize));
}
}
public static class OfDouble extends HeapMemorySegmentImpl<double[]> {
OfDouble(long offset, double[] base, long length, int mask, MemoryScope scope) {
super(offset, base, length, mask, scope);
OfDouble(long offset, double[] base, long length, int mask) {
super(offset, base, length, mask);
}
@Override
OfDouble dup(long offset, long size, int mask, MemoryScope scope) {
return new OfDouble(this.offset + offset, base, size, mask, scope);
OfDouble dup(long offset, long size, int mask, ResourceScopeImpl scope) {
return new OfDouble(this.offset + offset, base, size, mask);
}
@Override
@ -243,8 +236,8 @@ public abstract class HeapMemorySegmentImpl<H> extends AbstractMemorySegmentImpl
public static MemorySegment fromArray(double[] arr) {
Objects.requireNonNull(arr);
long byteSize = (long)arr.length * Unsafe.ARRAY_DOUBLE_INDEX_SCALE;
MemoryScope scope = MemoryScope.createConfined(null, MemoryScope.DUMMY_CLEANUP_ACTION, null);
return new OfDouble(Unsafe.ARRAY_DOUBLE_BASE_OFFSET, arr, byteSize, defaultAccessModes(byteSize), scope);
return new OfDouble(Unsafe.ARRAY_DOUBLE_BASE_OFFSET, arr, byteSize, defaultAccessModes(byteSize));
}
}
}
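An observable consequence of the change above (a sketch): heap segments no longer carry their own
closeable scope; they are permanently associated with the global scope:
MemorySegment heap = MemorySegment.ofArray(new int[] { 1, 2, 3, 4 });
assert heap.scope() == ResourceScope.globalScope();   // always alive; cannot be closed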

View File

@ -35,7 +35,6 @@ import jdk.internal.access.foreign.MemorySegmentProxy;
import jdk.incubator.foreign.GroupLayout;
import jdk.incubator.foreign.SequenceLayout;
import jdk.incubator.foreign.ValueLayout;
import sun.invoke.util.Wrapper;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
@ -62,6 +61,7 @@ public class LayoutPath {
private static final MethodHandle ADD_STRIDE;
private static final MethodHandle MH_ADD_SCALED_OFFSET;
private static final MethodHandle MH_SLICE;
private static final int UNSPECIFIED_ELEM_INDEX = -1;
@ -72,6 +72,8 @@ public class LayoutPath {
MethodType.methodType(long.class, MemorySegment.class, long.class, long.class, long.class));
MH_ADD_SCALED_OFFSET = lookup.findStatic(LayoutPath.class, "addScaledOffset",
MethodType.methodType(long.class, long.class, long.class, long.class));
MH_SLICE = lookup.findVirtual(MemorySegment.class, "asSlice",
MethodType.methodType(MemorySegment.class, long.class, long.class));
} catch (Throwable ex) {
throw new ExceptionInInitializerError(ex);
}
@ -200,6 +202,22 @@ public class LayoutPath {
return mh;
}
public MethodHandle sliceHandle() {
if (strides.length == 0) {
// trigger checks eagerly
Utils.bitsToBytesOrThrow(offset, Utils.bitsToBytesThrowOffset);
}
MethodHandle offsetHandle = offsetHandle(); // bit offset
offsetHandle = MethodHandles.filterReturnValue(offsetHandle, Utils.MH_bitsToBytesOrThrowForOffset); // byte offset
MethodHandle sliceHandle = MH_SLICE; // (MS, long, long) -> MS
sliceHandle = MethodHandles.insertArguments(sliceHandle, 2, layout.byteSize()); // (MS, long) -> MS
sliceHandle = MethodHandles.collectArguments(sliceHandle, 1, offsetHandle); // (MS, ...) -> MS
return sliceHandle;
}
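// Usage sketch (illustrative only): this method backs the public
// MemoryLayout::sliceHandle entry point. For instance, slicing the i-th
// int of a sequence (segment and index are placeholders):
//
//   SequenceLayout seq = MemoryLayout.sequenceLayout(10, MemoryLayouts.JAVA_INT);
//   MethodHandle slicer = seq.sliceHandle(MemoryLayout.PathElement.sequenceElement());
//   MemorySegment third = (MemorySegment) slicer.invokeExact(segment, 2L);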
public MemoryLayout layout() {
return layout;
}
@ -211,9 +229,9 @@ public class LayoutPath {
} else if (enclosing.layout instanceof SequenceLayout) {
SequenceLayout seq = (SequenceLayout)enclosing.layout;
if (seq.elementCount().isPresent()) {
return enclosing.map(l -> dup(l, MemoryLayout.ofSequence(seq.elementCount().getAsLong(), newLayout)));
return enclosing.map(l -> dup(l, MemoryLayout.sequenceLayout(seq.elementCount().getAsLong(), newLayout)));
} else {
return enclosing.map(l -> dup(l, MemoryLayout.ofSequence(newLayout)));
return enclosing.map(l -> dup(l, MemoryLayout.sequenceLayout(newLayout)));
}
} else if (enclosing.layout instanceof GroupLayout) {
GroupLayout g = (GroupLayout)enclosing.layout;
@ -221,9 +239,9 @@ public class LayoutPath {
//if we selected a layout in a group we must have a valid index
newElements.set((int)elementIndex, newLayout);
if (g.isUnion()) {
return enclosing.map(l -> dup(l, MemoryLayout.ofUnion(newElements.toArray(new MemoryLayout[0]))));
return enclosing.map(l -> dup(l, MemoryLayout.unionLayout(newElements.toArray(new MemoryLayout[0]))));
} else {
return enclosing.map(l -> dup(l, MemoryLayout.ofStruct(newElements.toArray(new MemoryLayout[0]))));
return enclosing.map(l -> dup(l, MemoryLayout.structLayout(newElements.toArray(new MemoryLayout[0]))));
}
} else {
return newLayout;
@ -299,7 +317,7 @@ public class LayoutPath {
* This class provides an immutable implementation for the {@code PathElement} interface. A path element implementation
* is simply a pointer to one of the selector methods provided by the {@code LayoutPath} class.
*/
public static class PathElementImpl implements MemoryLayout.PathElement, UnaryOperator<LayoutPath> {
public static final class PathElementImpl implements MemoryLayout.PathElement, UnaryOperator<LayoutPath> {
public enum PathKind {
SEQUENCE_ELEMENT("unbound sequence element"),

View File

@ -29,27 +29,27 @@ import jdk.incubator.foreign.MemoryAddress;
import java.io.File;
import jdk.incubator.foreign.LibraryLookup;
import jdk.incubator.foreign.MemoryLayout;
import jdk.incubator.foreign.MemorySegment;
import jdk.incubator.foreign.ResourceScope;
import jdk.internal.loader.NativeLibraries;
import jdk.internal.loader.NativeLibrary;
import jdk.internal.ref.CleanerFactory;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.IdentityHashMap;
import java.lang.ref.Reference;
import java.lang.ref.WeakReference;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;
public final class LibrariesHelper {
private LibrariesHelper() {}
private final static NativeLibraries nativeLibraries =
private static final NativeLibraries nativeLibraries =
NativeLibraries.rawNativeLibraries(LibrariesHelper.class, true);
private final static Map<NativeLibrary, AtomicInteger> loadedLibraries = new IdentityHashMap<>();
private static final Map<NativeLibrary, WeakReference<ResourceScope>> loadedLibraries = new ConcurrentHashMap<>();
/**
* Load the specified shared library.
@ -76,67 +76,69 @@ public final class LibrariesHelper {
"Library not found: " + path);
}
// return the absolute path of the library of given name by searching
// in the given array of paths.
private static Optional<Path> findLibraryPath(Path[] paths, String libName) {
return Arrays.stream(paths).
map(p -> p.resolve(System.mapLibraryName(libName))).
filter(Files::isRegularFile).map(Path::toAbsolutePath).findFirst();
}
public static LibraryLookup getDefaultLibrary() {
return LibraryLookupImpl.DEFAULT_LOOKUP;
}
synchronized static LibraryLookupImpl lookup(Supplier<NativeLibrary> librarySupplier, String notFoundMsg) {
static LibraryLookupImpl lookup(Supplier<NativeLibrary> librarySupplier, String notFoundMsg) {
NativeLibrary library = librarySupplier.get();
if (library == null) {
throw new IllegalArgumentException(notFoundMsg);
}
AtomicInteger refCount = loadedLibraries.computeIfAbsent(library, lib -> new AtomicInteger());
refCount.incrementAndGet();
LibraryLookupImpl lookup = new LibraryLookupImpl(library);
CleanerFactory.cleaner().register(lookup, () -> tryUnload(library));
return lookup;
}
synchronized static void tryUnload(NativeLibrary library) {
AtomicInteger refCount = loadedLibraries.get(library);
if (refCount.decrementAndGet() == 0) {
loadedLibraries.remove(library);
nativeLibraries.unload(library);
ResourceScope[] holder = new ResourceScope[1];
try {
WeakReference<ResourceScope> scopeRef = loadedLibraries.computeIfAbsent(library, lib -> {
ResourceScopeImpl s = ResourceScopeImpl.createImplicitScope();
holder[0] = s; // keep the scope alive at least until the outer method returns
s.addOrCleanupIfFail(ResourceScopeImpl.ResourceList.ResourceCleanup.ofRunnable(() -> {
nativeLibraries.unload(library);
loadedLibraries.remove(library);
}));
return new WeakReference<>(s);
});
return new LibraryLookupImpl(library, scopeRef.get());
} finally {
Reference.reachabilityFence(holder);
}
}
static class LibraryLookupImpl implements LibraryLookup {
//Todo: in principle we could expose a scope accessor, so that users could unload libraries at will
static final class LibraryLookupImpl implements LibraryLookup {
final NativeLibrary library;
final MemorySegment librarySegment;
LibraryLookupImpl(NativeLibrary library) {
LibraryLookupImpl(NativeLibrary library, ResourceScope scope) {
this.library = library;
this.librarySegment = MemoryAddress.NULL.asSegment(Long.MAX_VALUE, scope);
}
@Override
public Optional<Symbol> lookup(String name) {
public final Optional<MemoryAddress> lookup(String name) {
try {
Objects.requireNonNull(name);
MemoryAddress addr = MemoryAddress.ofLong(library.lookup(name));
return Optional.of(new Symbol() { // inner class - retains a link to enclosing lookup
@Override
public String name() {
return name;
}
@Override
public MemoryAddress address() {
return addr;
}
});
return Optional.of(librarySegment.asSlice(addr).address());
} catch (NoSuchMethodException ex) {
return Optional.empty();
}
}
static LibraryLookup DEFAULT_LOOKUP = new LibraryLookupImpl(NativeLibraries.defaultLibrary);
@Override
public final Optional<MemorySegment> lookup(String name, MemoryLayout layout) {
try {
Objects.requireNonNull(name);
Objects.requireNonNull(layout);
MemoryAddress addr = MemoryAddress.ofLong(library.lookup(name));
if (addr.toRawLongValue() % layout.byteAlignment() != 0) {
throw new IllegalArgumentException("Bad layout alignment constraints: " + layout.byteAlignment());
}
return Optional.of(librarySegment.asSlice(addr, layout.byteSize()));
} catch (NoSuchMethodException ex) {
return Optional.empty();
}
}
static LibraryLookup DEFAULT_LOOKUP = new LibraryLookupImpl(NativeLibraries.defaultLibrary, ResourceScopeImpl.GLOBAL);
}
/* used for testing */
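A usage sketch of the reworked lookup (library and symbol names are illustrative; ofDefault()
resolves symbols in libraries already loaded by the VM):
LibraryLookup lookup = LibraryLookup.ofDefault();
Optional<MemoryAddress> strlen = lookup.lookup("strlen");
// The returned address is a slice of a Long.MAX_VALUE-sized segment tied to the library's
// implicit scope, so the library stays loaded for as long as the address is reachable.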

View File

@ -27,10 +27,10 @@ package jdk.internal.foreign;
import jdk.incubator.foreign.MemorySegment;
import jdk.internal.access.foreign.UnmapperProxy;
import jdk.internal.misc.ExtendedMapMode;
import jdk.internal.misc.ScopedMemoryAccess;
import sun.nio.ch.FileChannelImpl;
import java.io.FileDescriptor;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
@ -40,7 +40,6 @@ import java.nio.file.OpenOption;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.Objects;
import java.util.Optional;
/**
* Implementation for mapped memory segments. A mapped memory segment is a native memory segment, which
@ -54,18 +53,19 @@ public class MappedMemorySegmentImpl extends NativeMemorySegmentImpl {
static ScopedMemoryAccess SCOPED_MEMORY_ACCESS = ScopedMemoryAccess.getScopedMemoryAccess();
MappedMemorySegmentImpl(long min, UnmapperProxy unmapper, long length, int mask, MemoryScope scope) {
MappedMemorySegmentImpl(long min, UnmapperProxy unmapper, long length, int mask, ResourceScopeImpl scope) {
super(min, length, mask, scope);
this.unmapper = unmapper;
}
@Override
ByteBuffer makeByteBuffer() {
return nioAccess.newMappedByteBuffer(unmapper, min, (int)length, null, this);
return nioAccess.newMappedByteBuffer(unmapper, min, (int)length, null,
scope == ResourceScopeImpl.GLOBAL ? null : this);
}
@Override
MappedMemorySegmentImpl dup(long offset, long size, int mask, MemoryScope scope) {
MappedMemorySegmentImpl dup(long offset, long size, int mask, ResourceScopeImpl scope) {
return new MappedMemorySegmentImpl(min + offset, unmapper, size, mask, scope);
}
@ -77,11 +77,6 @@ public class MappedMemorySegmentImpl extends NativeMemorySegmentImpl {
return (MappedMemorySegmentImpl)super.asSlice(offset, newSize);
}
@Override
public MappedMemorySegmentImpl withAccessModes(int accessModes) {
return (MappedMemorySegmentImpl)super.withAccessModes(accessModes);
}
@Override
public boolean isMapped() {
return true;
@ -111,9 +106,10 @@ public class MappedMemorySegmentImpl extends NativeMemorySegmentImpl {
// factories
public static MemorySegment makeMappedSegment(Path path, long bytesOffset, long bytesSize, FileChannel.MapMode mapMode) throws IOException {
public static MemorySegment makeMappedSegment(Path path, long bytesOffset, long bytesSize, FileChannel.MapMode mapMode, ResourceScopeImpl scope) throws IOException {
Objects.requireNonNull(path);
Objects.requireNonNull(mapMode);
scope.checkValidStateSlow();
if (bytesSize < 0) throw new IllegalArgumentException("Requested bytes size must be >= 0.");
if (bytesOffset < 0) throw new IllegalArgumentException("Requested bytes offset must be >= 0.");
FileSystem fs = path.getFileSystem();
@ -125,22 +121,31 @@ public class MappedMemorySegmentImpl extends NativeMemorySegmentImpl {
UnmapperProxy unmapperProxy = ((FileChannelImpl)channelImpl).mapInternal(mapMode, bytesOffset, bytesSize);
int modes = defaultAccessModes(bytesSize);
if (mapMode == FileChannel.MapMode.READ_ONLY) {
modes &= ~WRITE;
modes |= READ_ONLY;
}
if (unmapperProxy != null) {
MemoryScope scope = MemoryScope.createConfined(null, unmapperProxy::unmap, null);
return new MappedMemorySegmentImpl(unmapperProxy.address(), unmapperProxy, bytesSize,
AbstractMemorySegmentImpl segment = new MappedMemorySegmentImpl(unmapperProxy.address(), unmapperProxy, bytesSize,
modes, scope);
scope.addOrCleanupIfFail(new ResourceScopeImpl.ResourceList.ResourceCleanup() {
@Override
public void cleanup() {
unmapperProxy.unmap();
}
});
return segment;
} else {
return new EmptyMappedMemorySegmentImpl(modes);
return new EmptyMappedMemorySegmentImpl(modes, scope);
}
}
}
private static OpenOption[] openOptions(FileChannel.MapMode mapMode) {
if (mapMode == FileChannel.MapMode.READ_ONLY) {
if (mapMode == FileChannel.MapMode.READ_ONLY ||
mapMode == ExtendedMapMode.READ_ONLY_SYNC) {
return new OpenOption[] { StandardOpenOption.READ };
} else if (mapMode == FileChannel.MapMode.READ_WRITE || mapMode == FileChannel.MapMode.PRIVATE) {
} else if (mapMode == FileChannel.MapMode.READ_WRITE ||
mapMode == FileChannel.MapMode.PRIVATE ||
mapMode == ExtendedMapMode.READ_WRITE_SYNC) {
return new OpenOption[] { StandardOpenOption.READ, StandardOpenOption.WRITE };
} else {
throw new UnsupportedOperationException("Unsupported map mode: " + mapMode);
@ -149,9 +154,8 @@ public class MappedMemorySegmentImpl extends NativeMemorySegmentImpl {
static class EmptyMappedMemorySegmentImpl extends MappedMemorySegmentImpl {
public EmptyMappedMemorySegmentImpl(int modes) {
super(0, null, 0, modes,
MemoryScope.createConfined(null, MemoryScope.DUMMY_CLEANUP_ACTION, null));
public EmptyMappedMemorySegmentImpl(int modes, ResourceScopeImpl scope) {
super(0, null, 0, modes, scope);
}
@Override
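A usage sketch of file mapping with the new scope parameter (path and sizes are illustrative;
MemorySegment.mapFile is the public entry point that funnels into makeMappedSegment above):
try (ResourceScope scope = ResourceScope.newConfinedScope()) {
    MemorySegment mapped = MemorySegment.mapFile(
            Path.of("data.bin"), 0L, 1024L,
            FileChannel.MapMode.READ_WRITE, scope);   // unmapped when the scope closes
    MemoryAccess.setLongAtOffset(mapped, 0L, 42L);
}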

View File

@ -27,7 +27,10 @@ package jdk.internal.foreign;
import jdk.incubator.foreign.MemoryAddress;
import jdk.incubator.foreign.MemorySegment;
import jdk.internal.reflect.CallerSensitive;
import jdk.internal.reflect.Reflection;
import jdk.incubator.foreign.ResourceScope;
import java.util.Objects;
/**
@ -36,52 +39,75 @@ import java.util.Objects;
*/
public final class MemoryAddressImpl implements MemoryAddress {
private final Object base;
private final AbstractMemorySegmentImpl segment;
private final long offset;
public MemoryAddressImpl(Object base, long offset) {
this.base = base;
public MemoryAddressImpl(AbstractMemorySegmentImpl segment, long offset) {
this.segment = segment;
this.offset = offset;
}
Object base() {
return segment != null ? segment.base() : null;
}
long offset() {
return segment != null ?
segment.min() + offset : offset;
}
// MemoryAddress methods
@Override
public ResourceScope scope() {
return segment != null ?
segment.scope() : ResourceScope.globalScope();
}
@Override
public MemoryAddress addOffset(long offset) {
return new MemoryAddressImpl(segment, this.offset + offset);
}
@Override
public long segmentOffset(MemorySegment segment) {
Objects.requireNonNull(segment);
AbstractMemorySegmentImpl segmentImpl = (AbstractMemorySegmentImpl)segment;
if (segmentImpl.base() != base) {
throw new IllegalArgumentException("Invalid segment: " + segment);
if (segmentImpl.base() != base()) {
throw new IllegalArgumentException("Incompatible segment: " + segment);
}
return offset - segmentImpl.min();
return offset() - segmentImpl.min();
}
@Override
public boolean isNative() {
return base() == null;
}
@Override
public long toRawLongValue() {
if (base != null) {
throw new UnsupportedOperationException("Not a native address");
if (segment != null) {
if (segment.base() != null) {
throw new UnsupportedOperationException("Not a native address");
}
segment.checkValidState();
}
return offset;
}
@Override
public MemoryAddress addOffset(long bytes) {
return new MemoryAddressImpl(base, offset + bytes);
return offset();
}
// Object methods
@Override
public int hashCode() {
return Objects.hash(base, offset);
return Objects.hash(base(), offset());
}
@Override
public boolean equals(Object that) {
if (that instanceof MemoryAddressImpl) {
MemoryAddressImpl addr = (MemoryAddressImpl)that;
return Objects.equals(base, addr.base) &&
offset == addr.offset;
return Objects.equals(base(), addr.base()) &&
offset() == addr.offset();
} else {
return false;
}
@ -89,23 +115,38 @@ public final class MemoryAddressImpl implements MemoryAddress {
@Override
public String toString() {
return "MemoryAddress{ base: " + base + " offset=0x" + Long.toHexString(offset) + " }";
return "MemoryAddress{ base: " + base() + " offset=0x" + Long.toHexString(offset()) + " }";
}
@Override
public MemorySegment asSegmentRestricted(long bytesSize, Runnable cleanupAction, Object attachment) {
Utils.checkRestrictedAccess("MemoryAddress.asSegmentRestricted");
@CallerSensitive
public final MemorySegment asSegment(long bytesSize, ResourceScope scope) {
Reflection.ensureNativeAccess(Reflection.getCallerClass());
return asSegment(bytesSize, null, scope);
}
@Override
@CallerSensitive
public final MemorySegment asSegment(long bytesSize, Runnable cleanupAction, ResourceScope scope) {
Reflection.ensureNativeAccess(Reflection.getCallerClass());
Objects.requireNonNull(scope);
if (bytesSize <= 0) {
throw new IllegalArgumentException("Invalid size : " + bytesSize);
}
return NativeMemorySegmentImpl.makeNativeSegmentUnchecked(this, bytesSize, cleanupAction, attachment);
return NativeMemorySegmentImpl.makeNativeSegmentUnchecked(this, bytesSize,
cleanupAction,
(ResourceScopeImpl) scope);
}
public static MemorySegment ofLongUnchecked(long value) {
return ofLongUnchecked(value, Long.MAX_VALUE);
}
public static MemorySegment ofLongUnchecked(long value, long byteSize, ResourceScopeImpl resourceScope) {
return NativeMemorySegmentImpl.makeNativeSegmentUnchecked(MemoryAddress.ofLong(value), byteSize, null, resourceScope);
}
public static MemorySegment ofLongUnchecked(long value, long byteSize) {
return MemoryAddress.ofLong(value).asSegmentRestricted(byteSize).share();
return NativeMemorySegmentImpl.makeNativeSegmentUnchecked(MemoryAddress.ofLong(value), byteSize, null, ResourceScopeImpl.GLOBAL);
}
}
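A sketch of the new address semantics (an address obtained from a segment inherits the segment's
scope; values are illustrative):
try (ResourceScope scope = ResourceScope.newConfinedScope()) {
    MemorySegment seg = MemorySegment.allocateNative(16, scope);
    MemoryAddress addr = seg.address().addOffset(8);
    long off = addr.segmentOffset(seg);   // 8: offset relative to seg's base
    long raw = addr.toRawLongValue();     // absolute address; checks the scope's liveness
}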

View File

@ -1,307 +0,0 @@
/*
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package jdk.internal.foreign;
import jdk.internal.misc.ScopedMemoryAccess;
import jdk.internal.ref.PhantomCleanable;
import jdk.internal.vm.annotation.ForceInline;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;
import java.lang.ref.Cleaner;
import java.lang.ref.Reference;
import java.util.Objects;
/**
* This class manages the temporal bounds associated with a memory segment as well
* as thread confinement. A scope has a liveness bit, which is updated when the scope is closed
* (this operation is triggered by {@link AbstractMemorySegmentImpl#close()}). This bit is consulted prior
* to memory access (see {@link #checkValidState()}).
* There are two kinds of memory scope: confined memory scope and shared memory scope.
* A confined memory scope has an associated owner thread that confines some operations to
* the associated owner thread, such as {@link #close()} or {@link #checkValidState()}.
* Shared scopes do not feature an owner thread - meaning their operations can be called, in a racy
* manner, by multiple threads. To guarantee temporal safety in the presence of concurrent threads,
* shared scopes use a more sophisticated synchronization mechanism, which guarantees that no concurrent
* access is possible when a scope is being closed (see {@link jdk.internal.misc.ScopedMemoryAccess}).
*/
abstract class MemoryScope implements ScopedMemoryAccess.Scope {
static final Runnable DUMMY_CLEANUP_ACTION = () -> { };
private MemoryScope(Object ref, Runnable cleanupAction, Cleaner cleaner) {
Objects.requireNonNull(cleanupAction);
this.ref = ref;
this.cleanupAction = cleanupAction;
this.scopeCleanable = cleaner != null ?
new ScopeCleanable(this, cleaner, cleanupAction) :
null;
}
/**
* Creates a confined memory scope with given attachment and cleanup action. The returned scope
* is assumed to be confined on the current thread.
* @param ref an optional reference to an instance that needs to be kept reachable
* @param cleanupAction a cleanup action to be executed when returned scope is closed
* @return a confined memory scope
*/
static MemoryScope createConfined(Object ref, Runnable cleanupAction, Cleaner cleaner) {
return new ConfinedScope(Thread.currentThread(), ref, cleanupAction, cleaner);
}
/**
* Creates a shared memory scope with given attachment and cleanup action.
* @param ref an optional reference to an instance that needs to be kept reachable
* @param cleanupAction a cleanup action to be executed when returned scope is closed
* @return a shared memory scope
*/
static MemoryScope createShared(Object ref, Runnable cleanupAction, Cleaner cleaner) {
return new SharedScope(ref, cleanupAction, cleaner);
}
protected final Object ref;
protected final ScopeCleanable scopeCleanable;
protected final Runnable cleanupAction;
/**
* Closes this scope, executing any cleanup action (where provided).
* @throws IllegalStateException if this scope is already closed or if this is
* a confined scope and this method is called outside of the owner thread.
*/
final void close() {
try {
justClose();
cleanupAction.run();
if (scopeCleanable != null) {
scopeCleanable.clear();
}
} finally {
Reference.reachabilityFence(this);
}
}
abstract void justClose();
/**
* Duplicates this scope with the given new owner thread, and {@link #close() closes} this scope.
* @param newOwner new owner thread of the returned memory scope
* @return a new confined scope, which is a duplicate of this scope, but with a new owner thread.
* @throws IllegalStateException if this scope is already closed or if this is
* a confined scope and this method is called outside of the owner thread.
*/
final MemoryScope confineTo(Thread newOwner) {
try {
justClose();
if (scopeCleanable != null) {
scopeCleanable.clear();
}
return new ConfinedScope(newOwner, ref, cleanupAction, scopeCleanable != null ?
scopeCleanable.cleaner : null);
} finally {
Reference.reachabilityFence(this);
}
}
/**
* Duplicates this scope as a shared scope, and {@link #close() closes} this scope.
* @return a new shared scope, which is a duplicate of this scope.
* @throws IllegalStateException if this scope is already closed or if this is
* a confined scope and this method is called outside of the owner thread,
* or if this is already a shared scope.
*/
final MemoryScope share() {
try {
justClose();
if (scopeCleanable != null) {
scopeCleanable.clear();
}
return new SharedScope(ref, cleanupAction, scopeCleanable != null ?
scopeCleanable.cleaner : null);
} finally {
Reference.reachabilityFence(this);
}
}
final MemoryScope cleanable(Cleaner cleaner) {
if (scopeCleanable != null) {
throw new IllegalStateException("Already registered with a cleaner");
}
try {
justClose();
return ownerThread() == null ?
new SharedScope(ref, cleanupAction, cleaner) :
new ConfinedScope(ownerThread(), ref, cleanupAction, cleaner);
} finally {
Reference.reachabilityFence(this);
}
}
/**
* Returns "owner" thread of this scope.
* @return owner thread (or null for a shared scope)
*/
public abstract Thread ownerThread();
/**
* Returns true if this scope is still alive. This method may be called from any thread.
* @return {@code true} if this scope is not closed yet.
*/
public abstract boolean isAlive();
/**
* Checks that this scope is still alive (see {@link #isAlive()}).
* @throws IllegalStateException if this scope is already closed or if this is
* a confined scope and this method is called outside of the owner thread.
*/
public abstract void checkValidState();
@Override
protected Object clone() throws CloneNotSupportedException {
throw new CloneNotSupportedException();
}
/**
* A confined scope, which features an owner thread. The liveness check features an additional
* confinement check - that is, calling any operation on this scope from a thread other than the
* owner thread will result in an exception. Because of this restriction, checking the liveness bit
* can be performed in plain mode (see {@link #checkAliveRaw(MemoryScope)}).
*/
static class ConfinedScope extends MemoryScope {
private boolean closed; // = false
final Thread owner;
public ConfinedScope(Thread owner, Object ref, Runnable cleanupAction, Cleaner cleaner) {
super(ref, cleanupAction, cleaner);
this.owner = owner;
}
@ForceInline
public final void checkValidState() {
if (owner != Thread.currentThread()) {
throw new IllegalStateException("Attempted access outside owning thread");
}
if (closed) {
throw ScopedAccessError.INSTANCE;
}
}
@Override
public boolean isAlive() {
return !closed;
}
void justClose() {
checkValidState();
closed = true;
}
@Override
public Thread ownerThread() {
return owner;
}
}
/**
* A shared scope, which can be shared across multiple threads. Closing a shared scope has to ensure that
* (i) only one thread can successfully close a scope (e.g. in a close vs. close race) and that
* (ii) no other thread is accessing the memory associated with this scope while the segment is being
* closed. To ensure the former condition, a CAS is performed on the liveness bit. Ensuring the latter
* is trickier, and requires a complex synchronization protocol (see {@link jdk.internal.misc.ScopedMemoryAccess}).
* Since it is the responsibility of the closing thread to make sure that no concurrent access is possible,
* checking the liveness bit upon access can be performed in plain mode (see {@link #checkAliveRaw(MemoryScope)}),
* as in the confined case.
*/
static class SharedScope extends MemoryScope {
static ScopedMemoryAccess SCOPED_MEMORY_ACCESS = ScopedMemoryAccess.getScopedMemoryAccess();
final static int ALIVE = 0;
final static int CLOSING = 1;
final static int CLOSED = 2;
int state = ALIVE;
private static final VarHandle STATE;
static {
try {
STATE = MethodHandles.lookup().findVarHandle(SharedScope.class, "state", int.class);
} catch (Throwable ex) {
throw new ExceptionInInitializerError(ex);
}
}
SharedScope(Object ref, Runnable cleanupAction, Cleaner cleaner) {
super(ref, cleanupAction, cleaner);
}
@Override
public Thread ownerThread() {
return null;
}
@Override
public void checkValidState() {
if (state != ALIVE) {
throw ScopedAccessError.INSTANCE;
}
}
void justClose() {
if (!STATE.compareAndSet(this, ALIVE, CLOSING)) {
throw new IllegalStateException("Already closed");
}
boolean success = SCOPED_MEMORY_ACCESS.closeScope(this);
STATE.setVolatile(this, success ? CLOSED : ALIVE);
if (!success) {
throw new IllegalStateException("Cannot close while another thread is accessing the segment");
}
}
@Override
public boolean isAlive() {
return (int)STATE.getVolatile(this) != CLOSED;
}
}
static class ScopeCleanable extends PhantomCleanable<MemoryScope> {
final Cleaner cleaner;
final Runnable cleanupAction;
public ScopeCleanable(MemoryScope referent, Cleaner cleaner, Runnable cleanupAction) {
super(referent, cleaner);
this.cleaner = cleaner;
this.cleanupAction = cleanupAction;
}
@Override
protected void performCleanup() {
cleanupAction.run();
}
}
}
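A minimal usage sketch of the lifecycle described above (illustrative only, not part of this
commit; it assumes same-package access, since MemoryScope is package-private, and the cleanup
action is a stand-in):

class MemoryScopeSketch {
    static void demo() {
        Runnable cleanup = () -> System.out.println("resources released");
        // Confined scope: only the owner thread may close it or check its state.
        MemoryScope confined = MemoryScope.createConfined(null, cleanup, null);
        confined.checkValidState();              // ok: owner thread, still alive
        // share() closes the confined scope and returns a shared duplicate...
        MemoryScope shared = confined.share();
        // ...whose operations may now be invoked, racily, from any thread.
        shared.close();                          // runs 'cleanup' exactly once
    }
}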

View File

@ -28,6 +28,8 @@ package jdk.internal.foreign;
import jdk.incubator.foreign.MemoryAddress;
import jdk.incubator.foreign.MemorySegment;
import jdk.incubator.foreign.ResourceScope;
import jdk.incubator.foreign.SegmentAllocator;
import jdk.internal.misc.Unsafe;
import jdk.internal.misc.VM;
import jdk.internal.vm.annotation.ForceInline;
@ -41,34 +43,40 @@ import java.nio.ByteBuffer;
*/
public class NativeMemorySegmentImpl extends AbstractMemorySegmentImpl {
public static final MemorySegment EVERYTHING = makeNativeSegmentUnchecked(MemoryAddress.NULL, Long.MAX_VALUE, MemoryScope.DUMMY_CLEANUP_ACTION, null)
.share()
.withAccessModes(READ | WRITE);
public static final MemorySegment EVERYTHING = makeNativeSegmentUnchecked(MemoryAddress.NULL, Long.MAX_VALUE, null, ResourceScopeImpl.GLOBAL);
private static final Unsafe unsafe = Unsafe.getUnsafe();
public static final SegmentAllocator IMPLICIT_ALLOCATOR = (size, align) -> MemorySegment.allocateNative(size, align, ResourceScope.newImplicitScope());
// The maximum alignment supported by malloc - typically 16 on
// 64-bit platforms and 8 on 32-bit platforms.
private final static long MAX_MALLOC_ALIGN = Unsafe.ADDRESS_SIZE == 4 ? 8 : 16;
private static final long MAX_MALLOC_ALIGN = Unsafe.ADDRESS_SIZE == 4 ? 8 : 16;
private static final boolean skipZeroMemory = GetBooleanAction.privilegedGetProperty("jdk.internal.foreign.skipZeroMemory");
final long min;
@ForceInline
NativeMemorySegmentImpl(long min, long length, int mask, MemoryScope scope) {
NativeMemorySegmentImpl(long min, long length, int mask, ResourceScopeImpl scope) {
super(length, mask, scope);
this.min = min;
}
@Override
NativeMemorySegmentImpl dup(long offset, long size, int mask, MemoryScope scope) {
NativeMemorySegmentImpl dup(long offset, long size, int mask, ResourceScopeImpl scope) {
return new NativeMemorySegmentImpl(min + offset, size, mask, scope);
}
@Override
ByteBuffer makeByteBuffer() {
return nioAccess.newDirectByteBuffer(min(), (int) this.length, null, this);
return nioAccess.newDirectByteBuffer(min(), (int) this.length, null,
scope == ResourceScopeImpl.GLOBAL ? null : this);
}
@Override
public boolean isNative() {
return true;
}
@Override
@ -83,7 +91,8 @@ public class NativeMemorySegmentImpl extends AbstractMemorySegmentImpl {
// factories
public static MemorySegment makeNativeSegment(long bytesSize, long alignmentBytes) {
public static MemorySegment makeNativeSegment(long bytesSize, long alignmentBytes, ResourceScopeImpl scope) {
scope.checkValidStateSlow();
if (VM.isDirectMemoryPageAligned()) {
alignmentBytes = Math.max(alignmentBytes, nioAccess.pageSize());
}
@ -98,12 +107,15 @@ public class NativeMemorySegmentImpl extends AbstractMemorySegmentImpl {
unsafe.setMemory(buf, alignedSize, (byte)0);
}
long alignedBuf = Utils.alignUp(buf, alignmentBytes);
MemoryScope scope = MemoryScope.createConfined(null, () -> {
AbstractMemorySegmentImpl segment = new NativeMemorySegmentImpl(buf, alignedSize,
defaultAccessModes(alignedSize), scope);
scope.addOrCleanupIfFail(new ResourceScopeImpl.ResourceList.ResourceCleanup() {
@Override
public void cleanup() {
unsafe.freeMemory(buf);
nioAccess.unreserveMemory(alignedSize, bytesSize);
}, null);
MemorySegment segment = new NativeMemorySegmentImpl(buf, alignedSize,
defaultAccessModes(alignedSize), scope);
}
});
if (alignedSize != bytesSize) {
long delta = alignedBuf - buf;
segment = segment.asSlice(delta, bytesSize);
@ -111,8 +123,12 @@ public class NativeMemorySegmentImpl extends AbstractMemorySegmentImpl {
return segment;
}
public static MemorySegment makeNativeSegmentUnchecked(MemoryAddress min, long bytesSize, Runnable cleanupAction, Object ref) {
return new NativeMemorySegmentImpl(min.toRawLongValue(), bytesSize, defaultAccessModes(bytesSize),
MemoryScope.createConfined(ref, cleanupAction == null ? MemoryScope.DUMMY_CLEANUP_ACTION : cleanupAction, null));
public static MemorySegment makeNativeSegmentUnchecked(MemoryAddress min, long bytesSize, Runnable cleanupAction, ResourceScopeImpl scope) {
scope.checkValidStateSlow();
AbstractMemorySegmentImpl segment = new NativeMemorySegmentImpl(min.toRawLongValue(), bytesSize, defaultAccessModes(bytesSize), scope);
if (cleanupAction != null) {
scope.addCloseAction(cleanupAction);
}
return segment;
}
}
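The factories above follow an optimistic allocate-then-register protocol; a condensed sketch
(the locals unsafe, alignedSize and scope are stand-ins for the values used in makeNativeSegment):

// Allocate first, then register the matching cleanup with the scope; if registration
// loses an add vs. close race, addOrCleanupIfFail runs the cleanup immediately, so the
// raw memory cannot leak.
long buf = unsafe.allocateMemory(alignedSize);
scope.addOrCleanupIfFail(new ResourceScopeImpl.ResourceList.ResourceCleanup() {
    @Override
    public void cleanup() {
        unsafe.freeMemory(buf);
    }
});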

View File

@ -32,7 +32,6 @@ import jdk.incubator.foreign.ValueLayout;
import java.nio.ByteOrder;
import static java.nio.ByteOrder.LITTLE_ENDIAN;
import static jdk.incubator.foreign.MemoryLayouts.ADDRESS;
public class PlatformLayouts {
public static <Z extends MemoryLayout> Z pick(Z sysv, Z win64, Z aarch64) {
@ -51,42 +50,42 @@ public class PlatformLayouts {
}
private static ValueLayout ofChar(ByteOrder order, long bitSize) {
return MemoryLayout.ofValueBits(bitSize, order)
return MemoryLayout.valueLayout(bitSize, order)
.withAttribute(CLinker.TypeKind.ATTR_NAME, CLinker.TypeKind.CHAR);
}
private static ValueLayout ofShort(ByteOrder order, long bitSize) {
return MemoryLayout.ofValueBits(bitSize, order)
return MemoryLayout.valueLayout(bitSize, order)
.withAttribute(CLinker.TypeKind.ATTR_NAME, CLinker.TypeKind.SHORT);
}
private static ValueLayout ofInt(ByteOrder order, long bitSize) {
return MemoryLayout.ofValueBits(bitSize, order)
return MemoryLayout.valueLayout(bitSize, order)
.withAttribute(CLinker.TypeKind.ATTR_NAME, CLinker.TypeKind.INT);
}
private static ValueLayout ofLong(ByteOrder order, long bitSize) {
return MemoryLayout.ofValueBits(bitSize, order)
return MemoryLayout.valueLayout(bitSize, order)
.withAttribute(CLinker.TypeKind.ATTR_NAME, CLinker.TypeKind.LONG);
}
private static ValueLayout ofLongLong(ByteOrder order, long bitSize) {
return MemoryLayout.ofValueBits(bitSize, order)
return MemoryLayout.valueLayout(bitSize, order)
.withAttribute(CLinker.TypeKind.ATTR_NAME, CLinker.TypeKind.LONG_LONG);
}
private static ValueLayout ofFloat(ByteOrder order, long bitSize) {
return MemoryLayout.ofValueBits(bitSize, order)
return MemoryLayout.valueLayout(bitSize, order)
.withAttribute(CLinker.TypeKind.ATTR_NAME, CLinker.TypeKind.FLOAT);
}
private static ValueLayout ofDouble(ByteOrder order, long bitSize) {
return MemoryLayout.ofValueBits(bitSize, order)
return MemoryLayout.valueLayout(bitSize, order)
.withAttribute(CLinker.TypeKind.ATTR_NAME, CLinker.TypeKind.DOUBLE);
}
private static ValueLayout ofPointer(ByteOrder order, long bitSize) {
return MemoryLayout.ofValueBits(bitSize, order)
return MemoryLayout.valueLayout(bitSize, order)
.withAttribute(CLinker.TypeKind.ATTR_NAME, CLinker.TypeKind.POINTER);
}
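For illustration (not part of the commit), the helpers above produce layouts such as the
following C 'int' layout, tagged with the type-kind attribute that the linker consults later:

ValueLayout cInt = MemoryLayout.valueLayout(32, ByteOrder.LITTLE_ENDIAN)
        .withAttribute(CLinker.TypeKind.ATTR_NAME, CLinker.TypeKind.INT);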
@ -162,7 +161,7 @@ public class PlatformLayouts {
* The name of the layout attribute (see {@link MemoryLayout#attributes()}) used to mark variadic parameters. The
* attribute value must be a boolean.
*/
public final static String VARARGS_ATTRIBUTE_NAME = "abi/windows/varargs";
public static final String VARARGS_ATTRIBUTE_NAME = "abi/windows/varargs";
/**
* The {@code char} native type.

View File

@ -0,0 +1,322 @@
/*
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package jdk.internal.foreign;
import jdk.incubator.foreign.MemorySegment;
import jdk.incubator.foreign.ResourceScope;
import jdk.incubator.foreign.SegmentAllocator;
import jdk.internal.misc.ScopedMemoryAccess;
import jdk.internal.ref.CleanerFactory;
import java.lang.ref.Cleaner;
import java.lang.ref.Reference;
import java.util.Objects;
/**
* This class manages the temporal bounds associated with a memory segment as well
* as thread confinement. A scope has a liveness bit, which is updated when the scope is closed
* (this operation is triggered by {@link ResourceScope#close()}). This bit is consulted prior
* to memory access (see {@link #checkValidState()}).
* There are two kinds of memory scope: confined memory scopes and shared memory scopes.
* A confined memory scope has an associated owner thread, and confines some operations, such as
* {@link #close()} or {@link #checkValidState()}, to that owner thread.
* Shared scopes do not feature an owner thread - meaning their operations can be called, in a racy
* manner, by multiple threads. To guarantee temporal safety in the presence of concurrent threads,
* shared scopes use a more sophisticated synchronization mechanism, which guarantees that no concurrent
* access is possible when a scope is being closed (see {@link jdk.internal.misc.ScopedMemoryAccess}).
*/
public abstract non-sealed class ResourceScopeImpl implements ResourceScope, ScopedMemoryAccess.Scope, SegmentAllocator {
final ResourceList resourceList;
@Override
public void addCloseAction(Runnable runnable) {
Objects.requireNonNull(runnable);
addInternal(ResourceList.ResourceCleanup.ofRunnable(runnable));
}
@Override
public boolean isImplicit() {
return false;
}
/**
* Add a cleanup action. If a failure occurred (because of an add vs. close race), call the cleanup action.
* These semantics are useful when allocating new memory segments, since we first do a malloc/mmap and _then_
* we register the cleanup (free/munmap) against the scope; so, if registration fails, we still have to
* clean up the memory. From the perspective of the client, such a failure would manifest as a factory
* returning a segment that is already "closed" - which is always possible anyway (e.g. if the scope
* is closed _after_ the cleanup for the segment is registered but _before_ the factory returns the
* new segment to the client). For this reason, it's not worth adding extra complexity to the segment
* initialization logic here - and using optimistic logic works well in practice.
*/
public void addOrCleanupIfFail(ResourceList.ResourceCleanup resource) {
try {
addInternal(resource);
} catch (Throwable ex) {
resource.cleanup();
}
}
void addInternal(ResourceList.ResourceCleanup resource) {
try {
checkValidStateSlow();
resourceList.add(resource);
} catch (ScopedMemoryAccess.Scope.ScopedAccessError err) {
throw new IllegalStateException("Already closed");
}
}
protected ResourceScopeImpl(Cleaner cleaner, ResourceList resourceList) {
this.resourceList = resourceList;
if (cleaner != null) {
cleaner.register(this, resourceList);
}
}
public static ResourceScopeImpl createImplicitScope() {
return new ImplicitScopeImpl(CleanerFactory.cleaner());
}
public static ResourceScopeImpl createConfined(Thread thread, Cleaner cleaner) {
return new ConfinedScope(thread, cleaner);
}
/**
* Creates a confined resource scope, optionally backed by the given cleaner. The returned scope
* is confined to the current thread.
* @return a confined resource scope
*/
public static ResourceScopeImpl createConfined(Cleaner cleaner) {
return new ConfinedScope(Thread.currentThread(), cleaner);
}
/**
* Creates a shared resource scope, optionally backed by the given cleaner.
* @return a shared resource scope
*/
public static ResourceScopeImpl createShared(Cleaner cleaner) {
return new SharedScope(cleaner);
}
private final void release0(HandleImpl handle) {
try {
Objects.requireNonNull(handle);
if (handle.scope() != this) {
throw new IllegalArgumentException("Cannot release an handle acquired from another scope");
}
handle.release();
} finally {
Reference.reachabilityFence(this);
}
}
@Override
public final void release(ResourceScope.Handle handle) {
release0((HandleImpl)handle);
}
@Override
public final void release(ScopedMemoryAccess.Scope.Handle handle) {
release0((HandleImpl)handle);
}
@Override
public abstract HandleImpl acquire();
/**
* Internal interface used to implement resource scope handles.
*/
public non-sealed interface HandleImpl extends ResourceScope.Handle, ScopedMemoryAccess.Scope.Handle {
@Override
ResourceScopeImpl scope();
void release();
}
/**
* Closes this scope, executing any cleanup action (where provided).
* @throws IllegalStateException if this scope is already closed or if this is
* a confined scope and this method is called outside of the owner thread.
*/
public void close() {
try {
justClose();
resourceList.cleanup();
} finally {
Reference.reachabilityFence(this);
}
}
abstract void justClose();
/**
* Returns "owner" thread of this scope.
* @return owner thread (or null for a shared scope)
*/
public abstract Thread ownerThread();
/**
* Returns true if this scope is still alive. This method may be called from any thread.
* @return {@code true} if this scope is not closed yet.
*/
public abstract boolean isAlive();
/**
* This is a faster version of {@link #checkValidStateSlow()}, which is called upon memory access, and which
* relies on invariants associated with the memory scope implementations (typically, volatile access
* to the closed state bit is replaced with plain access, and the ownership check is removed where not needed).
* Should be used with care.
*/
public abstract void checkValidState();
/**
* Checks that this scope is still alive (see {@link #isAlive()}).
* @throws IllegalStateException if this scope is already closed or if this is
* a confined scope and this method is called outside of the owner thread.
*/
public final void checkValidStateSlow() {
if (ownerThread() != null && Thread.currentThread() != ownerThread()) {
throw new IllegalStateException("Attempted access outside owning thread");
} else if (!isAlive()) {
throw new IllegalStateException("Already closed");
}
}
@Override
protected Object clone() throws CloneNotSupportedException {
throw new CloneNotSupportedException();
}
/**
* Allocates a segment using this scope. Used by {@link SegmentAllocator#ofScope(ResourceScope)}.
*/
@Override
public MemorySegment allocate(long bytesSize, long bytesAlignment) {
return MemorySegment.allocateNative(bytesSize, bytesAlignment, this);
}
/**
* A non-closeable, shared scope. Similar to a shared scope, but its {@link #close()} method throws unconditionally.
* In addition, non-closeable scopes feature a much simpler scheme for generating resource scope handles, where
* the scope itself also acts as a resource scope handle and is returned by {@link #acquire()}.
*/
static class ImplicitScopeImpl extends SharedScope implements HandleImpl {
public ImplicitScopeImpl(Cleaner cleaner) {
super(cleaner);
}
@Override
public HandleImpl acquire() {
return this;
}
@Override
public boolean isImplicit() {
return true;
}
@Override
public void close() {
throw new UnsupportedOperationException("Scope cannot be closed");
}
@Override
public void release() {
// do nothing
}
@Override
public ResourceScopeImpl scope() {
return this;
}
}
/**
* The global, always alive, non-closeable, shared scope. This is like a {@link ImplicitScopeImpl non-closeable scope},
* except that the operation which adds new resources to the global scope does nothing: since the scope can never
* be closed, there is nothing to track.
*/
public static final ResourceScopeImpl GLOBAL = new ImplicitScopeImpl(null) {
@Override
void addInternal(ResourceList.ResourceCleanup resource) {
// do nothing
}
};
/**
* A list of all cleanup actions associated with a resource scope. Cleanup actions are modelled as instances
* of the {@link ResourceCleanup} class, and, together, form a linked list. Depending on whether a scope
* is shared or confined, different implementations of this class will be used, see {@link ConfinedScope.ConfinedResourceList}
* and {@link SharedScope.SharedResourceList}.
*/
public abstract static class ResourceList implements Runnable {
ResourceCleanup fst;
abstract void add(ResourceCleanup cleanup);
abstract void cleanup();
public final void run() {
cleanup(); // cleaner interop
}
static void cleanup(ResourceCleanup first) {
ResourceCleanup current = first;
while (current != null) {
current.cleanup();
current = current.next;
}
}
public static abstract class ResourceCleanup {
ResourceCleanup next;
public abstract void cleanup();
static final ResourceCleanup CLOSED_LIST = new ResourceCleanup() {
@Override
public void cleanup() {
throw new IllegalStateException("This resource list has already been closed!");
}
};
static ResourceCleanup ofRunnable(Runnable cleanupAction) {
return new ResourceCleanup() {
@Override
public void cleanup() {
cleanupAction.run();
}
};
}
}
}
}
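A sketch of how the close-action machinery above surfaces through the public API added by this
commit (illustrative; the printed message is a stand-in for a real cleanup):

import jdk.incubator.foreign.MemorySegment;
import jdk.incubator.foreign.ResourceScope;

class CloseActionSketch {
    static void demo() {
        ResourceScope scope = ResourceScope.newConfinedScope();
        scope.addCloseAction(() -> System.out.println("scope closed"));
        MemorySegment segment = MemorySegment.allocateNative(100, scope);
        scope.close();          // runs the close action and frees the segment's memory
        // segment.address();   // would now fail: the scope is no longer alive
    }
}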

View File

@ -0,0 +1,196 @@
/*
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.internal.foreign;
import jdk.incubator.foreign.ResourceScope;
import jdk.internal.misc.ScopedMemoryAccess;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;
import java.lang.ref.Cleaner;
import java.lang.ref.Reference;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* A shared scope, which can be shared across multiple threads. Closing a shared scope has to ensure that
* (i) only one thread can successfully close a scope (e.g. in a close vs. close race) and that
* (ii) no other thread is accessing the memory associated with this scope while the segment is being
* closed. To ensure the former condition, a CAS is performed on the liveness bit. Ensuring the latter
* is trickier, and requires a complex synchronization protocol (see {@link jdk.internal.misc.ScopedMemoryAccess}).
* Since it is the responsibility of the closing thread to make sure that no concurrent access is possible,
* checking the liveness bit upon access can be performed in plain mode, as in the confined case.
*/
class SharedScope extends ResourceScopeImpl {
private static final ScopedMemoryAccess SCOPED_MEMORY_ACCESS = ScopedMemoryAccess.getScopedMemoryAccess();
private static final int ALIVE = 0;
private static final int CLOSING = -1;
private static final int CLOSED = -2;
private static final int MAX_FORKS = Integer.MAX_VALUE;
private int state = ALIVE;
private static final VarHandle STATE;
static {
try {
STATE = MethodHandles.lookup().findVarHandle(SharedScope.class, "state", int.class);
} catch (Throwable ex) {
throw new ExceptionInInitializerError(ex);
}
}
SharedScope(Cleaner cleaner) {
super(cleaner, new SharedResourceList());
}
@Override
public Thread ownerThread() {
return null;
}
@Override
public void checkValidState() {
if (state < ALIVE) {
throw ScopedAccessError.INSTANCE;
}
}
@Override
public HandleImpl acquire() {
int value;
do {
value = (int) STATE.getVolatile(this);
if (value < ALIVE) {
//segment is not alive!
throw new IllegalStateException("Already closed");
} else if (value == MAX_FORKS) {
//overflow
throw new IllegalStateException("Segment acquire limit exceeded");
}
} while (!STATE.compareAndSet(this, value, value + 1));
return new SharedHandle();
}
void justClose() {
int prevState = (int) STATE.compareAndExchange(this, ALIVE, CLOSING);
if (prevState < 0) {
throw new IllegalStateException("Already closed");
} else if (prevState != ALIVE) {
throw new IllegalStateException("Scope is acquired by " + prevState + " locks");
}
boolean success = SCOPED_MEMORY_ACCESS.closeScope(this);
STATE.setVolatile(this, success ? CLOSED : ALIVE);
if (!success) {
throw new IllegalStateException("Cannot close while another thread is accessing the segment");
}
}
@Override
public boolean isAlive() {
return (int) STATE.getVolatile(this) != CLOSED;
}
/**
* A shared resource list; this implementation has to handle add vs. add races, as well as add vs. cleanup races.
*/
static class SharedResourceList extends ResourceList {
static final VarHandle FST;
static {
try {
FST = MethodHandles.lookup().findVarHandle(ResourceList.class, "fst", ResourceCleanup.class);
} catch (Throwable ex) {
throw new ExceptionInInitializerError(ex);
}
}
@Override
void add(ResourceCleanup cleanup) {
while (true) {
ResourceCleanup prev = (ResourceCleanup) FST.getAcquire(this);
cleanup.next = prev;
ResourceCleanup newSegment = (ResourceCleanup) FST.compareAndExchangeRelease(this, prev, cleanup);
if (newSegment == ResourceCleanup.CLOSED_LIST) {
// too late
throw new IllegalStateException("Already closed");
} else if (newSegment == prev) {
return; //victory
}
// keep trying
}
}
void cleanup() {
// At this point we are only interested in add vs. close races - not close vs. close
// (because MemoryScope::justClose ensured that this thread won the race to close the scope).
// So, the only "bad" thing that could happen is that some other thread adds to this list
// while we're closing it.
if (FST.getAcquire(this) != ResourceCleanup.CLOSED_LIST) {
//ok now we're really closing down
ResourceCleanup prev = null;
while (true) {
prev = (ResourceCleanup) FST.getAcquire(this);
// no need to check for CLOSED_LIST, since only one thread can get here!
if (FST.weakCompareAndSetRelease(this, prev, ResourceCleanup.CLOSED_LIST)) {
break;
}
}
cleanup(prev);
} else {
throw new IllegalStateException("Attempt to cleanup an already closed resource list");
}
}
}
/**
* A shared resource scope handle; this implementation has to handle close vs. close races.
*/
class SharedHandle implements HandleImpl {
final AtomicBoolean released = new AtomicBoolean(false);
@Override
public ResourceScopeImpl scope() {
return SharedScope.this;
}
@Override
public void release() {
if (released.compareAndSet(false, true)) {
int value;
do {
value = (int) STATE.getVolatile(SharedScope.this);
if (value <= ALIVE) {
//cannot get here - we can't close segment twice
throw new IllegalStateException("Already closed");
}
} while (!STATE.compareAndSet(SharedScope.this, value, value - 1));
}
}
}
}
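A simplified, self-contained model of the state encoding used above (illustrative, not part of
the commit): state == 0 means alive with no outstanding handles, state > 0 counts acquired
handles, and negative values mean closing/closed.

import java.util.concurrent.atomic.AtomicInteger;

class SharedStateModel {
    static final int ALIVE = 0, CLOSING = -1, CLOSED = -2;
    final AtomicInteger state = new AtomicInteger(ALIVE);

    void acquire() {
        int v;
        do {
            v = state.get();
            if (v < ALIVE) throw new IllegalStateException("Already closed");
        } while (!state.compareAndSet(v, v + 1));      // one more outstanding handle
    }

    void close() {
        int prev = state.compareAndExchange(ALIVE, CLOSING);
        if (prev < ALIVE) throw new IllegalStateException("Already closed");
        if (prev > ALIVE) throw new IllegalStateException(prev + " handles still held");
        // The real justClose() additionally fences racy readers via closeScope()
        // before publishing the CLOSED state.
        state.set(CLOSED);
    }
}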

View File

@ -44,14 +44,10 @@ import static sun.security.action.GetPropertyAction.*;
* This class contains misc helper functions to support creation of memory segments.
*/
public final class Utils {
// used when testing invoke exact behavior of memory access handles
private static final boolean SHOULD_ADAPT_HANDLES
= Boolean.parseBoolean(privilegedGetProperty("jdk.internal.foreign.SHOULD_ADAPT_HANDLES", "true"));
private static final String foreignRestrictedAccess = Optional.ofNullable(VM.getSavedProperty("foreign.restricted"))
.orElse("deny");
private static final MethodHandle SEGMENT_FILTER;
public static final MethodHandle MH_bitsToBytesOrThrowForOffset;
@ -107,27 +103,6 @@ public final class Utils {
return (AbstractMemorySegmentImpl)segment;
}
public static void checkRestrictedAccess(String method) {
switch (foreignRestrictedAccess) {
case "deny" -> throwIllegalAccessError(foreignRestrictedAccess, method);
case "warn" -> System.err.println("WARNING: Accessing restricted foreign method: " + method);
case "debug" -> {
StringBuilder sb = new StringBuilder("DEBUG: restricted foreign method: \" + method");
StackWalker.getInstance().forEach(f -> sb.append(System.lineSeparator())
.append("\tat ")
.append(f));
System.err.println(sb.toString());
}
case "permit" -> {}
default -> throwIllegalAccessError(foreignRestrictedAccess, method);
}
}
private static void throwIllegalAccessError(String value, String method) {
throw new IllegalAccessError("Illegal access to restricted foreign method: " + method +
" ; system property 'foreign.restricted' is set to '" + value + "'");
}
public static void checkPrimitiveCarrierCompat(Class<?> carrier, MemoryLayout layout) {
checkLayoutType(layout, ValueLayout.class);
if (!isValidPrimitiveCarrier(carrier))
@ -150,5 +125,4 @@ public final class Utils {
if (!layoutType.isInstance(layout))
throw new IllegalArgumentException("Expected a " + layoutType.getSimpleName() + ": " + layout);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,11 +28,13 @@ import jdk.incubator.foreign.MemoryAddress;
import jdk.incubator.foreign.MemoryHandles;
import jdk.incubator.foreign.MemoryLayout;
import jdk.incubator.foreign.MemorySegment;
import jdk.incubator.foreign.ResourceScope;
import jdk.incubator.foreign.SegmentAllocator;
import jdk.internal.foreign.MemoryAddressImpl;
import jdk.internal.foreign.ResourceScopeImpl;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;
@ -44,7 +46,6 @@ import java.nio.ByteOrder;
import static java.lang.invoke.MethodHandles.collectArguments;
import static java.lang.invoke.MethodHandles.filterArguments;
import static java.lang.invoke.MethodHandles.insertArguments;
import static java.lang.invoke.MethodHandles.permuteArguments;
import static java.lang.invoke.MethodType.methodType;
/**
@ -218,16 +219,99 @@ public abstract class Binding {
MH_BASE_ADDRESS = lookup.findVirtual(MemorySegment.class, "address",
methodType(MemoryAddress.class));
MH_COPY_BUFFER = lookup.findStatic(Binding.Copy.class, "copyBuffer",
methodType(MemorySegment.class, MemorySegment.class, long.class, long.class, SharedUtils.Allocator.class));
methodType(MemorySegment.class, MemorySegment.class, long.class, long.class, Context.class));
MH_ALLOCATE_BUFFER = lookup.findStatic(Binding.Allocate.class, "allocateBuffer",
methodType(MemorySegment.class, long.class, long.class, SharedUtils.Allocator.class));
methodType(MemorySegment.class, long.class, long.class, Context.class));
MH_TO_SEGMENT = lookup.findStatic(Binding.ToSegment.class, "toSegment",
methodType(MemorySegment.class, MemoryAddress.class, long.class));
methodType(MemorySegment.class, MemoryAddress.class, long.class, Context.class));
} catch (ReflectiveOperationException e) {
throw new RuntimeException(e);
}
}
/**
* A binding context is used as a helper to carry out evaluation of certain bindings; for instance,
* it helps {@link Allocate} bindings by providing the {@link SegmentAllocator} that should be used for
* the allocation operation, or {@link ToSegment} bindings by providing the {@link ResourceScope} that
* should be used to create an unsafe struct from a memory address.
*/
public static class Context implements AutoCloseable {
private final SegmentAllocator allocator;
private final ResourceScope scope;
private Context(SegmentAllocator allocator, ResourceScope scope) {
this.allocator = allocator;
this.scope = scope;
}
public SegmentAllocator allocator() {
return allocator;
}
public ResourceScope scope() {
return scope;
}
@Override
public void close() {
scope().close();
}
/**
* Creates a binding context backed by a fresh confined scope and an arena allocator bounded by the given size.
*/
public static Context ofBoundedAllocator(long size) {
ResourceScope scope = ResourceScope.newConfinedScope();
return new Context(SegmentAllocator.arenaAllocator(size, scope), scope);
}
/**
* Creates a binding context from the given segment allocator. The resulting context will throw when
* the context's scope is accessed.
*/
public static Context ofAllocator(SegmentAllocator allocator) {
return new Context(allocator, null) {
@Override
public ResourceScope scope() {
throw new UnsupportedOperationException();
}
};
}
/**
* Creates a binding context backed by a fresh confined scope. The resulting context will throw when
* the context's allocator is accessed.
*/
public static Context ofScope() {
ResourceScope scope = ResourceScope.newConfinedScope();
return new Context(null, scope) {
@Override
public SegmentAllocator allocator() { throw new UnsupportedOperationException(); }
};
}
/**
* Dummy binding context. It throws when its scope is accessed, returns a throwing allocator, and has
* an idempotent {@link #close()}.
*/
public static final Context DUMMY = new Context(null, null) {
@Override
public SegmentAllocator allocator() {
return SharedUtils.THROWING_ALLOCATOR;
}
@Override
public ResourceScope scope() {
throw new UnsupportedOperationException();
}
@Override
public void close() {
// do nothing
}
};
}
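A sketch of how a context is typically consumed (illustrative; the sizes are arbitrary). A
bounded-allocator context owns its scope, so it is closed with try-with-resources once all
bindings for a call have been evaluated:

try (Binding.Context context = Binding.Context.ofBoundedAllocator(64)) {
    MemorySegment scratch = context.allocator().allocate(8, 8);  // e.g. for a Copy binding
    // ... evaluate the remaining bindings against 'context' ...
}   // closes the confined scope, freeing all scratch buffers at once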
enum Tag {
VM_STORE,
VM_LOAD,
@ -255,31 +339,10 @@ public abstract class Binding {
public abstract void verify(Deque<Class<?>> stack);
public abstract void interpret(Deque<Object> stack, BindingInterpreter.StoreFunc storeFunc,
BindingInterpreter.LoadFunc loadFunc, SharedUtils.Allocator allocator);
BindingInterpreter.LoadFunc loadFunc, Context context);
public abstract MethodHandle specialize(MethodHandle specializedHandle, int insertPos, int allocatorPos);
private static MethodHandle mergeArguments(MethodHandle mh, int sourceIndex, int destIndex) {
MethodType oldType = mh.type();
Class<?> sourceType = oldType.parameterType(sourceIndex);
Class<?> destType = oldType.parameterType(destIndex);
if (sourceType != destType) {
// TODO meet?
throw new IllegalArgumentException("Parameter types differ: " + sourceType + " != " + destType);
}
MethodType newType = oldType.dropParameterTypes(destIndex, destIndex + 1);
int[] reorder = new int[oldType.parameterCount()];
assert destIndex > sourceIndex;
for (int i = 0, index = 0; i < reorder.length; i++) {
if (i != destIndex) {
reorder[i] = index++;
} else {
reorder[i] = sourceIndex;
}
}
return permuteArguments(mh, newType, reorder);
}
private static void checkType(Class<?> type) {
if (!type.isPrimitive() || type == void.class || type == boolean.class)
throw new IllegalArgumentException("Illegal type: " + type);
@ -477,7 +540,7 @@ public abstract class Binding {
@Override
public void interpret(Deque<Object> stack, BindingInterpreter.StoreFunc storeFunc,
BindingInterpreter.LoadFunc loadFunc, SharedUtils.Allocator allocator) {
BindingInterpreter.LoadFunc loadFunc, Context context) {
storeFunc.store(storage(), type(), stack.pop());
}
@ -512,7 +575,7 @@ public abstract class Binding {
@Override
public void interpret(Deque<Object> stack, BindingInterpreter.StoreFunc storeFunc,
BindingInterpreter.LoadFunc loadFunc, SharedUtils.Allocator allocator) {
BindingInterpreter.LoadFunc loadFunc, Context context) {
stack.push(loadFunc.load(storage(), type()));
}
@ -564,7 +627,10 @@ public abstract class Binding {
}
public VarHandle varHandle() {
return MemoryHandles.insertCoordinates(MemoryHandles.varHandle(type, ByteOrder.nativeOrder()), 1, offset);
// alignment is set to 1 byte here to avoid exceptions for cases where we do super word
// copies of e.g. 2 int fields of a struct as a single long, while the struct is only
// 4-byte-aligned (since it only contains ints)
return MemoryHandles.insertCoordinates(MemoryHandles.varHandle(type, 1, ByteOrder.nativeOrder()), 1, offset);
}
}
@ -589,7 +655,7 @@ public abstract class Binding {
@Override
public void interpret(Deque<Object> stack, BindingInterpreter.StoreFunc storeFunc,
BindingInterpreter.LoadFunc loadFunc, SharedUtils.Allocator allocator) {
BindingInterpreter.LoadFunc loadFunc, Context context) {
Object value = stack.pop();
MemorySegment operand = (MemorySegment) stack.pop();
MemorySegment writeAddress = operand.asSlice(offset());
@ -633,7 +699,7 @@ public abstract class Binding {
@Override
public void interpret(Deque<Object> stack, BindingInterpreter.StoreFunc storeFunc,
BindingInterpreter.LoadFunc loadFunc, SharedUtils.Allocator allocator) {
BindingInterpreter.LoadFunc loadFunc, Context context) {
MemorySegment operand = (MemorySegment) stack.pop();
MemorySegment readAddress = operand.asSlice(offset());
stack.push(SharedUtils.read(readAddress, type()));
@ -673,8 +739,8 @@ public abstract class Binding {
}
private static MemorySegment copyBuffer(MemorySegment operand, long size, long alignment,
SharedUtils.Allocator allocator) {
MemorySegment copy = allocator.allocate(size, alignment);
Context context) {
MemorySegment copy = context.allocator().allocate(size, alignment);
copy.copyFrom(operand.asSlice(0, size));
return copy;
}
@ -705,9 +771,9 @@ public abstract class Binding {
@Override
public void interpret(Deque<Object> stack, BindingInterpreter.StoreFunc storeFunc,
BindingInterpreter.LoadFunc loadFunc, SharedUtils.Allocator allocator) {
BindingInterpreter.LoadFunc loadFunc, Context context) {
MemorySegment operand = (MemorySegment) stack.pop();
MemorySegment copy = copyBuffer(operand, size, alignment, allocator);
MemorySegment copy = copyBuffer(operand, size, alignment, context);
stack.push(copy);
}
@ -715,7 +781,7 @@ public abstract class Binding {
public MethodHandle specialize(MethodHandle specializedHandle, int insertPos, int allocatorPos) {
MethodHandle filter = insertArguments(MH_COPY_BUFFER, 1, size, alignment);
specializedHandle = collectArguments(specializedHandle, insertPos, filter);
return mergeArguments(specializedHandle, allocatorPos, insertPos + 1);
return SharedUtils.mergeArguments(specializedHandle, allocatorPos, insertPos + 1);
}
@Override
@ -748,8 +814,8 @@ public abstract class Binding {
this.alignment = alignment;
}
private static MemorySegment allocateBuffer(long size, long allignment, SharedUtils.Allocator allocator) {
return allocator.allocate(size, allignment);
private static MemorySegment allocateBuffer(long size, long alignment, Context context) {
return context.allocator().allocate(size, alignment);
}
public long size() {
@ -776,15 +842,15 @@ public abstract class Binding {
@Override
public void interpret(Deque<Object> stack, BindingInterpreter.StoreFunc storeFunc,
BindingInterpreter.LoadFunc loadFunc, SharedUtils.Allocator allocator) {
stack.push(allocateBuffer(size, alignment, allocator));
BindingInterpreter.LoadFunc loadFunc, Context context) {
stack.push(allocateBuffer(size, alignment, context));
}
@Override
public MethodHandle specialize(MethodHandle specializedHandle, int insertPos, int allocatorPos) {
MethodHandle allocateBuffer = insertArguments(MH_ALLOCATE_BUFFER, 0, size, alignment);
specializedHandle = collectArguments(specializedHandle, insertPos, allocateBuffer);
return mergeArguments(specializedHandle, allocatorPos, insertPos);
return SharedUtils.mergeArguments(specializedHandle, allocatorPos, insertPos);
}
@Override
@ -823,7 +889,7 @@ public abstract class Binding {
@Override
public void interpret(Deque<Object> stack, BindingInterpreter.StoreFunc storeFunc,
BindingInterpreter.LoadFunc loadFunc, SharedUtils.Allocator allocator) {
BindingInterpreter.LoadFunc loadFunc, Context context) {
stack.push(((MemoryAddress)stack.pop()).toRawLongValue());
}
@ -839,7 +905,7 @@ public abstract class Binding {
}
/**
* Box_ADDRESS()
* BOX_ADDRESS()
* Pops a 'long' from the operand stack, converts it to a 'MemoryAddress',
* and pushes that onto the operand stack.
*/
@ -858,7 +924,7 @@ public abstract class Binding {
@Override
public void interpret(Deque<Object> stack, BindingInterpreter.StoreFunc storeFunc,
BindingInterpreter.LoadFunc loadFunc, SharedUtils.Allocator allocator) {
BindingInterpreter.LoadFunc loadFunc, Context context) {
stack.push(MemoryAddress.ofLong((long) stack.pop()));
}
@ -893,7 +959,7 @@ public abstract class Binding {
@Override
public void interpret(Deque<Object> stack, BindingInterpreter.StoreFunc storeFunc,
BindingInterpreter.LoadFunc loadFunc, SharedUtils.Allocator allocator) {
BindingInterpreter.LoadFunc loadFunc, Context context) {
stack.push(((MemorySegment) stack.pop()).address());
}
@ -909,8 +975,8 @@ public abstract class Binding {
}
/**
* BASE_ADDRESS([size])
* Pops a MemoryAddress from the operand stack, and takes the converts it to a MemorySegment
* TO_SEGMENT([size])
* Pops a MemoryAddress from the operand stack, and converts it to a MemorySegment
* with the given size, and pushes that onto the operand stack
*/
public static class ToSegment extends Binding {
@ -922,9 +988,8 @@ public abstract class Binding {
this.size = size;
}
// FIXME should register with scope
private static MemorySegment toSegment(MemoryAddress operand, long size) {
return MemoryAddressImpl.ofLongUnchecked(operand.toRawLongValue(), size);
private static MemorySegment toSegment(MemoryAddress operand, long size, Context context) {
return MemoryAddressImpl.ofLongUnchecked(operand.toRawLongValue(), size, (ResourceScopeImpl) context.scope);
}
@Override
@ -936,16 +1001,17 @@ public abstract class Binding {
@Override
public void interpret(Deque<Object> stack, BindingInterpreter.StoreFunc storeFunc,
BindingInterpreter.LoadFunc loadFunc, SharedUtils.Allocator allocator) {
BindingInterpreter.LoadFunc loadFunc, Context context) {
MemoryAddress operand = (MemoryAddress) stack.pop();
MemorySegment segment = toSegment(operand, size);
MemorySegment segment = toSegment(operand, size, context);
stack.push(segment);
}
@Override
public MethodHandle specialize(MethodHandle specializedHandle, int insertPos, int allocatorPos) {
MethodHandle toSegmentHandle = insertArguments(MH_TO_SEGMENT, 1, size);
return filterArguments(specializedHandle, insertPos, toSegmentHandle);
specializedHandle = collectArguments(specializedHandle, insertPos, toSegmentHandle);
return SharedUtils.mergeArguments(specializedHandle, allocatorPos, insertPos + 1);
}
@Override
@ -988,7 +1054,7 @@ public abstract class Binding {
@Override
public void interpret(Deque<Object> stack, BindingInterpreter.StoreFunc storeFunc,
BindingInterpreter.LoadFunc loadFunc, SharedUtils.Allocator allocator) {
BindingInterpreter.LoadFunc loadFunc, Context context) {
stack.push(stack.peekLast());
}
@ -1012,7 +1078,7 @@ public abstract class Binding {
*/
@Override
public MethodHandle specialize(MethodHandle specializedHandle, int insertPos, int allocatorPos) {
return mergeArguments(specializedHandle, insertPos, insertPos + 1);
return SharedUtils.mergeArguments(specializedHandle, insertPos, insertPos + 1);
}
@Override

View File

@ -30,19 +30,19 @@ import java.util.List;
public class BindingInterpreter {
static void unbox(Object arg, List<Binding> bindings, StoreFunc storeFunc, SharedUtils.Allocator allocator) {
static void unbox(Object arg, List<Binding> bindings, StoreFunc storeFunc, Binding.Context context) {
Deque<Object> stack = new ArrayDeque<>();
stack.push(arg);
for (Binding b : bindings) {
b.interpret(stack, storeFunc, null, allocator);
b.interpret(stack, storeFunc, null, context);
}
}
static Object box(List<Binding> bindings, LoadFunc loadFunc, SharedUtils.Allocator allocator) {
static Object box(List<Binding> bindings, LoadFunc loadFunc, Binding.Context context) {
Deque<Object> stack = new ArrayDeque<>();
for (Binding b : bindings) {
b.interpret(stack, null, loadFunc, allocator);
b.interpret(stack, null, loadFunc, context);
}
return stack.pop();
}
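A sketch of driving box() with a hypothetical recipe (illustrative: returnStorage and
rawReturnValue are stand-ins, and the Binding.vmLoad/boxAddress factory methods are assumed):

// Box a pointer return value by loading the raw bits from their storage and
// wrapping them as an address.
List<Binding> recipe = List.of(
        Binding.vmLoad(returnStorage, long.class),   // push the raw long
        Binding.boxAddress());                       // long -> MemoryAddress
Object result = BindingInterpreter.box(recipe,
        (storage, type) -> rawReturnValue,           // LoadFunc backed by the call buffer
        Binding.Context.DUMMY);                      // no allocation needed here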

View File

@ -26,6 +26,7 @@ package jdk.internal.foreign.abi;
import jdk.incubator.foreign.MemoryLayouts;
import jdk.incubator.foreign.MemorySegment;
import jdk.internal.foreign.MemoryAddressImpl;
import java.io.PrintStream;
import java.lang.invoke.VarHandle;
@ -115,7 +116,7 @@ class BufferLayout {
return Long.toHexString((long) VH_LONG.get(buffer.asSlice(offset)));
}
private static void dumpValues(jdk.internal.foreign.abi.Architecture arch, MemorySegment buff, PrintStream stream,
private void dumpValues(jdk.internal.foreign.abi.Architecture arch, MemorySegment buff, PrintStream stream,
Map<jdk.internal.foreign.abi.VMStorage, Long> offsets) {
for (var entry : offsets.entrySet()) {
VMStorage storage = entry.getKey();
@ -128,6 +129,14 @@ class BufferLayout {
}
stream.println("}");
}
long stack_ptr = (long) VH_LONG.get(buff.asSlice(stack_args));
long stack_bytes = (long) VH_LONG.get(buff.asSlice(stack_args_bytes));
MemorySegment stackArgs = MemoryAddressImpl.ofLongUnchecked(stack_ptr, stack_bytes);
stream.println("Stack {");
for (int i = 0; i < stack_bytes / 8; i += 8) {
stream.printf(" @%d: %s%n", i, getLongString(stackArgs, i));
}
stream.println("}");
}
void dump(Architecture arch, MemorySegment buff, PrintStream stream) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,12 +25,13 @@
package jdk.internal.foreign.abi;
import jdk.incubator.foreign.Addressable;
import jdk.incubator.foreign.MemoryAddress;
import jdk.incubator.foreign.MemoryLayouts;
import jdk.incubator.foreign.MemorySegment;
import jdk.incubator.foreign.NativeScope;
import jdk.incubator.foreign.ResourceScope;
import jdk.incubator.foreign.SegmentAllocator;
import jdk.internal.access.JavaLangInvokeAccess;
import jdk.internal.access.SharedSecrets;
import jdk.internal.foreign.Utils;
import jdk.internal.invoke.NativeEntryPoint;
import jdk.internal.invoke.VMStorageProxy;
import sun.security.action.GetPropertyAction;
@ -39,24 +40,19 @@ import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.lang.invoke.VarHandle;
import java.lang.ref.Reference;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import static java.lang.invoke.MethodHandles.collectArguments;
import static java.lang.invoke.MethodHandles.dropArguments;
import static java.lang.invoke.MethodHandles.empty;
import static java.lang.invoke.MethodHandles.filterArguments;
import static java.lang.invoke.MethodHandles.identity;
import static java.lang.invoke.MethodHandles.insertArguments;
import static java.lang.invoke.MethodHandles.tryFinally;
import static java.lang.invoke.MethodType.methodType;
import static jdk.internal.foreign.abi.SharedUtils.Allocator.THROWING_ALLOCATOR;
import static jdk.internal.foreign.abi.SharedUtils.DEFAULT_ALLOCATOR;
import static sun.security.action.GetBooleanAction.privilegedGetProperty;
/**
@ -78,26 +74,27 @@ public class ProgrammableInvoker {
private static final MethodHandle MH_INVOKE_MOVES;
private static final MethodHandle MH_INVOKE_INTERP_BINDINGS;
private static final MethodHandle MH_MAKE_SCOPE;
private static final MethodHandle MH_CLOSE_SCOPE;
private static final MethodHandle MH_WRAP_SCOPE;
private static final MethodHandle MH_ADDR_TO_LONG;
private static final MethodHandle MH_WRAP_ALLOCATOR;
private static final Map<ABIDescriptor, Long> adapterStubs = new ConcurrentHashMap<>();
private static final MethodHandle EMPTY_OBJECT_ARRAY_HANDLE = MethodHandles.constant(Object[].class, new Object[0]);
static {
try {
MethodHandles.Lookup lookup = MethodHandles.lookup();
MH_INVOKE_MOVES = lookup.findVirtual(ProgrammableInvoker.class, "invokeMoves",
methodType(Object.class, Object[].class, Binding.VMStore[].class, Binding.VMLoad[].class));
methodType(Object.class, long.class, Object[].class, Binding.VMStore[].class, Binding.VMLoad[].class));
MH_INVOKE_INTERP_BINDINGS = lookup.findVirtual(ProgrammableInvoker.class, "invokeInterpBindings",
methodType(Object.class, Object[].class, MethodHandle.class, Map.class, Map.class));
MH_MAKE_SCOPE = lookup.findStatic(NativeScope.class, "boundedScope",
methodType(NativeScope.class, long.class));
MH_CLOSE_SCOPE = lookup.findVirtual(NativeScope.class, "close",
methodType(void.class));
MH_WRAP_SCOPE = lookup.findStatic(SharedUtils.Allocator.class, "ofScope",
methodType(SharedUtils.Allocator.class, NativeScope.class));
methodType(Object.class, Addressable.class, SegmentAllocator.class, Object[].class, MethodHandle.class, Map.class, Map.class));
MH_WRAP_ALLOCATOR = lookup.findStatic(Binding.Context.class, "ofAllocator",
methodType(Binding.Context.class, SegmentAllocator.class));
MethodHandle MH_Addressable_address = lookup.findVirtual(Addressable.class, "address",
methodType(MemoryAddress.class));
MethodHandle MH_MemoryAddress_toRawLongValue = lookup.findVirtual(MemoryAddress.class, "toRawLongValue",
methodType(long.class));
MH_ADDR_TO_LONG = filterArguments(MH_MemoryAddress_toRawLongValue, 0, MH_Addressable_address);
} catch (ReflectiveOperationException e) {
throw new RuntimeException(e);
}
@ -109,17 +106,15 @@ public class ProgrammableInvoker {
private final CallingSequence callingSequence;
private final Addressable addr;
private final long stubAddress;
private final long bufferCopySize;
public ProgrammableInvoker(ABIDescriptor abi, Addressable addr, CallingSequence callingSequence) {
public ProgrammableInvoker(ABIDescriptor abi, CallingSequence callingSequence) {
this.abi = abi;
this.layout = BufferLayout.of(abi);
this.stubAddress = adapterStubs.computeIfAbsent(abi, key -> generateAdapter(key, layout));
this.addr = addr;
this.callingSequence = callingSequence;
this.stackArgsBytes = argMoveBindingsStream(callingSequence)
@ -128,24 +123,7 @@ public class ProgrammableInvoker {
.count()
* abi.arch.typeSize(abi.arch.stackType());
this.bufferCopySize = bufferCopySize(callingSequence);
}
private static long bufferCopySize(CallingSequence callingSequence) {
// FIXME: > 16 bytes alignment might need extra space since the
// starting address of the allocator might be un-aligned.
long size = 0;
for (int i = 0; i < callingSequence.argumentCount(); i++) {
List<Binding> bindings = callingSequence.argumentBindings(i);
for (Binding b : bindings) {
if (b instanceof Binding.Copy) {
Binding.Copy c = (Binding.Copy) b;
size = Utils.alignUp(size, c.alignment());
size += c.size();
}
}
}
return size;
this.bufferCopySize = SharedUtils.bufferCopySize(callingSequence);
}
public MethodHandle getBoundMethodHandle() {
@ -160,41 +138,53 @@ public class ProgrammableInvoker {
: Object[].class;
MethodType leafType = methodType(returnType, argMoveTypes);
MethodType leafTypeWithAddress = leafType.insertParameterTypes(0, long.class);
MethodHandle handle = insertArguments(MH_INVOKE_MOVES.bindTo(this), 1, argMoves, retMoves)
.asCollector(Object[].class, leafType.parameterCount())
.asType(leafType);
MethodHandle handle = insertArguments(MH_INVOKE_MOVES.bindTo(this), 2, argMoves, retMoves);
MethodHandle collector = makeCollectorHandle(leafType);
handle = collectArguments(handle, 1, collector);
handle = handle.asType(leafTypeWithAddress);
boolean isSimple = !(retMoves.length > 1);
boolean usesStackArgs = stackArgsBytes != 0;
if (USE_INTRINSICS && isSimple && !usesStackArgs) {
NativeEntryPoint nep = NativeEntryPoint.make(
addr.address().toRawLongValue(),
"native_call",
abi,
toStorageArray(argMoves),
toStorageArray(retMoves),
!callingSequence.isTrivial(),
leafType
leafTypeWithAddress
);
handle = JLIA.nativeMethodHandle(nep, handle);
}
handle = filterArguments(handle, 0, MH_ADDR_TO_LONG);
if (USE_SPEC && isSimple) {
handle = specialize(handle);
} else {
Map<VMStorage, Integer> argIndexMap = indexMap(argMoves);
Map<VMStorage, Integer> retIndexMap = indexMap(retMoves);
Map<VMStorage, Integer> argIndexMap = SharedUtils.indexMap(argMoves);
Map<VMStorage, Integer> retIndexMap = SharedUtils.indexMap(retMoves);
handle = insertArguments(MH_INVOKE_INTERP_BINDINGS.bindTo(this), 1, handle, argIndexMap, retIndexMap);
handle = handle.asCollector(Object[].class, callingSequence.methodType().parameterCount())
.asType(callingSequence.methodType());
handle = insertArguments(MH_INVOKE_INTERP_BINDINGS.bindTo(this), 3, handle, argIndexMap, retIndexMap);
MethodHandle collectorInterp = makeCollectorHandle(callingSequence.methodType());
handle = collectArguments(handle, 2, collectorInterp);
handle = handle.asType(handle.type().changeReturnType(callingSequence.methodType().returnType()));
}
return handle;
}
// Funnel from type to Object[]
private static MethodHandle makeCollectorHandle(MethodType type) {
return type.parameterCount() == 0
? EMPTY_OBJECT_ARRAY_HANDLE
: identity(Object[].class)
.asCollector(Object[].class, type.parameterCount())
.asType(type.changeReturnType(Object[].class));
}
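Worked example (illustrative) of the funnel above: for an (int, long) parameter list, the
collector is a handle of type (int, long) -> Object[] that boxes both arguments into a
two-element array, so invokeMoves and invokeInterpBindings can receive arguments uniformly:

// (invokeExact is declared to throw Throwable; handling is omitted in this sketch)
MethodHandle collector = makeCollectorHandle(methodType(void.class, int.class, long.class));
Object[] boxed = (Object[]) collector.invokeExact(1, 2L);   // boxed = {1, 2L}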
private Stream<Binding.VMStore> argMoveBindingsStream(CallingSequence callingSequence) {
return callingSequence.argumentBindings()
.filter(Binding.VMStore.class::isInstance)
@ -215,17 +205,12 @@ public class ProgrammableInvoker {
private MethodHandle specialize(MethodHandle leafHandle) {
MethodType highLevelType = callingSequence.methodType();
MethodType leafType = leafHandle.type();
MethodHandle specializedHandle = leafHandle; // initial
int argInsertPos = 1;
int argContextPos = 1;
MethodHandle specializedHandle = dropArguments(leafHandle, argContextPos, Binding.Context.class);
int argInsertPos = -1;
int argAllocatorPos = -1;
if (bufferCopySize > 0) {
argAllocatorPos = 0;
specializedHandle = dropArguments(specializedHandle, argAllocatorPos, SharedUtils.Allocator.class);
argInsertPos++;
}
for (int i = 0; i < highLevelType.parameterCount(); i++) {
List<Binding> bindings = callingSequence.argumentBindings(i);
argInsertPos += bindings.stream().filter(Binding.VMStore.class::isInstance).count() + 1;
@ -235,48 +220,37 @@ public class ProgrammableInvoker {
if (binding.tag() == Binding.Tag.VM_STORE) {
argInsertPos--;
} else {
specializedHandle = binding.specialize(specializedHandle, argInsertPos, argAllocatorPos);
specializedHandle = binding.specialize(specializedHandle, argInsertPos, argContextPos);
}
}
}
if (highLevelType.returnType() != void.class) {
MethodHandle returnFilter = identity(highLevelType.returnType());
int retAllocatorPos = 0;
int retContextPos = 0;
int retInsertPos = 1;
returnFilter = dropArguments(returnFilter, retAllocatorPos, SharedUtils.Allocator.class);
returnFilter = dropArguments(returnFilter, retContextPos, Binding.Context.class);
List<Binding> bindings = callingSequence.returnBindings();
for (int j = bindings.size() - 1; j >= 0; j--) {
Binding binding = bindings.get(j);
returnFilter = binding.specialize(returnFilter, retInsertPos, retAllocatorPos);
returnFilter = binding.specialize(returnFilter, retInsertPos, retContextPos);
}
returnFilter = insertArguments(returnFilter, retAllocatorPos, DEFAULT_ALLOCATOR);
specializedHandle = MethodHandles.filterReturnValue(specializedHandle, returnFilter);
returnFilter = MethodHandles.filterArguments(returnFilter, retContextPos, MH_WRAP_ALLOCATOR);
// (SegmentAllocator, Addressable, Context, ...) -> ...
specializedHandle = MethodHandles.collectArguments(returnFilter, retInsertPos, specializedHandle);
// (Addressable, SegmentAllocator, Context, ...) -> ...
specializedHandle = SharedUtils.swapArguments(specializedHandle, 0, 1); // normalize parameter order
} else {
specializedHandle = MethodHandles.dropArguments(specializedHandle, 1, SegmentAllocator.class);
}
if (bufferCopySize > 0) {
// insert try-finally to close the NativeScope used for Binding.Copy
MethodHandle closer = leafType.returnType() == void.class
// (Throwable, NativeScope) -> void
? collectArguments(empty(methodType(void.class, Throwable.class)), 1, MH_CLOSE_SCOPE)
// (Throwable, V, NativeScope) -> V
: collectArguments(dropArguments(identity(specializedHandle.type().returnType()), 0, Throwable.class),
2, MH_CLOSE_SCOPE);
// Handle takes a SharedUtils.Allocator, so need to wrap our NativeScope
specializedHandle = filterArguments(specializedHandle, argAllocatorPos, MH_WRAP_SCOPE);
specializedHandle = tryFinally(specializedHandle, closer);
MethodHandle makeScopeHandle = insertArguments(MH_MAKE_SCOPE, 0, bufferCopySize);
specializedHandle = collectArguments(specializedHandle, argAllocatorPos, makeScopeHandle);
}
// now bind the internal context parameter
argContextPos++; // skip over the return SegmentAllocator (inserted by the above code)
specializedHandle = SharedUtils.wrapWithAllocator(specializedHandle, argContextPos, bufferCopySize, false);
return specializedHandle;
}
private static Map<VMStorage, Integer> indexMap(Binding.Move[] moves) {
return IntStream.range(0, moves.length)
.boxed()
.collect(Collectors.toMap(i -> moves[i].storage(), i -> i));
}
/**
* Does a native invocation by moving primitive values from the arg array into an intermediate buffer
* and calling the assembly stub that forwards arguments from the buffer to the target function
@ -286,14 +260,15 @@ public class ProgrammableInvoker {
* @param returnBindings Binding.Move values describing how return values should be copied
* @return null, a single primitive value, or an Object[] of primitive values
*/
Object invokeMoves(Object[] args, Binding.VMStore[] argBindings, Binding.VMLoad[] returnBindings) {
Object invokeMoves(long addr, Object[] args, Binding.VMStore[] argBindings, Binding.VMLoad[] returnBindings) {
MemorySegment stackArgsSeg = null;
try (MemorySegment argBuffer = MemorySegment.allocateNative(layout.size, 64)) {
try (ResourceScope scope = ResourceScope.newConfinedScope()) {
MemorySegment argBuffer = MemorySegment.allocateNative(layout.size, 64, scope);
if (stackArgsBytes > 0) {
stackArgsSeg = MemorySegment.allocateNative(stackArgsBytes, 8);
stackArgsSeg = MemorySegment.allocateNative(stackArgsBytes, 8, scope);
}
VH_LONG.set(argBuffer.asSlice(layout.arguments_next_pc), addr.address().toRawLongValue());
VH_LONG.set(argBuffer.asSlice(layout.arguments_next_pc), addr);
VH_LONG.set(argBuffer.asSlice(layout.stack_args_bytes), stackArgsBytes);
VH_LONG.set(argBuffer.asSlice(layout.stack_args), stackArgsSeg == null ? 0L : stackArgsSeg.address().toRawLongValue());
@ -333,32 +308,33 @@ public class ProgrammableInvoker {
}
return returns;
}
} finally {
if (stackArgsSeg != null) {
stackArgsSeg.close();
}
}
}
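// Editor's sketch (not part of the patch): how a downcall produced by this
// invoker is typically obtained and used through the public API at this
// commit. The 3-arg downcallHandle and LibraryLookup.ofDefault() follow the
// SharedUtils.AllocHolder usage below; CLinker.getInstance() and the
// toCString(String, ResourceScope) overload are assumptions about this
// snapshot of the incubator API.
private static long strlenDemo() throws Throwable {
    CLinker linker = CLinker.getInstance();
    MethodHandle strlen = linker.downcallHandle(
            LibraryLookup.ofDefault().lookup("strlen").get(),
            MethodType.methodType(long.class, MemoryAddress.class),
            FunctionDescriptor.of(CLinker.C_LONG_LONG, CLinker.C_POINTER));
    try (ResourceScope scope = ResourceScope.newConfinedScope()) {
        MemorySegment str = CLinker.toCString("hello", scope);
        return (long) strlen.invokeExact(str.address()); // -> 5
    }
}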
Object invokeInterpBindings(Object[] args, MethodHandle leaf,
Object invokeInterpBindings(Addressable address, SegmentAllocator allocator, Object[] args, MethodHandle leaf,
Map<VMStorage, Integer> argIndexMap,
Map<VMStorage, Integer> retIndexMap) throws Throwable {
SharedUtils.Allocator unboxAllocator = bufferCopySize != 0
? SharedUtils.Allocator.ofScope(NativeScope.boundedScope(bufferCopySize))
: THROWING_ALLOCATOR;
try (unboxAllocator) {
Binding.Context unboxContext = bufferCopySize != 0
? Binding.Context.ofBoundedAllocator(bufferCopySize)
: Binding.Context.DUMMY;
try (unboxContext) {
// do argument processing, get Object[] as result
Object[] moves = new Object[leaf.type().parameterCount()];
Object[] leafArgs = new Object[leaf.type().parameterCount()];
leafArgs[0] = address; // addr
for (int i = 0; i < args.length; i++) {
Object arg = args[i];
BindingInterpreter.unbox(arg, callingSequence.argumentBindings(i),
(storage, type, value) -> {
moves[argIndexMap.get(storage)] = value;
}, unboxAllocator);
leafArgs[argIndexMap.get(storage) + 1] = value; // +1 to skip addr
}, unboxContext);
}
// call leaf
Object o = leaf.invokeWithArguments(moves);
Object o = leaf.invokeWithArguments(leafArgs);
// make sure arguments are reachable during the call
// technically we only need to do this for Addressable parameters
Reference.reachabilityFence(address);
Reference.reachabilityFence(args);
// return value processing
if (o == null) {
@ -366,10 +342,10 @@ public class ProgrammableInvoker {
} else if (o instanceof Object[]) {
Object[] oArr = (Object[]) o;
return BindingInterpreter.box(callingSequence.returnBindings(),
(storage, type) -> oArr[retIndexMap.get(storage)], DEFAULT_ALLOCATOR);
(storage, type) -> oArr[retIndexMap.get(storage)], Binding.Context.ofAllocator(allocator));
} else {
return BindingInterpreter.box(callingSequence.returnBindings(), (storage, type) -> o,
DEFAULT_ALLOCATOR);
Binding.Context.ofAllocator(allocator));
}
}
}


@ -26,22 +26,32 @@
package jdk.internal.foreign.abi;
import jdk.incubator.foreign.MemoryAddress;
import jdk.incubator.foreign.MemoryHandles;
import jdk.incubator.foreign.MemoryLayouts;
import jdk.incubator.foreign.MemorySegment;
import jdk.incubator.foreign.ResourceScope;
import jdk.incubator.foreign.SegmentAllocator;
import jdk.internal.access.JavaLangInvokeAccess;
import jdk.internal.access.SharedSecrets;
import jdk.internal.foreign.MemoryAddressImpl;
import jdk.internal.foreign.Utils;
import jdk.internal.vm.annotation.Stable;
import sun.security.action.GetPropertyAction;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.lang.invoke.VarHandle;
import java.nio.ByteOrder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Stream;
import static jdk.internal.foreign.abi.SharedUtils.DEFAULT_ALLOCATOR;
import static java.lang.invoke.MethodHandles.dropArguments;
import static java.lang.invoke.MethodHandles.filterReturnValue;
import static java.lang.invoke.MethodHandles.identity;
import static java.lang.invoke.MethodHandles.insertArguments;
import static java.lang.invoke.MethodHandles.lookup;
import static java.lang.invoke.MethodType.methodType;
import static jdk.internal.foreign.abi.SharedUtils.mergeArguments;
import static sun.security.action.GetBooleanAction.privilegedGetProperty;
/**
@ -49,59 +59,214 @@ import static sun.security.action.GetBooleanAction.privilegedGetProperty;
* takes an array of storage pointers, which describes the state of the CPU at the time of the upcall. This can be used
* by the Java code to fetch the upcall arguments and to store the results to the desired location, as per system ABI.
*/
public class ProgrammableUpcallHandler implements UpcallHandler {
public class ProgrammableUpcallHandler {
private static final boolean DEBUG =
privilegedGetProperty("jdk.internal.foreign.ProgrammableUpcallHandler.DEBUG");
private static final boolean USE_SPEC = Boolean.parseBoolean(
GetPropertyAction.privilegedGetProperty("jdk.internal.foreign.ProgrammableUpcallHandler.USE_SPEC", "true"));
private static final boolean USE_INTRINSICS = Boolean.parseBoolean(
GetPropertyAction.privilegedGetProperty("jdk.internal.foreign.ProgrammableUpcallHandler.USE_INTRINSICS", "true"));
private static final JavaLangInvokeAccess JLI = SharedSecrets.getJavaLangInvokeAccess();
private static final VarHandle VH_LONG = MemoryLayouts.JAVA_LONG.varHandle(long.class);
@Stable
private final MethodHandle mh;
private final MethodType type;
private final CallingSequence callingSequence;
private final long entryPoint;
private static final MethodHandle MH_invokeMoves;
private static final MethodHandle MH_invokeInterpBindings;
private final ABIDescriptor abi;
private final BufferLayout layout;
public ProgrammableUpcallHandler(ABIDescriptor abi, MethodHandle target, CallingSequence callingSequence) {
this.abi = abi;
this.layout = BufferLayout.of(abi);
this.type = callingSequence.methodType();
this.callingSequence = callingSequence;
this.mh = target.asSpreader(Object[].class, callingSequence.methodType().parameterCount());
this.entryPoint = allocateUpcallStub(abi, layout);
}
@Override
public long entryPoint() {
return entryPoint;
}
public static void invoke(ProgrammableUpcallHandler handler, long address) {
handler.invoke(MemoryAddress.ofLong(address));
}
private void invoke(MemoryAddress buffer) {
static {
try {
MemorySegment bufferBase = MemoryAddressImpl.ofLongUnchecked(buffer.toRawLongValue(), layout.size);
MethodHandles.Lookup lookup = lookup();
MH_invokeMoves = lookup.findStatic(ProgrammableUpcallHandler.class, "invokeMoves",
methodType(void.class, MemoryAddress.class, MethodHandle.class,
Binding.VMLoad[].class, Binding.VMStore[].class, ABIDescriptor.class, BufferLayout.class));
MH_invokeInterpBindings = lookup.findStatic(ProgrammableUpcallHandler.class, "invokeInterpBindings",
methodType(Object.class, Object[].class, MethodHandle.class, Map.class, Map.class,
CallingSequence.class, long.class));
} catch (ReflectiveOperationException e) {
throw new InternalError(e);
}
}
if (DEBUG) {
System.err.println("Buffer state before:");
layout.dump(abi.arch, bufferBase, System.err);
public static UpcallHandler make(ABIDescriptor abi, MethodHandle target, CallingSequence callingSequence) {
Binding.VMLoad[] argMoves = argMoveBindings(callingSequence);
Binding.VMStore[] retMoves = retMoveBindings(callingSequence);
boolean isSimple = !(retMoves.length > 1);
Class<?> llReturn = !isSimple
? Object[].class
: retMoves.length == 1
? retMoves[0].type()
: void.class;
Class<?>[] llParams = Arrays.stream(argMoves).map(Binding.Move::type).toArray(Class<?>[]::new);
MethodType llType = MethodType.methodType(llReturn, llParams);
MethodHandle doBindings;
long bufferCopySize = SharedUtils.bufferCopySize(callingSequence);
if (USE_SPEC && isSimple) {
doBindings = specializedBindingHandle(target, callingSequence, llReturn, bufferCopySize);
assert doBindings.type() == llType;
} else {
Map<VMStorage, Integer> argIndices = SharedUtils.indexMap(argMoves);
Map<VMStorage, Integer> retIndices = SharedUtils.indexMap(retMoves);
target = target.asSpreader(Object[].class, callingSequence.methodType().parameterCount());
doBindings = insertArguments(MH_invokeInterpBindings, 1, target, argIndices, retIndices, callingSequence,
bufferCopySize);
doBindings = doBindings.asCollector(Object[].class, llType.parameterCount());
doBindings = doBindings.asType(llType);
}
long entryPoint;
boolean usesStackArgs = argMoveBindingsStream(callingSequence)
.map(Binding.VMLoad::storage)
.anyMatch(s -> abi.arch.isStackType(s.type()));
if (USE_INTRINSICS && isSimple && !usesStackArgs && supportsOptimizedUpcalls()) {
checkPrimitive(doBindings.type());
JLI.ensureCustomized(doBindings);
VMStorage[] args = Arrays.stream(argMoves).map(Binding.Move::storage).toArray(VMStorage[]::new);
VMStorage[] rets = Arrays.stream(retMoves).map(Binding.Move::storage).toArray(VMStorage[]::new);
CallRegs conv = new CallRegs(args, rets);
entryPoint = allocateOptimizedUpcallStub(doBindings, abi, conv);
} else {
BufferLayout layout = BufferLayout.of(abi);
MethodHandle doBindingsErased = doBindings.asSpreader(Object[].class, doBindings.type().parameterCount());
MethodHandle invokeMoves = insertArguments(MH_invokeMoves, 1, doBindingsErased, argMoves, retMoves, abi, layout);
entryPoint = allocateUpcallStub(invokeMoves, abi, layout);
}
return () -> entryPoint;
}
private static void checkPrimitive(MethodType type) {
if (!type.returnType().isPrimitive()
|| type.parameterList().stream().anyMatch(p -> !p.isPrimitive()))
throw new IllegalArgumentException("MethodHandle type must be primitive: " + type);
}
private static Stream<Binding.VMLoad> argMoveBindingsStream(CallingSequence callingSequence) {
return callingSequence.argumentBindings()
.filter(Binding.VMLoad.class::isInstance)
.map(Binding.VMLoad.class::cast);
}
private static Binding.VMLoad[] argMoveBindings(CallingSequence callingSequence) {
return argMoveBindingsStream(callingSequence)
.toArray(Binding.VMLoad[]::new);
}
private static Binding.VMStore[] retMoveBindings(CallingSequence callingSequence) {
return callingSequence.returnBindings().stream()
.filter(Binding.VMStore.class::isInstance)
.map(Binding.VMStore.class::cast)
.toArray(Binding.VMStore[]::new);
}
private static MethodHandle specializedBindingHandle(MethodHandle target, CallingSequence callingSequence,
Class<?> llReturn, long bufferCopySize) {
MethodType highLevelType = callingSequence.methodType();
MethodHandle specializedHandle = target; // initial
int argAllocatorPos = 0;
int argInsertPos = 1;
specializedHandle = dropArguments(specializedHandle, argAllocatorPos, Binding.Context.class);
for (int i = 0; i < highLevelType.parameterCount(); i++) {
MethodHandle filter = identity(highLevelType.parameterType(i));
int filterAllocatorPos = 0;
int filterInsertPos = 1; // +1 for allocator
filter = dropArguments(filter, filterAllocatorPos, Binding.Context.class);
List<Binding> bindings = callingSequence.argumentBindings(i);
for (int j = bindings.size() - 1; j >= 0; j--) {
Binding binding = bindings.get(j);
filter = binding.specialize(filter, filterInsertPos, filterAllocatorPos);
}
specializedHandle = MethodHandles.collectArguments(specializedHandle, argInsertPos, filter);
specializedHandle = mergeArguments(specializedHandle, argAllocatorPos, argInsertPos + filterAllocatorPos);
argInsertPos += filter.type().parameterCount() - 1; // -1 for allocator
}
MemorySegment stackArgsBase = MemoryAddressImpl.ofLongUnchecked((long)VH_LONG.get(bufferBase.asSlice(layout.stack_args)));
Object[] args = new Object[type.parameterCount()];
for (int i = 0 ; i < type.parameterCount() ; i++) {
if (llReturn != void.class) {
int retAllocatorPos = -1; // assumed not needed
int retInsertPos = 0;
MethodHandle filter = identity(llReturn);
List<Binding> bindings = callingSequence.returnBindings();
for (int j = bindings.size() - 1; j >= 0; j--) {
Binding binding = bindings.get(j);
filter = binding.specialize(filter, retInsertPos, retAllocatorPos);
}
specializedHandle = filterReturnValue(specializedHandle, filter);
}
specializedHandle = SharedUtils.wrapWithAllocator(specializedHandle, argAllocatorPos, bufferCopySize, true);
return specializedHandle;
}
public static void invoke(MethodHandle mh, long address) throws Throwable {
mh.invokeExact(MemoryAddress.ofLong(address));
}
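// Editor's sketch (not part of the patch): how an upcall stub is created
// through the public API after this change; upcallStub now takes the
// ResourceScope that controls the stub's lifetime (see AArch64Linker below).
// The comparator target and CLinker.getInstance() are illustrative assumptions.
private static void upcallStubDemo(MethodHandle compareIntsTarget) throws Throwable {
    try (ResourceScope scope = ResourceScope.newConfinedScope()) {
        MemoryAddress stub = CLinker.getInstance().upcallStub(
                compareIntsTarget, // e.g. (int, int) -> int
                FunctionDescriptor.of(CLinker.C_INT, CLinker.C_INT, CLinker.C_INT),
                scope);
        // pass 'stub' to native code (e.g. qsort); it is freed when 'scope' closes
    }
}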
private static void invokeMoves(MemoryAddress buffer, MethodHandle leaf,
Binding.VMLoad[] argBindings, Binding.VMStore[] returnBindings,
ABIDescriptor abi, BufferLayout layout) throws Throwable {
MemorySegment bufferBase = MemoryAddressImpl.ofLongUnchecked(buffer.toRawLongValue(), layout.size);
if (DEBUG) {
System.err.println("Buffer state before:");
layout.dump(abi.arch, bufferBase, System.err);
}
MemorySegment stackArgsBase = MemoryAddressImpl.ofLongUnchecked((long)VH_LONG.get(bufferBase.asSlice(layout.stack_args)));
Object[] moves = new Object[argBindings.length];
for (int i = 0; i < moves.length; i++) {
Binding.VMLoad binding = argBindings[i];
VMStorage storage = binding.storage();
MemorySegment ptr = abi.arch.isStackType(storage.type())
? stackArgsBase.asSlice(storage.index() * abi.arch.typeSize(abi.arch.stackType()))
: bufferBase.asSlice(layout.argOffset(storage));
moves[i] = SharedUtils.read(ptr, binding.type());
}
// invokes the bindings handle (invokeInterpBindings in the interpreted case), which then invokes the actual target
Object o = leaf.invoke(moves);
if (o == null) {
// nop
} else if (o instanceof Object[] returns) {
for (int i = 0; i < returnBindings.length; i++) {
Binding.VMStore binding = returnBindings[i];
VMStorage storage = binding.storage();
MemorySegment ptr = bufferBase.asSlice(layout.retOffset(storage));
SharedUtils.writeOverSized(ptr, binding.type(), returns[i]);
}
} else { // single Object
Binding.VMStore binding = returnBindings[0];
VMStorage storage = binding.storage();
MemorySegment ptr = bufferBase.asSlice(layout.retOffset(storage));
SharedUtils.writeOverSized(ptr, binding.type(), o);
}
if (DEBUG) {
System.err.println("Buffer state after:");
layout.dump(abi.arch, bufferBase, System.err);
}
}
private static Object invokeInterpBindings(Object[] moves, MethodHandle leaf,
Map<VMStorage, Integer> argIndexMap,
Map<VMStorage, Integer> retIndexMap,
CallingSequence callingSequence,
long bufferCopySize) throws Throwable {
Binding.Context allocator = bufferCopySize != 0
? Binding.Context.ofBoundedAllocator(bufferCopySize)
: Binding.Context.ofScope();
try (allocator) {
// invoke the binding interpreter to get the array of high-level arguments back
Object[] args = new Object[callingSequence.methodType().parameterCount()];
for (int i = 0; i < args.length; i++) {
args[i] = BindingInterpreter.box(callingSequence.argumentBindings(i),
(storage, type) -> {
MemorySegment ptr = abi.arch.isStackType(storage.type())
? stackArgsBase.asSlice(storage.index() * abi.arch.typeSize(abi.arch.stackType()))
: bufferBase.asSlice(layout.argOffset(storage));
return SharedUtils.read(ptr, type);
}, DEFAULT_ALLOCATOR);
(storage, type) -> moves[argIndexMap.get(storage)], allocator);
}
if (DEBUG) {
@ -109,31 +274,36 @@ public class ProgrammableUpcallHandler implements UpcallHandler {
System.err.println(Arrays.toString(args).indent(2));
}
Object o = mh.invoke(args);
// invoke our target
Object o = leaf.invoke(args);
if (DEBUG) {
System.err.println("Java return:");
System.err.println(Objects.toString(o).indent(2));
}
if (mh.type().returnType() != void.class) {
Object[] returnMoves = new Object[retIndexMap.size()];
if (leaf.type().returnType() != void.class) {
BindingInterpreter.unbox(o, callingSequence.returnBindings(),
(storage, type, value) -> {
MemorySegment ptr = bufferBase.asSlice(layout.retOffset(storage));
SharedUtils.writeOverSized(ptr, type, value);
}, null);
(storage, type, value) -> returnMoves[retIndexMap.get(storage)] = value, null);
}
if (DEBUG) {
System.err.println("Buffer state after:");
layout.dump(abi.arch, bufferBase, System.err);
if (returnMoves.length == 0) {
return null;
} else if (returnMoves.length == 1) {
return returnMoves[0];
} else {
return returnMoves;
}
} catch (Throwable t) {
throw new IllegalStateException(t);
}
}
public native long allocateUpcallStub(ABIDescriptor abi, BufferLayout layout);
// used for transporting data into native code
private static record CallRegs(VMStorage[] argRegs, VMStorage[] retRegs) {}
static native long allocateOptimizedUpcallStub(MethodHandle mh, ABIDescriptor abi, CallRegs conv);
static native long allocateUpcallStub(MethodHandle mh, ABIDescriptor abi, BufferLayout layout);
static native boolean supportsOptimizedUpcalls();
private static native void registerNatives();
static {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,6 +24,7 @@
*/
package jdk.internal.foreign.abi;
import jdk.incubator.foreign.Addressable;
import jdk.incubator.foreign.FunctionDescriptor;
import jdk.incubator.foreign.GroupLayout;
import jdk.incubator.foreign.MemoryAccess;
@ -32,7 +33,8 @@ import jdk.incubator.foreign.MemoryHandles;
import jdk.incubator.foreign.MemoryLayout;
import jdk.incubator.foreign.MemorySegment;
import jdk.incubator.foreign.LibraryLookup;
import jdk.incubator.foreign.NativeScope;
import jdk.incubator.foreign.ResourceScope;
import jdk.incubator.foreign.SegmentAllocator;
import jdk.incubator.foreign.SequenceLayout;
import jdk.incubator.foreign.CLinker;
import jdk.incubator.foreign.ValueLayout;
@ -47,15 +49,24 @@ import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.lang.invoke.VarHandle;
import java.lang.ref.Reference;
import java.nio.charset.Charset;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static java.lang.invoke.MethodHandles.collectArguments;
import static java.lang.invoke.MethodHandles.constant;
import static java.lang.invoke.MethodHandles.dropArguments;
import static java.lang.invoke.MethodHandles.dropReturn;
import static java.lang.invoke.MethodHandles.empty;
import static java.lang.invoke.MethodHandles.filterArguments;
import static java.lang.invoke.MethodHandles.identity;
import static java.lang.invoke.MethodHandles.insertArguments;
import static java.lang.invoke.MethodHandles.permuteArguments;
import static java.lang.invoke.MethodHandles.tryFinally;
import static java.lang.invoke.MethodType.methodType;
import static jdk.incubator.foreign.CLinker.*;
@ -64,27 +75,35 @@ public class SharedUtils {
private static final MethodHandle MH_ALLOC_BUFFER;
private static final MethodHandle MH_BASEADDRESS;
private static final MethodHandle MH_BUFFER_COPY;
static final Allocator DEFAULT_ALLOCATOR = MemorySegment::allocateNative;
private static final MethodHandle MH_MAKE_CONTEXT_NO_ALLOCATOR;
private static final MethodHandle MH_MAKE_CONTEXT_BOUNDED_ALLOCATOR;
private static final MethodHandle MH_CLOSE_CONTEXT;
private static final MethodHandle MH_REACHBILITY_FENCE;
static {
try {
var lookup = MethodHandles.lookup();
MH_ALLOC_BUFFER = lookup.findStatic(SharedUtils.class, "allocateNative",
MethodHandles.Lookup lookup = MethodHandles.lookup();
MH_ALLOC_BUFFER = lookup.findVirtual(SegmentAllocator.class, "allocate",
methodType(MemorySegment.class, MemoryLayout.class));
MH_BASEADDRESS = lookup.findVirtual(MemorySegment.class, "address",
methodType(MemoryAddress.class));
MH_BUFFER_COPY = lookup.findStatic(SharedUtils.class, "bufferCopy",
methodType(MemoryAddress.class, MemoryAddress.class, MemorySegment.class));
MH_MAKE_CONTEXT_NO_ALLOCATOR = lookup.findStatic(Binding.Context.class, "ofScope",
methodType(Binding.Context.class));
MH_MAKE_CONTEXT_BOUNDED_ALLOCATOR = lookup.findStatic(Binding.Context.class, "ofBoundedAllocator",
methodType(Binding.Context.class, long.class));
MH_CLOSE_CONTEXT = lookup.findVirtual(Binding.Context.class, "close",
methodType(void.class));
MH_REACHBILITY_FENCE = lookup.findStatic(Reference.class, "reachabilityFence",
methodType(void.class, Object.class));
} catch (ReflectiveOperationException e) {
throw new BootstrapMethodError(e);
}
}
// workaround for https://bugs.openjdk.java.net/browse/JDK-8239083
private static MemorySegment allocateNative(MemoryLayout layout) {
return MemorySegment.allocateNative(layout);
}
// this allocator should be used when no allocation is expected
public static final SegmentAllocator THROWING_ALLOCATOR = (size, align) -> { throw new IllegalStateException("Cannot get here"); };
/**
* Align the specified type from a given address
@ -154,22 +173,19 @@ public class SharedUtils {
*/
public static MethodHandle adaptDowncallForIMR(MethodHandle handle, FunctionDescriptor cDesc) {
if (handle.type().returnType() != void.class)
throw new IllegalArgumentException("return expected to be void for in memory returns");
if (handle.type().parameterType(0) != MemoryAddress.class)
throw new IllegalArgumentException("MemoryAddress expected as first param");
throw new IllegalArgumentException("return expected to be void for in memory returns: " + handle.type());
if (handle.type().parameterType(2) != MemoryAddress.class)
throw new IllegalArgumentException("MemoryAddress expected as third param: " + handle.type());
if (cDesc.returnLayout().isEmpty())
throw new IllegalArgumentException("Return layout needed: " + cDesc);
MethodHandle ret = identity(MemorySegment.class); // (MemorySegment) MemorySegment
handle = collectArguments(ret, 1, handle); // (MemorySegment, MemoryAddress ...) MemorySegment
handle = collectArguments(handle, 1, MH_BASEADDRESS); // (MemorySegment, MemorySegment ...) MemorySegment
MethodType oldType = handle.type(); // (MemorySegment, MemorySegment, ...) MemorySegment
MethodType newType = oldType.dropParameterTypes(0, 1); // (MemorySegment, ...) MemorySegment
int[] reorder = IntStream.range(-1, newType.parameterCount()).toArray();
reorder[0] = 0; // [0, 0, 1, 2, 3, ...]
handle = permuteArguments(handle, newType, reorder); // (MemorySegment, ...) MemoryAddress
handle = collectArguments(handle, 0, insertArguments(MH_ALLOC_BUFFER, 0, cDesc.returnLayout().get())); // (...) MemoryAddress
handle = collectArguments(ret, 1, handle); // (MemorySegment, Addressable, SegmentAllocator, MemoryAddress, ...) MemorySegment
handle = collectArguments(handle, 3, MH_BASEADDRESS); // (MemorySegment, Addressable, SegmentAllocator, MemorySegment, ...) MemorySegment
handle = mergeArguments(handle, 0, 3); // (MemorySegment, Addressable, SegmentAllocator, ...) MemorySegment
handle = collectArguments(handle, 0, insertArguments(MH_ALLOC_BUFFER, 1, cDesc.returnLayout().get())); // (SegmentAllocator, Addressable, SegmentAllocator, ...) MemoryAddress
handle = mergeArguments(handle, 0, 2); // (SegmentAllocator, Addressable, ...) MemoryAddress
handle = swapArguments(handle, 0, 1); // (Addressable, SegmentAllocator, ...) MemoryAddress
return handle;
}
@ -181,12 +197,16 @@ public class SharedUtils {
* @param target the target handle to adapt
* @return the adapted handle
*/
public static MethodHandle adaptUpcallForIMR(MethodHandle target) {
public static MethodHandle adaptUpcallForIMR(MethodHandle target, boolean dropReturn) {
if (target.type().returnType() != MemorySegment.class)
throw new IllegalArgumentException("Must return MemorySegment for IMR");
target = collectArguments(MH_BUFFER_COPY, 1, target); // (MemoryAddress, ...) MemoryAddress
if (dropReturn) { // no handling for return value, need to drop it
target = dropReturn(target);
}
return target;
}
@ -223,18 +243,26 @@ public class SharedUtils {
cDesc.returnLayout().ifPresent(rl -> checkCompatibleType(mt.returnType(), rl, addressSize));
}
public static Class<?> primitiveCarrierForSize(long size) {
if (size == 1) {
return byte.class;
} else if(size == 2) {
return short.class;
} else if (size <= 4) {
return int.class;
} else if (size <= 8) {
return long.class;
public static Class<?> primitiveCarrierForSize(long size, boolean useFloat) {
if (useFloat) {
if (size == 4) {
return float.class;
} else if (size == 8) {
return double.class;
}
} else {
if (size == 1) {
return byte.class;
} else if (size == 2) {
return short.class;
} else if (size <= 4) {
return int.class;
} else if (size <= 8) {
return long.class;
}
}
throw new IllegalArgumentException("Size too large: " + size);
throw new IllegalArgumentException("No type for size: " + size + " isFloat=" + useFloat);
}
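// Examples of the mapping above (editor's note, not in the patch):
//   primitiveCarrierForSize(1, false)  -> byte.class
//   primitiveCarrierForSize(2, false)  -> short.class
//   primitiveCarrierForSize(3, false)  -> int.class   (integral sizes <= 4 widen to int)
//   primitiveCarrierForSize(8, false)  -> long.class
//   primitiveCarrierForSize(4, true)   -> float.class
//   primitiveCarrierForSize(8, true)   -> double.class
//   primitiveCarrierForSize(16, true)  -> IllegalArgumentException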
public static CLinker getSystemLinker() {
@ -264,16 +292,136 @@ public class SharedUtils {
throw new IllegalArgumentException("String too large");
}
static long bufferCopySize(CallingSequence callingSequence) {
// FIXME: alignment > 16 bytes might need extra space, since the
// starting address of the allocator might be unaligned.
long size = 0;
for (int i = 0; i < callingSequence.argumentCount(); i++) {
List<Binding> bindings = callingSequence.argumentBindings(i);
for (Binding b : bindings) {
if (b instanceof Binding.Copy) {
Binding.Copy c = (Binding.Copy) b;
size = Utils.alignUp(size, c.alignment());
size += c.size();
} else if (b instanceof Binding.Allocate) {
Binding.Allocate c = (Binding.Allocate) b;
size = Utils.alignUp(size, c.alignment());
size += c.size();
}
}
}
return size;
}
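// Worked example (editor's note, not in the patch): with a Binding.Copy of
// (size=12, align=8) followed by a Binding.Allocate of (size=4, align=4):
//   size = alignUp(0, 8)  = 0;   size += 12  ->  12
//   size = alignUp(12, 4) = 12;  size += 4   ->  16
// so a 16-byte per-call buffer is reserved for these bindings,
// where alignUp(n, a) = (n + a - 1) & -a for power-of-two a.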
static Map<VMStorage, Integer> indexMap(Binding.Move[] moves) {
return IntStream.range(0, moves.length)
.boxed()
.collect(Collectors.toMap(i -> moves[i].storage(), i -> i));
}
static MethodHandle mergeArguments(MethodHandle mh, int sourceIndex, int destIndex) {
MethodType oldType = mh.type();
Class<?> sourceType = oldType.parameterType(sourceIndex);
Class<?> destType = oldType.parameterType(destIndex);
if (sourceType != destType) {
// TODO meet?
throw new IllegalArgumentException("Parameter types differ: " + sourceType + " != " + destType);
}
MethodType newType = oldType.dropParameterTypes(destIndex, destIndex + 1);
int[] reorder = new int[oldType.parameterCount()];
assert destIndex > sourceIndex;
for (int i = 0, index = 0; i < reorder.length; i++) {
if (i != destIndex) {
reorder[i] = index++;
} else {
reorder[i] = sourceIndex;
}
}
return permuteArguments(mh, newType, reorder);
}
static MethodHandle swapArguments(MethodHandle mh, int firstArg, int secondArg) {
MethodType mtype = mh.type();
int[] perms = new int[mtype.parameterCount()];
MethodType swappedType = MethodType.methodType(mtype.returnType());
for (int i = 0 ; i < perms.length ; i++) {
int dst = i;
if (i == firstArg) dst = secondArg;
if (i == secondArg) dst = firstArg;
perms[i] = dst;
swappedType = swappedType.appendParameterTypes(mtype.parameterType(dst));
}
return permuteArguments(mh, swappedType, perms);
}
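// Editor's sketch (not part of the patch): the effect of mergeArguments and
// swapArguments in terms of plain permuteArguments. 'pair' is illustrative:
// assume pair :: (String, String) -> String, returning a + "|" + b.
private static void mergeSwapDemo(MethodHandle pair) throws Throwable {
    // "merge": feed one incoming argument to both parameters,
    // like mergeArguments(pair, 0, 1) -> (String)String
    MethodHandle merged = permuteArguments(pair,
            methodType(String.class, String.class), 0, 0);
    merged.invoke("x");                       // pair("x", "x") -> "x|x"
    // "swap": reorder the two parameters
    MethodHandle swapped = swapArguments(pair, 0, 1);
    swapped.invoke("a", "b");                 // pair("b", "a") -> "b|a"
}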
private static MethodHandle reachabilityFenceHandle(Class<?> type) {
return MH_REACHBILITY_FENCE.asType(MethodType.methodType(void.class, type));
}
static MethodHandle wrapWithAllocator(MethodHandle specializedHandle,
int allocatorPos, long bufferCopySize,
boolean upcall) {
// insert try-finally to close the Binding.Context used for Binding.Copy
MethodHandle closer;
int insertPos;
if (specializedHandle.type().returnType() == void.class) {
closer = empty(methodType(void.class, Throwable.class)); // (Throwable) -> void
insertPos = 1;
} else {
closer = identity(specializedHandle.type().returnType()); // (V) -> V
closer = dropArguments(closer, 0, Throwable.class); // (Throwable, V) -> V
insertPos = 2;
}
// downcalls get the leading Addressable/SegmentAllocator param as well
if (!upcall) {
closer = collectArguments(closer, insertPos++, reachabilityFenceHandle(Addressable.class));
closer = dropArguments(closer, insertPos++, SegmentAllocator.class); // (Throwable, V?, Addressable, SegmentAllocator) -> V/void
}
closer = collectArguments(closer, insertPos++, MH_CLOSE_CONTEXT); // (Throwable, V?, Addressable?, BindingContext) -> V/void
if (!upcall) {
// now for each Addressable parameter, add a reachability fence
MethodType specType = specializedHandle.type();
// skip 3 for address, segment allocator, and binding context
for (int i = 3; i < specType.parameterCount(); i++) {
Class<?> param = specType.parameterType(i);
if (Addressable.class.isAssignableFrom(param)) {
closer = collectArguments(closer, insertPos++, reachabilityFenceHandle(param));
} else {
closer = dropArguments(closer, insertPos++, param);
}
}
}
MethodHandle contextFactory;
if (bufferCopySize > 0) {
contextFactory = MethodHandles.insertArguments(MH_MAKE_CONTEXT_BOUNDED_ALLOCATOR, 0, bufferCopySize);
} else if (upcall) {
contextFactory = MH_MAKE_CONTEXT_NO_ALLOCATOR;
} else {
// this path is probably never used now, since ProgrammableInvoker never calls this routine with bufferCopySize == 0
contextFactory = constant(Binding.Context.class, Binding.Context.DUMMY);
}
specializedHandle = tryFinally(specializedHandle, closer);
specializedHandle = collectArguments(specializedHandle, allocatorPos, contextFactory);
return specializedHandle;
}
// lazy init MH_ALLOC and MH_FREE handles
private static class AllocHolder {
final static LibraryLookup LOOKUP = LibraryLookup.ofDefault();
static final LibraryLookup LOOKUP = LibraryLookup.ofDefault();
final static MethodHandle MH_MALLOC = getSystemLinker().downcallHandle(LOOKUP.lookup("malloc").get(),
static final MethodHandle MH_MALLOC = getSystemLinker().downcallHandle(LOOKUP.lookup("malloc").get(),
MethodType.methodType(MemoryAddress.class, long.class),
FunctionDescriptor.of(C_POINTER, C_LONG_LONG));
final static MethodHandle MH_FREE = getSystemLinker().downcallHandle(LOOKUP.lookup("free").get(),
static final MethodHandle MH_FREE = getSystemLinker().downcallHandle(LOOKUP.lookup("free").get(),
MethodType.methodType(void.class, MemoryAddress.class),
FunctionDescriptor.ofVoid(C_POINTER));
}
@ -294,25 +442,25 @@ public class SharedUtils {
}
}
public static VaList newVaList(Consumer<VaList.Builder> actions, Allocator allocator) {
public static VaList newVaList(Consumer<VaList.Builder> actions, ResourceScope scope) {
return switch (CABI.current()) {
case Win64 -> Windowsx64Linker.newVaList(actions, allocator);
case SysV -> SysVx64Linker.newVaList(actions, allocator);
case AArch64 -> AArch64Linker.newVaList(actions, allocator);
case Win64 -> Windowsx64Linker.newVaList(actions, scope);
case SysV -> SysVx64Linker.newVaList(actions, scope);
case AArch64 -> AArch64Linker.newVaList(actions, scope);
};
}
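// Editor's sketch (not part of the patch): building and reading a VaList with
// the new scope-based factory above. vargFromInt/vargAsInt and friends are
// part of the CLinker.VaList builder API at this commit; C_INT/C_DOUBLE stand
// in for the platform layouts.
private static void vaListDemo() {
    try (ResourceScope scope = ResourceScope.newConfinedScope()) {
        VaList list = newVaList(b -> b.vargFromInt(C_INT, 42)
                                      .vargFromDouble(C_DOUBLE, 3.14), scope);
        int i = list.vargAsInt(C_INT);          // 42
        double d = list.vargAsDouble(C_DOUBLE); // 3.14
    } // closing the scope releases the VaList storage
}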
public static VarHandle vhPrimitiveOrAddress(Class<?> carrier, MemoryLayout layout) {
return carrier == MemoryAddress.class
? MemoryHandles.asAddressVarHandle(layout.varHandle(primitiveCarrierForSize(layout.byteSize())))
? MemoryHandles.asAddressVarHandle(layout.varHandle(primitiveCarrierForSize(layout.byteSize(), false)))
: layout.varHandle(carrier);
}
public static VaList newVaListOfAddress(MemoryAddress ma) {
public static VaList newVaListOfAddress(MemoryAddress ma, ResourceScope scope) {
return switch (CABI.current()) {
case Win64 -> Windowsx64Linker.newVaListOfAddress(ma);
case SysV -> SysVx64Linker.newVaListOfAddress(ma);
case AArch64 -> AArch64Linker.newVaListOfAddress(ma);
case Win64 -> Windowsx64Linker.newVaListOfAddress(ma, scope);
case SysV -> SysVx64Linker.newVaListOfAddress(ma, scope);
case AArch64 -> AArch64Linker.newVaListOfAddress(ma, scope);
};
}
@ -336,7 +484,7 @@ public class SharedUtils {
public static MethodHandle unboxVaLists(MethodType type, MethodHandle handle, MethodHandle unboxer) {
for (int i = 0; i < type.parameterCount(); i++) {
if (type.parameterType(i) == VaList.class) {
handle = MethodHandles.filterArguments(handle, i, unboxer);
handle = filterArguments(handle, i + 1, unboxer); // +1 for leading address
}
}
return handle;
@ -346,7 +494,7 @@ public class SharedUtils {
MethodType type = handle.type();
for (int i = 0; i < type.parameterCount(); i++) {
if (type.parameterType(i) == VaList.class) {
handle = MethodHandles.filterArguments(handle, i, boxer);
handle = filterArguments(handle, i, boxer);
}
}
return handle;
@ -365,37 +513,6 @@ public class SharedUtils {
.orElse(false);
}
public interface Allocator extends AutoCloseable {
Allocator THROWING_ALLOCATOR = (size, align) -> { throw new UnsupportedOperationException("Null allocator"); };
default MemorySegment allocate(MemoryLayout layout) {
return allocate(layout.byteSize(), layout.byteAlignment());
}
default MemorySegment allocate(long size) {
return allocate(size, 1);
}
@Override
default void close() {}
MemorySegment allocate(long size, long align);
static Allocator ofScope(NativeScope scope) {
return new Allocator() {
@Override
public MemorySegment allocate(long size, long align) {
return scope.allocate(size, align);
}
@Override
public void close() {
scope.close();
}
};
}
}
public static class SimpleVaArg {
public final Class<?> carrier;
public final MemoryLayout layout;
@ -409,12 +526,12 @@ public class SharedUtils {
public VarHandle varHandle() {
return carrier == MemoryAddress.class
? MemoryHandles.asAddressVarHandle(layout.varHandle(primitiveCarrierForSize(layout.byteSize())))
? MemoryHandles.asAddressVarHandle(layout.varHandle(primitiveCarrierForSize(layout.byteSize(), false)))
: layout.varHandle(carrier);
}
}
public static class EmptyVaList implements VaList {
public static non-sealed class EmptyVaList implements VaList {
private final MemoryAddress address;
@ -447,12 +564,12 @@ public class SharedUtils {
}
@Override
public MemorySegment vargAsSegment(MemoryLayout layout) {
public MemorySegment vargAsSegment(MemoryLayout layout, SegmentAllocator allocator) {
throw uoe();
}
@Override
public MemorySegment vargAsSegment(MemoryLayout layout, NativeScope scope) {
public MemorySegment vargAsSegment(MemoryLayout layout, ResourceScope scope) {
throw uoe();
}
@ -462,13 +579,8 @@ public class SharedUtils {
}
@Override
public boolean isAlive() {
return true;
}
@Override
public void close() {
throw uoe();
public ResourceScope scope() {
return ResourceScope.globalScope();
}
@Override
@ -476,11 +588,6 @@ public class SharedUtils {
return this;
}
@Override
public VaList copy(NativeScope scope) {
throw uoe();
}
@Override
public MemoryAddress address() {
return address;


@ -26,18 +26,16 @@ package jdk.internal.foreign.abi;
import jdk.incubator.foreign.MemoryAddress;
import jdk.incubator.foreign.MemorySegment;
import jdk.internal.foreign.MemoryAddressImpl;
import jdk.internal.foreign.ResourceScopeImpl;
import jdk.internal.foreign.NativeMemorySegmentImpl;
public class UpcallStubs {
public static MemorySegment upcallAddress(UpcallHandler handler) {
public static MemoryAddress upcallAddress(UpcallHandler handler, ResourceScopeImpl scope) {
long stubAddress = handler.entryPoint();
return NativeMemorySegmentImpl.makeNativeSegmentUnchecked(
MemoryAddress.ofLong(stubAddress), 0, () -> freeUpcallStub(stubAddress), null)
.share()
.withAccessModes(MemorySegment.CLOSE | MemorySegment.HANDOFF | MemorySegment.SHARE);
};
return NativeMemorySegmentImpl.makeNativeSegmentUnchecked(MemoryAddress.ofLong(stubAddress), 0,
() -> freeUpcallStub(stubAddress), scope).address();
}
private static void freeUpcallStub(long stubAddress) {
if (!freeUpcallStub0(stubAddress)) {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2020, Arm Limited. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -25,12 +25,12 @@
*/
package jdk.internal.foreign.abi.aarch64;
import jdk.incubator.foreign.Addressable;
import jdk.incubator.foreign.FunctionDescriptor;
import jdk.incubator.foreign.MemoryAddress;
import jdk.incubator.foreign.MemoryLayout;
import jdk.incubator.foreign.MemorySegment;
import jdk.incubator.foreign.CLinker;
import jdk.incubator.foreign.ResourceScope;
import jdk.internal.foreign.AbstractCLinker;
import jdk.internal.foreign.ResourceScopeImpl;
import jdk.internal.foreign.abi.SharedUtils;
import jdk.internal.foreign.abi.UpcallStubs;
@ -40,13 +40,11 @@ import java.lang.invoke.MethodType;
import java.util.Objects;
import java.util.function.Consumer;
import static jdk.internal.foreign.PlatformLayouts.*;
/**
* ABI implementation based on ARM document "Procedure Call Standard for
* the ARM 64-bit Architecture".
*/
public class AArch64Linker implements CLinker {
public final class AArch64Linker extends AbstractCLinker {
private static AArch64Linker instance;
static final long ADDRESS_SIZE = 64; // bits
@ -59,8 +57,8 @@ public class AArch64Linker implements CLinker {
MethodHandles.Lookup lookup = MethodHandles.lookup();
MH_unboxVaList = lookup.findVirtual(VaList.class, "address",
MethodType.methodType(MemoryAddress.class));
MH_boxVaList = lookup.findStatic(AArch64Linker.class, "newVaListOfAddress",
MethodType.methodType(VaList.class, MemoryAddress.class));
MH_boxVaList = MethodHandles.insertArguments(lookup.findStatic(AArch64Linker.class, "newVaListOfAddress",
MethodType.methodType(VaList.class, MemoryAddress.class, ResourceScope.class)), 1, ResourceScope.globalScope());
} catch (ReflectiveOperationException e) {
throw new ExceptionInInitializerError(e);
}
@ -74,32 +72,36 @@ public class AArch64Linker implements CLinker {
}
@Override
public MethodHandle downcallHandle(Addressable symbol, MethodType type, FunctionDescriptor function) {
Objects.requireNonNull(symbol);
public final MethodHandle downcallHandle(MethodType type, FunctionDescriptor function) {
Objects.requireNonNull(type);
Objects.requireNonNull(function);
MethodType llMt = SharedUtils.convertVaListCarriers(type, AArch64VaList.CARRIER);
MethodHandle handle = CallArranger.arrangeDowncall(symbol, llMt, function);
MethodHandle handle = CallArranger.arrangeDowncall(llMt, function);
if (!type.returnType().equals(MemorySegment.class)) {
// not returning segment, just insert a throwing allocator
handle = MethodHandles.insertArguments(handle, 1, SharedUtils.THROWING_ALLOCATOR);
}
handle = SharedUtils.unboxVaLists(type, handle, MH_unboxVaList);
return handle;
}
@Override
public MemorySegment upcallStub(MethodHandle target, FunctionDescriptor function) {
public final MemoryAddress upcallStub(MethodHandle target, FunctionDescriptor function, ResourceScope scope) {
Objects.requireNonNull(scope);
Objects.requireNonNull(target);
Objects.requireNonNull(function);
target = SharedUtils.boxVaLists(target, MH_boxVaList);
return UpcallStubs.upcallAddress(CallArranger.arrangeUpcall(target, target.type(), function));
return UpcallStubs.upcallAddress(CallArranger.arrangeUpcall(target, target.type(), function), (ResourceScopeImpl) scope);
}
public static VaList newVaList(Consumer<VaList.Builder> actions, SharedUtils.Allocator allocator) {
AArch64VaList.Builder builder = AArch64VaList.builder(allocator);
public static VaList newVaList(Consumer<VaList.Builder> actions, ResourceScope scope) {
AArch64VaList.Builder builder = AArch64VaList.builder(scope);
actions.accept(builder);
return builder.build();
}
public static VaList newVaListOfAddress(MemoryAddress ma) {
return AArch64VaList.ofAddress(ma);
public static VaList newVaListOfAddress(MemoryAddress ma, ResourceScope scope) {
return AArch64VaList.ofAddress(ma, scope);
}
public static VaList emptyVaList() {


@ -26,7 +26,6 @@
package jdk.internal.foreign.abi.aarch64;
import jdk.incubator.foreign.*;
import jdk.internal.foreign.NativeMemorySegmentImpl;
import jdk.internal.foreign.Utils;
import jdk.internal.foreign.abi.SharedUtils;
import jdk.internal.misc.Unsafe;
@ -42,11 +41,12 @@ import static jdk.internal.foreign.PlatformLayouts.AArch64;
import static jdk.incubator.foreign.CLinker.VaList;
import static jdk.incubator.foreign.MemoryLayout.PathElement.groupElement;
import static jdk.internal.foreign.abi.SharedUtils.SimpleVaArg;
import static jdk.internal.foreign.abi.SharedUtils.THROWING_ALLOCATOR;
import static jdk.internal.foreign.abi.SharedUtils.checkCompatibleType;
import static jdk.internal.foreign.abi.SharedUtils.vhPrimitiveOrAddress;
import static jdk.internal.foreign.abi.aarch64.CallArranger.MAX_REGISTER_ARGUMENTS;
public class AArch64VaList implements VaList {
public non-sealed class AArch64VaList implements VaList {
private static final Unsafe U = Unsafe.getUnsafe();
static final Class<?> CARRIER = MemoryAddress.class;
@ -62,7 +62,7 @@ public class AArch64VaList implements VaList {
// int __vr_offs; // offset from __vr_top to next FP/SIMD register arg
// } va_list;
static final GroupLayout LAYOUT = MemoryLayout.ofStruct(
static final GroupLayout LAYOUT = MemoryLayout.structLayout(
AArch64.C_POINTER.withName("__stack"),
AArch64.C_POINTER.withName("__gr_top"),
AArch64.C_POINTER.withName("__vr_top"),
@ -71,14 +71,14 @@ public class AArch64VaList implements VaList {
).withName("__va_list");
private static final MemoryLayout GP_REG
= MemoryLayout.ofValueBits(64, ByteOrder.nativeOrder());
= MemoryLayout.valueLayout(64, ByteOrder.nativeOrder());
private static final MemoryLayout FP_REG
= MemoryLayout.ofValueBits(128, ByteOrder.nativeOrder());
= MemoryLayout.valueLayout(128, ByteOrder.nativeOrder());
private static final MemoryLayout LAYOUT_GP_REGS
= MemoryLayout.ofSequence(MAX_REGISTER_ARGUMENTS, GP_REG);
= MemoryLayout.sequenceLayout(MAX_REGISTER_ARGUMENTS, GP_REG);
private static final MemoryLayout LAYOUT_FP_REGS
= MemoryLayout.ofSequence(MAX_REGISTER_ARGUMENTS, FP_REG);
= MemoryLayout.sequenceLayout(MAX_REGISTER_ARGUMENTS, FP_REG);
private static final int GP_SLOT_SIZE = (int) GP_REG.byteSize();
private static final int FP_SLOT_SIZE = (int) FP_REG.byteSize();
@ -104,31 +104,27 @@ public class AArch64VaList implements VaList {
private final MemorySegment segment;
private final MemorySegment gpRegsArea;
private final MemorySegment fpRegsArea;
private final List<MemorySegment> attachedSegments;
private AArch64VaList(MemorySegment segment, MemorySegment gpRegsArea, MemorySegment fpRegsArea,
List<MemorySegment> attachedSegments) {
private AArch64VaList(MemorySegment segment, MemorySegment gpRegsArea, MemorySegment fpRegsArea) {
this.segment = segment;
this.gpRegsArea = gpRegsArea;
this.fpRegsArea = fpRegsArea;
this.attachedSegments = attachedSegments;
}
private static AArch64VaList readFromSegment(MemorySegment segment) {
MemorySegment gpRegsArea = handoffIfNeeded(grTop(segment).addOffset(-MAX_GP_OFFSET)
.asSegmentRestricted(MAX_GP_OFFSET), segment.ownerThread());
MemorySegment gpRegsArea = grTop(segment).addOffset(-MAX_GP_OFFSET).asSegment(
MAX_GP_OFFSET, segment.scope());
MemorySegment fpRegsArea = handoffIfNeeded(vrTop(segment).addOffset(-MAX_FP_OFFSET)
.asSegmentRestricted(MAX_FP_OFFSET), segment.ownerThread());
return new AArch64VaList(segment, gpRegsArea, fpRegsArea, List.of(gpRegsArea, fpRegsArea));
MemorySegment fpRegsArea = vrTop(segment).addOffset(-MAX_FP_OFFSET).asSegment(
MAX_FP_OFFSET, segment.scope());
return new AArch64VaList(segment, gpRegsArea, fpRegsArea);
}
private static MemoryAddress emptyListAddress() {
long ptr = U.allocateMemory(LAYOUT.byteSize());
MemorySegment ms = MemoryAddress.ofLong(ptr)
.asSegmentRestricted(LAYOUT.byteSize(), () -> U.freeMemory(ptr), null)
.share();
cleaner.register(AArch64VaList.class, ms::close);
MemorySegment ms = MemoryAddress.ofLong(ptr).asSegment(
LAYOUT.byteSize(), () -> U.freeMemory(ptr), ResourceScope.newSharedScope());
cleaner.register(AArch64VaList.class, () -> ms.scope().close());
VH_stack.set(ms, MemoryAddress.NULL);
VH_gr_top.set(ms, MemoryAddress.NULL);
VH_vr_top.set(ms, MemoryAddress.NULL);
@ -234,21 +230,21 @@ public class AArch64VaList implements VaList {
}
@Override
public MemorySegment vargAsSegment(MemoryLayout layout) {
return (MemorySegment) read(MemorySegment.class, layout);
public MemorySegment vargAsSegment(MemoryLayout layout, SegmentAllocator allocator) {
Objects.requireNonNull(allocator);
return (MemorySegment) read(MemorySegment.class, layout, allocator);
}
@Override
public MemorySegment vargAsSegment(MemoryLayout layout, NativeScope scope) {
Objects.requireNonNull(scope);
return (MemorySegment) read(MemorySegment.class, layout, SharedUtils.Allocator.ofScope(scope));
public MemorySegment vargAsSegment(MemoryLayout layout, ResourceScope scope) {
return vargAsSegment(layout, SegmentAllocator.ofScope(scope));
}
private Object read(Class<?> carrier, MemoryLayout layout) {
return read(carrier, layout, MemorySegment::allocateNative);
return read(carrier, layout, THROWING_ALLOCATOR);
}
private Object read(Class<?> carrier, MemoryLayout layout, SharedUtils.Allocator allocator) {
private Object read(Class<?> carrier, MemoryLayout layout, SegmentAllocator allocator) {
Objects.requireNonNull(layout);
checkCompatibleType(carrier, layout, AArch64Linker.ADDRESS_SIZE);
@ -257,22 +253,18 @@ public class AArch64VaList implements VaList {
preAlignStack(layout);
return switch (typeClass) {
case STRUCT_REGISTER, STRUCT_HFA, STRUCT_REFERENCE -> {
try (MemorySegment slice = handoffIfNeeded(stackPtr()
.asSegmentRestricted(layout.byteSize()), segment.ownerThread())) {
MemorySegment seg = allocator.allocate(layout);
seg.copyFrom(slice);
postAlignStack(layout);
yield seg;
}
MemorySegment slice = stackPtr().asSegment(layout.byteSize(), scope());
MemorySegment seg = allocator.allocate(layout);
seg.copyFrom(slice);
postAlignStack(layout);
yield seg;
}
case POINTER, INTEGER, FLOAT -> {
VarHandle reader = vhPrimitiveOrAddress(carrier, layout);
try (MemorySegment slice = handoffIfNeeded(stackPtr()
.asSegmentRestricted(layout.byteSize()), segment.ownerThread())) {
Object res = reader.get(slice);
postAlignStack(layout);
yield res;
}
MemorySegment slice = stackPtr().asSegment(layout.byteSize(), scope());
Object res = reader.get(slice);
postAlignStack(layout);
yield res;
}
};
} else {
@ -314,12 +306,10 @@ public class AArch64VaList implements VaList {
gpRegsArea.asSlice(currentGPOffset()));
consumeGPSlots(1);
try (MemorySegment slice = handoffIfNeeded(ptr
.asSegmentRestricted(layout.byteSize()), segment.ownerThread())) {
MemorySegment seg = allocator.allocate(layout);
seg.copyFrom(slice);
yield seg;
}
MemorySegment slice = ptr.asSegment(layout.byteSize(), scope());
MemorySegment seg = allocator.allocate(layout);
seg.copyFrom(slice);
yield seg;
}
case POINTER, INTEGER -> {
VarHandle reader = SharedUtils.vhPrimitiveOrAddress(carrier, layout);
@ -356,40 +346,24 @@ public class AArch64VaList implements VaList {
}
}
static AArch64VaList.Builder builder(SharedUtils.Allocator allocator) {
return new AArch64VaList.Builder(allocator);
static AArch64VaList.Builder builder(ResourceScope scope) {
return new AArch64VaList.Builder(scope);
}
public static VaList ofAddress(MemoryAddress ma) {
return readFromSegment(ma.asSegmentRestricted(LAYOUT.byteSize()));
public static VaList ofAddress(MemoryAddress ma, ResourceScope scope) {
return readFromSegment(ma.asSegment(LAYOUT.byteSize(), scope));
}
@Override
public boolean isAlive() {
return segment.isAlive();
}
@Override
public void close() {
segment.close();
attachedSegments.forEach(MemorySegment::close);
public ResourceScope scope() {
return segment.scope();
}
@Override
public VaList copy() {
return copy(MemorySegment::allocateNative);
}
@Override
public VaList copy(NativeScope scope) {
Objects.requireNonNull(scope);
return copy(SharedUtils.Allocator.ofScope(scope));
}
private VaList copy(SharedUtils.Allocator allocator) {
MemorySegment copy = allocator.allocate(LAYOUT);
MemorySegment copy = MemorySegment.allocateNative(LAYOUT, segment.scope());
copy.copyFrom(segment);
return new AArch64VaList(copy, gpRegsArea, fpRegsArea, List.of());
return new AArch64VaList(copy, gpRegsArea, fpRegsArea);
}
@Override
@ -423,8 +397,8 @@ public class AArch64VaList implements VaList {
+ '}';
}
static class Builder implements VaList.Builder {
private final SharedUtils.Allocator allocator;
public static non-sealed class Builder implements VaList.Builder {
private final ResourceScope scope;
private final MemorySegment gpRegs;
private final MemorySegment fpRegs;
@ -432,10 +406,10 @@ public class AArch64VaList implements VaList {
private long currentFPOffset = 0;
private final List<SimpleVaArg> stackArgs = new ArrayList<>();
Builder(SharedUtils.Allocator allocator) {
this.allocator = allocator;
this.gpRegs = allocator.allocate(LAYOUT_GP_REGS);
this.fpRegs = allocator.allocate(LAYOUT_FP_REGS);
Builder(ResourceScope scope) {
this.scope = scope;
this.gpRegs = MemorySegment.allocateNative(LAYOUT_GP_REGS, scope);
this.fpRegs = MemorySegment.allocateNative(LAYOUT_FP_REGS, scope);
}
@Override
@ -534,8 +508,8 @@ public class AArch64VaList implements VaList {
return EMPTY;
}
SegmentAllocator allocator = SegmentAllocator.arenaAllocator(scope);
MemorySegment vaListSegment = allocator.allocate(LAYOUT);
List<MemorySegment> attachedSegments = new ArrayList<>();
MemoryAddress stackArgsPtr = MemoryAddress.NULL;
if (!stackArgs.isEmpty()) {
long stackArgsSize = stackArgs.stream()
@ -549,7 +523,6 @@ public class AArch64VaList implements VaList {
writer.set(stackArgsSegment, arg.value);
stackArgsSegment = stackArgsSegment.asSlice(alignedSize);
}
attachedSegments.add(stackArgsSegment);
}
VH_gr_top.set(vaListSegment, gpRegs.asSlice(gpRegs.byteSize()).address());
@ -558,16 +531,9 @@ public class AArch64VaList implements VaList {
VH_gr_offs.set(vaListSegment, -MAX_GP_OFFSET);
VH_vr_offs.set(vaListSegment, -MAX_FP_OFFSET);
attachedSegments.add(gpRegs);
attachedSegments.add(fpRegs);
assert gpRegs.ownerThread() == vaListSegment.ownerThread();
assert fpRegs.ownerThread() == vaListSegment.ownerThread();
return new AArch64VaList(vaListSegment, gpRegs, fpRegs, attachedSegments);
assert gpRegs.scope().ownerThread() == vaListSegment.scope().ownerThread();
assert fpRegs.scope().ownerThread() == vaListSegment.scope().ownerThread();
return new AArch64VaList(vaListSegment, gpRegs, fpRegs);
}
}
private static MemorySegment handoffIfNeeded(MemorySegment segment, Thread thread) {
return segment.ownerThread() == thread ?
segment : segment.handoff(thread);
}
}

Some files were not shown because too many files have changed in this diff