8301997: Move method resolution information out of the cpCache

Co-authored-by: Gui Cao <gcao@openjdk.org>
Co-authored-by: Fei Yang <fyang@openjdk.org>
Co-authored-by: Martin Doerr <mdoerr@openjdk.org>
Co-authored-by: Amit Kumar <amitkumar@openjdk.org>
Reviewed-by: coleenp, adinn, fparain
Matias Saavedra Silva 2023-11-15 19:04:11 +00:00
parent 891d8cfaf2
commit ffa35d8cf1
79 changed files with 2027 additions and 2738 deletions

View File

@ -38,6 +38,7 @@
#include "oops/methodData.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
@ -201,64 +202,6 @@ void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
}
}
// Return
// Rindex: index into constant pool
// Rcache: address of cache entry - ConstantPoolCache::base_offset()
//
// A caller must add ConstantPoolCache::base_offset() to Rcache to get
// the true address of the cache entry.
//
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
Register index,
int bcp_offset,
size_t index_size) {
assert_different_registers(cache, index);
assert_different_registers(cache, rcpool);
get_cache_index_at_bcp(index, bcp_offset, index_size);
assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
// convert from field index to ConstantPoolCacheEntry
// aarch64 already has the cache in rcpool so there is no need to
// install it in cache. instead we pre-add the indexed offset to
// rcpool and return it in cache. All clients of this method need to
// be modified accordingly.
add(cache, rcpool, index, Assembler::LSL, 5);
}
void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
Register index,
Register bytecode,
int byte_no,
int bcp_offset,
size_t index_size) {
get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
// We use a 32-bit load here since the layout of 64-bit words on
// little-endian machines allow us that.
// n.b. unlike x86 cache already includes the index offset
lea(bytecode, Address(cache,
ConstantPoolCache::base_offset()
+ ConstantPoolCacheEntry::indices_offset()));
ldarw(bytecode, bytecode);
const int shift_count = (1 + byte_no) * BitsPerByte;
ubfx(bytecode, bytecode, shift_count, BitsPerByte);
}
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
Register tmp,
int bcp_offset,
size_t index_size) {
assert(cache != tmp, "must use different register");
get_cache_index_at_bcp(tmp, bcp_offset, index_size);
assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
// convert from field index to ConstantPoolCacheEntry index
// and from word offset to byte offset
assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");
ldr(cache, Address(rfp, frame::interpreter_frame_cache_offset * wordSize));
// skip past the header
add(cache, cache, in_bytes(ConstantPoolCache::base_offset()));
add(cache, cache, tmp, Assembler::LSL, 2 + LogBytesPerWord); // construct pointer to cache entry
}
void InterpreterMacroAssembler::get_method_counters(Register method,
Register mcs, Label& skip) {
Label has_counters;
@ -295,18 +238,6 @@ void InterpreterMacroAssembler::load_resolved_klass_at_offset(
ldr(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
}
void InterpreterMacroAssembler::load_resolved_method_at_index(int byte_no,
Register method,
Register cache) {
const int method_offset = in_bytes(
ConstantPoolCache::base_offset() +
((byte_no == TemplateTable::f2_byte)
? ConstantPoolCacheEntry::f2_offset()
: ConstantPoolCacheEntry::f1_offset()));
ldr(method, Address(cache, method_offset)); // get f1 Method*
}
// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.
//
@ -1866,3 +1797,15 @@ void InterpreterMacroAssembler::load_field_entry(Register cache, Register index,
add(cache, cache, Array<ResolvedFieldEntry>::base_offset_in_bytes());
lea(cache, Address(cache, index));
}
void InterpreterMacroAssembler::load_method_entry(Register cache, Register index, int bcp_offset) {
// Get index out of bytecode pointer
get_cache_index_at_bcp(index, bcp_offset, sizeof(u2));
mov(cache, sizeof(ResolvedMethodEntry));
mul(index, index, cache); // Scale the index to be the entry index * sizeof(ResolvedMethodEntry)
// Get address of method entries array
ldr(cache, Address(rcpool, ConstantPoolCache::method_entries_offset()));
add(cache, cache, Array<ResolvedMethodEntry>::base_offset_in_bytes());
lea(cache, Address(cache, index));
}
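For reference, the address computation performed by load_method_entry amounts to the standalone C++ sketch below. EntrySketch and the offset parameters are simplified stand-ins for ResolvedMethodEntry and Array<ResolvedMethodEntry>, not the real HotSpot definitions.

#include <cstddef>
#include <cstdint>

// Hypothetical stand-in for ResolvedMethodEntry; the real field layout differs.
struct EntrySketch {
  void*    method;
  uint16_t num_parameters;
  uint8_t  flags;
  uint8_t  bytecode1;
  uint8_t  bytecode2;
};

// Mirrors: cache = method_entries + base_offset + index * sizeof(ResolvedMethodEntry)
inline EntrySketch* method_entry_at(void* method_entries_array,
                                    size_t base_offset_in_bytes,
                                    size_t index) {
  char* base = static_cast<char*>(method_entries_array) + base_offset_in_bytes;
  return reinterpret_cast<EntrySketch*>(base + index * sizeof(EntrySketch));
}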

View File

@ -140,9 +140,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
}
void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
void get_method_counters(Register method, Register mcs, Label& skip);
@ -152,8 +149,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
// load cpool->resolved_klass_at(index);
void load_resolved_klass_at_offset(Register cpool, Register index, Register klass, Register temp);
void load_resolved_method_at_index(int byte_no, Register method, Register cache);
void pop_ptr(Register r = r0);
void pop_i(Register r = r0);
void pop_l(Register r = r0);
@ -324,6 +319,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
void load_resolved_indy_entry(Register cache, Register index);
void load_field_entry(Register cache, Register index, int bcp_offset = 1);
void load_method_entry(Register cache, Register index, int bcp_offset = 1);
};
#endif // CPU_AARCH64_INTERP_MASM_AARCH64_HPP

View File

@ -42,6 +42,7 @@
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
@ -493,10 +494,9 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ add(esp, esp, cache, Assembler::LSL, 3);
} else {
// Pop N words from the stack
__ get_cache_and_index_at_bcp(cache, index, 1, index_size);
__ ldr(cache, Address(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
__ andr(cache, cache, ConstantPoolCacheEntry::parameter_size_mask);
assert(index_size == sizeof(u2), "Can only be u2");
__ load_method_entry(cache, index);
__ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));
__ add(esp, esp, cache, Assembler::LSL, 3);
}
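The non-invokedynamic path now reads the parameter count directly from the ResolvedMethodEntry and pops that many expression-stack slots; the LSL #3 scaling corresponds to 8-byte slots, as in this minimal sketch (names are illustrative, not the interpreter's API):

#include <cstdint>

// Sketch of the stack adjustment after an invoke returns.
// slot_size = 8 matches the LSL #3 scaling in the assembly above.
inline uintptr_t pop_arguments(uintptr_t esp, uint16_t num_parameters) {
  const uintptr_t slot_size = 8;            // one expression-stack slot per parameter
  return esp + num_parameters * slot_size;  // the expression stack grows downward
}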

View File

@ -40,6 +40,7 @@
#include "oops/oop.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
@ -488,15 +489,15 @@ void TemplateTable::condy_helper(Label& Done)
// VMr = obj = base address to find primitive value to push
// VMr2 = flags = (tos, off) using format of CPCE::_flags
__ mov(off, flags);
__ andw(off, off, ConstantPoolCacheEntry::field_index_mask);
__ andw(off, off, ConstantPoolCache::field_index_mask);
const Address field(obj, off);
// What sort of thing are we loading?
// x86 uses a shift and mask or wings it with a shift plus assert
// the mask is not needed. aarch64 just uses bitfield extract
__ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift,
ConstantPoolCacheEntry::tos_state_bits);
__ ubfxw(flags, flags, ConstantPoolCache::tos_state_shift,
ConstantPoolCache::tos_state_bits);
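The condy flags word packs the offset in its low bits and the TosState in a higher bit-field; the two extractions above are equivalent to the sketch below, where the shift, width, and mask values are assumptions for illustration rather than quotes from the header:

#include <cstdint>

// Illustrative unpacking of the condy flags word. Exact shift/mask values
// are assumed here; only the two-step extraction mirrors the code above.
struct CondyFlags {
  uint32_t raw;
  uint32_t offset(uint32_t field_index_mask = 0xFFFFu) const {
    return raw & field_index_mask;                 // __ andw(off, flags, field_index_mask)
  }
  uint32_t tos_state(uint32_t shift = 28, uint32_t bits = 4) const {
    return (raw >> shift) & ((1u << bits) - 1u);   // __ ubfxw(flags, flags, shift, bits)
  }
};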
switch (bytecode()) {
case Bytecodes::_ldc:
@ -2257,19 +2258,27 @@ void TemplateTable::_return(TosState state)
// volatile-stores although it could just as well go before
// volatile-loads.
void TemplateTable::resolve_cache_and_index(int byte_no,
void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
Register Rcache,
Register index,
size_t index_size) {
Register index) {
const Register temp = r19;
assert_different_registers(Rcache, index, temp);
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
Label resolved, clinit_barrier_slow;
Bytecodes::Code code = bytecode();
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
__ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
__ load_method_entry(Rcache, index);
switch(byte_no) {
case f1_byte:
__ lea(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::bytecode1_offset())));
break;
case f2_byte:
__ lea(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::bytecode2_offset())));
break;
}
// Load-acquire the bytecode to match store-release in InterpreterRuntime
__ ldarb(temp, temp);
__ subs(zr, temp, (int) code); // have we resolved this bytecode?
__ br(Assembler::EQ, resolved);
@ -2281,14 +2290,14 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
__ call_VM(noreg, entry, temp);
// Update registers with resolved info
__ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
__ load_method_entry(Rcache, index);
// n.b. unlike x86 Rcache is now rcpool plus the indexed offset
// so all clients of this method must be modified accordingly
__ bind(resolved);
// Class initialization barrier for static methods
if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
__ load_resolved_method_at_index(byte_no, temp, Rcache);
__ ldr(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
__ load_method_holder(temp, temp);
__ clinit_barrier(temp, rscratch1, nullptr, &clinit_barrier_slow);
}
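The resolved check relies on a release/acquire pairing: the runtime fills in the entry and then store-releases the bytecode, and the interpreter load-acquires the bytecode before trusting the rest of the entry. A minimal C++ sketch of that protocol, with a simplified stand-in for ResolvedMethodEntry:

#include <atomic>
#include <cstdint>

// 'Entry' is a simplified stand-in for ResolvedMethodEntry.
struct Entry {
  void*                method = nullptr;   // payload written during resolution
  std::atomic<uint8_t> bytecode1{0};       // published last, checked first
};

// Resolver side (conceptually what the InterpreterRuntime resolution path does):
inline void publish(Entry& e, void* m, uint8_t code) {
  e.method = m;                                         // fill in the entry...
  e.bytecode1.store(code, std::memory_order_release);   // ...then store-release the bytecode
}

// Interpreter side (the fast path generated above):
inline bool is_resolved(const Entry& e, uint8_t current_code) {
  // The load-acquire pairs with the store-release, so a matching bytecode
  // guarantees the method field written before it is visible.
  return e.bytecode1.load(std::memory_order_acquire) == current_code;
}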
@ -2357,32 +2366,103 @@ void TemplateTable::load_resolved_field_entry(Register obj,
}
}
// The Rcache and index registers must be set before call
// n.b unlike x86 cache already includes the index offset
void TemplateTable::load_field_cp_cache_entry(Register obj,
Register cache,
Register index,
Register off,
Register flags,
bool is_static = false) {
assert_different_registers(cache, index, flags, off);
void TemplateTable::load_resolved_method_entry_special_or_static(Register cache,
Register method,
Register flags) {
ByteSize cp_base_offset = ConstantPoolCache::base_offset();
// Field offset
__ ldr(off, Address(cache, in_bytes(cp_base_offset +
ConstantPoolCacheEntry::f2_offset())));
// Flags
__ ldrw(flags, Address(cache, in_bytes(cp_base_offset +
ConstantPoolCacheEntry::flags_offset())));
// setup registers
const Register index = flags;
assert_different_registers(method, cache, flags);
// klass overwrite register
if (is_static) {
__ ldr(obj, Address(cache, in_bytes(cp_base_offset +
ConstantPoolCacheEntry::f1_offset())));
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ ldr(obj, Address(obj, mirror_offset));
__ resolve_oop_handle(obj, r5, rscratch2);
}
// determine constant pool cache field offsets
resolve_cache_and_index_for_method(f1_byte, cache, index);
__ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
__ ldr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
}
void TemplateTable::load_resolved_method_entry_handle(Register cache,
Register method,
Register ref_index,
Register flags) {
// setup registers
const Register index = ref_index;
assert_different_registers(method, flags);
assert_different_registers(method, cache, index);
// determine constant pool cache field offsets
resolve_cache_and_index_for_method(f1_byte, cache, index);
__ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
// maybe push appendix to arguments (just before return address)
Label L_no_push;
__ tbz(flags, ResolvedMethodEntry::has_appendix_shift, L_no_push);
// invokehandle uses an index into the resolved references array
__ load_unsigned_short(ref_index, Address(cache, in_bytes(ResolvedMethodEntry::resolved_references_index_offset())));
// Push the appendix as a trailing parameter.
// This must be done before we get the receiver,
// since the parameter_size includes it.
Register appendix = method;
__ load_resolved_reference_at_index(appendix, ref_index);
__ push(appendix); // push appendix (MethodType, CallSite, etc.)
__ bind(L_no_push);
__ ldr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
}
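As the comments note, the appendix is pushed before the receiver is located because the recorded parameter count already includes it. A hedged sketch of that step; the types, helper names, and the flag bit position are illustrative assumptions:

#include <cstdint>
#include <vector>

using Oop = void*;  // illustrative stand-in for an oop

struct MethodEntrySketch {                      // stands in for ResolvedMethodEntry
  uint8_t  flags;
  uint16_t resolved_references_index;
  static constexpr uint8_t has_appendix_bit = 1;  // assumed bit position
};

inline void push_appendix_if_any(const MethodEntrySketch& e,
                                 const std::vector<Oop>& resolved_references,
                                 std::vector<Oop>& expression_stack) {
  if (e.flags & MethodEntrySketch::has_appendix_bit) {
    // The appendix (MethodType, CallSite, ...) becomes a trailing argument,
    // so it must be on the stack before the receiver offset is computed.
    expression_stack.push_back(resolved_references[e.resolved_references_index]);
  }
}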
void TemplateTable::load_resolved_method_entry_interface(Register cache,
Register klass,
Register method_or_table_index,
Register flags) {
// setup registers
const Register index = method_or_table_index;
assert_different_registers(method_or_table_index, cache, flags);
// determine constant pool cache field offsets
resolve_cache_and_index_for_method(f1_byte, cache, index);
__ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
// Invokeinterface can behave in different ways:
// If calling a method from java.lang.Object, the forced virtual flag is true so the invocation will
// behave like an invokevirtual call. The state of the virtual final flag will determine whether a method or
// vtable index is placed in the register.
// Otherwise, the registers will be populated with the klass and method.
Label NotVirtual; Label NotVFinal; Label Done;
__ tbz(flags, ResolvedMethodEntry::is_forced_virtual_shift, NotVirtual);
__ tbz(flags, ResolvedMethodEntry::is_vfinal_shift, NotVFinal);
__ ldr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
__ b(Done);
__ bind(NotVFinal);
__ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
__ b(Done);
__ bind(NotVirtual);
__ ldr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
__ ldr(klass, Address(cache, in_bytes(ResolvedMethodEntry::klass_offset())));
__ bind(Done);
}
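The comment block above lists three possible outcomes for invokeinterface; the register selection can be summarized as in the sketch below, where the flag bit positions are assumptions and only the decision structure mirrors the generated code:

#include <cstdint>

struct InterfaceEntrySketch {                   // stands in for ResolvedMethodEntry
  void*    method;
  void*    klass;
  uint16_t table_index;
  uint8_t  flags;
  static constexpr uint8_t forced_virtual_bit = 1 << 0;  // assumed
  static constexpr uint8_t vfinal_bit         = 1 << 1;  // assumed
};

struct DispatchInfo { void* klass; void* method; uint16_t table_index; bool use_table; };

inline DispatchInfo select_dispatch(const InterfaceEntrySketch& e) {
  if (e.flags & InterfaceEntrySketch::forced_virtual_bit) {
    // java.lang.Object method: behave like invokevirtual.
    if (e.flags & InterfaceEntrySketch::vfinal_bit)
      return {nullptr, e.method, 0, false};            // direct call to the final method
    return {nullptr, nullptr, e.table_index, true};    // vtable dispatch
  }
  // Regular interface call: caller needs both the interface klass and the method.
  return {e.klass, e.method, 0, false};
}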
void TemplateTable::load_resolved_method_entry_virtual(Register cache,
Register method_or_table_index,
Register flags) {
// setup registers
const Register index = flags;
assert_different_registers(method_or_table_index, cache, flags);
// determine constant pool cache field offsets
resolve_cache_and_index_for_method(f2_byte, cache, index);
__ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
// method_or_table_index can either be an itable index or a method depending on the virtual final flag
Label NotVFinal; Label Done;
__ tbz(flags, ResolvedMethodEntry::is_vfinal_shift, NotVFinal);
__ ldr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
__ b(Done);
__ bind(NotVFinal);
__ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
__ bind(Done);
}
// The rmethod register is input and overwritten to be the adapter method for the
@ -2454,44 +2534,6 @@ void TemplateTable::load_invokedynamic_entry(Register method) {
}
}
void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
Register method,
Register itable_index,
Register flags,
bool is_invokevirtual,
bool is_invokevfinal, /*unused*/
bool is_invokedynamic /*unused*/) {
// setup registers
const Register cache = rscratch2;
const Register index = r4;
assert_different_registers(method, flags);
assert_different_registers(method, cache, index);
assert_different_registers(itable_index, flags);
assert_different_registers(itable_index, cache, index);
// determine constant pool cache field offsets
assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
const int method_offset = in_bytes(
ConstantPoolCache::base_offset() +
(is_invokevirtual
? ConstantPoolCacheEntry::f2_offset()
: ConstantPoolCacheEntry::f1_offset()));
const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::flags_offset());
// access constant pool cache fields
const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::f2_offset());
size_t index_size = sizeof(u2);
resolve_cache_and_index(byte_no, cache, index, index_size);
__ ldr(method, Address(cache, method_offset));
if (itable_index != noreg) {
__ ldr(itable_index, Address(cache, index_offset));
}
__ ldrw(flags, Address(cache, flags_offset));
}
// The registers cache and index expected to be set before call.
// Correct values of the cache and index registers are preserved.
void TemplateTable::jvmti_post_field_access(Register cache, Register index,
@ -3242,68 +3284,25 @@ void TemplateTable::fast_xaccess(TosState state)
//-----------------------------------------------------------------------------
// Calls
void TemplateTable::prepare_invoke(int byte_no,
Register method, // linked method (or i-klass)
Register index, // itable index, MethodType, etc.
Register recv, // if caller wants to see it
Register flags // if caller wants to test it
) {
// determine flags
Bytecodes::Code code = bytecode();
const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
const bool is_invokehandle = code == Bytecodes::_invokehandle;
const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
const bool is_invokespecial = code == Bytecodes::_invokespecial;
const bool load_receiver = (recv != noreg);
const bool save_flags = (flags != noreg);
assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
assert(flags == noreg || flags == r3, "");
assert(recv == noreg || recv == r2, "");
void TemplateTable::prepare_invoke(Register cache, Register recv) {
// setup registers & access constant pool cache
if (recv == noreg) recv = r2;
if (flags == noreg) flags = r3;
assert_different_registers(method, index, recv, flags);
Bytecodes::Code code = bytecode();
const bool load_receiver = (code != Bytecodes::_invokestatic) && (code != Bytecodes::_invokedynamic);
// save 'interpreter return address'
__ save_bcp();
load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
// maybe push appendix to arguments (just before return address)
if (is_invokehandle) {
Label L_no_push;
__ tbz(flags, ConstantPoolCacheEntry::has_appendix_shift, L_no_push);
// Push the appendix as a trailing parameter.
// This must be done before we get the receiver,
// since the parameter_size includes it.
__ push(r19);
__ mov(r19, index);
__ load_resolved_reference_at_index(index, r19);
__ pop(r19);
__ push(index); // push appendix (MethodType, CallSite, etc.)
__ bind(L_no_push);
}
// Load TOS state for later
__ load_unsigned_byte(rscratch2, Address(cache, in_bytes(ResolvedMethodEntry::type_offset())));
// load receiver if needed (note: no return address pushed yet)
if (load_receiver) {
__ andw(recv, flags, ConstantPoolCacheEntry::parameter_size_mask);
// FIXME -- is this actually correct? looks like it should be 2
// const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address
// const int receiver_is_at_end = -1; // back off one slot to get receiver
// Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
// __ movptr(recv, recv_addr);
__ add(rscratch1, esp, recv, ext::uxtx, 3); // FIXME: uxtb here?
__ load_unsigned_short(recv, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));
__ add(rscratch1, esp, recv, ext::uxtx, 3);
__ ldr(recv, Address(rscratch1, -Interpreter::expr_offset_in_bytes(1)));
__ verify_oop(recv);
}
// compute return type
// x86 uses a shift and mask or wings it with a shift plus assert
// the mask is not needed. aarch64 just uses bitfield extract
__ ubfxw(rscratch2, flags, ConstantPoolCacheEntry::tos_state_shift, ConstantPoolCacheEntry::tos_state_bits);
// load return address
{
const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
@ -3321,7 +3320,7 @@ void TemplateTable::invokevirtual_helper(Register index,
assert_different_registers(index, recv, r0, r3);
// Test for an invoke of a final method
Label notFinal;
__ tbz(flags, ConstantPoolCacheEntry::is_vfinal_shift, notFinal);
__ tbz(flags, ResolvedMethodEntry::is_vfinal_shift, notFinal);
const Register method = index; // method must be rmethod
assert(method == rmethod,
@ -3360,7 +3359,10 @@ void TemplateTable::invokevirtual(int byte_no)
transition(vtos, vtos);
assert(byte_no == f2_byte, "use this argument");
prepare_invoke(byte_no, rmethod, noreg, r2, r3);
load_resolved_method_entry_virtual(r2, // ResolvedMethodEntry*
rmethod, // Method* or itable index
r3); // flags
prepare_invoke(r2, r2); // recv
// rmethod: index (actually a Method*)
// r2: receiver
@ -3374,8 +3376,10 @@ void TemplateTable::invokespecial(int byte_no)
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(byte_no, rmethod, noreg, // get f1 Method*
r2); // get receiver also for null check
load_resolved_method_entry_special_or_static(r2, // ResolvedMethodEntry*
rmethod, // Method*
r3); // flags
prepare_invoke(r2, r2); // get receiver also for null check
__ verify_oop(r2);
__ null_check(r2);
// do the call
@ -3389,7 +3393,11 @@ void TemplateTable::invokestatic(int byte_no)
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(byte_no, rmethod); // get f1 Method*
load_resolved_method_entry_special_or_static(r2, // ResolvedMethodEntry*
rmethod, // Method*
r3); // flags
prepare_invoke(r2, r2); // get receiver also for null check
// do the call
__ profile_call(r0);
__ profile_arguments_type(r0, rmethod, r4, false);
@ -3405,8 +3413,11 @@ void TemplateTable::invokeinterface(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(byte_no, r0, rmethod, // get f1 Klass*, f2 Method*
r2, r3); // recv, flags
load_resolved_method_entry_interface(r2, // ResolvedMethodEntry*
r0, // Klass*
rmethod, // Method* or itable/vtable index
r3); // flags
prepare_invoke(r2, r2); // receiver
// r0: interface klass (from f1)
// rmethod: method (from f2)
@ -3419,7 +3430,7 @@ void TemplateTable::invokeinterface(int byte_no) {
// Special case of invokeinterface called for virtual method of
// java.lang.Object. See cpCache.cpp for details.
Label notObjectMethod;
__ tbz(r3, ConstantPoolCacheEntry::is_forced_virtual_shift, notObjectMethod);
__ tbz(r3, ResolvedMethodEntry::is_forced_virtual_shift, notObjectMethod);
invokevirtual_helper(rmethod, r2, r3);
__ bind(notObjectMethod);
@ -3428,7 +3439,7 @@ void TemplateTable::invokeinterface(int byte_no) {
// Check for private method invocation - indicated by vfinal
Label notVFinal;
__ tbz(r3, ConstantPoolCacheEntry::is_vfinal_shift, notVFinal);
__ tbz(r3, ResolvedMethodEntry::is_vfinal_shift, notVFinal);
// Get receiver klass into r3
__ load_klass(r3, r2);
@ -3525,7 +3536,12 @@ void TemplateTable::invokehandle(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(byte_no, rmethod, r0, r2);
load_resolved_method_entry_handle(r2, // ResolvedMethodEntry*
rmethod, // Method*
r0, // Resolved reference
r3); // flags
prepare_invoke(r2, r2);
__ verify_method_ptr(r2);
__ verify_oop(r2);
__ null_check(r2);
@ -3547,9 +3563,9 @@ void TemplateTable::invokedynamic(int byte_no) {
load_invokedynamic_entry(rmethod);
// r0: CallSite object (from cpool->resolved_references[])
// rmethod: MH.linkToCallSite method (from f2)
// rmethod: MH.linkToCallSite method
// Note: r0_callsite is already pushed by prepare_invoke
// Note: r0_callsite is already pushed
// %%% should make a type profile for any invokedynamic that takes a ref argument
// profile this call

View File

@ -26,12 +26,7 @@
#ifndef CPU_AARCH64_TEMPLATETABLE_AARCH64_HPP
#define CPU_AARCH64_TEMPLATETABLE_AARCH64_HPP
static void prepare_invoke(int byte_no,
Register method, // linked method (or i-klass)
Register index = noreg, // itable index, MethodType, etc.
Register recv = noreg, // if caller wants to see it
Register flags = noreg // if caller wants to test it
);
static void prepare_invoke(Register cache, Register recv);
static void invokevirtual_helper(Register index, Register recv,
Register flags);

View File

@ -84,8 +84,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
// load cpool->resolved_klass_at(index)
void load_resolved_klass_at_offset(Register Rcpool, Register Roffset, Register Rklass);
void load_resolved_method_at_index(int byte_no, Register cache, Register method);
void load_receiver(Register Rparam_count, Register Rrecv_dst);
// helpers for expression stack
@ -126,9 +124,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
void get_cache_index_at_bcp(Register Rdst, int bcp_offset, size_t index_size);
void get_cache_and_index_at_bcp(Register cache, int bcp_offset, size_t index_size = sizeof(u2));
void load_resolved_indy_entry(Register cache, Register index);
void load_field_entry(Register cache, Register index, int bcp_offset = 1);
void load_method_entry(Register cache, Register index, int bcp_offset = 1);
void get_u4(Register Rdst, Register Rsrc, int offset, signedOrNot is_signed);

View File

@ -34,6 +34,7 @@
#include "oops/methodData.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/frame.inline.hpp"
@ -453,13 +454,6 @@ void InterpreterMacroAssembler::get_cache_index_at_bcp(Register Rdst, int bcp_of
// Rdst now contains cp cache index.
}
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, int bcp_offset,
size_t index_size) {
get_cache_index_at_bcp(cache, bcp_offset, index_size);
sldi(cache, cache, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord));
add(cache, R27_constPoolCache, cache);
}
// Load 4-byte signed or unsigned integer in Java format (that is, big-endian format)
// from (Rsrc)+offset.
void InterpreterMacroAssembler::get_u4(Register Rdst, Register Rsrc, int offset,
@ -484,13 +478,14 @@ void InterpreterMacroAssembler::get_u4(Register Rdst, Register Rsrc, int offset,
}
void InterpreterMacroAssembler::load_resolved_indy_entry(Register cache, Register index) {
// Get index out of bytecode pointer, get_cache_entry_pointer_at_bcp
// Get index out of bytecode pointer
get_cache_index_at_bcp(index, 1, sizeof(u4));
// Get address of invokedynamic array
ld_ptr(cache, in_bytes(ConstantPoolCache::invokedynamic_entries_offset()), R27_constPoolCache);
// Scale the index to be the entry index * sizeof(ResolvedIndyEntry)
sldi(index, index, log2i_exact(sizeof(ResolvedIndyEntry)));
addi(cache, cache, Array<ResolvedIndyEntry>::base_offset_in_bytes());
add(cache, cache, index);
}
@ -511,6 +506,18 @@ void InterpreterMacroAssembler::load_field_entry(Register cache, Register index,
add(cache, cache, index);
}
void InterpreterMacroAssembler::load_method_entry(Register cache, Register index, int bcp_offset) {
// Get index out of bytecode pointer
get_cache_index_at_bcp(index, bcp_offset, sizeof(u2));
// Scale the index to be the entry index * sizeof(ResolvedMethodEntry)
mulli(index, index, sizeof(ResolvedMethodEntry));
// Get address of method entries array
ld_ptr(cache, ConstantPoolCache::method_entries_offset(), R27_constPoolCache);
addi(cache, cache, Array<ResolvedMethodEntry>::base_offset_in_bytes());
add(cache, cache, index); // method_entries + base_offset + scaled index
}
// Load object from cpool->resolved_references(index).
// Kills:
// - index
@ -564,18 +571,6 @@ void InterpreterMacroAssembler::load_resolved_klass_at_offset(Register Rcpool, R
ldx(Rklass, Rklass, Roffset);
}
void InterpreterMacroAssembler::load_resolved_method_at_index(int byte_no,
Register cache,
Register method) {
const int method_offset = in_bytes(
ConstantPoolCache::base_offset() +
((byte_no == TemplateTable::f2_byte)
? ConstantPoolCacheEntry::f2_offset()
: ConstantPoolCacheEntry::f1_offset()));
ld(method, method_offset, cache); // get f1 Method*
}
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. Blows registers Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass, Register Rsuper_klass, Register Rtmp1,

View File

@ -40,6 +40,7 @@
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
@ -647,16 +648,11 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
const Register size = R12_scratch2;
if (index_size == sizeof(u4)) {
__ load_resolved_indy_entry(cache, size /* tmp */);
__ lhz(size, Array<ResolvedIndyEntry>::base_offset_in_bytes() + in_bytes(ResolvedIndyEntry::num_parameters_offset()), cache);
__ lhz(size, in_bytes(ResolvedIndyEntry::num_parameters_offset()), cache);
} else {
__ get_cache_and_index_at_bcp(cache, 1, index_size);
// Get least significant byte of 64 bit value:
#if defined(VM_LITTLE_ENDIAN)
__ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()), cache);
#else
__ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()) + 7, cache);
#endif
assert(index_size == sizeof(u2), "Can only be u2");
__ load_method_entry(cache, size /* tmp */);
__ lhz(size, in_bytes(ResolvedMethodEntry::num_parameters_offset()), cache);
}
__ sldi(size, size, Interpreter::logStackElementSize);
__ add(R15_esp, R15_esp, size);

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2016 SAP SE. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,11 +26,10 @@
#ifndef CPU_PPC_TEMPLATETABLE_PPC_HPP
#define CPU_PPC_TEMPLATETABLE_PPC_HPP
static void prepare_invoke(int byte_no, Register Rmethod, Register Rret_addr, Register Rindex, Register Rrecv, Register Rflags,
Register Rscratch1, Register Rscratch2);
static void invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2);
static void prepare_invoke(Register Rcache, Register Rret_addr, Register Rrecv, Register Rscratch);
static void invokevfinal_helper(Register Rcache, Register Rscratch1, Register Rscratch2, Register Rscratch3);
static void generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp);
static void invokeinterface_object_method(Register Rrecv_klass, Register Rret, Register Rflags, Register Rindex, Register Rtemp, Register Rtemp2);
static void invokeinterface_object_method(Register Rrecv_klass, Register Rret, Register Rflags, Register Rcache, Register Rtemp, Register Rtemp2);
// Branch_conditional which takes TemplateTable::Condition.
static void branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert = false);

View File

@ -41,6 +41,7 @@
#include "oops/oop.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
@ -389,10 +390,10 @@ void TemplateTable::condy_helper(Label& Done) {
// VMr = obj = base address to find primitive value to push
// VMr2 = flags = (tos, off) using format of CPCE::_flags
__ andi(off, flags, ConstantPoolCacheEntry::field_index_mask);
__ andi(off, flags, ConstantPoolCache::field_index_mask);
// What sort of thing are we loading?
__ rldicl(flags, flags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
__ rldicl(flags, flags, 64-ConstantPoolCache::tos_state_shift, 64-ConstantPoolCache::tos_state_bits);
switch (bytecode()) {
case Bytecodes::_ldc:
@ -2191,14 +2192,13 @@ void TemplateTable::_return(TosState state) {
//
// Returns:
// - Rcache: The const pool cache entry that contains the resolved result.
// - Rresult: Either noreg or output for f1/f2.
//
// Kills:
// - Rscratch
void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) {
__ get_cache_and_index_at_bcp(Rcache, 1, index_size);
void TemplateTable::resolve_cache_and_index_for_method(int byte_no, Register Rcache, Register Rscratch) {
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
Label Lresolved, Ldone, L_clinit_barrier_slow;
Register Rindex = Rscratch;
Bytecodes::Code code = bytecode();
switch (code) {
@ -2208,13 +2208,11 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
break;
}
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
// We are resolved if the indices offset contains the current bytecode.
#if defined(VM_LITTLE_ENDIAN)
__ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache);
#else
__ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
#endif
const int bytecode_offset = (byte_no == f1_byte) ? in_bytes(ResolvedMethodEntry::bytecode1_offset())
: in_bytes(ResolvedMethodEntry::bytecode2_offset());
__ load_method_entry(Rcache, Rindex);
// Load-acquire the bytecode to match store-release in InterpreterRuntime
__ lbz(Rscratch, bytecode_offset, Rcache);
// Acquire by cmp-br-isync (see below).
__ cmpdi(CCR0, Rscratch, (int)code);
__ beq(CCR0, Lresolved);
@ -2227,7 +2225,7 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
__ call_VM(noreg, entry, R4_ARG2, true);
// Update registers with resolved info.
__ get_cache_and_index_at_bcp(Rcache, 1, index_size);
__ load_method_entry(Rcache, Rindex);
__ b(Ldone);
__ bind(Lresolved);
@ -2238,7 +2236,7 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
const Register method = Rscratch;
const Register klass = Rscratch;
__ load_resolved_method_at_index(byte_no, Rcache, method);
__ ld(method, in_bytes(ResolvedMethodEntry::method_offset()), Rcache);
__ load_method_holder(klass, method);
__ clinit_barrier(klass, R16_thread, nullptr /*L_fast_path*/, &L_clinit_barrier_slow);
}
@ -2308,32 +2306,73 @@ void TemplateTable::load_resolved_field_entry(Register obj,
}
}
// Load the constant pool cache entry at field accesses into registers.
// The Rcache and Rindex registers must be set before call.
// Input:
// - Rcache, Rindex
// Output:
// - Robj, Roffset, Rflags
// Kills:
// - R11, R12
void TemplateTable::load_field_cp_cache_entry(Register Robj,
Register Rcache,
Register Rindex /* unused on PPC64 */,
Register Roffset,
Register Rflags,
bool is_static) {
assert_different_registers(Rcache, Rflags, Roffset, R11_scratch1, R12_scratch2);
assert(Rindex == noreg, "parameter not used on PPC64");
void TemplateTable::load_resolved_method_entry_special_or_static(Register cache,
Register method,
Register flags) {
assert_different_registers(cache, method, flags);
ByteSize cp_base_offset = ConstantPoolCache::base_offset();
__ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache);
__ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcache);
if (is_static) {
__ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache);
__ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj);
__ resolve_oop_handle(Robj, R11_scratch1, R12_scratch2, MacroAssembler::PRESERVATION_NONE);
// Acquire not needed here. Following access has an address dependency on this value.
// determine constant pool cache field offsets
resolve_cache_and_index_for_method(f1_byte, cache, method /* tmp */);
if (flags != noreg) {
__ lbz(flags, in_bytes(ResolvedMethodEntry::flags_offset()), cache);
}
__ ld(method, in_bytes(ResolvedMethodEntry::method_offset()), cache);
}
void TemplateTable::load_resolved_method_entry_handle(Register cache,
Register method,
Register ref_index,
Register flags) {
// setup registers
assert_different_registers(cache, method, ref_index, flags);
// determine constant pool cache field offsets
resolve_cache_and_index_for_method(f1_byte, cache, method /* tmp */);
__ lbz(flags, in_bytes(ResolvedMethodEntry::flags_offset()), cache);
// maybe push appendix to arguments (just before return address)
Label L_no_push;
__ testbitdi(CCR0, R0, flags, ResolvedMethodEntry::has_appendix_shift);
__ bfalse(CCR0, L_no_push);
// invokehandle uses an index into the resolved references array
__ lhz(ref_index, in_bytes(ResolvedMethodEntry::resolved_references_index_offset()), cache);
// Push the appendix as a trailing parameter.
// This must be done before we get the receiver,
// since the parameter_size includes it.
Register appendix = method;
assert(cache->is_nonvolatile(), "C-call in resolve_oop_handle");
__ load_resolved_reference_at_index(appendix, ref_index, R11_scratch1, R12_scratch2);
__ verify_oop(appendix);
__ push_ptr(appendix); // push appendix (MethodType, CallSite, etc.)
__ bind(L_no_push);
__ ld(method, in_bytes(ResolvedMethodEntry::method_offset()), cache);
}
void TemplateTable::load_resolved_method_entry_interface(Register cache,
Register klass,
Register method_or_table_index,
Register flags) {
// setup registers
assert_different_registers(method_or_table_index, cache, flags);
assert(klass == noreg, "to be determined by caller");
assert(method_or_table_index == noreg, "to be determined by caller");
// determine constant pool cache field offsets
resolve_cache_and_index_for_method(f1_byte, cache, flags /* tmp */);
__ lbz(flags, in_bytes(ResolvedMethodEntry::flags_offset()), cache);
}
void TemplateTable::load_resolved_method_entry_virtual(Register cache,
Register method_or_table_index,
Register flags) {
// setup registers
assert_different_registers(cache, flags);
assert(method_or_table_index == noreg, "to be determined by caller");
// determine constant pool cache field offsets
resolve_cache_and_index_for_method(f2_byte, cache, flags /* tmp */);
__ lbz(flags, in_bytes(ResolvedMethodEntry::flags_offset()), cache);
}
// Sets registers:
@ -2348,13 +2387,12 @@ void TemplateTable::load_invokedynamic_entry(Register method) {
const Register cache = R31;
const Register index = R21_tmp1;
const Register tmp = R11_scratch1;
const int array_base_offset = Array<ResolvedIndyEntry>::base_offset_in_bytes();
assert_different_registers(method, appendix, cache, index, tmp);
Label resolved;
__ load_resolved_indy_entry(cache, index);
__ ld_ptr(method, array_base_offset + in_bytes(ResolvedIndyEntry::method_offset()), cache);
__ ld_ptr(method, in_bytes(ResolvedIndyEntry::method_offset()), cache);
// The invokedynamic is unresolved iff method is null
__ cmpdi(CCR0, method, 0);
@ -2368,7 +2406,7 @@ void TemplateTable::load_invokedynamic_entry(Register method) {
__ call_VM(noreg, entry, R4_ARG2, true);
// Update registers with resolved info
__ load_resolved_indy_entry(cache, index);
__ ld_ptr(method, array_base_offset + in_bytes(ResolvedIndyEntry::method_offset()), cache);
__ ld_ptr(method, in_bytes(ResolvedIndyEntry::method_offset()), cache);
DEBUG_ONLY(__ cmpdi(CCR0, method, 0));
__ asm_assert_ne("Should be resolved by now");
@ -2377,12 +2415,12 @@ void TemplateTable::load_invokedynamic_entry(Register method) {
Label L_no_push;
// Check if there is an appendix
__ lbz(index, array_base_offset + in_bytes(ResolvedIndyEntry::flags_offset()), cache);
__ lbz(index, in_bytes(ResolvedIndyEntry::flags_offset()), cache);
__ rldicl_(R0, index, 64-ResolvedIndyEntry::has_appendix_shift, 63);
__ beq(CCR0, L_no_push);
// Get appendix
__ lhz(index, array_base_offset + in_bytes(ResolvedIndyEntry::resolved_references_index_offset()), cache);
__ lhz(index, in_bytes(ResolvedIndyEntry::resolved_references_index_offset()), cache);
// Push the appendix as a trailing parameter
assert(cache->is_nonvolatile(), "C-call in resolve_oop_handle");
__ load_resolved_reference_at_index(appendix, index, /* temp */ ret_addr, tmp);
@ -2396,7 +2434,7 @@ void TemplateTable::load_invokedynamic_entry(Register method) {
address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
// compute return type
__ lbz(index, array_base_offset + in_bytes(ResolvedIndyEntry::result_type_offset()), cache);
__ lbz(index, in_bytes(ResolvedIndyEntry::result_type_offset()), cache);
__ load_dispatch_table(Rtable_addr, (address*)table_addr);
__ sldi(index, index, LogBytesPerWord);
// Get return address.
@ -2404,54 +2442,6 @@ void TemplateTable::load_invokedynamic_entry(Register method) {
}
}
// Load the constant pool cache entry at invokes into registers.
// Resolve if necessary.
// Input Registers:
// - None, bcp is used, though
//
// Return registers:
// - Rmethod (f1 field or f2 if invokevirtual)
// - Ritable_index (f2 field)
// - Rflags (flags field)
//
// Kills:
// - R21
//
void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
Register Rmethod,
Register Ritable_index,
Register Rflags,
bool is_invokevirtual,
bool is_invokevfinal,
bool is_invokedynamic /*unused*/) {
ByteSize cp_base_offset = ConstantPoolCache::base_offset();
// Determine constant pool cache field offsets.
assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
const int method_offset = in_bytes(cp_base_offset + (is_invokevirtual ? ConstantPoolCacheEntry::f2_offset() : ConstantPoolCacheEntry::f1_offset()));
const int flags_offset = in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset());
// Access constant pool cache fields.
const int index_offset = in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset());
Register Rcache = R21_tmp1; // Note: same register as R21_sender_SP.
if (is_invokevfinal) {
assert(Ritable_index == noreg, "register not used");
// Already resolved.
__ get_cache_and_index_at_bcp(Rcache, 1);
} else {
resolve_cache_and_index(byte_no, Rcache, /* temp */ Rmethod, sizeof(u2));
}
__ ld(Rmethod, method_offset, Rcache);
__ ld(Rflags, flags_offset, Rcache);
if (Ritable_index != noreg) {
__ ld(Ritable_index, index_offset, Rcache);
}
}
// ============================================================================
// Field access
@ -3422,81 +3412,31 @@ void TemplateTable::fast_xaccess(TosState state) {
// ============================================================================
// Calls
// Common code for invoke
//
// Input:
// - byte_no
//
// Output:
// - Rmethod: The method to invoke next or i-klass (invokeinterface).
// - Rret_addr: The return address to return to.
// - Rindex: MethodType (invokehandle), CallSite obj (invokedynamic) or Method (invokeinterface)
// - Rrecv: Cache for "this" pointer, might be noreg if static call.
// - Rflags: Method flags from const pool cache.
//
// Kills:
// - Rscratch1
//
void TemplateTable::prepare_invoke(int byte_no,
Register Rmethod, // linked method (or i-klass)
void TemplateTable::prepare_invoke(Register Rcache,
Register Rret_addr,// return address
Register Rindex, // itable index, MethodType, Method, etc.
Register Rrecv, // If caller wants to see it.
Register Rflags, // If caller wants to test it.
Register Rscratch1,
Register Rscratch2
Register Rscratch
) {
// Determine flags.
const Bytecodes::Code code = bytecode();
const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
const bool is_invokedynamic = false; // should not reach here with invokedynamic
const bool is_invokehandle = code == Bytecodes::_invokehandle;
const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
const bool is_invokespecial = code == Bytecodes::_invokespecial;
const bool load_receiver = (Rrecv != noreg);
const bool load_receiver = (Rrecv != noreg);
assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
assert_different_registers(Rmethod, Rindex, Rflags, Rscratch1);
assert_different_registers(Rmethod, Rrecv, Rflags, Rscratch1);
assert_different_registers(Rret_addr, Rscratch1);
load_invoke_cp_cache_entry(byte_no, Rmethod, Rindex, Rflags, is_invokevirtual, false, is_invokedynamic);
// Saving of SP done in call_from_interpreter.
// Maybe push "appendix" to arguments.
if (is_invokehandle) {
Label Ldone;
Register reference = Rscratch1;
__ rldicl_(R0, Rflags, 64-ConstantPoolCacheEntry::has_appendix_shift, 63);
__ beq(CCR0, Ldone);
// Push "appendix" (MethodType, CallSite, etc.).
// This must be done before we get the receiver,
// since the parameter_size includes it.
__ load_resolved_reference_at_index(reference, Rindex, /* temp */ Rret_addr, Rscratch2);
__ verify_oop(reference);
__ push_ptr(reference);
__ bind(Ldone);
}
// Load receiver if needed (after appendix is pushed so parameter size is correct).
if (load_receiver) {
Register Rparam_count = Rscratch1;
__ andi(Rparam_count, Rflags, ConstantPoolCacheEntry::parameter_size_mask);
Register Rparam_count = Rscratch;
__ lhz(Rparam_count, in_bytes(ResolvedMethodEntry::num_parameters_offset()), Rcache);
__ load_receiver(Rparam_count, Rrecv);
__ verify_oop(Rrecv);
}
// Get return address.
{
Register Rtable_addr = Rscratch1;
Register Rtable_addr = Rscratch;
Register Rret_type = Rret_addr;
address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
// Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
__ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
__ lbz(Rret_type, in_bytes(ResolvedMethodEntry::type_offset()), Rcache);
__ load_dispatch_table(Rtable_addr, (address*)table_addr);
__ sldi(Rret_type, Rret_type, LogBytesPerWord);
// Get return address.
@ -3527,40 +3467,35 @@ void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex,
void TemplateTable::invokevirtual(int byte_no) {
transition(vtos, vtos);
Register Rtable_addr = R11_scratch1,
Rret_type = R12_scratch2,
Rret_addr = R5_ARG3,
Register Rret_addr = R5_ARG3,
Rflags = R22_tmp2, // Should survive C call.
Rrecv = R3_ARG1,
Rrecv_klass = Rrecv,
Rvtableindex_or_method = R31, // Should survive C call.
Rnum_params = R4_ARG2,
Rnew_bc = R6_ARG4;
Rnew_bc = R6_ARG4,
Rcache = R7_ARG5;
Label LnotFinal;
load_invoke_cp_cache_entry(byte_no, Rvtableindex_or_method, noreg, Rflags, /*virtual*/ true, false, false);
load_resolved_method_entry_virtual(Rcache, noreg, Rflags);
__ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
// Handle final method separately.
__ testbitdi(CCR0, R0, Rflags, ResolvedMethodEntry::is_vfinal_shift);
__ bfalse(CCR0, LnotFinal);
if (RewriteBytecodes && !UseSharedSpaces && !DumpSharedSpaces) {
patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
}
invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2);
invokevfinal_helper(Rcache, R11_scratch1, R12_scratch2, Rflags /* tmp */);
__ align(32, 12);
__ bind(LnotFinal);
// Load "this" pointer (receiver).
__ rldicl(Rnum_params, Rflags, 64, 48);
__ load_receiver(Rnum_params, Rrecv);
__ verify_oop(Rrecv);
prepare_invoke(Rcache, Rret_addr, Rrecv, R11_scratch1);
// Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
__ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
__ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
__ sldi(Rret_type, Rret_type, LogBytesPerWord);
__ ldx(Rret_addr, Rret_type, Rtable_addr);
// Get vtable index.
__ lhz(Rvtableindex_or_method, in_bytes(ResolvedMethodEntry::table_index_offset()), Rcache);
// Get receiver klass.
__ load_klass_check_null_throw(Rrecv_klass, Rrecv, R11_scratch1);
__ verify_klass_ptr(Rrecv_klass);
__ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false);
@ -3572,32 +3507,33 @@ void TemplateTable::fast_invokevfinal(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f2_byte, "use this argument");
Register Rflags = R22_tmp2,
Rmethod = R31;
load_invoke_cp_cache_entry(byte_no, Rmethod, noreg, Rflags, /*virtual*/ true, /*is_invokevfinal*/ true, false);
invokevfinal_helper(Rmethod, Rflags, R11_scratch1, R12_scratch2);
Register Rcache = R31;
__ load_method_entry(Rcache, R11_scratch1);
invokevfinal_helper(Rcache, R11_scratch1, R12_scratch2, R22_tmp2);
}
void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2) {
void TemplateTable::invokevfinal_helper(Register Rcache, Register Rscratch1, Register Rscratch2, Register Rscratch3) {
assert_different_registers(Rmethod, Rflags, Rscratch1, Rscratch2);
assert_different_registers(Rcache, Rscratch1, Rscratch2, Rscratch3);
// Load receiver from stack slot.
Register Rrecv = Rscratch2;
Register Rnum_params = Rrecv;
__ ld(Rnum_params, in_bytes(Method::const_offset()), Rmethod);
__ lhz(Rnum_params /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), Rnum_params);
Register Rmethod = Rscratch3;
__ ld(Rmethod, in_bytes(ResolvedMethodEntry::method_offset()), Rcache);
// Get return address.
Register Rtable_addr = Rscratch1,
Rret_addr = Rflags,
Rret_type = Rret_addr;
Register Rtable_addr = Rscratch2,
Rret_addr = Rcache,
Rret_type = Rscratch1;
// Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
__ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
__ lbz(Rret_type, in_bytes(ResolvedMethodEntry::type_offset()), Rcache);
__ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
__ sldi(Rret_type, Rret_type, LogBytesPerWord);
__ ldx(Rret_addr, Rret_type, Rtable_addr);
__ ldx(Rret_addr, Rret_type, Rtable_addr); // kills Rcache
Register Rnum_params = Rscratch2,
Rrecv = Rscratch2;
__ ld(Rnum_params, in_bytes(Method::const_offset()), Rmethod);
__ lhz(Rnum_params /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), Rnum_params);
// Load receiver and receiver null check.
__ load_receiver(Rnum_params, Rrecv);
@ -3615,13 +3551,15 @@ void TemplateTable::invokespecial(int byte_no) {
assert(byte_no == f1_byte, "use this argument");
transition(vtos, vtos);
Register Rtable_addr = R3_ARG1,
Register Rcache = R3_ARG1,
Rret_addr = R4_ARG2,
Rflags = R5_ARG3,
Rreceiver = R6_ARG4,
Rreceiver = R5_ARG3,
Rmethod = R31;
prepare_invoke(byte_no, Rmethod, Rret_addr, noreg, Rreceiver, Rflags, R11_scratch1, R12_scratch2);
load_resolved_method_entry_special_or_static(Rcache, // ResolvedMethodEntry*
Rmethod, // Method* or itable index
noreg); // flags
prepare_invoke(Rcache, Rret_addr, Rreceiver, R11_scratch1); // recv
// Receiver null check.
__ null_check_throw(Rreceiver, -1, R11_scratch1);
@ -3636,11 +3574,13 @@ void TemplateTable::invokestatic(int byte_no) {
assert(byte_no == f1_byte, "use this argument");
transition(vtos, vtos);
Register Rtable_addr = R3_ARG1,
Rret_addr = R4_ARG2,
Rflags = R5_ARG3;
Register Rcache = R3_ARG1,
Rret_addr = R4_ARG2;
prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1, R12_scratch2);
load_resolved_method_entry_special_or_static(Rcache, // ResolvedMethodEntry*
R19_method, // Method* or itable index
noreg); // flags
prepare_invoke(Rcache, Rret_addr, noreg, R11_scratch1); // recv
__ profile_call(R11_scratch1, R12_scratch2);
// Argument and return type profiling.
@ -3651,30 +3591,34 @@ void TemplateTable::invokestatic(int byte_no) {
void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
Register Rret,
Register Rflags,
Register Rmethod,
Register Rcache,
Register Rtemp1,
Register Rtemp2) {
assert_different_registers(Rmethod, Rret, Rrecv_klass, Rflags, Rtemp1, Rtemp2);
assert_different_registers(Rcache, Rret, Rrecv_klass, Rflags, Rtemp1, Rtemp2);
Label LnotFinal;
// Check for vfinal.
__ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
__ testbitdi(CCR0, R0, Rflags, ResolvedMethodEntry::is_vfinal_shift);
__ bfalse(CCR0, LnotFinal);
Register Rscratch = Rflags; // Rflags is dead now.
Register Rscratch = Rflags, // Rflags is dead now.
Rmethod = Rtemp2,
Rindex = Rtemp2;
// Final call case.
__ profile_final_call(Rtemp1, Rscratch);
// Argument and return type profiling.
__ ld(Rmethod, in_bytes(ResolvedMethodEntry::method_offset()), Rcache);
__ profile_arguments_type(Rmethod, Rscratch, Rrecv_klass /* scratch */, true);
// Do the final call - the index (f2) contains the method.
__ call_from_interpreter(Rmethod, Rret, Rscratch, Rrecv_klass /* scratch */);
// Non-final call case.
__ bind(LnotFinal);
__ lhz(Rindex, in_bytes(ResolvedMethodEntry::table_index_offset()), Rcache);
__ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false);
generate_vtable_call(Rrecv_klass, Rmethod, Rret, Rscratch);
generate_vtable_call(Rrecv_klass, Rindex, Rret, Rscratch);
}
void TemplateTable::invokeinterface(int byte_no) {
@ -3683,16 +3627,18 @@ void TemplateTable::invokeinterface(int byte_no) {
const Register Rscratch1 = R11_scratch1,
Rscratch2 = R12_scratch2,
Rmethod = R6_ARG4,
Rmethod2 = R9_ARG7,
Rinterface_klass = R5_ARG3,
Rret_addr = R8_ARG6,
Rindex = R10_ARG8,
Rreceiver = R3_ARG1,
Rrecv_klass = R4_ARG2,
Rflags = R7_ARG5;
Rinterface_klass = R5_ARG3,
Rmethod = R6_ARG4,
Rmethod2 = R7_ARG5,
Rret_addr = R8_ARG6,
Rindex = R9_ARG7,
Rflags = R10_ARG8,
Rcache = R31;
prepare_invoke(byte_no, Rinterface_klass, Rret_addr, Rmethod, Rreceiver, Rflags, Rscratch1, /* temp */ Rrecv_klass);
load_resolved_method_entry_interface(Rcache, noreg, noreg, Rflags);
prepare_invoke(Rcache, Rret_addr, Rreceiver, Rscratch1); // recv
// First check for Object case, then private interface method,
// then regular interface method.
@ -3702,20 +3648,23 @@ void TemplateTable::invokeinterface(int byte_no) {
// Check corner case object method.
// Special case of invokeinterface called for virtual method of
// java.lang.Object. See ConstantPoolCacheEntry::set_method() for details:
// java.lang.Object. See ResolvedMethodEntry for details:
// The invokeinterface was rewritten to an invokevirtual, hence we have
// to handle this corner case.
Label LnotObjectMethod, Lthrow_ame;
__ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift);
__ testbitdi(CCR0, R0, Rflags, ResolvedMethodEntry::is_forced_virtual_shift);
__ bfalse(CCR0, LnotObjectMethod);
invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rmethod, Rscratch1, Rscratch2);
invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rcache, Rscratch1, Rscratch2);
__ bind(LnotObjectMethod);
__ ld(Rinterface_klass, in_bytes(ResolvedMethodEntry::klass_offset()), Rcache);
__ ld(Rmethod, in_bytes(ResolvedMethodEntry::method_offset()), Rcache);
// Check for private method invocation - indicated by vfinal
Label LnotVFinal, L_no_such_interface, L_subtype;
__ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
__ testbitdi(CCR0, R0, Rflags, ResolvedMethodEntry::is_vfinal_shift);
__ bfalse(CCR0, LnotVFinal);
__ check_klass_subtype(Rrecv_klass, Rinterface_klass, Rscratch1, Rscratch2, L_subtype);
@ -3733,7 +3682,6 @@ void TemplateTable::invokeinterface(int byte_no) {
__ call_from_interpreter(Rmethod, Rret_addr, Rscratch, Rrecv_klass /* scratch */);
__ bind(LnotVFinal);
__ lookup_interface_method(Rrecv_klass, Rinterface_klass, noreg, noreg, Rscratch1, Rscratch2,
L_no_such_interface, /*return_method=*/false);
@ -3800,14 +3748,18 @@ void TemplateTable::invokehandle(int byte_no) {
transition(vtos, vtos);
const Register Rret_addr = R3_ARG1,
Rflags = R31,
Rflags = R12_scratch2,
Rrecv = R5_ARG3,
Rmethod = R22_tmp2,
Rscratch1 = R30,
Rscratch2 = R11_scratch1,
Rscratch3 = R12_scratch2;
Rcache = R31;
prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, Rrecv, Rflags, Rscratch2, Rscratch3);
load_resolved_method_entry_handle(Rcache, // ResolvedMethodEntry*
Rmethod, // Method* or itable index
Rscratch1,
Rflags);
prepare_invoke(Rcache, Rret_addr, Rrecv, Rscratch1);
__ verify_method_ptr(Rmethod);
__ null_check_throw(Rrecv, -1, Rscratch2);

View File

@ -38,6 +38,7 @@
#include "oops/methodData.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
@ -229,71 +230,6 @@ void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
}
}
// Return
// Rindex: index into constant pool
// Rcache: address of cache entry - ConstantPoolCache::base_offset()
//
// A caller must add ConstantPoolCache::base_offset() to Rcache to get
// the true address of the cache entry.
//
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
Register index,
int bcp_offset,
size_t index_size) {
assert_different_registers(cache, index);
assert_different_registers(cache, xcpool);
// register "cache" is trashed in next shadd, so lets use it as a temporary register
get_cache_index_at_bcp(index, cache, bcp_offset, index_size);
assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
// Convert from field index to ConstantPoolCacheEntry
// riscv already has the cache in xcpool so there is no need to
// install it in cache. Instead we pre-add the indexed offset to
// xcpool and return it in cache. All clients of this method need to
// be modified accordingly.
shadd(cache, index, xcpool, cache, 5);
}
void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
Register index,
Register bytecode,
int byte_no,
int bcp_offset,
size_t index_size) {
get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
// We use a 32-bit load here since the layout of 64-bit words on
// little-endian machines allow us that.
// n.b. unlike x86 cache already includes the index offset
la(bytecode, Address(cache,
ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::indices_offset()));
membar(MacroAssembler::AnyAny);
lwu(bytecode, bytecode);
membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
const int shift_count = (1 + byte_no) * BitsPerByte;
slli(bytecode, bytecode, XLEN - (shift_count + BitsPerByte));
srli(bytecode, bytecode, XLEN - BitsPerByte);
}
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
Register tmp,
int bcp_offset,
size_t index_size) {
assert_different_registers(cache, tmp);
// register "cache" is trashed in next ld, so lets use it as a temporary register
get_cache_index_at_bcp(tmp, cache, bcp_offset, index_size);
assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
// Convert from field index to ConstantPoolCacheEntry index
// and from word offset to byte offset
assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord,
"else change next line");
ld(cache, Address(fp, frame::interpreter_frame_cache_offset * wordSize));
// skip past the header
add(cache, cache, in_bytes(ConstantPoolCache::base_offset()));
// construct pointer to cache entry
shadd(cache, tmp, cache, tmp, 2 + LogBytesPerWord);
}
// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
Register result, Register index, Register tmp) {
@ -319,18 +255,6 @@ void InterpreterMacroAssembler::load_resolved_klass_at_offset(
ld(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
}
void InterpreterMacroAssembler::load_resolved_method_at_index(int byte_no,
Register method,
Register cache) {
const int method_offset = in_bytes(
ConstantPoolCache::base_offset() +
((byte_no == TemplateTable::f2_byte)
? ConstantPoolCacheEntry::f2_offset()
: ConstantPoolCacheEntry::f1_offset()));
ld(method, Address(cache, method_offset)); // get f1 Method*
}
// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.
//
@ -1960,7 +1884,6 @@ void InterpreterMacroAssembler::load_resolved_indy_entry(Register cache, Registe
slli(index, index, log2i_exact(sizeof(ResolvedIndyEntry)));
add(cache, cache, Array<ResolvedIndyEntry>::base_offset_in_bytes());
add(cache, cache, index);
la(cache, Address(cache, 0));
}
void InterpreterMacroAssembler::load_field_entry(Register cache, Register index, int bcp_offset) {
@ -1977,7 +1900,6 @@ void InterpreterMacroAssembler::load_field_entry(Register cache, Register index,
ld(cache, Address(xcpool, ConstantPoolCache::field_entries_offset()));
add(cache, cache, Array<ResolvedIndyEntry>::base_offset_in_bytes());
add(cache, cache, index);
la(cache, Address(cache, 0));
}
void InterpreterMacroAssembler::get_method_counters(Register method,
@ -1992,6 +1914,18 @@ void InterpreterMacroAssembler::get_method_counters(Register method,
bind(has_counters);
}
void InterpreterMacroAssembler::load_method_entry(Register cache, Register index, int bcp_offset) {
// Get index out of bytecode pointer
get_cache_index_at_bcp(index, cache, bcp_offset, sizeof(u2));
mv(cache, sizeof(ResolvedMethodEntry));
mul(index, index, cache); // Scale the index to be the entry index * sizeof(ResolvedMethodEntry)
// Get address of field entries array
ld(cache, Address(xcpool, ConstantPoolCache::method_entries_offset()));
add(cache, cache, Array<ResolvedMethodEntry>::base_offset_in_bytes());
add(cache, cache, index);
}
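For reference, the address arithmetic performed by load_method_entry above can be sketched in plain C++. The struct below is a stand-in with an invented layout, not the real ResolvedMethodEntry, and only illustrates the base + index * sizeof(entry) computation:

    #include <cstddef>
    #include <cstdio>

    // Illustrative stand-in; the real ResolvedMethodEntry has a different layout.
    struct FakeMethodEntry {
      void*          method;
      unsigned short num_parameters;
      unsigned char  flags;
    };

    // Mirrors load_method_entry: scale the u2 index from the bytecode stream
    // by sizeof(entry) and add it to the base of the entries array.
    FakeMethodEntry* method_entry_at(FakeMethodEntry* entries_base, unsigned short index) {
      return entries_base + index;   // == base + index * sizeof(FakeMethodEntry)
    }

    int main() {
      FakeMethodEntry entries[4] = {};
      std::size_t off = reinterpret_cast<char*>(method_entry_at(entries, 3))
                      - reinterpret_cast<char*>(entries);
      std::printf("entry 3 starts %zu bytes past the base\n", off);
    }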
#ifdef ASSERT
void InterpreterMacroAssembler::verify_access_flags(Register access_flags, uint32_t flag,
const char* msg, bool stop_by_hit) {


@ -136,9 +136,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
}
void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_index_at_bcp(Register index, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
void get_method_counters(Register method, Register mcs, Label& skip);
@ -148,8 +145,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
// Load cpool->resolved_klass_at(index).
void load_resolved_klass_at_offset(Register cpool, Register index, Register klass, Register temp);
void load_resolved_method_at_index(int byte_no, Register method, Register cache);
void pop_ptr(Register r = x10);
void pop_i(Register r = x10);
void pop_l(Register r = x10);
@ -302,6 +297,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
void load_resolved_indy_entry(Register cache, Register index);
void load_field_entry(Register cache, Register index, int bcp_offset = 1);
void load_method_entry(Register cache, Register index, int bcp_offset = 1);
#ifdef ASSERT
void verify_access_flags(Register access_flags, uint32_t flag,


@ -41,6 +41,7 @@
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
@ -454,9 +455,9 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ shadd(esp, cache, esp, t0, 3);
} else {
// Pop N words from the stack
__ get_cache_and_index_at_bcp(cache, index, 1, index_size);
__ ld(cache, Address(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
__ andi(cache, cache, ConstantPoolCacheEntry::parameter_size_mask);
assert(index_size == sizeof(u2), "Can only be u2");
__ load_method_entry(cache, index);
__ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));
__ shadd(esp, cache, esp, t0, 3);
}
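The else branch above pops the callee's arguments by reading num_parameters out of the ResolvedMethodEntry and bumping the expression stack pointer by that many slots. A minimal sketch of the arithmetic, assuming 8-byte expression stack slots (the slot size is an assumption here):

    #include <cstdint>

    constexpr int kLogStackElementSize = 3;  // 8-byte expression stack slots (assumed)

    // Equivalent of shadd(esp, cache, esp, t0, 3) once cache holds num_parameters:
    // move the expression stack pointer up by num_parameters slots.
    uint64_t pop_arguments(uint64_t esp, uint16_t num_parameters) {
      return esp + (static_cast<uint64_t>(num_parameters) << kLogStackElementSize);
    }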


@ -40,6 +40,7 @@
#include "oops/oop.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
@ -471,14 +472,14 @@ void TemplateTable::condy_helper(Label& Done) {
// VMr = obj = base address to find primitive value to push
// VMr2 = flags = (tos, off) using format of CPCE::_flags
__ mv(off, flags);
__ mv(t0, ConstantPoolCacheEntry::field_index_mask);
__ mv(t0, ConstantPoolCache::field_index_mask);
__ andrw(off, off, t0);
__ add(off, obj, off);
const Address field(off, 0); // base + R---->base + offset
__ slli(flags, flags, XLEN - (ConstantPoolCacheEntry::tos_state_shift + ConstantPoolCacheEntry::tos_state_bits));
__ srli(flags, flags, XLEN - ConstantPoolCacheEntry::tos_state_bits); // (1 << 5) - 4 --> 28~31==> flags:0~3
__ slli(flags, flags, XLEN - (ConstantPoolCache::tos_state_shift + ConstantPoolCache::tos_state_bits));
__ srli(flags, flags, XLEN - ConstantPoolCache::tos_state_bits); // (1 << 5) - 4 --> 28~31==> flags:0~3
switch (bytecode()) {
case Bytecodes::_ldc: // fall through
@ -2168,21 +2169,32 @@ void TemplateTable::_return(TosState state) {
// volatile-stores although it could just as well go before
// volatile-loads.
void TemplateTable::resolve_cache_and_index(int byte_no,
Register Rcache,
Register index,
size_t index_size) {
void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
Register Rcache,
Register index) {
const Register temp = x9;
assert_different_registers(Rcache, index, temp);
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
Label resolved, clinit_barrier_slow;
Bytecodes::Code code = bytecode();
__ load_method_entry(Rcache, index);
switch(byte_no) {
case f1_byte:
__ add(temp, Rcache, in_bytes(ResolvedMethodEntry::bytecode1_offset()));
break;
case f2_byte:
__ add(temp, Rcache, in_bytes(ResolvedMethodEntry::bytecode2_offset()));
break;
}
// Load-acquire the bytecode to match store-release in InterpreterRuntime
__ membar(MacroAssembler::AnyAny);
__ lbu(temp, Address(temp, 0));
__ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
__ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
__ mv(t0, (int) code);
__ beq(temp, t0, resolved);
__ beq(temp, t0, resolved); // have we resolved this bytecode?
// resolve first time through
// Class initialization barrier slow path lands here as well.
@ -2193,14 +2205,14 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
__ call_VM(noreg, entry, temp);
// Update registers with resolved info
__ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
__ load_method_entry(Rcache, index);
// n.b. unlike x86 Rcache is now rcpool plus the indexed offset
// so all clients of this method must be modified accordingly
__ bind(resolved);
// Class initialization barrier for static methods
if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
__ load_resolved_method_at_index(byte_no, temp, Rcache);
__ ld(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
__ load_method_holder(temp, temp);
__ clinit_barrier(temp, t0, nullptr, &clinit_barrier_slow);
}
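The membar pair around the lbu in resolve_cache_and_index_for_method implements a load-acquire of the resolved-bytecode byte, pairing with the store-release the runtime performs when it fills in the entry. A conceptual sketch with std::atomic (field names invented, not the real ResolvedMethodEntry API):

    #include <atomic>
    #include <cstdint>

    struct FakeMethodEntry {
      void* method = nullptr;                 // written before publication
      std::atomic<uint8_t> bytecode1{0};      // 0 means "not yet resolved"
    };

    // Resolver side: fill in the entry, then release-store the bytecode.
    void publish(FakeMethodEntry& e, void* m, uint8_t code) {
      e.method = m;
      e.bytecode1.store(code, std::memory_order_release);
    }

    // Interpreter side: acquire-load the bytecode; if it matches the current
    // bytecode, every field written before the release-store is visible.
    bool is_resolved(const FakeMethodEntry& e, uint8_t code) {
      return e.bytecode1.load(std::memory_order_acquire) == code;
    }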
@ -2271,32 +2283,107 @@ void TemplateTable::load_resolved_field_entry(Register obj,
}
}
// The Rcache and index registers must be set before call
// n.b unlike x86 cache already includes the index offset
void TemplateTable::load_field_cp_cache_entry(Register obj,
Register cache,
Register index,
Register off,
Register flags,
bool is_static = false) {
assert_different_registers(cache, index, flags, off);
void TemplateTable::load_resolved_method_entry_special_or_static(Register cache,
Register method,
Register flags) {
ByteSize cp_base_offset = ConstantPoolCache::base_offset();
// Field offset
__ ld(off, Address(cache, in_bytes(cp_base_offset +
ConstantPoolCacheEntry::f2_offset())));
// Flags
__ lwu(flags, Address(cache, in_bytes(cp_base_offset +
ConstantPoolCacheEntry::flags_offset())));
// setup registers
const Register index = flags;
assert_different_registers(method, cache, flags);
// klass overwrite register
if (is_static) {
__ ld(obj, Address(cache, in_bytes(cp_base_offset +
ConstantPoolCacheEntry::f1_offset())));
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ ld(obj, Address(obj, mirror_offset));
__ resolve_oop_handle(obj, x15, t1);
}
// determine constant pool cache field offsets
resolve_cache_and_index_for_method(f1_byte, cache, index);
__ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
__ ld(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
}
void TemplateTable::load_resolved_method_entry_handle(Register cache,
Register method,
Register ref_index,
Register flags) {
// setup registers
const Register index = ref_index;
assert_different_registers(method, flags);
assert_different_registers(method, cache, index);
// determine constant pool cache field offsets
resolve_cache_and_index_for_method(f1_byte, cache, index);
__ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
// maybe push appendix to arguments (just before return address)
Label L_no_push;
__ test_bit(t0, flags, ResolvedMethodEntry::has_appendix_shift);
__ beqz(t0, L_no_push);
// invokehandle uses an index into the resolved references array
__ load_unsigned_short(ref_index, Address(cache, in_bytes(ResolvedMethodEntry::resolved_references_index_offset())));
// Push the appendix as a trailing parameter.
// This must be done before we get the receiver,
// since the parameter_size includes it.
Register appendix = method;
__ load_resolved_reference_at_index(appendix, ref_index);
__ push_reg(appendix); // push appendix (MethodType, CallSite, etc.)
__ bind(L_no_push);
__ ld(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
}
void TemplateTable::load_resolved_method_entry_interface(Register cache,
Register klass,
Register method_or_table_index,
Register flags) {
// setup registers
const Register index = method_or_table_index;
assert_different_registers(method_or_table_index, cache, flags);
// determine constant pool cache field offsets
resolve_cache_and_index_for_method(f1_byte, cache, index);
__ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
// Invokeinterface can behave in different ways:
// If calling a method from java.lang.Object, the forced virtual flag is true so the invocation will
// behave like an invokevirtual call. The state of the virtual final flag will determine whether a method or
// vtable index is placed in the register.
// Otherwise, the registers will be populated with the klass and method.
Label NotVirtual; Label NotVFinal; Label Done;
__ test_bit(t0, flags, ResolvedMethodEntry::is_forced_virtual_shift);
__ beqz(t0, NotVirtual);
__ test_bit(t0, flags, ResolvedMethodEntry::is_vfinal_shift);
__ beqz(t0, NotVFinal);
__ ld(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
__ j(Done);
__ bind(NotVFinal);
__ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
__ j(Done);
__ bind(NotVirtual);
__ ld(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
__ ld(klass, Address(cache, in_bytes(ResolvedMethodEntry::klass_offset())));
__ bind(Done);
}
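The dispatch that load_resolved_method_entry_interface encodes above can be summarized in a few lines of plain C++. Bit positions and field names below are placeholders for illustration only; the real shifts live in ResolvedMethodEntry:

    #include <cstdint>

    constexpr unsigned kForcedVirtualBit = 1u << 0;  // placeholder bit positions
    constexpr unsigned kVFinalBit        = 1u << 1;

    struct Dispatch {
      const void* klass       = nullptr;  // interface klass, regular case only
      const void* method      = nullptr;  // resolved Method*, when final
      uint16_t    table_index = 0;        // vtable/itable index otherwise
    };

    // java.lang.Object methods are dispatched like invokevirtual; everything
    // else gets the klass and method for an itable lookup.
    Dispatch select(uint8_t flags, const void* klass, const void* method, uint16_t table_index) {
      Dispatch d;
      if (flags & kForcedVirtualBit) {
        if (flags & kVFinalBit) {
          d.method = method;            // final: call the method directly
        } else {
          d.table_index = table_index;  // otherwise use the vtable index
        }
      } else {
        d.method = method;              // regular interface call
        d.klass  = klass;
      }
      return d;
    }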
void TemplateTable::load_resolved_method_entry_virtual(Register cache,
Register method_or_table_index,
Register flags) {
// setup registers
const Register index = flags;
assert_different_registers(method_or_table_index, cache, flags);
// determine constant pool cache field offsets
resolve_cache_and_index_for_method(f2_byte, cache, index);
__ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
// method_or_table_index can either be an itable index or a method depending on the virtual final flag
Label NotVFinal; Label Done;
__ test_bit(t0, flags, ResolvedMethodEntry::is_vfinal_shift);
__ beqz(t0, NotVFinal);
__ ld(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
__ j(Done);
__ bind(NotVFinal);
__ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
__ bind(Done);
}
// The xmethod register is input and overwritten to be the adapter method for the
@ -2369,42 +2456,6 @@ void TemplateTable::load_invokedynamic_entry(Register method) {
}
}
void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
Register method,
Register itable_index,
Register flags,
bool is_invokevirtual,
bool is_invokevfinal, /*unused*/
bool is_invokedynamic /*unused*/) {
// setup registers
const Register cache = t1;
const Register index = x14;
assert_different_registers(method, flags);
assert_different_registers(method, cache, index);
assert_different_registers(itable_index, flags);
assert_different_registers(itable_index, cache, index);
// determine constant pool cache field offsets
assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
const int method_offset = in_bytes(ConstantPoolCache::base_offset() +
(is_invokevirtual ?
ConstantPoolCacheEntry::f2_offset() :
ConstantPoolCacheEntry::f1_offset()));
const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::flags_offset());
// access constant pool cache fields
const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::f2_offset());
size_t index_size = sizeof(u2);
resolve_cache_and_index(byte_no, cache, index, index_size);
__ ld(method, Address(cache, method_offset));
if (itable_index != noreg) {
__ ld(itable_index, Address(cache, index_offset));
}
__ lwu(flags, Address(cache, flags_offset));
}
// The registers cache and index expected to be set before call.
// Correct values of the cache and index registers are preserved.
void TemplateTable::jvmti_post_field_access(Register cache, Register index,
@ -3181,68 +3232,25 @@ void TemplateTable::fast_xaccess(TosState state) {
//-----------------------------------------------------------------------------
// Calls
void TemplateTable::prepare_invoke(int byte_no,
Register method, // linked method (or i-klass)
Register index, // itable index, MethodType, etc.
Register recv, // if caller wants to see it
Register flags // if caller wants to test it
) {
// determine flags
const Bytecodes::Code code = bytecode();
const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
const bool is_invokehandle = code == Bytecodes::_invokehandle;
const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
const bool is_invokespecial = code == Bytecodes::_invokespecial;
const bool load_receiver = (recv != noreg);
const bool save_flags = (flags != noreg);
assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
assert(flags == noreg || flags == x13, "");
assert(recv == noreg || recv == x12, "");
void TemplateTable::prepare_invoke(Register cache, Register recv) {
// setup registers & access constant pool cache
if (recv == noreg) {
recv = x12;
}
if (flags == noreg) {
flags = x13;
}
assert_different_registers(method, index, recv, flags);
Bytecodes::Code code = bytecode();
const bool load_receiver = (code != Bytecodes::_invokestatic) && (code != Bytecodes::_invokedynamic);
// save 'interpreter return address'
__ save_bcp();
load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
// maybe push appendix to arguments (just before return address)
if (is_invokehandle) {
Label L_no_push;
__ test_bit(t0, flags, ConstantPoolCacheEntry::has_appendix_shift);
__ beqz(t0, L_no_push);
// Push the appendix as a trailing parameter.
// This must be done before we get the receiver,
// since the parameter_size includes it.
__ push_reg(x9);
__ mv(x9, index);
__ load_resolved_reference_at_index(index, x9);
__ pop_reg(x9);
__ push_reg(index); // push appendix (MethodType, CallSite, etc.)
__ bind(L_no_push);
}
// Load TOS state for later
__ load_unsigned_byte(t1, Address(cache, in_bytes(ResolvedMethodEntry::type_offset())));
// load receiver if needed (note: no return address pushed yet)
if (load_receiver) {
__ andi(recv, flags, ConstantPoolCacheEntry::parameter_size_mask); // parameter_size_mask = 1 << 8
__ load_unsigned_short(recv, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));
__ shadd(t0, recv, esp, t0, 3);
__ ld(recv, Address(t0, -Interpreter::expr_offset_in_bytes(1)));
__ verify_oop(recv);
}
// compute return type
__ slli(t1, flags, XLEN - (ConstantPoolCacheEntry::tos_state_shift + ConstantPoolCacheEntry::tos_state_bits));
__ srli(t1, t1, XLEN - ConstantPoolCacheEntry::tos_state_bits); // (1 << 5) - 4 --> 28~31==> t1:0~3
// load return address
{
const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
@ -3259,7 +3267,7 @@ void TemplateTable::invokevirtual_helper(Register index,
assert_different_registers(index, recv, x10, x13);
// Test for an invoke of a final method
Label notFinal;
__ test_bit(t0, flags, ConstantPoolCacheEntry::is_vfinal_shift);
__ test_bit(t0, flags, ResolvedMethodEntry::is_vfinal_shift);
__ beqz(t0, notFinal);
const Register method = index; // method must be xmethod
@ -3295,7 +3303,10 @@ void TemplateTable::invokevirtual(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f2_byte, "use this argument");
prepare_invoke(byte_no, xmethod, noreg, x12, x13);
load_resolved_method_entry_virtual(x12, // ResolvedMethodEntry*
xmethod, // Method* or itable index
x13); // flags
prepare_invoke(x12, x12); // recv
// xmethod: index (actually a Method*)
// x12: receiver
@ -3308,8 +3319,11 @@ void TemplateTable::invokespecial(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(byte_no, xmethod, noreg, // get f1 Method*
x12); // get receiver also for null check
load_resolved_method_entry_special_or_static(x12, // ResolvedMethodEntry*
xmethod, // Method*
x13); // flags
prepare_invoke(x12, x12); // get receiver also for null check
__ verify_oop(x12);
__ null_check(x12);
// do the call
@ -3322,7 +3336,11 @@ void TemplateTable::invokestatic(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(byte_no, xmethod); // get f1 Method*
load_resolved_method_entry_special_or_static(x12, // ResolvedMethodEntry*
xmethod, // Method*
x13); // flags
prepare_invoke(x12, x12); // get receiver also for null check
// do the call
__ profile_call(x10);
__ profile_arguments_type(x10, xmethod, x14, false);
@ -3337,8 +3355,11 @@ void TemplateTable::invokeinterface(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(byte_no, x10, xmethod, // get f1 Klass*, f2 Method*
x12, x13); // recv, flags
load_resolved_method_entry_interface(x12, // ResolvedMethodEntry*
x10, // Klass*
xmethod, // Method* or itable/vtable index
x13); // flags
prepare_invoke(x12, x12); // receiver
// x10: interface klass (from f1)
// xmethod: method (from f2)
@ -3351,7 +3372,7 @@ void TemplateTable::invokeinterface(int byte_no) {
// Special case of invokeinterface called for virtual method of
// java.lang.Object. See cpCache.cpp for details
Label notObjectMethod;
__ test_bit(t0, x13, ConstantPoolCacheEntry::is_forced_virtual_shift);
__ test_bit(t0, x13, ResolvedMethodEntry::is_forced_virtual_shift);
__ beqz(t0, notObjectMethod);
invokevirtual_helper(xmethod, x12, x13);
@ -3361,7 +3382,7 @@ void TemplateTable::invokeinterface(int byte_no) {
// Check for private method invocation - indicated by vfinal
Label notVFinal;
__ test_bit(t0, x13, ConstantPoolCacheEntry::is_vfinal_shift);
__ test_bit(t0, x13, ResolvedMethodEntry::is_vfinal_shift);
__ beqz(t0, notVFinal);
// Check receiver klass into x13
@ -3458,7 +3479,12 @@ void TemplateTable::invokehandle(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(byte_no, xmethod, x10, x12);
load_resolved_method_entry_handle(x12, // ResolvedMethodEntry*
xmethod, // Method*
x10, // Resolved reference
x13); // flags
prepare_invoke(x12, x12);
__ verify_method_ptr(x12);
__ verify_oop(x12);
__ null_check(x12);
@ -3480,9 +3506,9 @@ void TemplateTable::invokedynamic(int byte_no) {
load_invokedynamic_entry(xmethod);
// x10: CallSite object (from cpool->resolved_references[])
// xmethod: MH.linkToCallSite method (from f2)
// xmethod: MH.linkToCallSite method
// Note: x10_callsite is already pushed by prepare_invoke
// Note: x10_callsite is already pushed
// %%% should make a type profile for any invokedynamic that takes a ref argument
// profile this call


@ -27,12 +27,7 @@
#ifndef CPU_RISCV_TEMPLATETABLE_RISCV_HPP
#define CPU_RISCV_TEMPLATETABLE_RISCV_HPP
static void prepare_invoke(int byte_no,
Register method, // linked method (or i-klass)
Register index = noreg, // itable index, MethodType, etc.
Register recv = noreg, // if caller wants to see it
Register flags = noreg // if caller wants to test it
);
static void prepare_invoke(Register cache, Register recv);
static void invokevirtual_helper(Register index, Register recv,
Register flags);


@ -38,6 +38,7 @@
#include "oops/methodData.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
@ -337,18 +338,6 @@ void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index, int bcp_o
BLOCK_COMMENT("}");
}
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register cpe_offset,
int bcp_offset, size_t index_size) {
BLOCK_COMMENT("get_cache_and_index_at_bcp {");
assert_different_registers(cache, cpe_offset);
get_cache_index_at_bcp(cpe_offset, bcp_offset, index_size);
z_lg(cache, Address(Z_fp, _z_ijava_state_neg(cpoolCache)));
// Convert from field index to ConstantPoolCache offset in bytes.
z_sllg(cpe_offset, cpe_offset, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord));
BLOCK_COMMENT("}");
}
void InterpreterMacroAssembler::load_resolved_indy_entry(Register cache, Register index) {
// Get index out of bytecode pointer.
get_cache_index_at_bcp(index, 1, sizeof(u4));
@ -389,26 +378,24 @@ void InterpreterMacroAssembler::load_field_entry(Register cache, Register index,
z_la(cache, Array<ResolvedFieldEntry>::base_offset_in_bytes(), index, cache);
}
// Kills Z_R0_scratch.
void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
Register cpe_offset,
Register bytecode,
int byte_no,
int bcp_offset,
size_t index_size) {
BLOCK_COMMENT("get_cache_and_index_and_bytecode_at_bcp {");
get_cache_and_index_at_bcp(cache, cpe_offset, bcp_offset, index_size);
void InterpreterMacroAssembler::load_method_entry(Register cache, Register index, int bcp_offset) {
// Get field index out of bytecode pointer.
get_cache_index_at_bcp(index, bcp_offset, sizeof(u2));
// We want to load (from CP cache) the bytecode that corresponds to the passed-in byte_no.
// It is located at (cache + cpe_offset + base_offset + indices_offset + (8-1) (last byte in DW) - (byte_no+1).
// Instead of loading, shifting and masking a DW, we just load that one byte of interest with z_llgc (unsigned).
const int base_ix_off = in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset());
const int off_in_DW = (8-1) - (1+byte_no);
assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
assert(ConstantPoolCacheEntry::bytecode_1_mask == 0xff, "");
load_sized_value(bytecode, Address(cache, cpe_offset, base_ix_off+off_in_DW), 1, false /*signed*/);
// Get the address of the ResolvedMethodEntry array.
get_constant_pool_cache(cache);
z_lg(cache, Address(cache, in_bytes(ConstantPoolCache::method_entries_offset())));
BLOCK_COMMENT("}");
// Scale the index to form a byte offset into the ResolvedMethodEntry array
size_t entry_size = sizeof(ResolvedMethodEntry);
if (is_power_of_2(entry_size)) {
z_sllg(index, index, exact_log2(entry_size));
} else {
z_mghi(index, entry_size);
}
// Calculate the final field address.
z_la(cache, Array<ResolvedMethodEntry>::base_offset_in_bytes(), index, cache);
}
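The s390 variant above scales the index with a shift when sizeof(ResolvedMethodEntry) happens to be a power of two and falls back to a multiply (z_mghi) otherwise. A small sketch of that selection:

    #include <cstddef>
    #include <cstdint>

    bool is_power_of_2(size_t v) { return v != 0 && (v & (v - 1)) == 0; }

    int exact_log2(size_t v) {
      int n = 0;
      while ((size_t{1} << n) < v) { ++n; }
      return n;
    }

    // Scale an entry index to a byte offset, preferring a shift when the
    // entry size allows it, as the generated code does with z_sllg vs z_mghi.
    uint64_t scale_index(uint64_t index, size_t entry_size) {
      return is_power_of_2(entry_size) ? (index << exact_log2(entry_size))
                                       : index * entry_size;
    }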
// Load object from cpool->resolved_references(index).
@ -448,29 +435,6 @@ void InterpreterMacroAssembler::load_resolved_klass_at_offset(Register cpool, Re
z_lg(iklass, Address(iklass, offset, Array<Klass*>::base_offset_in_bytes()));
}
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
Register tmp,
int bcp_offset,
size_t index_size) {
BLOCK_COMMENT("get_cache_entry_pointer_at_bcp {");
get_cache_and_index_at_bcp(cache, tmp, bcp_offset, index_size);
add2reg_with_index(cache, in_bytes(ConstantPoolCache::base_offset()), tmp, cache);
BLOCK_COMMENT("}");
}
void InterpreterMacroAssembler::load_resolved_method_at_index(int byte_no,
Register cache,
Register cpe_offset,
Register method) {
const int method_offset = in_bytes(
ConstantPoolCache::base_offset() +
((byte_no == TemplateTable::f2_byte)
? ConstantPoolCacheEntry::f2_offset()
: ConstantPoolCacheEntry::f1_offset()));
z_lg(method, Address(cache, cpe_offset, method_offset)); // get f1 Method*
}
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,


@ -111,19 +111,14 @@ class InterpreterMacroAssembler: public MacroAssembler {
// a subtype of super_klass. Blows registers tmp1, tmp2 and tmp3.
void gen_subtype_check(Register sub_klass, Register super_klass, Register tmp1, Register tmp2, Label &ok_is_subtype);
void get_cache_and_index_at_bcp(Register cache, Register cpe_offset, int bcp_offset, size_t index_size = sizeof(u2));
void load_resolved_indy_entry(Register cache, Register index);
void load_field_entry(Register cache, Register index, int bcp_offset = 1);
void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register cpe_offset, Register bytecode,
int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
void load_field_entry (Register cache, Register index, int bcp_offset = 1);
void load_method_entry(Register cache, Register index, int bcp_offset = 1);
void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
void load_resolved_reference_at_index(Register result, Register index);
// load cpool->resolved_klass_at(index)
void load_resolved_klass_at_offset(Register cpool, Register offset, Register iklass);
void load_resolved_method_at_index(int byte_no, Register cache, Register cpe_offset, Register method);
// Pop topmost element from stack. It just disappears. Useful if
// consumed previously by access via stackTop().
void popx(int len);


@ -40,6 +40,7 @@
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
@ -658,12 +659,9 @@ address TemplateInterpreterGenerator::generate_return_entry_for (TosState state,
__ load_resolved_indy_entry(cache, index);
__ z_llgh(size, in_bytes(ResolvedIndyEntry::num_parameters_offset()), cache);
} else {
const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::flags_offset());
__ get_cache_and_index_at_bcp(cache, index, 1, index_size);
// #args is in rightmost byte of the _flags field.
__ z_llgc(size, Address(cache, index, flags_offset + (sizeof(size_t) - 1)));
assert(index_size == sizeof(u2), "Can only be u2");
__ load_method_entry(cache, index);
__ load_sized_value(size, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())), sizeof(u2), false /*is_signed*/);
}
__ z_sllg(size, size, Interpreter::logStackElementSize); // Each argument size in bytes.
__ z_agr(Z_esp, size); // Pop arguments.


@ -40,6 +40,7 @@
#include "oops/oop.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
@ -544,14 +545,12 @@ void TemplateTable::condy_helper(Label& Done) {
// VMr = obj = base address to find primitive value to push
// VMr2 = flags = (tos, off) using format of CPCE::_flags
assert(ConstantPoolCacheEntry::field_index_mask == 0xffff, "or use other instructions");
assert(ConstantPoolCache::field_index_mask == 0xffff, "or use other instructions");
__ z_llghr(off, flags);
const Address field(obj, off);
// What sort of thing are we loading?
__ z_srl(flags, ConstantPoolCacheEntry::tos_state_shift);
// Make sure we don't need to mask flags for tos_state after the above shift.
ConstantPoolCacheEntry::verify_tos_state_shift();
__ z_srl(flags, ConstantPoolCache::tos_state_shift);
switch (bytecode()) {
case Bytecodes::_ldc:
@ -2351,53 +2350,58 @@ void TemplateTable::_return(TosState state) {
}
// ----------------------------------------------------------------------------
// NOTE: index is already computed as byte offset, so we must not
// shift it afterwards!
void TemplateTable::resolve_cache_and_index(int byte_no,
Register cache,
Register index,
size_t index_size) {
assert_different_registers(cache, index, Z_R1_scratch);
// Register Killed: Z_R1_scratch
void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
Register Rcache,
Register index) {
BLOCK_COMMENT("resolve_cache_and_index_for_method {");
assert_different_registers(Rcache, index);
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
const Register bytecode_in_cpcache = Z_R1_scratch;
NearLabel resolved, clinit_barrier_slow;
Label resolved, clinit_barrier_slow;
Bytecodes::Code code = bytecode();
switch (code) {
case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
default:
break;
}
BLOCK_COMMENT("resolve_cache_and_index {");
const int bc_offset = (byte_no == f1_byte) ? in_bytes(ResolvedMethodEntry::bytecode1_offset())
: in_bytes(ResolvedMethodEntry::bytecode2_offset());
__ get_cache_and_index_and_bytecode_at_bcp(cache, index, bytecode_in_cpcache, byte_no, 1, index_size);
// Have we resolved this bytecode?
__ compare32_and_branch(bytecode_in_cpcache, (int)code, Assembler::bcondEqual, resolved);
__ load_method_entry(Rcache, index);
__ z_cli(Address(Rcache, bc_offset), code);
__ z_bre(resolved);
// Resolve first time through via runtime call.
// Resolve first time through
// Class initialization barrier slow path lands here as well.
__ bind(clinit_barrier_slow);
address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
__ load_const_optimized(Z_ARG2, (int)code);
__ call_VM(noreg, entry, Z_ARG2);
// Update registers with resolved info.
__ get_cache_and_index_at_bcp(cache, index, 1, index_size);
// Update registers with resolved info.
__ load_method_entry(Rcache, index);
__ bind(resolved);
// Class initialization barrier for static methods
if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
const Register method = Z_R1_scratch;
const Register klass = Z_R1_scratch;
__ load_resolved_method_at_index(byte_no, cache, index, method);
__ z_lg(method, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
__ load_method_holder(klass, method);
__ clinit_barrier(klass, Z_thread, nullptr /*L_fast_path*/, &clinit_barrier_slow);
}
BLOCK_COMMENT("} resolve_cache_and_index");
BLOCK_COMMENT("} resolve_cache_and_index_for_method");
}
void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
Register cache,
Register index) {
BLOCK_COMMENT("resolve_cache_and_index_for_field {");
assert_different_registers(cache, index);
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
@ -2412,11 +2416,10 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
}
__ load_field_entry(cache, index);
if (byte_no == f1_byte) {
__ z_cli(Address(cache, in_bytes(ResolvedFieldEntry::get_code_offset())), code);
} else {
__ z_cli(Address(cache, in_bytes(ResolvedFieldEntry::put_code_offset())), code);
}
const int code_offset = (byte_no == f1_byte) ? in_bytes(ResolvedFieldEntry::get_code_offset()) :
in_bytes(ResolvedFieldEntry::put_code_offset()) ;
__ z_cli(Address(cache, code_offset), code);
__ z_bre(resolved);
// resolve first time through
@ -2428,6 +2431,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ load_field_entry(cache, index);
__ bind(resolved);
BLOCK_COMMENT("} resolve_cache_and_index_for_field");
}
// The cache register (the only input reg) must be set before call.
@ -2458,30 +2463,6 @@ void TemplateTable::load_resolved_field_entry(Register obj,
}
}
// The Rcache and index registers must be set before call.
// Index is already a byte offset, don't shift!
void TemplateTable::load_field_cp_cache_entry(Register obj,
Register cache,
Register index,
Register off,
Register flags,
bool is_static = false) {
assert_different_registers(cache, index, flags, off);
ByteSize cp_base_offset = ConstantPoolCache::base_offset();
// Field offset
__ mem2reg_opt(off, Address(cache, index, cp_base_offset + ConstantPoolCacheEntry::f2_offset()));
// Flags. Must load 64 bits.
__ mem2reg_opt(flags, Address(cache, index, cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
// klass overwrite register
if (is_static) {
__ mem2reg_opt(obj, Address(cache, index, cp_base_offset + ConstantPoolCacheEntry::f1_offset()));
__ mem2reg_opt(obj, Address(obj, Klass::java_mirror_offset()));
__ resolve_oop_handle(obj);
}
}
void TemplateTable::load_invokedynamic_entry(Register method) {
const Register cache = Z_tmp_1;
const Register index = Z_tmp_3;
@ -2493,7 +2474,7 @@ void TemplateTable::load_invokedynamic_entry(Register method) {
__ z_lg(method, Address(cache, in_bytes(ResolvedIndyEntry::method_offset())));
// The invokedynamic is unresolved iff method is null
__ z_clgij(method, (unsigned long)nullptr, Assembler::bcondNotEqual, resolved); // method != 0, jump to resolved
__ compare64_and_branch(method, (unsigned long)nullptr, Assembler::bcondNotEqual, resolved); // method != 0, jump to resolved
Bytecodes::Code code = bytecode();
// Call to the interpreter runtime to resolve invokedynamic
address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
@ -2503,17 +2484,17 @@ void TemplateTable::load_invokedynamic_entry(Register method) {
__ load_resolved_indy_entry(cache, index);
__ z_lg(method, Address(cache, in_bytes(ResolvedIndyEntry::method_offset())));
#ifdef ASSERT
__ z_clgij(method, (unsigned long)nullptr, Assembler::bcondNotEqual, resolved); // method != 0, jump to resolved
__ compare64_and_branch(method, (unsigned long)nullptr, Assembler::bcondNotEqual, resolved); // method != 0, jump to resolved
__ stop("should be resolved by now");
#endif // ASSERT
__ bind(resolved);
Label L_no_push;
__ z_llgc(index, Address(cache, in_bytes(ResolvedIndyEntry::flags_offset())));
__ load_sized_value(index, Address(cache, in_bytes(ResolvedIndyEntry::flags_offset())), sizeof(u1), false /*is_signed*/);
__ testbit(index, ResolvedIndyEntry::has_appendix_shift);
__ z_bfalse(L_no_push);
// get appendix
__ z_llgh(index, Address(cache, in_bytes(ResolvedIndyEntry::resolved_references_index_offset())));
__ load_sized_value(index, Address(cache, in_bytes(ResolvedIndyEntry::resolved_references_index_offset())), sizeof(u2), false /*is_signed*/);
// Push the appendix as a trailing parameter.
// This must be done before we get the receiver,
// since the parameter_size includes it.
@ -2524,65 +2505,42 @@ void TemplateTable::load_invokedynamic_entry(Register method) {
// Compute return type.
Register ret_type = index;
__ z_llgc(ret_type, Address(cache, in_bytes(ResolvedIndyEntry::result_type_offset())));
__ load_sized_value(ret_type, Address(cache, in_bytes(ResolvedIndyEntry::result_type_offset())), sizeof(u1), false /*is_signed*/);
const address table_addr = (address)Interpreter::invoke_return_entry_table_for(code);
__ load_absolute_address(Z_R14, table_addr);
const int bit_shift = LogBytesPerWord; // Size of each table entry.
// const int r_bitpos = 63 - bit_shift;
// const int l_bitpos = r_bitpos - ConstantPoolCacheEntry::tos_state_bits + 1;
// const int n_rotate = bit_shift-ConstantPoolCacheEntry::tos_state_shift;
// __ rotate_then_insert(ret_type, Z_R0_scratch, l_bitpos, r_bitpos, n_rotate, true);
// Make sure we don't need to mask flags for tos_state after the above shift.
__ z_sllg(ret_type, ret_type, bit_shift);
ConstantPoolCacheEntry::verify_tos_state_shift();
__ z_lg(Z_R14, Address(Z_R14, ret_type));
}
void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
Register method,
Register itable_index,
Register flags,
bool is_invokevirtual,
bool is_invokevfinal, // unused
bool is_invokedynamic /* unused */) {
BLOCK_COMMENT("load_invoke_cp_cache_entry {");
// Setup registers.
const Register cache = Z_ARG1;
const Register cpe_offset= flags;
const ByteSize base_off = ConstantPoolCache::base_offset();
const ByteSize f1_off = ConstantPoolCacheEntry::f1_offset();
const ByteSize f2_off = ConstantPoolCacheEntry::f2_offset();
const ByteSize flags_off = ConstantPoolCacheEntry::flags_offset();
const int method_offset = in_bytes(base_off + ((byte_no == f2_byte) ? f2_off : f1_off));
const int flags_offset = in_bytes(base_off + flags_off);
// Access constant pool cache fields.
const int index_offset = in_bytes(base_off + f2_off);
void TemplateTable::load_resolved_method_entry_handle(Register cache,
Register method,
Register ref_index,
Register flags) {
assert_different_registers(method, cache, ref_index, flags);
assert_different_registers(method, itable_index, flags, cache);
assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
// determine constant pool cache field offsets
resolve_cache_and_index_for_method(f1_byte, cache, method /* index */);
if (is_invokevfinal) {
// Already resolved.
assert(itable_index == noreg, "register not used");
__ get_cache_and_index_at_bcp(cache, cpe_offset, 1);
} else {
// Need to resolve.
resolve_cache_and_index(byte_no, cache, cpe_offset, sizeof(u2));
}
__ z_lg(method, Address(cache, cpe_offset, method_offset));
// maybe push appendix to arguments (just before return address)
Label L_no_push;
__ load_sized_value(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())), sizeof(u1), false /* is signed */);
__ testbit(flags, ResolvedMethodEntry::has_appendix_shift); // life ended for flags
__ z_bfalse(L_no_push);
// invokehandle uses an index into the resolved references array
__ load_sized_value(ref_index, Address(cache, in_bytes(ResolvedMethodEntry::resolved_references_index_offset())), sizeof(u2), false /* is signed */);
// Push the appendix as a trailing parameter.
// This must be done before we get the receiver,
// since the parameter_size includes it.
Register appendix = method;
__ load_resolved_reference_at_index(appendix, ref_index);
__ verify_oop(appendix);
__ push_ptr(appendix); // Push appendix (MethodType, CallSite, etc.).
__ bind(L_no_push);
if (itable_index != noreg) {
__ z_lg(itable_index, Address(cache, cpe_offset, index_offset));
}
// Only load the lower 4 bytes and fill high bytes of flags with zeros.
// Callers depend on this zero-extension!!!
// Attention: overwrites cpe_offset == flags
__ z_llgf(flags, Address(cache, cpe_offset, flags_offset + (BytesPerLong-BytesPerInt)));
BLOCK_COMMENT("} load_invoke_cp_cache_entry");
__ z_lg(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
}
// The registers cache and index are set up if needed.
@ -3539,61 +3497,21 @@ void TemplateTable::fast_xaccess(TosState state) {
//-----------------------------------------------------------------------------
// Calls
void TemplateTable::prepare_invoke(int byte_no,
Register method, // linked method (or i-klass)
Register index, // itable index, MethodType, etc.
Register recv, // If caller wants to see it.
Register flags) { // If caller wants to test it.
// Determine flags.
const Bytecodes::Code code = bytecode();
const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
const bool is_invokedynamic = false; // should not reach here with invokedynamic
const bool is_invokehandle = code == Bytecodes::_invokehandle;
const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
const bool is_invokespecial = code == Bytecodes::_invokespecial;
const bool load_receiver = (recv != noreg);
assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
void TemplateTable::prepare_invoke(Register cache, Register recv) {
Bytecodes::Code code = bytecode();
const Register ret_type = Z_R1_scratch;
const bool load_receiver = (code != Bytecodes::_invokestatic) && (code != Bytecodes::_invokedynamic);
assert_different_registers(ret_type, recv);
// Setup registers & access constant pool cache.
if (recv == noreg) { recv = Z_ARG1; }
if (flags == noreg) { flags = Z_ARG2; }
assert_different_registers(method, Z_R14, index, recv, flags);
// Load TOS state for later
__ load_sized_value(ret_type, Address(cache, in_bytes(ResolvedMethodEntry::type_offset())), sizeof(u1), false /* is signed */);
BLOCK_COMMENT("prepare_invoke {");
load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
// Maybe push appendix to arguments.
if (is_invokehandle) {
Label L_no_push;
Register resolved_reference = Z_R1_scratch;
__ testbit(flags, ConstantPoolCacheEntry::has_appendix_shift);
__ z_bfalse(L_no_push);
// Push the appendix as a trailing parameter.
// This must be done before we get the receiver,
// since the parameter_size includes it.
__ load_resolved_reference_at_index(resolved_reference, index);
__ verify_oop(resolved_reference);
__ push_ptr(resolved_reference); // Push appendix (MethodType, CallSite, etc.).
__ bind(L_no_push);
}
// Load receiver if needed (after appendix is pushed so parameter size is correct).
// load receiver if needed (note: no return address pushed yet)
if (load_receiver) {
assert(!is_invokedynamic, "");
// recv := int2long(flags & ConstantPoolCacheEntry::parameter_size_mask) << 3
// Flags is zero-extended int2long when loaded during load_invoke_cp_cache_entry().
// Only the least significant byte (psize) of flags is used.
{
const unsigned int logSES = Interpreter::logStackElementSize;
const int bit_shift = logSES;
const int r_bitpos = 63 - bit_shift;
const int l_bitpos = r_bitpos - ConstantPoolCacheEntry::parameter_size_bits + 1;
const int n_rotate = bit_shift;
assert(ConstantPoolCacheEntry::parameter_size_mask == 255, "adapt bitpositions");
__ rotate_then_insert(recv, flags, l_bitpos, r_bitpos, n_rotate, true);
}
// Recv now contains #arguments * StackElementSize.
__ load_sized_value(recv, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())), sizeof(u2), false /* is signed */);
const unsigned int bit_shift = Interpreter::logStackElementSize;
__ z_sllg(recv, recv, bit_shift);
// recv now contains #arguments * StackElementSize.
Address recv_addr(Z_esp, recv);
__ z_lg(recv, recv_addr);
@ -3602,26 +3520,73 @@ void TemplateTable::prepare_invoke(int byte_no,
// Compute return type.
// ret_type is used by callers (invokespecial, invokestatic) at least.
Register ret_type = Z_R1_scratch;
assert_different_registers(ret_type, method);
const address table_addr = (address)Interpreter::invoke_return_entry_table_for(code);
__ load_absolute_address(Z_R14, table_addr);
__ z_sllg(ret_type, ret_type, LogBytesPerWord);
{
const int bit_shift = LogBytesPerWord; // Size of each table entry.
const int r_bitpos = 63 - bit_shift;
const int l_bitpos = r_bitpos - ConstantPoolCacheEntry::tos_state_bits + 1;
const int n_rotate = bit_shift-ConstantPoolCacheEntry::tos_state_shift;
__ rotate_then_insert(ret_type, flags, l_bitpos, r_bitpos, n_rotate, true);
// Make sure we don't need to mask flags for tos_state after the above shift.
ConstantPoolCacheEntry::verify_tos_state_shift();
}
__ z_lg(Z_R14, Address(Z_R14, ret_type)); // Load return address.
BLOCK_COMMENT("} prepare_invoke");
__ z_lg(Z_R14, Address(Z_R14, ret_type)); // Load return address.
}
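The tail of prepare_invoke above fetches the continuation address from a per-bytecode return-entry table indexed by the TOS state, one machine word per slot. Roughly, assuming the table is laid out as an array of code pointers:

    #include <cstdint>

    // Each invoke bytecode has its own return-entry table, indexed by the TOS
    // state of the value the callee leaves on the stack.
    const void* return_entry(const void* const* table, uint8_t tos_state) {
      // The generated code scales tos_state by sizeof(void*) (shift by
      // LogBytesPerWord) before the indexed load into Z_R14.
      return table[tos_state];
    }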
void TemplateTable::load_resolved_method_entry_interface(Register cache,
Register klass,
Register method_or_table_index,
Register flags) {
assert_different_registers(method_or_table_index, cache, flags, klass);
BLOCK_COMMENT("load_resolved_method_entry_interface {");
// determine constant pool cache field offsets
const Register index = flags; // not containing anything important, let's kill it.
resolve_cache_and_index_for_method(f1_byte, cache, index);
__ load_sized_value(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())), sizeof(u1), false /* is signed*/);
// Invokeinterface can behave in different ways:
// If calling a method from java.lang.Object, the forced virtual flag is true so the invocation will
// behave like an invokevirtual call. The state of the virtual final flag will determine whether a method or
// vtable index is placed in the register.
// Otherwise, the registers will be populated with the klass and method.
Label NotVirtual, NotVFinal, Done;
__ testbit(flags, ResolvedMethodEntry::is_forced_virtual_shift);
__ z_brz(NotVirtual);
__ testbit(flags, ResolvedMethodEntry::is_vfinal_shift);
__ z_brz(NotVFinal);
__ z_lg(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
__ z_bru(Done);
__ bind(NotVFinal);
__ load_sized_value(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())), sizeof(u2), false /* is signed */);
__ z_bru(Done);
__ bind(NotVirtual);
__ z_lg(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
__ z_lg(klass, Address(cache, in_bytes(ResolvedMethodEntry::klass_offset())));
__ bind(Done);
BLOCK_COMMENT("} load_resolved_method_entry_interface");
}
// Registers Killed: Z_R1
void TemplateTable::load_resolved_method_entry_virtual(Register cache,
Register method_or_table_index,
Register flags) {
BLOCK_COMMENT("load_resolved_method_entry_virtual {");
assert_different_registers(method_or_table_index, cache, flags);
const Register index = flags; // doesn't contain valuable content, could be used as index for once
// determine constant pool cache field offsets
resolve_cache_and_index_for_method(f2_byte, cache, index);
__ load_sized_value(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())), sizeof(u1), false /*is_signed*/);
// method_or_table_index can either be an itable index or a method depending on the virtual final flag
Label NotVFinal, Done;
__ testbit(flags, ResolvedMethodEntry::is_vfinal_shift);
__ z_brz(NotVFinal);
__ z_lg(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
__ z_bru(Done);
__ bind(NotVFinal);
__ load_sized_value(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())), sizeof(u2), false /* is signed */);
__ bind(Done);
BLOCK_COMMENT("} load_resolved_method_entry_virtual");
}
void TemplateTable::invokevirtual_helper(Register index,
Register recv,
@ -3634,7 +3599,7 @@ void TemplateTable::invokevirtual_helper(Register index,
BLOCK_COMMENT("invokevirtual_helper {");
__ testbit(flags, ConstantPoolCacheEntry::is_vfinal_shift);
__ testbit(flags, ResolvedMethodEntry::is_vfinal_shift);
__ z_brz(notFinal);
const Register method = index; // Method must be Z_ARG3.
@ -3673,16 +3638,19 @@ void TemplateTable::invokevirtual(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f2_byte, "use this argument");
prepare_invoke(byte_no,
Z_ARG3, // method or vtable index
noreg, // unused itable index
Z_ARG1, // recv
Z_ARG2); // flags
const Register Rrecv = Z_ARG1;
const Register Rmethod = Z_ARG3;
const Register Rflags = Z_ARG2;
load_resolved_method_entry_virtual(Rrecv,
Rmethod,
Rflags);
prepare_invoke(Rrecv, Rrecv);
// Z_ARG3 : index
// Z_ARG1 : receiver
// Z_ARG2 : flags
invokevirtual_helper(Z_ARG3, Z_ARG1, Z_ARG2);
invokevirtual_helper(Rmethod, Rrecv, Rflags);
}
void TemplateTable::invokespecial(int byte_no) {
@ -3690,22 +3658,47 @@ void TemplateTable::invokespecial(int byte_no) {
assert(byte_no == f1_byte, "use this argument");
Register Rmethod = Z_tmp_2;
prepare_invoke(byte_no, Rmethod, noreg, // Get f1 method.
Z_ARG3); // Get receiver also for null check.
__ verify_oop(Z_ARG3);
__ null_check(Z_ARG3);
Register Rrecv = Z_ARG3;
load_resolved_method_entry_special_or_static(Rrecv,
Rmethod,
noreg); /* flags are not used here */
prepare_invoke(Rrecv, Rrecv);
__ verify_oop(Rrecv);
__ null_check(Rrecv);
// Do the call.
__ profile_call(Z_ARG2);
__ profile_arguments_type(Z_ARG2, Rmethod, Z_ARG5, false);
__ jump_from_interpreted(Rmethod, Z_R1_scratch);
}
/*
 * There are only two call sites (invokestatic, invokespecial) for this method and both of them pass
 * "noreg" for the flags register at present.
*/
void TemplateTable::load_resolved_method_entry_special_or_static(Register cache,
Register method,
Register flags) {
assert_different_registers(method, cache, flags);
// determine constant pool cache field offsets
resolve_cache_and_index_for_method(f1_byte, cache, method /* index (temp) */);
if (flags != noreg) {
__ load_sized_value(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())), sizeof(u1), false /* is signed */);
}
__ z_lg(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
}
void TemplateTable::invokestatic(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
Register Rmethod = Z_tmp_2;
prepare_invoke(byte_no, Rmethod); // Get f1 method.
Register Rrecv = Z_ARG1;
load_resolved_method_entry_special_or_static(Rrecv,
Rmethod,
noreg); /* flags are not used here */
prepare_invoke(Rrecv, Rrecv); // get receiver also for null check and flags
// Do the call.
__ profile_call(Z_ARG2);
__ profile_arguments_type(Z_ARG2, Rmethod, Z_ARG5, false);
@ -3731,8 +3724,11 @@ void TemplateTable::invokeinterface(int byte_no) {
BLOCK_COMMENT("invokeinterface {");
prepare_invoke(byte_no, interface, method, // Get f1 klassOop, f2 Method*.
receiver, flags);
load_resolved_method_entry_interface(receiver, // ResolvedMethodEntry*
interface, // Klass* (interface klass (from f1))
method, // Method* or itable/vtable index
flags); // flags
prepare_invoke(receiver, receiver);
// Z_R14 (== Z_bytecode) : return entry
@ -3742,14 +3738,14 @@ void TemplateTable::invokeinterface(int byte_no) {
// Special case of invokeinterface called for virtual method of
// java.lang.Object. See cpCache.cpp for details.
NearLabel notObjectMethod, no_such_method;
__ testbit(flags, ConstantPoolCacheEntry::is_forced_virtual_shift);
__ testbit(flags, ResolvedMethodEntry::is_forced_virtual_shift);
__ z_brz(notObjectMethod);
invokevirtual_helper(method, receiver, flags);
__ bind(notObjectMethod);
// Check for private method invocation - indicated by vfinal
NearLabel notVFinal;
__ testbit(flags, ConstantPoolCacheEntry::is_vfinal_shift);
__ testbit(flags, ResolvedMethodEntry::is_vfinal_shift);
__ z_brz(notVFinal);
// Get receiver klass into klass - also a null check.
@ -3843,9 +3839,12 @@ void TemplateTable::invokehandle(int byte_no) {
const Register method = Z_tmp_2;
const Register recv = Z_ARG5;
const Register mtype = Z_tmp_1;
prepare_invoke(byte_no,
method, mtype, // Get f2 method, f1 MethodType.
recv);
const Register flags = Z_R1_scratch;
load_resolved_method_entry_handle(recv,
method,
mtype /* index */,
flags );
prepare_invoke(recv, recv);
__ verify_method_ptr(method);
__ verify_oop(recv);
__ null_check(recv);
@ -3866,10 +3865,10 @@ void TemplateTable::invokedynamic(int byte_no) {
load_invokedynamic_entry(Rmethod);
// Rmethod: CallSite object (from f1)
// Rcallsite: MH.linkToCallSite method (from f2)
// Rmethod: CallSite object
// Rcallsite: MH.linkToCallSite method
// Note: Callsite is already pushed by prepare_invoke.
// Note: Callsite is already pushed
// TODO: should make a type profile for any invokedynamic that takes a ref argument.
// Profile this call.


@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,11 +26,7 @@
#ifndef CPU_S390_TEMPLATETABLE_S390_HPP
#define CPU_S390_TEMPLATETABLE_S390_HPP
static void prepare_invoke(int byte_no,
Register method, // linked method (or i-klass)
Register index = noreg, // itable index, MethodType, etc.
Register recv = noreg, // If caller wants to see it.
Register flags = noreg); // If caller wants to test it.
static void prepare_invoke(Register cache, Register recv);
static void invokevirtual_helper(Register index, Register recv,
Register flags);


@ -34,6 +34,7 @@
#include "oops/method.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
@ -454,56 +455,6 @@ void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
}
}
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
Register index,
int bcp_offset,
size_t index_size) {
assert_different_registers(cache, index);
get_cache_index_at_bcp(index, bcp_offset, index_size);
movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
// convert from field index to ConstantPoolCacheEntry index
assert(exact_log2(in_words(ConstantPoolCacheEntry::size())) == 2, "else change next line");
shll(index, 2);
}
void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
Register index,
Register bytecode,
int byte_no,
int bcp_offset,
size_t index_size) {
get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
// We use a 32-bit load here since the layout of 64-bit words on
// little-endian machines allow us that.
movl(bytecode, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
const int shift_count = (1 + byte_no) * BitsPerByte;
assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
(byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
"correct shift count");
shrl(bytecode, shift_count);
assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
andl(bytecode, ConstantPoolCacheEntry::bytecode_1_mask);
}
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
Register tmp,
int bcp_offset,
size_t index_size) {
assert_different_registers(cache, tmp);
get_cache_index_at_bcp(tmp, bcp_offset, index_size);
assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
// convert from field index to ConstantPoolCacheEntry index
// and from word offset to byte offset
assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");
shll(tmp, 2 + LogBytesPerWord);
movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
// skip past the header
addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
addptr(cache, tmp); // construct pointer to cache entry
}
// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result,
Register index,
@ -532,21 +483,6 @@ void InterpreterMacroAssembler::load_resolved_klass_at_index(Register klass,
movptr(klass, Address(resolved_klasses, index, Address::times_ptr, Array<Klass*>::base_offset_in_bytes()));
}
void InterpreterMacroAssembler::load_resolved_method_at_index(int byte_no,
Register method,
Register cache,
Register index) {
assert_different_registers(cache, index);
const int method_offset = in_bytes(
ConstantPoolCache::base_offset() +
((byte_no == TemplateTable::f2_byte)
? ConstantPoolCacheEntry::f2_offset()
: ConstantPoolCacheEntry::f1_offset()));
movptr(method, Address(cache, index, Address::times_ptr, method_offset)); // get f1 Method*
}
// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.
//
@ -2070,7 +2006,7 @@ void InterpreterMacroAssembler::notify_method_exit(
}
void InterpreterMacroAssembler::load_resolved_indy_entry(Register cache, Register index) {
// Get index out of bytecode pointer, get_cache_entry_pointer_at_bcp
// Get index out of bytecode pointer
get_cache_index_at_bcp(index, 1, sizeof(u4));
// Get address of invokedynamic array
movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
@ -2097,3 +2033,13 @@ void InterpreterMacroAssembler::load_field_entry(Register cache, Register index,
}
lea(cache, Address(cache, index, Address::times_1, Array<ResolvedFieldEntry>::base_offset_in_bytes()));
}
void InterpreterMacroAssembler::load_method_entry(Register cache, Register index, int bcp_offset) {
// Get index out of bytecode pointer
movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
get_cache_index_at_bcp(index, bcp_offset, sizeof(u2));
movptr(cache, Address(cache, ConstantPoolCache::method_entries_offset()));
imull(index, index, sizeof(ResolvedMethodEntry)); // Scale the index to be the entry index * sizeof(ResolvedMethodEntry)
lea(cache, Address(cache, index, Address::times_1, Array<ResolvedMethodEntry>::base_offset_in_bytes()));
}
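For reference, a minimal standalone C++ sketch (not part of the patch; all names are illustrative) of the address computation that load_method_entry emits: the u2 operand from the bytecode stream is scaled by sizeof(ResolvedMethodEntry) and added to the base of the flat method-entry array.
#include <cstdint>
#include <cassert>
struct MethodEntryModel {            // stand-in for ResolvedMethodEntry
  uint16_t cp_index;
  uint16_t num_parameters;
  uint8_t  flags;
};
int main() {
  MethodEntryModel entries[4] = {{3,1,0},{5,2,0},{7,1,0},{9,3,0}};
  uint16_t index = 2;                // u2 operand read from the bytecode stream
  // base + index * sizeof(entry), mirroring the imull/lea sequence above
  const char* base = reinterpret_cast<const char*>(entries);
  const MethodEntryModel* e =
      reinterpret_cast<const MethodEntryModel*>(base + index * sizeof(MethodEntryModel));
  assert(e == &entries[2] && e->cp_index == 7);
  return 0;
}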

View File

@ -103,20 +103,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
}
void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
void get_cache_and_index_at_bcp(Register cache,
Register index,
int bcp_offset,
size_t index_size = sizeof(u2));
void get_cache_and_index_and_bytecode_at_bcp(Register cache,
Register index,
Register bytecode,
int byte_no,
int bcp_offset,
size_t index_size = sizeof(u2));
void get_cache_entry_pointer_at_bcp(Register cache,
Register tmp,
int bcp_offset,
size_t index_size = sizeof(u2));
void get_cache_index_at_bcp(Register index,
int bcp_offset,
size_t index_size = sizeof(u2));
@ -129,11 +116,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
Register cpool, // the constant pool (corrupted on return)
Register index); // the constant pool index (corrupted on return)
void load_resolved_method_at_index(int byte_no,
Register method,
Register cache,
Register index);
NOT_LP64(void f2ieee();) // truncate ftos to 32bits
NOT_LP64(void d2ieee();) // truncate dtos to 64bits
@ -308,6 +290,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
void load_resolved_indy_entry(Register cache, Register index);
void load_field_entry(Register cache, Register index, int bcp_offset = 1);
void load_method_entry(Register cache, Register index, int bcp_offset = 1);
};
#endif // CPU_X86_INTERP_MASM_X86_HPP

View File

@ -40,6 +40,7 @@
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/continuation.hpp"
@ -230,11 +231,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedIndyEntry::num_parameters_offset())));
__ lea(rsp, Address(rsp, cache, Interpreter::stackElementScale()));
} else {
__ get_cache_and_index_at_bcp(cache, index, 1, index_size);
Register flags = cache;
__ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
__ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
__ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
assert(index_size == sizeof(u2), "Can only be u2");
__ load_method_entry(cache, index);
__ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));
__ lea(rsp, Address(rsp, cache, Interpreter::stackElementScale()));
}
const Register java_thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);

View File

@ -39,6 +39,7 @@
#include "oops/oop.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
@ -524,12 +525,12 @@ void TemplateTable::condy_helper(Label& Done) {
// VMr = obj = base address to find primitive value to push
// VMr2 = flags = (tos, off) using format of CPCE::_flags
__ movl(off, flags);
__ andl(off, ConstantPoolCacheEntry::field_index_mask);
__ andl(off, ConstantPoolCache::field_index_mask);
const Address field(obj, off, Address::times_1, 0*wordSize);
// What sort of thing are we loading?
__ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
__ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
__ shrl(flags, ConstantPoolCache::tos_state_shift);
__ andl(flags, ConstantPoolCache::tos_state_mask);
switch (bytecode()) {
case Bytecodes::_ldc:
@ -2653,10 +2654,9 @@ void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constrain
__ membar(order_constraint);
}
void TemplateTable::resolve_cache_and_index(int byte_no,
Register cache,
Register index,
size_t index_size) {
void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
Register cache,
Register index) {
const Register temp = rbx;
assert_different_registers(cache, index, temp);
@ -2666,7 +2666,18 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
Bytecodes::Code code = bytecode();
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
__ get_cache_and_index_and_bytecode_at_bcp(cache, index, temp, byte_no, 1, index_size);
__ load_method_entry(cache, index);
switch(byte_no) {
case f1_byte:
__ load_unsigned_byte(temp, Address(cache, in_bytes(ResolvedMethodEntry::bytecode1_offset())));
break;
case f2_byte:
__ load_unsigned_byte(temp, Address(cache, in_bytes(ResolvedMethodEntry::bytecode2_offset())));
break;
default:
ShouldNotReachHere();
}
__ cmpl(temp, code); // have we resolved this bytecode?
__ jcc(Assembler::equal, resolved);
@ -2677,7 +2688,7 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
__ movl(temp, code);
__ call_VM(noreg, entry, temp);
// Update registers with resolved info
__ get_cache_and_index_at_bcp(cache, index, 1, index_size);
__ load_method_entry(cache, index);
__ bind(resolved);
@ -2688,7 +2699,7 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
const Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
assert(thread != noreg, "x86_32 not supported");
__ load_resolved_method_at_index(byte_no, method, cache, index);
__ movptr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
__ load_method_holder(klass, method);
__ clinit_barrier(klass, thread, nullptr /*L_fast_path*/, &L_clinit_barrier_slow);
}
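As a rough standalone model (not HotSpot code; field names are placeholders) of the resolution check above: each method entry records the bytecode it has been resolved for, and the interpreter only calls into the runtime when that recorded value does not match the executing bytecode.
#include <cstdint>
#include <cassert>
struct MethodEntryModel {
  uint8_t bytecode1 = 0;             // resolution state for f1-style invokes
  uint8_t bytecode2 = 0;             // resolution state for invokevirtual
};
static void resolve_in_runtime(MethodEntryModel& e, uint8_t code, bool is_virtual) {
  (is_virtual ? e.bytecode2 : e.bytecode1) = code;   // stand-in for the VM call
}
int main() {
  MethodEntryModel entry;
  const uint8_t invokevirtual = 0xB6;
  if (entry.bytecode2 != invokevirtual) {            // not yet resolved for this bytecode
    resolve_in_runtime(entry, invokevirtual, true);
  }
  assert(entry.bytecode2 == invokevirtual);
  return 0;
}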
@ -2756,36 +2767,6 @@ void TemplateTable::load_resolved_field_entry(Register obj,
}
// The cache and index registers must be set before call
void TemplateTable::load_field_cp_cache_entry(Register obj,
Register cache,
Register index,
Register off,
Register flags,
bool is_static = false) {
assert_different_registers(cache, index, flags, off);
ByteSize cp_base_offset = ConstantPoolCache::base_offset();
// Field offset
__ movptr(off, Address(cache, index, Address::times_ptr,
in_bytes(cp_base_offset +
ConstantPoolCacheEntry::f2_offset())));
// Flags
__ movl(flags, Address(cache, index, Address::times_ptr,
in_bytes(cp_base_offset +
ConstantPoolCacheEntry::flags_offset())));
// klass overwrite register
if (is_static) {
__ movptr(obj, Address(cache, index, Address::times_ptr,
in_bytes(cp_base_offset +
ConstantPoolCacheEntry::f1_offset())));
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ movptr(obj, Address(obj, mirror_offset));
__ resolve_oop_handle(obj, rscratch2);
}
}
void TemplateTable::load_invokedynamic_entry(Register method) {
// setup registers
const Register appendix = rax;
@ -2854,37 +2835,107 @@ void TemplateTable::load_invokedynamic_entry(Register method) {
__ push(index);
}
void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
Register method,
Register itable_index,
Register flags,
bool is_invokevirtual,
bool is_invokevfinal, /*unused*/
bool is_invokedynamic /*unused*/) {
void TemplateTable::load_resolved_method_entry_special_or_static(Register cache,
Register method,
Register flags) {
// setup registers
const Register cache = rcx;
const Register index = rdx;
assert_different_registers(method, flags);
assert_different_registers(method, cache, index);
assert_different_registers(itable_index, flags);
assert_different_registers(itable_index, cache, index);
assert_different_registers(cache, index);
assert_different_registers(method, cache, flags);
// determine constant pool cache field offsets
assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::flags_offset());
// access constant pool cache fields
const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::f2_offset());
resolve_cache_and_index_for_method(f1_byte, cache, index);
__ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
__ movptr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
}
size_t index_size = sizeof(u2);
resolve_cache_and_index(byte_no, cache, index, index_size);
__ load_resolved_method_at_index(byte_no, method, cache, index);
void TemplateTable::load_resolved_method_entry_handle(Register cache,
Register method,
Register ref_index,
Register flags) {
// setup registers
const Register index = rdx;
assert_different_registers(cache, index);
assert_different_registers(cache, method, ref_index, flags);
if (itable_index != noreg) {
// pick up itable or appendix index from f2 also:
__ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
}
__ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
// determine constant pool cache field offsets
resolve_cache_and_index_for_method(f1_byte, cache, index);
__ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
// Maybe push appendix
Label L_no_push;
__ testl(flags, (1 << ResolvedMethodEntry::has_appendix_shift));
__ jcc(Assembler::zero, L_no_push);
// invokehandle uses an index into the resolved references array
__ load_unsigned_short(ref_index, Address(cache, in_bytes(ResolvedMethodEntry::resolved_references_index_offset())));
// Push the appendix as a trailing parameter.
// This must be done before we get the receiver,
// since the parameter_size includes it.
Register appendix = method;
__ load_resolved_reference_at_index(appendix, ref_index);
__ push(appendix); // push appendix (MethodType, CallSite, etc.)
__ bind(L_no_push);
__ movptr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
}
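A small standalone model (not part of the patch) of the ordering constraint noted above: the cached parameter count includes the appendix, so the appendix must be pushed before the receiver is located relative to the top of the expression stack.
#include <vector>
#include <string>
#include <cassert>
int main() {
  // Expression stack model: push_back grows the stack, back() is the top slot.
  std::vector<std::string> stack = {"receiver", "arg1"};   // pushed by the caller
  int num_parameters = 3;                                  // receiver + arg1 + appendix
  stack.push_back("appendix");                             // push appendix first
  // the receiver sits num_parameters slots down from the top of the stack
  const std::string& recv = stack[stack.size() - num_parameters];
  assert(recv == "receiver");
  return 0;
}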
void TemplateTable::load_resolved_method_entry_interface(Register cache,
Register klass,
Register method_or_table_index,
Register flags) {
// setup registers
const Register index = rdx;
assert_different_registers(cache, klass, method_or_table_index, flags);
// determine constant pool cache field offsets
resolve_cache_and_index_for_method(f1_byte, cache, index);
__ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
// Invokeinterface can behave in different ways:
// If calling a method from java.lang.Object, the forced virtual flag is true so the invocation will
// behave like an invokevirtual call. The state of the virtual final flag will determine whether a method or
// vtable index is placed in the register.
// Otherwise, the registers will be populated with the klass and method.
Label NotVirtual; Label NotVFinal; Label Done;
__ testl(flags, 1 << ResolvedMethodEntry::is_forced_virtual_shift);
__ jcc(Assembler::zero, NotVirtual);
__ testl(flags, (1 << ResolvedMethodEntry::is_vfinal_shift));
__ jcc(Assembler::zero, NotVFinal);
__ movptr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
__ jmp(Done);
__ bind(NotVFinal);
__ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
__ jmp(Done);
__ bind(NotVirtual);
__ movptr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
__ movptr(klass, Address(cache, in_bytes(ResolvedMethodEntry::klass_offset())));
__ bind(Done);
}
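The branching on the forced-virtual and vfinal bits can be summarized with this standalone sketch (not HotSpot code; the shift values are placeholders):
#include <cstdint>
#include <iostream>
constexpr int kForcedVirtualShift = 4;   // placeholder for is_forced_virtual_shift
constexpr int kVFinalShift        = 5;   // placeholder for is_vfinal_shift
static const char* describe(uint8_t flags) {
  if (flags & (1 << kForcedVirtualShift)) {              // java.lang.Object method
    return (flags & (1 << kVFinalShift)) ? "Method* (final Object method)"
                                         : "vtable index (virtual Object method)";
  }
  return "interface Klass* and Method*";                 // regular interface call
}
int main() {
  std::cout << describe(1 << kForcedVirtualShift) << "\n";
  std::cout << describe((1 << kForcedVirtualShift) | (1 << kVFinalShift)) << "\n";
  std::cout << describe(0) << "\n";
  return 0;
}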
void TemplateTable::load_resolved_method_entry_virtual(Register cache,
Register method_or_table_index,
Register flags) {
// setup registers
const Register index = rdx;
assert_different_registers(index, cache);
assert_different_registers(method_or_table_index, cache, flags);
// determine constant pool cache field offsets
resolve_cache_and_index_for_method(f2_byte, cache, index);
__ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::flags_offset())));
// method_or_table_index can either be a vtable index or a method depending on the virtual final flag
Label isVFinal; Label Done;
__ testl(flags, (1 << ResolvedMethodEntry::is_vfinal_shift));
__ jcc(Assembler::notZero, isVFinal);
__ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
__ jmp(Done);
__ bind(isVFinal);
__ movptr(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
__ bind(Done);
}
// The registers cache and index expected to be set before call.
@ -3631,57 +3682,23 @@ void TemplateTable::fast_xaccess(TosState state) {
//-----------------------------------------------------------------------------
// Calls
void TemplateTable::prepare_invoke(int byte_no,
Register method, // linked method (or i-klass)
Register index, // itable index, MethodType, etc.
Register recv, // if caller wants to see it
Register flags // if caller wants to test it
) {
void TemplateTable::prepare_invoke(Register cache, Register recv, Register flags) {
// determine flags
const Bytecodes::Code code = bytecode();
const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
const bool is_invokehandle = code == Bytecodes::_invokehandle;
const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
const bool is_invokespecial = code == Bytecodes::_invokespecial;
const bool load_receiver = (recv != noreg);
const bool save_flags = (flags != noreg);
assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
assert(flags == noreg || flags == rdx, "");
assert(recv == noreg || recv == rcx, "");
// setup registers & access constant pool cache
if (recv == noreg) recv = rcx;
if (flags == noreg) flags = rdx;
assert_different_registers(method, index, recv, flags);
const bool load_receiver = (code != Bytecodes::_invokestatic) && (code != Bytecodes::_invokedynamic);
assert_different_registers(recv, flags);
// save 'interpreter return address'
__ save_bcp();
load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
// maybe push appendix to arguments (just before return address)
if (is_invokehandle) {
Label L_no_push;
__ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
__ jcc(Assembler::zero, L_no_push);
// Push the appendix as a trailing parameter.
// This must be done before we get the receiver,
// since the parameter_size includes it.
__ push(rbx);
__ mov(rbx, index);
__ load_resolved_reference_at_index(index, rbx);
__ pop(rbx);
__ push(index); // push appendix (MethodType, CallSite, etc.)
__ bind(L_no_push);
}
// Save flags and load TOS
__ movl(rbcp, flags);
__ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedMethodEntry::type_offset())));
// load receiver if needed (after appendix is pushed so parameter size is correct)
// Note: no return address pushed yet
if (load_receiver) {
__ movl(recv, flags);
__ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
__ load_unsigned_short(recv, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));
const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address
const int receiver_is_at_end = -1; // back off one slot to get receiver
Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
@ -3689,14 +3706,6 @@ void TemplateTable::prepare_invoke(int byte_no,
__ verify_oop(recv);
}
if (save_flags) {
__ movl(rbcp, flags);
}
// compute return type
__ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
// Make sure we don't need to mask flags after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();
// load return address
{
const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
@ -3712,12 +3721,10 @@ void TemplateTable::prepare_invoke(int byte_no,
// push return address
__ push(flags);
// Restore flags value from the constant pool cache, and restore rsi
// Restore flags value from the constant pool cache entry, and restore rsi
// for later null checks. r13 is the bytecode pointer
if (save_flags) {
__ movl(flags, rbcp);
__ restore_bcp();
}
__ movl(flags, rbcp);
__ restore_bcp();
}
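A standalone model (not part of the patch) of the return-address selection above: the entry pushed for an invoke comes from a per-bytecode table indexed by the callee's TOS state, which prepare_invoke now reads from the ResolvedMethodEntry type byte.
#include <cassert>
enum TosStateModel { btos, itos, ltos, ftos, dtos, atos, vtos, kTosCount };
int main() {
  // one (fake) return-entry "address" per TOS state for a given invoke bytecode
  long return_entry_table[kTosCount] = {100, 101, 102, 103, 104, 105, 106};
  int tos_state = atos;                         // from ResolvedMethodEntry::type_offset()
  long return_entry = return_entry_table[tos_state];
  assert(return_entry == 105);
  return 0;
}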
void TemplateTable::invokevirtual_helper(Register index,
@ -3731,7 +3738,7 @@ void TemplateTable::invokevirtual_helper(Register index,
// Test for an invoke of a final method
Label notFinal;
__ movl(rax, flags);
__ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
__ andl(rax, (1 << ResolvedMethodEntry::is_vfinal_shift));
__ jcc(Assembler::zero, notFinal);
const Register method = index; // method must be rbx
@ -3767,23 +3774,31 @@ void TemplateTable::invokevirtual_helper(Register index,
void TemplateTable::invokevirtual(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f2_byte, "use this argument");
prepare_invoke(byte_no,
rbx, // method or vtable index
noreg, // unused itable index
rcx, rdx); // recv, flags
load_resolved_method_entry_virtual(rcx, // ResolvedMethodEntry*
rbx, // Method or itable index
rdx); // Flags
prepare_invoke(rcx, // ResolvedMethodEntry*
rcx, // Receiver
rdx); // flags
// rbx: index
// rcx: receiver
// rdx: flags
invokevirtual_helper(rbx, rcx, rdx);
}
void TemplateTable::invokespecial(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(byte_no, rbx, noreg, // get f1 Method*
rcx); // get receiver also for null check
load_resolved_method_entry_special_or_static(rcx, // ResolvedMethodEntry*
rbx, // Method*
rdx); // flags
prepare_invoke(rcx,
rcx, // get receiver also for null check
rdx); // flags
__ verify_oop(rcx);
__ null_check(rcx);
// do the call
@ -3795,7 +3810,13 @@ void TemplateTable::invokespecial(int byte_no) {
void TemplateTable::invokestatic(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(byte_no, rbx); // get f1 Method*
load_resolved_method_entry_special_or_static(rcx, // ResolvedMethodEntry*
rbx, // Method*
rdx // flags
);
prepare_invoke(rcx, rcx, rdx); // cache and flags
// do the call
__ profile_call(rax);
__ profile_arguments_type(rax, rbx, rbcp, false);
@ -3813,13 +3834,12 @@ void TemplateTable::fast_invokevfinal(int byte_no) {
void TemplateTable::invokeinterface(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(byte_no, rax, rbx, // get f1 Klass*, f2 Method*
rcx, rdx); // recv, flags
// rax: reference klass (from f1) if interface method
// rbx: method (from f2)
// rcx: receiver
// rdx: flags
load_resolved_method_entry_interface(rcx, // ResolvedMethodEntry*
rax, // Klass*
rbx, // Method* or itable/vtable index
rdx); // flags
prepare_invoke(rcx, rcx, rdx); // receiver, flags
// First check for Object case, then private interface method,
// then regular interface method.
@ -3828,8 +3848,9 @@ void TemplateTable::invokeinterface(int byte_no) {
// java.lang.Object. See cpCache.cpp for details.
Label notObjectMethod;
__ movl(rlocals, rdx);
__ andl(rlocals, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
__ andl(rlocals, (1 << ResolvedMethodEntry::is_forced_virtual_shift));
__ jcc(Assembler::zero, notObjectMethod);
invokevirtual_helper(rbx, rcx, rdx);
// no return from above
__ bind(notObjectMethod);
@ -3840,7 +3861,7 @@ void TemplateTable::invokeinterface(int byte_no) {
// Check for private method invocation - indicated by vfinal
Label notVFinal;
__ movl(rlocals, rdx);
__ andl(rlocals, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
__ andl(rlocals, (1 << ResolvedMethodEntry::is_vfinal_shift));
__ jcc(Assembler::zero, notVFinal);
// Get receiver klass into rlocals - also a null check
@ -3961,15 +3982,17 @@ void TemplateTable::invokehandle(int byte_no) {
const Register rcx_recv = rcx;
const Register rdx_flags = rdx;
prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv);
load_resolved_method_entry_handle(rcx, rbx_method, rax_mtype, rdx_flags);
prepare_invoke(rcx, rcx_recv, rdx_flags);
__ verify_method_ptr(rbx_method);
__ verify_oop(rcx_recv);
__ null_check(rcx_recv);
// rax: MethodType object (from cpool->resolved_references[f1], if necessary)
// rbx: MH.invokeExact_MT method (from f2)
// rbx: MH.invokeExact_MT method
// Note: rax_mtype is already pushed (if necessary) by prepare_invoke
// Note: rax_mtype is already pushed (if necessary)
// FIXME: profile the LambdaForm also
__ profile_final_call(rax);
@ -3986,10 +4009,10 @@ void TemplateTable::invokedynamic(int byte_no) {
const Register rax_callsite = rax;
load_invokedynamic_entry(rbx_method);
// rax: CallSite object (from cpool->resolved_references[f1])
// rbx: MH.linkToCallSite method (from f2)
// rax: CallSite object (from cpool->resolved_references[])
// rbx: MH.linkToCallSite method
// Note: rax_callsite is already pushed by prepare_invoke
// Note: rax_callsite is already pushed
// %%% should make a type profile for any invokedynamic that takes a ref argument
// profile this call

View File

@ -25,12 +25,9 @@
#ifndef CPU_X86_TEMPLATETABLE_X86_HPP
#define CPU_X86_TEMPLATETABLE_X86_HPP
static void prepare_invoke(int byte_no,
Register method, // linked method (or i-klass)
Register index = noreg, // itable index, MethodType, etc.
Register recv = noreg, // if caller wants to see it
Register flags = noreg // if caller wants to test it
);
static void prepare_invoke(Register cache,
Register recv,
Register flags);
static void invokevirtual_helper(Register index, Register recv,
Register flags);
static void volatile_barrier(Assembler::Membar_mask_bits order_constraint);

View File

@ -1054,11 +1054,8 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_
LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
switch (bc) {
case Bytecodes::_invokehandle: {
int cache_index = ConstantPool::decode_cpcache_index(index, true);
assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");
ConstantPoolCacheEntry* cpce = pool->cache()->entry_at(cache_index);
cpce->set_method_handle(pool, info);
appendix = Handle(current, cpce->appendix_if_resolved(pool)); // just in case somebody already resolved the entry
ResolvedMethodEntry* entry = pool->cache()->set_method_handle(index, info);
appendix = Handle(current, pool->cache()->appendix_if_resolved(entry));
break;
}
case Bytecodes::_invokedynamic: {

View File

@ -1535,11 +1535,11 @@ void ciEnv::process_invokehandle(const constantPoolHandle &cp, int index, JavaTh
Klass* holder = ConstantPool::klass_at_if_loaded(cp, holder_index);
Symbol* name = cp->name_ref_at(index, Bytecodes::_invokehandle);
if (MethodHandles::is_signature_polymorphic_name(holder, name)) {
ConstantPoolCacheEntry* cp_cache_entry = cp->cache()->entry_at(cp->decode_cpcache_index(index));
if (cp_cache_entry->is_resolved(Bytecodes::_invokehandle)) {
ResolvedMethodEntry* method_entry = cp->resolved_method_entry_at(index);
if (method_entry->is_resolved(Bytecodes::_invokehandle)) {
// process the adapter
Method* adapter = cp_cache_entry->f1_as_method();
oop appendix = cp_cache_entry->appendix_if_resolved(cp);
Method* adapter = method_entry->method();
oop appendix = cp->cache()->appendix_if_resolved(method_entry);
record_call_site_method(thread, adapter);
// process the appendix
{
@ -1591,7 +1591,7 @@ void ciEnv::find_dynamic_call_sites() {
process_invokedynamic(pool, index, thread);
} else {
assert(opcode == Bytecodes::_invokehandle, "new switch label added?");
int cp_cache_index = bcs.get_index_u2_cpcache();
int cp_cache_index = bcs.get_index_u2();
process_invokehandle(pool, cp_cache_index, thread);
}
break;

View File

@ -407,15 +407,10 @@ class CompileReplay : public StackObj {
bytecode.verify();
int index = bytecode.index();
ConstantPoolCacheEntry* cp_cache_entry = nullptr;
CallInfo callInfo;
Bytecodes::Code bc = bytecode.invoke_code();
LinkResolver::resolve_invoke(callInfo, Handle(), cp, index, bc, CHECK_NULL);
// ResolvedIndyEntry and ConstantPoolCacheEntry must currently coexist.
// To address this, the variables below contain the values that *might*
// be used to avoid multiple blocks of similar code. When CPCE is obsoleted
// these can be removed
oop appendix = nullptr;
Method* adapter_method = nullptr;
int pool_index = 0;
@ -433,12 +428,10 @@ class CompileReplay : public StackObj {
Symbol* name = cp->name_ref_at(index, bytecode.code());
assert(MethodHandles::is_signature_polymorphic_name(holder, name), "");
#endif
cp_cache_entry = cp->cache()->entry_at(cp->decode_cpcache_index(index));
cp_cache_entry->set_method_handle(cp, callInfo);
appendix = cp_cache_entry->appendix_if_resolved(cp);
adapter_method = cp_cache_entry->f1_as_method();
pool_index = cp_cache_entry->constant_pool_index();
ResolvedMethodEntry* method_entry = cp->cache()->set_method_handle(index, callInfo);
appendix = cp->cache()->appendix_if_resolved(method_entry);
adapter_method = method_entry->method();
pool_index = method_entry->constant_pool_index();
} else {
report_error("no dynamic invoke found");
return nullptr;

View File

@ -356,7 +356,7 @@ int ciBytecodeStream::get_method_index() {
assert(Bytecodes::is_invoke(cur_bc()), "invalid bytecode: %s", Bytecodes::name(cur_bc()));
if (has_index_u4())
return get_index_u4(); // invokedynamic
return get_index_u2_cpcache();
return get_index_u2();
}
// ------------------------------------------------------------------

View File

@ -161,11 +161,6 @@ public:
return bytecode().get_index_u2(cur_bc_raw(), is_wide);
}
// Get 2-byte index in native byte order. (Rewriter::rewrite makes these.)
int get_index_u2_cpcache() const {
return bytecode().get_index_u2_cpcache(cur_bc_raw());
}
// Get 4-byte index, for invokedynamic.
int get_index_u4() const {
return bytecode().get_index_u4(cur_bc_raw());

View File

@ -56,10 +56,9 @@ public:
// index of another entry in the table.
static const int CPCACHE_INDEX_MANGLE_VALUE = 1000000;
// This function is used to encode an index to differentiate it from a
// constant pool index. It assumes it is being called with a cpCache index
// (that is less than 0).
static int encode_cpcache_index(int index) {
// This function is used to encode an invokedynamic index to differentiate it from a
// constant pool index. It assumes it is being called with an index that is less than 0.
static int encode_indy_index(int index) {
assert(index < 0, "Unexpected non-negative cpCache index");
return index + CPCACHE_INDEX_MANGLE_VALUE;
}
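A worked example of the mangling scheme described in the comment (the decode helper below is illustrative, not part of this file): a negative invokedynamic index is shifted by CPCACHE_INDEX_MANGLE_VALUE so it cannot collide with an ordinary, non-negative constant pool index.
#include <cassert>
constexpr int kMangle = 1000000;     // CPCACHE_INDEX_MANGLE_VALUE
static int encode_indy_index(int index) {
  assert(index < 0);
  return index + kMangle;
}
static int decode_indy_index(int encoded) {    // illustrative inverse
  return encoded - kMangle;
}
int main() {
  int raw = -3;                        // encoded invokedynamic index (< 0)
  int key = encode_indy_index(raw);    // 999997, distinguishable from CP indices
  assert(decode_indy_index(key) == raw);
  return 0;
}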

View File

@ -110,7 +110,6 @@
class BootstrapInfo;
class ClassFileStream;
class ConstantPoolCache;
class ConstantPoolCacheEntry;
class Dictionary;
class DumpTimeClassInfo;
class DumpTimeSharedClassTable;

View File

@ -272,7 +272,7 @@ bool AbstractInterpreter::is_not_reached(const methodHandle& method, int bci) {
return false; // might have been reached
}
assert(!invoke_bc.has_index_u4(code), "sanity");
int method_index = invoke_bc.get_index_u2_cpcache(code);
int method_index = invoke_bc.get_index_u2(code);
constantPoolHandle cp(Thread::current(), cpool);
Method* resolved_method = ConstantPool::method_at_if_loaded(cp, method_index);
return (resolved_method == nullptr);
@ -380,7 +380,7 @@ address AbstractInterpreter::deopt_continue_after_entry(Method* method, address
// (NOT needed for the old calling convention)
if (!is_top_frame) {
int index = Bytes::get_native_u2(bcp+1);
method->constants()->cache()->entry_at(index)->set_parameter_size(callee_parameters);
method->constants()->cache()->resolved_method_entry_at(index)->set_num_parameters(callee_parameters);
}
break;
}

View File

@ -74,7 +74,7 @@ bool BootstrapInfo::resolve_previously_linked_invokedynamic(CallInfo& result, TR
Exceptions::wrap_dynamic_exception(/* is_indy */ true, CHECK_false);
return true;
} else if (indy_entry->resolution_failed()) {
int encoded_index = ResolutionErrorTable::encode_cpcache_index(ConstantPool::encode_invokedynamic_index(_indy_index));
int encoded_index = ResolutionErrorTable::encode_indy_index(ConstantPool::encode_invokedynamic_index(_indy_index));
ConstantPool::throw_resolution_error(_pool, encoded_index, CHECK_false); // Doesn't necessarily need to be resolved yet
return true;
} else {

View File

@ -164,32 +164,30 @@ int Bytecode_member_ref::index() const {
Bytecodes::Code rawc = code();
if (has_index_u4(rawc))
return get_index_u4(rawc);
else if (Bytecodes::is_field_code(rawc))
return get_index_u2(rawc);
else
return get_index_u2_cpcache(rawc);
return get_index_u2(rawc);
}
int Bytecode_member_ref::pool_index() const {
if (invoke_code() == Bytecodes::_invokedynamic) {
return resolved_indy_entry()->constant_pool_index();
} else {
return cpcache_entry()->constant_pool_index();
return resolved_method_entry()->constant_pool_index();
}
}
ConstantPoolCacheEntry* Bytecode_member_ref::cpcache_entry() const {
int index = this->index();
assert(invoke_code() != Bytecodes::_invokedynamic, "should not call this");
return cpcache()->entry_at(ConstantPool::decode_cpcache_index(index, true));
}
ResolvedIndyEntry* Bytecode_member_ref::resolved_indy_entry() const {
int index = this->index();
assert(invoke_code() == Bytecodes::_invokedynamic, "should not call this");
return cpcache()->resolved_indy_entry_at(ConstantPool::decode_invokedynamic_index(index));
}
ResolvedMethodEntry* Bytecode_member_ref::resolved_method_entry() const {
int index = this->index();
assert(invoke_code() != Bytecodes::_invokedynamic, "should not call this");
return cpcache()->resolved_method_entry_at(index);
}
// Implementation of Bytecode_field
void Bytecode_field::verify() const {

View File

@ -84,14 +84,6 @@ class Bytecode: public StackObj {
return Bytes::get_Java_u2(p);
}
}
int get_index_u1_cpcache(Bytecodes::Code bc) const {
assert_same_format_as(bc); assert_index_size(1, bc);
return *(u1*)addr_at(1) + ConstantPool::CPCACHE_INDEX_TAG;
}
int get_index_u2_cpcache(Bytecodes::Code bc) const {
assert_same_format_as(bc); assert_index_size(2, bc); assert_native_index(bc);
return Bytes::get_native_u2(addr_at(1)) + ConstantPool::CPCACHE_INDEX_TAG;
}
int get_index_u4(Bytecodes::Code bc) const {
assert_same_format_as(bc); assert_index_size(4, bc);
assert(can_use_native_byte_order(bc), "");
@ -188,8 +180,8 @@ class Bytecode_member_ref: public Bytecode {
const Method* method() const { return _method; }
ConstantPool* constants() const { return _method->constants(); }
ConstantPoolCache* cpcache() const { return _method->constants()->cache(); }
ConstantPoolCacheEntry* cpcache_entry() const;
ResolvedIndyEntry* resolved_indy_entry() const;
ResolvedMethodEntry* resolved_method_entry() const;
public:
int index() const; // cache index (loaded from instruction)

View File

@ -33,7 +33,7 @@ inline bool Bytecode_invoke::has_appendix() {
if (invoke_code() == Bytecodes::_invokedynamic) {
return resolved_indy_entry()->has_appendix();
} else {
return cpcache_entry()->has_appendix();
return resolved_method_entry()->has_appendix();
}
}

View File

@ -222,9 +222,6 @@ class BytecodeStream: public BaseBytecodeStream {
// Get an unsigned 2-byte index, swapping the bytes if necessary.
u2 get_index_u2() const { assert_raw_stream(false);
return bytecode().get_index_u2(raw_code(), false); }
// Get an unsigned 2-byte index in native order.
int get_index_u2_cpcache() const { assert_raw_stream(false);
return bytecode().get_index_u2_cpcache(raw_code()); }
int get_index_u4() const { assert_raw_stream(false);
return bytecode().get_index_u4(raw_code()); }
bool has_index_u4() const { return bytecode().has_index_u4(raw_code()); }

View File

@ -37,6 +37,7 @@
#include "oops/method.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
@ -326,13 +327,6 @@ void BytecodePrinter::print_bsm(int cp_index, outputStream* st) {
st->print_cr(" }");
}
void BytecodePrinter::print_cpcache_entry(int cpc_index, outputStream* st) {
ConstantPool* constants = method()->constants();
ConstantPoolCacheEntry* cpce = constants->cache()->entry_at(cpc_index);
st->print(" ConstantPoolCacheEntry: ");
cpce->print(st, cpc_index, constants->cache());
}
void BytecodePrinter::print_attributes(int bci, outputStream* st) {
// Show attributes of pre-rewritten codes
Bytecodes::Code code = Bytecodes::java_code(raw_code());
@ -516,20 +510,24 @@ void BytecodePrinter::print_attributes(int bci, outputStream* st) {
case Bytecodes::_invokestatic:
{
int cp_index;
int cpcache_index;
if (is_linked()) {
cpcache_index = get_native_index_u2();
cp_index = cpcache()->entry_at(cpcache_index)->constant_pool_index();
int method_index = get_native_index_u2();
ResolvedMethodEntry* method_entry = cpcache()->resolved_method_entry_at(method_index);
cp_index = method_entry->constant_pool_index();
print_field_or_method(cp_index, st);
if (raw_code() == Bytecodes::_invokehandle &&
ClassPrinter::has_mode(_flags, ClassPrinter::PRINT_METHOD_HANDLE)) {
assert(is_linked(), "invokehandle is only in rewritten methods");
method_entry->print_on(st);
if (method_entry->has_appendix()) {
st->print(" appendix: ");
constants()->resolved_reference_from_method(method_index)->print_on(st);
}
}
} else {
cpcache_index = -1;
cp_index = get_Java_index_u2();
}
print_field_or_method(cp_index, st);
if (raw_code() == Bytecodes::_invokehandle &&
ClassPrinter::has_mode(_flags, ClassPrinter::PRINT_METHOD_HANDLE)) {
assert(is_linked(), "invokehandle is only in rewritten methods");
assert(cpcache_index >= 0, "must be");
print_cpcache_entry(cpcache_index, st);
print_field_or_method(cp_index, st);
}
}
break;
@ -538,8 +536,8 @@ void BytecodePrinter::print_attributes(int bci, outputStream* st) {
{
int cp_index;
if (is_linked()) {
int cpcache_index = get_native_index_u2();
cp_index = cpcache()->entry_at(cpcache_index)->constant_pool_index();
int method_index = get_native_index_u2();
cp_index = cpcache()->resolved_method_entry_at(method_index)->constant_pool_index();
} else {
cp_index = get_Java_index_u2();
}

View File

@ -1003,7 +1003,7 @@ int ExceptionMessageBuilder::do_instruction(int bci) {
if (code == Bytecodes::_invokedynamic) {
cp_index = ((int) Bytes::get_native_u4(code_base + pos));
} else {
cp_index = Bytes::get_native_u2(code_base + pos) DEBUG_ONLY(+ ConstantPool::CPCACHE_INDEX_TAG);
cp_index = Bytes::get_native_u2(code_base + pos);
}
int name_and_type_index = cp->name_and_type_ref_index_at(cp_index, code);
@ -1143,7 +1143,7 @@ int ExceptionMessageBuilder::get_NPE_null_slot(int bci) {
case Bytecodes::_invokevirtual:
case Bytecodes::_invokespecial:
case Bytecodes::_invokeinterface: {
int cp_index = Bytes::get_native_u2(code_base+ pos) DEBUG_ONLY(+ ConstantPool::CPCACHE_INDEX_TAG);
int cp_index = Bytes::get_native_u2(code_base+ pos);
ConstantPool* cp = _method->constants();
int name_and_type_index = cp->name_and_type_ref_index_at(cp_index, code);
int name_index = cp->name_ref_index_at(name_and_type_index);
@ -1346,7 +1346,7 @@ bool ExceptionMessageBuilder::print_NPE_cause0(outputStream* os, int bci, int sl
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
case Bytecodes::_invokeinterface: {
int cp_index = Bytes::get_native_u2(code_base + pos) DEBUG_ONLY(+ ConstantPool::CPCACHE_INDEX_TAG);
int cp_index = Bytes::get_native_u2(code_base + pos);
if (max_detail == _max_cause_detail && !inner_expr) {
os->print(" because the return value of \"");
}
@ -1428,7 +1428,7 @@ void ExceptionMessageBuilder::print_NPE_failed_action(outputStream *os, int bci)
case Bytecodes::_invokevirtual:
case Bytecodes::_invokespecial:
case Bytecodes::_invokeinterface: {
int cp_index = Bytes::get_native_u2(code_base+ pos) DEBUG_ONLY(+ ConstantPool::CPCACHE_INDEX_TAG);
int cp_index = Bytes::get_native_u2(code_base+ pos);
os->print("Cannot invoke \"");
print_method_name(os, _method, cp_index, code);
os->print("\"");

View File

@ -104,13 +104,8 @@ public:
Bytecode bytecode() const { return Bytecode(method(), bcp()); }
int get_index_u1(Bytecodes::Code bc) const { return bytecode().get_index_u1(bc); }
int get_index_u2(Bytecodes::Code bc) const { return bytecode().get_index_u2(bc); }
int get_index_u2_cpcache(Bytecodes::Code bc) const
{ return bytecode().get_index_u2_cpcache(bc); }
int get_index_u4(Bytecodes::Code bc) const { return bytecode().get_index_u4(bc); }
int number_of_dimensions() const { return bcp()[3]; }
ConstantPoolCacheEntry* cache_entry_at(int i) const
{ return method()->constants()->cache()->entry_at(i); }
ConstantPoolCacheEntry* cache_entry() const { return cache_entry_at(Bytes::get_native_u2(bcp() + 1)); }
oop callee_receiver(Symbol* signature) {
return _last_frame.interpreter_callee_receiver(signature);
@ -207,8 +202,8 @@ JRT_ENTRY(void, InterpreterRuntime::resolve_ldc(JavaThread* current, Bytecodes::
// Tell the interpreter how to unbox the primitive.
guarantee(java_lang_boxing_object::is_instance(result, type), "");
int offset = java_lang_boxing_object::value_offset(type);
intptr_t flags = ((as_TosState(type) << ConstantPoolCacheEntry::tos_state_shift)
| (offset & ConstantPoolCacheEntry::field_index_mask));
intptr_t flags = ((as_TosState(type) << ConstantPoolCache::tos_state_shift)
| (offset & ConstantPoolCache::field_index_mask));
current->set_vm_result_2((Metadata*)flags);
}
}
@ -840,14 +835,16 @@ void InterpreterRuntime::resolve_invoke(JavaThread* current, Bytecodes::Code byt
// resolve method
CallInfo info;
constantPoolHandle pool(current, last_frame.method()->constants());
ConstantPoolCache* cache = pool->cache();
methodHandle resolved_method;
int method_index = last_frame.get_index_u2(bytecode);
{
JvmtiHideSingleStepping jhss(current);
JavaThread* THREAD = current; // For exception macros.
LinkResolver::resolve_invoke(info, receiver, pool,
last_frame.get_index_u2_cpcache(bytecode), bytecode,
method_index, bytecode,
THREAD);
if (HAS_PENDING_EXCEPTION) {
@ -868,8 +865,7 @@ void InterpreterRuntime::resolve_invoke(JavaThread* current, Bytecodes::Code byt
} // end JvmtiHideSingleStepping
// check if link resolution caused cpCache to be updated
ConstantPoolCacheEntry* cp_cache_entry = last_frame.cache_entry();
if (cp_cache_entry->is_resolved(bytecode)) return;
if (cache->resolved_method_entry_at(method_index)->is_resolved(bytecode)) return;
#ifdef ASSERT
if (bytecode == Bytecodes::_invokeinterface) {
@ -903,20 +899,15 @@ void InterpreterRuntime::resolve_invoke(JavaThread* current, Bytecodes::Code byt
switch (info.call_kind()) {
case CallInfo::direct_call:
cp_cache_entry->set_direct_call(
bytecode,
resolved_method,
sender->is_interface());
cache->set_direct_call(bytecode, method_index, resolved_method, sender->is_interface());
break;
case CallInfo::vtable_call:
cp_cache_entry->set_vtable_call(
bytecode,
resolved_method,
info.vtable_index());
cache->set_vtable_call(bytecode, method_index, resolved_method, info.vtable_index());
break;
case CallInfo::itable_call:
cp_cache_entry->set_itable_call(
cache->set_itable_call(
bytecode,
method_index,
info.resolved_klass(),
resolved_method,
info.itable_index());
@ -934,16 +925,16 @@ void InterpreterRuntime::resolve_invokehandle(JavaThread* current) {
// resolve method
CallInfo info;
constantPoolHandle pool(current, last_frame.method()->constants());
int method_index = last_frame.get_index_u2(bytecode);
{
JvmtiHideSingleStepping jhss(current);
JavaThread* THREAD = current; // For exception macros.
LinkResolver::resolve_invoke(info, Handle(), pool,
last_frame.get_index_u2_cpcache(bytecode), bytecode,
method_index, bytecode,
CHECK);
} // end JvmtiHideSingleStepping
ConstantPoolCacheEntry* cp_cache_entry = last_frame.cache_entry();
cp_cache_entry->set_method_handle(pool, info);
pool->cache()->set_method_handle(method_index, info);
}
// First time execution: Resolve symbols, create a permanent CallSite object.
@ -1501,7 +1492,7 @@ JRT_ENTRY(void, InterpreterRuntime::member_name_arg_or_null(JavaThread* current,
return;
}
ConstantPool* cpool = method->constants();
int cp_index = Bytes::get_native_u2(bcp + 1) + ConstantPool::CPCACHE_INDEX_TAG;
int cp_index = Bytes::get_native_u2(bcp + 1);
Symbol* cname = cpool->klass_name_at(cpool->klass_ref_index_at(cp_index, code));
Symbol* mname = cpool->name_ref_at(cp_index, code);

View File

@ -1699,12 +1699,11 @@ void LinkResolver::resolve_invokeinterface(CallInfo& result, Handle recv, const
}
bool LinkResolver::resolve_previously_linked_invokehandle(CallInfo& result, const LinkInfo& link_info, const constantPoolHandle& pool, int index, TRAPS) {
int cache_index = ConstantPool::decode_cpcache_index(index, true);
ConstantPoolCacheEntry* cpce = pool->cache()->entry_at(cache_index);
if (!cpce->is_f1_null()) {
ResolvedMethodEntry* method_entry = pool->cache()->resolved_method_entry_at(index);
if (method_entry->method() != nullptr) {
Klass* resolved_klass = link_info.resolved_klass();
methodHandle method(THREAD, cpce->f1_as_method());
Handle appendix(THREAD, cpce->appendix_if_resolved(pool));
methodHandle method(THREAD, method_entry->method());
Handle appendix(THREAD, pool->cache()->appendix_if_resolved(method_entry));
result.set_handle(resolved_klass, method, appendix, CHECK_false);
JFR_ONLY(Jfr::on_resolution(result, CHECK_false);)
return true;

View File

@ -35,6 +35,7 @@
#include "oops/generateOopMap.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/handles.inline.hpp"
@ -58,7 +59,9 @@ void Rewriter::compute_index_maps() {
break;
case JVM_CONSTANT_InterfaceMethodref: // fall through
case JVM_CONSTANT_Methodref :
add_cp_cache_entry(i);
_cp_map.at_put(i, _method_entry_index);
_method_entry_index++;
_initialized_method_entries.push(ResolvedMethodEntry((u2)i));
break;
case JVM_CONSTANT_Dynamic:
assert(_pool->has_dynamic_constant(), "constant pool's _has_dynamic_constant flag not set");
@ -81,8 +84,8 @@ void Rewriter::compute_index_maps() {
// Record limits of resolved reference map for constant pool cache indices
record_map_limits();
guarantee((int) _cp_cache_map.length() - 1 <= (int) ((u2)-1),
"all cp cache indexes fit in a u2");
guarantee(_initialized_field_entries.length() - 1 <= (int)((u2)-1), "All resolved field indices fit in a u2");
guarantee(_initialized_method_entries.length() - 1 <= (int)((u2)-1), "All resolved method indices fit in a u2");
if (saw_mh_symbol) {
_method_handle_invokers.at_grow(length, 0);
@ -104,9 +107,12 @@ void Rewriter::restore_bytecodes(Thread* thread) {
// Creates a constant pool cache given a CPC map
void Rewriter::make_constant_pool_cache(TRAPS) {
ClassLoaderData* loader_data = _pool->pool_holder()->class_loader_data();
assert(_field_entry_index == _initialized_field_entries.length(), "Field entry size mismatch");
assert(_method_entry_index == _initialized_method_entries.length(), "Method entry size mismatch");
ConstantPoolCache* cache =
ConstantPoolCache::allocate(loader_data, _cp_cache_map,
_invokedynamic_references_map, _initialized_indy_entries, _initialized_field_entries, CHECK);
ConstantPoolCache::allocate(loader_data, _invokedynamic_references_map,
_initialized_indy_entries, _initialized_field_entries, _initialized_method_entries,
CHECK);
// initialize object cache in constant pool
_pool->set_cache(cache);
@ -124,8 +130,6 @@ void Rewriter::make_constant_pool_cache(TRAPS) {
// We are linking a shared class from the base archive. This
// class won't be written into the dynamic archive, so there's no
// need to save its CpCaches.
} else {
cache->save_for_archive(THREAD);
}
}
#endif
@ -194,21 +198,22 @@ void Rewriter::rewrite_field_reference(address bcp, int offset, bool reverse) {
}
}
// Rewrite a classfile-order CP index into a native-order CPC index.
void Rewriter::rewrite_member_reference(address bcp, int offset, bool reverse) {
void Rewriter::rewrite_method_reference(address bcp, int offset, bool reverse) {
address p = bcp + offset;
if (!reverse) {
int cp_index = Bytes::get_Java_u2(p);
int cache_index = cp_entry_to_cp_cache(cp_index);
Bytes::put_native_u2(p, (u2)cache_index);
if (!_method_handle_invokers.is_empty())
maybe_rewrite_invokehandle(p - 1, cp_index, cache_index, reverse);
int method_entry_index = _cp_map.at(cp_index);
Bytes::put_native_u2(p, (u2)method_entry_index);
if (!_method_handle_invokers.is_empty()) {
maybe_rewrite_invokehandle(p - 1, cp_index, method_entry_index, reverse);
}
} else {
int cache_index = Bytes::get_native_u2(p);
int pool_index = cp_cache_entry_pool_index(cache_index);
int method_entry_index = Bytes::get_native_u2(p);
int pool_index = _initialized_method_entries.at(method_entry_index).constant_pool_index();
Bytes::put_Java_u2(p, (u2)pool_index);
if (!_method_handle_invokers.is_empty())
maybe_rewrite_invokehandle(p - 1, pool_index, cache_index, reverse);
if (!_method_handle_invokers.is_empty()) {
maybe_rewrite_invokehandle(p - 1, pool_index, method_entry_index, reverse);
}
}
}
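A standalone sketch (not HotSpot code) of the two-way rewrite above: forward, the big-endian classfile-order constant pool index in the operand is replaced by a native-order index into the method-entry table; in reverse, the original pool index is restored from the entry. Names are illustrative.
#include <cstdint>
#include <cstring>
#include <vector>
#include <cassert>
static uint16_t get_java_u2(const uint8_t* p) {           // big-endian read
  return uint16_t((p[0] << 8) | p[1]);
}
static void put_java_u2(uint8_t* p, uint16_t v) {
  p[0] = uint8_t(v >> 8); p[1] = uint8_t(v);
}
static uint16_t get_native_u2(const uint8_t* p) {
  uint16_t v; std::memcpy(&v, p, 2); return v;
}
static void put_native_u2(uint8_t* p, uint16_t v) {
  std::memcpy(p, &v, 2);
}
struct EntryModel { uint16_t cp_index; };                  // stand-in for ResolvedMethodEntry
int main() {
  std::vector<EntryModel> entries;
  std::vector<uint16_t> cp_map(32, 0xFFFF);                // cp index -> entry index
  cp_map[17] = 0; entries.push_back({17});
  uint8_t operand[2]; put_java_u2(operand, 17);            // classfile-order operand
  // forward rewrite: replace the CP index with a native-order entry index
  uint16_t cp_index = get_java_u2(operand);
  put_native_u2(operand, cp_map[cp_index]);
  // reverse rewrite: restore the classfile-order CP index from the entry
  uint16_t entry_index = get_native_u2(operand);
  put_java_u2(operand, entries[entry_index].cp_index);
  assert(get_java_u2(operand) == 17);
  return 0;
}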
@ -221,27 +226,27 @@ void Rewriter::rewrite_invokespecial(address bcp, int offset, bool reverse, bool
if (!reverse) {
int cp_index = Bytes::get_Java_u2(p);
if (_pool->tag_at(cp_index).is_interface_method()) {
int cache_index = add_invokespecial_cp_cache_entry(cp_index);
if (cache_index != (int)(jushort) cache_index) {
_initialized_method_entries.push(ResolvedMethodEntry((u2)cp_index));
Bytes::put_native_u2(p, (u2)_method_entry_index);
_method_entry_index++;
if (_method_entry_index != (int)(u2)_method_entry_index) {
*invokespecial_error = true;
}
Bytes::put_native_u2(p, (u2)cache_index);
} else {
rewrite_member_reference(bcp, offset, reverse);
rewrite_method_reference(bcp, offset, reverse);
}
} else {
rewrite_member_reference(bcp, offset, reverse);
rewrite_method_reference(bcp, offset, reverse);
}
}
// Adjust the invocation bytecode for a signature-polymorphic method (MethodHandle.invoke, etc.)
void Rewriter::maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse) {
if (!reverse) {
if ((*opc) == (u1)Bytecodes::_invokevirtual ||
// allow invokespecial as an alias, although it would be very odd:
(*opc) == (u1)Bytecodes::_invokespecial) {
assert(_pool->tag_at(cp_index).is_method(), "wrong index");
((*opc) == (u1)Bytecodes::_invokespecial)) {
assert(_pool->tag_at(cp_index).is_method(), "wrong index");
// Determine whether this is a signature-polymorphic method.
if (cp_index >= _method_handle_invokers.length()) return;
int status = _method_handle_invokers.at(cp_index);
@ -251,13 +256,15 @@ void Rewriter::maybe_rewrite_invokehandle(address opc, int cp_index, int cache_i
MethodHandles::is_signature_polymorphic_name(vmClasses::MethodHandle_klass(),
_pool->uncached_name_ref_at(cp_index))) {
// we may need a resolved_refs entry for the appendix
add_invokedynamic_resolved_references_entry(cp_index, cache_index);
int resolved_index = add_invokedynamic_resolved_references_entry(cp_index, cache_index);
_initialized_method_entries.at(cache_index).set_resolved_references_index((u2)resolved_index);
status = +1;
} else if (_pool->uncached_klass_ref_at_noresolve(cp_index) == vmSymbols::java_lang_invoke_VarHandle() &&
MethodHandles::is_signature_polymorphic_name(vmClasses::VarHandle_klass(),
_pool->uncached_name_ref_at(cp_index))) {
// we may need a resolved_refs entry for the appendix
add_invokedynamic_resolved_references_entry(cp_index, cache_index);
int resolved_index = add_invokedynamic_resolved_references_entry(cp_index, cache_index);
_initialized_method_entries.at(cache_index).set_resolved_references_index((u2)resolved_index);
status = +1;
} else {
status = -1;
@ -471,7 +478,7 @@ void Rewriter::scan_method(Thread* thread, Method* method, bool reverse, bool* i
case Bytecodes::_invokestatic :
case Bytecodes::_invokeinterface:
case Bytecodes::_invokehandle : // if reverse=true
rewrite_member_reference(bcp, prefix_length+1, reverse);
rewrite_method_reference(bcp, prefix_length+1, reverse);
break;
case Bytecodes::_invokedynamic:
rewrite_invokedynamic(bcp, prefix_length+1, reverse);
@ -577,13 +584,13 @@ Rewriter::Rewriter(InstanceKlass* klass, const constantPoolHandle& cpool, Array<
_pool(cpool),
_methods(methods),
_cp_map(cpool->length()),
_cp_cache_map(cpool->length() / 2),
_reference_map(cpool->length()),
_resolved_references_map(cpool->length() / 2),
_invokedynamic_references_map(cpool->length() / 2),
_method_handle_invokers(cpool->length()),
_invokedynamic_index(0),
_field_entry_index(0)
_field_entry_index(0),
_method_entry_index(0)
{
// Rewrite bytecodes - exception here exits.

View File

@ -29,6 +29,7 @@
#include "oops/constantPool.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "utilities/growableArray.hpp"
// The Rewriter adds caches to the constant pool and rewrites bytecode indices
@ -40,8 +41,6 @@ class Rewriter: public StackObj {
constantPoolHandle _pool;
Array<Method*>* _methods;
GrowableArray<int> _cp_map;
GrowableArray<int> _cp_cache_map; // for Methodref, Fieldref,
// InterfaceMethodref and InvokeDynamic
GrowableArray<int> _reference_map; // maps from cp index to resolved_refs index (or -1)
GrowableArray<int> _resolved_references_map; // for strings, methodHandle, methodType
GrowableArray<int> _invokedynamic_references_map; // for invokedynamic resolved refs
@ -49,17 +48,20 @@ class Rewriter: public StackObj {
int _resolved_reference_limit;
int _invokedynamic_index;
int _field_entry_index;
int _method_entry_index;
// For collecting information about invokedynamic bytecodes before resolution
// With this, we can know how many indy calls there are and resolve them later
// For collecting initialization information for field, method, and invokedynamic
// constant pool cache entries. The number of entries of each type will be known
// at the end of rewriting and these arrays will be used to build the proper arrays
// in the Constant Pool Cache.
GrowableArray<ResolvedIndyEntry> _initialized_indy_entries;
GrowableArray<ResolvedFieldEntry> _initialized_field_entries;
GrowableArray<ResolvedMethodEntry> _initialized_method_entries;
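A standalone sketch (not part of the patch) of the collection scheme the comment above describes: entries are gathered in growable arrays during rewriting, and the fixed-size cache arrays are built only once the final counts are known.
#include <vector>
#include <cassert>
struct FieldEntryModel  { int cp_index; };    // stand-ins for the Resolved*Entry types
struct MethodEntryModel { int cp_index; };
int main() {
  std::vector<FieldEntryModel>  fields;       // grow while scanning the constant pool
  std::vector<MethodEntryModel> methods;
  fields.push_back({3});
  methods.push_back({7});
  methods.push_back({9});
  // exact-size arrays built at the end (stand-ins for Array<ResolvedFieldEntry> etc.)
  std::vector<FieldEntryModel>  field_cache(fields.begin(), fields.end());
  std::vector<MethodEntryModel> method_cache(methods.begin(), methods.end());
  assert(field_cache.size() == 1 && method_cache.size() == 2);
  return 0;
}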
void init_maps(int length) {
_cp_map.trunc_to(0);
_cp_map.at_grow(length, -1);
_cp_cache_map.trunc_to(0);
// Also cache resolved objects, in another different cache.
_reference_map.trunc_to(0);
_reference_map.at_grow(length, -1);
@ -68,24 +70,14 @@ class Rewriter: public StackObj {
_resolved_references_map.trunc_to(0);
_invokedynamic_references_map.trunc_to(0);
_resolved_reference_limit = -1;
_first_iteration_cp_cache_limit = -1;
}
int _first_iteration_cp_cache_limit;
void record_map_limits() {
// Record initial size of the two arrays generated for the CP cache
// relative to walking the constant pool.
_first_iteration_cp_cache_limit = _cp_cache_map.length();
_resolved_reference_limit = _resolved_references_map.length();
}
int cp_cache_delta() {
// How many cp cache entries were added since recording map limits after
// cp cache initialization?
assert(_first_iteration_cp_cache_limit != -1, "only valid after first iteration");
return _cp_cache_map.length() - _first_iteration_cp_cache_limit;
}
int cp_entry_to_cp_cache(int i) { assert(has_cp_cache(i), "oob"); return _cp_map.at(i); }
bool has_cp_cache(int i) { return (uint) i < (uint) _cp_map.length() && _cp_map.at(i) >= 0; }
@ -96,32 +88,6 @@ class Rewriter: public StackObj {
return cache_index;
}
int add_cp_cache_entry(int cp_index) {
assert(_pool->tag_at(cp_index).value() != JVM_CONSTANT_InvokeDynamic, "use indy version");
assert(_first_iteration_cp_cache_limit == -1, "do not add cache entries after first iteration");
int cache_index = add_map_entry(cp_index, &_cp_map, &_cp_cache_map);
assert(cp_entry_to_cp_cache(cp_index) == cache_index, "");
assert(cp_cache_entry_pool_index(cache_index) == cp_index, "");
return cache_index;
}
// add a new CP cache entry beyond the normal cache for the special case of
// invokespecial with InterfaceMethodref as cpool operand.
int add_invokespecial_cp_cache_entry(int cp_index) {
assert(_first_iteration_cp_cache_limit >= 0, "add these special cache entries after first iteration");
// Don't add InterfaceMethodref if it already exists at the end.
for (int i = _first_iteration_cp_cache_limit; i < _cp_cache_map.length(); i++) {
if (cp_cache_entry_pool_index(i) == cp_index) {
return i;
}
}
int cache_index = _cp_cache_map.append(cp_index);
assert(cache_index >= _first_iteration_cp_cache_limit, "");
// do not update _cp_map, since the mapping is one-to-many
assert(cp_cache_entry_pool_index(cache_index) == cp_index, "");
return cache_index;
}
int cp_entry_to_resolved_references(int cp_index) const {
assert(has_entry_in_resolved_references(cp_index), "oob");
return _reference_map.at(cp_index);
@ -153,12 +119,6 @@ class Rewriter: public StackObj {
return cp_index;
}
// Access the contents of _cp_cache_map to determine CP cache layout.
int cp_cache_entry_pool_index(int cache_index) {
int cp_index = _cp_cache_map.at(cache_index);
return cp_index;
}
// All the work goes in here:
Rewriter(InstanceKlass* klass, const constantPoolHandle& cpool, Array<Method*>* methods, TRAPS);
@ -167,6 +127,7 @@ class Rewriter: public StackObj {
void scan_method(Thread* thread, Method* m, bool reverse, bool* invokespecial_error);
void rewrite_Object_init(const methodHandle& m, TRAPS);
void rewrite_field_reference(address bcp, int offset, bool reverse);
void rewrite_method_reference(address bcp, int offset, bool reverse);
void rewrite_member_reference(address bcp, int offset, bool reverse);
void maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse);
void rewrite_invokedynamic(address bcp, int offset, bool reverse);

View File

@ -262,13 +262,12 @@ class TemplateTable: AllStatic {
static void _return(TosState state);
static void resolve_cache_and_index(int byte_no, // one of 1,2,11
Register cache, // output for CP cache
Register index, // output for CP index
size_t index_size); // one of 1,2,4
static void resolve_cache_and_index_for_field(int byte_no,
Register cache,
Register index);
static void resolve_cache_and_index_for_method(int byte_no,
Register cache,
Register index);
static void load_invokedynamic_entry(Register method);
static void load_resolved_field_entry(Register obj,
Register cache,
@ -276,6 +275,20 @@ class TemplateTable: AllStatic {
Register off,
Register flags,
bool is_static);
static void load_resolved_method_entry_special_or_static(Register cache,
Register method,
Register flags);
static void load_resolved_method_entry_handle(Register cache,
Register method,
Register ref_index,
Register flags);
static void load_resolved_method_entry_interface(Register cache,
Register klass,
Register method_or_table_index,
Register flags);
static void load_resolved_method_entry_virtual(Register cache,
Register method_or_table_index,
Register flags);
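// The four helpers above presumably load, from the ResolvedMethodEntry selected
// by the bytecode operand, just the fields each invoke kind needs: flags and the
// Method* for invokespecial/invokestatic, the resolved-references (appendix)
// index for invokehandle, the interface Klass* plus a Method* or itable index
// for invokeinterface, and a Method* or vtable index for invokevirtual.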
static void load_invoke_cp_cache_entry(int byte_no,
Register method,
Register itable_index,

View File

@ -48,6 +48,7 @@
#include "oops/oop.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
@ -2275,20 +2276,20 @@ run:
CASE(_invokehandle): {
u2 index = Bytes::get_native_u2(pc+1);
ConstantPoolCacheEntry* cache = cp->entry_at(index);
ResolvedMethodEntry* entry = cp->resolved_method_entry_at(index);
if (! cache->is_resolved((Bytecodes::Code) opcode)) {
if (! entry->is_resolved((Bytecodes::Code) opcode)) {
CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
handle_exception);
cache = cp->entry_at(index);
entry = cp->resolved_method_entry_at(index);
}
Method* method = cache->f1_as_method();
Method* method = entry->method();
if (VerifyOops) method->verify();
if (cache->has_appendix()) {
if (entry->has_appendix()) {
constantPoolHandle cp(THREAD, METHOD->constants());
SET_STACK_OBJECT(cache->appendix_if_resolved(cp), 0);
SET_STACK_OBJECT(cp->cache()->appendix_if_resolved(entry), 0);
MORE_STACK(1);
}
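// The appendix, when present, lives in the shared resolved-references array at
// the entry's resolved_references_index() and is pushed as a trailing argument,
// matching the appendix_if_resolved() helpers added to ConstantPoolCache below.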
@ -2306,11 +2307,10 @@ run:
// QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
// out so c++ compiler has a chance for constant prop to fold everything possible away.
ConstantPoolCacheEntry* cache = cp->entry_at(index);
if (!cache->is_resolved((Bytecodes::Code)opcode)) {
ResolvedMethodEntry* entry = cp->resolved_method_entry_at(index);
if (!entry->is_resolved((Bytecodes::Code)opcode)) {
CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
handle_exception);
cache = cp->entry_at(index);
}
istate->set_msg(call_method);
@ -2318,31 +2318,31 @@ run:
// Special case of invokeinterface called for virtual method of
// java.lang.Object. See cpCache.cpp for details.
Method* callee = nullptr;
if (cache->is_forced_virtual()) {
CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
if (cache->is_vfinal()) {
callee = cache->f2_as_vfinal_method();
if (entry->is_forced_virtual()) {
CHECK_NULL(STACK_OBJECT(-(entry->number_of_parameters())));
if (entry->is_vfinal()) {
callee = entry->method();
} else {
// Get receiver.
int parms = cache->parameter_size();
int parms = entry->number_of_parameters();
// Same comments as invokevirtual apply here.
oop rcvr = STACK_OBJECT(-parms);
VERIFY_OOP(rcvr);
Klass* rcvrKlass = rcvr->klass();
callee = (Method*) rcvrKlass->method_at_vtable(cache->f2_as_index());
callee = (Method*) rcvrKlass->method_at_vtable(entry->table_index());
}
} else if (cache->is_vfinal()) {
} else if (entry->is_vfinal()) {
// private interface method invocations
//
// Ensure receiver class actually implements
// the resolved interface class. The link resolver
// does this, but only for the first time this
// interface is being called.
int parms = cache->parameter_size();
int parms = entry->number_of_parameters();
oop rcvr = STACK_OBJECT(-parms);
CHECK_NULL(rcvr);
Klass* recv_klass = rcvr->klass();
Klass* resolved_klass = cache->f1_as_klass();
Klass* resolved_klass = entry->interface_klass();
if (!recv_klass->is_subtype_of(resolved_klass)) {
ResourceMark rm(THREAD);
char buf[200];
@ -2351,7 +2351,7 @@ run:
resolved_klass->external_name());
VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
}
callee = cache->f2_as_vfinal_method();
callee = entry->method();
}
if (callee != nullptr) {
istate->set_callee(callee);
@ -2364,18 +2364,18 @@ run:
}
// this could definitely be cleaned up QQQ
Method *interface_method = cache->f2_as_interface_method();
Method *interface_method = entry->method();
InstanceKlass* iclass = interface_method->method_holder();
// get receiver
int parms = cache->parameter_size();
int parms = entry->number_of_parameters();
oop rcvr = STACK_OBJECT(-parms);
CHECK_NULL(rcvr);
InstanceKlass* int2 = (InstanceKlass*) rcvr->klass();
// Receiver subtype check against resolved interface klass (REFC).
{
Klass* refc = cache->f1_as_klass();
Klass* refc = entry->interface_klass();
itableOffsetEntry* scan;
for (scan = (itableOffsetEntry*) int2->start_of_itable();
scan->interface_klass() != nullptr;
@ -2428,30 +2428,30 @@ run:
CASE(_invokestatic): {
u2 index = Bytes::get_native_u2(pc+1);
ConstantPoolCacheEntry* cache = cp->entry_at(index);
ResolvedMethodEntry* entry = cp->resolved_method_entry_at(index);
// QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
// out so c++ compiler has a chance for constant prop to fold everything possible away.
if (!cache->is_resolved((Bytecodes::Code)opcode)) {
if (!entry->is_resolved((Bytecodes::Code)opcode)) {
CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
handle_exception);
cache = cp->entry_at(index);
entry = cp->resolved_method_entry_at(index);
}
istate->set_msg(call_method);
{
Method* callee;
if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) {
CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
if (cache->is_vfinal()) {
callee = cache->f2_as_vfinal_method();
CHECK_NULL(STACK_OBJECT(-(entry->number_of_parameters())));
if (entry->is_vfinal()) {
callee = entry->method();
if (REWRITE_BYTECODES && !UseSharedSpaces && !CDSConfig::is_dumping_archive()) {
// Rewrite to _fast_invokevfinal.
REWRITE_AT_PC(Bytecodes::_fast_invokevfinal);
}
} else {
// get receiver
int parms = cache->parameter_size();
int parms = entry->number_of_parameters();
// this works but needs a resourcemark and seems to create a vtable on every call:
// Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index());
//
@ -2477,13 +2477,13 @@ run:
However it seems to have a vtable in the right location. Huh?
Because vtables have the same offset for ArrayKlass and InstanceKlass.
*/
callee = (Method*) rcvrKlass->method_at_vtable(cache->f2_as_index());
callee = (Method*) rcvrKlass->method_at_vtable(entry->table_index());
}
} else {
if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) {
CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
CHECK_NULL(STACK_OBJECT(-(entry->number_of_parameters())));
}
callee = cache->f1_as_method();
callee = entry->method();
}
istate->set_callee(callee);
@ -2888,14 +2888,14 @@ run:
CASE(_fast_invokevfinal): {
u2 index = Bytes::get_native_u2(pc+1);
ConstantPoolCacheEntry* cache = cp->entry_at(index);
ResolvedMethodEntry* entry = cp->resolved_method_entry_at(index);
assert(cache->is_resolved(Bytecodes::_invokevirtual), "Should be resolved before rewriting");
assert(entry->is_resolved(Bytecodes::_invokevirtual), "Should be resolved before rewriting");
istate->set_msg(call_method);
CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
Method* callee = cache->f2_as_vfinal_method();
CHECK_NULL(STACK_OBJECT(-(entry->number_of_parameters())));
Method* callee = entry->method();
istate->set_callee(callee);
if (JVMTI_ENABLED && THREAD->is_interp_only_mode()) {
istate->set_callee_entry_point(callee->interpreter_entry());

View File

@ -934,11 +934,6 @@ C2V_VMENTRY_NULL(jobject, lookupMethodInPool, (JNIEnv* env, jobject, ARGUMENT_PA
return JVMCIENV->get_jobject(result);
C2V_END
C2V_VMENTRY_0(jint, constantPoolRemapInstructionOperandFromCache, (JNIEnv* env, jobject, ARGUMENT_PAIR(cp), jint index))
constantPoolHandle cp(THREAD, UNPACK_PAIR(ConstantPool, cp));
return cp->remap_instruction_operand_from_cache(index);
C2V_END
C2V_VMENTRY_NULL(jobject, resolveFieldInPool, (JNIEnv* env, jobject, ARGUMENT_PAIR(cp), jint index, ARGUMENT_PAIR(method), jbyte opcode, jintArray info_handle))
constantPoolHandle cp(THREAD, UNPACK_PAIR(ConstantPool, cp));
Bytecodes::Code code = (Bytecodes::Code)(((int) opcode) & 0xFF);
@ -1660,6 +1655,14 @@ C2V_VMENTRY_0(int, decodeFieldIndexToCPIndex, (JNIEnv* env, jobject, ARGUMENT_PA
return cp->resolved_field_entry_at(field_index)->constant_pool_index();
C2V_END
C2V_VMENTRY_0(int, decodeMethodIndexToCPIndex, (JNIEnv* env, jobject, ARGUMENT_PAIR(cp), jint method_index))
constantPoolHandle cp(THREAD, UNPACK_PAIR(ConstantPool, cp));
if (method_index < 0 || method_index >= cp->resolved_method_entries_length()) {
JVMCI_THROW_MSG_0(IllegalStateException, err_msg("invalid method index %d", method_index));
}
return cp->resolved_method_entry_at(method_index)->constant_pool_index();
C2V_END
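// This new entry point replaces constantPoolRemapInstructionOperandFromCache
// (removed above): the ResolvedMethodEntry stores its constant pool index
// directly, so decoding a rewritten method operand no longer goes through a
// ConstantPoolCacheEntry.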
C2V_VMENTRY(void, resolveInvokeHandleInPool, (JNIEnv* env, jobject, ARGUMENT_PAIR(cp), jint index))
constantPoolHandle cp(THREAD, UNPACK_PAIR(ConstantPool, cp));
Klass* holder = cp->klass_ref_at(index, Bytecodes::_invokehandle, CHECK);
@ -1667,15 +1670,14 @@ C2V_VMENTRY(void, resolveInvokeHandleInPool, (JNIEnv* env, jobject, ARGUMENT_PAI
if (MethodHandles::is_signature_polymorphic_name(holder, name)) {
CallInfo callInfo;
LinkResolver::resolve_invoke(callInfo, Handle(), cp, index, Bytecodes::_invokehandle, CHECK);
ConstantPoolCacheEntry* cp_cache_entry = cp->cache()->entry_at(cp->decode_cpcache_index(index));
cp_cache_entry->set_method_handle(cp, callInfo);
cp->cache()->set_method_handle(index, callInfo);
}
C2V_END
C2V_VMENTRY_0(jint, isResolvedInvokeHandleInPool, (JNIEnv* env, jobject, ARGUMENT_PAIR(cp), jint index))
constantPoolHandle cp(THREAD, UNPACK_PAIR(ConstantPool, cp));
ConstantPoolCacheEntry* cp_cache_entry = cp->cache()->entry_at(cp->decode_cpcache_index(index));
if (cp_cache_entry->is_resolved(Bytecodes::_invokehandle)) {
ResolvedMethodEntry* entry = cp->cache()->resolved_method_entry_at(index);
if (entry->is_resolved(Bytecodes::_invokehandle)) {
// MethodHandle.invoke* --> LambdaForm?
ResourceMark rm;
@ -1688,7 +1690,7 @@ C2V_VMENTRY_0(jint, isResolvedInvokeHandleInPool, (JNIEnv* env, jobject, ARGUMEN
vmassert(MethodHandles::is_method_handle_invoke_name(resolved_klass, name_sym), "!");
vmassert(MethodHandles::is_signature_polymorphic_name(resolved_klass, name_sym), "!");
methodHandle adapter_method(THREAD, cp_cache_entry->f1_as_method());
methodHandle adapter_method(THREAD, entry->method());
methodHandle resolved_method(adapter_method);
@ -1697,7 +1699,7 @@ C2V_VMENTRY_0(jint, isResolvedInvokeHandleInPool, (JNIEnv* env, jobject, ARGUMEN
vmassert(!resolved_method->is_static(),"!");
vmassert(MethodHandles::is_signature_polymorphic_method(resolved_method()),"!");
vmassert(!MethodHandles::is_signature_polymorphic_static(resolved_method->intrinsic_id()), "!");
vmassert(cp_cache_entry->appendix_if_resolved(cp) == nullptr, "!");
vmassert(cp->cache()->appendix_if_resolved(entry) == nullptr, "!");
methodHandle m(THREAD, LinkResolver::linktime_resolve_virtual_method_or_null(link_info));
vmassert(m == resolved_method, "!!");
@ -1707,7 +1709,7 @@ C2V_VMENTRY_0(jint, isResolvedInvokeHandleInPool, (JNIEnv* env, jobject, ARGUMEN
return Bytecodes::_invokevirtual;
}
if (cp->is_invokedynamic_index(index)) {
if (cp->resolved_indy_entry_at(cp->decode_cpcache_index(index))->is_resolved()) {
if (cp->resolved_indy_entry_at(cp->decode_invokedynamic_index(index))->is_resolved()) {
return Bytecodes::_invokedynamic;
}
}
@ -3220,13 +3222,13 @@ JNINativeMethod CompilerToVM::methods[] = {
{CC "lookupAppendixInPool", CC "(" HS_CONSTANT_POOL2 "I)" OBJECTCONSTANT, FN_PTR(lookupAppendixInPool)},
{CC "lookupMethodInPool", CC "(" HS_CONSTANT_POOL2 "IB" HS_METHOD2 ")" HS_METHOD, FN_PTR(lookupMethodInPool)},
{CC "lookupConstantInPool", CC "(" HS_CONSTANT_POOL2 "IZ)" JAVACONSTANT, FN_PTR(lookupConstantInPool)},
{CC "constantPoolRemapInstructionOperandFromCache", CC "(" HS_CONSTANT_POOL2 "I)I", FN_PTR(constantPoolRemapInstructionOperandFromCache)},
{CC "resolveBootstrapMethod", CC "(" HS_CONSTANT_POOL2 "I)[" OBJECT, FN_PTR(resolveBootstrapMethod)},
{CC "bootstrapArgumentIndexAt", CC "(" HS_CONSTANT_POOL2 "II)I", FN_PTR(bootstrapArgumentIndexAt)},
{CC "getUncachedStringInPool", CC "(" HS_CONSTANT_POOL2 "I)" JAVACONSTANT, FN_PTR(getUncachedStringInPool)},
{CC "resolveTypeInPool", CC "(" HS_CONSTANT_POOL2 "I)" HS_KLASS, FN_PTR(resolveTypeInPool)},
{CC "resolveFieldInPool", CC "(" HS_CONSTANT_POOL2 "I" HS_METHOD2 "B[I)" HS_KLASS, FN_PTR(resolveFieldInPool)},
{CC "decodeFieldIndexToCPIndex", CC "(" HS_CONSTANT_POOL2 "I)I", FN_PTR(decodeFieldIndexToCPIndex)},
{CC "decodeMethodIndexToCPIndex", CC "(" HS_CONSTANT_POOL2 "I)I", FN_PTR(decodeMethodIndexToCPIndex)},
{CC "decodeIndyIndexToCPIndex", CC "(" HS_CONSTANT_POOL2 "IZ)I", FN_PTR(decodeIndyIndexToCPIndex)},
{CC "resolveInvokeHandleInPool", CC "(" HS_CONSTANT_POOL2 "I)V", FN_PTR(resolveInvokeHandleInPool)},
{CC "isResolvedInvokeHandleInPool", CC "(" HS_CONSTANT_POOL2 "I)I", FN_PTR(isResolvedInvokeHandleInPool)},

View File

@ -612,7 +612,6 @@
declare_constant(vmIntrinsics::_invokeGeneric) \
declare_constant(vmIntrinsics::_compiledLambdaForm) \
\
declare_constant(ConstantPool::CPCACHE_INDEX_TAG) \
declare_constant(ConstantPool::_has_dynamic_constant) \
\
declare_constant(ConstMethodFlags::_misc_has_linenumber_table) \

View File

@ -639,14 +639,12 @@ Klass* ConstantPool::klass_at_if_loaded(const constantPoolHandle& this_cp, int w
Method* ConstantPool::method_at_if_loaded(const constantPoolHandle& cpool,
int which) {
if (cpool->cache() == nullptr) return nullptr; // nothing to load yet
int cache_index = decode_cpcache_index(which, true);
if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) {
if (!(which >= 0 && which < cpool->resolved_method_entries_length())) {
// FIXME: should be an assert
log_debug(class, resolve)("bad operand %d in:", which); cpool->print();
return nullptr;
}
ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index);
return e->method_if_resolved(cpool);
return cpool->cache()->method_if_resolved(which);
}
@ -656,9 +654,7 @@ bool ConstantPool::has_appendix_at_if_loaded(const constantPoolHandle& cpool, in
int indy_index = decode_invokedynamic_index(which);
return cpool->resolved_indy_entry_at(indy_index)->has_appendix();
} else {
int cache_index = decode_cpcache_index(which, true);
ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index);
return e->has_appendix();
return cpool->resolved_method_entry_at(which)->has_appendix();
}
}
@ -668,21 +664,18 @@ oop ConstantPool::appendix_at_if_loaded(const constantPoolHandle& cpool, int whi
int indy_index = decode_invokedynamic_index(which);
return cpool->resolved_reference_from_indy(indy_index);
} else {
int cache_index = decode_cpcache_index(which, true);
ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index);
return e->appendix_if_resolved(cpool);
return cpool->cache()->appendix_if_resolved(which);
}
}
bool ConstantPool::has_local_signature_at_if_loaded(const constantPoolHandle& cpool, int which) {
if (cpool->cache() == nullptr) return false; // nothing to load yet
int cache_index = decode_cpcache_index(which, true);
if (is_invokedynamic_index(which)) {
return cpool->resolved_indy_entry_at(cache_index)->has_local_signature();
int indy_index = decode_invokedynamic_index(which);
return cpool->resolved_indy_entry_at(indy_index)->has_local_signature();
} else {
ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index);
return e->has_local_signature();
return cpool->resolved_method_entry_at(which)->has_local_signature();
}
}
@ -702,10 +695,12 @@ int ConstantPool::to_cp_index(int index, Bytecodes::Code code) {
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
case Bytecodes::_invokevirtual:
// TODO: handle resolved method entries with new structure
case Bytecodes::_fast_invokevfinal: // Bytecode interpreter uses this
return resolved_method_entry_at(index)->constant_pool_index();
default:
// change byte-ordering and go via cache
return remap_instruction_operand_from_cache(index);
tty->print_cr("Unexpected bytecode: %d", code);
ShouldNotReachHere(); // All cases should have been handled
return -1;
}
}
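// Field, method, and invokedynamic operands each decode through their own
// resolved-entry array now, so any other bytecode reaching to_cp_index() is a
// bug, hence the ShouldNotReachHere() in the default case above.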
@ -747,15 +742,6 @@ u2 ConstantPool::klass_ref_index_at(int index, Bytecodes::Code code) {
return uncached_klass_ref_index_at(to_cp_index(index, code));
}
int ConstantPool::remap_instruction_operand_from_cache(int operand) {
int cpc_index = operand;
DEBUG_ONLY(cpc_index -= CPCACHE_INDEX_TAG);
assert((int)(u2)cpc_index == cpc_index, "clean u2");
int member_index = cache()->entry_at(cpc_index)->constant_pool_index();
return member_index;
}
void ConstantPool::verify_constant_pool_resolve(const constantPoolHandle& this_cp, Klass* k, TRAPS) {
if (!(k->is_instance_klass() || k->is_objArray_klass())) {
return; // short cut, typeArray klass is always accessible

View File

@ -643,17 +643,13 @@ class ConstantPool : public Metadata {
// name_and_type_ref_index_at) all expect to be passed indices obtained
// directly from the bytecode.
// If the indices are meant to refer to fields or methods, they are
// actually rewritten constant pool cache indices.
// The routine remap_instruction_operand_from_cache manages the adjustment
// actually rewritten indices that point to entries in their respective structures
// i.e. ResolvedMethodEntries or ResolvedFieldEntries.
// The routine to_cp_index manages the adjustment
// of these values back to constant pool indices.
// There are also "uncached" versions which do not adjust the operand index; see below.
// FIXME: Consider renaming these with a prefix "cached_" to make the distinction clear.
// In a few cases (the verifier) there are uses before a cpcache has been built,
// which are handled by a dynamic check in remap_instruction_operand_from_cache.
// FIXME: Remove the dynamic check, and adjust all callers to specify the correct mode.
// Lookup for entries consisting of (klass_index, name_and_type index)
Klass* klass_ref_at(int which, Bytecodes::Code code, TRAPS);
Symbol* klass_ref_at_noresolve(int which, Bytecodes::Code code);
@ -669,8 +665,6 @@ class ConstantPool : public Metadata {
u2 klass_ref_index_at(int which, Bytecodes::Code code);
u2 name_and_type_ref_index_at(int which, Bytecodes::Code code);
int remap_instruction_operand_from_cache(int operand); // operand must be biased by CPCACHE_INDEX_TAG
constantTag tag_ref_at(int cp_cache_index, Bytecodes::Code code);
int to_cp_index(int which, Bytecodes::Code code);
@ -793,19 +787,6 @@ class ConstantPool : public Metadata {
// Debugging
const char* printable_name_at(int cp_index) PRODUCT_RETURN0;
#ifdef ASSERT
enum { CPCACHE_INDEX_TAG = 0x10000 }; // helps keep CP cache indices distinct from CP indices
#else
enum { CPCACHE_INDEX_TAG = 0 }; // in product mode, this zero value is a no-op
#endif //ASSERT
static int decode_cpcache_index(int raw_index, bool invokedynamic_ok = false) {
if (invokedynamic_ok && is_invokedynamic_index(raw_index))
return decode_invokedynamic_index(raw_index);
else
return raw_index - CPCACHE_INDEX_TAG;
}
private:
void set_resolved_references(OopHandle s) { _cache->set_resolved_references(s); }
@ -923,10 +904,16 @@ class ConstantPool : public Metadata {
inline ResolvedFieldEntry* resolved_field_entry_at(int field_index);
inline int resolved_field_entries_length() const;
// ResolvedMethodEntry getters
inline ResolvedMethodEntry* resolved_method_entry_at(int method_index);
inline int resolved_method_entries_length() const;
inline oop appendix_if_resolved(int method_index) const;
// ResolvedIndyEntry getters
inline ResolvedIndyEntry* resolved_indy_entry_at(int index);
inline int resolved_indy_entries_length() const;
inline oop resolved_reference_from_indy(int index) const;
inline oop resolved_reference_from_method(int index) const;
};
#endif // SHARE_OOPS_CONSTANTPOOL_HPP

View File

@ -30,6 +30,7 @@
#include "oops/cpCache.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "runtime/atomic.hpp"
inline Klass* ConstantPool::resolved_klass_at(int which) const { // Used by Compiler
@ -51,6 +52,22 @@ inline int ConstantPool::resolved_field_entries_length() const {
return cache()->resolved_field_entries_length();
}
inline ResolvedMethodEntry* ConstantPool::resolved_method_entry_at(int method_index) {
return cache()->resolved_method_entry_at(method_index);
}
inline int ConstantPool::resolved_method_entries_length() const {
return cache()->resolved_method_entries_length();
}
inline oop ConstantPool::appendix_if_resolved(int method_index) const {
ResolvedMethodEntry* entry = cache()->resolved_method_entry_at(method_index);
if (!entry->has_appendix())
return nullptr;
const int ref_index = entry->resolved_references_index();
return resolved_reference_at(ref_index);
}
inline u2 ConstantPool::invokedynamic_bootstrap_ref_index_at(int indy_index) const {
return cache()->resolved_indy_entry_at(decode_invokedynamic_index(indy_index))->constant_pool_index();
}
@ -66,4 +83,8 @@ inline int ConstantPool::resolved_indy_entries_length() const {
inline oop ConstantPool::resolved_reference_from_indy(int index) const {
return resolved_references()->obj_at(cache()->resolved_indy_entry_at(index)->resolved_references_index());
}
inline oop ConstantPool::resolved_reference_from_method(int index) const {
return resolved_references()->obj_at(cache()->resolved_method_entry_at(index)->resolved_references_index());
}
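// Both inline helpers above read through the ResolvedMethodEntry's
// resolved_references_index() into the shared resolved-references array;
// appendix_if_resolved() additionally returns null when no appendix is recorded.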
#endif // SHARE_OOPS_CONSTANTPOOL_INLINE_HPP

View File

@ -49,6 +49,7 @@
#include "oops/oop.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
@ -57,106 +58,23 @@
#include "runtime/vm_version.hpp"
#include "utilities/macros.hpp"
// Implementation of ConstantPoolCacheEntry
// Implementation of ConstantPoolCache
void ConstantPoolCacheEntry::initialize_entry(int index) {
assert(0 < index && index < 0x10000, "sanity check");
_indices = index;
_f1 = nullptr;
_f2 = _flags = 0;
assert(constant_pool_index() == index, "");
}
intx ConstantPoolCacheEntry::make_flags(TosState state,
int option_bits,
int field_index_or_method_params) {
assert(state < number_of_states, "Invalid state in make_flags");
intx f = ((int)state << tos_state_shift) | option_bits | field_index_or_method_params;
// Preserve existing flag bit values
// The low bits are a field offset, or else the method parameter size.
#ifdef ASSERT
TosState old_state = flag_state();
assert(old_state == (TosState)0 || old_state == state,
"inconsistent cpCache flags state");
#endif
return (_flags | f) ;
}
void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
#ifdef ASSERT
// Read once.
volatile Bytecodes::Code c = bytecode_1();
assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
// Need to flush pending stores here before bytecode is written.
Atomic::release_store(&_indices, _indices | ((u_char)code << bytecode_1_shift));
}
void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
#ifdef ASSERT
// Read once.
volatile Bytecodes::Code c = bytecode_2();
assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
// Need to flush pending stores here before bytecode is written.
Atomic::release_store(&_indices, _indices | ((u_char)code << bytecode_2_shift));
}
// Sets f1, ordering with previous writes.
void ConstantPoolCacheEntry::release_set_f1(Metadata* f1) {
assert(f1 != nullptr, "");
Atomic::release_store(&_f1, f1);
}
void ConstantPoolCacheEntry::set_indy_resolution_failed() {
Atomic::release_store(&_flags, _flags | (1 << indy_resolution_failed_shift));
}
// Note that concurrent update of both bytecodes can leave one of them
// reset to zero. This is harmless; the interpreter will simply re-resolve
// the damaged entry. More seriously, the memory synchronization is needed
// to flush other fields (f1, f2) completely to memory before the bytecodes
// are updated, lest other processors see a non-zero bytecode but zero f1/f2.
void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
Bytecodes::Code put_code,
Klass* field_holder,
int field_index,
int field_offset,
TosState field_type,
bool is_final,
bool is_volatile) {
set_f1(field_holder);
set_f2(field_offset);
assert((field_index & field_index_mask) == field_index,
"field index does not fit in low flag bits");
set_field_flags(field_type,
((is_volatile ? 1 : 0) << is_volatile_shift) |
((is_final ? 1 : 0) << is_final_shift),
field_index);
set_bytecode_1(get_code);
set_bytecode_2(put_code);
NOT_PRODUCT(verify(tty));
}
void ConstantPoolCacheEntry::set_parameter_size(int value) {
// This routine is called only in corner cases where the CPCE is not yet initialized.
// See AbstractInterpreter::deopt_continue_after_entry.
assert(_flags == 0 || parameter_size() == 0 || parameter_size() == value,
"size must not change: parameter_size=%d, value=%d", parameter_size(), value);
// Setting the parameter size by itself is only safe if the
// current value of _flags is 0, otherwise another thread may have
// updated it and we don't want to overwrite that value. Don't
// bother trying to update it once it's nonzero but always make
// sure that the final parameter size agrees with what was passed.
if (_flags == 0) {
intx newflags = (value & parameter_size_mask);
Atomic::cmpxchg(&_flags, (intx)0, newflags);
template <class T>
static Array<T>* initialize_resolved_entries_array(ClassLoaderData* loader_data, GrowableArray<T> entries, TRAPS) {
Array<T>* resolved_entries;
if (entries.length() != 0) {
resolved_entries = MetadataFactory::new_array<T>(loader_data, entries.length(), CHECK_NULL);
for (int i = 0; i < entries.length(); i++) {
resolved_entries->at_put(i, entries.at(i));
}
return resolved_entries;
}
guarantee(parameter_size() == value,
"size must not change: parameter_size=%d, value=%d", parameter_size(), value);
return nullptr;
}
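// Helper used by ConstantPoolCache::allocate() below: it copies the Rewriter's
// GrowableArray of initialized entries into a metaspace-backed Array<T>, or
// returns nullptr when the class has no entries of that kind.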
void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_code,
void ConstantPoolCache::set_direct_or_vtable_call(Bytecodes::Code invoke_code,
int method_index,
const methodHandle& method,
int vtable_index,
bool sender_is_interface) {
@ -167,6 +85,7 @@ void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_co
int byte_no = -1;
bool change_to_virtual = false;
InstanceKlass* holder = nullptr; // have to declare this outside the switch
ResolvedMethodEntry* method_entry = resolved_method_entry_at(method_index);
switch (invoke_code) {
case Bytecodes::_invokeinterface:
holder = method->method_holder();
@ -174,14 +93,14 @@ void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_co
if (vtable_index == Method::nonvirtual_vtable_index && holder->is_interface() ) {
assert(method->is_private(), "unexpected non-private method");
assert(method->can_be_statically_bound(), "unexpected non-statically-bound method");
// set_f2_as_vfinal_method checks if is_vfinal flag is true.
set_method_flags(as_TosState(method->result_type()),
( 1 << is_vfinal_shift) |
((method->is_final_method() ? 1 : 0) << is_final_shift),
method()->size_of_parameters());
set_f2_as_vfinal_method(method());
method_entry->set_flags(( 1 << ResolvedMethodEntry::is_vfinal_shift) |
((method->is_final_method() ? 1 : 0) << ResolvedMethodEntry::is_final_shift));
method_entry->fill_in((u1)as_TosState(method->result_type()), (u2)method()->size_of_parameters());
assert(method_entry->is_vfinal(), "flags must be set");
method_entry->set_method(method());
byte_no = 2;
set_f1(holder); // interface klass*
method_entry->set_klass(holder);
break;
}
else {
@ -201,40 +120,40 @@ void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_co
{
if (!is_vtable_call) {
assert(method->can_be_statically_bound(), "");
// set_f2_as_vfinal_method checks if is_vfinal flag is true.
set_method_flags(as_TosState(method->result_type()),
( 1 << is_vfinal_shift) |
((method->is_final_method() ? 1 : 0) << is_final_shift) |
((change_to_virtual ? 1 : 0) << is_forced_virtual_shift),
method()->size_of_parameters());
set_f2_as_vfinal_method(method());
method_entry->set_flags(( 1 << ResolvedMethodEntry::is_vfinal_shift) |
((method->is_final_method() ? 1 : 0) << ResolvedMethodEntry::is_final_shift) |
((change_to_virtual ? 1 : 0) << ResolvedMethodEntry::is_forced_virtual_shift));
method_entry->fill_in((u1)as_TosState(method->result_type()), (u2)method()->size_of_parameters());
assert(method_entry->is_vfinal(), "flags must be set");
method_entry->set_method(method());
} else {
assert(!method->can_be_statically_bound(), "");
assert(vtable_index >= 0, "valid index");
assert(!method->is_final_method(), "sanity");
set_method_flags(as_TosState(method->result_type()),
((change_to_virtual ? 1 : 0) << is_forced_virtual_shift),
method()->size_of_parameters());
set_f2(vtable_index);
method_entry->set_flags((change_to_virtual ? 1 : 0) << ResolvedMethodEntry::is_forced_virtual_shift);
method_entry->fill_in((u1)as_TosState(method->result_type()), (u2)method()->size_of_parameters());
assert(!method_entry->is_vfinal(), "flags must not be set");
method_entry->set_table_index(vtable_index);
}
byte_no = 2;
break;
}
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
case Bytecodes::_invokestatic: {
assert(!is_vtable_call, "");
// Note: Read and preserve the value of the is_vfinal flag on any
// invokevirtual bytecode shared with this constant pool cache entry.
// It is cheap and safe to consult is_vfinal() at all times.
// Once is_vfinal is set, it must stay that way, lest we get a dangling oop.
set_method_flags(as_TosState(method->result_type()),
((is_vfinal() ? 1 : 0) << is_vfinal_shift) |
((method->is_final_method() ? 1 : 0) << is_final_shift),
method()->size_of_parameters());
set_f1(method());
bool vfinal = method_entry->is_vfinal();
method_entry->set_flags(((method->is_final_method() ? 1 : 0) << ResolvedMethodEntry::is_final_shift));
assert(vfinal == method_entry->is_vfinal(), "Vfinal flag must be preserved");
method_entry->fill_in((u1)as_TosState(method->result_type()), (u2)method()->size_of_parameters());
method_entry->set_method(method());
byte_no = 1;
break;
}
default:
ShouldNotReachHere();
break;
@ -270,7 +189,7 @@ void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_co
}
}
if (do_resolve) {
set_bytecode_1(invoke_code);
method_entry->set_bytecode1(invoke_code);
}
} else if (byte_no == 2) {
if (change_to_virtual) {
@ -300,87 +219,62 @@ void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_co
// We set bytecode_1() to _invokeinterface, because that is the
// bytecode # used by the interpreter to see if it is resolved.
// We set bytecode_2() to _invokevirtual.
set_bytecode_1(invoke_code);
method_entry->set_bytecode1(invoke_code);
}
}
// set up for invokevirtual, even if linking for invokeinterface also:
set_bytecode_2(Bytecodes::_invokevirtual);
method_entry->set_bytecode2(Bytecodes::_invokevirtual);
} else {
ShouldNotReachHere();
}
NOT_PRODUCT(verify(tty));
}
void ConstantPoolCacheEntry::set_direct_call(Bytecodes::Code invoke_code, const methodHandle& method,
bool sender_is_interface) {
void ConstantPoolCache::set_direct_call(Bytecodes::Code invoke_code, int method_index, const methodHandle& method,
bool sender_is_interface) {
int index = Method::nonvirtual_vtable_index;
// index < 0; FIXME: inline and customize set_direct_or_vtable_call
set_direct_or_vtable_call(invoke_code, method, index, sender_is_interface);
set_direct_or_vtable_call(invoke_code, method_index, method, index, sender_is_interface);
}
void ConstantPoolCacheEntry::set_vtable_call(Bytecodes::Code invoke_code, const methodHandle& method, int index) {
void ConstantPoolCache::set_vtable_call(Bytecodes::Code invoke_code, int method_index, const methodHandle& method, int index) {
// either the method is a miranda or its holder should accept the given index
assert(method->method_holder()->is_interface() || method->method_holder()->verify_vtable_index(index), "");
// index >= 0; FIXME: inline and customize set_direct_or_vtable_call
set_direct_or_vtable_call(invoke_code, method, index, false);
set_direct_or_vtable_call(invoke_code, method_index, method, index, false);
}
void ConstantPoolCacheEntry::set_itable_call(Bytecodes::Code invoke_code,
Klass* referenced_klass,
const methodHandle& method, int index) {
void ConstantPoolCache::set_itable_call(Bytecodes::Code invoke_code,
int method_index,
Klass* referenced_klass,
const methodHandle& method, int index) {
assert(method->method_holder()->verify_itable_index(index), "");
assert(invoke_code == Bytecodes::_invokeinterface, "");
InstanceKlass* interf = method->method_holder();
assert(interf->is_interface(), "must be an interface");
assert(!method->is_final_method(), "interfaces do not have final methods; cannot link to one here");
set_f1(referenced_klass);
set_f2((intx)method());
set_method_flags(as_TosState(method->result_type()),
0, // no option bits
method()->size_of_parameters());
set_bytecode_1(Bytecodes::_invokeinterface);
ResolvedMethodEntry* method_entry = resolved_method_entry_at(method_index);
method_entry->set_klass(static_cast<InstanceKlass*>(referenced_klass));
method_entry->set_method(method());
method_entry->fill_in((u1)as_TosState(method->result_type()), (u2)method()->size_of_parameters());
method_entry->set_bytecode1(Bytecodes::_invokeinterface);
}
void ConstantPoolCacheEntry::set_method_handle(const constantPoolHandle& cpool, const CallInfo &call_info) {
set_method_handle_common(cpool, Bytecodes::_invokehandle, call_info);
}
void ConstantPoolCacheEntry::set_method_handle_common(const constantPoolHandle& cpool,
Bytecodes::Code invoke_code,
const CallInfo &call_info) {
// NOTE: This CPCE can be the subject of data races.
// There are three words to update: flags, refs[f2], f1 (in that order).
// Writers must store all other values before f1.
// Readers must test f1 first for non-null before reading other fields.
ResolvedMethodEntry* ConstantPoolCache::set_method_handle(int method_index, const CallInfo &call_info) {
// NOTE: This method entry can be the subject of data races.
// There are three words to update: flags, refs[appendix_index], method (in that order).
// Writers must store all other values before method.
// Readers must test the method first for non-null before reading other fields.
// Competing writers must acquire exclusive access via a lock.
// A losing writer waits on the lock until the winner writes f1 and leaves
// A losing writer waits on the lock until the winner writes the method and leaves
// the lock, so that when the losing writer returns, he can use the linked
// cache entry.
// Lock fields to write
MutexLocker ml(cpool->pool_holder()->init_monitor());
Bytecodes::Code invoke_code = Bytecodes::_invokehandle;
MutexLocker ml(constant_pool()->pool_holder()->init_monitor());
ResolvedMethodEntry* method_entry = resolved_method_entry_at(method_index);
if (!is_f1_null()) {
return;
}
if (indy_resolution_failed()) {
// Before we got here, another thread got a LinkageError exception during
// resolution. Ignore our success and throw their exception.
ConstantPoolCache* cpCache = cpool->cache();
int index = -1;
for (int i = 0; i < cpCache->length(); i++) {
if (cpCache->entry_at(i) == this) {
index = i;
break;
}
}
guarantee(index >= 0, "Didn't find cpCache entry!");
int encoded_index = ResolutionErrorTable::encode_cpcache_index(
ConstantPool::encode_invokedynamic_index(index));
JavaThread* THREAD = JavaThread::current(); // For exception macros.
ConstantPool::throw_resolution_error(cpool, encoded_index, THREAD);
return;
if (method_entry->is_resolved(invoke_code)) {
return method_entry;
}
Method* adapter = call_info.resolved_method();
@ -388,306 +282,102 @@ void ConstantPoolCacheEntry::set_method_handle_common(const constantPoolHandle&
const bool has_appendix = appendix.not_null();
// Write the flags.
// MHs and indy are always sig-poly and have a local signature.
set_method_flags(as_TosState(adapter->result_type()),
((has_appendix ? 1 : 0) << has_appendix_shift ) |
( 1 << has_local_signature_shift ) |
( 1 << is_final_shift ),
adapter->size_of_parameters());
// MHs are always sig-poly and have a local signature.
method_entry->fill_in((u1)as_TosState(adapter->result_type()), (u2)adapter->size_of_parameters());
method_entry->set_flags(((has_appendix ? 1 : 0) << ResolvedMethodEntry::has_appendix_shift ) |
( 1 << ResolvedMethodEntry::has_local_signature_shift ) |
( 1 << ResolvedMethodEntry::is_final_shift ));
LogStream* log_stream = nullptr;
LogStreamHandle(Debug, methodhandles, indy) lsh_indy;
if (lsh_indy.is_enabled()) {
ResourceMark rm;
log_stream = &lsh_indy;
log_stream->print_cr("set_method_handle bc=%d appendix=" PTR_FORMAT "%s method=" PTR_FORMAT " (local signature) ",
invoke_code,
p2i(appendix()),
(has_appendix ? "" : " (unused)"),
p2i(adapter));
adapter->print_on(log_stream);
if (has_appendix) appendix()->print_on(log_stream);
}
// Method handle invokes and invokedynamic sites use both cp cache words.
// refs[f2], if not null, contains a value passed as a trailing argument to the adapter.
// Method handle invokes use both a method and a resolved references index.
// refs[appendix_index], if not null, contains a value passed as a trailing argument to the adapter.
// In the general case, this could be the call site's MethodType,
// for use with java.lang.Invokers.checkExactType, or else a CallSite object.
// f1 contains the adapter method which manages the actual call.
// method_entry->method() contains the adapter method which manages the actual call.
// In the general case, this is a compiled LambdaForm.
// (The Java code is free to optimize these calls by binding other
// sorts of methods and appendices to call sites.)
// JVM-level linking is via f1, as if for invokespecial, and signatures are erased.
// JVM-level linking is via the method, as if for invokespecial, and signatures are erased.
// The appendix argument (if any) is added to the signature, and is counted in the parameter_size bits.
// Even with the appendix, the method will never take more than 255 parameter slots.
//
// This means that given a call site like (List)mh.invoke("foo"),
// the f1 method has signature '(Ljl/Object;Ljl/invoke/MethodType;)Ljl/Object;',
// the method has signature '(Ljl/Object;Ljl/invoke/MethodType;)Ljl/Object;',
// not '(Ljava/lang/String;)Ljava/util/List;'.
// The fact that String and List are involved is encoded in the MethodType in refs[f2].
// The fact that String and List are involved is encoded in the MethodType in refs[appendix_index].
// This allows us to create fewer Methods, while keeping type safety.
//
// Store appendix, if any.
if (has_appendix) {
const int appendix_index = f2_as_index();
oop old_oop = cpool->set_resolved_reference_at(appendix_index, appendix());
assert(old_oop == nullptr, "init just once");
const int appendix_index = method_entry->resolved_references_index();
objArrayOop resolved_references = constant_pool()->resolved_references();
assert(appendix_index >= 0 && appendix_index < resolved_references->length(), "oob");
assert(resolved_references->obj_at(appendix_index) == nullptr, "init just once");
resolved_references->obj_at_put(appendix_index, appendix());
}
release_set_f1(adapter); // This must be the last one to set (see NOTE above)!
method_entry->set_method(adapter); // This must be the last one to set (see NOTE above)!
// The interpreter assembly code does not check byte_2,
// but it is used by is_resolved, method_if_resolved, etc.
set_bytecode_1(invoke_code);
NOT_PRODUCT(verify(tty));
method_entry->set_bytecode1(invoke_code);
if (log_stream != nullptr) {
this->print(log_stream, 0, cpool->cache());
}
assert(has_appendix == this->has_appendix(), "proper storage of appendix flag");
assert(this->has_local_signature(), "proper storage of signature flag");
assert(has_appendix == method_entry->has_appendix(), "proper storage of appendix flag");
assert(method_entry->has_local_signature(), "proper storage of signature flag");
return method_entry;
}
Method* ConstantPoolCacheEntry::method_if_resolved(const constantPoolHandle& cpool) const {
Method* ConstantPoolCache::method_if_resolved(int method_index) const {
// Decode the action of set_method and set_interface_call
Bytecodes::Code invoke_code = bytecode_1();
if (invoke_code != (Bytecodes::Code)0) {
Metadata* f1 = f1_ord();
if (f1 != nullptr) {
switch (invoke_code) {
case Bytecodes::_invokeinterface:
assert(f1->is_klass(), "");
return f2_as_interface_method();
case Bytecodes::_invokestatic:
case Bytecodes::_invokespecial:
assert(!has_appendix(), "");
case Bytecodes::_invokehandle:
assert(f1->is_method(), "");
return (Method*)f1;
case Bytecodes::_invokedynamic:
ShouldNotReachHere();
default:
break;
}
}
}
invoke_code = bytecode_2();
if (invoke_code != (Bytecodes::Code)0) {
switch (invoke_code) {
case Bytecodes::_invokevirtual:
if (is_vfinal()) {
// invokevirtual
Method* m = f2_as_vfinal_method();
assert(m->is_method(), "");
return m;
} else {
int holder_index = cpool->uncached_klass_ref_index_at(constant_pool_index());
if (cpool->tag_at(holder_index).is_klass()) {
Klass* klass = cpool->resolved_klass_at(holder_index);
return klass->method_at_vtable(f2_as_index());
}
}
break;
ResolvedMethodEntry* method_entry = resolved_method_entry_at(method_index);
Bytecodes::Code invoke_code = (Bytecodes::Code)method_entry->bytecode1();
switch (invoke_code) {
case Bytecodes::_invokeinterface:
case Bytecodes::_invokestatic:
case Bytecodes::_invokespecial:
assert(!method_entry->has_appendix(), "");
// fall through
case Bytecodes::_invokehandle:
return method_entry->method();
case Bytecodes::_invokedynamic:
ShouldNotReachHere();
default:
assert(invoke_code == (Bytecodes::Code)0, "unexpected bytecode");
break;
}
}
return nullptr;
}
oop ConstantPoolCacheEntry::appendix_if_resolved(const constantPoolHandle& cpool) const {
if (!has_appendix())
return nullptr;
const int ref_index = f2_as_index();
return cpool->resolved_reference_at(ref_index);
}
#if INCLUDE_JVMTI
void log_adjust(const char* entry_type, Method* old_method, Method* new_method, bool* trace_name_printed) {
ResourceMark rm;
if (!(*trace_name_printed)) {
log_info(redefine, class, update)("adjust: name=%s", old_method->method_holder()->external_name());
*trace_name_printed = true;
}
log_trace(redefine, class, update, constantpool)
("cpc %s entry update: %s", entry_type, new_method->external_name());
}
// RedefineClasses() API support:
// If this ConstantPoolCacheEntry refers to old_method then update it
// to refer to new_method.
void ConstantPoolCacheEntry::adjust_method_entry(Method* old_method,
Method* new_method, bool * trace_name_printed) {
if (is_vfinal()) {
// virtual and final so _f2 contains method ptr instead of vtable index
if (f2_as_vfinal_method() == old_method) {
// match old_method so need an update
// NOTE: can't use set_f2_as_vfinal_method as it asserts on different values
_f2 = (intptr_t)new_method;
log_adjust("vfinal", old_method, new_method, trace_name_printed);
}
return;
}
assert (_f1 != nullptr, "should not call with uninteresting entry");
if (!(_f1->is_method())) {
// _f1 is a Klass* for an interface, _f2 is the method
if (f2_as_interface_method() == old_method) {
_f2 = (intptr_t)new_method;
log_adjust("interface", old_method, new_method, trace_name_printed);
}
} else if (_f1 == old_method) {
_f1 = new_method;
log_adjust("special, static or dynamic", old_method, new_method, trace_name_printed);
}
}
// a constant pool cache entry should never contain old or obsolete methods
bool ConstantPoolCacheEntry::check_no_old_or_obsolete_entries() {
Method* m = get_interesting_method_entry();
// return false if m refers to a non-deleted old or obsolete method
if (m != nullptr) {
assert(m->is_valid() && m->is_method(), "m is a valid method");
return !m->is_old() && !m->is_obsolete(); // old is always set for old and obsolete
} else {
return true;
}
}
Method* ConstantPoolCacheEntry::get_interesting_method_entry() {
if (!is_method_entry()) {
// not a method entry so not interesting by default
return nullptr;
}
Method* m = nullptr;
if (is_vfinal()) {
// virtual and final so _f2 contains method ptr instead of vtable index
m = f2_as_vfinal_method();
} else if (is_f1_null()) {
// null _f1 means this is a virtual entry so also not interesting
return nullptr;
} else {
if (!(_f1->is_method())) {
// _f1 is a Klass* for an interface
m = f2_as_interface_method();
invoke_code = (Bytecodes::Code)method_entry->bytecode2();
if (invoke_code == Bytecodes::_invokevirtual) {
if (method_entry->is_vfinal()) {
return method_entry->method();
} else {
m = f1_as_method();
}
}
assert(m != nullptr && m->is_method(), "sanity check");
if (m == nullptr || !m->is_method()) {
return nullptr;
}
return m;
}
#endif // INCLUDE_JVMTI
void ConstantPoolCacheEntry::print(outputStream* st, int index, const ConstantPoolCache* cache) const {
// print separator
if (index == 0) st->print_cr(" -------------");
// print universal entry info
st->print_cr("%3d", index);
st->print_cr(" - this: " PTR_FORMAT, p2i(this));
st->print_cr(" - bytecode 1: %s %02x", Bytecodes::name(bytecode_1()), bytecode_1());
st->print_cr(" - bytecode 2: %s %02x", Bytecodes::name(bytecode_2()), bytecode_2());
st->print_cr(" - cp index: %5d", constant_pool_index());
if (is_method_entry()) {
ResourceMark rm;
constantPoolHandle cph(Thread::current(), cache->constant_pool());
Method* m = method_if_resolved(cph);
st->print_cr(" - F1: [ " PTR_FORMAT "]", (intptr_t)_f1);
st->print_cr(" - F2: [ " PTR_FORMAT "]", (intptr_t)_f2);
st->print_cr(" - method: " INTPTR_FORMAT " %s", p2i(m), m != nullptr ? m->external_name() : nullptr);
st->print_cr(" - flag values: [%02x|0|0|%01x|%01x|%01x|%01x|0|%01x|%01x|00|00|%02x]",
flag_state(), has_local_signature(), has_appendix(),
is_forced_virtual(), is_final(), is_vfinal(),
indy_resolution_failed(), parameter_size());
st->print_cr(" - tos: %s\n - local signature: %01x\n"
" - has appendix: %01x\n - forced virtual: %01x\n"
" - final: %01x\n - virtual final: %01x\n - resolution failed: %01x\n"
" - num parameters: %02x",
type2name(as_BasicType(flag_state())), has_local_signature(), has_appendix(),
is_forced_virtual(), is_final(), is_vfinal(),
indy_resolution_failed(), parameter_size());
if ((bytecode_1() == Bytecodes::_invokehandle)) {
oop appendix = appendix_if_resolved(cph);
if (appendix != nullptr) {
st->print(" appendix: ");
appendix->print_on(st);
int holder_index = constant_pool()->uncached_klass_ref_index_at(method_entry->constant_pool_index());
if (constant_pool()->tag_at(holder_index).is_klass()) {
Klass* klass = constant_pool()->resolved_klass_at(holder_index);
return klass->method_at_vtable(method_entry->table_index());
}
}
} else {
assert(is_field_entry(), "must be a field entry");
st->print_cr(" - F1: [ " PTR_FORMAT "]", (intptr_t)_f1);
st->print_cr(" - F2: [ " PTR_FORMAT "]", (intptr_t)_f2);
st->print_cr(" - flag values: [%02x|0|1|0|0|0|%01x|%01x|0|0|%04x]",
flag_state(), is_final(), is_volatile(), field_index());
st->print_cr(" - tos: %s\n - final: %d\n - volatile: %d\n - field index: %04x",
type2name(as_BasicType(flag_state())), is_final(), is_volatile(), field_index());
}
st->print_cr(" -------------");
}
void ConstantPoolCacheEntry::verify(outputStream* st) const {
// not implemented yet
}
// Implementation of ConstantPoolCache
template <class T>
static Array<T>* initialize_resolved_entries_array(ClassLoaderData* loader_data, GrowableArray<T> entries, TRAPS) {
Array<T>* resolved_entries;
if (entries.length() != 0) {
resolved_entries = MetadataFactory::new_array<T>(loader_data, entries.length(), CHECK_NULL);
for (int i = 0; i < entries.length(); i++) {
resolved_entries->at_put(i, entries.at(i));
}
return resolved_entries;
}
return nullptr;
}
ConstantPoolCache* ConstantPoolCache::allocate(ClassLoaderData* loader_data,
const intStack& index_map,
const intStack& invokedynamic_map,
const GrowableArray<ResolvedIndyEntry> indy_entries,
const GrowableArray<ResolvedFieldEntry> field_entries,
const GrowableArray<ResolvedMethodEntry> method_entries,
TRAPS) {
const int length = index_map.length();
int size = ConstantPoolCache::size(length);
int size = ConstantPoolCache::size();
// Initialize resolved entry arrays with available data
Array<ResolvedFieldEntry>* resolved_field_entries = initialize_resolved_entries_array(loader_data, field_entries, CHECK_NULL);
Array<ResolvedIndyEntry>* resolved_indy_entries = initialize_resolved_entries_array(loader_data, indy_entries, CHECK_NULL);
Array<ResolvedMethodEntry>* resolved_method_entries = initialize_resolved_entries_array(loader_data, method_entries, CHECK_NULL);
return new (loader_data, size, MetaspaceObj::ConstantPoolCacheType, THREAD)
ConstantPoolCache(length, index_map, invokedynamic_map, resolved_indy_entries, resolved_field_entries);
}
void ConstantPoolCache::initialize(const intArray& inverse_index_map,
const intArray& invokedynamic_references_map) {
for (int i = 0; i < inverse_index_map.length(); i++) {
ConstantPoolCacheEntry* e = entry_at(i);
int original_index = inverse_index_map.at(i);
e->initialize_entry(original_index);
assert(entry_at(i) == e, "sanity");
}
for (int ref = 0; ref < invokedynamic_references_map.length(); ref++) {
const int cpci = invokedynamic_references_map.at(ref);
if (cpci >= 0) {
entry_at(cpci)->initialize_resolved_reference_index(ref);
}
}
ConstantPoolCache(invokedynamic_map, resolved_indy_entries, resolved_field_entries, resolved_method_entries);
}
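// The cache constructor now takes the prebuilt resolved entry arrays directly,
// so the old per-entry initialize() loop over inverse_index_map (removed above)
// is gone.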
// Record the GC marking cycle when redefined vs. when found in the loom stack chunks.
@ -696,28 +386,11 @@ void ConstantPoolCache::record_gc_epoch() {
}
#if INCLUDE_CDS
void ConstantPoolCache::save_for_archive(TRAPS) {
ClassLoaderData* loader_data = constant_pool()->pool_holder()->class_loader_data();
_initial_entries = MetadataFactory::new_array<ConstantPoolCacheEntry>(loader_data, length(), CHECK);
for (int i = 0; i < length(); i++) {
_initial_entries->at_put(i, *entry_at(i));
}
}
void ConstantPoolCache::remove_unshareable_info() {
assert(CDSConfig::is_dumping_archive(), "sanity");
// <this> is the copy to be written into the archive. It's in the ArchiveBuilder's "buffer space".
// However, this->_initial_entries was not copied/relocated by the ArchiveBuilder, so it's
// still pointing to the array allocated inside save_for_archive().
assert(_initial_entries != nullptr, "archived cpcache must have been initialized");
assert(!ArchiveBuilder::current()->is_in_buffer_space(_initial_entries), "must be");
for (int i=0; i<length(); i++) {
// Restore each entry to the initial state -- just after Rewriter::make_constant_pool_cache()
// has finished.
*entry_at(i) = _initial_entries->at(i);
}
_initial_entries = nullptr;
if (_resolved_indy_entries != nullptr) {
for (int i = 0; i < _resolved_indy_entries->length(); i++) {
resolved_indy_entry_at(i)->remove_unshareable_info();
@ -728,6 +401,11 @@ void ConstantPoolCache::remove_unshareable_info() {
resolved_field_entry_at(i)->remove_unshareable_info();
}
}
if (_resolved_method_entries != nullptr) {
for (int i = 0; i < _resolved_method_entries->length(); i++) {
resolved_method_entry_at(i)->remove_unshareable_info();
}
}
}
#endif // INCLUDE_CDS
@ -738,18 +416,17 @@ void ConstantPoolCache::deallocate_contents(ClassLoaderData* data) {
MetadataFactory::free_array<u2>(data, _reference_map);
set_reference_map(nullptr);
#if INCLUDE_CDS
if (_initial_entries != nullptr) {
assert(CDSConfig::is_dumping_archive(), "sanity");
MetadataFactory::free_array<ConstantPoolCacheEntry>(data, _initial_entries);
if (_resolved_indy_entries) {
MetadataFactory::free_array<ResolvedIndyEntry>(data, _resolved_indy_entries);
_resolved_indy_entries = nullptr;
}
if (_resolved_field_entries) {
MetadataFactory::free_array<ResolvedFieldEntry>(data, _resolved_field_entries);
_resolved_field_entries = nullptr;
}
_initial_entries = nullptr;
if (_resolved_indy_entries != nullptr) {
MetadataFactory::free_array<ResolvedIndyEntry>(data, _resolved_indy_entries);
_resolved_indy_entries = nullptr;
}
if (_resolved_field_entries != nullptr) {
MetadataFactory::free_array<ResolvedFieldEntry>(data, _resolved_field_entries);
_resolved_field_entries = nullptr;
}
if (_resolved_method_entries != nullptr) {
MetadataFactory::free_array<ResolvedMethodEntry>(data, _resolved_method_entries);
_resolved_method_entries = nullptr;
}
#endif
}
@ -776,6 +453,17 @@ void ConstantPoolCache::set_archived_references(int root_index) {
#endif
#if INCLUDE_JVMTI
void log_adjust(const char* entry_type, Method* old_method, Method* new_method, bool* trace_name_printed) {
ResourceMark rm;
if (!(*trace_name_printed)) {
log_info(redefine, class, update)("adjust: name=%s", old_method->method_holder()->external_name());
*trace_name_printed = true;
}
log_trace(redefine, class, update, constantpool)
("cpc %s entry update: %s", entry_type, new_method->external_name());
}
// RedefineClasses() API support:
// If any entry of this ConstantPoolCache points to any of
// old_methods, replace it with the corresponding new_method.
@ -791,26 +479,30 @@ void ConstantPoolCache::adjust_method_entries(bool * trace_name_printed) {
log_adjust("indy", old_method, new_method, trace_name_printed);
}
}
for (int i = 0; i < length(); i++) {
ConstantPoolCacheEntry* entry = entry_at(i);
Method* old_method = entry->get_interesting_method_entry();
if (old_method == nullptr || !old_method->is_old()) {
continue; // skip uninteresting entries
if (_resolved_method_entries != nullptr) {
for (int i = 0; i < _resolved_method_entries->length(); i++) {
ResolvedMethodEntry* method_entry = resolved_method_entry_at(i);
// get interesting method entry
Method* old_method = method_entry->method();
if (old_method == nullptr || !old_method->is_old()) {
continue; // skip uninteresting entries
}
if (old_method->is_deleted()) {
// clean up entries with deleted methods
method_entry->reset_entry();
continue;
}
Method* new_method = old_method->get_new_method();
method_entry->adjust_method_entry(new_method);
log_adjust("non-indy", old_method, new_method, trace_name_printed);
}
if (old_method->is_deleted()) {
// clean up entries with deleted methods
entry->initialize_entry(entry->constant_pool_index());
continue;
}
Method* new_method = old_method->get_new_method();
entry_at(i)->adjust_method_entry(old_method, new_method, trace_name_printed);
}
}
// the constant pool cache should never contain old or obsolete methods
bool ConstantPoolCache::check_no_old_or_obsolete_entries() {
ResourceMark rm;
if (_resolved_indy_entries) {
if (_resolved_indy_entries != nullptr) {
for (int i = 0; i < _resolved_indy_entries->length(); i++) {
Method* m = resolved_indy_entry_at(i)->method();
if (m != nullptr && !resolved_indy_entry_at(i)->check_no_old_or_obsolete_entry()) {
@ -821,25 +513,23 @@ bool ConstantPoolCache::check_no_old_or_obsolete_entries() {
}
}
}
for (int i = 1; i < length(); i++) {
Method* m = entry_at(i)->get_interesting_method_entry();
if (m != nullptr && !entry_at(i)->check_no_old_or_obsolete_entries()) {
log_trace(redefine, class, update, constantpool)
("cpcache check found old method entry: class: %s, old: %d, obsolete: %d, method: %s",
constant_pool()->pool_holder()->external_name(), m->is_old(), m->is_obsolete(), m->external_name());
return false;
if (_resolved_method_entries != nullptr) {
for (int i = 0; i < _resolved_method_entries->length(); i++) {
ResolvedMethodEntry* method_entry = resolved_method_entry_at(i);
Method* m = method_entry->method();
if (m != nullptr && !method_entry->check_no_old_or_obsolete_entry()) {
log_trace(redefine, class, update, constantpool)
("cpcache check found old method entry: class: %s, old: %d, obsolete: %d, method: %s",
constant_pool()->pool_holder()->external_name(), m->is_old(), m->is_obsolete(), m->external_name());
return false;
}
}
}
return true;
}
void ConstantPoolCache::dump_cache() {
for (int i = 1; i < length(); i++) {
if (entry_at(i)->get_interesting_method_entry() != nullptr) {
entry_at(i)->print(tty, i, this);
}
}
print_on(tty);
}
#endif // INCLUDE_JVMTI
@ -853,6 +543,9 @@ void ConstantPoolCache::metaspace_pointers_do(MetaspaceClosure* it) {
if (_resolved_field_entries != nullptr) {
it->push(&_resolved_field_entries, MetaspaceClosure::_writable);
}
if (_resolved_method_entries != nullptr) {
it->push(&_resolved_method_entries, MetaspaceClosure::_writable);
}
}
bool ConstantPoolCache::save_and_throw_indy_exc(
@ -877,7 +570,7 @@ bool ConstantPoolCache::save_and_throw_indy_exc(
Symbol* error = PENDING_EXCEPTION->klass()->name();
Symbol* message = java_lang_Throwable::detail_message(PENDING_EXCEPTION);
int encoded_index = ResolutionErrorTable::encode_cpcache_index(
int encoded_index = ResolutionErrorTable::encode_indy_index(
ConstantPool::encode_invokedynamic_index(index));
SystemDictionary::add_resolution_error(cpool, encoded_index, error, message);
resolved_indy_entry_at(index)->set_resolution_failed();
@ -897,7 +590,7 @@ oop ConstantPoolCache::set_dynamic_call(const CallInfo &call_info, int index) {
// Before we got here, another thread got a LinkageError exception during
// resolution. Ignore our success and throw their exception.
guarantee(index >= 0, "Invalid indy index");
int encoded_index = ResolutionErrorTable::encode_cpcache_index(
int encoded_index = ResolutionErrorTable::encode_indy_index(
ConstantPool::encode_invokedynamic_index(index));
JavaThread* THREAD = JavaThread::current(); // For exception macros.
constantPoolHandle cp(THREAD, constant_pool());
@ -941,30 +634,45 @@ oop ConstantPoolCache::set_dynamic_call(const CallInfo &call_info, int index) {
return appendix();
}
oop ConstantPoolCache::appendix_if_resolved(int method_index) const {
ResolvedMethodEntry* method_entry = resolved_method_entry_at(method_index);
return appendix_if_resolved(method_entry);
}
oop ConstantPoolCache::appendix_if_resolved(ResolvedMethodEntry* method_entry) const {
if (!method_entry->has_appendix())
return nullptr;
const int ref_index = method_entry->resolved_references_index();
return constant_pool()->resolved_reference_at(ref_index);
}
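For readers following the new appendix plumbing, here is a minimal, hypothetical caller sketch (not part of this change); the helper name and its arguments are assumptions:
// Hypothetical caller sketch: consult the appendix only once the call site is
// linked for the given invoke bytecode.
static oop appendix_for_invokehandle(ConstantPoolCache* cache, int method_index) {
  ResolvedMethodEntry* entry = cache->resolved_method_entry_at(method_index);
  if (!entry->is_resolved(Bytecodes::_invokehandle)) {
    return nullptr; // unresolved: the interpreter would call into the runtime here
  }
  return cache->appendix_if_resolved(entry); // nullptr when no appendix was recorded
}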
// Printing
void ConstantPoolCache::print_on(outputStream* st) const {
st->print_cr("%s", internal_name());
// print constant pool cache entries
for (int i = 0; i < length(); i++) entry_at(i)->print(st, i, this);
print_resolved_field_entries(st);
print_resolved_method_entries(st);
print_resolved_indy_entries(st);
}
void ConstantPoolCache::print_value_on(outputStream* st) const {
st->print("cache [%d]", length());
print_address_on(st);
st->print(" for ");
constant_pool()->print_value_on(st);
}
void ConstantPoolCache::print_resolved_field_entries(outputStream* st) const {
for (int field_index = 0; field_index < resolved_field_entries_length(); field_index++) {
resolved_field_entry_at(field_index)->print_on(st);
}
}
void ConstantPoolCache::print_resolved_method_entries(outputStream* st) const {
for (int method_index = 0; method_index < resolved_method_entries_length(); method_index++) {
ResolvedMethodEntry* method_entry = resolved_method_entry_at(method_index);
method_entry->print_on(st);
if (method_entry->has_appendix()) {
st->print(" appendix: ");
constant_pool()->resolved_reference_from_method(method_index)->print_on(st);
}
}
}
void ConstantPoolCache::print_resolved_indy_entries(outputStream* st) const {
for (int indy_index = 0; indy_index < resolved_indy_entries_length(); indy_index++) {
ResolvedIndyEntry* indy_entry = resolved_indy_entry_at(indy_index);
@ -975,10 +683,3 @@ void ConstantPoolCache::print_resolved_indy_entries(outputStream* st) const {
}
}
}
// Verification
void ConstantPoolCache::verify_on(outputStream* st) {
// print constant pool cache entries
for (int i = 0; i < length(); i++) entry_at(i)->verify(st);
}

View File

@ -38,348 +38,13 @@
// interpreter uses to avoid going into the runtime and a way to access resolved
// values.
// A ConstantPoolCacheEntry describes an individual entry of the constant
// pool cache. There's 2 principal kinds of entries: field entries for in-
// stance & static field access, and method entries for invokes. Some of
// the entry layout is shared and looks as follows:
//
// bit number |31 0|
// bit length |-8--|-8--|---16----|
// --------------------------------
// _indices [ b2 | b1 | index ] index = constant_pool_index
// _f1 [ entry specific ] metadata ptr (method or klass)
// _f2 [ entry specific ] vtable or res_ref index, or vfinal method ptr
// _flags [tos|0|F=1|0|0|0|f|v|0 |0000|field_index] (for field entries)
// bit length [ 4 |1| 1 |1|1|1|1|1|1 |1 |-3-|----16-----]
// _flags [tos|0|F=0|S|A|I|f|0|vf|indy_rf|000|00000|psize] (for method entries)
// bit length [ 4 |1| 1 |1|1|1|1|1|1 |-4--|--8--|--8--]
// --------------------------------
//
// with:
// index = original constant pool index
// b1 = bytecode 1
// b2 = bytecode 2
// psize = parameters size (method entries only)
// field_index = index into field information in holder InstanceKlass
// The index max is 0xffff (max number of fields in constant pool)
// and is multiplied by (InstanceKlass::next_offset) when accessing.
// tos = TosState
// F = the entry is for a field (or F=0 for a method)
// A = call site has an appendix argument (loaded from resolved references)
// I = interface call is forced virtual (must use a vtable index or vfinal)
// f = field or method is final
// v = field is volatile
// vf = virtual but final (method entries only: is_vfinal())
// indy_rf = call site specifier method resolution failed
//
// The flags after TosState have the following interpretation:
// bit 27: 0 for fields, 1 for methods
// f flag true if field is marked final
// v flag true if field is volatile (only for fields)
// f2 flag true if f2 contains an oop (e.g., virtual final method)
// fv flag true if invokeinterface used for method in class Object
//
// The flags 31, 30, 29, 28 together build a 4 bit number 0 to 16 with the
// following mapping to the TosState states:
//
// btos: 0
// ztos: 1
// ctos: 2
// stos: 3
// itos: 4
// ltos: 5
// ftos: 6
// dtos: 7
// atos: 8
// vtos: 9
//
// Entry specific: field entries:
// _indices = get (b1 section) and put (b2 section) bytecodes, original constant pool index
// _f1 = field holder (as a java.lang.Class, not a Klass*)
// _f2 = field offset in bytes
// _flags = field type information, original FieldInfo index in field holder
// (field_index section)
//
// Entry specific: method entries:
// _indices = invoke code for f1 (b1 section), invoke code for f2 (b2 section),
// original constant pool index
// _f1 = Method* for non-virtual calls, unused by virtual calls.
// for interface calls, which are essentially virtual but need a klass,
// contains Klass* for the corresponding interface.
// for invokedynamic and invokehandle, f1 contains the adapter method which
// manages the actual call. The appendix is stored in the ConstantPool
// resolved_references array.
// (upcoming metadata changes will move the appendix to a separate array)
// _f2 = vtable/itable index (or final Method*) for virtual calls only,
// unused by non-virtual. The is_vfinal flag indicates this is a
// method pointer for a final method, not an index.
// _flags = has local signature (MHs and indy),
// virtual final bit (vfinal),
// parameter size (psize section)
//
// Note: invokevirtual & invokespecial bytecodes can share the same constant
// pool entry and thus the same constant pool cache entry. All invoke
// bytecodes but invokevirtual use only _f1 and the corresponding b1
// bytecode, while invokevirtual uses only _f2 and the corresponding
// b2 bytecode. The value of _flags is shared for both types of entries.
//
// The fields are volatile so that they are stored in the order written in the
// source code. The _indices field with the bytecode must be written last.
class CallInfo;
class ResolvedFieldEntry;
class ResolvedIndyEntry;
class ConstantPoolCacheEntry {
friend class VMStructs;
friend class ConstantPool;
friend class InterpreterRuntime;
private:
volatile intx _indices; // constant pool index & rewrite bytecodes
Metadata* volatile _f1; // entry specific metadata field
volatile intx _f2; // entry specific int/metadata field
volatile intx _flags; // flags
void set_bytecode_1(Bytecodes::Code code);
void set_bytecode_2(Bytecodes::Code code);
void set_f1(Metadata* f1) {
Metadata* existing_f1 = _f1; // read once
assert(existing_f1 == nullptr || existing_f1 == f1, "illegal field change");
_f1 = f1;
}
void release_set_f1(Metadata* f1);
void set_f2(intx f2) {
intx existing_f2 = _f2; // read once
assert(existing_f2 == 0 || existing_f2 == f2, "illegal field change");
_f2 = f2;
}
void set_f2_as_vfinal_method(Method* f2) {
assert(is_vfinal(), "flags must be set");
set_f2((intx)f2);
}
intx make_flags(TosState state, int option_bits, int field_index_or_method_params);
void set_flags(intx flags) { _flags = flags; }
void set_field_flags(TosState field_type, int option_bits, int field_index) {
assert((field_index & field_index_mask) == field_index, "field_index in range");
set_flags(make_flags(field_type, option_bits | (1 << is_field_entry_shift), field_index));
}
void set_method_flags(TosState return_type, int option_bits, int method_params) {
assert((method_params & parameter_size_mask) == method_params, "method_params in range");
set_flags(make_flags(return_type, option_bits, method_params));
}
public:
// specific bit definitions for the flags field:
// (Note: the interpreter must use these definitions to access the CP cache.)
enum {
// high order bits are the TosState corresponding to field type or method return type
tos_state_bits = 4,
tos_state_mask = right_n_bits(tos_state_bits),
tos_state_shift = BitsPerInt - tos_state_bits, // see verify_tos_state_shift below
// misc. option bits; can be any bit position in [16..27]
is_field_entry_shift = 26, // (F) is it a field or a method?
has_local_signature_shift = 25, // (S) does the call site have a per-site signature (sig-poly methods)?
has_appendix_shift = 24, // (A) does the call site have an appendix argument?
is_forced_virtual_shift = 23, // (I) is the interface reference forced to virtual mode?
is_final_shift = 22, // (f) is the field or method final?
is_volatile_shift = 21, // (v) is the field volatile?
is_vfinal_shift = 20, // (vf) did the call resolve to a final method?
indy_resolution_failed_shift= 19, // (indy_rf) did call site specifier resolution fail ?
// low order bits give field index (for FieldInfo) or method parameter size:
field_index_bits = 16,
field_index_mask = right_n_bits(field_index_bits),
parameter_size_bits = 8, // subset of field_index_mask, range is 0..255
parameter_size_mask = right_n_bits(parameter_size_bits),
option_bits_mask = ~(((~0u) << tos_state_shift) | (field_index_mask | parameter_size_mask))
};
// specific bit definitions for the indices field:
enum {
cp_index_bits = 2*BitsPerByte,
cp_index_mask = right_n_bits(cp_index_bits),
bytecode_1_shift = cp_index_bits,
bytecode_1_mask = right_n_bits(BitsPerByte), // == (u1)0xFF
bytecode_2_shift = cp_index_bits + BitsPerByte,
bytecode_2_mask = right_n_bits(BitsPerByte) // == (u1)0xFF
};
// Initialization
void initialize_entry(int original_index); // initialize primary entry
void initialize_resolved_reference_index(int ref_index) {
assert(_f2 == 0, "set once"); // note: ref_index might be zero also
_f2 = ref_index;
}
void set_field( // sets entry to resolved field state
Bytecodes::Code get_code, // the bytecode used for reading the field
Bytecodes::Code put_code, // the bytecode used for writing the field
Klass* field_holder, // the object/klass holding the field
int orig_field_index, // the original field index in the field holder
int field_offset, // the field offset in words in the field holder
TosState field_type, // the (machine) field type
bool is_final, // the field is final
bool is_volatile // the field is volatile
);
private:
void set_direct_or_vtable_call(
Bytecodes::Code invoke_code, // the bytecode used for invoking the method
const methodHandle& method, // the method/prototype if any (null, otherwise)
int vtable_index, // the vtable index if any, else negative
bool sender_is_interface
);
public:
void set_direct_call( // sets entry to exact concrete method entry
Bytecodes::Code invoke_code, // the bytecode used for invoking the method
const methodHandle& method, // the method to call
bool sender_is_interface
);
void set_vtable_call( // sets entry to vtable index
Bytecodes::Code invoke_code, // the bytecode used for invoking the method
const methodHandle& method, // resolved method which declares the vtable index
int vtable_index // the vtable index
);
void set_itable_call(
Bytecodes::Code invoke_code, // the bytecode used; must be invokeinterface
Klass* referenced_klass, // the referenced klass in the InterfaceMethodref
const methodHandle& method, // the resolved interface method
int itable_index // index into itable for the method
);
void set_method_handle(
const constantPoolHandle& cpool, // holding constant pool (required for locking)
const CallInfo &call_info // Call link information
);
// Common code for invokedynamic and MH invocations.
// The "appendix" is an optional call-site-specific parameter which is
// pushed by the JVM at the end of the argument list. This argument may
// be a MethodType for the MH.invokes and a CallSite for an invokedynamic
// instruction. However, its exact type and use depends on the Java upcall,
// which simply returns a compiled LambdaForm along with any reference
// that LambdaForm needs to complete the call. If the upcall returns a
// null appendix, the argument is not passed at all.
//
// The appendix is *not* represented in the signature of the symbolic
// reference for the call site, but (if present) it *is* represented in
// the Method* bound to the site. This means that static and dynamic
// resolution logic needs to make slightly different assessments about the
// number and types of arguments.
void set_method_handle_common(
const constantPoolHandle& cpool, // holding constant pool (required for locking)
Bytecodes::Code invoke_code, // _invokehandle or _invokedynamic
const CallInfo &call_info // Call link information
);
// invokedynamic and invokehandle call sites have an "appendix" item in the
// resolved references array.
Method* method_if_resolved(const constantPoolHandle& cpool) const;
oop appendix_if_resolved(const constantPoolHandle& cpool) const;
void set_parameter_size(int value);
// Which bytecode number (1 or 2) in the index field is valid for this bytecode?
// Returns -1 if neither is valid.
static int bytecode_number(Bytecodes::Code code) {
switch (code) {
case Bytecodes::_getstatic : // fall through
case Bytecodes::_getfield : // fall through
case Bytecodes::_invokespecial : // fall through
case Bytecodes::_invokestatic : // fall through
case Bytecodes::_invokehandle : // fall through
case Bytecodes::_invokedynamic : // fall through
case Bytecodes::_invokeinterface : return 1;
case Bytecodes::_putstatic : // fall through
case Bytecodes::_putfield : // fall through
case Bytecodes::_invokevirtual : return 2;
default : break;
}
return -1;
}
// Has this bytecode been resolved? Only valid for invokes and get/put field/static.
bool is_resolved(Bytecodes::Code code) const;
// Accessors
intx indices() const { return _indices; }
intx indices_ord() const;
int constant_pool_index() const { return (indices() & cp_index_mask); }
Bytecodes::Code bytecode_1() const;
Bytecodes::Code bytecode_2() const;
Metadata* f1_ord() const;
Method* f1_as_method() const;
Klass* f1_as_klass() const;
// Use the accessor f1() to acquire _f1's value. This is needed for
// example in BytecodeInterpreter::run(), where is_f1_null() is
// called to check if an invokedynamic call is resolved. This load
// of _f1 must be ordered with the loads performed by
// cache->main_entry_index().
bool is_f1_null() const; // classifies a CPC entry as unbound
int f2_as_index() const { assert(!is_vfinal(), ""); return (int) _f2; }
Method* f2_as_vfinal_method() const { assert(is_vfinal(), ""); return (Method*)_f2; }
Method* f2_as_interface_method() const;
intx flags_ord() const;
int field_index() const { assert(is_field_entry(), ""); return (_flags & field_index_mask); }
int parameter_size() const { assert(is_method_entry(), ""); return (_flags & parameter_size_mask); }
bool is_volatile() const { return (_flags & (1 << is_volatile_shift)) != 0; }
bool is_final() const { return (_flags & (1 << is_final_shift)) != 0; }
bool is_forced_virtual() const { return (_flags & (1 << is_forced_virtual_shift)) != 0; }
bool is_vfinal() const { return (_flags & (1 << is_vfinal_shift)) != 0; }
bool indy_resolution_failed() const;
bool has_appendix() const;
bool has_local_signature() const;
bool is_method_entry() const { return (_flags & (1 << is_field_entry_shift)) == 0; }
bool is_field_entry() const { return (_flags & (1 << is_field_entry_shift)) != 0; }
bool is_long() const { return flag_state() == ltos; }
bool is_double() const { return flag_state() == dtos; }
TosState flag_state() const { assert((uint)number_of_states <= (uint)tos_state_mask+1, "");
return (TosState)((_flags >> tos_state_shift) & tos_state_mask); }
void set_indy_resolution_failed();
// Code generation support
static WordSize size() {
return in_WordSize(align_up((int)sizeof(ConstantPoolCacheEntry), wordSize) / wordSize);
}
static ByteSize size_in_bytes() { return in_ByteSize(sizeof(ConstantPoolCacheEntry)); }
static ByteSize indices_offset() { return byte_offset_of(ConstantPoolCacheEntry, _indices); }
static ByteSize f1_offset() { return byte_offset_of(ConstantPoolCacheEntry, _f1); }
static ByteSize f2_offset() { return byte_offset_of(ConstantPoolCacheEntry, _f2); }
static ByteSize flags_offset() { return byte_offset_of(ConstantPoolCacheEntry, _flags); }
#if INCLUDE_JVMTI
// RedefineClasses() API support:
// If this ConstantPoolCacheEntry refers to old_method then update it
// to refer to new_method.
// trace_name_printed is set to true if the current call has
// printed the klass name so that other routines in the adjust_*
// group don't print the klass name.
void adjust_method_entry(Method* old_method, Method* new_method,
bool* trace_name_printed);
bool check_no_old_or_obsolete_entries();
Method* get_interesting_method_entry();
#endif // INCLUDE_JVMTI
// Debugging & Printing
void print (outputStream* st, int index, const ConstantPoolCache* cache) const;
void verify(outputStream* st) const;
static void verify_tos_state_shift() {
// When shifting flags as a 32-bit int, make sure we don't need an extra mask for tos_state:
assert((((u4)-1 >> tos_state_shift) & ~tos_state_mask) == 0, "no need for tos_state mask");
}
};
class ResolvedMethodEntry;
// A constant pool cache is a runtime data structure set aside to a constant pool. The cache
// holds interpreter runtime information for all field access and invoke bytecodes. The cache
// holds runtime information for all field access and invoke bytecodes. The cache
// is created and initialized before a class is actively used (i.e., initialized), the indivi-
// dual cache entries are filled at resolution (i.e., "link") time (see also: rewriter.*).
@ -389,7 +54,6 @@ class ConstantPoolCache: public MetaspaceObj {
private:
// If you add a new field that points to any metaspace object, you
// must add this field to ConstantPoolCache::metaspace_pointers_do().
int _length;
// The narrowOop pointer to the archived resolved_references. Set at CDS dump
// time when caching java heap object is supported.
@ -407,33 +71,41 @@ class ConstantPoolCache: public MetaspaceObj {
// RedefineClasses support
uint64_t _gc_epoch;
Array<ResolvedIndyEntry>* _resolved_indy_entries;
Array<ResolvedFieldEntry>* _resolved_field_entries;
CDS_ONLY(Array<ConstantPoolCacheEntry>* _initial_entries;)
Array<ResolvedIndyEntry>* _resolved_indy_entries;
Array<ResolvedFieldEntry>* _resolved_field_entries;
Array<ResolvedMethodEntry>* _resolved_method_entries;
// Sizing
debug_only(friend class ClassVerifier;)
public:
// specific bit definitions for ldc
enum {
// high order bits are the TosState corresponding to field type or method return type
tos_state_bits = 4,
tos_state_mask = right_n_bits(tos_state_bits),
tos_state_shift = BitsPerInt - tos_state_bits, // see verify_tos_state_shift below
// low order bits give field index (for FieldInfo) or method parameter size:
field_index_bits = 16,
field_index_mask = right_n_bits(field_index_bits),
};
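As a minimal sketch of how these bit definitions might be used (mirroring the decoding previously done by ConstantPoolCacheEntry::flag_state); the helper and the packed flags word are assumptions, not part of this change:
// Hypothetical decode of a TosState from a packed flags word using the enum above.
static TosState tos_from_flags(u4 flags) {
  return (TosState)((flags >> ConstantPoolCache::tos_state_shift) & ConstantPoolCache::tos_state_mask);
}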
// Constructor
ConstantPoolCache(int length,
const intStack& inverse_index_map,
const intStack& invokedynamic_references_map,
ConstantPoolCache(const intStack& invokedynamic_references_map,
Array<ResolvedIndyEntry>* indy_info,
Array<ResolvedFieldEntry>* field_entries);
Array<ResolvedFieldEntry>* field_entries,
Array<ResolvedMethodEntry>* method_entries);
// Initialization
void initialize(const intArray& inverse_index_map,
const intArray& invokedynamic_references_map);
void initialize(const intArray& invokedynamic_references_map);
public:
static ConstantPoolCache* allocate(ClassLoaderData* loader_data,
const intStack& cp_cache_map,
const intStack& invokedynamic_references_map,
const GrowableArray<ResolvedIndyEntry> indy_entries,
const GrowableArray<ResolvedFieldEntry> field_entries,
const GrowableArray<ResolvedMethodEntry> method_entries,
TRAPS);
int length() const { return _length; }
void metaspace_pointers_do(MetaspaceClosure* it);
MetaspaceObj::Type type() const { return ConstantPoolCacheType; }
@ -446,6 +118,58 @@ class ConstantPoolCache: public MetaspaceObj {
Array<u2>* reference_map() const { return _reference_map; }
void set_reference_map(Array<u2>* o) { _reference_map = o; }
private:
void set_direct_or_vtable_call(
Bytecodes::Code invoke_code, // the bytecode used for invoking the method
int method_index, // Index into the resolved method entry array
const methodHandle& method, // the method/prototype if any (null, otherwise)
int vtable_index, // the vtable index if any, else negative
bool sender_is_interface
);
public:
void set_direct_call( // sets entry to exact concrete method entry
Bytecodes::Code invoke_code, // the bytecode used for invoking the method
int method_index, // Index into the resolved method entry array
const methodHandle& method, // the method to call
bool sender_is_interface
);
void set_vtable_call( // sets entry to vtable index
Bytecodes::Code invoke_code, // the bytecode used for invoking the method
int method_index, // Index into the resolved method entry array
const methodHandle& method, // resolved method which declares the vtable index
int vtable_index // the vtable index
);
void set_itable_call(
Bytecodes::Code invoke_code, // the bytecode used; must be invokeinterface
int method_index, // Index into the resolved method entry array
Klass* referenced_klass, // the referenced klass in the InterfaceMethodref
const methodHandle& method, // the resolved interface method
int itable_index // index into itable for the method
);
// The "appendix" is an optional call-site-specific parameter which is
// pushed by the JVM at the end of the argument list. This argument may
// be a MethodType for the MH.invokes and a CallSite for an invokedynamic
// instruction. However, its exact type and use depends on the Java upcall,
// which simply returns a compiled LambdaForm along with any reference
// that LambdaForm needs to complete the call. If the upcall returns a
// null appendix, the argument is not passed at all.
//
// The appendix is *not* represented in the signature of the symbolic
// reference for the call site, but (if present) it *is* represented in
// the Method* bound to the site. This means that static and dynamic
// resolution logic needs to make slightly different assessments about the
// number and types of arguments.
ResolvedMethodEntry* set_method_handle(
int method_index,
const CallInfo &call_info // Call link information
);
Method* method_if_resolved(int method_index) const;
Array<ResolvedFieldEntry>* resolved_field_entries() { return _resolved_field_entries; }
inline ResolvedFieldEntry* resolved_field_entry_at(int field_index) const;
inline int resolved_field_entries_length() const;
@ -456,49 +180,37 @@ class ConstantPoolCache: public MetaspaceObj {
inline int resolved_indy_entries_length() const;
void print_resolved_indy_entries(outputStream* st) const;
Array<ResolvedMethodEntry>* resolved_method_entries() { return _resolved_method_entries; }
inline ResolvedMethodEntry* resolved_method_entry_at(int method_index) const;
inline int resolved_method_entries_length() const;
void print_resolved_method_entries(outputStream* st) const;
// Assembly code support
static ByteSize resolved_references_offset() { return byte_offset_of(ConstantPoolCache, _resolved_references); }
static ByteSize invokedynamic_entries_offset() { return byte_offset_of(ConstantPoolCache, _resolved_indy_entries); }
static ByteSize field_entries_offset() { return byte_offset_of(ConstantPoolCache, _resolved_field_entries); }
static ByteSize resolved_references_offset() { return byte_offset_of(ConstantPoolCache, _resolved_references); }
static ByteSize invokedynamic_entries_offset() { return byte_offset_of(ConstantPoolCache, _resolved_indy_entries); }
static ByteSize field_entries_offset() { return byte_offset_of(ConstantPoolCache, _resolved_field_entries); }
static ByteSize method_entries_offset() { return byte_offset_of(ConstantPoolCache, _resolved_method_entries); }
#if INCLUDE_CDS
void remove_unshareable_info();
void save_for_archive(TRAPS);
#endif
private:
void walk_entries_for_initialization(bool check_only);
void set_length(int length) { _length = length; }
static int header_size() { return sizeof(ConstantPoolCache) / wordSize; }
static int size(int length) { return align_metadata_size(header_size() + length * in_words(ConstantPoolCacheEntry::size())); }
public:
int size() const { return size(length()); }
static int size() { return align_metadata_size(sizeof(ConstantPoolCache) / wordSize); }
private:
// Helpers
ConstantPool** constant_pool_addr() { return &_constant_pool; }
ConstantPoolCacheEntry* base() const { return (ConstantPoolCacheEntry*)((address)this + in_bytes(base_offset())); }
friend class ConstantPoolCacheEntry;
public:
// Accessors
void set_constant_pool(ConstantPool* pool) { _constant_pool = pool; }
ConstantPool* constant_pool() const { return _constant_pool; }
// Fetches the entry at the given index.
// In either case the index must not be encoded or byte-swapped in any way.
ConstantPoolCacheEntry* entry_at(int i) const {
assert(0 <= i && i < length(), "index out of bounds");
return base() + i;
}
// Code generation
static ByteSize base_offset() { return in_ByteSize(sizeof(ConstantPoolCache)); }
static ByteSize entry_offset(int raw_index) {
int index = raw_index;
return (base_offset() + ConstantPoolCacheEntry::size_in_bytes() * index);
}
#if INCLUDE_JVMTI
// RedefineClasses() API support:
@ -525,6 +237,8 @@ class ConstantPoolCache: public MetaspaceObj {
// chance to record its failure.
bool save_and_throw_indy_exc(const constantPoolHandle& cpool, int cpool_index, int index, constantTag tag, TRAPS);
oop set_dynamic_call(const CallInfo &call_info, int index);
oop appendix_if_resolved(int method_index) const;
oop appendix_if_resolved(ResolvedMethodEntry* method_entry) const;
// Printing
void print_on(outputStream* st) const;

View File

@ -30,78 +30,20 @@
#include "oops/oopHandle.inline.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "runtime/atomic.hpp"
inline intx ConstantPoolCacheEntry::indices_ord() const { return Atomic::load_acquire(&_indices); }
inline Bytecodes::Code ConstantPoolCacheEntry::bytecode_1() const {
return Bytecodes::cast((indices_ord() >> bytecode_1_shift) & bytecode_1_mask);
}
inline Bytecodes::Code ConstantPoolCacheEntry::bytecode_2() const {
return Bytecodes::cast((indices_ord() >> bytecode_2_shift) & bytecode_2_mask);
}
// Has this bytecode been resolved? Only valid for invokes and get/put field/static.
inline bool ConstantPoolCacheEntry::is_resolved(Bytecodes::Code code) const {
switch (bytecode_number(code)) {
case 1: return (bytecode_1() == code);
case 2: return (bytecode_2() == code);
}
return false; // default: not resolved
}
inline Method* ConstantPoolCacheEntry::f2_as_interface_method() const {
assert(bytecode_1() == Bytecodes::_invokeinterface, "");
return (Method*)_f2;
}
inline Metadata* ConstantPoolCacheEntry::f1_ord() const { return (Metadata *)Atomic::load_acquire(&_f1); }
inline Method* ConstantPoolCacheEntry::f1_as_method() const {
Metadata* f1 = f1_ord(); assert(f1 == nullptr || f1->is_method(), "");
return (Method*)f1;
}
inline Klass* ConstantPoolCacheEntry::f1_as_klass() const {
Metadata* f1 = f1_ord(); assert(f1 == nullptr || f1->is_klass(), "");
return (Klass*)f1;
}
inline bool ConstantPoolCacheEntry::is_f1_null() const { Metadata* f1 = f1_ord(); return f1 == nullptr; }
inline bool ConstantPoolCacheEntry::has_appendix() const {
return (!is_f1_null()) && (_flags & (1 << has_appendix_shift)) != 0;
}
inline bool ConstantPoolCacheEntry::has_local_signature() const {
return (!is_f1_null()) && (_flags & (1 << has_local_signature_shift)) != 0;
}
inline intx ConstantPoolCacheEntry::flags_ord() const { return (intx)Atomic::load_acquire(&_flags); }
inline bool ConstantPoolCacheEntry::indy_resolution_failed() const {
intx flags = flags_ord();
return (flags & (1 << indy_resolution_failed_shift)) != 0;
}
// Constructor
inline ConstantPoolCache::ConstantPoolCache(int length,
const intStack& inverse_index_map,
const intStack& invokedynamic_references_map,
inline ConstantPoolCache::ConstantPoolCache(const intStack& invokedynamic_references_map,
Array<ResolvedIndyEntry>* invokedynamic_info,
Array<ResolvedFieldEntry>* field_entries) :
_length(length),
Array<ResolvedFieldEntry>* field_entries,
Array<ResolvedMethodEntry>* method_entries) :
_constant_pool(nullptr),
_gc_epoch(0),
_resolved_indy_entries(invokedynamic_info),
_resolved_field_entries(field_entries) {
_resolved_field_entries(field_entries),
_resolved_method_entries(method_entries) {
CDS_JAVA_HEAP_ONLY(_archived_references_index = -1;)
initialize(inverse_index_map,
invokedynamic_references_map);
for (int i = 0; i < length; i++) {
assert(entry_at(i)->is_f1_null(), "Failed to clear?");
}
}
inline objArrayOop ConstantPoolCache::resolved_references() {
@ -118,6 +60,14 @@ inline int ConstantPoolCache::resolved_field_entries_length() const {
return _resolved_field_entries->length();
}
inline ResolvedMethodEntry* ConstantPoolCache::resolved_method_entry_at(int method_index) const {
return _resolved_method_entries->adr_at(method_index);
}
inline int ConstantPoolCache::resolved_method_entries_length() const {
return _resolved_method_entries->length();
}
inline ResolvedIndyEntry* ConstantPoolCache::resolved_indy_entry_at(int index) const {
return _resolved_indy_entries->adr_at(index);
}
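A small, hypothetical walk over the new per-invoke table using the inline accessors above (illustration only; the helper is an assumption):
// Hypothetical helper: count how many method entries have been linked so far.
static int count_resolved_methods(const ConstantPoolCache* cache) {
  int resolved = 0;
  for (int i = 0; i < cache->resolved_method_entries_length(); i++) {
    if (cache->resolved_method_entry_at(i)->method() != nullptr) {
      resolved++; // a non-null Method* means this entry has been resolved
    }
  }
  return resolved;
}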

View File

@ -1318,7 +1318,7 @@ void GenerateOopMap::print_current_state(outputStream *os,
case Bytecodes::_invokestatic:
case Bytecodes::_invokedynamic:
case Bytecodes::_invokeinterface: {
int idx = currentBC->has_index_u4() ? currentBC->get_index_u4() : currentBC->get_index_u2_cpcache();
int idx = currentBC->has_index_u4() ? currentBC->get_index_u4() : currentBC->get_index_u2();
ConstantPool* cp = method()->constants();
int nameAndTypeIdx = cp->name_and_type_ref_index_at(idx, currentBC->code());
int signatureIdx = cp->signature_ref_index_at(nameAndTypeIdx);
@ -1597,24 +1597,16 @@ void GenerateOopMap::interp1(BytecodeStream *itr) {
case Bytecodes::_jsr: do_jsr(itr->dest()); break;
case Bytecodes::_jsr_w: do_jsr(itr->dest_w()); break;
case Bytecodes::_getstatic:
do_field(true, true, itr->get_index_u2(), itr->bci(), itr->code());
break;
case Bytecodes::_putstatic:
do_field(false, true, itr->get_index_u2(), itr->bci(), itr->code());
break;
case Bytecodes::_getfield:
do_field(true, false, itr->get_index_u2(), itr->bci(), itr->code());
break;
case Bytecodes::_putfield:
do_field(false, false, itr->get_index_u2(), itr->bci(), itr->code());
break;
case Bytecodes::_getstatic: do_field(true, true, itr->get_index_u2(), itr->bci(), itr->code()); break;
case Bytecodes::_putstatic: do_field(false, true, itr->get_index_u2(), itr->bci(), itr->code()); break;
case Bytecodes::_getfield: do_field(true, false, itr->get_index_u2(), itr->bci(), itr->code()); break;
case Bytecodes::_putfield: do_field(false, false, itr->get_index_u2(), itr->bci(), itr->code()); break;
case Bytecodes::_invokevirtual:
case Bytecodes::_invokespecial: do_method(false, false, itr->get_index_u2_cpcache(), itr->bci(), itr->code()); break;
case Bytecodes::_invokestatic: do_method(true, false, itr->get_index_u2_cpcache(), itr->bci(), itr->code()); break;
case Bytecodes::_invokedynamic: do_method(true, false, itr->get_index_u4(), itr->bci(), itr->code()); break;
case Bytecodes::_invokeinterface: do_method(false, true, itr->get_index_u2_cpcache(), itr->bci(), itr->code()); break;
case Bytecodes::_invokespecial: do_method(false, false, itr->get_index_u2(), itr->bci(), itr->code()); break;
case Bytecodes::_invokestatic: do_method(true, false, itr->get_index_u2(), itr->bci(), itr->code()); break;
case Bytecodes::_invokedynamic: do_method(true, false, itr->get_index_u4(), itr->bci(), itr->code()); break;
case Bytecodes::_invokeinterface: do_method(false, true, itr->get_index_u2(), itr->bci(), itr->code()); break;
case Bytecodes::_newarray:
case Bytecodes::_anewarray: pp_new_ref(vCTS, itr->bci()); break;
case Bytecodes::_checkcast: do_checkcast(); break;

View File

@ -37,10 +37,10 @@
// "resolution" refers to populating the getcode and putcode fields and other relevant information.
// The field's type (TOS), offset, holder klass, and index within that class can all be acquired
// together and are used to populate this structure. These entries are contained
// within the ConstantPoolCache and are accessed with indices added to the invokedynamic bytecode after
// within the ConstantPoolCache and are accessed with indices added to the bytecode after
// rewriting.
// Field bytecodes start with a constant pool index as their operate, which is then rewritten to
// Field bytecodes start with a constant pool index as their operand, which is then rewritten to
// a "field index", which is an index into the array of ResolvedFieldEntry.
//class InstanceKlass;
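To illustrate the rewriting described above, a hypothetical helper (not part of this change) that maps a rewritten field operand back to its original constant pool index:
// Hypothetical mapping sketch: the rewritten getfield/putfield operand indexes the
// ResolvedFieldEntry array; each entry remembers its original constant pool index.
static int original_cp_index_for_field(ConstantPoolCache* cache, int field_index) {
  return cache->resolved_field_entry_at(field_index)->constant_pool_index();
}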

View File

@ -0,0 +1,91 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "oops/method.hpp"
#include "oops/resolvedMethodEntry.hpp"
bool ResolvedMethodEntry::check_no_old_or_obsolete_entry() {
// return false if m refers to a non-deleted old or obsolete method
if (_method != nullptr) {
assert(_method->is_valid() && _method->is_method(), "m is a valid method");
return !_method->is_old() && !_method->is_obsolete(); // old is always set for old and obsolete
} else {
return true;
}
}
void ResolvedMethodEntry::reset_entry() {
if (has_resolved_ref_index()) {
u2 saved_resolved_references_index = _entry_specific._resolved_references_index;
u2 saved_cpool_index = _cpool_index;
memset(this, 0, sizeof(*this));
_entry_specific._resolved_references_index = saved_resolved_references_index;
_cpool_index = saved_cpool_index;
} else {
u2 saved_cpool_index = _cpool_index;
memset(this, 0, sizeof(*this));
_cpool_index = saved_cpool_index;
}
}
void ResolvedMethodEntry::remove_unshareable_info() {
reset_entry();
}
void ResolvedMethodEntry::print_on(outputStream* st) const {
st->print_cr("Method Entry:");
if (method() != nullptr) {
st->print_cr(" - Method: " INTPTR_FORMAT " %s", p2i(method()), method()->external_name());
} else {
st->print_cr("- Method: null");
}
// Some fields are mutually exclusive and are only used by certain invoke codes
if (bytecode1() == Bytecodes::_invokeinterface && interface_klass() != nullptr) {
st->print_cr(" - Klass: " INTPTR_FORMAT " %s", p2i(interface_klass()), interface_klass()->external_name());
} else {
st->print_cr("- Klass: null");
}
if (bytecode1() == Bytecodes::_invokehandle) {
st->print_cr(" - Resolved References Index: %d", resolved_references_index());
} else {
st->print_cr(" - Resolved References Index: none");
}
if (bytecode2() == Bytecodes::_invokevirtual) {
st->print_cr(" - Table Index: %d", table_index());
} else {
st->print_cr(" - Table Index: none");
}
st->print_cr(" - CP Index: %d", constant_pool_index());
st->print_cr(" - TOS: %s", type2name(as_BasicType((TosState)tos_state())));
st->print_cr(" - Number of Parameters: %d", number_of_parameters());
st->print_cr(" - Is Virtual Final: %d", is_vfinal());
st->print_cr(" - Is Final: %d", is_final());
st->print_cr(" - Is Forced Virtual: %d", is_forced_virtual());
st->print_cr(" - Has Appendix: %d", has_appendix());
st->print_cr(" - Has Local Signature: %d", has_local_signature());
st->print_cr(" - Bytecode 1: %s", Bytecodes::name((Bytecodes::Code)bytecode1()));
st->print_cr(" - Bytecode 2: %s", Bytecodes::name((Bytecodes::Code)bytecode2()));
}

View File

@ -0,0 +1,220 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_OOPS_RESOLVEDMETHODENTRY_HPP
#define SHARE_OOPS_RESOLVEDMETHODENTRY_HPP
#include "interpreter/bytecodes.hpp"
#include "runtime/atomic.hpp"
#include "utilities/sizes.hpp"
// ResolvedMethodEntry contains the resolution information for the invoke bytecodes
// invokestatic, invokespecial, invokeinterface, invokevirtual, and invokehandle but
// NOT invokedynamic (see resolvedIndyEntry.hpp). A member of this class can be initialized
// with the constant pool index associated with the bytecode before any resolution is done,
// where "resolution" refers to populating the bytecode1 and bytecode2 fields and other
// relevant information. These entries are contained within the ConstantPoolCache and are
// accessed with indices added to the bytecode after rewriting.
// Invoke bytecodes start with a constant pool index as their operand, which is then
// rewritten to a "method index", which is an index into the array of ResolvedMethodEntry.
// This structure has fields for every type of invoke bytecode but each entry may only
// use some of the fields. All entries have a TOS state, number of parameters, flags,
// and a constant pool index.
// Types of invokes:
// invokestatic
// invokespecial
//   Method*
// invokehandle
//   Method*
//   resolved references index
// invokevirtual
//   Method* (if vfinal is true)
//   vtable/itable index
// invokeinterface
//   Klass*
//   Method*
// Note: invokevirtual & invokespecial bytecodes can share the same constant
// pool entry and thus the same resolved method entry.
// The is_vfinal flag indicates method pointer for a final method or an index.
class InstanceKlass;
class ResolvedMethodEntry {
friend class VMStructs;
Method* _method; // Method for non-virtual calls, adapter method for invokehandle, final method for vfinal invokevirtual
union { // These fields are mutually exclusive and are only used by some invoke codes
InstanceKlass* _interface_klass; // for interface and static
u2 _resolved_references_index; // Index of resolved references array that holds the appendix oop for invokehandle
u2 _table_index; // vtable/itable index for virtual and interface calls
} _entry_specific;
u2 _cpool_index; // Constant pool index
u2 _number_of_parameters; // Number of arguments for method
u1 _tos_state; // TOS state
u1 _flags; // Flags: [00|has_resolved_ref_index|has_local_signature|has_appendix|forced_virtual|final|virtual_final]
u1 _bytecode1, _bytecode2; // Resolved invoke codes
// Constructors
public:
ResolvedMethodEntry(u2 cpi) :
_method(nullptr),
_cpool_index(cpi),
_number_of_parameters(0),
_tos_state(0),
_flags(0),
_bytecode1(0),
_bytecode2(0) { _entry_specific._interface_klass = nullptr; }
ResolvedMethodEntry() :
ResolvedMethodEntry(0) {}
// Bit shift to get flags
enum {
is_vfinal_shift = 0,
is_final_shift = 1,
is_forced_virtual_shift = 2,
has_appendix_shift = 3,
has_local_signature_shift = 4,
has_resolved_ref_shift = 5
};
// Getters
Method* method() const { return Atomic::load_acquire(&_method); }
InstanceKlass* interface_klass() const {
assert(_bytecode1 == Bytecodes::_invokeinterface, "Only invokeinterface has a klass %d", _bytecode1);
return _entry_specific._interface_klass;
}
u2 resolved_references_index() const {
// This index may be read before resolution completes
return _entry_specific._resolved_references_index;
}
u2 table_index() const {
assert(_bytecode2 == Bytecodes::_invokevirtual, "Only invokevirtual has a vtable/itable index %d", _bytecode2);
return _entry_specific._table_index;
}
u2 constant_pool_index() const { return _cpool_index; }
u1 tos_state() const { return _tos_state; }
u2 number_of_parameters() const { return _number_of_parameters; }
u1 bytecode1() const { return Atomic::load_acquire(&_bytecode1); }
u1 bytecode2() const { return Atomic::load_acquire(&_bytecode2); }
// Flags
bool is_vfinal() const { return (_flags & (1 << is_vfinal_shift)) != 0; }
bool is_final() const { return (_flags & (1 << is_final_shift)) != 0; }
bool is_forced_virtual() const { return (_flags & (1 << is_forced_virtual_shift)) != 0; }
bool has_appendix() const { return (_flags & (1 << has_appendix_shift)) != 0; }
bool has_local_signature() const { return (_flags & (1 << has_local_signature_shift)) != 0; }
bool has_resolved_ref_index() const { return (_flags & (1 << has_resolved_ref_shift)) != 0; }
bool is_resolved(Bytecodes::Code code) const {
switch(code) {
case Bytecodes::_invokeinterface:
case Bytecodes::_invokehandle:
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
return (bytecode1() == code);
case Bytecodes::_invokevirtual:
return (bytecode2() == code);
default:
ShouldNotReachHere();
return false;
}
}
void adjust_method_entry(Method* new_method) {
// this is done during the redefinition safepoint
_method = new_method;
}
bool check_no_old_or_obsolete_entry();
// Printing
void print_on(outputStream* st) const;
// Setters
void set_flags(u1 flags) { _flags |= flags; }
inline void set_bytecode(u1* code, u1 new_code) {
#ifdef ASSERT
// Read once.
volatile Bytecodes::Code c = (Bytecodes::Code)*code;
assert(c == 0 || c == new_code || new_code == 0, "update must be consistent old: %d, new: %d", c, new_code);
#endif
Atomic::release_store(code, new_code);
}
void set_bytecode1(u1 b1) {
set_bytecode(&_bytecode1, b1);
}
void set_bytecode2(u1 b2) {
set_bytecode(&_bytecode2, b2);
}
void set_method(Method* m) {
Atomic::release_store(&_method, m);
}
void set_klass(InstanceKlass* klass) {
_entry_specific._interface_klass = klass;
}
void set_resolved_references_index(u2 ref_index) {
set_flags(1 << has_resolved_ref_shift);
_entry_specific._resolved_references_index = ref_index;
}
void set_table_index(u2 table_index) {
_entry_specific._table_index = table_index;
}
void set_num_parameters(u2 num_params) {
_number_of_parameters = num_params;
}
void fill_in(u1 tos_state, u2 num_params) {
_tos_state = tos_state;
_number_of_parameters = num_params;
}
void reset_entry();
// CDS
void remove_unshareable_info();
// Offsets
static ByteSize klass_offset() { return byte_offset_of(ResolvedMethodEntry, _entry_specific._interface_klass); }
static ByteSize method_offset() { return byte_offset_of(ResolvedMethodEntry, _method); }
static ByteSize resolved_references_index_offset() { return byte_offset_of(ResolvedMethodEntry, _entry_specific._resolved_references_index); }
static ByteSize table_index_offset() { return byte_offset_of(ResolvedMethodEntry, _entry_specific._table_index); }
static ByteSize num_parameters_offset() { return byte_offset_of(ResolvedMethodEntry, _number_of_parameters); }
static ByteSize type_offset() { return byte_offset_of(ResolvedMethodEntry, _tos_state); }
static ByteSize flags_offset() { return byte_offset_of(ResolvedMethodEntry, _flags); }
static ByteSize bytecode1_offset() { return byte_offset_of(ResolvedMethodEntry, _bytecode1); }
static ByteSize bytecode2_offset() { return byte_offset_of(ResolvedMethodEntry, _bytecode2); }
};
#endif //SHARE_OOPS_RESOLVEDMETHODENTRY_HPP
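To tie the fields and flags above together, a hypothetical invokevirtual dispatch sketch (illustration only; select_virtual_target, receiver_klass, and the use of Klass::method_at_vtable here are assumptions):
// Hypothetical dispatch sketch for an invokevirtual site backed by a ResolvedMethodEntry.
static Method* select_virtual_target(ResolvedMethodEntry* entry, InstanceKlass* receiver_klass) {
  if (!entry->is_resolved(Bytecodes::_invokevirtual)) {
    return nullptr; // unresolved: linking happens in the runtime before dispatch
  }
  if (entry->is_vfinal()) {
    return entry->method();                                        // final target cached directly
  }
  return receiver_klass->method_at_vtable(entry->table_index());  // otherwise a vtable lookup
}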

View File

@ -508,7 +508,7 @@ bool InlineTree::pass_initial_checks(ciMethod* caller_method, int caller_bci, ci
Bytecodes::Code call_bc = iter.cur_bc();
// An invokedynamic instruction does not have a klass.
if (call_bc != Bytecodes::_invokedynamic) {
int index = iter.get_index_u2_cpcache();
int index = iter.get_index_u2();
if (!caller_method->is_klass_loaded(index, call_bc, true)) {
return false;
}

View File

@ -1046,15 +1046,13 @@ void JvmtiClassFileReconstituter::copy_bytecodes(const methodHandle& mh,
int cpci = Bytes::get_native_u2(bcp+1);
bool is_invokedynamic = (code == Bytecodes::_invokedynamic);
ConstantPoolCacheEntry* entry;
int pool_index;
if (is_invokedynamic) {
cpci = Bytes::get_native_u4(bcp+1);
pool_index = mh->constants()->resolved_indy_entry_at(mh->constants()->decode_invokedynamic_index(cpci))->constant_pool_index();
} else {
// cache cannot be pre-fetched since some classes won't have it yet
entry = mh->constants()->cache()->entry_at(cpci);
pool_index = entry->constant_pool_index();
// cache cannot be pre-fetched since some classes won't have it yet
pool_index = mh->constants()->resolved_method_entry_at(cpci)->constant_pool_index();
}
assert(pool_index < mh->constants()->length(), "sanity check");
Bytes::put_Java_u2((address)(p+1), (u2)pool_index); // java byte ordering

View File

@ -104,14 +104,14 @@ bool MethodComparator::args_same(Bytecodes::Code const c_old, Bytecodes::Code c
case Bytecodes::_invokespecial : // fall through
case Bytecodes::_invokestatic : // fall through
case Bytecodes::_invokeinterface : {
int cpci_old = s_old->get_index_u2_cpcache();
int cpci_new = s_new->get_index_u2_cpcache();
int index_old = s_old->get_index_u2();
int index_new = s_new->get_index_u2();
// Check if the names of classes, field/method names and signatures at these indexes
// are the same. Indices which are really into constantpool cache (rather than constant
// pool itself) are accepted by the constantpool query routines below.
if ((old_cp->klass_ref_at_noresolve(cpci_old, c_old) != new_cp->klass_ref_at_noresolve(cpci_new, c_old)) ||
(old_cp->name_ref_at(cpci_old, c_old) != new_cp->name_ref_at(cpci_new, c_old)) ||
(old_cp->signature_ref_at(cpci_old, c_old) != new_cp->signature_ref_at(cpci_new, c_old)))
if ((old_cp->klass_ref_at_noresolve(index_old, c_old) != new_cp->klass_ref_at_noresolve(index_new, c_old)) ||
(old_cp->name_ref_at(index_old, c_old) != new_cp->name_ref_at(index_new, c_old)) ||
(old_cp->signature_ref_at(index_old, c_old) != new_cp->signature_ref_at(index_new, c_old)))
return false;
break;
}

View File

@ -1868,40 +1868,12 @@ WB_ENTRY(jlong, WB_GetConstantPool(JNIEnv* env, jobject wb, jclass klass))
return (jlong) ik->constants();
WB_END
WB_ENTRY(jint, WB_GetConstantPoolCacheIndexTag(JNIEnv* env, jobject wb))
return ConstantPool::CPCACHE_INDEX_TAG;
WB_END
WB_ENTRY(jint, WB_GetConstantPoolCacheLength(JNIEnv* env, jobject wb, jclass klass))
InstanceKlass* ik = InstanceKlass::cast(java_lang_Class::as_Klass(JNIHandles::resolve(klass)));
ConstantPool* cp = ik->constants();
if (cp->cache() == nullptr) {
return -1;
}
return cp->cache()->length();
WB_END
WB_ENTRY(jobjectArray, WB_GetResolvedReferences(JNIEnv* env, jobject wb, jclass klass))
InstanceKlass* ik = InstanceKlass::cast(java_lang_Class::as_Klass(JNIHandles::resolve(klass)));
objArrayOop resolved_refs= ik->constants()->resolved_references();
return (jobjectArray)JNIHandles::make_local(THREAD, resolved_refs);
WB_END
WB_ENTRY(jint, WB_ConstantPoolRemapInstructionOperandFromCache(JNIEnv* env, jobject wb, jclass klass, jint index))
InstanceKlass* ik = InstanceKlass::cast(java_lang_Class::as_Klass(JNIHandles::resolve(klass)));
ConstantPool* cp = ik->constants();
if (cp->cache() == nullptr) {
THROW_MSG_0(vmSymbols::java_lang_IllegalStateException(), "Constant pool does not have a cache");
}
jint cpci = index;
jint cpciTag = ConstantPool::CPCACHE_INDEX_TAG;
if (cpciTag > cpci || cpci >= cp->cache()->length() + cpciTag) {
THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "Constant pool cache index is out of range");
}
jint cpi = cp->remap_instruction_operand_from_cache(cpci);
return cpi;
WB_END
WB_ENTRY(jint, WB_ConstantPoolEncodeIndyIndex(JNIEnv* env, jobject wb, jint index))
return ConstantPool::encode_invokedynamic_index(index);
WB_END
@ -1924,6 +1896,24 @@ WB_ENTRY(jint, WB_getFieldCPIndex(JNIEnv* env, jobject wb, jclass klass, jint in
return cp->resolved_field_entry_at(index)->constant_pool_index();
WB_END
WB_ENTRY(jint, WB_getMethodEntriesLength(JNIEnv* env, jobject wb, jclass klass))
InstanceKlass* ik = InstanceKlass::cast(java_lang_Class::as_Klass(JNIHandles::resolve(klass)));
ConstantPool* cp = ik->constants();
if (cp->cache() == nullptr) {
return -1;
}
return cp->resolved_method_entries_length();
WB_END
WB_ENTRY(jint, WB_getMethodCPIndex(JNIEnv* env, jobject wb, jclass klass, jint index))
InstanceKlass* ik = InstanceKlass::cast(java_lang_Class::as_Klass(JNIHandles::resolve(klass)));
ConstantPool* cp = ik->constants();
if (cp->cache() == nullptr) {
return -1;
}
return cp->resolved_method_entry_at(index)->constant_pool_index();
WB_END
WB_ENTRY(jint, WB_getIndyInfoLength(JNIEnv* env, jobject wb, jclass klass))
InstanceKlass* ik = InstanceKlass::cast(java_lang_Class::as_Klass(JNIHandles::resolve(klass)));
ConstantPool* cp = ik->constants();
@ -2812,15 +2802,13 @@ static JNINativeMethod methods[] = {
{CC"forceSafepoint", CC"()V", (void*)&WB_ForceSafepoint },
{CC"forceClassLoaderStatsSafepoint", CC"()V", (void*)&WB_ForceClassLoaderStatsSafepoint },
{CC"getConstantPool0", CC"(Ljava/lang/Class;)J", (void*)&WB_GetConstantPool },
{CC"getConstantPoolCacheIndexTag0", CC"()I", (void*)&WB_GetConstantPoolCacheIndexTag},
{CC"getConstantPoolCacheLength0", CC"(Ljava/lang/Class;)I", (void*)&WB_GetConstantPoolCacheLength},
{CC"getResolvedReferences0", CC"(Ljava/lang/Class;)[Ljava/lang/Object;", (void*)&WB_GetResolvedReferences},
{CC"remapInstructionOperandFromCPCache0",
CC"(Ljava/lang/Class;I)I", (void*)&WB_ConstantPoolRemapInstructionOperandFromCache},
{CC"encodeConstantPoolIndyIndex0",
CC"(I)I", (void*)&WB_ConstantPoolEncodeIndyIndex},
{CC"getFieldEntriesLength0", CC"(Ljava/lang/Class;)I", (void*)&WB_getFieldEntriesLength},
{CC"getFieldCPIndex0", CC"(Ljava/lang/Class;I)I", (void*)&WB_getFieldCPIndex},
{CC"getMethodEntriesLength0", CC"(Ljava/lang/Class;)I", (void*)&WB_getMethodEntriesLength},
{CC"getMethodCPIndex0", CC"(Ljava/lang/Class;I)I", (void*)&WB_getMethodCPIndex},
{CC"getIndyInfoLength0", CC"(Ljava/lang/Class;)I", (void*)&WB_getIndyInfoLength},
{CC"getIndyCPIndex0", CC"(Ljava/lang/Class;I)I", (void*)&WB_getIndyCPIndex},
{CC"printClasses0", CC"(Ljava/lang/String;I)Ljava/lang/String;", (void*)&WB_printClasses},

View File

@ -85,6 +85,7 @@
#include "oops/oopHandle.hpp"
#include "oops/resolvedFieldEntry.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "oops/symbol.hpp"
#include "oops/typeArrayKlass.hpp"
#include "oops/typeArrayOop.hpp"
@ -224,10 +225,11 @@
nonstatic_field(ConstantPool, _source_file_name_index, u2) \
nonstatic_field(ConstantPoolCache, _resolved_references, OopHandle) \
nonstatic_field(ConstantPoolCache, _reference_map, Array<u2>*) \
nonstatic_field(ConstantPoolCache, _length, int) \
nonstatic_field(ConstantPoolCache, _constant_pool, ConstantPool*) \
nonstatic_field(ConstantPoolCache, _resolved_field_entries, Array<ResolvedFieldEntry>*) \
nonstatic_field(ResolvedFieldEntry, _cpool_index, u2) \
nonstatic_field(ConstantPoolCache, _resolved_method_entries, Array<ResolvedMethodEntry>*) \
nonstatic_field(ResolvedMethodEntry, _cpool_index, u2) \
nonstatic_field(ConstantPoolCache, _resolved_indy_entries, Array<ResolvedIndyEntry>*) \
nonstatic_field(ResolvedIndyEntry, _cpool_index, u2) \
volatile_nonstatic_field(InstanceKlass, _array_klasses, ObjArrayKlass*) \
@ -337,15 +339,6 @@
nonstatic_field(Annotations, _class_type_annotations, Array<u1>*) \
nonstatic_field(Annotations, _fields_type_annotations, Array<Array<u1>*>*) \
\
/***********************/ \
/* Constant Pool Cache */ \
/***********************/ \
\
volatile_nonstatic_field(ConstantPoolCacheEntry, _indices, intx) \
volatile_nonstatic_field(ConstantPoolCacheEntry, _f1, Metadata*) \
volatile_nonstatic_field(ConstantPoolCacheEntry, _f2, intx) \
volatile_nonstatic_field(ConstantPoolCacheEntry, _flags, intx) \
\
/*****************************/ \
/* Method related structures */ \
/*****************************/ \
@ -488,6 +481,8 @@
nonstatic_field(Array<Klass*>, _data[0], Klass*) \
nonstatic_field(Array<ResolvedFieldEntry>, _length, int) \
nonstatic_field(Array<ResolvedFieldEntry>, _data[0], ResolvedFieldEntry) \
nonstatic_field(Array<ResolvedMethodEntry>, _length, int) \
nonstatic_field(Array<ResolvedMethodEntry>, _data[0], ResolvedMethodEntry) \
nonstatic_field(Array<ResolvedIndyEntry>, _length, int) \
nonstatic_field(Array<ResolvedIndyEntry>, _data[0], ResolvedIndyEntry) \
\
@ -975,6 +970,7 @@
unchecked_nonstatic_field(Array<Method*>, _data, sizeof(Method*)) \
unchecked_nonstatic_field(Array<Klass*>, _data, sizeof(Klass*)) \
unchecked_nonstatic_field(Array<ResolvedFieldEntry>, _data, sizeof(ResolvedFieldEntry)) \
unchecked_nonstatic_field(Array<ResolvedMethodEntry>,_data, sizeof(ResolvedMethodEntry)) \
unchecked_nonstatic_field(Array<ResolvedIndyEntry>, _data, sizeof(ResolvedIndyEntry)) \
unchecked_nonstatic_field(Array<Array<u1>*>, _data, sizeof(Array<u1>*)) \
\
@ -1903,6 +1899,7 @@
declare_type(Array<Klass*>, MetaspaceObj) \
declare_type(Array<Method*>, MetaspaceObj) \
declare_type(Array<ResolvedFieldEntry>, MetaspaceObj) \
declare_type(Array<ResolvedMethodEntry>, MetaspaceObj) \
declare_type(Array<ResolvedIndyEntry>, MetaspaceObj) \
declare_type(Array<Array<u1>*>, MetaspaceObj) \
\
@ -1920,8 +1917,8 @@
declare_toplevel_type(CodeBlob*) \
declare_toplevel_type(RuntimeBlob*) \
declare_toplevel_type(CompressedWriteStream*) \
declare_toplevel_type(ConstantPoolCacheEntry) \
declare_toplevel_type(ResolvedFieldEntry) \
declare_toplevel_type(ResolvedMethodEntry) \
declare_toplevel_type(ResolvedIndyEntry) \
declare_toplevel_type(elapsedTimer) \
declare_toplevel_type(frame) \
@ -2198,18 +2195,6 @@
declare_constant(ConstantPool::_indy_bsm_offset) \
declare_constant(ConstantPool::_indy_argc_offset) \
declare_constant(ConstantPool::_indy_argv_offset) \
declare_constant(ConstantPool::CPCACHE_INDEX_TAG) \
\
/********************************/ \
/* ConstantPoolCacheEntry enums */ \
/********************************/ \
\
declare_constant(ConstantPoolCacheEntry::is_volatile_shift) \
declare_constant(ConstantPoolCacheEntry::is_final_shift) \
declare_constant(ConstantPoolCacheEntry::is_forced_virtual_shift) \
declare_constant(ConstantPoolCacheEntry::is_vfinal_shift) \
declare_constant(ConstantPoolCacheEntry::is_field_entry_shift) \
declare_constant(ConstantPoolCacheEntry::tos_state_shift) \
\
/***************************************/ \
/* JavaThreadStatus enum */ \
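[Editor's aside, not part of the diff] The vmStructs entries added above are what make the new ResolvedMethodEntry layout visible to the Serviceability Agent. A minimal sketch of how an SA-side probe could consume them through the type database, assuming an attached SA session and the SA classes introduced later in this commit; the probe class and method below are hypothetical:

    // Hypothetical probe; mirrors the lookup pattern used by the new ResolvedMethodEntry SA class.
    import sun.jvm.hotspot.runtime.VM;
    import sun.jvm.hotspot.types.CIntegerField;
    import sun.jvm.hotspot.types.Type;
    import sun.jvm.hotspot.types.TypeDataBase;

    public class ResolvedMethodEntryProbe {
        public static void describe() {
            // Only works inside a live SA session (VM.getVM() requires an attached VM).
            TypeDataBase db = VM.getVM().getTypeDataBase();
            // Both lookups are satisfied by the declare_type/nonstatic_field entries above.
            Type entryType = db.lookupType("ResolvedMethodEntry");
            CIntegerField cpIndex = entryType.getCIntegerField("_cpool_index");
            System.out.println("ResolvedMethodEntry size = " + entryType.getSize()
                    + ", _cpool_index offset = " + cpIndex.getOffset());
        }
    }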

View File

@ -56,8 +56,10 @@ public abstract class BytecodeWithCPIndex extends Bytecode {
return cpCacheIndex;
} else if (code() == Bytecodes._invokedynamic) {
return cpCache.getIndyEntryAt(cpCacheIndex).getConstantPoolIndex();
} else if (Bytecodes.isFieldCode(code())) {
return cpCache.getFieldEntryAt(cpCacheIndex).getConstantPoolIndex();
} else {
return cpCache.getEntryAt((int) (0xFFFF & cpCacheIndex)).getConstantPoolIndex();
return cpCache.getMethodEntryAt(cpCacheIndex).getConstantPoolIndex();
}
}
}

View File

@ -401,6 +401,7 @@ public class Bytecodes {
public static boolean isZeroConst (int code) { return (code == _aconst_null || code == _iconst_0
|| code == _fconst_0 || code == _dconst_0); }
public static boolean isFieldCode (int code) { return (_getstatic <= code && code <= _putfield); }
static int flags (int code, boolean is_wide) {
assert code == (code & 0xff) : "must be a byte";

View File

@ -227,7 +227,7 @@ public class ConstantPool extends Metadata implements ClassConstants {
return Double.longBitsToDouble(getLongAt(index));
}
public int getFieldOrMethodAt(int which) {
public int getFieldOrMethodAt(int which, int code) {
if (DEBUG) {
System.err.print("ConstantPool.getFieldOrMethodAt(" + which + "): new index = ");
}
@ -237,7 +237,7 @@ public class ConstantPool extends Metadata implements ClassConstants {
i = which;
} else {
// change byte-ordering and go via cache
i = cache.getEntryAt(0xFFFF & which).getConstantPoolIndex();
i = to_cp_index(which, code);
}
if (Assert.ASSERTS_ENABLED) {
Assert.that(getTagAt(i).isFieldOrMethod(), "Corrupted constant pool");
@ -269,10 +269,9 @@ public class ConstantPool extends Metadata implements ClassConstants {
case Bytecodes._invokespecial:
case Bytecodes._invokestatic:
case Bytecodes._invokevirtual:
// TODO: handle resolved method entries with new structure
return getCache().getMethodEntryAt(index).getConstantPoolIndex();
default:
// change byte-ordering and go via cache
return remapInstructionOperandFromCache(index);
throw new InternalError("Unexpected bytecode: " + code);
}
}
@ -319,12 +318,6 @@ public class ConstantPool extends Metadata implements ClassConstants {
return decodeInvokedynamicIndex(index);
}
ConstantPoolCacheEntry invokedynamicCPCacheEntryAt(int index) {
// decode index that invokedynamic points to.
int cpCacheIndex = invokedynamicCPCacheIndex(index);
return getCache().getEntryAt(cpCacheIndex);
}
public int uncachedGetNameAndTypeRefIndexAt(int cp_index) {
if (getTagAt(cp_index).isInvokeDynamic() || getTagAt(cp_index).isDynamicConstant()) {
int poolIndex = invokeDynamicNameAndTypeRefIndexAt(cp_index);
@ -341,14 +334,6 @@ public class ConstantPool extends Metadata implements ClassConstants {
return uncachedGetNameAndTypeRefIndexAt(to_cp_index(index, code));
}
private int remapInstructionOperandFromCache(int operand) {
int cpc_index = operand;
// DEBUG_ONLY(cpc_index -= CPCACHE_INDEX_TAG);
// assert((int)(u2)cpc_index == cpc_index, "clean u2");
int member_index = getCache().getEntryAt(cpc_index).getConstantPoolIndex();
return member_index;
}
public int invokeDynamicNameAndTypeRefIndexAt(int which) {
// assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool");
return extractHighShortFromInt(getIntAt(which));
@ -372,15 +357,15 @@ public class ConstantPool extends Metadata implements ClassConstants {
}
// returns null, if not resolved.
public Klass getFieldOrMethodKlassRefAt(int which) {
int refIndex = getFieldOrMethodAt(which);
public Klass getFieldOrMethodKlassRefAt(int which, int code) {
int refIndex = getFieldOrMethodAt(which, code);
int klassIndex = extractLowShortFromInt(refIndex);
return getKlassAt(klassIndex);
}
// returns null, if not resolved.
public Method getMethodRefAt(int which, int code) {
Klass klass = getFieldOrMethodKlassRefAt(which);
Klass klass = getFieldOrMethodKlassRefAt(which, code);
if (klass == null) return null;
Symbol name = getNameRefAt(which, code);
Symbol sig = getSignatureRefAt(which, code);
@ -393,7 +378,7 @@ public class ConstantPool extends Metadata implements ClassConstants {
// returns null, if not resolved.
public Field getFieldRefAt(int which, int code) {
InstanceKlass klass = (InstanceKlass)getFieldOrMethodKlassRefAt(which);
InstanceKlass klass = (InstanceKlass)getFieldOrMethodKlassRefAt(which, code);
if (klass == null) return null;
Symbol name = getNameRefAt(which, code);
Symbol sig = getSignatureRefAt(which, code);
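[Editor's aside, not part of the diff] With the SA ConstantPool now routing invoke bytecodes through getMethodEntryAt, mapping a rewritten invoke operand back to its constant pool index is the same one-liner for all four invoke bytecodes. A hedged sketch using only accessors shown in this commit (the helper name and its caller are hypothetical):

    // Hypothetical helper: 'operand' is the rewritten index taken from the bytecode stream.
    static int methodRefCpIndex(sun.jvm.hotspot.oops.ConstantPool pool, int operand) {
        // invokevirtual/invokespecial/invokestatic/invokeinterface all share the resolved method entries.
        return pool.getCache().getMethodEntryAt(operand).getConstantPoolIndex();
    }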

View File

@ -49,13 +49,11 @@ public class ConstantPoolCache extends Metadata {
Type type = db.lookupType("ConstantPoolCache");
constants = new MetadataField(type.getAddressField("_constant_pool"), 0);
baseOffset = type.getSize();
Type elType = db.lookupType("ConstantPoolCacheEntry");
elementSize = elType.getSize();
length = new CIntField(type.getCIntegerField("_length"), 0);
intSize = VM.getVM().getObjectHeap().getIntSize();
resolvedReferences = type.getAddressField("_resolved_references");
referenceMap = type.getAddressField("_reference_map");
resolvedFieldArray = type.getAddressField("_resolved_field_entries");
resolvedMethodArray = type.getAddressField("_resolved_method_entries");
resolvedIndyArray = type.getAddressField("_resolved_indy_entries");
}
@ -74,17 +72,13 @@ public class ConstantPoolCache extends Metadata {
private static AddressField resolvedReferences;
private static AddressField referenceMap;
private static AddressField resolvedFieldArray;
private static AddressField resolvedMethodArray;
private static AddressField resolvedIndyArray;
public ConstantPool getConstants() { return (ConstantPool) constants.getValue(this); }
public long getSize() {
return alignSize(baseOffset + getLength() * elementSize);
}
public ConstantPoolCacheEntry getEntryAt(int i) {
Objects.checkIndex(i, getLength());
return new ConstantPoolCacheEntry(this, i);
return alignSize(baseOffset);
}
public ResolvedIndyEntry getIndyEntryAt(int i) {
@ -99,6 +93,12 @@ public class ConstantPoolCache extends Metadata {
return array.getAt(i);
}
public ResolvedMethodEntry getMethodEntryAt(int i) {
Address addr = resolvedMethodArray.getValue(getAddress());
ResolvedMethodArray array = new ResolvedMethodArray(addr);
return array.getAt(i);
}
public int getIntAt(int entry, int fld) {
long offset = baseOffset + entry * elementSize + fld * intSize;
return (int) getAddress().getCIntegerAt(offset, intSize, true );
@ -109,19 +109,6 @@ public class ConstantPoolCache extends Metadata {
tty.print("ConstantPoolCache for " + getConstants().getPoolHolder().getName().asString() + " address = " + getAddress() + " offset = " + baseOffset);
}
public int getLength() {
return (int) length.getValue(getAddress());
}
public void iterateFields(MetadataVisitor visitor) {
super.iterateFields(visitor);
visitor.doMetadata(constants, true);
for (int i = 0; i < getLength(); i++) {
ConstantPoolCacheEntry entry = getEntryAt(i);
entry.iterateFields(visitor);
}
}
public Oop getResolvedReferences() {
Address handle = resolvedReferences.getValue(getAddress());
if (handle != null) {

View File

@ -1,104 +0,0 @@
/*
* Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.oops;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
import sun.jvm.hotspot.utilities.Observable;
import sun.jvm.hotspot.utilities.Observer;
public class ConstantPoolCacheEntry {
private static long size;
private static long baseOffset;
private static CIntegerField indices;
private static AddressField f1;
private static CIntegerField f2;
private static CIntegerField flags;
private ConstantPoolCache cp;
private long offset;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
Type type = db.lookupType("ConstantPoolCacheEntry");
size = type.getSize();
indices = type.getCIntegerField("_indices");
f1 = type.getAddressField ("_f1");
f2 = type.getCIntegerField("_f2");
flags = type.getCIntegerField("_flags");
type = db.lookupType("ConstantPoolCache");
baseOffset = type.getSize();
}
ConstantPoolCacheEntry(ConstantPoolCache cp, int index) {
this.cp = cp;
offset = baseOffset + index * size;
}
public int getConstantPoolIndex() {
if (Assert.ASSERTS_ENABLED) {
Assert.that((getIndices() & 0xFFFF) != 0, "must be main entry");
}
return (int) (getIndices() & 0xFFFF);
}
private long getIndices() {
return cp.getAddress().getCIntegerAt(indices.getOffset() + offset, indices.getSize(), indices.isUnsigned());
}
public Metadata getF1() {
return Metadata.instantiateWrapperFor(cp.getAddress().getAddressAt(f1.getOffset() + offset));
}
public int getF2() {
return cp.getAddress().getJIntAt(f1.getOffset() + offset);
}
public int getFlags() {
return cp.getAddress().getJIntAt(flags.getOffset() + offset);
}
static NamedFieldIdentifier f1FieldName = new NamedFieldIdentifier("_f1");
static NamedFieldIdentifier f2FieldName = new NamedFieldIdentifier("_f2");
static NamedFieldIdentifier flagsFieldName = new NamedFieldIdentifier("_flags");
public void iterateFields(MetadataVisitor visitor) {
visitor.doOop(new OopField(f1FieldName, f1.getOffset() + offset, true), true);
visitor.doInt(new IntField(f2FieldName, f2.getOffset() + offset, true), true);
visitor.doInt(new IntField(flagsFieldName, flags.getOffset() + offset, true), true);
}
}

View File

@ -0,0 +1,73 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.oops;
import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
import sun.jvm.hotspot.types.WrongTypeException;
import sun.jvm.hotspot.utilities.GenericArray;
import sun.jvm.hotspot.utilities.Observable;
import sun.jvm.hotspot.utilities.Observer;
public class ResolvedMethodArray extends GenericArray {
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
elemType = db.lookupType("ResolvedMethodEntry");
Type type = db.lookupType("Array<ResolvedMethodEntry>");
dataFieldOffset = type.getAddressField("_data").getOffset();
}
private static long dataFieldOffset;
protected static Type elemType;
public ResolvedMethodArray(Address addr) {
super(addr, dataFieldOffset);
}
public ResolvedMethodEntry getAt(int index) {
if (index < 0 || index >= length()) throw new ArrayIndexOutOfBoundsException(index + " " + length());
Type elemType = getElemType();
Address data = getAddress().addOffsetTo(dataFieldOffset);
long elemSize = elemType.getSize();
return new ResolvedMethodEntry(data.addOffsetTo(index * elemSize));
}
public Type getElemType() {
return elemType;
}
}

View File

@ -0,0 +1,64 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.oops;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
import sun.jvm.hotspot.utilities.Observable;
import sun.jvm.hotspot.utilities.Observer;
public class ResolvedMethodEntry extends VMObject {
private static long size;
private static long baseOffset;
private static CIntegerField cpIndex;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
Type type = db.lookupType("ResolvedMethodEntry");
size = type.getSize();
cpIndex = type.getCIntegerField("_cpool_index");
}
ResolvedMethodEntry(Address addr) {
super(addr);
}
public int getConstantPoolIndex() {
return this.getAddress().getJShortAt(cpIndex.getOffset());
}
public void iterateFields(MetadataVisitor visitor) { }
}
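[Editor's aside, not part of the diff] A small usage sketch tying the two new SA classes together: ConstantPoolCache hands out a ResolvedMethodEntry by position, and the entry answers with its constant pool index. It assumes an attached SA session, and the entry count is supplied by the caller because, with the cpCache split into typed arrays, the SA cache no longer exposes a single _length:

    import sun.jvm.hotspot.oops.ConstantPoolCache;
    import sun.jvm.hotspot.oops.ResolvedMethodEntry;

    class ResolvedMethodEntryDump {
        // Hypothetical dump over the first 'count' resolved method entries of one cache.
        static void dump(ConstantPoolCache cpCache, int count) {
            for (int i = 0; i < count; i++) {
                ResolvedMethodEntry entry = cpCache.getMethodEntryAt(i);
                System.out.println("method entry " + i + " -> cp index " + entry.getConstantPoolIndex());
            }
        }
    }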

View File

@ -126,8 +126,8 @@ public class ByteCodeRewriter
case Bytecodes._invokespecial:
case Bytecodes._invokestatic:
case Bytecodes._invokeinterface: {
int cpci = method.getNativeShortArg(bci + 1);
cpoolIndex = (short) cpCache.getEntryAt(cpci).getConstantPoolIndex();
int methodIndex = method.getNativeShortArg(bci + 1);
cpoolIndex = (short) cpCache.getMethodEntryAt(methodIndex).getConstantPoolIndex();
writeShort(code, bci + 1, cpoolIndex);
break;
}

View File

@ -447,7 +447,7 @@ final class CompilerToVM {
* to an index directly into {@code constantPool}.
*
* @throws IllegalArgumentException if {@code rawIndex} is out of range.
* @return {@code JVM_CONSTANT_FieldRef} constant pool entry index for the invokedynamic
* @return {@code JVM_CONSTANT_FieldRef} constant pool entry index for the instruction
*/
int decodeFieldIndexToCPIndex(HotSpotConstantPool constantPool, int rawIndex) {
return decodeFieldIndexToCPIndex(constantPool, constantPool.getConstantPoolPointer(), rawIndex);
@ -455,6 +455,19 @@ final class CompilerToVM {
private native int decodeFieldIndexToCPIndex(HotSpotConstantPool constantPool, long constantPoolPointer, int rawIndex);
/**
* Converts the {@code rawIndex} operand of a rewritten invokestatic/invokespecial/invokeinterface/invokevirtual instruction
* to an index directly into {@code constantPool}.
*
* @throws IllegalArgumentException if {@code rawIndex} is out of range.
* @return {@code JVM_CONSTANT_MethodRef} or {@code JVM_CONSTANT_InterfaceMethodRef} constant pool entry index for the instruction
*/
int decodeMethodIndexToCPIndex(HotSpotConstantPool constantPool, int rawIndex) {
return decodeMethodIndexToCPIndex(constantPool, constantPool.getConstantPoolPointer(), rawIndex);
}
private native int decodeMethodIndexToCPIndex(HotSpotConstantPool constantPool, long constantPoolPointer, int rawIndex);
/**
* Resolves the details for invoking the bootstrap method associated with the
* {@code CONSTANT_Dynamic_info} or @{code CONSTANT_InvokeDynamic_info} entry at {@code cpi} in
@ -576,24 +589,11 @@ final class CompilerToVM {
private native HotSpotResolvedObjectTypeImpl resolveFieldInPool(HotSpotConstantPool constantPool, long constantPoolPointer,
int rawIndex, HotSpotResolvedJavaMethodImpl method, long methodPointer, byte opcode, int[] info);
/**
* Converts {@code cpci} from an index into the cache for {@code constantPool} to an index
* directly into {@code constantPool}.
*
* The behavior of this method is undefined if {@code cpci} is an invalid constant pool cache
* index.
*/
int constantPoolRemapInstructionOperandFromCache(HotSpotConstantPool constantPool, int cpci) {
return constantPoolRemapInstructionOperandFromCache(constantPool, constantPool.getConstantPoolPointer(), cpci);
}
private native int constantPoolRemapInstructionOperandFromCache(HotSpotConstantPool constantPool, long constantPoolPointer, int cpci);
/**
* Gets the appendix object (if any) associated with the entry identified by {@code which}.
*
* @param which if negative, is treated as an encoded indy index for INVOKEDYNAMIC;
* Otherwise, it's treated as a constant pool cache index (returned by HotSpotConstantPool::rawIndexToConstantPoolCacheIndex)
* Otherwise, it's treated as a constant pool cache index
* for INVOKE{VIRTUAL,SPECIAL,STATIC,INTERFACE}.
*/
HotSpotObjectConstantImpl lookupAppendixInPool(HotSpotConstantPool constantPool, int which) {
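[Editor's aside, not part of the diff] The new decodeMethodIndexToCPIndex pairs with the existing decodeFieldIndexToCPIndex: JVMCI callers translate the raw operand of a rewritten invoke straight to a constant pool index, with no cache-index arithmetic in between. A hedged sketch of a caller, written as if it lived inside jdk.vm.ci.hotspot next to HotSpotConstantPool; the helper itself is hypothetical, and compilerToVM() is assumed to be the runtime accessor already used throughout that class:

    // Hypothetical helper inside jdk.vm.ci.hotspot.
    static int methodOperandToCpi(HotSpotConstantPool pool, int rawIndex, int opcode) {
        switch (opcode) {
            case Bytecodes.INVOKEVIRTUAL:
            case Bytecodes.INVOKESPECIAL:
            case Bytecodes.INVOKESTATIC:
            case Bytecodes.INVOKEINTERFACE:
                return compilerToVM().decodeMethodIndexToCPIndex(pool, rawIndex);
            default:
                throw new IllegalArgumentException("not a rewritten invoke: " + opcode);
        }
    }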

View File

@ -56,7 +56,7 @@ import jdk.vm.ci.meta.UnresolvedJavaType;
* <li>rawIndex - Index in the bytecode stream after the opcode (could be rewritten for some opcodes)</li>
* <li>cpi - The constant pool index (as specified in JVM Spec)</li>
* <li>cpci - The constant pool cache index, used only by the four bytecodes INVOKE{VIRTUAL,SPECIAL,STATIC,INTERFACE}.
* It's the same as {@code rawIndex + HotSpotVMConfig::constantPoolCpCacheIndexTag}. </li>
* It's the same as {@code rawIndex}. </li>
* <li>which - May be either a {@code rawIndex} or a {@code cpci}.</li>
* </ul>
*
@ -259,26 +259,6 @@ public final class HotSpotConstantPool implements ConstantPool, MetaspaceHandleO
return holder;
}
/**
* Converts a raw index from the bytecodes to a constant pool cache index by adding a
* {@link HotSpotVMConfig#constantPoolCpCacheIndexTag constant}.
*
* @param rawIndex index from the bytecode
* @param opcode bytecode to convert the index for
* @return constant pool cache index
*/
private static int rawIndexToConstantPoolCacheIndex(int rawIndex, int opcode) {
if (opcode == Bytecodes.INVOKEINTERFACE ||
opcode == Bytecodes.INVOKEVIRTUAL ||
opcode == Bytecodes.INVOKESPECIAL ||
opcode == Bytecodes.INVOKESTATIC) {
return rawIndex + config().constantPoolCpCacheIndexTag;
} else {
// Only the above 4 bytecodes use ConstantPoolCacheIndex
throw new IllegalArgumentException("unexpected opcode " + opcode);
}
}
/**
* See {@code ConstantPool::is_invokedynamic_index}.
*/
@ -735,7 +715,7 @@ public final class HotSpotConstantPool implements ConstantPool, MetaspaceHandleO
}
return compilerToVM().lookupAppendixInPool(this, rawIndex);
} else {
return compilerToVM().lookupAppendixInPool(this, rawIndexToConstantPoolCacheIndex(rawIndex, opcode));
return compilerToVM().lookupAppendixInPool(this, rawIndex);
}
}
@ -762,7 +742,7 @@ public final class HotSpotConstantPool implements ConstantPool, MetaspaceHandleO
}
which = rawIndex;
} else {
which = rawIndexToConstantPoolCacheIndex(rawIndex, opcode);
which = rawIndex;
}
final HotSpotResolvedJavaMethod method = compilerToVM().lookupMethodInPool(this, which, (byte) opcode, (HotSpotResolvedJavaMethodImpl) caller);
if (method != null) {
@ -820,8 +800,7 @@ public final class HotSpotConstantPool implements ConstantPool, MetaspaceHandleO
case Bytecodes.INVOKESPECIAL:
case Bytecodes.INVOKESTATIC:
case Bytecodes.INVOKEINTERFACE: {
int cpci = rawIndexToConstantPoolCacheIndex(rawIndex, opcode);
cpi = getKlassRefIndexAt(cpci, opcode);
cpi = getKlassRefIndexAt(rawIndex, opcode);
break;
}
default:
@ -922,9 +901,7 @@ public final class HotSpotConstantPool implements ConstantPool, MetaspaceHandleO
case Bytecodes.INVOKESPECIAL:
case Bytecodes.INVOKESTATIC:
case Bytecodes.INVOKEINTERFACE: {
// invoke and field instructions point to a constant pool cache entry.
int cpci = rawIndexToConstantPoolCacheIndex(rawIndex, opcode);
cpi = compilerToVM().constantPoolRemapInstructionOperandFromCache(this, cpci);
cpi = compilerToVM().decodeMethodIndexToCPIndex(this, rawIndex);
break;
}
default:
@ -954,9 +931,8 @@ public final class HotSpotConstantPool implements ConstantPool, MetaspaceHandleO
}
if (tag == constants.jvmMethodref) {
if (Bytecodes.isInvokeHandleAlias(opcode) && isSignaturePolymorphicHolder(type)) {
final int methodRefCacheIndex = rawIndexToConstantPoolCacheIndex(rawIndex, opcode);
checkTag(compilerToVM().constantPoolRemapInstructionOperandFromCache(this, methodRefCacheIndex), constants.jvmMethodref);
compilerToVM().resolveInvokeHandleInPool(this, methodRefCacheIndex);
checkTag(compilerToVM().decodeMethodIndexToCPIndex(this, rawIndex), constants.jvmMethodref);
compilerToVM().resolveInvokeHandleInPool(this, rawIndex);
}
}
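[Editor's aside, not part of the diff] The net effect of the removals above is that, for the four invoke bytecodes, the cpci handed to the VM is now identical to the rewritten rawIndex; only invokedynamic keeps its own encoding. A hedged sketch of that identity (hypothetical helper, assuming the same jdk.vm.ci.hotspot context as above):

    // Hypothetical: the 'which' value now passed to lookupMethodInPool and friends.
    static int whichFor(int rawIndex, int opcode) {
        switch (opcode) {
            case Bytecodes.INVOKEVIRTUAL:
            case Bytecodes.INVOKESPECIAL:
            case Bytecodes.INVOKESTATIC:
            case Bytecodes.INVOKEINTERFACE:
                // Was rawIndex + config().constantPoolCpCacheIndexTag before this change.
                return rawIndex;
            default:
                throw new IllegalArgumentException("unexpected opcode " + opcode);
        }
    }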

View File

@ -219,7 +219,6 @@ class HotSpotVMConfig extends HotSpotVMConfigAccess {
final int constantPoolLengthOffset = getFieldOffset("ConstantPool::_length", Integer.class, "int");
final int constantPoolFlagsOffset = getFieldOffset("ConstantPool::_flags", Integer.class, "u2");
final int constantPoolCpCacheIndexTag = getConstant("ConstantPool::CPCACHE_INDEX_TAG", Integer.class);
final int constantPoolHasDynamicConstant = getConstant("ConstantPool::_has_dynamic_constant", Integer.class);
final int constantPoolSourceFileNameIndexOffset = getFieldOffset("ConstantPool::_source_file_name_index", Integer.class, "u2");

View File

@ -44,10 +44,9 @@ TEST_VM(ConstantPoolCache, print_on) {
const char* output = ss.freeze();
static const char* const expected_strings[] = {
// Method entry tests:
"this", "bytecode 1:", "bytecode 2:", "cp index:", "F1:", "F2:",
"method:", "flag values:", "tos:", "local signature:", "has appendix:",
"forced virtual:", "final:", "virtual final:", "resolution failed:",
"num parameters:",
"Klass:", "Method:", "CP Index:", "Resolved References Index:", "Table Index:",
"TOS:", "Number of Parameters:", "Is Virtual Final:", "Is Final", "Is Forced Virtual",
"Has Appendix:", "Has Local Signature", "Bytecode 1:", "Bytecode 2:",
// field entry test
"Offset:", "Field Index:", "CP Index:", "TOS:", "Is Final:", "Is Volatile:",

View File

@ -143,11 +143,6 @@ public class CompilerToVMHelper {
return CTVM.resolveFieldInPool((HotSpotConstantPool) constantPool, cpi, (HotSpotResolvedJavaMethodImpl) method, opcode, info);
}
public static int constantPoolRemapInstructionOperandFromCache(
ConstantPool constantPool, int cpci) {
return CTVM.constantPoolRemapInstructionOperandFromCache((HotSpotConstantPool) constantPool, cpci);
}
public static Object lookupAppendixInPool(
ConstantPool constantPool, int cpi) {
return CTVM.lookupAppendixInPool((HotSpotConstantPool) constantPool, cpi);

View File

@ -248,10 +248,8 @@ public class ConstantPoolTestCase {
public void test() {
for (DummyClasses dummyClass : DummyClasses.values()) {
boolean isCPCached = WB.getConstantPoolCacheLength(dummyClass.klass) > -1;
System.out.printf("Testing dummy %s with constant pool cached = %b%n",
dummyClass.klass,
isCPCached);
System.out.printf("Testing dummy %s with constant pool",
dummyClass.klass);
HotSpotResolvedObjectType holder = CompilerToVMHelper.fromObjectClass(dummyClass.klass);
jdk.vm.ci.meta.ConstantPool constantPoolCTVM = holder.getConstantPool();
ConstantPool constantPoolSS = dummyClass.constantPoolSS;

View File

@ -91,14 +91,11 @@ public class ConstantPoolTestsHelper {
}
}
}
int cacheLength = WB.getConstantPoolCacheLength(this.klass);
int indexTag = WB.getConstantPoolCacheIndexTag();
for (int cpci = indexTag; cpci < cacheLength + indexTag; cpci++) {
if (WB.remapInstructionOperandFromCPCache(this.klass, cpci) == cpi) {
if (constantPoolSS.getTagAt(cpi).equals(Tag.INVOKEDYNAMIC)) {
return WB.encodeConstantPoolIndyIndex(cpci) + indexTag;
if (constantPoolSS.getTagAt(cpi).equals(Tag.METHODREF) || constantPoolSS.getTagAt(cpi).equals(Tag.INTERFACEMETHODREF)) {
for (int method_index = 0; method_index < WB.getMethodEntriesLength(this.klass); method_index++) {
if (WB.getMethodCPIndex(this.klass, method_index) == cpi) {
return method_index;
}
return cpci;
}
}
return NO_CP_CACHE_PRESENT;
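[Editor's aside, not part of the diff] The helper above now finds a method's rewritten index by scanning the resolved method entries through WhiteBox, instead of remapping constant pool cache indices. A minimal sketch of that scan using the WhiteBox methods added in the next file; the surrounding class and the jdk.test.whitebox package name are assumptions:

    // Hypothetical lookup: returns the resolved-method-entry index for 'cpi', or -1 if none matches.
    static int findMethodEntryIndex(jdk.test.whitebox.WhiteBox wb, Class<?> klass, int cpi) {
        for (int i = 0; i < wb.getMethodEntriesLength(klass); i++) {
            if (wb.getMethodCPIndex(klass, i) == cpi) {
                return i;
            }
        }
        return -1;
    }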

View File

@ -129,17 +129,6 @@ public class WhiteBox {
return getConstantPool0(aClass);
}
private native int getConstantPoolCacheIndexTag0();
public int getConstantPoolCacheIndexTag() {
return getConstantPoolCacheIndexTag0();
}
private native int getConstantPoolCacheLength0(Class<?> aClass);
public int getConstantPoolCacheLength(Class<?> aClass) {
Objects.requireNonNull(aClass);
return getConstantPoolCacheLength0(aClass);
}
private native Object[] getResolvedReferences0(Class<?> aClass);
public Object[] getResolvedReferences(Class<?> aClass) {
Objects.requireNonNull(aClass);
@ -169,6 +158,18 @@ public class WhiteBox {
return getFieldCPIndex0(aClass, index);
}
private native int getMethodEntriesLength0(Class<?> aClass);
public int getMethodEntriesLength(Class<?> aClass) {
Objects.requireNonNull(aClass);
return getMethodEntriesLength0(aClass);
}
private native int getMethodCPIndex0(Class<?> aClass, int index);
public int getMethodCPIndex(Class<?> aClass, int index) {
Objects.requireNonNull(aClass);
return getMethodCPIndex0(aClass, index);
}
private native int getIndyInfoLength0(Class<?> aClass);
public int getIndyInfoLength(Class<?> aClass) {
Objects.requireNonNull(aClass);