commit 1c12516ffa
Author: Igor Ignatyev
Date:   2015-09-19 11:19:22 +02:00

26 changed files with 129 additions and 175 deletions

View File

@ -42,6 +42,11 @@
// Implementation of InterpreterMacroAssembler
void InterpreterMacroAssembler::jump_to_entry(address entry) {
assert(entry, "Entry must have been generated by now");
b(entry);
}
#ifndef CC_INTERP
void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
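Editor's note: each platform gains an InterpreterMacroAssembler::jump_to_entry helper with the same shape as the aarch64 one above: assert that the target entry exists, then emit an unconditional branch to it, so a stub reuses an already generated entry instead of duplicating it. A minimal standalone sketch of the idea, using plain function pointers rather than HotSpot code (all names below are illustrative):

```cpp
#include <cassert>
#include <cstdio>

// Stand-in for a previously generated interpreter entry.
using entry_fn = void (*)();

static void zerolocals_entry() { std::puts("shared normal (zerolocals) entry"); }

// Sketch of jump_to_entry(address entry): the generated stub simply transfers
// control to an entry that was generated earlier, instead of emitting a
// second copy of that entry's code.
static void jump_to_entry(entry_fn entry) {
  assert(entry != nullptr && "Entry must have been generated by now");
  entry();  // in generated code this is a plain branch (b/jmp), not a call
}

int main() {
  jump_to_entry(zerolocals_entry);
  return 0;
}
```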

View File

@ -66,6 +66,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
void load_earlyret_value(TosState state);
void jump_to_entry(address entry);
#ifdef CC_INTERP
void save_bcp() { /* not needed in c++ interpreter and harmless */ }
void restore_bcp() { /* not needed in c++ interpreter and harmless */ }

View File

@ -41,9 +41,8 @@ private:
address generate_native_entry(bool synchronized);
address generate_abstract_entry(void);
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_jump_to_normal_entry(void);
address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
address generate_accessor_entry(void) { return NULL; }
address generate_empty_entry(void) { return NULL; }
void generate_transcendental_entry(AbstractInterpreter::MethodKind kind, int fpargs);
address generate_Reference_get_entry();
address generate_CRC32_update_entry();
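Editor's note: returning NULL from generate_accessor_entry/generate_empty_entry relies on the caller treating a null entry point as "no specialized stub, fall back to a shared entry". A small sketch of that contract (illustrative C++, not the actual HotSpot declarations):

```cpp
#include <cassert>

using address = const unsigned char*;

// The platform generator may now decline to emit a stub at all.
static address generate_accessor_entry() { return nullptr; }

// Caller-side contract assumed by this change: a null result means
// "reuse the shared zerolocals entry generated earlier".
static address pick_entry(address specialized, address shared_zerolocals) {
  return specialized != nullptr ? specialized : shared_zerolocals;
}

int main() {
  static const unsigned char stub[1] = {0};
  address shared = stub;  // pretend this entry was generated earlier
  assert(pick_entry(generate_accessor_entry(), shared) == shared);
  return 0;
}
```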

View File

@ -236,17 +236,6 @@ void InterpreterGenerator::generate_transcendental_entry(AbstractInterpreter::Me
__ blrt(rscratch1, gpargs, fpargs, rtype);
}
// Jump into normal path for accessor and empty entry to jump to normal entry
// The "fast" optimization don't update compilation count therefore can disable inlining
// for these functions that should be inlined.
address InterpreterGenerator::generate_jump_to_normal_entry(void) {
address entry_point = __ pc();
assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
__ b(Interpreter::entry_for_kind(Interpreter::zerolocals));
return entry_point;
}
// Abstract method entry
// Attempt to execute abstract method. Throw exception
address InterpreterGenerator::generate_abstract_entry(void) {

View File

@ -721,8 +721,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// generate a vanilla interpreter entry as the slow path
__ bind(slow_path);
(void) generate_normal_entry(false);
__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
return entry;
}
#endif // INCLUDE_ALL_GCS
@ -779,12 +778,10 @@ address InterpreterGenerator::generate_CRC32_update_entry() {
// generate a vanilla native entry as the slow path
__ bind(slow_path);
(void) generate_native_entry(false);
__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
return entry;
}
return generate_native_entry(false);
return NULL;
}
/**
@ -841,12 +838,10 @@ address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpret
// generate a vanilla native entry as the slow path
__ bind(slow_path);
(void) generate_native_entry(false);
__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
return entry;
}
return generate_native_entry(false);
return NULL;
}
void InterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
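Editor's note: the CRC32 intrinsic entries no longer generate their own copy of the native entry for the slow path; they branch to the shared native entry, and when the intrinsic is disabled they return NULL so the caller reuses that entry as well. A standalone sketch of the new shape (illustrative only; current_pc and emit_branch are made-up stand-ins for the assembler calls):

```cpp
#include <cstdio>

using address = const char*;

// Made-up stand-ins for the macro assembler used in the real code.
static address current_pc()            { return "crc32_update_stub"; }
static void    emit_branch(address to) { std::printf("  b %s\n", to); }

// Sketch of generate_CRC32_update_entry() after this change.
static address generate_CRC32_update_entry(bool use_crc32_intrinsics,
                                           address shared_native_entry) {
  if (use_crc32_intrinsics) {
    address entry = current_pc();
    // ... fast path: safepoint check, CRC32 update, return ...
    // slow path: jump straight to the already generated vanilla native entry
    emit_branch(shared_native_entry);
    return entry;
  }
  return nullptr;  // caller falls back to Interpreter::entry_for_kind(native)
}

int main() {
  generate_CRC32_update_entry(true,  "shared_native_entry");
  generate_CRC32_update_entry(false, "shared_native_entry");
  return 0;
}
```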

View File

@ -46,7 +46,7 @@ void InterpreterMacroAssembler::null_check_throw(Register a, int offset, Registe
MacroAssembler::null_check_throw(a, offset, temp_reg, exception_entry);
}
void InterpreterMacroAssembler::branch_to_entry(address entry, Register Rscratch) {
void InterpreterMacroAssembler::jump_to_entry(address entry, Register Rscratch) {
assert(entry, "Entry must have been generated by now");
if (is_within_range_of_b(entry, pc())) {
b(entry);
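Editor's note: on PPC the renamed jump_to_entry uses a short relative branch when the target is reachable by a single `b` instruction and presumably materializes the address in the scratch register for an indirect jump otherwise. A standalone sketch of the reachability test; the ±32 MB window is my assumption about the `b` displacement range, stated here only for illustration:

```cpp
#include <cassert>
#include <cstdint>

// Rough model of is_within_range_of_b(entry, pc()): assume the branch
// encodes a signed byte displacement of roughly +/- 32 MB.
static bool is_within_range_of_b(std::uintptr_t target, std::uintptr_t pc) {
  std::intptr_t disp = static_cast<std::intptr_t>(target) -
                       static_cast<std::intptr_t>(pc);
  return disp >= -(std::intptr_t(1) << 25) && disp < (std::intptr_t(1) << 25);
}

int main() {
  std::uintptr_t pc = 0x10000000;
  assert(is_within_range_of_b(pc + 0x100, pc));        // near: plain b(entry)
  assert(!is_within_range_of_b(pc + 0x10000000, pc));  // far: scratch register + indirect jump
  return 0;
}
```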

View File

@ -39,7 +39,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
void null_check_throw(Register a, int offset, Register temp_reg);
void branch_to_entry(address entry, Register Rscratch);
void jump_to_entry(address entry, Register Rscratch);
// Handy address generation macros.
#define thread_(field_name) in_bytes(JavaThread::field_name ## _offset()), R16_thread

View File

@ -31,9 +31,8 @@
private:
address generate_abstract_entry(void);
address generate_jump_to_normal_entry(void);
address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
address generate_accessor_entry(void) { return NULL; }
address generate_empty_entry(void) { return NULL; }
address generate_Reference_get_entry(void);
address generate_CRC32_update_entry();

View File

@ -427,18 +427,6 @@ address AbstractInterpreterGenerator::generate_result_handler_for(BasicType type
return entry;
}
// Call an accessor method (assuming it is resolved, otherwise drop into
// vanilla (slow path) entry.
address InterpreterGenerator::generate_jump_to_normal_entry(void) {
address entry = __ pc();
address normal_entry = Interpreter::entry_for_kind(Interpreter::zerolocals);
assert(normal_entry != NULL, "should already be generated.");
__ branch_to_entry(normal_entry, R11_scratch1);
__ flush();
return entry;
}
// Abstract method entry.
//
address InterpreterGenerator::generate_abstract_entry(void) {
@ -529,13 +517,13 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// regular method entry code to generate the NPE.
//
address entry = __ pc();
const int referent_offset = java_lang_ref_Reference::referent_offset;
guarantee(referent_offset > 0, "referent offset not initialized");
if (UseG1GC) {
Label slow_path;
address entry = __ pc();
const int referent_offset = java_lang_ref_Reference::referent_offset;
guarantee(referent_offset > 0, "referent offset not initialized");
Label slow_path;
// Debugging not possible, so can't use __ skip_if_jvmti_mode(slow_path, GR31_SCRATCH);
@ -577,13 +565,11 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// Generate regular method entry.
__ bind(slow_path);
__ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
__ flush();
__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
return entry;
} else {
return generate_jump_to_normal_entry();
}
return NULL;
}
void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
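Editor's note: generate_Reference_get_entry now takes the stub address inside the UseG1GC branch and returns NULL otherwise, since the intrinsic entry only exists to apply the G1 pre-barrier on the fast path. A toy model of that decision (illustrative C++, not HotSpot):

```cpp
#include <cassert>

using address = const char*;

// Sketch: the Reference.get intrinsic entry exists only under G1; otherwise
// the generator declines and the regular zerolocals entry is used instead.
static address generate_Reference_get_entry(bool use_g1) {
  if (use_g1) {
    static const char stub[] = "reference_get_stub";
    // ... null-check receiver, load referent, G1 SATB pre-barrier, return ...
    // slow path: jump_to_entry(zerolocals entry)
    return stub;
  }
  return nullptr;  // fall back to the shared method entry
}

int main() {
  assert(generate_Reference_get_entry(true) != nullptr);
  assert(generate_Reference_get_entry(false) == nullptr);
  return 0;
}
```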

View File

@ -620,7 +620,7 @@ inline bool math_entry_available(AbstractInterpreter::MethodKind kind) {
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
if (!math_entry_available(kind)) {
NOT_PRODUCT(__ should_not_reach_here();)
return Interpreter::entry_for_kind(Interpreter::zerolocals);
return NULL;
}
address entry = __ pc();
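Editor's note: generate_math_entry used to hand back the zerolocals entry itself when no fast math entry was available; it now returns NULL and leaves the fallback to the shared dispatch in generate_method_entry. A small sketch of the changed contract (illustrative only):

```cpp
#include <cassert>

using address = const char*;

static bool math_entry_available = false;  // e.g. the kind is not supported

// After the change: do not return another kind's entry from here.
static address generate_math_entry() {
  if (!math_entry_available) {
    return nullptr;  // caller falls back to the zerolocals entry
  }
  static const char stub[] = "math_entry_stub";
  // ... emit the fast math sequence ...
  return stub;
}

int main() {
  assert(generate_math_entry() == nullptr);
  return 0;
}
```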
@ -1126,14 +1126,6 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
generate_fixed_frame(false, Rsize_of_parameters, Rsize_of_locals);
#ifdef FAST_DISPATCH
__ unimplemented("Fast dispatch in generate_normal_entry");
#if 0
__ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
// Set bytecode dispatch table base.
#endif
#endif
// --------------------------------------------------------------------------
// Zero out non-parameter locals.
// Note: *Always* zero out non-parameter locals as Sparc does. It's not
@ -1266,9 +1258,8 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
* int java.util.zip.CRC32.update(int crc, int b)
*/
address InterpreterGenerator::generate_CRC32_update_entry() {
address start = __ pc(); // Remember stub start address (is rtn value).
if (UseCRC32Intrinsics) {
address start = __ pc(); // Remember stub start address (is rtn value).
Label slow_path;
// Safepoint check
@ -1313,11 +1304,11 @@ address InterpreterGenerator::generate_CRC32_update_entry() {
// Generate a vanilla native entry as the slow path.
BLOCK_COMMENT("} CRC32_update");
BIND(slow_path);
__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
return start;
}
(void) generate_native_entry(false);
return start;
return NULL;
}
// CRC32 Intrinsics.
@ -1327,9 +1318,8 @@ address InterpreterGenerator::generate_CRC32_update_entry() {
* int java.util.zip.CRC32.updateByteBuffer(int crc, long* buf, int off, int len)
*/
address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
address start = __ pc(); // Remember stub start address (is rtn value).
if (UseCRC32Intrinsics) {
address start = __ pc(); // Remember stub start address (is rtn value).
Label slow_path;
// Safepoint check
@ -1406,11 +1396,11 @@ address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpret
// Generate a vanilla native entry as the slow path.
BLOCK_COMMENT("} CRC32_updateBytes(Buffer)");
BIND(slow_path);
__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
return start;
}
(void) generate_native_entry(false);
return start;
return NULL;
}
// These should never be compiled since the interpreter will prefer

View File

@ -468,7 +468,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_jump_to_normal_entry();
return NULL;
}
//

View File

@ -59,6 +59,13 @@ const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_s
#endif // CC_INTERP
void InterpreterMacroAssembler::jump_to_entry(address entry) {
assert(entry, "Entry must have been generated by now");
AddressLiteral al(entry);
jump_to(al, G3_scratch);
delayed()->nop();
}
void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) {
// Note: this algorithm is also used by C1's OSR entry sequence.
// Any changes should also be applied to CodeEmitter::emit_osr_entry().
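Editor's note: the SPARC jump_to_entry above ends with delayed()->nop() because SPARC branches have a delay slot that executes regardless, so a harmless nop fills it when no useful instruction can. A toy listing of the kind of sequence the stub would emit (not real generated code; register choice and syntax are illustrative):

```cpp
#include <cstdio>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> stub;
  stub.push_back("set entry, %g3");  // materialize the target in a scratch register
  stub.push_back("jmp %g3");         // branch to the shared entry
  stub.push_back("nop");             // delay slot filler
  for (const std::string& insn : stub) std::printf("%s\n", insn.c_str());
  return 0;
}
```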

View File

@ -80,6 +80,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
InterpreterMacroAssembler(CodeBuffer* c)
: MacroAssembler(c) {}
void jump_to_entry(address entry);
#ifndef CC_INTERP
virtual void load_earlyret_value(TosState state);

View File

@ -34,9 +34,8 @@
address generate_abstract_entry(void);
// there are no math intrinsics on sparc
address generate_math_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
address generate_jump_to_normal_entry(void);
address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
address generate_accessor_entry(void) { return NULL; }
address generate_empty_entry(void) { return NULL; }
address generate_Reference_get_entry(void);
void lock_method(void);
void save_native_result(void);

View File

@ -241,15 +241,6 @@ void InterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {
// Various method entries
address InterpreterGenerator::generate_jump_to_normal_entry(void) {
address entry = __ pc();
assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
AddressLiteral al(Interpreter::entry_for_kind(Interpreter::zerolocals));
__ jump_to(al, G3_scratch);
__ delayed()->nop();
return entry;
}
// Abstract method entry
// Attempt to execute abstract method. Throw exception
//

View File

@ -779,14 +779,14 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// Generate regular method entry
__ bind(slow_path);
(void) generate_normal_entry(false);
__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
return entry;
}
#endif // INCLUDE_ALL_GCS
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_jump_to_normal_entry();
return NULL;
}
//

View File

@ -807,7 +807,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_jump_to_normal_entry();
return NULL;
}
//

View File

@ -40,6 +40,11 @@
// Implementation of InterpreterMacroAssembler
void InterpreterMacroAssembler::jump_to_entry(address entry) {
assert(entry, "Entry must have been generated by now");
jump(RuntimeAddress(entry));
}
#ifndef CC_INTERP
void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
Label update, next, none;

View File

@ -60,6 +60,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
_locals_register(LP64_ONLY(r14) NOT_LP64(rdi)),
_bcp_register(LP64_ONLY(r13) NOT_LP64(rsi)) {}
void jump_to_entry(address entry);
void load_earlyret_value(TosState state);
#ifdef CC_INTERP

View File

@ -31,17 +31,6 @@
#define __ _masm->
// Jump into normal path for accessor and empty entry to jump to normal entry
// The "fast" optimization don't update compilation count therefore can disable inlining
// for these functions that should be inlined.
address InterpreterGenerator::generate_jump_to_normal_entry(void) {
address entry_point = __ pc();
assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
__ jump(RuntimeAddress(Interpreter::entry_for_kind(Interpreter::zerolocals)));
return entry_point;
}
// Abstract method entry
// Attempt to execute abstract method. Throw exception
address InterpreterGenerator::generate_abstract_entry(void) {

View File

@ -36,9 +36,8 @@
address generate_native_entry(bool synchronized);
address generate_abstract_entry(void);
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_jump_to_normal_entry(void);
address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
address generate_accessor_entry(void) { return NULL; }
address generate_empty_entry(void) { return NULL; }
address generate_Reference_get_entry();
address generate_CRC32_update_entry();
address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);

View File

@ -697,15 +697,14 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
__ jmp(rdi);
__ bind(slow_path);
(void) generate_normal_entry(false);
__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
return entry;
}
#endif // INCLUDE_ALL_GCS
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_jump_to_normal_entry();
return NULL;
}
/**
@ -753,12 +752,10 @@ address InterpreterGenerator::generate_CRC32_update_entry() {
// generate a vanilla native entry as the slow path
__ bind(slow_path);
(void) generate_native_entry(false);
__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
return entry;
}
return generate_native_entry(false);
return NULL;
}
/**
@ -821,12 +818,10 @@ address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpret
// generate a vanilla native entry as the slow path
__ bind(slow_path);
(void) generate_native_entry(false);
__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
return entry;
}
return generate_native_entry(false);
return NULL;
}
/**
@ -873,7 +868,7 @@ address InterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpre
return entry;
}
return generate_native_entry(false);
return NULL;
}
/**
@ -881,10 +876,8 @@ address InterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpre
* java.lang.Float.intBitsToFloat(int bits)
*/
address InterpreterGenerator::generate_Float_intBitsToFloat_entry() {
address entry;
if (UseSSE >= 1) {
entry = __ pc();
address entry = __ pc();
// rsi: the sender's SP
@ -898,11 +891,10 @@ address InterpreterGenerator::generate_Float_intBitsToFloat_entry() {
__ pop(rdi); // get return address
__ mov(rsp, rsi); // set rsp to the sender's SP
__ jmp(rdi);
} else {
entry = generate_native_entry(false);
return entry;
}
return entry;
return NULL;
}
/**
@ -910,10 +902,8 @@ address InterpreterGenerator::generate_Float_intBitsToFloat_entry() {
* java.lang.Float.floatToRawIntBits(float value)
*/
address InterpreterGenerator::generate_Float_floatToRawIntBits_entry() {
address entry;
if (UseSSE >= 1) {
entry = __ pc();
address entry = __ pc();
// rsi: the sender's SP
@ -927,11 +917,10 @@ address InterpreterGenerator::generate_Float_floatToRawIntBits_entry() {
__ pop(rdi); // get return address
__ mov(rsp, rsi); // set rsp to the sender's SP
__ jmp(rdi);
} else {
entry = generate_native_entry(false);
return entry;
}
return entry;
return NULL;
}
@ -940,10 +929,8 @@ address InterpreterGenerator::generate_Float_floatToRawIntBits_entry() {
* java.lang.Double.longBitsToDouble(long bits)
*/
address InterpreterGenerator::generate_Double_longBitsToDouble_entry() {
address entry;
if (UseSSE >= 2) {
entry = __ pc();
address entry = __ pc();
// rsi: the sender's SP
@ -957,11 +944,10 @@ address InterpreterGenerator::generate_Double_longBitsToDouble_entry() {
__ pop(rdi); // get return address
__ mov(rsp, rsi); // set rsp to the sender's SP
__ jmp(rdi);
} else {
entry = generate_native_entry(false);
return entry;
}
return entry;
return NULL;
}
/**
@ -969,10 +955,8 @@ address InterpreterGenerator::generate_Double_longBitsToDouble_entry() {
* java.lang.Double.doubleToRawLongBits(double value)
*/
address InterpreterGenerator::generate_Double_doubleToRawLongBits_entry() {
address entry;
if (UseSSE >= 2) {
entry = __ pc();
address entry = __ pc();
// rsi: the sender's SP
@ -987,11 +971,10 @@ address InterpreterGenerator::generate_Double_doubleToRawLongBits_entry() {
__ pop(rdi); // get return address
__ mov(rsp, rsi); // set rsp to the sender's SP
__ jmp(rdi);
} else {
entry = generate_native_entry(false);
return entry;
}
return entry;
return NULL;
}
//

View File

@ -677,15 +677,14 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// generate a vanilla interpreter entry as the slow path
__ bind(slow_path);
(void) generate_normal_entry(false);
__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
return entry;
}
#endif // INCLUDE_ALL_GCS
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_jump_to_normal_entry();
return NULL;
}
/**
@ -733,12 +732,10 @@ address InterpreterGenerator::generate_CRC32_update_entry() {
// generate a vanilla native entry as the slow path
__ bind(slow_path);
(void) generate_native_entry(false);
__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
return entry;
}
return generate_native_entry(false);
return NULL;
}
/**
@ -796,12 +793,10 @@ address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpret
// generate a vanilla native entry as the slow path
__ bind(slow_path);
(void) generate_native_entry(false);
__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
return entry;
}
return generate_native_entry(false);
return NULL;
}
/**
@ -852,7 +847,7 @@ address InterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpre
return entry;
}
return generate_native_entry(false);
return NULL;
}
// Interpreter stub for calling a native method. (asm interpreter)

View File

@ -816,7 +816,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// If G1 is not enabled then attempt to go through the normal entry point
// Reference.get could be instrumented by jvmti
return generate_normal_entry(false);
return NULL;
}
address InterpreterGenerator::generate_native_entry(bool synchronized) {

View File

@ -104,7 +104,10 @@ CodeletMark::~CodeletMark() {
(*_masm)->flush();
// Commit Codelet.
AbstractInterpreter::code()->commit((*_masm)->code()->pure_insts_size(), (*_masm)->code()->strings());
int committed_code_size = (*_masm)->code()->pure_insts_size();
if (committed_code_size) {
AbstractInterpreter::code()->commit(committed_code_size, (*_masm)->code()->strings());
}
// Make sure nobody can use _masm outside a CodeletMark lifespan.
*_masm = NULL;
}
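Editor's note: since entry generators may now emit nothing and return NULL, ~CodeletMark only commits a codelet when some code was actually produced. A standalone sketch of the guard (illustrative C++, not the HotSpot classes):

```cpp
#include <cstdio>

// Minimal stand-in for the code buffer queried in ~CodeletMark().
struct CodeBuffer {
  int insts;
  int pure_insts_size() const { return insts; }
};

// Sketch of the new behavior: skip the commit entirely when the generator
// emitted no instructions, instead of registering an empty codelet.
static void commit_codelet(const CodeBuffer& code) {
  int committed_code_size = code.pure_insts_size();
  if (committed_code_size) {
    std::printf("commit %d bytes\n", committed_code_size);
  } else {
    std::printf("nothing generated, nothing committed\n");
  }
}

int main() {
  commit_codelet(CodeBuffer{64});
  commit_codelet(CodeBuffer{0});
  return 0;
}
```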
@ -546,17 +549,18 @@ void AbstractInterpreterGenerator::initialize_method_handle_entries() {
address InterpreterGenerator::generate_method_entry(
AbstractInterpreter::MethodKind kind) {
// determine code generation flags
bool native = false;
bool synchronized = false;
address entry_point = NULL;
switch (kind) {
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized: synchronized = true; break;
case Interpreter::native : entry_point = generate_native_entry(false); break;
case Interpreter::native_synchronized : entry_point = generate_native_entry(true); break;
case Interpreter::empty : entry_point = generate_empty_entry(); break;
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized: synchronized = true; break;
case Interpreter::native : native = true; break;
case Interpreter::native_synchronized : native = true; synchronized = true; break;
case Interpreter::empty : entry_point = generate_empty_entry(); break;
case Interpreter::accessor : entry_point = generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = generate_abstract_entry(); break;
case Interpreter::abstract : entry_point = generate_abstract_entry(); break;
case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
@ -571,11 +575,11 @@ address InterpreterGenerator::generate_method_entry(
: entry_point = generate_Reference_get_entry(); break;
#ifndef CC_INTERP
case Interpreter::java_util_zip_CRC32_update
: entry_point = generate_CRC32_update_entry(); break;
: native = true; entry_point = generate_CRC32_update_entry(); break;
case Interpreter::java_util_zip_CRC32_updateBytes
: // fall thru
case Interpreter::java_util_zip_CRC32_updateByteBuffer
: entry_point = generate_CRC32_updateBytes_entry(kind); break;
: native = true; entry_point = generate_CRC32_updateBytes_entry(kind); break;
case Interpreter::java_util_zip_CRC32C_updateBytes
: // fall thru
case Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer
@ -584,20 +588,20 @@ address InterpreterGenerator::generate_method_entry(
// On x86_32 platforms, a special entry is generated for the following four methods.
// On other platforms the normal entry is used to enter these methods.
case Interpreter::java_lang_Float_intBitsToFloat
: entry_point = generate_Float_intBitsToFloat_entry(); break;
: native = true; entry_point = generate_Float_intBitsToFloat_entry(); break;
case Interpreter::java_lang_Float_floatToRawIntBits
: entry_point = generate_Float_floatToRawIntBits_entry(); break;
: native = true; entry_point = generate_Float_floatToRawIntBits_entry(); break;
case Interpreter::java_lang_Double_longBitsToDouble
: entry_point = generate_Double_longBitsToDouble_entry(); break;
: native = true; entry_point = generate_Double_longBitsToDouble_entry(); break;
case Interpreter::java_lang_Double_doubleToRawLongBits
: entry_point = generate_Double_doubleToRawLongBits_entry(); break;
: native = true; entry_point = generate_Double_doubleToRawLongBits_entry(); break;
#else
case Interpreter::java_lang_Float_intBitsToFloat:
case Interpreter::java_lang_Float_floatToRawIntBits:
case Interpreter::java_lang_Double_longBitsToDouble:
case Interpreter::java_lang_Double_doubleToRawLongBits:
entry_point = generate_native_entry(false);
break;
case Interpreter::java_lang_Double_doubleToRawLongBits:
native = true;
break;
#endif // defined(TARGET_ARCH_x86) && !defined(_LP64)
#endif // CC_INTERP
default:
@ -609,5 +613,18 @@ address InterpreterGenerator::generate_method_entry(
return entry_point;
}
return generate_normal_entry(synchronized);
// We expect the normal and native entry points to be generated first so we can reuse them.
if (native) {
entry_point = Interpreter::entry_for_kind(synchronized ? Interpreter::native_synchronized : Interpreter::native);
if (entry_point == NULL) {
entry_point = generate_native_entry(synchronized);
}
} else {
entry_point = Interpreter::entry_for_kind(synchronized ? Interpreter::zerolocals_synchronized : Interpreter::zerolocals);
if (entry_point == NULL) {
entry_point = generate_normal_entry(synchronized);
}
}
return entry_point;
}
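Editor's note: the core of the change is the new tail of generate_method_entry: when no specialized entry point was produced, the method reuses the native or zerolocals entry generated earlier instead of generating a fresh stub per method kind. A self-contained model of that dispatch (illustrative only; the map and kinds below stand in for Interpreter::entry_for_kind and the real MethodKind enum):

```cpp
#include <cassert>
#include <map>

enum Kind { zerolocals, zerolocals_synchronized, native, native_synchronized,
            empty, java_util_zip_CRC32_update };

using address = const char*;

static std::map<Kind, address> entry_table;  // stand-in for the interpreter's entry table

static address entry_for_kind(Kind k) {
  auto it = entry_table.find(k);
  return it == entry_table.end() ? nullptr : it->second;
}

// Sketch of the new fallback: specialized generators may return nullptr, in
// which case the shared entry (generated first) is reused.
static address generate_method_entry(Kind kind) {
  bool is_native = false;
  bool synchronized = false;
  address entry_point = nullptr;

  switch (kind) {
    case zerolocals:                 break;
    case zerolocals_synchronized:    synchronized = true; break;
    case native:                     is_native = true; break;
    case native_synchronized:        is_native = true; synchronized = true; break;
    case empty:                      /* generate_empty_entry() may return nullptr */ break;
    case java_util_zip_CRC32_update: is_native = true; /* intrinsic may return nullptr */ break;
  }
  if (entry_point != nullptr) return entry_point;

  Kind shared = is_native ? (synchronized ? native_synchronized : native)
                          : (synchronized ? zerolocals_synchronized : zerolocals);
  return entry_for_kind(shared);  // expected to have been generated already
}

int main() {
  entry_table[zerolocals] = "zerolocals_entry";
  entry_table[native]     = "native_entry";
  assert(generate_method_entry(empty) == entry_table[zerolocals]);
  assert(generate_method_entry(java_util_zip_CRC32_update) == entry_table[native]);
  return 0;
}
```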

View File

@ -412,6 +412,14 @@ void TemplateInterpreterGenerator::generate_all() {
method_entry(java_lang_math_pow )
method_entry(java_lang_ref_reference_get)
initialize_method_handle_entries();
// all native method kinds (must be one contiguous block)
Interpreter::_native_entry_begin = Interpreter::code()->code_end();
method_entry(native)
method_entry(native_synchronized)
Interpreter::_native_entry_end = Interpreter::code()->code_end();
if (UseCRC32Intrinsics) {
method_entry(java_util_zip_CRC32_update)
method_entry(java_util_zip_CRC32_updateBytes)
@ -428,14 +436,6 @@ void TemplateInterpreterGenerator::generate_all() {
method_entry(java_lang_Double_longBitsToDouble);
method_entry(java_lang_Double_doubleToRawLongBits);
initialize_method_handle_entries();
// all native method kinds (must be one contiguous block)
Interpreter::_native_entry_begin = Interpreter::code()->code_end();
method_entry(native)
method_entry(native_synchronized)
Interpreter::_native_entry_end = Interpreter::code()->code_end();
#undef method_entry
// Bytecodes
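Editor's note: the native entry block moves ahead of the intrinsic entries in generate_all() because the intrinsic generators may now return NULL and the fallback looks up the already generated native entry. A toy model of the ordering constraint (illustrative only):

```cpp
#include <cassert>
#include <map>
#include <string>

// Toy model: generate_all() must populate the shared native entries before
// generating the intrinsic method entries that may fall back to them.
int main() {
  std::map<std::string, std::string> table;  // kind -> generated entry

  table["native"]              = "native_entry";       // generated first now
  table["native_synchronized"] = "native_sync_entry";

  // CRC32 intrinsic disabled: its generator produces nothing, so the method
  // entry resolves to the shared native entry that already exists.
  bool use_crc32_intrinsics = false;
  std::string crc32_update = use_crc32_intrinsics ? "crc32_stub"
                                                  : table["native"];
  assert(crc32_update == "native_entry");
  return 0;
}
```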