Commit a51ff63df7 by Nils Eliasson, 2015-11-24 10:30:23 +01:00
229 changed files with 11445 additions and 4951 deletions

View File

@ -1079,10 +1079,10 @@ source %{
// and for a volatile write we need
//
// stlr<x>
//
//
// Alternatively, we can implement them by pairing a normal
// load/store with a memory barrier. For a volatile read we need
//
//
// ldr<x>
// dmb ishld
//
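For reference, a minimal C++ analogue of the mapping described above (an editorial sketch, not part of the patch): on AArch64 an acquire load compiles to ldar and a release store to stlr, which is exactly the instruction selection these rules make for Java volatile reads and writes.

#include <atomic>

std::atomic<int> guard{0};
int payload;

void writer() {
  payload = 42;
  guard.store(1, std::memory_order_release);            // stlr  (or dmb ish; str)
}

int reader() {
  while (guard.load(std::memory_order_acquire) == 0) { } // ldar  (or ldr; dmb ishld)
  return payload;
}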
@ -1240,7 +1240,7 @@ source %{
// Alternatively, we can elide generation of the dmb instructions
// and plant the alternative CompareAndSwap macro-instruction
// sequence (which uses ldaxr<x>).
//
//
// Of course, the above only applies when we see these signature
// configurations. We still want to plant dmb instructions in any
// other cases where we may see a MemBarAcquire, MemBarRelease or
@ -1367,7 +1367,7 @@ source %{
opcode = parent->Opcode();
return opcode == Op_MemBarRelease;
}
// 2) card mark detection helper
// helper predicate which can be used to detect a volatile membar
@ -1383,7 +1383,7 @@ source %{
// true
//
// iii) the node's Mem projection feeds a StoreCM node.
bool is_card_mark_membar(const MemBarNode *barrier)
{
if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {
@ -1402,7 +1402,7 @@ source %{
return true;
}
}
return false;
}
@ -1430,7 +1430,7 @@ source %{
// where
// || and \\ represent Ctl and Mem feeds via Proj nodes
// | \ and / indicate further routing of the Ctl and Mem feeds
//
//
// this is the graph we see for non-object stores. however, for a
// volatile Object store (StoreN/P) we may see other nodes below the
// leading membar because of the need for a GC pre- or post-write
@ -1592,7 +1592,7 @@ source %{
// ordering but neither will a releasing store (stlr). The latter
// guarantees that the object put is visible but does not guarantee
// that writes by other threads have also been observed.
//
//
// So, returning to the task of translating the object put and the
// leading/trailing membar nodes: what do the non-normal node graph
// look like for these 2 special cases? and how can we determine the
@ -1731,7 +1731,7 @@ source %{
// | | | |
// C | M | M | M |
// \ | | /
// . . .
// . . .
// (post write subtree elided)
// . . .
// C \ M /
@ -1812,12 +1812,12 @@ source %{
// | | | / /
// | Region . . . Phi[M] _____/
// | / | /
// | | /
// | | /
// | . . . . . . | /
// | / | /
// Region | | Phi[M]
// | | | / Bot
// \ MergeMem
// \ MergeMem
// \ /
// MemBarVolatile
//
@ -1858,7 +1858,7 @@ source %{
// to a trailing barrier via a MergeMem. That feed is either direct
// (for CMS) or via 2 or 3 Phi nodes merging the leading barrier
// memory flow (for G1).
//
//
// The predicates controlling generation of instructions for store
// and barrier nodes employ a few simple helper functions (described
// below) which identify the presence or absence of all these
@ -2112,8 +2112,8 @@ source %{
x = x->in(MemNode::Memory);
} else {
// the merge should get its Bottom mem feed from the leading membar
x = mm->in(Compile::AliasIdxBot);
}
x = mm->in(Compile::AliasIdxBot);
}
// ensure this is a non control projection
if (!x->is_Proj() || x->is_CFG()) {
@ -2190,12 +2190,12 @@ source %{
// . . .
// |
// MemBarVolatile (card mark)
// | |
// | |
// | StoreCM
// | |
// | . . .
// Bot | /
// MergeMem
// Bot | /
// MergeMem
// |
// |
// MemBarVolatile {trailing}
@ -2203,10 +2203,10 @@ source %{
// 2)
// MemBarRelease/CPUOrder (leading)
// |
// |
// |
// |\ . . .
// | \ |
// | \ MemBarVolatile (card mark)
// | \ |
// | \ MemBarVolatile (card mark)
// | \ | |
// \ \ | StoreCM . . .
// \ \ |
@ -2231,7 +2231,7 @@ source %{
// | \ \ | StoreCM . . .
// | \ \ |
// \ \ Phi
// \ \ /
// \ \ /
// \ Phi
// \ /
// Phi . . .
@ -2506,7 +2506,7 @@ bool unnecessary_acquire(const Node *barrier)
return (x->is_Load() && x->as_Load()->is_acquire());
}
// now check for an unsafe volatile get
// need to check for
@ -2644,7 +2644,7 @@ bool needs_acquiring_load(const Node *n)
}
membar = child_membar(membar);
if (!membar || !membar->Opcode() == Op_MemBarCPUOrder) {
return false;
}
@ -2703,7 +2703,7 @@ bool unnecessary_volatile(const Node *n)
// first we check if this is part of a card mark. if so then we have
// to generate a StoreLoad barrier
if (is_card_mark_membar(mbvol)) {
return false;
}
@ -2769,7 +2769,7 @@ bool needs_releasing_store(const Node *n)
if (!is_card_mark_membar(mbvol)) {
return true;
}
// we found a card mark -- just make sure we have a trailing barrier
return (card_mark_to_trailing(mbvol) != NULL);
@ -2808,7 +2808,7 @@ bool needs_acquiring_load_exclusive(const Node *n)
assert(barrier->Opcode() == Op_MemBarCPUOrder,
"CAS not fed by cpuorder membar!");
MemBarNode *b = parent_membar(barrier);
assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
"CAS not fed by cpuorder+release membar pair!");
@ -3463,6 +3463,17 @@ const bool Matcher::match_rule_supported(int opcode) {
return true; // Per default match rules are supported.
}
const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
// TODO
// identify extra cases that we might want to provide match rules for
// e.g. Op_ vector nodes and other intrinsics while guarding with vlen
bool ret_value = match_rule_supported(opcode);
// Add rules here.
return ret_value; // Per default match rules are supported.
}
const int Matcher::float_pressure(int default_pressure_threshold) {
return default_pressure_threshold;
}
@ -4663,7 +4674,7 @@ encode %{
call = __ trampoline_call(Address(addr, relocInfo::static_call_type), &cbuf);
}
if (call == NULL) {
ciEnv::current()->record_failure("CodeCache is full");
ciEnv::current()->record_failure("CodeCache is full");
return;
}
@ -4671,7 +4682,7 @@ encode %{
// Emit stub for static call
address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
if (stub == NULL) {
ciEnv::current()->record_failure("CodeCache is full");
ciEnv::current()->record_failure("CodeCache is full");
return;
}
}
@ -4681,7 +4692,7 @@ encode %{
MacroAssembler _masm(&cbuf);
address call = __ ic_call((address)$meth$$method);
if (call == NULL) {
ciEnv::current()->record_failure("CodeCache is full");
ciEnv::current()->record_failure("CodeCache is full");
return;
}
%}
@ -4706,7 +4717,7 @@ encode %{
if (cb) {
address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
if (call == NULL) {
ciEnv::current()->record_failure("CodeCache is full");
ciEnv::current()->record_failure("CodeCache is full");
return;
}
} else {

View File

@ -41,7 +41,9 @@
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
ce->store_parameter(_method->as_register(), 1);
Metadata *m = _method->as_constant_ptr()->as_metadata();
__ mov_metadata(rscratch1, m);
ce->store_parameter(rscratch1, 1);
ce->store_parameter(_bci, 0);
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
ce->add_call_info_here(_info);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -70,6 +70,7 @@ LIR_Opr LIRGenerator::divInOpr() { Unimplemented(); return LIR_OprFact::i
LIR_Opr LIRGenerator::divOutOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::remOutOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::shiftCountOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::syncLockOpr() { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr() { return FrameMap::r0_opr; }
LIR_Opr LIRGenerator::getThreadTemp() { return LIR_OprFact::illegalOpr; }

View File

@ -73,6 +73,7 @@ define_pd_global(bool, UseCISCSpill, true);
define_pd_global(bool, OptoScheduling, false);
define_pd_global(bool, OptoBundling, false);
define_pd_global(bool, OptoRegScheduling, false);
define_pd_global(bool, SuperWordLoopUnrollAnalysis, false);
define_pd_global(intx, ReservedCodeCacheSize, 48*M);
define_pd_global(intx, NonProfiledCodeHeapSize, 21*M);

View File

@ -29,16 +29,16 @@
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"
jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, oop method) {
jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Handle method, TRAPS) {
Unimplemented();
return 0;
}
void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& constant) {
void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS) {
Unimplemented();
}
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle& constant) {
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS) {
Unimplemented();
}
@ -46,20 +46,20 @@ void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset
Unimplemented();
}
void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination) {
void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination, TRAPS) {
Unimplemented();
}
void CodeInstaller::pd_relocate_JavaMethod(oop hotspot_method, jint pc_offset) {
void CodeInstaller::pd_relocate_JavaMethod(Handle hotspot_method, jint pc_offset, TRAPS) {
Unimplemented();
}
void CodeInstaller::pd_relocate_poll(address pc, jint mark) {
void CodeInstaller::pd_relocate_poll(address pc, jint mark, TRAPS) {
Unimplemented();
}
// convert JVMCI register indices (as used in oop maps) to HotSpot registers
VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg) {
VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg, TRAPS) {
return NULL;
}

View File

@ -91,20 +91,18 @@ int MacroAssembler::pd_patch_instruction_size(address branch, address target) {
unsigned offset_lo = dest & 0xfff;
offset = adr_page - pc_page;
// We handle 3 types of PC relative addressing
// We handle 4 types of PC relative addressing
// 1 - adrp Rx, target_page
// ldr/str Ry, [Rx, #offset_in_page]
// 2 - adrp Rx, target_page
// add Ry, Rx, #offset_in_page
// 3 - adrp Rx, target_page (page aligned reloc, offset == 0)
// In the first 2 cases we must check that Rx is the same in the adrp and the
// subsequent ldr/str or add instruction. Otherwise we could accidentally end
// up treating a type 3 relocation as a type 1 or 2 just because it happened
// to be followed by a random unrelated ldr/str or add instruction.
//
// In the case of a type 3 relocation, we know that these are only generated
// for the safepoint polling page, or for the card type byte map base so we
// assert as much and of course that the offset is 0.
// movk Rx, #imm16<<32
// 4 - adrp Rx, target_page (page aligned reloc, offset == 0)
// In the first 3 cases we must check that Rx is the same in the adrp and the
// subsequent ldr/str, add or movk instruction. Otherwise we could accidentally end
// up treating a type 4 relocation as a type 1, 2 or 3 just because it happened
// to be followed by a random unrelated ldr/str, add or movk instruction.
//
unsigned insn2 = ((unsigned*)branch)[1];
if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
@ -123,13 +121,13 @@ int MacroAssembler::pd_patch_instruction_size(address branch, address target) {
Instruction_aarch64::patch(branch + sizeof (unsigned),
21, 10, offset_lo);
instructions = 2;
} else {
assert((jbyte *)target ==
((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base ||
target == StubRoutines::crc_table_addr() ||
(address)target == os::get_polling_page(),
"adrp must be polling page or byte map base");
assert(offset_lo == 0, "offset must be 0 for polling page or byte map base");
} else if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
Instruction_aarch64::extract(insn, 4, 0) ==
Instruction_aarch64::extract(insn2, 4, 0)) {
// movk #imm16<<32
Instruction_aarch64::patch(branch + 4, 20, 5, (uint64_t)target >> 32);
offset &= (1<<20)-1;
instructions = 2;
}
}
int offset_lo = offset & 3;
@ -212,16 +210,16 @@ address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) {
// Return the target address for the following sequences
// 1 - adrp Rx, target_page
// ldr/str Ry, [Rx, #offset_in_page]
// 2 - adrp Rx, target_page ]
// 2 - adrp Rx, target_page
// add Ry, Rx, #offset_in_page
// 3 - adrp Rx, target_page (page aligned reloc, offset == 0)
// movk Rx, #imm12<<32
// 4 - adrp Rx, target_page (page aligned reloc, offset == 0)
//
// In the first two cases we check that the register is the same and
// return the target_page + the offset within the page.
// Otherwise we assume it is a page aligned relocation and return
// the target page only. The only cases this is generated is for
// the safepoint polling page or for the card table byte map base so
// we assert as much.
// the target page only.
//
unsigned insn2 = ((unsigned*)insn_addr)[1];
if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
@ -238,10 +236,12 @@ address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) {
unsigned int byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
return address(target_page + byte_offset);
} else {
assert((jbyte *)target_page ==
((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base ||
(address)target_page == os::get_polling_page(),
"adrp must be polling page or byte map base");
if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
Instruction_aarch64::extract(insn, 4, 0) ==
Instruction_aarch64::extract(insn2, 4, 0)) {
target_page = (target_page & 0xffffffff) |
((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
}
return (address)target_page;
}
} else {
@ -3964,22 +3964,26 @@ address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype
void MacroAssembler::adrp(Register reg1, const Address &dest, unsigned long &byte_offset) {
relocInfo::relocType rtype = dest.rspec().reloc()->type();
if (uabs(pc() - dest.target()) >= (1LL << 32)) {
guarantee(rtype == relocInfo::none
|| rtype == relocInfo::external_word_type
|| rtype == relocInfo::poll_type
|| rtype == relocInfo::poll_return_type,
"can only use a fixed address with an ADRP");
// Out of range. This doesn't happen very often, but we have to
// handle it
mov(reg1, dest);
byte_offset = 0;
} else {
InstructionMark im(this);
code_section()->relocate(inst_mark(), dest.rspec());
byte_offset = (uint64_t)dest.target() & 0xfff;
unsigned long low_page = (unsigned long)CodeCache::low_bound() >> 12;
unsigned long high_page = (unsigned long)(CodeCache::high_bound()-1) >> 12;
unsigned long dest_page = (unsigned long)dest.target() >> 12;
long offset_low = dest_page - low_page;
long offset_high = dest_page - high_page;
InstructionMark im(this);
code_section()->relocate(inst_mark(), dest.rspec());
// 8143067: Ensure that the adrp can reach the dest from anywhere within
// the code cache so that if it is relocated we know it will still reach
if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
_adrp(reg1, dest.target());
} else {
unsigned long pc_page = (unsigned long)pc() >> 12;
long offset = dest_page - pc_page;
offset = (offset & ((1<<20)-1)) << 12;
_adrp(reg1, pc()+offset);
movk(reg1, ((unsigned long)dest.target() >> 32) & 0xffff, 32);
}
byte_offset = (unsigned long)dest.target() & 0xfff;
}
void MacroAssembler::build_frame(int framesize) {
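An illustrative sketch of the reachability test used above (standalone helper, not HotSpot code): adrp encodes a signed 21-bit page offset, so a plain adrp is only safe if the destination stays within that range from every possible pc in the code cache; otherwise the adrp/movk pair is emitted.

#include <cstdint>

bool adrp_reaches_from_whole_code_cache(uint64_t low_bound, uint64_t high_bound,
                                        uint64_t dest) {
  int64_t low_page  = (int64_t)(low_bound >> 12);
  int64_t high_page = (int64_t)((high_bound - 1) >> 12);
  int64_t dest_page = (int64_t)(dest >> 12);
  int64_t offset_low  = dest_page - low_page;   // offset seen from the cache start
  int64_t offset_high = dest_page - high_page;  // offset seen from the cache end
  return offset_high >= -(1LL << 20) && offset_low < (1LL << 20);
}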

View File

@ -2384,6 +2384,7 @@ void SharedRuntime::generate_deopt_blob() {
}
#endif // ASSERT
__ mov(c_rarg0, rthread);
__ mov(c_rarg1, rcpool);
__ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
__ blrt(rscratch1, 1, 0, 1);
__ bind(retaddr);
@ -2397,6 +2398,7 @@ void SharedRuntime::generate_deopt_blob() {
// Load UnrollBlock* into rdi
__ mov(r5, r0);
__ ldrw(rcpool, Address(r5, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
Label noException;
__ cmpw(rcpool, Deoptimization::Unpack_exception); // Was exception pending?
__ br(Assembler::NE, noException);
@ -2609,6 +2611,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// n.b. 2 gp args, 0 fp args, integral return type
__ mov(c_rarg0, rthread);
__ movw(c_rarg2, (unsigned)Deoptimization::Unpack_uncommon_trap);
__ lea(rscratch1,
RuntimeAddress(CAST_FROM_FN_PTR(address,
Deoptimization::uncommon_trap)));
@ -2628,6 +2631,16 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// move UnrollBlock* into r4
__ mov(r4, r0);
#ifdef ASSERT
{ Label L;
__ ldrw(rscratch1, Address(r4, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
__ cmpw(rscratch1, (unsigned)Deoptimization::Unpack_uncommon_trap);
__ br(Assembler::EQ, L);
__ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
__ bind(L);
}
#endif
// Pop all the frames we must move/replace.
//
// Frame picture (youngest to oldest)

View File

@ -61,6 +61,7 @@ define_pd_global(bool, OptoPeephole, false);
define_pd_global(bool, UseCISCSpill, false);
define_pd_global(bool, OptoBundling, false);
define_pd_global(bool, OptoRegScheduling, false);
define_pd_global(bool, SuperWordLoopUnrollAnalysis, false);
// GL:
// Detected a problem with unscaled compressed oops and
// narrow_oop_use_complex_address() == false.

View File

@ -2697,7 +2697,7 @@ address CppInterpreterGenerator::generate_normal_entry(bool synchronized) {
// Provide a debugger breakpoint in the frame manager if breakpoints
// in osr'd methods are requested.
#ifdef COMPILER2
NOT_PRODUCT( if (OptoBreakpointOSR) { __ illtrap(); } )
if (OptoBreakpointOSR) { __ illtrap(); }
#endif
// Load callee's pointer to locals array from callee's state.

View File

@ -297,8 +297,16 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
__ bind(do_float);
__ lfs(floatSlot, 0, arg_java);
#if defined(LINUX)
// Linux uses ELF ABI. Both original ELF and ELFv2 ABIs have float
// in the least significant word of an argument slot.
#if defined(VM_LITTLE_ENDIAN)
__ stfs(floatSlot, 0, arg_c);
#else
__ stfs(floatSlot, 4, arg_c);
#endif
#elif defined(AIX)
// Although AIX runs on big endian CPU, float is in most significant
// word of an argument slot.
__ stfs(floatSlot, 0, arg_c);
#else
#error "unknown OS"

View File

@ -29,16 +29,16 @@
#include "runtime/sharedRuntime.hpp"
#include "vmreg_ppc.inline.hpp"
jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, oop method) {
jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Handle method, TRAPS) {
Unimplemented();
return 0;
}
void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& constant) {
void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS) {
Unimplemented();
}
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle& constant) {
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS) {
Unimplemented();
}
@ -46,20 +46,20 @@ void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset
Unimplemented();
}
void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination) {
void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination, TRAPS) {
Unimplemented();
}
void CodeInstaller::pd_relocate_JavaMethod(oop hotspot_method, jint pc_offset) {
void CodeInstaller::pd_relocate_JavaMethod(Handle hotspot_method, jint pc_offset, TRAPS) {
Unimplemented();
}
void CodeInstaller::pd_relocate_poll(address pc, jint mark) {
void CodeInstaller::pd_relocate_poll(address pc, jint mark, TRAPS) {
Unimplemented();
}
// convert JVMCI register indices (as used in oop maps) to HotSpot registers
VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg) {
VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg, TRAPS) {
return NULL;
}

View File

@ -2064,6 +2064,17 @@ const bool Matcher::match_rule_supported(int opcode) {
return true; // Per default match rules are supported.
}
const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
// TODO
// identify extra cases that we might want to provide match rules for
// e.g. Op_ vector nodes and other intrinsics while guarding with vlen
bool ret_value = match_rule_supported(opcode);
// Add rules here.
return ret_value; // Per default match rules are supported.
}
const int Matcher::float_pressure(int default_pressure_threshold) {
return default_pressure_threshold;
}
@ -3416,7 +3427,7 @@ encode %{
// The stub for call to interpreter.
address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
if (stub == NULL) {
ciEnv::current()->record_failure("CodeCache is full");
ciEnv::current()->record_failure("CodeCache is full");
return;
}
}
@ -3465,7 +3476,7 @@ encode %{
// The stub for call to interpreter.
address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
if (stub == NULL) {
ciEnv::current()->record_failure("CodeCache is full");
ciEnv::current()->record_failure("CodeCache is full");
return;
}
@ -6911,7 +6922,7 @@ instruct decodeN_Disjoint_isel_Ex(iRegPdst dst, iRegNsrc src, flagsReg crx) %{
n_compare->_opnds[0] = op_crx;
n_compare->_opnds[1] = op_src;
n_compare->_opnds[2] = new immN_0Oper(TypeNarrowOop::NULL_PTR);
decodeN_mergeDisjointNode *n2 = new decodeN_mergeDisjointNode();
n2->add_req(n_region, n_src, n1);
n2->_opnds[0] = op_dst;
@ -10588,7 +10599,7 @@ instruct cmpP_reg_imm16(flagsReg crx, iRegPsrc src1, immL16 src2) %{
instruct cmpFUnordered_reg_reg(flagsReg crx, regF src1, regF src2) %{
// Needs matchrule, see cmpDUnordered.
match(Set crx (CmpF src1 src2));
match(Set crx (CmpF src1 src2));
// no match-rule, false predicate
predicate(false);
@ -10697,13 +10708,13 @@ instruct cmpF3_reg_reg_ExEx(iRegIdst dst, regF src1, regF src2) %{
%}
instruct cmpDUnordered_reg_reg(flagsReg crx, regD src1, regD src2) %{
// Needs matchrule so that ideal opcode is Cmp. This causes that gcm places the
// node right before the conditional move using it.
// Needs matchrule so that ideal opcode is Cmp. This causes that gcm places the
// node right before the conditional move using it.
// In jck test api/java_awt/geom/QuadCurve2DFloat/index.html#SetCurveTesttestCase7,
// compilation of java.awt.geom.RectangularShape::getBounds()Ljava/awt/Rectangle
// crashed in register allocation where the flags Reg between cmpDUnoredered and a
// conditional move was supposed to be spilled.
match(Set crx (CmpD src1 src2));
match(Set crx (CmpD src1 src2));
// False predicate, shall not be matched.
predicate(false);

View File

@ -753,6 +753,21 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
// in farg_reg[j] if argument i is the j-th float argument of this call.
//
case T_FLOAT:
#if defined(LINUX)
// Linux uses ELF ABI. Both original ELF and ELFv2 ABIs have float
// in the least significant word of an argument slot.
#if defined(VM_LITTLE_ENDIAN)
#define FLOAT_WORD_OFFSET_IN_SLOT 0
#else
#define FLOAT_WORD_OFFSET_IN_SLOT 1
#endif
#elif defined(AIX)
// Although AIX runs on big endian CPU, float is in the most
// significant word of an argument slot.
#define FLOAT_WORD_OFFSET_IN_SLOT 0
#else
#error "unknown OS"
#endif
if (freg < Argument::n_float_register_parameters_c) {
// Put float in register ...
reg = farg_reg[freg];
@ -766,14 +781,14 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
if (arg >= Argument::n_regs_not_on_stack_c) {
// ... and on the stack.
guarantee(regs2 != NULL, "must pass float in register and stack slot");
VMReg reg2 = VMRegImpl::stack2reg(stk LINUX_ONLY(+1));
VMReg reg2 = VMRegImpl::stack2reg(stk + FLOAT_WORD_OFFSET_IN_SLOT);
regs2[i].set1(reg2);
stk += inc_stk_for_intfloat;
}
} else {
// Put float on stack.
reg = VMRegImpl::stack2reg(stk LINUX_ONLY(+1));
reg = VMRegImpl::stack2reg(stk + FLOAT_WORD_OFFSET_IN_SLOT);
stk += inc_stk_for_intfloat;
}
regs[i].set1(reg);
@ -2802,7 +2817,7 @@ void SharedRuntime::generate_deopt_blob() {
__ set_last_Java_frame(R1_SP, noreg);
// With EscapeAnalysis turned on, this call may safepoint!
__ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), R16_thread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), R16_thread, exec_mode_reg);
address calls_return_pc = __ last_calls_return_pc();
// Set an oopmap for the call site that describes all our saved registers.
oop_maps->add_gc_map(calls_return_pc - start, map);
@ -2815,6 +2830,8 @@ void SharedRuntime::generate_deopt_blob() {
// by save_volatile_registers(...).
RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes);
// reload the exec mode from the UnrollBlock (it might have changed)
__ lwz(exec_mode_reg, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), unroll_block_reg);
// In excp_deopt_mode, restore and clear exception oop which we
// stored in the thread during exception entry above. The exception
// oop will be the return value of this stub.
@ -2945,8 +2962,9 @@ void SharedRuntime::generate_uncommon_trap_blob() {
__ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);
__ mr(klass_index_reg, R3);
__ li(R5_ARG3, Deoptimization::Unpack_uncommon_trap);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap),
R16_thread, klass_index_reg);
R16_thread, klass_index_reg, R5_ARG3);
// Set an oopmap for the call site.
oop_maps->add_gc_map(gc_map_pc - start, map);
@ -2966,6 +2984,12 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// stack: (caller_of_deoptee, ...).
#ifdef ASSERT
__ lwz(R22_tmp2, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), unroll_block_reg);
__ cmpdi(CCR0, R22_tmp2, (unsigned)Deoptimization::Unpack_uncommon_trap);
__ asm_assert_eq("SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap", 0);
#endif
// Allocate new interpreter frame(s) and possibly a c2i adapter
// frame.
push_skeleton_frames(masm, false/*deopt*/,

View File

@ -94,8 +94,10 @@ void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
__ set(_bci, G4);
Metadata *m = _method->as_constant_ptr()->as_metadata();
__ set_metadata_constant(m, G5);
__ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
__ delayed()->mov_or_nop(_method->as_register(), G5);
__ delayed()->nop();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);

View File

@ -2812,7 +2812,23 @@ void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
}
void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
fatal("CRC32 intrinsic is not implemented on this platform");
assert(op->crc()->is_single_cpu(), "crc must be register");
assert(op->val()->is_single_cpu(), "byte value must be register");
assert(op->result_opr()->is_single_cpu(), "result must be register");
Register crc = op->crc()->as_register();
Register val = op->val()->as_register();
Register table = op->result_opr()->as_register();
Register res = op->result_opr()->as_register();
assert_different_registers(val, crc, table);
__ set(ExternalAddress(StubRoutines::crc_table_addr()), table);
__ not1(crc);
__ clruwu(crc);
__ update_byte_crc32(crc, val, table);
__ not1(crc);
__ mov(crc, res);
}
void LIR_Assembler::emit_lock(LIR_OpLock* op) {
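For reference, the value the single-byte path above computes, written as plain C++ (a sketch, not HotSpot code): not1 provides the pre/post inversion required by CRC-32 and update_byte_crc32 performs one table-driven step.

#include <cstdint>

uint32_t crc32_update_one_byte(uint32_t crc, uint8_t val,
                               const uint32_t table[256]) {
  crc = ~crc;                                    // not1(crc)
  crc = table[(crc ^ val) & 0xFF] ^ (crc >> 8);  // update_byte_crc32(crc, val, table)
  return ~crc;                                   // not1(crc)
}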

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -68,6 +68,7 @@ void LIRItem::load_nonconstant() {
LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::Oexception_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr() { return FrameMap::Oissuing_pc_opr; }
LIR_Opr LIRGenerator::syncLockOpr() { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr() { return new_register(T_OBJECT); }
LIR_Opr LIRGenerator::getThreadTemp() { return rlock_callee_saved(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); }
@ -785,7 +786,86 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
}
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
fatal("CRC32 intrinsic is not implemented on this platform");
// Make all state_for calls early since they can emit code
LIR_Opr result = rlock_result(x);
int flags = 0;
switch (x->id()) {
case vmIntrinsics::_updateCRC32: {
LIRItem crc(x->argument_at(0), this);
LIRItem val(x->argument_at(1), this);
// val is destroyed by update_crc32
val.set_destroys_register();
crc.load_item();
val.load_item();
__ update_crc32(crc.result(), val.result(), result);
break;
}
case vmIntrinsics::_updateBytesCRC32:
case vmIntrinsics::_updateByteBufferCRC32: {
bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);
LIRItem crc(x->argument_at(0), this);
LIRItem buf(x->argument_at(1), this);
LIRItem off(x->argument_at(2), this);
LIRItem len(x->argument_at(3), this);
buf.load_item();
off.load_nonconstant();
LIR_Opr index = off.result();
int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
if(off.result()->is_constant()) {
index = LIR_OprFact::illegalOpr;
offset += off.result()->as_jint();
}
LIR_Opr base_op = buf.result();
if (index->is_valid()) {
LIR_Opr tmp = new_register(T_LONG);
__ convert(Bytecodes::_i2l, index, tmp);
index = tmp;
if (index->is_constant()) {
offset += index->as_constant_ptr()->as_jint();
index = LIR_OprFact::illegalOpr;
} else if (index->is_register()) {
LIR_Opr tmp2 = new_register(T_LONG);
LIR_Opr tmp3 = new_register(T_LONG);
__ move(base_op, tmp2);
__ move(index, tmp3);
__ add(tmp2, tmp3, tmp2);
base_op = tmp2;
} else {
ShouldNotReachHere();
}
}
LIR_Address* a = new LIR_Address(base_op, offset, T_BYTE);
BasicTypeList signature(3);
signature.append(T_INT);
signature.append(T_ADDRESS);
signature.append(T_INT);
CallingConvention* cc = frame_map()->c_calling_convention(&signature);
const LIR_Opr result_reg = result_register_for(x->type());
LIR_Opr addr = new_pointer_register();
__ leal(LIR_OprFact::address(a), addr);
crc.load_item_force(cc->at(0));
__ move(addr, cc->at(1));
len.load_item_force(cc->at(2));
__ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
__ move(result_reg, result);
break;
}
default: {
ShouldNotReachHere();
}
}
}
// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f

View File

@ -65,6 +65,7 @@ define_pd_global(bool, UseCISCSpill, false);
define_pd_global(bool, OptoBundling, false);
define_pd_global(bool, OptoScheduling, true);
define_pd_global(bool, OptoRegScheduling, false);
define_pd_global(bool, SuperWordLoopUnrollAnalysis, false);
#ifdef _LP64
// We need to make sure that all generated code is within

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,8 +43,9 @@
void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
void generate_counter_overflow(Label& Lcontinue);
address generate_CRC32_update_entry();
address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);
// Not supported
address generate_CRC32_update_entry() { return NULL; }
address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
address generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
#endif // CPU_SPARC_VM_INTERPRETERGENERATOR_SPARC_HPP

View File

@ -29,7 +29,7 @@
#include "runtime/sharedRuntime.hpp"
#include "vmreg_sparc.inline.hpp"
jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, oop method) {
jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Handle method, TRAPS) {
if (inst->is_call() || inst->is_jump()) {
return pc_offset + NativeCall::instruction_size;
} else if (inst->is_call_reg()) {
@ -37,12 +37,12 @@ jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, oop
} else if (inst->is_sethi()) {
return pc_offset + NativeFarCall::instruction_size;
} else {
fatal("unsupported type of instruction for call site");
JVMCI_ERROR_0("unsupported type of instruction for call site");
return 0;
}
}
void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& constant) {
void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS) {
address pc = _instructions->start() + pc_offset;
Handle obj = HotSpotObjectConstantImpl::object(constant);
jobject value = JNIHandles::make_local(obj());
@ -52,7 +52,7 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& constant) {
RelocationHolder rspec = oop_Relocation::spec(oop_index);
_instructions->relocate(pc, rspec, 1);
#else
fatal("compressed oop on 32bit");
JVMCI_ERROR("compressed oop on 32bit");
#endif
} else {
NativeMovConstReg* move = nativeMovConstReg_at(pc);
@ -66,20 +66,20 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& constant) {
}
}
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle& constant) {
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS) {
address pc = _instructions->start() + pc_offset;
if (HotSpotMetaspaceConstantImpl::compressed(constant)) {
#ifdef _LP64
NativeMovConstReg32* move = nativeMovConstReg32_at(pc);
narrowKlass narrowOop = record_narrow_metadata_reference(constant);
narrowKlass narrowOop = record_narrow_metadata_reference(constant, CHECK);
move->set_data((intptr_t)narrowOop);
TRACE_jvmci_3("relocating (narrow metaspace constant) at %p/%p", pc, narrowOop);
#else
fatal("compressed Klass* on 32bit");
JVMCI_ERROR("compressed Klass* on 32bit");
#endif
} else {
NativeMovConstReg* move = nativeMovConstReg_at(pc);
Metadata* reference = record_metadata_reference(constant);
Metadata* reference = record_metadata_reference(constant, CHECK);
move->set_data((intptr_t)reference);
TRACE_jvmci_3("relocating (metaspace constant) at %p/%p", pc, reference);
}
@ -106,7 +106,7 @@ void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset
}
}
void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination) {
void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination, TRAPS) {
address pc = (address) inst;
if (inst->is_call()) {
NativeCall* call = nativeCall_at(pc);
@ -117,17 +117,17 @@ void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong forei
jump->set_jump_destination((address) foreign_call_destination);
_instructions->relocate(jump->instruction_address(), runtime_call_Relocation::spec());
} else {
fatal(err_msg("unknown call or jump instruction at " PTR_FORMAT, p2i(pc)));
JVMCI_ERROR("unknown call or jump instruction at " PTR_FORMAT, p2i(pc));
}
TRACE_jvmci_3("relocating (foreign call) at " PTR_FORMAT, p2i(inst));
}
void CodeInstaller::pd_relocate_JavaMethod(oop hotspot_method, jint pc_offset) {
void CodeInstaller::pd_relocate_JavaMethod(Handle hotspot_method, jint pc_offset, TRAPS) {
#ifdef ASSERT
Method* method = NULL;
// we need to check, this might also be an unresolved method
if (hotspot_method->is_a(HotSpotResolvedJavaMethodImpl::klass())) {
method = getMethodFromHotSpotMethod(hotspot_method);
method = getMethodFromHotSpotMethod(hotspot_method());
}
#endif
switch (_next_call_type) {
@ -156,33 +156,33 @@ void CodeInstaller::pd_relocate_JavaMethod(oop hotspot_method, jint pc_offset) {
break;
}
default:
fatal("invalid _next_call_type value");
JVMCI_ERROR("invalid _next_call_type value");
break;
}
}
void CodeInstaller::pd_relocate_poll(address pc, jint mark) {
void CodeInstaller::pd_relocate_poll(address pc, jint mark, TRAPS) {
switch (mark) {
case POLL_NEAR:
fatal("unimplemented");
JVMCI_ERROR("unimplemented");
break;
case POLL_FAR:
_instructions->relocate(pc, relocInfo::poll_type);
break;
case POLL_RETURN_NEAR:
fatal("unimplemented");
JVMCI_ERROR("unimplemented");
break;
case POLL_RETURN_FAR:
_instructions->relocate(pc, relocInfo::poll_return_type);
break;
default:
fatal("invalid mark value");
JVMCI_ERROR("invalid mark value");
break;
}
}
// convert JVMCI register indices (as used in oop maps) to HotSpot registers
VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg) {
VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg, TRAPS) {
// JVMCI Registers are numbered as follows:
// 0..31: Thirty-two General Purpose registers (CPU Registers)
// 32..63: Thirty-two single precision float registers
@ -199,7 +199,7 @@ VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg) {
} else if(jvmci_reg < 112) {
floatRegisterNumber = 4 * (jvmci_reg - 96);
} else {
fatal("Unknown jvmci register");
JVMCI_ERROR_NULL("invalid register number: %d", jvmci_reg);
}
return as_FloatRegister(floatRegisterNumber)->as_VMReg();
}

View File

@ -4771,3 +4771,243 @@ void MacroAssembler::movftoi_revbytes(FloatRegister src, Register dst, Register
movdtox(src, tmp1);
reverse_bytes_32(tmp1, dst, tmp2);
}
void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register buf, int offset) {
xmulx(xcrc_hi, xK_hi, xtmp_lo);
xmulxhi(xcrc_hi, xK_hi, xtmp_hi);
xmulxhi(xcrc_lo, xK_lo, xcrc_hi);
xmulx(xcrc_lo, xK_lo, xcrc_lo);
xor3(xcrc_lo, xtmp_lo, xcrc_lo);
xor3(xcrc_hi, xtmp_hi, xcrc_hi);
ldxl(buf, G0, xtmp_lo);
inc(buf, 8);
ldxl(buf, G0, xtmp_hi);
inc(buf, 8);
xor3(xcrc_lo, xtmp_lo, xcrc_lo);
xor3(xcrc_hi, xtmp_hi, xcrc_hi);
}
void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register xbuf_hi, Register xbuf_lo) {
mov(xcrc_lo, xtmp_lo);
mov(xcrc_hi, xtmp_hi);
xmulx(xtmp_hi, xK_hi, xtmp_lo);
xmulxhi(xtmp_hi, xK_hi, xtmp_hi);
xmulxhi(xcrc_lo, xK_lo, xcrc_hi);
xmulx(xcrc_lo, xK_lo, xcrc_lo);
xor3(xcrc_lo, xbuf_lo, xcrc_lo);
xor3(xcrc_hi, xbuf_hi, xcrc_hi);
xor3(xcrc_lo, xtmp_lo, xcrc_lo);
xor3(xcrc_hi, xtmp_hi, xcrc_hi);
}
void MacroAssembler::fold_8bit_crc32(Register xcrc, Register table, Register xtmp, Register tmp) {
and3(xcrc, 0xFF, tmp);
sllx(tmp, 2, tmp);
lduw(table, tmp, xtmp);
srlx(xcrc, 8, xcrc);
xor3(xtmp, xcrc, xcrc);
}
void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
and3(crc, 0xFF, tmp);
srlx(crc, 8, crc);
sllx(tmp, 2, tmp);
lduw(table, tmp, tmp);
xor3(tmp, crc, crc);
}
#define CRC32_TMP_REG_NUM 18
#define CRC32_CONST_64 0x163cd6124
#define CRC32_CONST_96 0x0ccaa009e
#define CRC32_CONST_160 0x1751997d0
#define CRC32_CONST_480 0x1c6e41596
#define CRC32_CONST_544 0x154442bd4
void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table) {
Label L_cleanup_loop, L_cleanup_check, L_align_loop, L_align_check;
Label L_main_loop_prologue;
Label L_fold_512b, L_fold_512b_loop, L_fold_128b;
Label L_fold_tail, L_fold_tail_loop;
Label L_8byte_fold_loop, L_8byte_fold_check;
const Register tmp[CRC32_TMP_REG_NUM] = {L0, L1, L2, L3, L4, L5, L6, G1, I0, I1, I2, I3, I4, I5, I7, O4, O5, G3};
Register const_64 = tmp[CRC32_TMP_REG_NUM-1];
Register const_96 = tmp[CRC32_TMP_REG_NUM-1];
Register const_160 = tmp[CRC32_TMP_REG_NUM-2];
Register const_480 = tmp[CRC32_TMP_REG_NUM-1];
Register const_544 = tmp[CRC32_TMP_REG_NUM-2];
set(ExternalAddress(StubRoutines::crc_table_addr()), table);
not1(crc); // ~c
clruwu(crc); // clear upper 32 bits of crc
// Check if below cutoff, proceed directly to cleanup code
mov(31, G4);
cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_check);
// Align buffer to 8 byte boundry
mov(8, O5);
and3(buf, 0x7, O4);
sub(O5, O4, O5);
and3(O5, 0x7, O5);
sub(len, O5, len);
ba(L_align_check);
delayed()->nop();
// Alignment loop, table look up method for up to 7 bytes
bind(L_align_loop);
ldub(buf, 0, O4);
inc(buf);
dec(O5);
xor3(O4, crc, O4);
and3(O4, 0xFF, O4);
sllx(O4, 2, O4);
lduw(table, O4, O4);
srlx(crc, 8, crc);
xor3(O4, crc, crc);
bind(L_align_check);
nop();
cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_align_loop);
// Aligned on 64-bit (8-byte) boundry at this point
// Check if still above cutoff (31-bytes)
mov(31, G4);
cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_check);
// At least 32 bytes left to process
// Free up registers by storing them to FP registers
for (int i = 0; i < CRC32_TMP_REG_NUM; i++) {
movxtod(tmp[i], as_FloatRegister(2*i));
}
// Determine which loop to enter
// Shared prologue
ldxl(buf, G0, tmp[0]);
inc(buf, 8);
ldxl(buf, G0, tmp[1]);
inc(buf, 8);
xor3(tmp[0], crc, tmp[0]); // Fold CRC into first few bytes
and3(crc, 0, crc); // Clear out the crc register
// Main loop needs 128-bytes at least
mov(128, G4);
mov(64, tmp[2]);
cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_main_loop_prologue);
// Less than 64 bytes
nop();
cmp_and_br_short(len, tmp[2], Assembler::lessUnsigned, Assembler::pt, L_fold_tail);
// Between 64 and 127 bytes
set64(CRC32_CONST_96, const_96, tmp[8]);
set64(CRC32_CONST_160, const_160, tmp[9]);
fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[2], tmp[3], buf, 0);
fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[4], tmp[5], buf, 16);
fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[6], tmp[7], buf, 32);
dec(len, 48);
ba(L_fold_tail);
delayed()->nop();
bind(L_main_loop_prologue);
for (int i = 2; i < 8; i++) {
ldxl(buf, G0, tmp[i]);
inc(buf, 8);
}
// Fold total 512 bits of polynomial on each iteration,
// 128 bits per each of 4 parallel streams
set64(CRC32_CONST_480, const_480, tmp[8]);
set64(CRC32_CONST_544, const_544, tmp[9]);
mov(128, G4);
bind(L_fold_512b_loop);
fold_128bit_crc32(tmp[1], tmp[0], const_480, const_544, tmp[9], tmp[8], buf, 0);
fold_128bit_crc32(tmp[3], tmp[2], const_480, const_544, tmp[11], tmp[10], buf, 16);
fold_128bit_crc32(tmp[5], tmp[4], const_480, const_544, tmp[13], tmp[12], buf, 32);
fold_128bit_crc32(tmp[7], tmp[6], const_480, const_544, tmp[15], tmp[14], buf, 64);
dec(len, 64);
cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_fold_512b_loop);
// Fold 512 bits to 128 bits
bind(L_fold_512b);
set64(CRC32_CONST_96, const_96, tmp[8]);
set64(CRC32_CONST_160, const_160, tmp[9]);
fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[3], tmp[2]);
fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[5], tmp[4]);
fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[7], tmp[6]);
dec(len, 48);
// Fold the rest of 128 bits data chunks
bind(L_fold_tail);
mov(32, G4);
cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_fold_128b);
set64(CRC32_CONST_96, const_96, tmp[8]);
set64(CRC32_CONST_160, const_160, tmp[9]);
bind(L_fold_tail_loop);
fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[2], tmp[3], buf, 0);
sub(len, 16, len);
cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_fold_tail_loop);
// Fold the 128 bits in tmps 0 - 1 into tmp 1
bind(L_fold_128b);
set64(CRC32_CONST_64, const_64, tmp[4]);
xmulx(const_64, tmp[0], tmp[2]);
xmulxhi(const_64, tmp[0], tmp[3]);
srl(tmp[2], G0, tmp[4]);
xmulx(const_64, tmp[4], tmp[4]);
srlx(tmp[2], 32, tmp[2]);
sllx(tmp[3], 32, tmp[3]);
or3(tmp[2], tmp[3], tmp[2]);
xor3(tmp[4], tmp[1], tmp[4]);
xor3(tmp[4], tmp[2], tmp[1]);
dec(len, 8);
// Use table lookup for the 8 bytes left in tmp[1]
dec(len, 8);
// 8 8-bit folds to compute 32-bit CRC.
for (int j = 0; j < 4; j++) {
fold_8bit_crc32(tmp[1], table, tmp[2], tmp[3]);
}
srl(tmp[1], G0, crc); // move 32 bits to general register
for (int j = 0; j < 4; j++) {
fold_8bit_crc32(crc, table, tmp[3]);
}
bind(L_8byte_fold_check);
// Restore int registers saved in FP registers
for (int i = 0; i < CRC32_TMP_REG_NUM; i++) {
movdtox(as_FloatRegister(2*i), tmp[i]);
}
ba(L_cleanup_check);
delayed()->nop();
// Table look-up method for the remaining few bytes
bind(L_cleanup_loop);
ldub(buf, 0, O4);
inc(buf);
dec(len);
xor3(O4, crc, O4);
and3(O4, 0xFF, O4);
sllx(O4, 2, O4);
lduw(table, O4, O4);
srlx(crc, 8, crc);
xor3(O4, crc, crc);
bind(L_cleanup_check);
nop();
cmp_and_br_short(len, 0, Assembler::greaterUnsigned, Assembler::pt, L_cleanup_loop);
not1(crc);
}
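For reference (an editorial sketch, not part of the patch), this is the byte-at-a-time algorithm that the alignment and cleanup loops above perform one byte per iteration; the 128-bit folding merely reduces how many bytes have to take this slow path.

#include <cstdint>
#include <cstddef>

uint32_t crc32_bytewise(uint32_t crc, const uint8_t* buf, size_t len,
                        const uint32_t table[256]) {
  crc = ~crc;                                        // not1(crc) on entry
  for (size_t i = 0; i < len; i++)
    crc = table[(crc ^ buf[i]) & 0xFF] ^ (crc >> 8); // as in L_cleanup_loop
  return ~crc;                                       // not1(crc) on exit
}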

View File

@ -904,7 +904,9 @@ public:
inline void ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset = 0);
// little-endian
inline void ldxl(Register s1, Register s2, Register d) { ldxa(s1, s2, ASI_PRIMARY_LITTLE, d); }
inline void lduwl(Register s1, Register s2, Register d) { lduwa(s1, s2, ASI_PRIMARY_LITTLE, d); }
inline void ldswl(Register s1, Register s2, Register d) { ldswa(s1, s2, ASI_PRIMARY_LITTLE, d);}
inline void ldxl( Register s1, Register s2, Register d) { ldxa(s1, s2, ASI_PRIMARY_LITTLE, d); }
inline void ldfl(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { ldfa(w, s1, s2, ASI_PRIMARY_LITTLE, d); }
// membar psuedo instruction. takes into account target memory model.
@ -1469,6 +1471,15 @@ public:
void movitof_revbytes(Register src, FloatRegister dst, Register tmp1, Register tmp2);
void movftoi_revbytes(FloatRegister src, Register dst, Register tmp1, Register tmp2);
// CRC32 code for java.util.zip.CRC32::updateBytes0() instrinsic.
void kernel_crc32(Register crc, Register buf, Register len, Register table);
// Fold 128-bit data chunk
void fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register buf, int offset);
void fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register xbuf_hi, Register xbuf_lo);
// Fold 8-bit data
void fold_8bit_crc32(Register xcrc, Register table, Register xtmp, Register tmp);
void fold_8bit_crc32(Register crc, Register table, Register tmp);
#undef VIRTUAL
};

View File

@ -3036,6 +3036,7 @@ void SharedRuntime::generate_deopt_blob() {
__ mov((int32_t)Deoptimization::Unpack_reexecute, L0deopt_mode);
__ mov(G2_thread, O0);
__ mov(L0deopt_mode, O2);
__ call(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap));
__ delayed()->nop();
oop_maps->add_gc_map( __ offset()-start, map->deep_copy());
@ -3121,6 +3122,7 @@ void SharedRuntime::generate_deopt_blob() {
// do the call by hand so we can get the oopmap
__ mov(G2_thread, L7_thread_cache);
__ mov(L0deopt_mode, O1);
__ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
__ delayed()->mov(G2_thread, O0);
@ -3146,6 +3148,7 @@ void SharedRuntime::generate_deopt_blob() {
RegisterSaver::restore_result_registers(masm);
__ ld(O2UnrollBlock, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), G4deopt_mode);
Label noException;
__ cmp_and_br_short(G4deopt_mode, Deoptimization::Unpack_exception, Assembler::notEqual, Assembler::pt, noException);
@ -3269,7 +3272,8 @@ void SharedRuntime::generate_uncommon_trap_blob() {
__ save_frame(0);
__ set_last_Java_frame(SP, noreg);
__ mov(I0, O2klass_index);
__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index);
__ mov(Deoptimization::Unpack_uncommon_trap, O3); // exec mode
__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index, O3);
__ reset_last_Java_frame();
__ mov(O0, O2UnrollBlock->after_save());
__ restore();
@ -3278,6 +3282,15 @@ void SharedRuntime::generate_uncommon_trap_blob() {
__ mov(O2UnrollBlock, O2UnrollBlock->after_save());
__ restore();
#ifdef ASSERT
{ Label L;
__ ld(O2UnrollBlock, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), O1);
__ cmp_and_br_short(O1, Deoptimization::Unpack_uncommon_trap, Assembler::equal, Assembler::pt, L);
__ stop("SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap");
__ bind(L);
}
#endif
// Allocate new interpreter frame(s) and possible c2i adapter frame
make_new_frames(masm, false);

View File

@ -1860,6 +1860,17 @@ const bool Matcher::match_rule_supported(int opcode) {
return true; // Per default match rules are supported.
}
const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
// TODO
// identify extra cases that we might want to provide match rules for
// e.g. Op_ vector nodes and other intrinsics while guarding with vlen
bool ret_value = match_rule_supported(opcode);
// Add rules here.
return ret_value; // Per default match rules are supported.
}
const int Matcher::float_pressure(int default_pressure_threshold) {
return default_pressure_threshold;
}
@ -1905,7 +1916,7 @@ const bool Matcher::misaligned_vectors_ok() {
}
// Current (2013) SPARC platforms need to read original key
// to construct decryption expanded key
// to construct decryption expanded key
const bool Matcher::pass_original_key_for_aes() {
return true;
}
@ -2612,7 +2623,7 @@ encode %{
if (stub == NULL && !(TraceJumps && Compile::current()->in_scratch_emit_size())) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
}
}
%}
@ -3132,10 +3143,10 @@ ins_attrib ins_size(32); // Required size attribute (in bits)
// AVOID_NONE - instruction can be placed anywhere
// AVOID_BEFORE - instruction cannot be placed after an
// instruction with MachNode::AVOID_AFTER
// AVOID_AFTER - the next instruction cannot be the one
// AVOID_AFTER - the next instruction cannot be the one
// with MachNode::AVOID_BEFORE
// AVOID_BEFORE_AND_AFTER - BEFORE and AFTER attributes at
// the same time
// AVOID_BEFORE_AND_AFTER - BEFORE and AFTER attributes at
// the same time
ins_attrib ins_avoid_back_to_back(MachNode::AVOID_NONE);
ins_attrib ins_short_branch(0); // Required flag: is this instruction a

View File

@ -5292,6 +5292,38 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
/**
* Arguments:
*
* Inputs:
* O0 - int crc
* O1 - byte* buf
* O2 - int len
* O3 - int* table
*
* Output:
* O0 - int crc result
*/
address generate_updateBytesCRC32() {
assert(UseCRC32Intrinsics, "need VIS3 instructions");
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");
address start = __ pc();
const Register crc = O0; // crc
const Register buf = O1; // source java byte array address
const Register len = O2; // length
const Register table = O3; // crc_table address (reuse register)
__ kernel_crc32(crc, buf, len, table);
__ retl();
__ delayed()->nop();
return start;
}
void generate_initial() {
// Generates all stubs and initializes the entry points
@ -5324,6 +5356,12 @@ class StubGenerator: public StubCodeGenerator {
// Build this early so it's available for the interpreter.
StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
if (UseCRC32Intrinsics) {
// set table address before stub generation which use it
StubRoutines::_crc_table_adr = (address)StubRoutines::Sparc::_crc_table;
StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,3 +52,98 @@ address StubRoutines::Sparc::_stop_subroutine_entry = NULL;
address StubRoutines::Sparc::_flush_callers_register_windows_entry = CAST_FROM_FN_PTR(address, bootstrap_flush_windows);
address StubRoutines::Sparc::_partial_subtype_check = NULL;
uint64_t StubRoutines::Sparc::_crc_by128_masks[] =
{
/* The fields in this structure are arranged so that they can be
* picked up two at a time with 128-bit loads.
*
* Because of flipped bit order for this CRC polynomials
* the constant for X**N is left-shifted by 1. This is because
* a 64 x 64 polynomial multiply produces a 127-bit result
* but the highest term is always aligned to bit 0 in the container.
* Pre-shifting by one fixes this, at the cost of potentially making
* the 32-bit constant no longer fit in a 32-bit container (thus the
* use of uint64_t, though this is also the size used by the carry-
* less multiply instruction.
*
* In addition, the flipped bit order and highest-term-at-least-bit
* multiply changes the constants used. The 96-bit result will be
* aligned to the high-term end of the target 128-bit container,
* not the low-term end; that is, instead of a 512-bit or 576-bit fold,
* instead it is a 480 (=512-32) or 544 (=512+64-32) bit fold.
*
* This cause additional problems in the 128-to-64-bit reduction; see the
* code for details. By storing a mask in the otherwise unused half of
* a 128-bit constant, bits can be cleared before multiplication without
* storing and reloading. Note that staying on a 128-bit datapath means
* that some data is uselessly stored and some unused data is intersected
* with an irrelevant constant.
*/
((uint64_t) 0xffffffffUL), /* low of K_M_64 */
((uint64_t) 0xb1e6b092U << 1), /* high of K_M_64 */
((uint64_t) 0xba8ccbe8U << 1), /* low of K_160_96 */
((uint64_t) 0x6655004fU << 1), /* high of K_160_96 */
((uint64_t) 0xaa2215eaU << 1), /* low of K_544_480 */
((uint64_t) 0xe3720acbU << 1) /* high of K_544_480 */
};
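As a cross-check (editorial note, not part of the patch), the pre-shifted values stored above are exactly the CRC32_CONST_* folding constants that MacroAssembler::kernel_crc32 materializes with set64 in macroAssembler_sparc.cpp:

#include <cstdint>

static_assert(((uint64_t)0xb1e6b092U << 1) == 0x163cd6124ULL, "high of K_M_64    -> CRC32_CONST_64");
static_assert(((uint64_t)0x6655004fU << 1) == 0x0ccaa009eULL, "high of K_160_96  -> CRC32_CONST_96");
static_assert(((uint64_t)0xba8ccbe8U << 1) == 0x1751997d0ULL, "low of K_160_96   -> CRC32_CONST_160");
static_assert(((uint64_t)0xe3720acbU << 1) == 0x1c6e41596ULL, "high of K_544_480 -> CRC32_CONST_480");
static_assert(((uint64_t)0xaa2215eaU << 1) == 0x154442bd4ULL, "low of K_544_480  -> CRC32_CONST_544");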
/**
* crc_table[] from jdk/src/java.base/share/native/libzip/zlib-1.2.8/crc32.h
*/
juint StubRoutines::Sparc::_crc_table[] =
{
0x00000000UL, 0x77073096UL, 0xee0e612cUL, 0x990951baUL, 0x076dc419UL,
0x706af48fUL, 0xe963a535UL, 0x9e6495a3UL, 0x0edb8832UL, 0x79dcb8a4UL,
0xe0d5e91eUL, 0x97d2d988UL, 0x09b64c2bUL, 0x7eb17cbdUL, 0xe7b82d07UL,
0x90bf1d91UL, 0x1db71064UL, 0x6ab020f2UL, 0xf3b97148UL, 0x84be41deUL,
0x1adad47dUL, 0x6ddde4ebUL, 0xf4d4b551UL, 0x83d385c7UL, 0x136c9856UL,
0x646ba8c0UL, 0xfd62f97aUL, 0x8a65c9ecUL, 0x14015c4fUL, 0x63066cd9UL,
0xfa0f3d63UL, 0x8d080df5UL, 0x3b6e20c8UL, 0x4c69105eUL, 0xd56041e4UL,
0xa2677172UL, 0x3c03e4d1UL, 0x4b04d447UL, 0xd20d85fdUL, 0xa50ab56bUL,
0x35b5a8faUL, 0x42b2986cUL, 0xdbbbc9d6UL, 0xacbcf940UL, 0x32d86ce3UL,
0x45df5c75UL, 0xdcd60dcfUL, 0xabd13d59UL, 0x26d930acUL, 0x51de003aUL,
0xc8d75180UL, 0xbfd06116UL, 0x21b4f4b5UL, 0x56b3c423UL, 0xcfba9599UL,
0xb8bda50fUL, 0x2802b89eUL, 0x5f058808UL, 0xc60cd9b2UL, 0xb10be924UL,
0x2f6f7c87UL, 0x58684c11UL, 0xc1611dabUL, 0xb6662d3dUL, 0x76dc4190UL,
0x01db7106UL, 0x98d220bcUL, 0xefd5102aUL, 0x71b18589UL, 0x06b6b51fUL,
0x9fbfe4a5UL, 0xe8b8d433UL, 0x7807c9a2UL, 0x0f00f934UL, 0x9609a88eUL,
0xe10e9818UL, 0x7f6a0dbbUL, 0x086d3d2dUL, 0x91646c97UL, 0xe6635c01UL,
0x6b6b51f4UL, 0x1c6c6162UL, 0x856530d8UL, 0xf262004eUL, 0x6c0695edUL,
0x1b01a57bUL, 0x8208f4c1UL, 0xf50fc457UL, 0x65b0d9c6UL, 0x12b7e950UL,
0x8bbeb8eaUL, 0xfcb9887cUL, 0x62dd1ddfUL, 0x15da2d49UL, 0x8cd37cf3UL,
0xfbd44c65UL, 0x4db26158UL, 0x3ab551ceUL, 0xa3bc0074UL, 0xd4bb30e2UL,
0x4adfa541UL, 0x3dd895d7UL, 0xa4d1c46dUL, 0xd3d6f4fbUL, 0x4369e96aUL,
0x346ed9fcUL, 0xad678846UL, 0xda60b8d0UL, 0x44042d73UL, 0x33031de5UL,
0xaa0a4c5fUL, 0xdd0d7cc9UL, 0x5005713cUL, 0x270241aaUL, 0xbe0b1010UL,
0xc90c2086UL, 0x5768b525UL, 0x206f85b3UL, 0xb966d409UL, 0xce61e49fUL,
0x5edef90eUL, 0x29d9c998UL, 0xb0d09822UL, 0xc7d7a8b4UL, 0x59b33d17UL,
0x2eb40d81UL, 0xb7bd5c3bUL, 0xc0ba6cadUL, 0xedb88320UL, 0x9abfb3b6UL,
0x03b6e20cUL, 0x74b1d29aUL, 0xead54739UL, 0x9dd277afUL, 0x04db2615UL,
0x73dc1683UL, 0xe3630b12UL, 0x94643b84UL, 0x0d6d6a3eUL, 0x7a6a5aa8UL,
0xe40ecf0bUL, 0x9309ff9dUL, 0x0a00ae27UL, 0x7d079eb1UL, 0xf00f9344UL,
0x8708a3d2UL, 0x1e01f268UL, 0x6906c2feUL, 0xf762575dUL, 0x806567cbUL,
0x196c3671UL, 0x6e6b06e7UL, 0xfed41b76UL, 0x89d32be0UL, 0x10da7a5aUL,
0x67dd4accUL, 0xf9b9df6fUL, 0x8ebeeff9UL, 0x17b7be43UL, 0x60b08ed5UL,
0xd6d6a3e8UL, 0xa1d1937eUL, 0x38d8c2c4UL, 0x4fdff252UL, 0xd1bb67f1UL,
0xa6bc5767UL, 0x3fb506ddUL, 0x48b2364bUL, 0xd80d2bdaUL, 0xaf0a1b4cUL,
0x36034af6UL, 0x41047a60UL, 0xdf60efc3UL, 0xa867df55UL, 0x316e8eefUL,
0x4669be79UL, 0xcb61b38cUL, 0xbc66831aUL, 0x256fd2a0UL, 0x5268e236UL,
0xcc0c7795UL, 0xbb0b4703UL, 0x220216b9UL, 0x5505262fUL, 0xc5ba3bbeUL,
0xb2bd0b28UL, 0x2bb45a92UL, 0x5cb36a04UL, 0xc2d7ffa7UL, 0xb5d0cf31UL,
0x2cd99e8bUL, 0x5bdeae1dUL, 0x9b64c2b0UL, 0xec63f226UL, 0x756aa39cUL,
0x026d930aUL, 0x9c0906a9UL, 0xeb0e363fUL, 0x72076785UL, 0x05005713UL,
0x95bf4a82UL, 0xe2b87a14UL, 0x7bb12baeUL, 0x0cb61b38UL, 0x92d28e9bUL,
0xe5d5be0dUL, 0x7cdcefb7UL, 0x0bdbdf21UL, 0x86d3d2d4UL, 0xf1d4e242UL,
0x68ddb3f8UL, 0x1fda836eUL, 0x81be16cdUL, 0xf6b9265bUL, 0x6fb077e1UL,
0x18b74777UL, 0x88085ae6UL, 0xff0f6a70UL, 0x66063bcaUL, 0x11010b5cUL,
0x8f659effUL, 0xf862ae69UL, 0x616bffd3UL, 0x166ccf45UL, 0xa00ae278UL,
0xd70dd2eeUL, 0x4e048354UL, 0x3903b3c2UL, 0xa7672661UL, 0xd06016f7UL,
0x4969474dUL, 0x3e6e77dbUL, 0xaed16a4aUL, 0xd9d65adcUL, 0x40df0b66UL,
0x37d83bf0UL, 0xa9bcae53UL, 0xdebb9ec5UL, 0x47b2cf7fUL, 0x30b5ffe9UL,
0xbdbdf21cUL, 0xcabac28aUL, 0x53b39330UL, 0x24b4a3a6UL, 0xbad03605UL,
0xcdd70693UL, 0x54de5729UL, 0x23d967bfUL, 0xb3667a2eUL, 0xc4614ab8UL,
0x5d681b02UL, 0x2a6f2b94UL, 0xb40bbe37UL, 0xc30c8ea1UL, 0x5a05df1bUL,
0x2d02ef8dUL
};

View File

@ -53,6 +53,9 @@ class Sparc {
static address _flush_callers_register_windows_entry;
static address _partial_subtype_check;
// masks and table for CRC32
static uint64_t _crc_by128_masks[];
static juint _crc_table[];
public:
// test assembler stop routine by setting registers
@ -65,6 +68,8 @@ class Sparc {
static intptr_t* (*flush_callers_register_windows_func())() { return CAST_TO_FN_PTR(intptr_t* (*)(void), _flush_callers_register_windows_entry); }
static address partial_subtype_check() { return _partial_subtype_check; }
static address crc_by128_masks_addr() { return (address)_crc_by128_masks; }
};
#endif // CPU_SPARC_VM_STUBROUTINES_SPARC_HPP

View File

@ -803,6 +803,106 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
return NULL;
}
/**
* Method entry for static native methods:
* int java.util.zip.CRC32.update(int crc, int b)
*/
address InterpreterGenerator::generate_CRC32_update_entry() {
if (UseCRC32Intrinsics) {
address entry = __ pc();
Label L_slow_path;
// If we need a safepoint check, generate full interpreter entry.
ExternalAddress state(SafepointSynchronize::address_of_state());
__ set(ExternalAddress(SafepointSynchronize::address_of_state()), O2);
__ set(SafepointSynchronize::_not_synchronized, O3);
__ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);
// Load parameters
const Register crc = O0; // initial crc
const Register val = O1; // byte to update with
const Register table = O2; // address of 256-entry lookup table
__ ldub(Gargs, 3, val);
__ lduw(Gargs, 8, crc);
__ set(ExternalAddress(StubRoutines::crc_table_addr()), table);
__ not1(crc); // ~crc
__ clruwu(crc);
__ update_byte_crc32(crc, val, table);
__ not1(crc); // ~crc
// result in O0
__ retl();
__ delayed()->nop();
// generate a vanilla native entry as the slow path
__ bind(L_slow_path);
__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
return entry;
}
return NULL;
}
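Functionally, the fast path generated above matches the classic table-driven byte update: invert the incoming crc, fold in one byte via the lookup table, and invert again. A minimal C++ reference sketch, assuming crc_table is the 256-entry table shown earlier, would be:

#include <cstdint>

extern const uint32_t crc_table[256];  // the table shown earlier

// Reference semantics of CRC32.update(crc, b).
static uint32_t crc32_update_byte(uint32_t crc, uint8_t b) {
  crc = ~crc;
  crc = crc_table[(crc ^ b) & 0xff] ^ (crc >> 8);
  return ~crc;
}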
/**
* Method entry for static native methods:
* int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
* int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
*/
address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
if (UseCRC32Intrinsics) {
address entry = __ pc();
Label L_slow_path;
// If we need a safepoint check, generate full interpreter entry.
ExternalAddress state(SafepointSynchronize::address_of_state());
__ set(ExternalAddress(SafepointSynchronize::address_of_state()), O2);
__ set(SafepointSynchronize::_not_synchronized, O3);
__ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);
// Load parameters from the stack
const Register crc = O0; // initial crc
const Register buf = O1; // source java byte array address
const Register len = O2; // len
const Register offset = O3; // offset
// Arguments are reversed on java expression stack
// Calculate address of start element
if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
__ lduw(Gargs, 0, len);
__ lduw(Gargs, 8, offset);
__ ldx( Gargs, 16, buf);
__ lduw(Gargs, 32, crc);
__ add(buf, offset, buf);
} else {
__ lduw(Gargs, 0, len);
__ lduw(Gargs, 8, offset);
__ ldx( Gargs, 16, buf);
__ lduw(Gargs, 24, crc);
__ add(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE), buf); // account for the header size
__ add(buf, offset, buf);
}
// Call the crc32 kernel
__ MacroAssembler::save_thread(L7_thread_cache);
__ kernel_crc32(crc, buf, len, O3);
__ MacroAssembler::restore_thread(L7_thread_cache);
// result in O0
__ retl();
__ delayed()->nop();
// generate a vanilla native entry as the slow path
__ bind(L_slow_path);
__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
return entry;
}
return NULL;
}
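Here kernel_crc32 plays the role of the byte loop in the reference sketch below; the real kernel folds several bytes per iteration, so this is only a statement of the semantics, again assuming the crc_table shown earlier:

#include <cstdint>
#include <cstddef>

extern const uint32_t crc_table[256];  // the table shown earlier

// Reference semantics of CRC32.updateBytes once buf already points at the
// first byte to process (i.e. after the offset/header adjustment above).
static uint32_t crc32_update_bytes(uint32_t crc, const uint8_t* buf, size_t len) {
  crc = ~crc;
  for (size_t i = 0; i < len; i++) {
    crc = crc_table[(crc ^ buf[i]) & 0xff] ^ (crc >> 8);
  }
  return ~crc;
}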
//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method

View File

@ -229,35 +229,35 @@ void VM_Version::initialize() {
// SPARC T4 and above should have support for AES instructions
if (has_aes()) {
if (UseVIS > 2) { // AES intrinsics use MOVxTOd/MOVdTOx which are VIS3
if (FLAG_IS_DEFAULT(UseAES)) {
FLAG_SET_DEFAULT(UseAES, true);
if (FLAG_IS_DEFAULT(UseAES)) {
FLAG_SET_DEFAULT(UseAES, true);
}
if (!UseAES) {
if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
}
if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
FLAG_SET_DEFAULT(UseAESIntrinsics, true);
}
// we disable both the AES flags if either of them is disabled on the command line
if (!UseAES || !UseAESIntrinsics) {
FLAG_SET_DEFAULT(UseAES, false);
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
} else {
// The AES intrinsic stubs require AES instruction support (of course)
// but also require VIS3 mode or higher for the instructions it uses.
if (UseVIS > 2) {
if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
FLAG_SET_DEFAULT(UseAESIntrinsics, true);
}
} else {
if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
warning("SPARC AES intrinsics require VIS3 instructions. Intrinsics will be disabled.");
}
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
} else {
if (UseAES || UseAESIntrinsics) {
warning("SPARC AES intrinsics require VIS3 instruction support. Intrinsics will be disabled.");
if (UseAES) {
FLAG_SET_DEFAULT(UseAES, false);
}
if (UseAESIntrinsics) {
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
}
}
} else if (UseAES || UseAESIntrinsics) {
warning("AES instructions are not available on this CPU");
if (UseAES) {
if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
warning("AES instructions are not available on this CPU");
FLAG_SET_DEFAULT(UseAES, false);
}
if (UseAESIntrinsics) {
if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
warning("AES intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
}
@ -347,6 +347,15 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
}
if (UseVIS > 2) {
if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
}
} else if (UseCRC32Intrinsics) {
warning("SPARC CRC32 intrinsics require VIS3 insructions support. Intriniscs will be disabled");
FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
}
if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
(cache_line_size > ContendedPaddingWidth))
ContendedPaddingWidth = cache_line_size;
@ -358,7 +367,6 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseUnalignedAccesses, false);
}
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose) {
tty->print_cr("L1 data cache line size: %u", L1_data_cache_line_size());
tty->print_cr("L2 data cache line size: %u", L2_data_cache_line_size());
@ -391,7 +399,6 @@ void VM_Version::initialize() {
tty->print_cr("ContendedPaddingWidth %d", (int) ContendedPaddingWidth);
}
}
#endif // PRODUCT
}
void VM_Version::print_features() {
@ -400,7 +407,7 @@ void VM_Version::print_features() {
int VM_Version::determine_features() {
if (UseV8InstrsOnly) {
NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Version is Forced-V8");)
if (PrintMiscellaneous && Verbose) { tty->print_cr("Version is Forced-V8"); }
return generic_v8_m;
}
@ -416,12 +423,12 @@ int VM_Version::determine_features() {
if (is_T_family(features)) {
// Happy to accommodate...
} else {
NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Version is Forced-Niagara");)
if (PrintMiscellaneous && Verbose) { tty->print_cr("Version is Forced-Niagara"); }
features |= T_family_m;
}
} else {
if (is_T_family(features) && !FLAG_IS_DEFAULT(UseNiagaraInstrs)) {
NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Version is Forced-Not-Niagara");)
if (PrintMiscellaneous && Verbose) { tty->print_cr("Version is Forced-Not-Niagara"); }
features &= ~(T_family_m | T1_model_m);
} else {
// Happy to accommodate...

File diff suppressed because it is too large

View File

@ -438,6 +438,8 @@ class ArrayAddress VALUE_OBJ_CLASS_SPEC {
};
class InstructionAttr;
// On 64-bit this reflects the fxsave size, which is 512 bytes, plus the new xsave area on EVEX, which is another 2176 bytes
// See fxsave and xsave(EVEX enabled) documentation for layout
const int FPUStateSizeInWords = NOT_LP64(27) LP64_ONLY(2688 / wordSize);
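Spelling out the arithmetic behind the 2688 constant (a sanity check, not part of the patch): a 512-byte fxsave area plus the 2176-byte EVEX xsave extension gives 2688 bytes, i.e. 336 words on a 64-bit VM.

// 512 (fxsave) + 2176 (EVEX xsave extension) = 2688 bytes
// 2688 / 8 = 336 words when wordSize is 8 (LP64)
static_assert(512 + 2176 == 2688, "FPU state size in bytes");
static_assert(2688 / 8 == 336, "FPU state size in 64-bit words");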
@ -568,7 +570,8 @@ class Assembler : public AbstractAssembler {
EVEX_8bit = 0,
EVEX_16bit = 1,
EVEX_32bit = 2,
EVEX_64bit = 3
EVEX_64bit = 3,
EVEX_NObit = 4
};
enum WhichOperand {
@ -598,16 +601,12 @@ class Assembler : public AbstractAssembler {
private:
int _evex_encoding;
int _input_size_in_bits;
int _avx_vector_len;
int _tuple_type;
bool _is_evex_instruction;
bool _legacy_mode_bw;
bool _legacy_mode_dq;
bool _legacy_mode_vl;
bool _legacy_mode_vlbw;
bool _instruction_uses_vl;
class InstructionAttr *_attributes;
// 64bit prefixes
int prefix_and_encode(int reg_enc, bool byteinst = false);
@ -637,181 +636,30 @@ private:
int rex_prefix_and_encode(int dst_enc, int src_enc,
VexSimdPrefix pre, VexOpcode opc, bool rex_w);
void vex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w,
int nds_enc, VexSimdPrefix pre, VexOpcode opc,
int vector_len);
void vex_prefix(bool vex_r, bool vex_b, bool vex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc);
void evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w, bool evex_r, bool evex_v,
int nds_enc, VexSimdPrefix pre, VexOpcode opc,
bool is_extended_context, bool is_merge_context,
int vector_len, bool no_mask_reg );
void evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_r, bool evex_v,
int nds_enc, VexSimdPrefix pre, VexOpcode opc);
void vex_prefix(Address adr, int nds_enc, int xreg_enc,
VexSimdPrefix pre, VexOpcode opc,
bool vex_w, int vector_len,
bool legacy_mode = false, bool no_mask_reg = false);
void vex_prefix(XMMRegister dst, XMMRegister nds, Address src,
VexSimdPrefix pre, int vector_len = AVX_128bit,
bool no_mask_reg = false, bool legacy_mode = false) {
int dst_enc = dst->encoding();
int nds_enc = nds->is_valid() ? nds->encoding() : 0;
vex_prefix(src, nds_enc, dst_enc, pre, VEX_OPCODE_0F, false, vector_len, legacy_mode, no_mask_reg);
}
void vex_prefix_q(XMMRegister dst, XMMRegister nds, Address src,
VexSimdPrefix pre, int vector_len = AVX_128bit,
bool no_mask_reg = false) {
int dst_enc = dst->encoding();
int nds_enc = nds->is_valid() ? nds->encoding() : 0;
vex_prefix(src, nds_enc, dst_enc, pre, VEX_OPCODE_0F, true, vector_len, false, no_mask_reg);
}
void vex_prefix_0F38(Register dst, Register nds, Address src, bool no_mask_reg = false) {
bool vex_w = false;
int vector_len = AVX_128bit;
vex_prefix(src, nds->encoding(), dst->encoding(),
VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w,
vector_len, no_mask_reg);
}
void vex_prefix_0F38_legacy(Register dst, Register nds, Address src, bool no_mask_reg = false) {
bool vex_w = false;
int vector_len = AVX_128bit;
vex_prefix(src, nds->encoding(), dst->encoding(),
VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w,
vector_len, true, no_mask_reg);
}
void vex_prefix_0F38_q(Register dst, Register nds, Address src, bool no_mask_reg = false) {
bool vex_w = true;
int vector_len = AVX_128bit;
vex_prefix(src, nds->encoding(), dst->encoding(),
VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w,
vector_len, no_mask_reg);
}
void vex_prefix_0F38_q_legacy(Register dst, Register nds, Address src, bool no_mask_reg = false) {
bool vex_w = true;
int vector_len = AVX_128bit;
vex_prefix(src, nds->encoding(), dst->encoding(),
VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w,
vector_len, true, no_mask_reg);
}
InstructionAttr *attributes);
int vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc,
VexSimdPrefix pre, VexOpcode opc,
bool vex_w, int vector_len,
bool legacy_mode, bool no_mask_reg);
InstructionAttr *attributes);
int vex_prefix_0F38_and_encode(Register dst, Register nds, Register src, bool no_mask_reg = false) {
bool vex_w = false;
int vector_len = AVX_128bit;
return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector_len,
false, no_mask_reg);
}
void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre,
VexOpcode opc, InstructionAttr *attributes);
int vex_prefix_0F38_and_encode_legacy(Register dst, Register nds, Register src, bool no_mask_reg = false) {
bool vex_w = false;
int vector_len = AVX_128bit;
return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector_len,
true, no_mask_reg);
}
int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre,
VexOpcode opc, InstructionAttr *attributes);
int vex_prefix_0F38_and_encode_q(Register dst, Register nds, Register src, bool no_mask_reg = false) {
bool vex_w = true;
int vector_len = AVX_128bit;
return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector_len,
false, no_mask_reg);
}
int kreg_prefix_and_encode(KRegister dst, KRegister nds, KRegister src, VexSimdPrefix pre,
VexOpcode opc, InstructionAttr *attributes);
int vex_prefix_0F38_and_encode_q_legacy(Register dst, Register nds, Register src, bool no_mask_reg = false) {
bool vex_w = true;
int vector_len = AVX_128bit;
return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector_len,
true, no_mask_reg);
}
int vex_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
VexSimdPrefix pre, int vector_len = AVX_128bit,
VexOpcode opc = VEX_OPCODE_0F, bool legacy_mode = false,
bool no_mask_reg = false) {
int src_enc = src->encoding();
int dst_enc = dst->encoding();
int nds_enc = nds->is_valid() ? nds->encoding() : 0;
return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, false, vector_len, legacy_mode, no_mask_reg);
}
void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr,
VexSimdPrefix pre, bool no_mask_reg, VexOpcode opc = VEX_OPCODE_0F,
bool rex_w = false, int vector_len = AVX_128bit, bool legacy_mode = false);
void simd_prefix(XMMRegister dst, Address src, VexSimdPrefix pre,
bool no_mask_reg, VexOpcode opc = VEX_OPCODE_0F) {
simd_prefix(dst, xnoreg, src, pre, no_mask_reg, opc);
}
void simd_prefix(Address dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg) {
simd_prefix(src, dst, pre, no_mask_reg);
}
void simd_prefix_q(XMMRegister dst, XMMRegister nds, Address src,
VexSimdPrefix pre, bool no_mask_reg = false) {
bool rex_w = true;
simd_prefix(dst, nds, src, pre, no_mask_reg, VEX_OPCODE_0F, rex_w);
}
int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
VexSimdPrefix pre, bool no_mask_reg,
VexOpcode opc = VEX_OPCODE_0F,
bool rex_w = false, int vector_len = AVX_128bit,
bool legacy_mode = false);
int kreg_prefix_and_encode(KRegister dst, KRegister nds, KRegister src,
VexSimdPrefix pre, bool no_mask_reg,
VexOpcode opc = VEX_OPCODE_0F,
bool rex_w = false, int vector_len = AVX_128bit);
int kreg_prefix_and_encode(KRegister dst, KRegister nds, Register src,
VexSimdPrefix pre, bool no_mask_reg,
VexOpcode opc = VEX_OPCODE_0F,
bool rex_w = false, int vector_len = AVX_128bit);
// Move/convert 32-bit integer value.
int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, Register src,
VexSimdPrefix pre, bool no_mask_reg) {
// It is OK to cast from Register to XMMRegister to pass argument here
// since only the encoding is used in simd_prefix_and_encode() and the number of
// Gen and Xmm registers is the same.
return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre, no_mask_reg, VEX_OPCODE_0F);
}
int simd_prefix_and_encode(XMMRegister dst, Register src, VexSimdPrefix pre, bool no_mask_reg) {
return simd_prefix_and_encode(dst, xnoreg, src, pre, no_mask_reg);
}
int simd_prefix_and_encode(Register dst, XMMRegister src,
VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F,
bool no_mask_reg = false) {
return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre, no_mask_reg, opc);
}
// Move/convert 64-bit integer value.
int simd_prefix_and_encode_q(XMMRegister dst, XMMRegister nds, Register src,
VexSimdPrefix pre, bool no_mask_reg = false) {
bool rex_w = true;
return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre, no_mask_reg, VEX_OPCODE_0F, rex_w);
}
int simd_prefix_and_encode_q(XMMRegister dst, Register src, VexSimdPrefix pre, bool no_mask_reg) {
return simd_prefix_and_encode_q(dst, xnoreg, src, pre, no_mask_reg);
}
int simd_prefix_and_encode_q(Register dst, XMMRegister src,
VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F,
bool no_mask_reg = false) {
bool rex_w = true;
return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre, no_mask_reg, opc, rex_w);
}
int kreg_prefix_and_encode(KRegister dst, KRegister nds, Register src, VexSimdPrefix pre,
VexOpcode opc, InstructionAttr *attributes);
// Helper functions for groups of instructions
void emit_arith_b(int op1, int op2, Register dst, int imm8);
@ -821,27 +669,6 @@ private:
void emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32);
void emit_arith(int op1, int op2, Register dst, Register src);
void emit_simd_arith(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre, bool no_mask_reg = false, bool legacy_mode = false);
void emit_simd_arith_q(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre, bool no_mask_reg = false);
void emit_simd_arith(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg = false, bool legacy_mode = false);
void emit_simd_arith_q(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg = false);
void emit_simd_arith_nonds(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre, bool no_mask_reg = false);
void emit_simd_arith_nonds_q(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre, bool no_mask_reg = false);
void emit_simd_arith_nonds(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg = false, bool legacy_mode = false);
void emit_simd_arith_nonds_q(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg = false);
void emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
Address src, VexSimdPrefix pre, int vector_len,
bool no_mask_reg = false, bool legacy_mode = false);
void emit_vex_arith_q(int opcode, XMMRegister dst, XMMRegister nds,
Address src, VexSimdPrefix pre, int vector_len,
bool no_mask_reg = false);
void emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
XMMRegister src, VexSimdPrefix pre, int vector_len,
bool no_mask_reg = false, bool legacy_mode = false);
void emit_vex_arith_q(int opcode, XMMRegister dst, XMMRegister nds,
XMMRegister src, VexSimdPrefix pre, int vector_len,
bool no_mask_reg = false);
bool emit_compressed_disp_byte(int &disp);
void emit_operand(Register reg,
@ -986,18 +813,16 @@ private:
// belong in macro assembler but there is no need for both varieties to exist
void init_attributes(void) {
_evex_encoding = 0;
_input_size_in_bits = 0;
_avx_vector_len = AVX_NoVec;
_tuple_type = EVEX_ETUP;
_is_evex_instruction = false;
_legacy_mode_bw = (VM_Version::supports_avx512bw() == false);
_legacy_mode_dq = (VM_Version::supports_avx512dq() == false);
_legacy_mode_vl = (VM_Version::supports_avx512vl() == false);
_legacy_mode_vlbw = (VM_Version::supports_avx512vlbw() == false);
_instruction_uses_vl = false;
_attributes = NULL;
}
void set_attributes(InstructionAttr *attributes) { _attributes = attributes; }
void clear_attributes(void) { _attributes = NULL; }
void lea(Register dst, Address src);
void mov(Register dst, Register src);
@ -1506,13 +1331,18 @@ private:
void movddup(XMMRegister dst, XMMRegister src);
void kmovwl(KRegister dst, Register src);
void kmovdl(KRegister dst, Register src);
void kmovql(KRegister dst, KRegister src);
void kmovql(KRegister dst, Register src);
void kmovdl(KRegister dst, Register src);
void kmovwl(KRegister dst, Register src);
void kmovql(Address dst, KRegister src);
void kmovql(KRegister dst, Address src);
void kortestbl(KRegister dst, KRegister src);
void kortestwl(KRegister dst, KRegister src);
void kortestdl(KRegister dst, KRegister src);
void kortestql(KRegister dst, KRegister src);
void movdl(XMMRegister dst, Register src);
void movdl(Register dst, XMMRegister src);
void movdl(XMMRegister dst, Address src);
@ -1537,6 +1367,12 @@ private:
void vmovdqu(XMMRegister dst, XMMRegister src);
// Move Unaligned 512bit Vector
void evmovdqub(Address dst, XMMRegister src, int vector_len);
void evmovdqub(XMMRegister dst, Address src, int vector_len);
void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len);
void evmovdquw(Address dst, XMMRegister src, int vector_len);
void evmovdquw(XMMRegister dst, Address src, int vector_len);
void evmovdquw(XMMRegister dst, XMMRegister src, int vector_len);
void evmovdqul(Address dst, XMMRegister src, int vector_len);
void evmovdqul(XMMRegister dst, Address src, int vector_len);
void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len);
@ -1682,8 +1518,22 @@ private:
void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
void pcmpestri(XMMRegister xmm1, Address src, int imm8);
void pcmpeqb(XMMRegister dst, XMMRegister src);
void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
void pcmpeqw(XMMRegister dst, XMMRegister src);
void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
void pcmpeqd(XMMRegister dst, XMMRegister src);
void vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void evpcmpeqd(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
void pcmpeqq(XMMRegister dst, XMMRegister src);
void vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
void evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len);
void pmovmskb(Register dst, XMMRegister src);
void vpmovmskb(Register dst, XMMRegister src);
@ -1704,7 +1554,7 @@ private:
void pmovzxbw(XMMRegister dst, XMMRegister src);
void pmovzxbw(XMMRegister dst, Address src);
void vpmovzxbw(XMMRegister dst, Address src);
void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
#ifndef _LP64 // no 32bit push/pop on amd64
void popl(Address dst);
@ -2106,12 +1956,12 @@ private:
void vextracti128h(Address dst, XMMRegister src);
// Copy low 256bit into high 256bit of ZMM registers.
void vinserti64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vinsertf64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vextracti64x4h(XMMRegister dst, XMMRegister src);
void vextractf64x4h(XMMRegister dst, XMMRegister src);
void vextractf64x4h(Address dst, XMMRegister src);
void vinsertf64x4h(XMMRegister dst, Address src);
void vinserti64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src, int value);
void vinsertf64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src, int value);
void vextracti64x4h(XMMRegister dst, XMMRegister src, int value);
void vextractf64x4h(XMMRegister dst, XMMRegister src, int value);
void vextractf64x4h(Address dst, XMMRegister src, int value);
void vinsertf64x4h(XMMRegister dst, Address src, int value);
// Copy targeted 128bit segments of the ZMM registers
void vextracti64x2h(XMMRegister dst, XMMRegister src, int value);
@ -2173,4 +2023,95 @@ private:
};
// The Intel x86/Amd64 Assembler attributes: All fields enclosed here are to guide encoding level decisions.
// Specific setter functions are for specialized use; otherwise the defaults, or whatever was supplied
// at object construction, are applied.
class InstructionAttr {
public:
InstructionAttr(
int vector_len,
bool rex_vex_w,
bool legacy_mode,
bool no_reg_mask,
bool uses_vl)
:
_avx_vector_len(vector_len),
_rex_vex_w(rex_vex_w),
_legacy_mode(legacy_mode),
_no_reg_mask(no_reg_mask),
_uses_vl(uses_vl),
_tuple_type(Assembler::EVEX_ETUP),
_input_size_in_bits(Assembler::EVEX_NObit),
_is_evex_instruction(false),
_evex_encoding(0),
_is_clear_context(false),
_is_extended_context(false),
_current_assembler(NULL) {
if (UseAVX < 3) _legacy_mode = true;
}
~InstructionAttr() {
if (_current_assembler != NULL) {
_current_assembler->clear_attributes();
}
_current_assembler = NULL;
}
private:
int _avx_vector_len;
bool _rex_vex_w;
bool _legacy_mode;
bool _no_reg_mask;
bool _uses_vl;
int _tuple_type;
int _input_size_in_bits;
bool _is_evex_instruction;
int _evex_encoding;
bool _is_clear_context;
bool _is_extended_context;
Assembler *_current_assembler;
public:
// query functions for field accessors
int get_vector_len(void) const { return _avx_vector_len; }
bool is_rex_vex_w(void) const { return _rex_vex_w; }
bool is_legacy_mode(void) const { return _legacy_mode; }
bool is_no_reg_mask(void) const { return _no_reg_mask; }
bool uses_vl(void) const { return _uses_vl; }
int get_tuple_type(void) const { return _tuple_type; }
int get_input_size(void) const { return _input_size_in_bits; }
int is_evex_instruction(void) const { return _is_evex_instruction; }
int get_evex_encoding(void) const { return _evex_encoding; }
bool is_clear_context(void) const { return _is_clear_context; }
bool is_extended_context(void) const { return _is_extended_context; }
// Set the vector len manually
void set_vector_len(int vector_len) { _avx_vector_len = vector_len; }
// Set the instruction to be encoded in AVX mode
void set_is_legacy_mode(void) { _legacy_mode = true; }
// Set the current instruction to be encoded as an EVEX instruction
void set_is_evex_instruction(void) { _is_evex_instruction = true; }
// Internal encoding data used in compressed immediate offset programming
void set_evex_encoding(int value) { _evex_encoding = value; }
// Set the Evex.Z field to be used to clear all non directed XMM/YMM/ZMM components
void set_is_clear_context(void) { _is_clear_context = true; }
// Map back to the current assembler so that we can manage object-level association
void set_current_assembler(Assembler *current_assembler) { _current_assembler = current_assembler; }
// Address modifiers used for compressed displacement calculation
void set_address_attributes(int tuple_type, int input_size_in_bits) {
if (VM_Version::supports_evex()) {
_tuple_type = tuple_type;
_input_size_in_bits = input_size_in_bits;
}
}
};
#endif // CPU_X86_VM_ASSEMBLER_X86_HPP
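As a hypothetical illustration of how an emitter is expected to use this class (the concrete call sites are not shown in this hunk, so the values chosen here are illustrative only):

// Sketch: a 128-bit, non-masked instruction describing itself to the prefix
// emitters declared above; EVEX_ETUP and EVEX_32bit are the enum values from
// this header, the boolean choices are made up for the example.
InstructionAttr attributes(Assembler::AVX_128bit, /* rex_vex_w */ false,
                           /* legacy_mode */ false, /* no_reg_mask */ true,
                           /* uses_vl */ false);
attributes.set_address_attributes(Assembler::EVEX_ETUP, Assembler::EVEX_32bit);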

View File

@ -81,7 +81,8 @@ void ConversionStub::emit_code(LIR_Assembler* ce) {
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
ce->store_parameter(_method->as_register(), 1);
Metadata *m = _method->as_constant_ptr()->as_metadata();
ce->store_parameter(m, 1);
ce->store_parameter(_bci, 0);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
ce->add_call_info_here(_info);

View File

@ -2971,6 +2971,14 @@ void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
}
void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) {
assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
__ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m);
}
// This code replaces a call to arraycopy; no exception may
// be thrown in this code, they must be thrown in the System.arraycopy
// activation frame; we could save some checks if this would not be the case
@ -3711,7 +3719,7 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
__ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
}
if (UseAVX > 1) {
if (UseAVX > 0) {
__ vnegatess(dest->as_xmm_float_reg(), dest->as_xmm_float_reg(),
ExternalAddress((address)float_signflip_pool));
} else {
@ -3722,7 +3730,7 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
__ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
}
if (UseAVX > 1) {
if (UseAVX > 0) {
__ vnegatesd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg(),
ExternalAddress((address)double_signflip_pool));
} else {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -49,9 +49,10 @@
Register recv, Label* update_done);
public:
void store_parameter(Register r, int offset_from_esp_in_words);
void store_parameter(jint c, int offset_from_esp_in_words);
void store_parameter(jobject c, int offset_from_esp_in_words);
void store_parameter(Register r, int offset_from_esp_in_words);
void store_parameter(jint c, int offset_from_esp_in_words);
void store_parameter(jobject c, int offset_from_esp_in_words);
void store_parameter(Metadata* c, int offset_from_esp_in_words);
enum { call_stub_size = NOT_LP64(15) LP64_ONLY(28),
exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -80,6 +80,7 @@ LIR_Opr LIRGenerator::divInOpr() { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::divOutOpr() { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::remOutOpr() { return FrameMap::rdx_opr; }
LIR_Opr LIRGenerator::shiftCountOpr() { return FrameMap::rcx_opr; }
LIR_Opr LIRGenerator::syncLockOpr() { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr() { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::getThreadTemp() { return LIR_OprFact::illegalOpr; }

View File

@ -84,6 +84,7 @@ define_pd_global(bool, UseCISCSpill, true);
define_pd_global(bool, OptoScheduling, false);
define_pd_global(bool, OptoBundling, false);
define_pd_global(bool, OptoRegScheduling, true);
define_pd_global(bool, SuperWordLoopUnrollAnalysis, true);
define_pd_global(intx, ReservedCodeCacheSize, 48*M);
define_pd_global(intx, NonProfiledCodeHeapSize, 21*M);

View File

@ -58,6 +58,4 @@ void Compile::pd_compiler2_init() {
OptoReg::invalidate(i);
}
}
SuperWordLoopUnrollAnalysis = true;
}

View File

@ -36,7 +36,7 @@
#include "code/vmreg.hpp"
#include "vmreg_x86.inline.hpp"
jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, oop method) {
jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Handle method, TRAPS) {
if (inst->is_call() || inst->is_jump()) {
assert(NativeCall::instruction_size == (int)NativeJump::instruction_size, "unexpected size");
return (pc_offset + NativeCall::instruction_size);
@ -53,18 +53,17 @@ jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, oop
return (offset);
} else if (inst->is_call_reg()) {
// the inlined vtable stub contains a "call register" instruction
assert(method != NULL, "only valid for virtual calls");
assert(method.not_null(), "only valid for virtual calls");
return (pc_offset + ((NativeCallReg *) inst)->next_instruction_offset());
} else if (inst->is_cond_jump()) {
address pc = (address) (inst);
return pc_offset + (jint) (Assembler::locate_next_instruction(pc) - pc);
} else {
fatal("unsupported type of instruction for call site");
return 0;
JVMCI_ERROR_0("unsupported type of instruction for call site");
}
}
void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& constant) {
void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS) {
address pc = _instructions->start() + pc_offset;
Handle obj = HotSpotObjectConstantImpl::object(constant);
jobject value = JNIHandles::make_local(obj());
@ -75,7 +74,7 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& constant) {
_instructions->relocate(pc, oop_Relocation::spec(oop_index), Assembler::narrow_oop_operand);
TRACE_jvmci_3("relocating (narrow oop constant) at " PTR_FORMAT "/" PTR_FORMAT, p2i(pc), p2i(operand));
#else
fatal("compressed oop on 32bit");
JVMCI_ERROR("compressed oop on 32bit");
#endif
} else {
address operand = Assembler::locate_operand(pc, Assembler::imm_operand);
@ -85,19 +84,19 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& constant) {
}
}
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle& constant) {
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS) {
address pc = _instructions->start() + pc_offset;
if (HotSpotMetaspaceConstantImpl::compressed(constant)) {
#ifdef _LP64
address operand = Assembler::locate_operand(pc, Assembler::narrow_oop_operand);
*((narrowKlass*) operand) = record_narrow_metadata_reference(constant);
*((narrowKlass*) operand) = record_narrow_metadata_reference(constant, CHECK);
TRACE_jvmci_3("relocating (narrow metaspace constant) at " PTR_FORMAT "/" PTR_FORMAT, p2i(pc), p2i(operand));
#else
fatal("compressed Klass* on 32bit");
JVMCI_ERROR("compressed Klass* on 32bit");
#endif
} else {
address operand = Assembler::locate_operand(pc, Assembler::imm_operand);
*((Metadata**) operand) = record_metadata_reference(constant);
*((Metadata**) operand) = record_metadata_reference(constant, CHECK);
TRACE_jvmci_3("relocating (metaspace constant) at " PTR_FORMAT "/" PTR_FORMAT, p2i(pc), p2i(operand));
}
}
@ -117,7 +116,7 @@ void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset
TRACE_jvmci_3("relocating at " PTR_FORMAT "/" PTR_FORMAT " with destination at " PTR_FORMAT " (%d)", p2i(pc), p2i(operand), p2i(dest), data_offset);
}
void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination) {
void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination, TRAPS) {
address pc = (address) inst;
if (inst->is_call()) {
// NOTE: for call without a mov, the offset must fit a 32-bit immediate
@ -139,18 +138,18 @@ void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong forei
*(jint*) disp += ((address) foreign_call_destination) - old_dest;
_instructions->relocate(pc, runtime_call_Relocation::spec(), Assembler::call32_operand);
} else {
fatal("unsupported relocation for foreign call");
JVMCI_ERROR("unsupported relocation for foreign call");
}
TRACE_jvmci_3("relocating (foreign call) at " PTR_FORMAT, p2i(inst));
}
void CodeInstaller::pd_relocate_JavaMethod(oop hotspot_method, jint pc_offset) {
void CodeInstaller::pd_relocate_JavaMethod(Handle hotspot_method, jint pc_offset, TRAPS) {
#ifdef ASSERT
Method* method = NULL;
// we need to check, this might also be an unresolved method
if (hotspot_method->is_a(HotSpotResolvedJavaMethodImpl::klass())) {
method = getMethodFromHotSpotMethod(hotspot_method);
method = getMethodFromHotSpotMethod(hotspot_method());
}
#endif
switch (_next_call_type) {
@ -185,6 +184,7 @@ void CodeInstaller::pd_relocate_JavaMethod(oop hotspot_method, jint pc_offset) {
break;
}
default:
JVMCI_ERROR("invalid _next_call_type value");
break;
}
}
@ -198,7 +198,7 @@ static void relocate_poll_near(address pc) {
}
void CodeInstaller::pd_relocate_poll(address pc, jint mark) {
void CodeInstaller::pd_relocate_poll(address pc, jint mark, TRAPS) {
switch (mark) {
case POLL_NEAR: {
relocate_poll_near(pc);
@ -222,13 +222,13 @@ void CodeInstaller::pd_relocate_poll(address pc, jint mark) {
_instructions->relocate(pc, relocInfo::poll_return_type, Assembler::imm_operand);
break;
default:
fatal("invalid mark value");
JVMCI_ERROR("invalid mark value: %d", mark);
break;
}
}
// convert JVMCI register indices (as used in oop maps) to HotSpot registers
VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg) {
VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg, TRAPS) {
if (jvmci_reg < RegisterImpl::number_of_registers) {
return as_Register(jvmci_reg)->as_VMReg();
} else {
@ -236,8 +236,7 @@ VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg) {
if (floatRegisterNumber < XMMRegisterImpl::number_of_registers) {
return as_XMMRegister(floatRegisterNumber)->as_VMReg();
}
ShouldNotReachHere();
return NULL;
JVMCI_ERROR_NULL("invalid register number: %d", jvmci_reg);
}
}

File diff suppressed because it is too large

View File

@ -962,10 +962,15 @@ public:
void divss(XMMRegister dst, AddressLiteral src);
// Move Unaligned Double Quadword
void movdqu(Address dst, XMMRegister src) { Assembler::movdqu(dst, src); }
void movdqu(XMMRegister dst, Address src) { Assembler::movdqu(dst, src); }
void movdqu(XMMRegister dst, XMMRegister src) { Assembler::movdqu(dst, src); }
void movdqu(Address dst, XMMRegister src);
void movdqu(XMMRegister dst, Address src);
void movdqu(XMMRegister dst, XMMRegister src);
void movdqu(XMMRegister dst, AddressLiteral src);
// AVX Unaligned forms
void vmovdqu(Address dst, XMMRegister src);
void vmovdqu(XMMRegister dst, Address src);
void vmovdqu(XMMRegister dst, XMMRegister src);
void vmovdqu(XMMRegister dst, AddressLiteral src);
// Move Aligned Double Quadword
void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); }
@ -999,6 +1004,19 @@ public:
Assembler::pclmulqdq(dst, src, 0x11);
}
void pcmpeqb(XMMRegister dst, XMMRegister src);
void pcmpeqw(XMMRegister dst, XMMRegister src);
void pcmpestri(XMMRegister dst, Address src, int imm8);
void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);
void pmovzxbw(XMMRegister dst, XMMRegister src);
void pmovzxbw(XMMRegister dst, Address src);
void pmovmskb(Register dst, XMMRegister src);
void ptest(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); }
void sqrtsd(XMMRegister dst, Address src) { Assembler::sqrtsd(dst, src); }
void sqrtsd(XMMRegister dst, AddressLiteral src);
@ -1024,12 +1042,12 @@ public:
void ucomisd(XMMRegister dst, AddressLiteral src);
// Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
void xorpd(XMMRegister dst, XMMRegister src) { Assembler::xorpd(dst, src); }
void xorpd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); }
void xorpd(XMMRegister dst, AddressLiteral src);
// Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
void xorps(XMMRegister dst, XMMRegister src) { Assembler::xorps(dst, src); }
void xorps(XMMRegister dst, XMMRegister src);
void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
void xorps(XMMRegister dst, AddressLiteral src);
@ -1047,6 +1065,49 @@ public:
void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); }
void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
void vpbroadcastw(XMMRegister dst, XMMRegister src);
void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
void vpmovmskb(Register dst, XMMRegister src);
void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
void vptest(XMMRegister dst, XMMRegister src);
void punpcklbw(XMMRegister dst, XMMRegister src);
void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }
void pshuflw(XMMRegister dst, XMMRegister src, int mode);
void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }
void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);

View File

@ -192,31 +192,22 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
}
} else if(UseSSE >= 2) {
// Save whole 128bit (16 bytes) XMM registers
if (VM_Version::supports_avx512novl()) {
for (int n = 0; n < num_xmm_regs; n++) {
__ vextractf32x4h(Address(rsp, off*wordSize), as_XMMRegister(n), 0);
off += delta;
}
} else {
for (int n = 0; n < num_xmm_regs; n++) {
__ movdqu(Address(rsp, off*wordSize), as_XMMRegister(n));
off += delta;
}
for (int n = 0; n < num_xmm_regs; n++) {
__ movdqu(Address(rsp, off*wordSize), as_XMMRegister(n));
off += delta;
}
}
if (vect_words > 0) {
if (save_vectors) {
assert(vect_words*wordSize == 128, "");
__ subptr(rsp, 128); // Save upper half of YMM registers
off = 0;
for (int n = 0; n < num_xmm_regs; n++) {
__ vextractf128h(Address(rsp, off++*16), as_XMMRegister(n));
__ vextractf128h(Address(rsp, n*16), as_XMMRegister(n));
}
if (UseAVX > 2) {
__ subptr(rsp, 256); // Save upper half of ZMM registers
off = 0;
for (int n = 0; n < num_xmm_regs; n++) {
__ vextractf64x4h(Address(rsp, off++*32), as_XMMRegister(n));
__ vextractf64x4h(Address(rsp, n*32), as_XMMRegister(n), 1);
}
}
}
@ -275,44 +266,39 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_ve
#else
assert(!restore_vectors, "vectors are generated only by C2");
#endif
if (restore_vectors) {
assert(additional_frame_bytes == 128, "");
if (UseAVX > 2) {
// Restore upper half of ZMM registers.
for (int n = 0; n < num_xmm_regs; n++) {
__ vinsertf64x4h(as_XMMRegister(n), Address(rsp, n*32), 1);
}
__ addptr(rsp, additional_frame_bytes*2); // Save upper half of ZMM registers
}
// Restore upper half of YMM registers.
for (int n = 0; n < num_xmm_regs; n++) {
__ vinsertf128h(as_XMMRegister(n), Address(rsp, n*16));
}
__ addptr(rsp, additional_frame_bytes); // Save upper half of YMM registers
}
int off = xmm0_off;
int delta = xmm1_off - off;
if (UseSSE == 1) {
assert(additional_frame_bytes == 0, "");
for (int n = 0; n < num_xmm_regs; n++) {
__ movflt(as_XMMRegister(n), Address(rsp, off*wordSize));
off += delta;
}
} else if (UseSSE >= 2) {
if (VM_Version::supports_avx512novl()) {
for (int n = 0; n < num_xmm_regs; n++) {
__ vinsertf32x4h(as_XMMRegister(n), Address(rsp, off*wordSize+additional_frame_bytes), 0);
off += delta;
}
} else {
for (int n = 0; n < num_xmm_regs; n++) {
__ movdqu(as_XMMRegister(n), Address(rsp, off*wordSize+additional_frame_bytes));
off += delta;
}
}
}
if (restore_vectors) {
if (UseAVX > 2) {
off = 0;
for (int n = 0; n < num_xmm_regs; n++) {
__ vinsertf64x4h(as_XMMRegister(n), Address(rsp, off++*32));
}
__ addptr(rsp, additional_frame_bytes*2); // Save upper half of ZMM registers
}
// Restore upper half of YMM registers.
assert(additional_frame_bytes == 128, "");
off = 0;
// additional_frame_bytes is only populated in the restore_vectors case; otherwise it is 0
for (int n = 0; n < num_xmm_regs; n++) {
__ vinsertf128h(as_XMMRegister(n), Address(rsp, off++*16));
__ movdqu(as_XMMRegister(n), Address(rsp, off*wordSize+additional_frame_bytes));
off += delta;
}
__ addptr(rsp, additional_frame_bytes); // Save upper half of YMM registers
}
__ pop_FPU_state();
__ addptr(rsp, FPU_regs_live*wordSize); // Pop FPU registers
@ -2562,7 +2548,8 @@ void SharedRuntime::generate_deopt_blob() {
oop_maps->add_gc_map( __ pc()-start, map);
// Discard arg to fetch_unroll_info
// Discard args to fetch_unroll_info
__ pop(rcx);
__ pop(rcx);
__ get_thread(rcx);
@ -2575,9 +2562,8 @@ void SharedRuntime::generate_deopt_blob() {
// we are very short of registers
Address unpack_kind(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes());
// retrieve the deopt kind from where we left it.
__ pop(rax);
__ movl(unpack_kind, rax); // save the unpack_kind value
// retrieve the deopt kind from the UnrollBlock.
__ movl(rax, unpack_kind);
Label noException;
__ cmpl(rax, Deoptimization::Unpack_exception); // Was exception pending?
@ -2787,11 +2773,12 @@ void SharedRuntime::generate_uncommon_trap_blob() {
enum frame_layout {
arg0_off, // thread sp + 0 // Arg location for
arg1_off, // unloaded_class_index sp + 1 // calling C
arg2_off, // exec_mode sp + 2
// The frame sender code expects that rbp will be in the "natural" place and
// will override any oopMap setting for it. We must therefore force the layout
// so that it agrees with the frame sender code.
rbp_off, // callee saved register sp + 2
return_off, // slot for return address sp + 3
rbp_off, // callee saved register sp + 3
return_off, // slot for return address sp + 4
framesize
};
@ -2823,6 +2810,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
__ movptr(Address(rsp, arg0_off*wordSize), rdx);
// argument already in ECX
__ movl(Address(rsp, arg1_off*wordSize),rcx);
__ movl(Address(rsp, arg2_off*wordSize), Deoptimization::Unpack_uncommon_trap);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
// Set an oopmap for the call site
@ -2839,6 +2827,16 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// Load UnrollBlock into EDI
__ movptr(rdi, rax);
#ifdef ASSERT
{ Label L;
__ cmpptr(Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()),
(int32_t)Deoptimization::Unpack_uncommon_trap);
__ jcc(Assembler::equal, L);
__ stop("SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap");
__ bind(L);
}
#endif
// Pop all the frames we must move/replace.
//
// Frame picture (youngest to oldest)

View File

@ -72,45 +72,28 @@ class SimpleRuntimeFrame {
class RegisterSaver {
// Capture info about frame layout. Layout offsets are in jint
// units because compiler frame slots are jints.
#define HALF_ZMM_BANK_WORDS 128
#define XSAVE_AREA_BEGIN 160
#define XSAVE_AREA_YMM_BEGIN 576
#define XSAVE_AREA_ZMM_BEGIN 1152
#define XSAVE_AREA_UPPERBANK 1664
#define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
#define DEF_YMM_OFFS(regnum) ymm ## regnum ## _off = ymm_off + (regnum)*16/BytesPerInt, ymm ## regnum ## H_off
#define DEF_ZMM_OFFS(regnum) zmm ## regnum ## _off = zmm_off + (regnum-16)*64/BytesPerInt, zmm ## regnum ## H_off
enum layout {
fpu_state_off = frame::arg_reg_save_area_bytes/BytesPerInt, // fxsave save area
xmm_off = fpu_state_off + 160/BytesPerInt, // offset in fxsave save area
xmm_off = fpu_state_off + XSAVE_AREA_BEGIN/BytesPerInt, // offset in fxsave save area
DEF_XMM_OFFS(0),
DEF_XMM_OFFS(1),
DEF_XMM_OFFS(2),
DEF_XMM_OFFS(3),
DEF_XMM_OFFS(4),
DEF_XMM_OFFS(5),
DEF_XMM_OFFS(6),
DEF_XMM_OFFS(7),
DEF_XMM_OFFS(8),
DEF_XMM_OFFS(9),
DEF_XMM_OFFS(10),
DEF_XMM_OFFS(11),
DEF_XMM_OFFS(12),
DEF_XMM_OFFS(13),
DEF_XMM_OFFS(14),
DEF_XMM_OFFS(15),
zmm_off = fpu_state_off + ((FPUStateSizeInWords - (HALF_ZMM_BANK_WORDS + 1))*wordSize / BytesPerInt),
// 2..15 are implied in range usage
ymm_off = xmm_off + (XSAVE_AREA_YMM_BEGIN - XSAVE_AREA_BEGIN)/BytesPerInt,
DEF_YMM_OFFS(0),
DEF_YMM_OFFS(1),
// 2..15 are implied in range usage
zmm_high = xmm_off + (XSAVE_AREA_ZMM_BEGIN - XSAVE_AREA_BEGIN)/BytesPerInt,
zmm_off = xmm_off + (XSAVE_AREA_UPPERBANK - XSAVE_AREA_BEGIN)/BytesPerInt,
DEF_ZMM_OFFS(16),
DEF_ZMM_OFFS(17),
DEF_ZMM_OFFS(18),
DEF_ZMM_OFFS(19),
DEF_ZMM_OFFS(20),
DEF_ZMM_OFFS(21),
DEF_ZMM_OFFS(22),
DEF_ZMM_OFFS(23),
DEF_ZMM_OFFS(24),
DEF_ZMM_OFFS(25),
DEF_ZMM_OFFS(26),
DEF_ZMM_OFFS(27),
DEF_ZMM_OFFS(28),
DEF_ZMM_OFFS(29),
DEF_ZMM_OFFS(30),
DEF_ZMM_OFFS(31),
// 18..31 are implied in range usage
fpu_state_end = fpu_state_off + ((FPUStateSizeInWords-1)*wordSize / BytesPerInt),
fpu_stateH_end,
r15_off, r15H_off,
@ -160,8 +143,6 @@ class RegisterSaver {
};
OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
int vect_words = 0;
int ymmhi_offset = -1;
int off = 0;
int num_xmm_regs = XMMRegisterImpl::number_of_registers;
if (UseAVX < 3) {
@ -171,24 +152,15 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
if (save_vectors) {
assert(UseAVX > 0, "512bit vectors are supported only with EVEX");
assert(MaxVectorSize == 64, "only 512bit vectors are supported now");
// Save upper half of YMM registers
vect_words = 16 * num_xmm_regs / wordSize;
if (UseAVX < 3) {
ymmhi_offset = additional_frame_words;
additional_frame_words += vect_words;
}
}
#else
assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
#endif
// Always make the frame size 16-byte aligned
int frame_size_in_bytes = round_to(additional_frame_words*wordSize +
reg_save_size*BytesPerInt, num_xmm_regs);
// Always make the frame size 16-byte aligned, both vector and non vector stacks are always allocated
int frame_size_in_bytes = round_to(reg_save_size*BytesPerInt, num_xmm_regs);
// OopMap frame size is in compiler stack slots (jint's) not bytes or words
int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
// The caller will allocate additional_frame_words
int additional_frame_slots = additional_frame_words*wordSize / BytesPerInt;
// CodeBlob frame size is in words.
int frame_size_in_words = frame_size_in_bytes / wordSize;
*total_frame_words = frame_size_in_words;
@ -203,12 +175,34 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
__ push_CPU_state(); // Push a multiple of 16 bytes
// push cpu state handles this on EVEX enabled targets
if ((vect_words > 0) && (UseAVX < 3)) {
assert(vect_words*wordSize >= 256, "");
// Save upper half of YMM registers (0..num_xmm_regs)
__ subptr(rsp, num_xmm_regs*16);
for (int n = 0; n < num_xmm_regs; n++) {
__ vextractf128h(Address(rsp, off++*16), as_XMMRegister(n));
if (save_vectors) {
// Save upper half of YMM registers (0..15)
int base_addr = XSAVE_AREA_YMM_BEGIN;
for (int n = 0; n < 16; n++) {
__ vextractf128h(Address(rsp, base_addr+n*16), as_XMMRegister(n));
}
if (VM_Version::supports_evex()) {
// Save upper half of ZMM registers (0..15)
base_addr = XSAVE_AREA_ZMM_BEGIN;
for (int n = 0; n < 16; n++) {
__ vextractf64x4h(Address(rsp, base_addr+n*32), as_XMMRegister(n), 1);
}
// Save full ZMM registers (16..num_xmm_regs)
base_addr = XSAVE_AREA_UPPERBANK;
int off = 0;
int vector_len = Assembler::AVX_512bit;
for (int n = 16; n < num_xmm_regs; n++) {
__ evmovdqul(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n), vector_len);
}
}
} else {
if (VM_Version::supports_evex()) {
// Save upper bank of ZMM registers(16..31) for double/float usage
int base_addr = XSAVE_AREA_UPPERBANK;
int off = 0;
for (int n = 16; n < num_xmm_regs; n++) {
__ movsd(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n));
}
}
}
if (frame::arg_reg_save_area_bytes != 0) {
@ -224,8 +218,7 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
OopMapSet *oop_maps = new OopMapSet();
OopMap* map = new OopMap(frame_size_in_slots, 0);
#define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_slots)
#define YMMHI_STACK_OFFSET(x) VMRegImpl::stack2reg((x / VMRegImpl::stack_slot_size) + ymmhi_offset)
#define STACK_OFFSET(x) VMRegImpl::stack2reg((x))
map->set_callee_saved(STACK_OFFSET( rax_off ), rax->as_VMReg());
map->set_callee_saved(STACK_OFFSET( rcx_off ), rcx->as_VMReg());
@ -257,31 +250,21 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
off = zmm16_off;
delta = zmm17_off - off;
for (int n = 16; n < num_xmm_regs; n++) {
XMMRegister xmm_name = as_XMMRegister(n);
map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
XMMRegister zmm_name = as_XMMRegister(n);
map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg());
off += delta;
}
}
#if defined(COMPILER2) || INCLUDE_JVMCI
if (save_vectors) {
assert(ymmhi_offset != -1, "save area must exist");
map->set_callee_saved(YMMHI_STACK_OFFSET( 0), xmm0->as_VMReg()->next(4));
map->set_callee_saved(YMMHI_STACK_OFFSET( 16), xmm1->as_VMReg()->next(4));
map->set_callee_saved(YMMHI_STACK_OFFSET( 32), xmm2->as_VMReg()->next(4));
map->set_callee_saved(YMMHI_STACK_OFFSET( 48), xmm3->as_VMReg()->next(4));
map->set_callee_saved(YMMHI_STACK_OFFSET( 64), xmm4->as_VMReg()->next(4));
map->set_callee_saved(YMMHI_STACK_OFFSET( 80), xmm5->as_VMReg()->next(4));
map->set_callee_saved(YMMHI_STACK_OFFSET( 96), xmm6->as_VMReg()->next(4));
map->set_callee_saved(YMMHI_STACK_OFFSET(112), xmm7->as_VMReg()->next(4));
map->set_callee_saved(YMMHI_STACK_OFFSET(128), xmm8->as_VMReg()->next(4));
map->set_callee_saved(YMMHI_STACK_OFFSET(144), xmm9->as_VMReg()->next(4));
map->set_callee_saved(YMMHI_STACK_OFFSET(160), xmm10->as_VMReg()->next(4));
map->set_callee_saved(YMMHI_STACK_OFFSET(176), xmm11->as_VMReg()->next(4));
map->set_callee_saved(YMMHI_STACK_OFFSET(192), xmm12->as_VMReg()->next(4));
map->set_callee_saved(YMMHI_STACK_OFFSET(208), xmm13->as_VMReg()->next(4));
map->set_callee_saved(YMMHI_STACK_OFFSET(224), xmm14->as_VMReg()->next(4));
map->set_callee_saved(YMMHI_STACK_OFFSET(240), xmm15->as_VMReg()->next(4));
off = ymm0_off;
int delta = ymm1_off - off;
for (int n = 0; n < 16; n++) {
XMMRegister ymm_name = as_XMMRegister(n);
map->set_callee_saved(STACK_OFFSET(off), ymm_name->as_VMReg()->next(4));
off += delta;
}
}
#endif // COMPILER2 || INCLUDE_JVMCI
@ -316,8 +299,8 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
off = zmm16H_off;
delta = zmm17H_off - off;
for (int n = 16; n < num_xmm_regs; n++) {
XMMRegister xmm_name = as_XMMRegister(n);
map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg()->next());
XMMRegister zmm_name = as_XMMRegister(n);
map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg()->next());
off += delta;
}
}
@ -335,21 +318,48 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_ve
// Pop arg register save area
__ addptr(rsp, frame::arg_reg_save_area_bytes);
}
#if defined(COMPILER2) || INCLUDE_JVMCI
// On EVEX enabled targets everything is handled in pop fpu state
if ((restore_vectors) && (UseAVX < 3)) {
assert(UseAVX > 0, "256/512-bit vectors are supported only with AVX");
assert(MaxVectorSize == 64, "up to 512bit vectors are supported now");
int off = 0;
// Restore upper half of YMM registers (0..num_xmm_regs)
for (int n = 0; n < num_xmm_regs; n++) {
__ vinsertf128h(as_XMMRegister(n), Address(rsp, off++*16));
}
__ addptr(rsp, num_xmm_regs*16);
if (restore_vectors) {
assert(UseAVX > 0, "512bit vectors are supported only with EVEX");
assert(MaxVectorSize == 64, "only 512bit vectors are supported now");
}
#else
assert(!restore_vectors, "vectors are generated only by C2 and JVMCI");
assert(!save_vectors, "vectors are generated only by C2");
#endif
// On EVEX enabled targets everything is handled in pop fpu state
if (restore_vectors) {
// Restore upper half of YMM registers (0..15)
int base_addr = XSAVE_AREA_YMM_BEGIN;
for (int n = 0; n < 16; n++) {
__ vinsertf128h(as_XMMRegister(n), Address(rsp, base_addr+n*16));
}
if (VM_Version::supports_evex()) {
// Restore upper half of ZMM registers (0..15)
base_addr = XSAVE_AREA_ZMM_BEGIN;
for (int n = 0; n < 16; n++) {
__ vinsertf64x4h(as_XMMRegister(n), Address(rsp, base_addr+n*32), 1);
}
// Restore full ZMM registers (16..num_xmm_regs)
base_addr = XSAVE_AREA_UPPERBANK;
int vector_len = Assembler::AVX_512bit;
int off = 0;
for (int n = 16; n < num_xmm_regs; n++) {
__ evmovdqul(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)), vector_len);
}
}
} else {
if (VM_Version::supports_evex()) {
// Restore upper bank of ZMM registers (16..31) for double/float usage
int base_addr = XSAVE_AREA_UPPERBANK;
int off = 0;
for (int n = 16; n < num_xmm_regs; n++) {
__ movsd(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)));
}
}
}
// Recover CPU state
__ pop_CPU_state();
// Get the rbp described implicitly by the calling convention (no oopMap)
@ -2819,6 +2829,7 @@ void SharedRuntime::generate_deopt_blob() {
__ movl(r14, (int32_t)Deoptimization::Unpack_reexecute);
__ mov(c_rarg0, r15_thread);
__ movl(c_rarg2, r14); // exec mode
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
@ -2905,6 +2916,7 @@ void SharedRuntime::generate_deopt_blob() {
}
#endif // ASSERT
__ mov(c_rarg0, r15_thread);
__ movl(c_rarg1, r14); // exec_mode
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
// Need to have an oopmap that tells fetch_unroll_info where to
@ -2922,6 +2934,7 @@ void SharedRuntime::generate_deopt_blob() {
// Load UnrollBlock* into rdi
__ mov(rdi, rax);
__ movl(r14, Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
Label noException;
__ cmpl(r14, Deoptimization::Unpack_exception); // Was exception pending?
__ jcc(Assembler::notEqual, noException);
@ -3140,6 +3153,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
__ mov(c_rarg0, r15_thread);
__ movl(c_rarg2, Deoptimization::Unpack_uncommon_trap);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
// Set an oopmap for the call site
@ -3155,6 +3169,16 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// Load UnrollBlock* into rdi
__ mov(rdi, rax);
#ifdef ASSERT
{ Label L;
__ cmpptr(Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()),
(int32_t)Deoptimization::Unpack_uncommon_trap);
__ jcc(Assembler::equal, L);
__ stop("SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap");
__ bind(L);
}
#endif
// Pop all the frames we must move/replace.
//
// Frame picture (youngest to oldest)
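The restore loops in the hunks above walk the save area with fixed strides: 16 bytes for each YMM upper half (vinsertf128h), 32 bytes for each ZMM upper half (vinsertf64x4h), and 64 bytes for each full upper-bank ZMM register (evmovdqul). A standalone sketch of the resulting offsets; the base constants are assumed placeholders for XSAVE_AREA_YMM_BEGIN, XSAVE_AREA_ZMM_BEGIN and XSAVE_AREA_UPPERBANK, not the VM's actual values:

#include <cstdio>

int main() {
  // Assumed placeholder offsets; the real values come from the XSAVE_AREA_* constants.
  const int ymm_base = 576;
  const int zmm_base = 1152;
  const int upper_bank_base = 1664;
  const int num_xmm_regs = 32;

  for (int n = 0; n < 16; n++)             // upper 128 bits of ymm0..ymm15, 16 bytes each
    printf("ymm%d upper half at rsp+%d\n", n, ymm_base + n * 16);
  for (int n = 0; n < 16; n++)             // upper 256 bits of zmm0..zmm15, 32 bytes each
    printf("zmm%d upper half at rsp+%d\n", n, zmm_base + n * 32);
  for (int n = 16; n < num_xmm_regs; n++)  // full 512-bit zmm16..zmm31, 64 bytes each
    printf("zmm%d at rsp+%d\n", n, upper_bank_base + (n - 16) * 64);
  return 0;
}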

View File

@ -273,7 +273,7 @@ class StubGenerator: public StubCodeGenerator {
if (UseAVX > 2) {
last_reg = 31;
}
if (VM_Version::supports_avx512novl()) {
if (VM_Version::supports_evex()) {
for (int i = xmm_save_first; i <= last_reg; i++) {
__ vextractf32x4h(xmm_save(i), as_XMMRegister(i), 0);
}
@ -391,7 +391,7 @@ class StubGenerator: public StubCodeGenerator {
// restore regs belonging to calling function
#ifdef _WIN64
// emit the restores for xmm regs
if (VM_Version::supports_avx512novl()) {
if (VM_Version::supports_evex()) {
for (int i = xmm_save_first; i <= last_reg; i++) {
__ vinsertf32x4h(as_XMMRegister(i), xmm_save(i), 0);
}
@ -1439,8 +1439,8 @@ class StubGenerator: public StubCodeGenerator {
// Copy 64-bytes per iteration
__ BIND(L_loop);
if (UseAVX > 2) {
__ evmovdqul(xmm0, Address(from, qword_count, Address::times_8, 32), Assembler::AVX_512bit);
__ evmovdqul(Address(dest, qword_count, Address::times_8, 32), xmm0, Assembler::AVX_512bit);
__ evmovdqul(xmm0, Address(from, qword_count, Address::times_8, 0), Assembler::AVX_512bit);
__ evmovdqul(Address(dest, qword_count, Address::times_8, 0), xmm0, Assembler::AVX_512bit);
} else if (UseAVX == 2) {
__ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
__ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
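The 512-bit path above now addresses the whole 64-byte block at displacement 0, whereas the 256-bit path needs two 32-byte accesses. A minimal sketch of the same addressing pattern with standard intrinsics, assuming an AVX-512 capable compiler; this is only an illustration, not the stub generator's code:

#include <immintrin.h>
#include <cstdint>

// Copy one 64-byte block: a single 512-bit access covers it.
static inline void copy64_avx512(const uint8_t* from, uint8_t* to) {
  __m512i v = _mm512_loadu_si512(from);
  _mm512_storeu_si512(to, v);
}

// With only 256-bit registers the same block takes two accesses, at offsets 0 and 32.
static inline void copy64_avx2(const uint8_t* from, uint8_t* to) {
  __m256i lo = _mm256_loadu_si256((const __m256i*)(from));
  __m256i hi = _mm256_loadu_si256((const __m256i*)(from + 32));
  _mm256_storeu_si256((__m256i*)(to), lo);
  _mm256_storeu_si256((__m256i*)(to + 32), hi);
}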

View File

@ -632,12 +632,36 @@ void VM_Version::get_processor_features() {
// Use AES instructions if available.
if (supports_aes()) {
if (FLAG_IS_DEFAULT(UseAES)) {
UseAES = true;
FLAG_SET_DEFAULT(UseAES, true);
}
} else if (UseAES) {
if (!FLAG_IS_DEFAULT(UseAES))
if (!UseAES) {
if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
}
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
} else {
if (UseSSE > 2) {
if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
FLAG_SET_DEFAULT(UseAESIntrinsics, true);
}
} else {
// The AES intrinsic stubs require AES instruction support (of course)
// but also require sse3 mode or higher for the instructions they use.
if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
warning("X86 AES intrinsics require SSE3 instructions or higher. Intrinsics will be disabled.");
}
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
}
} else if (UseAES || UseAESIntrinsics) {
if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
warning("AES instructions are not available on this CPU");
FLAG_SET_DEFAULT(UseAES, false);
FLAG_SET_DEFAULT(UseAES, false);
}
if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
warning("AES intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
}
// Use CLMUL instructions if available.
@ -673,18 +697,6 @@ void VM_Version::get_processor_features() {
FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
}
// The AES intrinsic stubs require AES instruction support (of course)
// but also require sse3 mode for the instructions they use.
if (UseAES && (UseSSE > 2)) {
if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
UseAESIntrinsics = true;
}
} else if (UseAESIntrinsics) {
if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
warning("AES intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
// GHASH/GCM intrinsics
if (UseCLMUL && (UseSSE > 2)) {
if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
@ -891,7 +903,7 @@ void VM_Version::get_processor_features() {
UseNewLongLShift = true;
}
if( FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper) ) {
if( supports_sse4a() ) {
if (supports_sse4a()) {
UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron
} else {
UseXmmLoadAndClearUpper = false;
@ -918,10 +930,15 @@ void VM_Version::get_processor_features() {
UseXmmI2D = false;
}
}
if( FLAG_IS_DEFAULT(UseSSE42Intrinsics) ) {
if( supports_sse4_2() && UseSSE >= 4 ) {
UseSSE42Intrinsics = true;
if (supports_sse4_2() && UseSSE >= 4) {
if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
}
} else {
if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
}
FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
}
// some defaults for AMD family 15h
@ -995,8 +1012,13 @@ void VM_Version::get_processor_features() {
}
if (supports_sse4_2() && UseSSE >= 4) {
if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
UseSSE42Intrinsics = true;
FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
}
} else {
if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
}
FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
}
}
if ((cpu_family() == 0x06) &&
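The restructured flag handling above boils down to a chain of dependencies: UseAES defaults to the hardware capability, and UseAESIntrinsics additionally requires UseAES and SSE3 or higher. A hedged sketch of that decision with plain values, not the VM's flag machinery:

#include <cstdio>

struct Cpu { bool has_aes; int sse_level; };

// Effective defaults for (UseAES, UseAESIntrinsics) when neither flag was set explicitly.
static void resolve_aes_flags(const Cpu& cpu, bool& use_aes, bool& use_aes_intrinsics) {
  use_aes = cpu.has_aes;                             // UseAES follows hardware support
  use_aes_intrinsics = use_aes && cpu.sse_level > 2; // intrinsics also need UseSSE > 2
}

int main() {
  bool aes, intrinsics;
  Cpu without_aes{false, 2};
  Cpu with_aes{true, 4};
  resolve_aes_flags(without_aes, aes, intrinsics);
  printf("no AES:   UseAES=%d UseAESIntrinsics=%d\n", aes, intrinsics);
  resolve_aes_flags(with_aes, aes, intrinsics);
  printf("with AES: UseAES=%d UseAESIntrinsics=%d\n", aes, intrinsics);
  return 0;
}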

View File

@ -552,6 +552,19 @@ protected:
break;
}
}
// zmm_save will be set on an EVEX-enabled machine even if we choose AVX code gen
if (retVal == false) {
// Verify that OS save/restore all bits of EVEX registers
// during signal processing.
int nreg = 2 LP64_ONLY(+2);
retVal = true;
for (int i = 0; i < 16 * nreg; i++) { // 64 bytes per zmm register
if (_cpuid_info.zmm_save[i] != ymm_test_value()) {
retVal = false;
break;
}
}
}
}
return retVal;
}
@ -706,6 +719,9 @@ public:
static bool supports_avx512vl() { return (_cpuFeatures & CPU_AVX512VL) != 0; }
static bool supports_avx512vlbw() { return (supports_avx512bw() && supports_avx512vl()); }
static bool supports_avx512novl() { return (supports_evex() && !supports_avx512vl()); }
static bool supports_avx512nobw() { return (supports_evex() && !supports_avx512bw()); }
static bool supports_avx256only() { return (supports_avx2() && !supports_evex()); }
static bool supports_avxonly() { return ((supports_avx2() || supports_avx()) && !supports_evex()); }
// Intel features
static bool is_intel_family_core() { return is_intel() &&
extended_cpu_family() == CPU_FAMILY_INTEL_CORE; }
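The new predicates are plain compositions of existing feature tests, restated below with placeholder booleans instead of the _cpuFeatures bit masks (illustration only):

// Placeholder booleans standing in for the CPUID-derived _cpuFeatures bits.
struct Features {
  bool avx, avx2, evex, avx512bw, avx512vl;

  bool avx512novl() const { return evex && !avx512vl; }
  bool avx512nobw() const { return evex && !avx512bw; }
  bool avx256only() const { return avx2 && !evex; }
  bool avxonly()    const { return (avx2 || avx) && !evex; }
};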

File diff suppressed because it is too large

View File

@ -291,9 +291,7 @@ static int pre_call_resets_size() {
size += 6; // fldcw
}
if (C->max_vector_size() > 16) {
if(UseAVX <= 2) {
size += 3; // vzeroupper
}
size += 3; // vzeroupper
}
return size;
}
@ -1915,7 +1913,7 @@ encode %{
if (stub == NULL) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
}
}
%}

View File

@ -536,11 +536,7 @@ source %{
#define __ _masm.
static int clear_avx_size() {
if(UseAVX > 2) {
return 0; // vzeroupper is ignored
} else {
return (Compile::current()->max_vector_size() > 16) ? 3 : 0; // vzeroupper
}
return (Compile::current()->max_vector_size() > 16) ? 3 : 0; // vzeroupper
}
// !!!!! Special hack to get all types of calls to specify the byte offset
@ -871,7 +867,7 @@ void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
if (framesize > 0) {
st->print("\n\t");
st->print("addq rbp, #%d", framesize);
}
}
}
}

View File

@ -497,12 +497,15 @@ int CppInterpreter::accessor_entry(Method* method, intptr_t UNUSED, TRAPS) {
// 1: getfield
// 2: index
// 3: index
// 4: ireturn/areturn
// 4: ireturn/areturn/freturn/lreturn/dreturn
// NB this is not raw bytecode: index is in machine order
u1 *code = method->code_base();
assert(code[0] == Bytecodes::_aload_0 &&
code[1] == Bytecodes::_getfield &&
(code[4] == Bytecodes::_ireturn ||
code[4] == Bytecodes::_freturn ||
code[4] == Bytecodes::_lreturn ||
code[4] == Bytecodes::_dreturn ||
code[4] == Bytecodes::_areturn), "should do");
u2 index = Bytes::get_native_u2(&code[2]);

View File

@ -32,6 +32,7 @@ import java.lang.reflect.Method;
import jdk.vm.ci.code.InstalledCode;
import jdk.vm.ci.code.InvalidInstalledCodeException;
import jdk.vm.ci.code.TargetDescription;
import jdk.vm.ci.common.JVMCIError;
import jdk.vm.ci.hotspotvmconfig.HotSpotVMField;
import jdk.vm.ci.inittimer.InitTimer;
import jdk.vm.ci.meta.JavaType;
@ -308,6 +309,8 @@ final class CompilerToVM {
* {@link HotSpotVMConfig#codeInstallResultCodeTooLarge},
* {@link HotSpotVMConfig#codeInstallResultDependenciesFailed} or
* {@link HotSpotVMConfig#codeInstallResultDependenciesInvalid}.
* @throws JVMCIError if there is something wrong with the compiled code or the associated
* metadata.
*/
native int installCode(TargetDescription target, HotSpotCompiledCode compiledCode, InstalledCode code, HotSpotSpeculationLog speculationLog);

View File

@ -1680,6 +1680,7 @@ public class HotSpotVMConfig {
@HotSpotVMField(name = "Deoptimization::UnrollBlock::_caller_adjustment", type = "int", get = HotSpotVMField.Type.OFFSET) @Stable public int deoptimizationUnrollBlockCallerAdjustmentOffset;
@HotSpotVMField(name = "Deoptimization::UnrollBlock::_number_of_frames", type = "int", get = HotSpotVMField.Type.OFFSET) @Stable public int deoptimizationUnrollBlockNumberOfFramesOffset;
@HotSpotVMField(name = "Deoptimization::UnrollBlock::_total_frame_sizes", type = "int", get = HotSpotVMField.Type.OFFSET) @Stable public int deoptimizationUnrollBlockTotalFrameSizesOffset;
@HotSpotVMField(name = "Deoptimization::UnrollBlock::_unpack_kind", type = "int", get = HotSpotVMField.Type.OFFSET) @Stable public int deoptimizationUnrollBlockUnpackKindOffset;
@HotSpotVMField(name = "Deoptimization::UnrollBlock::_frame_sizes", type = "intptr_t*", get = HotSpotVMField.Type.OFFSET) @Stable public int deoptimizationUnrollBlockFrameSizesOffset;
@HotSpotVMField(name = "Deoptimization::UnrollBlock::_frame_pcs", type = "address*", get = HotSpotVMField.Type.OFFSET) @Stable public int deoptimizationUnrollBlockFramePcsOffset;
@HotSpotVMField(name = "Deoptimization::UnrollBlock::_initial_info", type = "intptr_t", get = HotSpotVMField.Type.OFFSET) @Stable public int deoptimizationUnrollBlockInitialInfoOffset;

View File

@ -66,12 +66,12 @@ int VM_Version::platform_features(int features) {
features = generic_v9_m;
if (detect_niagara()) {
NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Detected Linux on Niagara");)
if (PrintMiscellaneous && Verbose) { tty->print_cr("Detected Linux on Niagara"); }
features = niagara1_m | T_family_m;
}
if (detect_M_family()) {
NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Detected Linux on M family");)
if (PrintMiscellaneous && Verbose) { tty->print_cr("Detected Linux on M family"); }
features = sun4v_m | generic_v9_m | M_family_m | T_family_m;
}

View File

@ -707,12 +707,10 @@ BlockBegin* GraphBuilder::ScopeData::block_at(int bci) {
BlockBegin* block = bci2block()->at(bci);
if (block != NULL && block == parent()->bci2block()->at(bci)) {
BlockBegin* new_block = new BlockBegin(block->bci());
#ifndef PRODUCT
if (PrintInitialBlockList) {
tty->print_cr("CFG: cloned block %d (bci %d) as block %d for jsr",
block->block_id(), block->bci(), new_block->block_id());
}
#endif
// copy data from cloned block
new_block->set_depth_first_number(block->depth_first_number());
if (block->is_set(BlockBegin::parser_loop_header_flag)) new_block->set(BlockBegin::parser_loop_header_flag);
@ -1438,7 +1436,9 @@ void GraphBuilder::method_return(Value x) {
bool need_mem_bar = false;
if (method()->name() == ciSymbol::object_initializer_name() &&
(scope()->wrote_final() || (AlwaysSafeConstructors && scope()->wrote_fields()))) {
(scope()->wrote_final() || (AlwaysSafeConstructors && scope()->wrote_fields())
|| (support_IRIW_for_not_multiple_copy_atomic_cpu && scope()->wrote_volatile())
)){
need_mem_bar = true;
}
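Restated as a predicate: a returning constructor needs a memory barrier if it wrote a final field, wrote any field under AlwaysSafeConstructors, or (new in this change) wrote a volatile field on a CPU that needs IRIW support. A sketch with plain booleans, not the real IRScope and flag objects:

#include <cstdio>

static bool return_needs_membar(bool is_object_initializer,
                                bool wrote_final, bool wrote_fields, bool wrote_volatile,
                                bool always_safe_constructors, bool support_iriw) {
  return is_object_initializer &&
         (wrote_final
          || (always_safe_constructors && wrote_fields)
          || (support_iriw && wrote_volatile));
}

int main() {
  // A constructor that wrote a volatile field on an IRIW-sensitive CPU needs the barrier.
  printf("%d\n", return_needs_membar(true, false, false, true, false, true));
  return 0;
}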
@ -1554,6 +1554,9 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
if (code == Bytecodes::_putfield) {
scope()->set_wrote_fields();
if (field->is_volatile()) {
scope()->set_wrote_volatile();
}
}
const int offset = !needs_patching ? field->offset() : -1;
@ -3785,12 +3788,10 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecode
cont = new BlockBegin(next_bci());
// low number so that continuation gets parsed as early as possible
cont->set_depth_first_number(0);
#ifndef PRODUCT
if (PrintInitialBlockList) {
tty->print_cr("CFG: created block %d (bci %d) as continuation for inline at bci %d",
cont->block_id(), cont->bci(), bci());
}
#endif
continuation_existed = false;
}
// Record number of predecessors of continuation block before

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -143,6 +143,7 @@ IRScope::IRScope(Compilation* compilation, IRScope* caller, int caller_bci, ciMe
_monitor_pairing_ok = method->has_balanced_monitors();
_wrote_final = false;
_wrote_fields = false;
_wrote_volatile = false;
_start = NULL;
if (osr_bci == -1) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -151,6 +151,7 @@ class IRScope: public CompilationResourceObj {
bool _monitor_pairing_ok; // the monitor pairing info
bool _wrote_final; // has written final field
bool _wrote_fields; // has written fields
bool _wrote_volatile; // has written volatile field
BlockBegin* _start; // the start block, successors are method entries
BitMap _requires_phi_function; // bit is set if phi functions at loop headers are necessary for a local variable
@ -187,7 +188,8 @@ class IRScope: public CompilationResourceObj {
bool wrote_final () const { return _wrote_final; }
void set_wrote_fields() { _wrote_fields = true; }
bool wrote_fields () const { return _wrote_fields; }
void set_wrote_volatile() { _wrote_volatile = true; }
bool wrote_volatile () const { return _wrote_volatile; }
};

View File

@ -2004,7 +2004,7 @@ void LIR_OpRoundFP::print_instr(outputStream* out) const {
// LIR_Op2
void LIR_Op2::print_instr(outputStream* out) const {
if (code() == lir_cmove) {
if (code() == lir_cmove || code() == lir_cmp) {
print_condition(out, condition()); out->print(" ");
}
in_opr1()->print(out); out->print(" ");

View File

@ -1761,7 +1761,7 @@ void LIRGenerator::do_StoreField(StoreField* x) {
post_barrier(object.result(), value.result());
}
if (is_volatile && os::is_MP()) {
if (!support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile && os::is_MP()) {
__ membar();
}
}
@ -1822,6 +1822,10 @@ void LIRGenerator::do_LoadField(LoadField* x) {
address = generate_address(object.result(), x->offset(), field_type);
}
if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile && os::is_MP()) {
__ membar();
}
bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
if (needs_atomic_access && !needs_patching) {
volatile_field_load(address, reg, info);
@ -2238,6 +2242,10 @@ void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
LIR_Opr value = rlock_result(x, x->basic_type());
if (support_IRIW_for_not_multiple_copy_atomic_cpu && x->is_volatile() && os::is_MP()) {
__ membar();
}
get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
#if INCLUDE_ALL_GCS
@ -2395,7 +2403,7 @@ void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
if (x->is_volatile() && os::is_MP()) __ membar_release();
put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
if (x->is_volatile() && os::is_MP()) __ membar();
if (!support_IRIW_for_not_multiple_copy_atomic_cpu && x->is_volatile() && os::is_MP()) __ membar();
}
@ -2794,7 +2802,7 @@ void LIRGenerator::do_Base(Base* x) {
assert(obj->is_valid(), "must be valid");
if (method()->is_synchronized() && GenerateSynchronizationCode) {
LIR_Opr lock = new_register(T_INT);
LIR_Opr lock = syncLockOpr();
__ load_stack_address_monitor(0, lock);
CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
@ -3421,14 +3429,18 @@ void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
__ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result);
__ store(result, counter);
if (notify) {
LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT);
LIR_Opr meth = new_register(T_METADATA);
__ metadata2reg(method->constant_encoding(), meth);
__ logical_and(result, mask, result);
__ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
LIR_Opr meth = LIR_OprFact::metadataConst(method->constant_encoding());
// The bci for info can point to cmp for if's we want the if bci
CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
__ branch(lir_cond_equal, T_INT, overflow);
int freq = frequency << InvocationCounter::count_shift;
if (freq == 0) {
__ branch(lir_cond_always, T_ILLEGAL, overflow);
} else {
LIR_Opr mask = load_immediate(freq, T_INT);
__ logical_and(result, mask, result);
__ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
__ branch(lir_cond_equal, T_INT, overflow);
}
__ branch_destination(overflow->continuation());
}
}
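The rewritten notify path treats a zero frequency as "always take the overflow stub"; otherwise the incremented counter is masked and the stub is taken when the masked value is zero. The same decision with a plain integer counter (freq stands for frequency << InvocationCounter::count_shift):

#include <cstdio>

// True when the counter-overflow stub should be entered.
static bool should_notify(int counter, int freq) {
  if (freq == 0) {
    return true;                 // branch(lir_cond_always, ...)
  }
  return (counter & freq) == 0;  // logical_and + cmp against zero
}

int main() {
  printf("%d\n", should_notify(1024, 0));     // zero frequency: always notify
  printf("%d\n", should_notify(1024, 1023));  // 1024 & 1023 == 0: notify
  printf("%d\n", should_notify(1025, 1023));  // 1025 & 1023 != 0: keep going
  return 0;
}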

View File

@ -495,6 +495,7 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
static LIR_Opr divOutOpr();
static LIR_Opr remOutOpr();
static LIR_Opr shiftCountOpr();
LIR_Opr syncLockOpr();
LIR_Opr syncTempOpr();
LIR_Opr atomicLockOpr();

View File

@ -6233,9 +6233,19 @@ void ControlFlowOptimizer::delete_unnecessary_jumps(BlockList* code) {
if (prev_branch->stub() == NULL) {
LIR_Op2* prev_cmp = NULL;
// There might be a cmove inserted for profiling which depends on the same
// compare. If we change the condition of the respective compare, we have
// to take care of this cmove as well.
LIR_Op2* prev_cmove = NULL;
for(int j = instructions->length() - 3; j >= 0 && prev_cmp == NULL; j--) {
prev_op = instructions->at(j);
// check for the cmove
if (prev_op->code() == lir_cmove) {
assert(prev_op->as_Op2() != NULL, "cmove must be of type LIR_Op2");
prev_cmove = (LIR_Op2*)prev_op;
assert(prev_branch->cond() == prev_cmove->condition(), "should be the same");
}
if (prev_op->code() == lir_cmp) {
assert(prev_op->as_Op2() != NULL, "branch must be of type LIR_Op2");
prev_cmp = (LIR_Op2*)prev_op;
@ -6252,6 +6262,13 @@ void ControlFlowOptimizer::delete_unnecessary_jumps(BlockList* code) {
prev_branch->negate_cond();
prev_cmp->set_condition(prev_branch->cond());
instructions->truncate(instructions->length() - 1);
// if we do change the condition, we have to change the cmove as well
if (prev_cmove != NULL) {
prev_cmove->set_condition(prev_branch->cond());
LIR_Opr t = prev_cmove->in_opr1();
prev_cmove->set_in_opr1(prev_cmove->in_opr2());
prev_cmove->set_in_opr2(t);
}
}
}
}
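Why the cmove operands have to be swapped: negating the branch condition also inverts which input the dependent cmove selects, so swapping the inputs preserves the computed value. A minimal check of that invariant with ordinary integers:

#include <cassert>

// select(cond, a, b) models a cmove that yields 'a' when cond holds.
static int select(bool cond, int a, int b) { return cond ? a : b; }

int main() {
  int a = 1, b = 2;
  bool cond = true;
  int before = select(cond, a, b);   // original branch condition
  int after  = select(!cond, b, a);  // negated condition with swapped operands
  assert(before == after);
  return 0;
}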

View File

@ -1262,6 +1262,8 @@ bool ciMethod::is_empty_method() const { FETCH_FLAG_FROM_VM(is_empty_met
bool ciMethod::is_vanilla_constructor() const { FETCH_FLAG_FROM_VM(is_vanilla_constructor); }
bool ciMethod::has_loops () const { FETCH_FLAG_FROM_VM(has_loops); }
bool ciMethod::has_jsrs () const { FETCH_FLAG_FROM_VM(has_jsrs); }
bool ciMethod::is_getter () const { FETCH_FLAG_FROM_VM(is_getter); }
bool ciMethod::is_setter () const { FETCH_FLAG_FROM_VM(is_setter); }
bool ciMethod::is_accessor () const { FETCH_FLAG_FROM_VM(is_accessor); }
bool ciMethod::is_initializer () const { FETCH_FLAG_FROM_VM(is_initializer); }

View File

@ -311,6 +311,8 @@ class ciMethod : public ciMetadata {
bool is_final_method() const { return is_final() || holder()->is_final(); }
bool has_loops () const;
bool has_jsrs () const;
bool is_getter () const;
bool is_setter () const;
bool is_accessor () const;
bool is_initializer () const;
bool can_be_statically_bound() const { return _can_be_statically_bound; }

View File

@ -1588,6 +1588,7 @@ ciTypeFlow::Block::Block(ciTypeFlow* outer,
_exceptions = NULL;
_exc_klasses = NULL;
_successors = NULL;
_predecessors = new (outer->arena()) GrowableArray<Block*>(outer->arena(), 1, 0, NULL);
_state = new (outer->arena()) StateVector(outer);
JsrSet* new_jsrs =
new (outer->arena()) JsrSet(outer->arena(), jsrs->size());
@ -1771,6 +1772,12 @@ ciTypeFlow::Block::successors(ciBytecodeStream* str,
break;
}
}
// Set predecessor information
for (int i = 0; i < _successors->length(); i++) {
Block* block = _successors->at(i);
block->predecessors()->append(this);
}
}
return _successors;
}
@ -1813,7 +1820,9 @@ void ciTypeFlow::Block::compute_exceptions() {
} else {
klass = handler->catch_klass();
}
_exceptions->append(analyzer->block_at(bci, _jsrs));
Block* block = analyzer->block_at(bci, _jsrs);
_exceptions->append(block);
block->predecessors()->append(this);
_exc_klasses->append(klass);
}
}
@ -1909,6 +1918,18 @@ void ciTypeFlow::Block::print_on(outputStream* st) const {
st->cr();
}
}
if (_predecessors == NULL) {
st->print_cr(" No predecessor information");
} else {
int num_predecessors = _predecessors->length();
st->print_cr(" Predecessors : %d", num_predecessors);
for (int i = 0; i < num_predecessors; i++) {
Block* predecessor = _predecessors->at(i);
st->print(" ");
predecessor->print_value_on(st);
st->cr();
}
}
if (_exceptions == NULL) {
st->print_cr(" No exception information");
} else {
@ -2270,6 +2291,9 @@ ciTypeFlow::Block* ciTypeFlow::clone_loop_head(Loop* lp, StateVector* temp_vecto
for (SuccIter iter(tail); !iter.done(); iter.next()) {
if (iter.succ() == head) {
iter.set_succ(clone);
// Update predecessor information
head->predecessors()->remove(tail);
clone->predecessors()->append(tail);
}
}
flow_block(tail, temp_vector, temp_set);
@ -2279,6 +2303,9 @@ ciTypeFlow::Block* ciTypeFlow::clone_loop_head(Loop* lp, StateVector* temp_vecto
for (SuccIter iter(clone); !iter.done(); iter.next()) {
if (iter.succ() == head) {
iter.set_succ(clone);
// Update predecessor information
head->predecessors()->remove(clone);
clone->predecessors()->append(clone);
break;
}
}
@ -2883,6 +2910,69 @@ void ciTypeFlow::do_flow() {
}
}
// ------------------------------------------------------------------
// ciTypeFlow::is_dominated_by
//
// Determine if the instruction at bci is dominated by the instruction at dom_bci.
bool ciTypeFlow::is_dominated_by(int bci, int dom_bci) {
assert(!method()->has_jsrs(), "jsrs are not supported");
ResourceMark rm;
JsrSet* jsrs = new ciTypeFlow::JsrSet(NULL);
int index = _methodBlocks->block_containing(bci)->index();
int dom_index = _methodBlocks->block_containing(dom_bci)->index();
Block* block = get_block_for(index, jsrs, ciTypeFlow::no_create);
Block* dom_block = get_block_for(dom_index, jsrs, ciTypeFlow::no_create);
// Start block dominates all other blocks
if (start_block()->rpo() == dom_block->rpo()) {
return true;
}
// Dominated[i] is true if block i is dominated by dom_block
int num_blocks = _methodBlocks->num_blocks();
bool* dominated = NEW_RESOURCE_ARRAY(bool, num_blocks);
for (int i = 0; i < num_blocks; ++i) {
dominated[i] = true;
}
dominated[start_block()->rpo()] = false;
// Iterative dominator algorithm
bool changed = true;
while (changed) {
changed = false;
// Use reverse postorder iteration
for (Block* blk = _rpo_list; blk != NULL; blk = blk->rpo_next()) {
if (blk->is_start()) {
// Ignore start block
continue;
}
// The block is dominated if it is the dominating block
// itself or if all predecessors are dominated.
int index = blk->rpo();
bool dom = (index == dom_block->rpo());
if (!dom) {
// Check if all predecessors are dominated
dom = true;
for (int i = 0; i < blk->predecessors()->length(); ++i) {
Block* pred = blk->predecessors()->at(i);
if (!dominated[pred->rpo()]) {
dom = false;
break;
}
}
}
// Update dominator information
if (dominated[index] != dom) {
changed = true;
dominated[index] = dom;
}
}
}
// block dominated by dom_block?
return dominated[block->rpo()];
}
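The query above is an optimistic fixpoint over reverse postorder: every block except the start block begins as "dominated", and a block stays dominated only if it is dom_block itself or all of its predecessors remain dominated. The same iteration on a small hand-built predecessor graph, independent of ciTypeFlow:

#include <cstdio>
#include <vector>

// preds[i] lists the predecessors of block i; block 0 is the start block.
static std::vector<bool> dominated_by(const std::vector<std::vector<int> >& preds, int dom) {
  const int n = (int)preds.size();
  std::vector<bool> dominated(n, true);
  dominated[0] = false;                       // the start block dominates everything else
  bool changed = true;
  while (changed) {
    changed = false;
    for (int i = 1; i < n; i++) {             // skip the start block
      bool dom_now = (i == dom);
      if (!dom_now) {
        dom_now = true;
        for (size_t p = 0; p < preds[i].size(); p++) {
          if (!dominated[preds[i][p]]) { dom_now = false; break; }
        }
      }
      if (dominated[i] != dom_now) { dominated[i] = dom_now; changed = true; }
    }
  }
  return dominated;
}

int main() {
  // 0 -> 1, 1 -> 2, 1 -> 3, 2 -> 4, 3 -> 4 (diamond below block 1)
  std::vector<std::vector<int> > preds(5);
  preds[1].push_back(0);
  preds[2].push_back(1);
  preds[3].push_back(1);
  preds[4].push_back(2); preds[4].push_back(3);
  std::vector<bool> d = dominated_by(preds, 1);
  for (int i = 0; i < 5; i++) printf("block %d dominated by block 1: %d\n", i, (int)d[i]);
  return 0;
}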
// ------------------------------------------------------------------
// ciTypeFlow::record_failure()
// The ciTypeFlow object keeps track of failure reasons separately from the ciEnv.

View File

@ -529,6 +529,7 @@ public:
GrowableArray<Block*>* _exceptions;
GrowableArray<ciInstanceKlass*>* _exc_klasses;
GrowableArray<Block*>* _successors;
GrowableArray<Block*>* _predecessors;
StateVector* _state;
JsrSet* _jsrs;
@ -617,6 +618,12 @@ public:
return _successors;
}
// Predecessors of this block (including exception edges)
GrowableArray<Block*>* predecessors() {
assert(_predecessors != NULL, "must be filled in");
return _predecessors;
}
// Get the exceptional successors for this Block.
GrowableArray<Block*>* exceptions() {
if (_exceptions == NULL) {
@ -941,6 +948,9 @@ public:
// Perform type inference flow analysis.
void do_flow();
// Determine if bci is dominated by dom_bci
bool is_dominated_by(int bci, int dom_bci);
void print_on(outputStream* st) const PRODUCT_RETURN;
void rpo_print_on(outputStream* st) const PRODUCT_RETURN;

View File

@ -28,6 +28,7 @@
#include "classfile/stringTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/debugInfo.hpp"
#include "code/dependencyContext.hpp"
#include "code/pcDesc.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/oopFactory.hpp"
@ -3216,14 +3217,16 @@ void java_lang_invoke_MethodHandleNatives_CallSiteContext::compute_offsets() {
}
}
nmethodBucket* java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(oop call_site) {
DependencyContext java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(oop call_site) {
assert(java_lang_invoke_MethodHandleNatives_CallSiteContext::is_instance(call_site), "");
return (nmethodBucket*) (address) call_site->long_field(_vmdependencies_offset);
}
void java_lang_invoke_MethodHandleNatives_CallSiteContext::set_vmdependencies(oop call_site, nmethodBucket* context) {
assert(java_lang_invoke_MethodHandleNatives_CallSiteContext::is_instance(call_site), "");
call_site->long_field_put(_vmdependencies_offset, (jlong) (address) context);
intptr_t* vmdeps_addr = (intptr_t*)call_site->address_field_addr(_vmdependencies_offset);
#ifndef ASSERT
DependencyContext dep_ctx(vmdeps_addr);
#else
// Verify that call_site isn't moved during DependencyContext lifetime.
DependencyContext dep_ctx(vmdeps_addr, Handle(call_site));
#endif // ASSERT
return dep_ctx;
}
// Support for java_security_AccessControlContext

View File

@ -1212,6 +1212,8 @@ public:
#define CALLSITECONTEXT_INJECTED_FIELDS(macro) \
macro(java_lang_invoke_MethodHandleNatives_CallSiteContext, vmdependencies, intptr_signature, false)
class DependencyContext;
class java_lang_invoke_MethodHandleNatives_CallSiteContext : AllStatic {
friend class JavaClasses;
@ -1222,8 +1224,7 @@ private:
public:
// Accessors
static nmethodBucket* vmdependencies(oop context);
static void set_vmdependencies(oop context, nmethodBucket* bucket);
static DependencyContext vmdependencies(oop context);
// Testers
static bool is_subclass(Klass* klass) {

View File

@ -109,6 +109,7 @@
template(java_io_ByteArrayInputStream, "java/io/ByteArrayInputStream") \
template(java_io_Serializable, "java/io/Serializable") \
template(java_util_Arrays, "java/util/Arrays") \
template(java_util_Objects, "java/util/Objects") \
template(java_util_Properties, "java/util/Properties") \
template(java_util_Vector, "java/util/Vector") \
template(java_util_AbstractList, "java/util/AbstractList") \
@ -883,6 +884,9 @@
do_intrinsic(_equalsL, java_lang_StringLatin1,equals_name, equalsB_signature, F_S) \
do_intrinsic(_equalsU, java_lang_StringUTF16, equals_name, equalsB_signature, F_S) \
\
do_intrinsic(_Objects_checkIndex, java_util_Objects, checkIndex_name, Objects_checkIndex_signature, F_S) \
do_signature(Objects_checkIndex_signature, "(IILjava/util/function/BiFunction;)I") \
\
do_class(java_nio_Buffer, "java/nio/Buffer") \
do_intrinsic(_checkIndex, java_nio_Buffer, checkIndex_name, int_int_signature, F_R) \
do_name( checkIndex_name, "checkIndex") \

View File

@ -133,18 +133,47 @@ class CodeBlob_sizes {
address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
int CodeCache::_codemem_full_count = 0;
// Initialize array of CodeHeaps
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
// Prepare error message
const char* error = "Invalid code heap sizes";
err_msg message("NonNMethodCodeHeapSize (%zuK) + ProfiledCodeHeapSize (%zuK) + NonProfiledCodeHeapSize (%zuK) = %zuK",
non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);
if (total_size > cache_size) {
// Some code heap sizes were explicitly set: total_size must be <= cache_size
message.append(" is greater than ReservedCodeCacheSize (%zuK).", cache_size/K);
vm_exit_during_initialization(error, message);
} else if (all_set && total_size != cache_size) {
// All code heap sizes were explicitly set: total_size must equal cache_size
message.append(" is not equal to ReservedCodeCacheSize (%zuK).", cache_size/K);
vm_exit_during_initialization(error, message);
}
}
void CodeCache::initialize_heaps() {
bool non_nmethod_set = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
bool profiled_set = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
bool non_profiled_set = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
size_t min_size = os::vm_page_size();
size_t cache_size = ReservedCodeCacheSize;
size_t non_nmethod_size = NonNMethodCodeHeapSize;
size_t profiled_size = ProfiledCodeHeapSize;
size_t non_profiled_size = NonProfiledCodeHeapSize;
// Check if total size set via command line flags exceeds the reserved size
check_heap_sizes((non_nmethod_set ? non_nmethod_size : min_size),
(profiled_set ? profiled_size : min_size),
(non_profiled_set ? non_profiled_size : min_size),
cache_size,
non_nmethod_set && profiled_set && non_profiled_set);
// Determine size of compiler buffers
size_t code_buffers_size = 0;
#ifdef COMPILER1
@ -159,51 +188,94 @@ void CodeCache::initialize_heaps() {
code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif
// Increase default non_nmethod_size to account for compiler buffers
if (!non_nmethod_set) {
non_nmethod_size += code_buffers_size;
}
// Calculate default CodeHeap sizes if not set by user
if (!FLAG_IS_CMDLINE(NonNMethodCodeHeapSize) && !FLAG_IS_CMDLINE(ProfiledCodeHeapSize)
&& !FLAG_IS_CMDLINE(NonProfiledCodeHeapSize)) {
// Increase default NonNMethodCodeHeapSize to account for compiler buffers
FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + code_buffers_size);
if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
// Check if we have enough space for the non-nmethod code heap
if (ReservedCodeCacheSize > NonNMethodCodeHeapSize) {
// Use the default value for NonNMethodCodeHeapSize and one half of the
// remaining size for non-profiled methods and one half for profiled methods
size_t remaining_size = ReservedCodeCacheSize - NonNMethodCodeHeapSize;
size_t profiled_size = remaining_size / 2;
size_t non_profiled_size = remaining_size - profiled_size;
FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
if (cache_size > non_nmethod_size) {
// Use the default value for non_nmethod_size and one half of the
// remaining size for non-profiled and one half for profiled methods
size_t remaining_size = cache_size - non_nmethod_size;
profiled_size = remaining_size / 2;
non_profiled_size = remaining_size - profiled_size;
} else {
// Use all space for the non-nmethod heap and set other heaps to minimal size
FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, os::vm_page_size());
FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, os::vm_page_size());
non_nmethod_size = cache_size - 2 * min_size;
profiled_size = min_size;
non_profiled_size = min_size;
}
} else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
// The user explicitly set some code heap sizes. Increase or decrease the (default)
// sizes of the other code heaps accordingly. First adapt non-profiled and profiled
// code heap sizes and then only change non-nmethod code heap size if still necessary.
intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
if (non_profiled_set) {
if (!profiled_set) {
// Adapt size of profiled code heap
if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
// Not enough space available, set to minimum size
diff_size += profiled_size - min_size;
profiled_size = min_size;
} else {
profiled_size += diff_size;
diff_size = 0;
}
}
} else if (profiled_set) {
// Adapt size of non-profiled code heap
if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
// Not enough space available, set to minimum size
diff_size += non_profiled_size - min_size;
non_profiled_size = min_size;
} else {
non_profiled_size += diff_size;
diff_size = 0;
}
} else if (non_nmethod_set) {
// Distribute remaining size between profiled and non-profiled code heaps
diff_size = cache_size - non_nmethod_size;
profiled_size = diff_size / 2;
non_profiled_size = diff_size - profiled_size;
diff_size = 0;
}
if (diff_size != 0) {
// Use non-nmethod code heap for remaining space requirements
assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
non_nmethod_size += diff_size;
}
}
// We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
if(!heap_available(CodeBlobType::MethodProfiled)) {
FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
non_profiled_size += profiled_size;
profiled_size = 0;
}
// We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
if(!heap_available(CodeBlobType::MethodNonProfiled)) {
FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + NonProfiledCodeHeapSize);
FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
non_nmethod_size += non_profiled_size;
non_profiled_size = 0;
}
// Make sure we have enough space for VM internal code
uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
if (non_nmethod_size < (min_code_cache_size + code_buffers_size)) {
vm_exit_during_initialization(err_msg(
"Not enough space in non-nmethod code heap to run VM: %zuK < %zuK",
non_nmethod_size/K, (min_code_cache_size + code_buffers_size)/K));
}
guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");
// Verify sizes and update flag values
assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, non_nmethod_size);
FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
// Align CodeHeaps
size_t alignment = heap_alignment();
size_t non_method_size = align_size_up(NonNMethodCodeHeapSize, alignment);
size_t profiled_size = align_size_down(ProfiledCodeHeapSize, alignment);
non_nmethod_size = align_size_up(non_nmethod_size, alignment);
profiled_size = align_size_down(profiled_size, alignment);
// Reserve one continuous chunk of memory for CodeHeaps and split it into
// parts for the individual heaps. The memory layout looks like this:
@ -212,9 +284,9 @@ void CodeCache::initialize_heaps() {
// Profiled nmethods
// Non-nmethods
// ---------- low ------------
ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
ReservedSpace non_method_space = rs.first_part(non_method_size);
ReservedSpace rest = rs.last_part(non_method_size);
ReservedCodeSpace rs = reserve_heap_memory(cache_size);
ReservedSpace non_method_space = rs.first_part(non_nmethod_size);
ReservedSpace rest = rs.last_part(non_nmethod_size);
ReservedSpace profiled_space = rest.first_part(profiled_size);
ReservedSpace non_profiled_space = rest.last_part(profiled_size);
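A worked example of the adjustment arithmetic above, with hypothetical sizes rather than the VM defaults: ReservedCodeCacheSize is 100 MB, only ProfiledCodeHeapSize was set on the command line (40 MB), and the default non-nmethod and non-profiled sizes are 10 MB and 20 MB. Then diff_size = 100 - (10 + 40 + 20) = 30 MB, the non-profiled heap absorbs it (20 + 30 = 50 MB) and the non-nmethod heap is left alone. The same branch in isolation:

#include <cstdio>

int main() {
  // Hypothetical sizes in MB; only the profiled heap was set explicitly.
  long cache_size = 100, non_nmethod_size = 10, profiled_size = 40, non_profiled_size = 20;
  long min_size = 1;
  long diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
  // profiled_set && !non_profiled_set: adapt the non-profiled code heap.
  if (diff_size < 0 && non_profiled_size + diff_size <= 0) {
    diff_size += non_profiled_size - min_size;   // not enough room, clamp to the minimum
    non_profiled_size = min_size;
  } else {
    non_profiled_size += diff_size;
    diff_size = 0;
  }
  if (diff_size != 0) {
    non_nmethod_size += diff_size;               // any remaining delta goes to the non-nmethod heap
  }
  printf("non-nmethod=%ldM profiled=%ldM non-profiled=%ldM\n",
         non_nmethod_size, profiled_size, non_profiled_size);
  return 0;
}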
@ -420,42 +492,41 @@ CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool strict) {
}
}
print_trace("allocation", cb, size);
_number_of_blobs++;
return cb;
}
void CodeCache::free(CodeBlob* cb) {
assert_locked_or_safepoint(CodeCache_lock);
CodeHeap* heap = get_code_heap(cb);
print_trace("free", cb);
if (cb->is_nmethod()) {
_number_of_nmethods--;
heap->set_nmethod_count(heap->nmethod_count() - 1);
if (((nmethod *)cb)->has_dependencies()) {
_number_of_nmethods_with_dependencies--;
}
}
if (cb->is_adapter_blob()) {
_number_of_adapters--;
heap->set_adapter_count(heap->adapter_count() - 1);
}
_number_of_blobs--;
// Get heap for given CodeBlob and deallocate
get_code_heap(cb)->deallocate(cb);
assert(_number_of_blobs >= 0, "sanity check");
assert(heap->blob_count() >= 0, "sanity check");
}
void CodeCache::commit(CodeBlob* cb) {
// this is called by nmethod::nmethod, which must already own CodeCache_lock
assert_locked_or_safepoint(CodeCache_lock);
CodeHeap* heap = get_code_heap(cb);
if (cb->is_nmethod()) {
_number_of_nmethods++;
heap->set_nmethod_count(heap->nmethod_count() + 1);
if (((nmethod *)cb)->has_dependencies()) {
_number_of_nmethods_with_dependencies++;
}
}
if (cb->is_adapter_blob()) {
_number_of_adapters++;
heap->set_adapter_count(heap->adapter_count() + 1);
}
// flush the hardware I-cache
@ -577,11 +648,9 @@ void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
if (TraceScavenge) {
cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
}
#endif //PRODUCT
if (is_live) {
// Perform cur->oops_do(f), maybe just once per nmethod.
f->do_code_blob(cur);
@ -774,6 +843,55 @@ void CodeCache::verify_oops() {
}
}
int CodeCache::blob_count(int code_blob_type) {
CodeHeap* heap = get_code_heap(code_blob_type);
return (heap != NULL) ? heap->blob_count() : 0;
}
int CodeCache::blob_count() {
int count = 0;
FOR_ALL_HEAPS(heap) {
count += (*heap)->blob_count();
}
return count;
}
int CodeCache::nmethod_count(int code_blob_type) {
CodeHeap* heap = get_code_heap(code_blob_type);
return (heap != NULL) ? heap->nmethod_count() : 0;
}
int CodeCache::nmethod_count() {
int count = 0;
FOR_ALL_HEAPS(heap) {
count += (*heap)->nmethod_count();
}
return count;
}
int CodeCache::adapter_count(int code_blob_type) {
CodeHeap* heap = get_code_heap(code_blob_type);
return (heap != NULL) ? heap->adapter_count() : 0;
}
int CodeCache::adapter_count() {
int count = 0;
FOR_ALL_HEAPS(heap) {
count += (*heap)->adapter_count();
}
return count;
}
address CodeCache::low_bound(int code_blob_type) {
CodeHeap* heap = get_code_heap(code_blob_type);
return (heap != NULL) ? (address)heap->low_boundary() : NULL;
}
address CodeCache::high_bound(int code_blob_type) {
CodeHeap* heap = get_code_heap(code_blob_type);
return (heap != NULL) ? (address)heap->high_boundary() : NULL;
}
size_t CodeCache::capacity() {
size_t cap = 0;
FOR_ALL_HEAPS(heap) {
@ -863,6 +981,9 @@ void CodeCache::initialize() {
initialize_heaps();
} else {
// Use a single code heap
FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, 0);
FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
add_heap(rs, "CodeCache", CodeBlobType::All);
}
@ -1104,9 +1225,8 @@ void CodeCache::report_codemem_full(int code_blob_type, bool print) {
CodeHeap* heap = get_code_heap(code_blob_type);
assert(heap != NULL, "heap is null");
if (!heap->was_full() || print) {
if ((heap->full_count() == 0) || print) {
// Not yet reported for this heap, report
heap->report_full();
if (SegmentedCodeCache) {
warning("%s is full. Compiler has been disabled.", get_code_heap_name(code_blob_type));
warning("Try increasing the code heap size using -XX:%s=", get_code_heap_flag_name(code_blob_type));
@ -1125,18 +1245,19 @@ void CodeCache::report_codemem_full(int code_blob_type, bool print) {
tty->print("%s", s.as_string());
}
_codemem_full_count++;
heap->report_full();
EventCodeCacheFull event;
if (event.should_commit()) {
event.set_codeBlobType((u1)code_blob_type);
event.set_startAddress((u8)heap->low_boundary());
event.set_commitedTopAddress((u8)heap->high());
event.set_reservedTopAddress((u8)heap->high_boundary());
event.set_entryCount(nof_blobs());
event.set_methodCount(nof_nmethods());
event.set_adaptorCount(nof_adapters());
event.set_entryCount(heap->blob_count());
event.set_methodCount(heap->nmethod_count());
event.set_adaptorCount(heap->adapter_count());
event.set_unallocatedCapacity(heap->unallocated_capacity()/K);
event.set_fullCount(_codemem_full_count);
event.set_fullCount(heap->full_count());
event.commit();
}
}
@ -1360,7 +1481,7 @@ void CodeCache::print_summary(outputStream* st, bool detailed) {
if (detailed) {
st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
" adapters=" UINT32_FORMAT,
nof_blobs(), nof_nmethods(), nof_adapters());
blob_count(), nmethod_count(), adapter_count());
st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
"enabled" : Arguments::mode() == Arguments::_int ?
"disabled (interpreter mode)" :
@ -1392,6 +1513,6 @@ void CodeCache::print_layout(outputStream* st) {
void CodeCache::log_state(outputStream* st) {
st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
" adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
nof_blobs(), nof_nmethods(), nof_adapters(),
blob_count(), nmethod_count(), adapter_count(),
unallocated_capacity());
}

View File

@ -85,26 +85,23 @@ class CodeCache : AllStatic {
static address _low_bound; // Lower bound of CodeHeap addresses
static address _high_bound; // Upper bound of CodeHeap addresses
static int _number_of_blobs; // Total number of CodeBlobs in the cache
static int _number_of_adapters; // Total number of Adapters in the cache
static int _number_of_nmethods; // Total number of nmethods in the cache
static int _number_of_nmethods_with_dependencies; // Total number of nmethods with dependencies
static bool _needs_cache_clean; // True if inline caches of the nmethods needs to be flushed
static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link()
static int _codemem_full_count; // Number of times a CodeHeap in the cache was full
static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;
// CodeHeap management
static void initialize_heaps(); // Initializes the CodeHeaps
// Check the code heap sizes set by the user via command line
static void check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set);
// Creates a new heap with the given name and size, containing CodeBlobs of the given type
static void add_heap(ReservedSpace rs, const char* name, int code_blob_type);
static CodeHeap* get_code_heap(const CodeBlob* cb); // Returns the CodeHeap for the given CodeBlob
static CodeHeap* get_code_heap(int code_blob_type); // Returns the CodeHeap for the given CodeBlobType
// Returns the name of the VM option to set the size of the corresponding CodeHeap
static const char* get_code_heap_flag_name(int code_blob_type);
static bool heap_available(int code_blob_type); // Returns true if an own CodeHeap for the given CodeBlobType is available
static size_t heap_alignment(); // Returns the alignment of the CodeHeaps in bytes
static ReservedCodeSpace reserve_heap_memory(size_t size); // Reserves one continuous chunk of memory for the CodeHeaps
@ -139,9 +136,12 @@ class CodeCache : AllStatic {
static CodeBlob* find_blob_unsafe(void* start); // Same as find_blob but does not fail if looking up a zombie method
static nmethod* find_nmethod(void* start); // Returns the nmethod containing the given address
static int nof_blobs() { return _number_of_blobs; } // Returns the total number of CodeBlobs in the cache
static int nof_adapters() { return _number_of_adapters; } // Returns the total number of Adapters in the cache
static int nof_nmethods() { return _number_of_nmethods; } // Returns the total number of nmethods in the cache
static int blob_count(); // Returns the total number of CodeBlobs in the cache
static int blob_count(int code_blob_type);
static int adapter_count(); // Returns the total number of Adapters in the cache
static int adapter_count(int code_blob_type);
static int nmethod_count(); // Returns the total number of nmethods in the cache
static int nmethod_count(int code_blob_type);
// GC support
static void gc_epilogue();
@ -177,7 +177,9 @@ class CodeCache : AllStatic {
// The full limits of the codeCache
static address low_bound() { return _low_bound; }
static address low_bound(int code_blob_type);
static address high_bound() { return _high_bound; }
static address high_bound(int code_blob_type);
// Profiling
static size_t capacity();
@ -191,6 +193,9 @@ class CodeCache : AllStatic {
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
static void clear_inline_caches(); // clear all inline caches
// Returns true if an own CodeHeap for the given CodeBlobType is available
static bool heap_available(int code_blob_type);
// Returns the CodeBlobType for the given nmethod
static int get_code_blob_type(nmethod* nm) {
return get_code_heap(nm)->code_blob_type();
@ -239,7 +244,10 @@ class CodeCache : AllStatic {
// tells how many nmethods have dependencies
static int number_of_nmethods_with_dependencies();
static int get_codemem_full_count() { return _codemem_full_count; }
static int get_codemem_full_count(int code_blob_type) {
CodeHeap* heap = get_code_heap(code_blob_type);
return (heap != NULL) ? heap->full_count() : 0;
}
};

View File

@ -0,0 +1,347 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "code/dependencies.hpp"
#include "code/dependencyContext.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/perfData.hpp"
#include "utilities/exceptions.hpp"
PerfCounter* DependencyContext::_perf_total_buckets_allocated_count = NULL;
PerfCounter* DependencyContext::_perf_total_buckets_deallocated_count = NULL;
PerfCounter* DependencyContext::_perf_total_buckets_stale_count = NULL;
PerfCounter* DependencyContext::_perf_total_buckets_stale_acc_count = NULL;
void dependencyContext_init() {
DependencyContext::init();
}
void DependencyContext::init() {
if (UsePerfData) {
EXCEPTION_MARK;
_perf_total_buckets_allocated_count =
PerfDataManager::create_counter(SUN_CI, "nmethodBucketsAllocated", PerfData::U_Events, CHECK);
_perf_total_buckets_deallocated_count =
PerfDataManager::create_counter(SUN_CI, "nmethodBucketsDeallocated", PerfData::U_Events, CHECK);
_perf_total_buckets_stale_count =
PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStale", PerfData::U_Events, CHECK);
_perf_total_buckets_stale_acc_count =
PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStaleAccumulated", PerfData::U_Events, CHECK);
}
}
//
// Walk the list of dependent nmethods searching for nmethods which
// are dependent on the changes that were passed in and mark them for
// deoptimization. Returns the number of nmethods found.
//
int DependencyContext::mark_dependent_nmethods(DepChange& changes) {
int found = 0;
for (nmethodBucket* b = dependencies(); b != NULL; b = b->next()) {
nmethod* nm = b->get_nmethod();
// since dependencies aren't removed until an nmethod becomes a zombie,
// the dependency list may contain nmethods which aren't alive.
if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
if (TraceDependencies) {
ResourceMark rm;
tty->print_cr("Marked for deoptimization");
changes.print();
nm->print();
nm->print_dependencies();
}
nm->mark_for_deoptimization();
found++;
}
}
return found;
}
//
// Add an nmethod to the dependency context.
// It's possible that an nmethod has multiple dependencies on a klass
// so a count is kept for each bucket to guarantee that creation and
// deletion of dependencies is consistent.
//
void DependencyContext::add_dependent_nmethod(nmethod* nm, bool expunge) {
assert_lock_strong(CodeCache_lock);
for (nmethodBucket* b = dependencies(); b != NULL; b = b->next()) {
if (nm == b->get_nmethod()) {
b->increment();
return;
}
}
set_dependencies(new nmethodBucket(nm, dependencies()));
if (UsePerfData) {
_perf_total_buckets_allocated_count->inc();
}
if (expunge) {
// Remove stale entries from the list.
expunge_stale_entries();
}
}
//
// Remove an nmethod dependency from the context.
// Decrement count of the nmethod in the dependency list and, optionally, remove
// the bucket completely when the count goes to 0. This method must find
// a corresponding bucket otherwise there's a bug in the recording of dependencies.
// Can be called concurrently by parallel GC threads.
//
void DependencyContext::remove_dependent_nmethod(nmethod* nm, bool expunge) {
assert_locked_or_safepoint(CodeCache_lock);
nmethodBucket* first = dependencies();
nmethodBucket* last = NULL;
for (nmethodBucket* b = first; b != NULL; b = b->next()) {
if (nm == b->get_nmethod()) {
int val = b->decrement();
guarantee(val >= 0, "Underflow: %d", val);
if (val == 0) {
if (expunge) {
if (last == NULL) {
set_dependencies(b->next());
} else {
last->set_next(b->next());
}
delete b;
if (UsePerfData) {
_perf_total_buckets_deallocated_count->inc();
}
} else {
// Mark the context as having stale entries, since it is not safe to
// expunge the list right now.
set_has_stale_entries(true);
if (UsePerfData) {
_perf_total_buckets_stale_count->inc();
_perf_total_buckets_stale_acc_count->inc();
}
}
}
if (expunge) {
// Remove stale entries from the list.
expunge_stale_entries();
}
return;
}
last = b;
}
#ifdef ASSERT
tty->print_raw_cr("### can't find dependent nmethod");
nm->print();
#endif // ASSERT
ShouldNotReachHere();
}
//
// Reclaim all unused buckets.
//
void DependencyContext::expunge_stale_entries() {
assert_locked_or_safepoint(CodeCache_lock);
if (!has_stale_entries()) {
assert(!find_stale_entries(), "inconsistent info");
return;
}
nmethodBucket* first = dependencies();
nmethodBucket* last = NULL;
int removed = 0;
for (nmethodBucket* b = first; b != NULL;) {
assert(b->count() >= 0, "bucket count: %d", b->count());
nmethodBucket* next = b->next();
if (b->count() == 0) {
if (last == NULL) {
first = next;
} else {
last->set_next(next);
}
removed++;
delete b;
// last stays the same.
} else {
last = b;
}
b = next;
}
set_dependencies(first);
set_has_stale_entries(false);
if (UsePerfData && removed > 0) {
_perf_total_buckets_deallocated_count->inc(removed);
_perf_total_buckets_stale_count->dec(removed);
}
}
//
// Invalidate all dependencies in the context
int DependencyContext::remove_all_dependents() {
assert_locked_or_safepoint(CodeCache_lock);
nmethodBucket* b = dependencies();
set_dependencies(NULL);
int marked = 0;
int removed = 0;
while (b != NULL) {
nmethod* nm = b->get_nmethod();
if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization()) {
nm->mark_for_deoptimization();
marked++;
}
nmethodBucket* next = b->next();
removed++;
delete b;
b = next;
}
set_has_stale_entries(false);
if (UsePerfData && removed > 0) {
_perf_total_buckets_deallocated_count->inc(removed);
}
return marked;
}
#ifndef PRODUCT
void DependencyContext::print_dependent_nmethods(bool verbose) {
int idx = 0;
for (nmethodBucket* b = dependencies(); b != NULL; b = b->next()) {
nmethod* nm = b->get_nmethod();
tty->print("[%d] count=%d { ", idx++, b->count());
if (!verbose) {
nm->print_on(tty, "nmethod");
tty->print_cr(" } ");
} else {
nm->print();
nm->print_dependencies();
tty->print_cr("--- } ");
}
}
}
bool DependencyContext::is_dependent_nmethod(nmethod* nm) {
for (nmethodBucket* b = dependencies(); b != NULL; b = b->next()) {
if (nm == b->get_nmethod()) {
#ifdef ASSERT
int count = b->count();
assert(count >= 0, "count shouldn't be negative: %d", count);
#endif
return true;
}
}
return false;
}
bool DependencyContext::find_stale_entries() {
for (nmethodBucket* b = dependencies(); b != NULL; b = b->next()) {
if (b->count() == 0) return true;
}
return false;
}
#endif //PRODUCT
int nmethodBucket::decrement() {
return Atomic::add(-1, (volatile int *)&_count);
}
/////////////// Unit tests ///////////////
#ifndef PRODUCT
class TestDependencyContext {
public:
nmethod* _nmethods[3];
intptr_t _dependency_context;
TestDependencyContext() : _dependency_context(DependencyContext::EMPTY) {
CodeCache_lock->lock_without_safepoint_check();
DependencyContext depContext(&_dependency_context);
_nmethods[0] = reinterpret_cast<nmethod*>(0x8 * 0);
_nmethods[1] = reinterpret_cast<nmethod*>(0x8 * 1);
_nmethods[2] = reinterpret_cast<nmethod*>(0x8 * 2);
depContext.add_dependent_nmethod(_nmethods[2]);
depContext.add_dependent_nmethod(_nmethods[1]);
depContext.add_dependent_nmethod(_nmethods[0]);
}
~TestDependencyContext() {
wipe();
CodeCache_lock->unlock();
}
static void testRemoveDependentNmethod(int id, bool delete_immediately) {
TestDependencyContext c;
DependencyContext depContext(&c._dependency_context);
assert(!has_stale_entries(depContext), "check");
nmethod* nm = c._nmethods[id];
depContext.remove_dependent_nmethod(nm, delete_immediately);
if (!delete_immediately) {
assert(has_stale_entries(depContext), "check");
assert(depContext.is_dependent_nmethod(nm), "check");
depContext.expunge_stale_entries();
}
assert(!has_stale_entries(depContext), "check");
assert(!depContext.is_dependent_nmethod(nm), "check");
}
static void testRemoveDependentNmethod() {
testRemoveDependentNmethod(0, false);
testRemoveDependentNmethod(1, false);
testRemoveDependentNmethod(2, false);
testRemoveDependentNmethod(0, true);
testRemoveDependentNmethod(1, true);
testRemoveDependentNmethod(2, true);
}
static void test() {
testRemoveDependentNmethod();
}
static bool has_stale_entries(DependencyContext ctx) {
assert(ctx.has_stale_entries() == ctx.find_stale_entries(), "check");
return ctx.has_stale_entries();
}
void wipe() {
DependencyContext ctx(&_dependency_context);
nmethodBucket* b = ctx.dependencies();
ctx.set_dependencies(NULL);
ctx.set_has_stale_entries(false);
while (b != NULL) {
nmethodBucket* next = b->next();
delete b;
b = next;
}
}
};
void TestDependencyContext_test() {
TestDependencyContext::test();
}
#endif // PRODUCT

View File

@ -0,0 +1,152 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_CODE_DEPENDENCYCONTEXT_HPP
#define SHARE_VM_CODE_DEPENDENCYCONTEXT_HPP
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfData.hpp"
class nmethod;
class DepChange;
//
// nmethodBucket is used to record dependent nmethods for
// deoptimization. nmethod dependencies are actually <klass, method>
// pairs but we really only care about the klass part for purposes of
// finding nmethods which might need to be deoptimized. Instead of
// recording the method, a count of how many times a particular nmethod
// was recorded is kept. This ensures that any recording errors are
// noticed since an nmethod should be removed as many times as it is
// added.
//
class nmethodBucket: public CHeapObj<mtClass> {
friend class VMStructs;
private:
nmethod* _nmethod;
int _count;
nmethodBucket* _next;
public:
nmethodBucket(nmethod* nmethod, nmethodBucket* next) :
_nmethod(nmethod), _next(next), _count(1) {}
int count() { return _count; }
int increment() { _count += 1; return _count; }
int decrement();
nmethodBucket* next() { return _next; }
void set_next(nmethodBucket* b) { _next = b; }
nmethod* get_nmethod() { return _nmethod; }
};
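// A short usage sketch, not part of this change, of the counting convention
// described above; the context slot and the nmethod pointer 'nm' are
// hypothetical, and the calls mirror the unit test in dependencyContext.cpp
// shown earlier:
//
//   intptr_t slot = DependencyContext::EMPTY;
//   DependencyContext ctx(&slot);        // CodeCache_lock held, as in the unit test
//   ctx.add_dependent_nmethod(nm);       // new bucket, count == 1
//   ctx.add_dependent_nmethod(nm);       // same bucket, count == 2
//   ctx.remove_dependent_nmethod(nm);    // count == 1, bucket kept
//   ctx.remove_dependent_nmethod(nm);    // count == 0, bucket left stale
//   ctx.expunge_stale_entries();         // zero-count bucket reclaimed
//
// An unbalanced extra remove would drive the count negative, which the
// debug-only assert in is_dependent_nmethod() is intended to catch.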
//
// Utility class to manipulate nmethod dependency context.
// The context consists of nmethodBucket* (a head of a linked list)
// and a boolean flag (whether the list contains stale entries). The structure is
// encoded as an intptr_t: the lower bit is used for the flag. This is possible
// because nmethodBucket* is aligned - the structure is malloc'ed in the C heap.
// A dependency context can be attached either to an InstanceKlass (_dep_context field)
// or to a CallSiteContext oop for call_site_target dependencies (see javaClasses.hpp).
// The DependencyContext class operates on a location which holds an intptr_t value.
//
class DependencyContext : public StackObj {
friend class VMStructs;
friend class TestDependencyContext;
private:
enum TagBits { _has_stale_entries_bit = 1, _has_stale_entries_mask = 1 };
intptr_t* _dependency_context_addr;
void set_dependencies(nmethodBucket* b) {
assert((intptr_t(b) & _has_stale_entries_mask) == 0, "should be aligned");
if (has_stale_entries()) {
*_dependency_context_addr = intptr_t(b) | _has_stale_entries_mask;
} else {
*_dependency_context_addr = intptr_t(b);
}
}
void set_has_stale_entries(bool x) {
if (x) {
*_dependency_context_addr |= _has_stale_entries_mask;
} else {
*_dependency_context_addr &= ~_has_stale_entries_mask;
}
}
nmethodBucket* dependencies() {
intptr_t value = *_dependency_context_addr;
return (nmethodBucket*) (value & ~_has_stale_entries_mask);
}
bool has_stale_entries() const {
intptr_t value = *_dependency_context_addr;
return (value & _has_stale_entries_mask) != 0;
}
static PerfCounter* _perf_total_buckets_allocated_count;
static PerfCounter* _perf_total_buckets_deallocated_count;
static PerfCounter* _perf_total_buckets_stale_count;
static PerfCounter* _perf_total_buckets_stale_acc_count;
public:
#ifdef ASSERT
// Verification for dependency contexts rooted at Java objects.
Handle _base; // non-NULL if dependency context resides in an oop (e.g. CallSite).
oop _base_oop;
DependencyContext(intptr_t* addr, Handle base = Handle())
: _dependency_context_addr(addr), _base(base)
{
_base_oop = _base();
}
~DependencyContext() {
// Base oop relocation invalidates _dependency_context_addr.
assert(_base_oop == _base(), "base oop relocation is forbidden");
}
#else
DependencyContext(intptr_t* addr) : _dependency_context_addr(addr) {}
#endif // ASSERT
static const intptr_t EMPTY = 0; // dependencies = NULL, has_stale_entries = false
static void init();
int mark_dependent_nmethods(DepChange& changes);
void add_dependent_nmethod(nmethod* nm, bool expunge_stale_entries = false);
void remove_dependent_nmethod(nmethod* nm, bool expunge_stale_entries = false);
int remove_all_dependents();
void expunge_stale_entries();
#ifndef PRODUCT
void print_dependent_nmethods(bool verbose);
bool is_dependent_nmethod(nmethod* nm);
bool find_stale_entries();
#endif //PRODUCT
};
#endif // SHARE_VM_CODE_DEPENDENCYCONTEXT_HPP
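The tagged-pointer encoding described in the DependencyContext comment above can be sketched outside HotSpot roughly as follows; the Bucket type and the encode/decode helpers are illustrative stand-ins, not part of this change:

#include <stdint.h>
#include <assert.h>

struct Bucket;                           // stand-in for nmethodBucket; heap-allocated, so aligned

static const intptr_t STALE_MASK = 1;    // the lower bit carries the has_stale_entries flag

static intptr_t encode(Bucket* head, bool has_stale) {
  assert((intptr_t(head) & STALE_MASK) == 0 && "bucket pointer must be aligned");
  return intptr_t(head) | (has_stale ? STALE_MASK : 0);
}

static Bucket* decode_head(intptr_t value)  { return (Bucket*)(value & ~STALE_MASK); }
static bool    decode_stale(intptr_t value) { return (value & STALE_MASK) != 0; }

set_dependencies(), dependencies() and has_stale_entries() in the header above are the HotSpot counterparts of encode/decode here.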

View File

@ -1539,7 +1539,7 @@ void nmethod::flush() {
if (PrintMethodFlushing) {
tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT
"/Free CodeCache:" SIZE_FORMAT "Kb",
_compile_id, p2i(this), CodeCache::nof_blobs(),
_compile_id, p2i(this), CodeCache::blob_count(),
CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024);
}
@ -1819,9 +1819,7 @@ void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred)
if (_jvmci_installed_code != NULL) {
if (_jvmci_installed_code->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(_jvmci_installed_code)) {
if (!is_alive->do_object_b(_jvmci_installed_code)) {
bs->write_ref_nmethod_pre(&_jvmci_installed_code, this);
_jvmci_installed_code = NULL;
bs->write_ref_nmethod_post(&_jvmci_installed_code, this);
clear_jvmci_installed_code();
}
} else {
if (can_unload(is_alive, (oop*)&_jvmci_installed_code, unloading_occurred)) {
@ -1922,27 +1920,6 @@ bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_
unloading_occurred = true;
}
#if INCLUDE_JVMCI
// Follow JVMCI method
if (_jvmci_installed_code != NULL) {
if (_jvmci_installed_code->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(_jvmci_installed_code)) {
if (!is_alive->do_object_b(_jvmci_installed_code)) {
_jvmci_installed_code = NULL;
}
} else {
if (can_unload(is_alive, (oop*)&_jvmci_installed_code, unloading_occurred)) {
return false;
}
}
}
if (_speculation_log != NULL) {
if (!is_alive->do_object_b(_speculation_log)) {
_speculation_log = NULL;
}
}
#endif
// Exception cache
clean_exception_cache(is_alive);
@ -2006,9 +1983,7 @@ bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_
if (_jvmci_installed_code != NULL) {
if (_jvmci_installed_code->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(_jvmci_installed_code)) {
if (!is_alive->do_object_b(_jvmci_installed_code)) {
bs->write_ref_nmethod_pre(&_jvmci_installed_code, this);
_jvmci_installed_code = NULL;
bs->write_ref_nmethod_post(&_jvmci_installed_code, this);
clear_jvmci_installed_code();
}
} else {
if (can_unload(is_alive, (oop*)&_jvmci_installed_code, unloading_occurred)) {
@ -2271,7 +2246,7 @@ bool nmethod::test_set_oops_do_mark() {
break;
}
// Mark was clear when we first saw this guy.
NOT_PRODUCT(if (TraceScavenge) print_on(tty, "oops_do, mark"));
if (TraceScavenge) { print_on(tty, "oops_do, mark"); }
return false;
}
}
@ -2280,7 +2255,7 @@ bool nmethod::test_set_oops_do_mark() {
}
void nmethod::oops_do_marking_prologue() {
NOT_PRODUCT(if (TraceScavenge) tty->print_cr("[oops_do_marking_prologue"));
if (TraceScavenge) { tty->print_cr("[oops_do_marking_prologue"); }
assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
// We use cmpxchg_ptr instead of regular assignment here because the user
// may fork a bunch of threads, and we need them all to see the same state.
@ -2302,7 +2277,7 @@ void nmethod::oops_do_marking_epilogue() {
void* required = _oops_do_mark_nmethods;
void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
guarantee(observed == required, "no races in this sequential code");
NOT_PRODUCT(if (TraceScavenge) tty->print_cr("oops_do_marking_epilogue]"));
if (TraceScavenge) { tty->print_cr("oops_do_marking_epilogue]"); }
}
class DetectScavengeRoot: public OopClosure {
@ -3373,6 +3348,14 @@ void nmethod::print_statistics() {
#endif // !PRODUCT
#if INCLUDE_JVMCI
void nmethod::clear_jvmci_installed_code() {
// This must be done carefully to maintain nmethod remembered sets properly
BarrierSet* bs = Universe::heap()->barrier_set();
bs->write_ref_nmethod_pre(&_jvmci_installed_code, this);
_jvmci_installed_code = NULL;
bs->write_ref_nmethod_post(&_jvmci_installed_code, this);
}
void nmethod::maybe_invalidate_installed_code() {
if (_jvmci_installed_code != NULL) {
if (!is_alive()) {
@ -3382,7 +3365,7 @@ void nmethod::maybe_invalidate_installed_code() {
// might want to invalidate all existing activations.
InstalledCode::set_address(_jvmci_installed_code, 0);
InstalledCode::set_entryPoint(_jvmci_installed_code, 0);
_jvmci_installed_code = NULL;
clear_jvmci_installed_code();
} else if (is_not_entrant()) {
InstalledCode::set_entryPoint(_jvmci_installed_code, 0);
}

View File

@ -602,7 +602,7 @@ public:
#if INCLUDE_JVMCI
oop jvmci_installed_code() { return _jvmci_installed_code ; }
char* jvmci_installed_code_name(char* buf, size_t buflen);
void set_jvmci_installed_code(oop installed_code) { _jvmci_installed_code = installed_code; }
void clear_jvmci_installed_code();
void maybe_invalidate_installed_code();
oop speculation_log() { return _speculation_log ; }
void set_speculation_log(oop speculation_log) { _speculation_log = speculation_log; }

View File

@ -26,6 +26,7 @@
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/dependencyContext.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/compilerOracle.hpp"
@ -237,10 +238,27 @@ CompileTaskWrapper::~CompileTaskWrapper() {
task->set_code_handle(NULL);
thread->set_env(NULL);
if (task->is_blocking()) {
MutexLocker notifier(task->lock(), thread);
task->mark_complete();
// Notify the waiting thread that the compilation has completed.
task->lock()->notify_all();
bool free_task = false;
{
MutexLocker notifier(task->lock(), thread);
task->mark_complete();
#if INCLUDE_JVMCI
if (CompileBroker::compiler(task->comp_level())->is_jvmci() &&
!task->has_waiter()) {
// The waiting thread timed out and thus did not free the task.
free_task = true;
}
#endif
if (!free_task) {
// Notify the waiting thread that the compilation has completed
// so that it can free the task.
task->lock()->notify_all();
}
}
if (free_task) {
// The task can only be freed once the task lock is released.
CompileTask::free(task);
}
} else {
task->mark_complete();
@ -547,7 +565,6 @@ void CompileBroker::compilation_init() {
PerfData::U_Ticks, CHECK);
}
if (UsePerfData) {
EXCEPTION_MARK;
@ -1302,6 +1319,11 @@ CompileTask* CompileBroker::create_compile_task(CompileQueue* queue,
return new_task;
}
// 1 second should be long enough to complete most JVMCI compilations,
// yet short enough not to stall a blocking JVMCI compilation that is
// trying to acquire a lock held by the application thread that
// submitted the compilation.
static const long BLOCKING_JVMCI_COMPILATION_TIMEOUT = 1000;
/**
* Wait for the compilation task to complete.
@ -1318,30 +1340,47 @@ void CompileBroker::wait_for_completion(CompileTask* task) {
thread->set_blocked_on_compilation(true);
methodHandle method(thread, task->method());
bool free_task;
#if INCLUDE_JVMCI
if (compiler(task->comp_level())->is_jvmci()) {
MutexLocker waiter(task->lock(), thread);
// No need to check if compilation has completed - just
// rely on the time out. The JVMCI compiler thread will
// recycle the CompileTask.
task->lock()->wait(!Mutex::_no_safepoint_check_flag, BLOCKING_JVMCI_COMPILATION_TIMEOUT);
// If the compilation completes while has_waiter is true then
// this thread is responsible for freeing the task. Otherwise
// the compiler thread will free the task.
task->clear_waiter();
free_task = task->is_complete();
} else
#endif
{
MutexLocker waiter(task->lock(), thread);
free_task = true;
while (!task->is_complete() && !is_compilation_disabled_forever()) {
task->lock()->wait();
}
}
thread->set_blocked_on_compilation(false);
if (is_compilation_disabled_forever()) {
if (free_task) {
if (is_compilation_disabled_forever()) {
CompileTask::free(task);
return;
}
// It is harmless to check this status without the lock, because
// completion is a stable property (until the task object is recycled).
assert(task->is_complete(), "Compilation should have completed");
assert(task->code_handle() == NULL, "must be reset");
// By convention, the waiter is responsible for recycling a
// blocking CompileTask. Since there is only one waiter ever
// waiting on a CompileTask, we know that no one else will
// be using this CompileTask; we can free it.
CompileTask::free(task);
return;
}
// It is harmless to check this status without the lock, because
// completion is a stable property (until the task object is recycled).
assert(task->is_complete(), "Compilation should have completed");
assert(task->code_handle() == NULL, "must be reset");
// By convention, the waiter is responsible for recycling a
// blocking CompileTask. Since there is only one waiter ever
// waiting on a CompileTask, we know that no one else will
// be using this CompileTask; we can free it.
CompileTask::free(task);
}
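A simplified, standalone sketch (not HotSpot code; it uses std::mutex and std::condition_variable instead of HotSpot's Monitor, and the Task type is made up) of the handoff that wait_for_completion() above and ~CompileTaskWrapper() earlier in this file implement for blocking JVMCI compilations: the submitter waits with a timeout, and the has_waiter flag, read under the task lock, decides which side frees the task.

#include <chrono>
#include <condition_variable>
#include <mutex>

struct Task {
  std::mutex              lock;
  std::condition_variable done;
  bool complete   = false;
  bool has_waiter = true;                     // set when a blocking JVMCI task is created
};

// Submitter side, analogous to wait_for_completion() for JVMCI tasks.
bool wait_with_timeout(Task* t) {             // returns true if the caller must free t
  std::unique_lock<std::mutex> ml(t->lock);
  t->done.wait_for(ml, std::chrono::seconds(1));
  t->has_waiter = false;                      // from now on the compiler thread owns cleanup
  return t->complete;                         // completed while we still waited -> we free it
}

// Compiler-thread side, analogous to ~CompileTaskWrapper().
bool mark_complete(Task* t) {                 // returns true if the caller must free t
  std::lock_guard<std::mutex> ml(t->lock);
  t->complete = true;
  if (!t->has_waiter) return true;            // waiter timed out and left: we free it
  t->done.notify_all();                       // waiter is still present: it will free it
  return false;
}

In both functions the caller performs the actual free only after the lock has been released, matching the comment in the destructor above.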
/**
@ -1676,13 +1715,7 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
bool should_break = false;
int task_level = task->comp_level();
// Look up matching directives
DirectiveSet* directive = DirectivesStack::getMatchingDirective(task->method(), compiler(task_level));
should_break = directive->BreakAtExecuteOption || task->check_break_at_flags();
if (should_log && !directive->LogOption) {
should_log = false;
}
DirectiveSet* directive;
{
// create the handle inside its own block so it can't
// accidentally be referenced once the thread transitions to
@ -1691,12 +1724,20 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
methodHandle method(thread, task->method());
assert(!method->is_native(), "no longer compile natives");
// Look up matching directives
directive = DirectivesStack::getMatchingDirective(method, compiler(task_level));
// Save information about this method in case of failure.
set_last_compile(thread, method, is_osr, task_level);
DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, compiler_name(task_level));
}
should_break = directive->BreakAtExecuteOption || task->check_break_at_flags();
if (should_log && !directive->LogOption) {
should_log = false;
}
// Allocate a new set of JNI handles.
push_jni_handle_block();
Method* target_handle = task->method();
@ -1716,7 +1757,8 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
EventCompilation event;
JVMCIEnv env(task, system_dictionary_modification_counter);
jvmci->compile_method(target_handle, osr_bci, &env);
methodHandle method(thread, target_handle);
jvmci->compile_method(method, osr_bci, &env);
post_compile(thread, task, event, task->code() != NULL, NULL);
} else

View File

@ -47,7 +47,7 @@ CompileTask* CompileTask::allocate() {
} else {
task = new CompileTask();
DEBUG_ONLY(_num_allocated_tasks++;)
assert (WhiteBoxAPI || _num_allocated_tasks < 10000, "Leaking compilation tasks?");
assert (WhiteBoxAPI || JVMCI_ONLY(UseJVMCICompiler ||) _num_allocated_tasks < 10000, "Leaking compilation tasks?");
task->set_next(NULL);
task->set_is_free(true);
}
@ -90,6 +90,7 @@ void CompileTask::initialize(int compile_id,
_method_holder = JNIHandles::make_global(method->method_holder()->klass_holder());
_osr_bci = osr_bci;
_is_blocking = is_blocking;
JVMCI_ONLY(_has_waiter = CompileBroker::compiler(comp_level)->is_jvmci();)
_comp_level = comp_level;
_num_inlined_bytecodes = 0;

View File

@ -53,6 +53,9 @@ class CompileTask : public CHeapObj<mtCompiler> {
bool _is_complete;
bool _is_success;
bool _is_blocking;
#if INCLUDE_JVMCI
bool _has_waiter;
#endif
int _comp_level;
int _num_inlined_bytecodes;
nmethodLocker* _code_handle; // holder of eventual result
@ -85,6 +88,10 @@ class CompileTask : public CHeapObj<mtCompiler> {
bool is_complete() const { return _is_complete; }
bool is_blocking() const { return _is_blocking; }
bool is_success() const { return _is_success; }
#if INCLUDE_JVMCI
bool has_waiter() const { return _has_waiter; }
void clear_waiter() { _has_waiter = false; }
#endif
nmethodLocker* code_handle() const { return _code_handle; }
void set_code_handle(nmethodLocker* l) { _code_handle = l; }

View File

@ -527,12 +527,14 @@ void DirectivesStack::release(CompilerDirectives* dir) {
DirectiveSet* DirectivesStack::getMatchingDirective(methodHandle method, AbstractCompiler *comp) {
assert(_depth > 0, "Must never be empty");
CompilerDirectives* dir = _top;
assert(dir != NULL, "Must be initialized");
DirectiveSet* match = NULL;
{
MutexLockerEx locker(DirectivesStack_lock, Mutex::_no_safepoint_check_flag);
CompilerDirectives* dir = _top;
assert(dir != NULL, "Must be initialized");
while (dir != NULL) {
if (dir->is_default_directive() || dir->match(method)) {
match = dir->get_for(comp);

View File

@ -67,7 +67,7 @@
cflags(VectorizeDebug, bool, false, VectorizeDebug) \
cflags(CloneMapDebug, bool, false, CloneMapDebug) \
cflags(DoReserveCopyInSuperWordDebug, bool, false, DoReserveCopyInSuperWordDebug) \
NOT_PRODUCT( cflags(IGVPrintLevel, intx, PrintIdealGraphLevel, IGVPrintLevel)) \
cflags(IGVPrintLevel, intx, PrintIdealGraphLevel, IGVPrintLevel) \
cflags(MaxNodeLimit, intx, MaxNodeLimit, MaxNodeLimit)
#else
#define compilerdirectives_c2_flags(cflags)

View File

@ -1148,7 +1148,6 @@ oop ParNewGeneration::copy_to_survivor_space(ParScanThreadState* par_scan_state,
}
assert(new_obj != NULL, "just checking");
#ifndef PRODUCT
// This code must come after the CAS test, or it will print incorrect
// information.
if (TraceScavenge) {
@ -1156,7 +1155,6 @@ oop ParNewGeneration::copy_to_survivor_space(ParScanThreadState* par_scan_state,
is_in_reserved(new_obj) ? "copying" : "tenuring",
new_obj->klass()->internal_name(), p2i(old), p2i(new_obj), new_obj->size());
}
#endif
if (forward_ptr == NULL) {
oop obj_to_push = new_obj;

View File

@ -108,14 +108,11 @@ inline void ParScanClosure::do_oop_work(T* p,
if (m->is_marked()) { // Contains forwarding pointer.
new_obj = ParNewGeneration::real_forwardee(obj);
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
#ifndef PRODUCT
if (TraceScavenge) {
gclog_or_tty->print_cr("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
"forwarded ",
new_obj->klass()->internal_name(), p2i(p), p2i((void *)obj), p2i((void *)new_obj), new_obj->size());
}
#endif
} else {
size_t obj_sz = obj->size_given_klass(objK);
new_obj = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m);

View File

@ -430,7 +430,6 @@ oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
obj = obj->forwardee();
}
#ifndef PRODUCT
if (TraceScavenge) {
gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " (%d)}",
"promotion-failure",
@ -438,7 +437,6 @@ oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
p2i(obj), obj->size());
}
#endif
return obj;
}

View File

@ -260,7 +260,6 @@ inline oop PSPromotionManager::copy_to_survivor_space(oop o) {
new_obj = o->forwardee();
}
#ifndef PRODUCT
// This code must come after the CAS test, or it will print incorrect
// information.
if (TraceScavenge) {
@ -268,7 +267,6 @@ inline oop PSPromotionManager::copy_to_survivor_space(oop o) {
should_scavenge(&new_obj) ? "copying" : "tenuring",
new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
}
#endif
return new_obj;
}
@ -285,15 +283,13 @@ inline void PSPromotionManager::copy_and_push_safe_barrier(T* p) {
? o->forwardee()
: copy_to_survivor_space<promote_immediately>(o);
#ifndef PRODUCT
// This code must come after the CAS test, or it will print incorrect
// information.
if (TraceScavenge && o->is_forwarded()) {
if (TraceScavenge && o->is_forwarded()) {
gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
"forwarding",
new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
}
#endif
oopDesc::encode_store_heap_oop_not_null(p, new_obj);

View File

@ -138,7 +138,6 @@ class PSScavengeKlassClosure: public KlassClosure {
// If the klass has not been dirtied we know that there's
// no references into the young gen and we can skip it.
#ifndef PRODUCT
if (TraceScavenge) {
ResourceMark rm;
gclog_or_tty->print_cr("PSScavengeKlassClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
@ -146,7 +145,6 @@ class PSScavengeKlassClosure: public KlassClosure {
klass->external_name(),
klass->has_modified_oops() ? "true" : "false");
}
#endif
if (klass->has_modified_oops()) {
// Clean the klass since we're going to scavenge all the metadata.

View File

@ -134,7 +134,6 @@ void FastScanClosure::do_oop(oop* p) { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
void KlassScanClosure::do_klass(Klass* klass) {
#ifndef PRODUCT
if (TraceScavenge) {
ResourceMark rm;
gclog_or_tty->print_cr("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
@ -142,7 +141,6 @@ void KlassScanClosure::do_klass(Klass* klass) {
klass->external_name(),
klass->has_modified_oops() ? "true" : "false");
}
#endif
// If the klass has not been dirtied we know that there's
// no references into the young gen and we can skip it.

View File

@ -300,7 +300,10 @@ AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m)
}
// Accessor method?
if (m->is_accessor()) {
if (m->is_getter()) {
// TODO: We should have used ::is_accessor above, but fast accessors in Zero expect only getters.
// See CppInterpreter::accessor_entry in cppInterpreter_zero.cpp. This should be fixed in Zero,
// then the call above updated to ::is_accessor
assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1");
return accessor;
}

View File

@ -71,62 +71,97 @@ Method* getMethodFromHotSpotMethod(oop hotspot_method) {
return CompilerToVM::asMethod(hotspot_method);
}
VMReg getVMRegFromLocation(oop location, int total_frame_size) {
oop reg = code_Location::reg(location);
VMReg getVMRegFromLocation(Handle location, int total_frame_size, TRAPS) {
if (location.is_null()) {
THROW_NULL(vmSymbols::java_lang_NullPointerException());
}
Handle reg = code_Location::reg(location);
jint offset = code_Location::offset(location);
if (reg != NULL) {
if (reg.not_null()) {
// register
jint number = code_Register::number(reg);
VMReg vmReg = CodeInstaller::get_hotspot_reg(number);
assert(offset % 4 == 0, "must be aligned");
return vmReg->next(offset / 4);
VMReg vmReg = CodeInstaller::get_hotspot_reg(number, CHECK_NULL);
if (offset % 4 == 0) {
return vmReg->next(offset / 4);
} else {
JVMCI_ERROR_NULL("unaligned subregister offset %d in oop map", offset);
}
} else {
// stack slot
assert(offset % 4 == 0, "must be aligned");
return VMRegImpl::stack2reg(offset / 4);
if (offset % 4 == 0) {
return VMRegImpl::stack2reg(offset / 4);
} else {
JVMCI_ERROR_NULL("unaligned stack offset %d in oop map", offset);
}
}
}
// creates a HotSpot oop map out of the byte arrays provided by DebugInfo
OopMap* CodeInstaller::create_oop_map(oop debug_info) {
oop reference_map = DebugInfo::referenceMap(debug_info);
OopMap* CodeInstaller::create_oop_map(Handle debug_info, TRAPS) {
Handle reference_map = DebugInfo::referenceMap(debug_info);
if (reference_map.is_null()) {
THROW_NULL(vmSymbols::java_lang_NullPointerException());
}
if (!reference_map->is_a(HotSpotReferenceMap::klass())) {
JVMCI_ERROR_NULL("unknown reference map: %s", reference_map->klass()->signature_name());
}
if (HotSpotReferenceMap::maxRegisterSize(reference_map) > 16) {
_has_wide_vector = true;
}
OopMap* map = new OopMap(_total_frame_size, _parameter_count);
objArrayOop objects = HotSpotReferenceMap::objects(reference_map);
objArrayOop derivedBase = HotSpotReferenceMap::derivedBase(reference_map);
typeArrayOop sizeInBytes = HotSpotReferenceMap::sizeInBytes(reference_map);
objArrayHandle objects = HotSpotReferenceMap::objects(reference_map);
objArrayHandle derivedBase = HotSpotReferenceMap::derivedBase(reference_map);
typeArrayHandle sizeInBytes = HotSpotReferenceMap::sizeInBytes(reference_map);
if (objects.is_null() || derivedBase.is_null() || sizeInBytes.is_null()) {
THROW_NULL(vmSymbols::java_lang_NullPointerException());
}
if (objects->length() != derivedBase->length() || objects->length() != sizeInBytes->length()) {
JVMCI_ERROR_NULL("arrays in reference map have different sizes: %d %d %d", objects->length(), derivedBase->length(), sizeInBytes->length());
}
for (int i = 0; i < objects->length(); i++) {
oop location = objects->obj_at(i);
oop baseLocation = derivedBase->obj_at(i);
Handle location = objects->obj_at(i);
Handle baseLocation = derivedBase->obj_at(i);
int bytes = sizeInBytes->int_at(i);
VMReg vmReg = getVMRegFromLocation(location, _total_frame_size);
if (baseLocation != NULL) {
VMReg vmReg = getVMRegFromLocation(location, _total_frame_size, CHECK_NULL);
if (baseLocation.not_null()) {
// derived oop
assert(bytes == 8, "derived oop can't be compressed");
VMReg baseReg = getVMRegFromLocation(baseLocation, _total_frame_size);
map->set_derived_oop(vmReg, baseReg);
#ifdef _LP64
if (bytes == 8) {
#else
if (bytes == 4) {
#endif
VMReg baseReg = getVMRegFromLocation(baseLocation, _total_frame_size, CHECK_NULL);
map->set_derived_oop(vmReg, baseReg);
} else {
JVMCI_ERROR_NULL("invalid derived oop size in ReferenceMap: %d", bytes);
}
#ifdef _LP64
} else if (bytes == 8) {
// wide oop
map->set_oop(vmReg);
} else {
} else if (bytes == 4) {
// narrow oop
assert(bytes == 4, "wrong size");
map->set_narrowoop(vmReg);
#else
} else if (bytes == 4) {
map->set_oop(vmReg);
#endif
} else {
JVMCI_ERROR_NULL("invalid oop size in ReferenceMap: %d", bytes);
}
}
oop callee_save_info = (oop) DebugInfo::calleeSaveInfo(debug_info);
if (callee_save_info != NULL) {
objArrayOop registers = RegisterSaveLayout::registers(callee_save_info);
typeArrayOop slots = RegisterSaveLayout::slots(callee_save_info);
Handle callee_save_info = (oop) DebugInfo::calleeSaveInfo(debug_info);
if (callee_save_info.not_null()) {
objArrayHandle registers = RegisterSaveLayout::registers(callee_save_info);
typeArrayHandle slots = RegisterSaveLayout::slots(callee_save_info);
for (jint i = 0; i < slots->length(); i++) {
oop jvmci_reg = registers->obj_at(i);
Handle jvmci_reg = registers->obj_at(i);
jint jvmci_reg_number = code_Register::number(jvmci_reg);
VMReg hotspot_reg = CodeInstaller::get_hotspot_reg(jvmci_reg_number);
VMReg hotspot_reg = CodeInstaller::get_hotspot_reg(jvmci_reg_number, CHECK_NULL);
// HotSpot stack slots are 4 bytes
jint jvmci_slot = slots->int_at(i);
jint hotspot_slot = jvmci_slot * VMRegImpl::slots_per_word;
@ -142,7 +177,7 @@ OopMap* CodeInstaller::create_oop_map(oop debug_info) {
return map;
}
Metadata* CodeInstaller::record_metadata_reference(Handle& constant) {
Metadata* CodeInstaller::record_metadata_reference(Handle constant, TRAPS) {
oop obj = HotSpotMetaspaceConstantImpl::metaspaceObject(constant);
if (obj->is_a(HotSpotResolvedObjectTypeImpl::klass())) {
Klass* klass = java_lang_Class::as_Klass(HotSpotResolvedObjectTypeImpl::javaClass(obj));
@ -157,16 +192,18 @@ Metadata* CodeInstaller::record_metadata_reference(Handle& constant) {
TRACE_jvmci_3("metadata[%d of %d] = %s", index, _oop_recorder->metadata_count(), method->name()->as_C_string());
return method;
} else {
fatal("unexpected metadata reference for constant of type %s", obj->klass()->name()->as_C_string());
return NULL;
JVMCI_ERROR_NULL("unexpected metadata reference for constant of type %s", obj->klass()->signature_name());
}
}
#ifdef _LP64
narrowKlass CodeInstaller::record_narrow_metadata_reference(Handle& constant) {
narrowKlass CodeInstaller::record_narrow_metadata_reference(Handle constant, TRAPS) {
oop obj = HotSpotMetaspaceConstantImpl::metaspaceObject(constant);
assert(HotSpotMetaspaceConstantImpl::compressed(constant), "unexpected uncompressed pointer");
assert(obj->is_a(HotSpotResolvedObjectTypeImpl::klass()), "unexpected compressed pointer of type %s", obj->klass()->name()->as_C_string());
if (!obj->is_a(HotSpotResolvedObjectTypeImpl::klass())) {
JVMCI_ERROR_0("unexpected compressed pointer of type %s", obj->klass()->signature_name());
}
Klass* klass = java_lang_Class::as_Klass(HotSpotResolvedObjectTypeImpl::javaClass(obj));
int index = _oop_recorder->find_index(klass);
@ -175,9 +212,9 @@ narrowKlass CodeInstaller::record_narrow_metadata_reference(Handle& constant) {
}
#endif
Location::Type CodeInstaller::get_oop_type(oop value) {
oop lirKind = Value::lirKind(value);
oop platformKind = LIRKind::platformKind(lirKind);
Location::Type CodeInstaller::get_oop_type(Handle value) {
Handle lirKind = Value::lirKind(value);
Handle platformKind = LIRKind::platformKind(lirKind);
assert(LIRKind::referenceMask(lirKind) == 1, "unexpected referenceMask");
if (platformKind == word_kind()) {
@ -187,24 +224,29 @@ Location::Type CodeInstaller::get_oop_type(oop value) {
}
}
ScopeValue* CodeInstaller::get_scope_value(oop value, BasicType type, GrowableArray<ScopeValue*>* objects, ScopeValue* &second) {
ScopeValue* CodeInstaller::get_scope_value(Handle value, BasicType type, GrowableArray<ScopeValue*>* objects, ScopeValue* &second, TRAPS) {
second = NULL;
if (value == Value::ILLEGAL()) {
assert(type == T_ILLEGAL, "expected legal value");
if (value.is_null()) {
THROW_NULL(vmSymbols::java_lang_NullPointerException());
} else if (value == Value::ILLEGAL()) {
if (type != T_ILLEGAL) {
JVMCI_ERROR_NULL("unexpected illegal value, expected %s", basictype_to_str(type));
}
return _illegal_value;
} else if (value->is_a(RegisterValue::klass())) {
oop reg = RegisterValue::reg(value);
Handle reg = RegisterValue::reg(value);
jint number = code_Register::number(reg);
VMReg hotspotRegister = get_hotspot_reg(number);
VMReg hotspotRegister = get_hotspot_reg(number, CHECK_NULL);
if (is_general_purpose_reg(hotspotRegister)) {
Location::Type locationType;
if (type == T_OBJECT) {
locationType = get_oop_type(value);
} else if (type == T_LONG) {
locationType = Location::lng;
} else {
assert(type == T_INT || type == T_FLOAT || type == T_SHORT || type == T_CHAR || type == T_BYTE || type == T_BOOLEAN, "unexpected type in cpu register");
} else if (type == T_INT || type == T_FLOAT || type == T_SHORT || type == T_CHAR || type == T_BYTE || type == T_BOOLEAN) {
locationType = Location::int_in_long;
} else {
JVMCI_ERROR_NULL("unexpected type %s in cpu register", basictype_to_str(type));
}
ScopeValue* value = new LocationValue(Location::new_reg_loc(locationType, hotspotRegister));
if (type == T_LONG) {
@ -212,13 +254,14 @@ ScopeValue* CodeInstaller::get_scope_value(oop value, BasicType type, GrowableAr
}
return value;
} else {
assert(type == T_FLOAT || type == T_DOUBLE, "only float and double expected in xmm register");
Location::Type locationType;
if (type == T_FLOAT) {
// this seems weird, but the same value is used in c1_LinearScan
locationType = Location::normal;
} else {
} else if (type == T_DOUBLE) {
locationType = Location::dbl;
} else {
JVMCI_ERROR_NULL("unexpected type %s in floating point register", basictype_to_str(type));
}
ScopeValue* value = new LocationValue(Location::new_reg_loc(locationType, hotspotRegister));
if (type == T_DOUBLE) {
@ -239,9 +282,10 @@ ScopeValue* CodeInstaller::get_scope_value(oop value, BasicType type, GrowableAr
locationType = Location::lng;
} else if (type == T_DOUBLE) {
locationType = Location::dbl;
} else {
assert(type == T_INT || type == T_FLOAT || type == T_SHORT || type == T_CHAR || type == T_BYTE || type == T_BOOLEAN, "unexpected type in stack slot");
} else if (type == T_INT || type == T_FLOAT || type == T_SHORT || type == T_CHAR || type == T_BYTE || type == T_BOOLEAN) {
locationType = Location::normal;
} else {
JVMCI_ERROR_NULL("unexpected type %s in stack slot", basictype_to_str(type));
}
ScopeValue* value = new LocationValue(Location::new_stk_loc(locationType, offset));
if (type == T_DOUBLE || type == T_LONG) {
@ -254,7 +298,10 @@ ScopeValue* CodeInstaller::get_scope_value(oop value, BasicType type, GrowableAr
jlong prim = PrimitiveConstant::primitive(value);
return new ConstantLongValue(prim);
} else {
assert(type == JVMCIRuntime::kindToBasicType(JavaKind::typeChar(PrimitiveConstant::kind(value))), "primitive constant type doesn't match");
BasicType constantType = JVMCIRuntime::kindToBasicType(PrimitiveConstant::kind(value), CHECK_NULL);
if (type != constantType) {
JVMCI_ERROR_NULL("primitive constant type doesn't match, expected %s but got %s", basictype_to_str(type), basictype_to_str(constantType));
}
if (type == T_INT || type == T_FLOAT) {
jint prim = (jint)PrimitiveConstant::primitive(value);
switch (prim) {
@ -264,53 +311,63 @@ ScopeValue* CodeInstaller::get_scope_value(oop value, BasicType type, GrowableAr
case 2: return _int_2_scope_value;
default: return new ConstantIntValue(prim);
}
} else {
assert(type == T_LONG || type == T_DOUBLE, "unexpected primitive constant type");
} else if (type == T_LONG || type == T_DOUBLE) {
jlong prim = PrimitiveConstant::primitive(value);
second = _int_1_scope_value;
return new ConstantLongValue(prim);
} else {
JVMCI_ERROR_NULL("unexpected primitive constant type %s", basictype_to_str(type));
}
}
} else {
assert(type == T_OBJECT, "unexpected object constant");
if (value->is_a(NullConstant::klass()) || value->is_a(HotSpotCompressedNullConstant::klass())) {
} else if (value->is_a(NullConstant::klass()) || value->is_a(HotSpotCompressedNullConstant::klass())) {
if (type == T_OBJECT) {
return _oop_null_scope_value;
} else {
assert(value->is_a(HotSpotObjectConstantImpl::klass()), "unexpected constant type");
JVMCI_ERROR_NULL("unexpected null constant, expected %s", basictype_to_str(type));
}
} else if (value->is_a(HotSpotObjectConstantImpl::klass())) {
if (type == T_OBJECT) {
oop obj = HotSpotObjectConstantImpl::object(value);
assert(obj != NULL, "null value must be in NullConstant");
if (obj == NULL) {
JVMCI_ERROR_NULL("null value must be in NullConstant");
}
return new ConstantOopWriteValue(JNIHandles::make_local(obj));
} else {
JVMCI_ERROR_NULL("unexpected object constant, expected %s", basictype_to_str(type));
}
}
} else if (value->is_a(VirtualObject::klass())) {
assert(type == T_OBJECT, "unexpected virtual object");
int id = VirtualObject::id(value);
ScopeValue* object = objects->at(id);
assert(object != NULL, "missing value");
return object;
} else {
value->klass()->print();
value->print();
if (type == T_OBJECT) {
int id = VirtualObject::id(value);
if (0 <= id && id < objects->length()) {
ScopeValue* object = objects->at(id);
if (object != NULL) {
return object;
}
}
JVMCI_ERROR_NULL("unknown virtual object id %d", id);
} else {
JVMCI_ERROR_NULL("unexpected virtual object, expected %s", basictype_to_str(type));
}
}
ShouldNotReachHere();
return NULL;
JVMCI_ERROR_NULL("unexpected value in scope: %s", value->klass()->signature_name())
}
void CodeInstaller::record_object_value(ObjectValue* sv, oop value, GrowableArray<ScopeValue*>* objects) {
oop type = VirtualObject::type(value);
void CodeInstaller::record_object_value(ObjectValue* sv, Handle value, GrowableArray<ScopeValue*>* objects, TRAPS) {
Handle type = VirtualObject::type(value);
int id = VirtualObject::id(value);
oop javaMirror = HotSpotResolvedObjectTypeImpl::javaClass(type);
Klass* klass = java_lang_Class::as_Klass(javaMirror);
bool isLongArray = klass == Universe::longArrayKlassObj();
objArrayOop values = VirtualObject::values(value);
objArrayOop slotKinds = VirtualObject::slotKinds(value);
objArrayHandle values = VirtualObject::values(value);
objArrayHandle slotKinds = VirtualObject::slotKinds(value);
for (jint i = 0; i < values->length(); i++) {
ScopeValue* cur_second = NULL;
oop object = values->obj_at(i);
oop kind = slotKinds->obj_at(i);
BasicType type = JVMCIRuntime::kindToBasicType(JavaKind::typeChar(kind));
ScopeValue* value = get_scope_value(object, type, objects, cur_second);
Handle object = values->obj_at(i);
BasicType type = JVMCIRuntime::kindToBasicType(slotKinds->obj_at(i), CHECK);
ScopeValue* value = get_scope_value(object, type, objects, cur_second, CHECK);
if (isLongArray && cur_second == NULL) {
// we're trying to put ints into a long array... this isn't really valid, but it's used for some optimizations.
@ -326,14 +383,19 @@ void CodeInstaller::record_object_value(ObjectValue* sv, oop value, GrowableArra
}
}
MonitorValue* CodeInstaller::get_monitor_value(oop value, GrowableArray<ScopeValue*>* objects) {
guarantee(value->is_a(StackLockValue::klass()), "Monitors must be of type StackLockValue");
MonitorValue* CodeInstaller::get_monitor_value(Handle value, GrowableArray<ScopeValue*>* objects, TRAPS) {
if (value.is_null()) {
THROW_NULL(vmSymbols::java_lang_NullPointerException());
}
if (!value->is_a(StackLockValue::klass())) {
JVMCI_ERROR_NULL("Monitors must be of type StackLockValue, got %s", value->klass()->signature_name());
}
ScopeValue* second = NULL;
ScopeValue* owner_value = get_scope_value(StackLockValue::owner(value), T_OBJECT, objects, second);
ScopeValue* owner_value = get_scope_value(StackLockValue::owner(value), T_OBJECT, objects, second, CHECK_NULL);
assert(second == NULL, "monitor cannot occupy two stack slots");
ScopeValue* lock_data_value = get_scope_value(StackLockValue::slot(value), T_LONG, objects, second);
ScopeValue* lock_data_value = get_scope_value(StackLockValue::slot(value), T_LONG, objects, second, CHECK_NULL);
assert(second == lock_data_value, "monitor is LONG value that occupies two stack slots");
assert(lock_data_value->is_location(), "invalid monitor location");
Location lock_data_loc = ((LocationValue*)lock_data_value)->location();
@ -346,7 +408,7 @@ MonitorValue* CodeInstaller::get_monitor_value(oop value, GrowableArray<ScopeVal
return new MonitorValue(owner_value, lock_data_loc, eliminated);
}
void CodeInstaller::initialize_dependencies(oop compiled_code, OopRecorder* recorder) {
void CodeInstaller::initialize_dependencies(oop compiled_code, OopRecorder* recorder, TRAPS) {
JavaThread* thread = JavaThread::current();
CompilerThread* compilerThread = thread->is_Compiler_thread() ? thread->as_CompilerThread() : NULL;
_oop_recorder = recorder;
@ -368,8 +430,7 @@ void CodeInstaller::initialize_dependencies(oop compiled_code, OopRecorder* reco
} else if (assumption->klass() == Assumptions_CallSiteTargetValue::klass()) {
assumption_CallSiteTargetValue(assumption);
} else {
assumption->print();
fatal("unexpected Assumption subclass");
JVMCI_ERROR("unexpected Assumption subclass %s", assumption->klass()->signature_name());
}
}
}
@ -414,18 +475,19 @@ void RelocBuffer::ensure_size(size_t bytes) {
_size = bytes;
}
JVMCIEnv::CodeInstallResult CodeInstaller::gather_metadata(Handle target, Handle& compiled_code, CodeMetadata& metadata) {
JVMCIEnv::CodeInstallResult CodeInstaller::gather_metadata(Handle target, Handle compiled_code, CodeMetadata& metadata, TRAPS) {
CodeBuffer buffer("JVMCI Compiler CodeBuffer for Metadata");
jobject compiled_code_obj = JNIHandles::make_local(compiled_code());
initialize_dependencies(JNIHandles::resolve(compiled_code_obj), NULL);
initialize_dependencies(JNIHandles::resolve(compiled_code_obj), NULL, CHECK_OK);
// Get instructions and constants CodeSections early because we need it.
_instructions = buffer.insts();
_constants = buffer.consts();
initialize_fields(target(), JNIHandles::resolve(compiled_code_obj));
if (!initialize_buffer(buffer)) {
return JVMCIEnv::code_too_large;
initialize_fields(target(), JNIHandles::resolve(compiled_code_obj), CHECK_OK);
JVMCIEnv::CodeInstallResult result = initialize_buffer(buffer, CHECK_OK);
if (result != JVMCIEnv::ok) {
return result;
}
process_exception_handlers();
@ -446,18 +508,18 @@ JVMCIEnv::CodeInstallResult CodeInstaller::gather_metadata(Handle target, Handle
}
// constructor used to create a method
JVMCIEnv::CodeInstallResult CodeInstaller::install(JVMCICompiler* compiler, Handle target, Handle& compiled_code, CodeBlob*& cb, Handle installed_code, Handle speculation_log) {
JVMCIEnv::CodeInstallResult CodeInstaller::install(JVMCICompiler* compiler, Handle target, Handle compiled_code, CodeBlob*& cb, Handle installed_code, Handle speculation_log, TRAPS) {
CodeBuffer buffer("JVMCI Compiler CodeBuffer");
jobject compiled_code_obj = JNIHandles::make_local(compiled_code());
OopRecorder* recorder = new OopRecorder(&_arena, true);
initialize_dependencies(JNIHandles::resolve(compiled_code_obj), recorder);
initialize_dependencies(JNIHandles::resolve(compiled_code_obj), recorder, CHECK_OK);
// Get instructions and constants CodeSections early because we need it.
_instructions = buffer.insts();
_constants = buffer.consts();
initialize_fields(target(), JNIHandles::resolve(compiled_code_obj));
JVMCIEnv::CodeInstallResult result = initialize_buffer(buffer);
initialize_fields(target(), JNIHandles::resolve(compiled_code_obj), CHECK_OK);
JVMCIEnv::CodeInstallResult result = initialize_buffer(buffer, CHECK_OK);
if (result != JVMCIEnv::ok) {
return result;
}
@ -500,7 +562,7 @@ JVMCIEnv::CodeInstallResult CodeInstaller::install(JVMCICompiler* compiler, Hand
return result;
}
void CodeInstaller::initialize_fields(oop target, oop compiled_code) {
void CodeInstaller::initialize_fields(oop target, oop compiled_code, TRAPS) {
if (compiled_code->is_a(HotSpotCompiledNmethod::klass())) {
Handle hotspotJavaMethod = HotSpotCompiledNmethod::method(compiled_code);
methodHandle method = getMethodFromHotSpotMethod(hotspotJavaMethod());
@ -521,7 +583,9 @@ void CodeInstaller::initialize_fields(oop target, oop compiled_code) {
// Pre-calculate the constants section size. This is required for PC-relative addressing.
_data_section_handle = JNIHandles::make_local(HotSpotCompiledCode::dataSection(compiled_code));
guarantee(HotSpotCompiledCode::dataSectionAlignment(compiled_code) <= _constants->alignment(), "Alignment inside constants section is restricted by alignment of section begin");
if ((_constants->alignment() % HotSpotCompiledCode::dataSectionAlignment(compiled_code)) != 0) {
JVMCI_ERROR("invalid data section alignment: %d", HotSpotCompiledCode::dataSectionAlignment(compiled_code));
}
_constants_size = data_section()->length();
_data_section_patches_handle = JNIHandles::make_local(HotSpotCompiledCode::dataSectionPatches(compiled_code));
@ -538,16 +602,18 @@ void CodeInstaller::initialize_fields(oop target, oop compiled_code) {
_word_kind_handle = JNIHandles::make_local(Architecture::wordKind(arch));
}
int CodeInstaller::estimate_stubs_size() {
int CodeInstaller::estimate_stubs_size(TRAPS) {
// Estimate the number of static call stubs that might be emitted.
int static_call_stubs = 0;
objArrayOop sites = this->sites();
for (int i = 0; i < sites->length(); i++) {
oop site = sites->obj_at(i);
if (site->is_a(CompilationResult_Mark::klass())) {
if (site != NULL && site->is_a(CompilationResult_Mark::klass())) {
oop id_obj = CompilationResult_Mark::id(site);
if (id_obj != NULL) {
assert(java_lang_boxing_object::is_instance(id_obj, T_INT), "Integer id expected");
if (!java_lang_boxing_object::is_instance(id_obj, T_INT)) {
JVMCI_ERROR_0("expected Integer id, got %s", id_obj->klass()->signature_name());
}
jint id = id_obj->int_field(java_lang_boxing_object::value_offset_in_bytes(T_INT));
if (id == INVOKESTATIC || id == INVOKESPECIAL) {
static_call_stubs++;
@ -559,7 +625,7 @@ int CodeInstaller::estimate_stubs_size() {
}
// perform data and call relocation on the CodeBuffer
JVMCIEnv::CodeInstallResult CodeInstaller::initialize_buffer(CodeBuffer& buffer) {
JVMCIEnv::CodeInstallResult CodeInstaller::initialize_buffer(CodeBuffer& buffer, TRAPS) {
HandleMark hm;
objArrayHandle sites = this->sites();
int locs_buffer_size = sites->length() * (relocInfo::length_limit + sizeof(relocInfo));
@ -568,7 +634,7 @@ JVMCIEnv::CodeInstallResult CodeInstaller::initialize_buffer(CodeBuffer& buffer)
// stubs. Stubs have extra relocs but they are managed by the stub
// section itself so they don't need to be accounted for in the
// locs_buffer above.
int stubs_size = estimate_stubs_size();
int stubs_size = estimate_stubs_size(CHECK_OK);
int total_size = round_to(_code_size, buffer.insts()->alignment()) + round_to(_constants_size, buffer.consts()->alignment()) + round_to(stubs_size, buffer.stubs()->alignment());
if (total_size > JVMCINMethodSizeLimit) {
@ -600,19 +666,30 @@ JVMCIEnv::CodeInstallResult CodeInstaller::initialize_buffer(CodeBuffer& buffer)
for (int i = 0; i < data_section_patches()->length(); i++) {
Handle patch = data_section_patches()->obj_at(i);
if (patch.is_null()) {
THROW_(vmSymbols::java_lang_NullPointerException(), JVMCIEnv::ok);
}
Handle reference = CompilationResult_DataPatch::reference(patch);
assert(reference->is_a(CompilationResult_ConstantReference::klass()), "patch in data section must be a ConstantReference");
if (reference.is_null()) {
THROW_(vmSymbols::java_lang_NullPointerException(), JVMCIEnv::ok);
}
if (!reference->is_a(CompilationResult_ConstantReference::klass())) {
JVMCI_ERROR_OK("invalid patch in data section: %s", reference->klass()->signature_name());
}
Handle constant = CompilationResult_ConstantReference::constant(reference);
if (constant.is_null()) {
THROW_(vmSymbols::java_lang_NullPointerException(), JVMCIEnv::ok);
}
address dest = _constants->start() + CompilationResult_Site::pcOffset(patch);
if (constant->is_a(HotSpotMetaspaceConstantImpl::klass())) {
if (HotSpotMetaspaceConstantImpl::compressed(constant)) {
#ifdef _LP64
*((narrowKlass*) dest) = record_narrow_metadata_reference(constant);
*((narrowKlass*) dest) = record_narrow_metadata_reference(constant, CHECK_OK);
#else
fatal("unexpected compressed Klass* in 32-bit mode");
JVMCI_ERROR_OK("unexpected compressed Klass* in 32-bit mode");
#endif
} else {
*((Metadata**) dest) = record_metadata_reference(constant);
*((Metadata**) dest) = record_metadata_reference(constant, CHECK_OK);
}
} else if (constant->is_a(HotSpotObjectConstantImpl::klass())) {
Handle obj = HotSpotObjectConstantImpl::object(constant);
@ -623,48 +700,49 @@ JVMCIEnv::CodeInstallResult CodeInstaller::initialize_buffer(CodeBuffer& buffer)
#ifdef _LP64
_constants->relocate(dest, oop_Relocation::spec(oop_index), relocInfo::narrow_oop_in_const);
#else
fatal("unexpected compressed oop in 32-bit mode");
JVMCI_ERROR_OK("unexpected compressed oop in 32-bit mode");
#endif
} else {
_constants->relocate(dest, oop_Relocation::spec(oop_index));
}
} else {
ShouldNotReachHere();
JVMCI_ERROR_OK("invalid constant in data section: %s", constant->klass()->signature_name());
}
}
jint last_pc_offset = -1;
for (int i = 0; i < sites->length(); i++) {
{
No_Safepoint_Verifier no_safepoint;
oop site = sites->obj_at(i);
jint pc_offset = CompilationResult_Site::pcOffset(site);
if (site->is_a(CompilationResult_Call::klass())) {
TRACE_jvmci_4("call at %i", pc_offset);
site_Call(buffer, pc_offset, site);
} else if (site->is_a(CompilationResult_Infopoint::klass())) {
// three reasons for infopoints denote actual safepoints
oop reason = CompilationResult_Infopoint::reason(site);
if (InfopointReason::SAFEPOINT() == reason || InfopointReason::CALL() == reason || InfopointReason::IMPLICIT_EXCEPTION() == reason) {
TRACE_jvmci_4("safepoint at %i", pc_offset);
site_Safepoint(buffer, pc_offset, site);
} else {
// if the infopoint is not an actual safepoint, it must have one of the other reasons
// (safeguard against new safepoint types that require handling above)
assert(InfopointReason::METHOD_START() == reason || InfopointReason::METHOD_END() == reason || InfopointReason::LINE_NUMBER() == reason, "");
site_Infopoint(buffer, pc_offset, site);
}
} else if (site->is_a(CompilationResult_DataPatch::klass())) {
TRACE_jvmci_4("datapatch at %i", pc_offset);
site_DataPatch(buffer, pc_offset, site);
} else if (site->is_a(CompilationResult_Mark::klass())) {
TRACE_jvmci_4("mark at %i", pc_offset);
site_Mark(buffer, pc_offset, site);
} else {
fatal("unexpected Site subclass");
}
last_pc_offset = pc_offset;
Handle site = sites->obj_at(i);
if (site.is_null()) {
THROW_(vmSymbols::java_lang_NullPointerException(), JVMCIEnv::ok);
}
jint pc_offset = CompilationResult_Site::pcOffset(site);
if (site->is_a(CompilationResult_Call::klass())) {
TRACE_jvmci_4("call at %i", pc_offset);
site_Call(buffer, pc_offset, site, CHECK_OK);
} else if (site->is_a(CompilationResult_Infopoint::klass())) {
// three reasons for infopoints denote actual safepoints
oop reason = CompilationResult_Infopoint::reason(site);
if (InfopointReason::SAFEPOINT() == reason || InfopointReason::CALL() == reason || InfopointReason::IMPLICIT_EXCEPTION() == reason) {
TRACE_jvmci_4("safepoint at %i", pc_offset);
site_Safepoint(buffer, pc_offset, site, CHECK_OK);
} else if (InfopointReason::METHOD_START() == reason || InfopointReason::METHOD_END() == reason || InfopointReason::LINE_NUMBER() == reason) {
site_Infopoint(buffer, pc_offset, site, CHECK_OK);
} else {
JVMCI_ERROR_OK("unknown infopoint reason at %i", pc_offset);
}
} else if (site->is_a(CompilationResult_DataPatch::klass())) {
TRACE_jvmci_4("datapatch at %i", pc_offset);
site_DataPatch(buffer, pc_offset, site, CHECK_OK);
} else if (site->is_a(CompilationResult_Mark::klass())) {
TRACE_jvmci_4("mark at %i", pc_offset);
site_Mark(buffer, pc_offset, site, CHECK_OK);
} else {
JVMCI_ERROR_OK("unexpected site subclass: %s", site->klass()->signature_name());
}
last_pc_offset = pc_offset;
if (CodeInstallSafepointChecks && SafepointSynchronize::do_call_back()) {
// this is a hacky way to force a safepoint check but nothing else was jumping out at me.
ThreadToNativeFromVM ttnfv(JavaThread::current());
@ -673,7 +751,6 @@ JVMCIEnv::CodeInstallResult CodeInstaller::initialize_buffer(CodeBuffer& buffer)
#ifndef PRODUCT
if (comments() != NULL) {
No_Safepoint_Verifier no_safepoint;
for (int i = 0; i < comments()->length(); i++) {
oop comment = comments()->obj_at(i);
assert(comment->is_a(HotSpotCompiledCode_Comment::klass()), "cce");
@ -759,56 +836,61 @@ static bool bytecode_should_reexecute(Bytecodes::Code code) {
return true;
}
GrowableArray<ScopeValue*>* CodeInstaller::record_virtual_objects(oop debug_info) {
objArrayOop virtualObjects = DebugInfo::virtualObjectMapping(debug_info);
if (virtualObjects == NULL) {
GrowableArray<ScopeValue*>* CodeInstaller::record_virtual_objects(Handle debug_info, TRAPS) {
objArrayHandle virtualObjects = DebugInfo::virtualObjectMapping(debug_info);
if (virtualObjects.is_null()) {
return NULL;
}
GrowableArray<ScopeValue*>* objects = new GrowableArray<ScopeValue*>(virtualObjects->length(), virtualObjects->length(), NULL);
// Create the unique ObjectValues
for (int i = 0; i < virtualObjects->length(); i++) {
oop value = virtualObjects->obj_at(i);
Handle value = virtualObjects->obj_at(i);
int id = VirtualObject::id(value);
oop type = VirtualObject::type(value);
Handle type = VirtualObject::type(value);
oop javaMirror = HotSpotResolvedObjectTypeImpl::javaClass(type);
ObjectValue* sv = new ObjectValue(id, new ConstantOopWriteValue(JNIHandles::make_local(Thread::current(), javaMirror)));
assert(objects->at(id) == NULL, "once");
if (id < 0 || id >= objects->length()) {
JVMCI_ERROR_NULL("virtual object id %d out of bounds", id);
}
if (objects->at(id) != NULL) {
JVMCI_ERROR_NULL("duplicate virtual object id %d", id);
}
objects->at_put(id, sv);
}
// All the values which could be referenced by the VirtualObjects
// exist, so now describe all the VirtualObjects themselves.
for (int i = 0; i < virtualObjects->length(); i++) {
oop value = virtualObjects->obj_at(i);
Handle value = virtualObjects->obj_at(i);
int id = VirtualObject::id(value);
record_object_value(objects->at(id)->as_ObjectValue(), value, objects);
record_object_value(objects->at(id)->as_ObjectValue(), value, objects, CHECK_NULL);
}
_debug_recorder->dump_object_pool(objects);
return objects;
}
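The two passes in record_virtual_objects() above (allocate an ObjectValue for every id first, then describe their contents) are what allow virtual objects to reference one another, cycles included. A stripped-down sketch of the same idea, using made-up Node/edges types rather than the JVMCI classes:

#include <vector>

struct Node { std::vector<Node*> refs; };           // stand-in for ObjectValue

// edges[i] lists the ids referenced by virtual object i (hypothetical input).
static std::vector<Node*> build(const std::vector<std::vector<int> >& edges) {
  std::vector<Node*> objects(edges.size(), nullptr);
  for (size_t id = 0; id < edges.size(); id++) {    // pass 1: create every object up front
    objects[id] = new Node();
  }
  for (size_t id = 0; id < edges.size(); id++) {    // pass 2: every target already exists,
    for (int ref : edges[id]) {                     // so even cyclic references resolve
      objects[id]->refs.push_back(objects[ref]);
    }
  }
  return objects;
}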
void CodeInstaller::record_scope(jint pc_offset, oop debug_info) {
oop position = DebugInfo::bytecodePosition(debug_info);
if (position == NULL) {
void CodeInstaller::record_scope(jint pc_offset, Handle debug_info, TRAPS) {
Handle position = DebugInfo::bytecodePosition(debug_info);
if (position.is_null()) {
// Stubs do not record scope info, just oop maps
return;
}
GrowableArray<ScopeValue*>* objectMapping = record_virtual_objects(debug_info);
record_scope(pc_offset, position, objectMapping);
GrowableArray<ScopeValue*>* objectMapping = record_virtual_objects(debug_info, CHECK);
record_scope(pc_offset, position, objectMapping, CHECK);
}
void CodeInstaller::record_scope(jint pc_offset, oop position, GrowableArray<ScopeValue*>* objects) {
oop frame = NULL;
void CodeInstaller::record_scope(jint pc_offset, Handle position, GrowableArray<ScopeValue*>* objects, TRAPS) {
Handle frame;
if (position->is_a(BytecodeFrame::klass())) {
frame = position;
}
oop caller_frame = BytecodePosition::caller(position);
if (caller_frame != NULL) {
record_scope(pc_offset, caller_frame, objects);
Handle caller_frame = BytecodePosition::caller(position);
if (caller_frame.not_null()) {
record_scope(pc_offset, caller_frame, objects, CHECK);
}
oop hotspot_method = BytecodePosition::method(position);
Method* method = getMethodFromHotSpotMethod(hotspot_method);
Handle hotspot_method = BytecodePosition::method(position);
Method* method = getMethodFromHotSpotMethod(hotspot_method());
jint bci = BytecodePosition::bci(position);
if (bci == BytecodeFrame::BEFORE_BCI()) {
bci = SynchronizationEntryBCI;
@ -817,13 +899,13 @@ void CodeInstaller::record_scope(jint pc_offset, oop position, GrowableArray<Sco
TRACE_jvmci_2("Recording scope pc_offset=%d bci=%d method=%s", pc_offset, bci, method->name_and_sig_as_C_string());
bool reexecute = false;
if (frame != NULL) {
if (frame.not_null()) {
if (bci == SynchronizationEntryBCI){
reexecute = false;
} else {
Bytecodes::Code code = Bytecodes::java_code_at(method, method->bcp_from(bci));
reexecute = bytecode_should_reexecute(code);
if (frame != NULL) {
if (frame.not_null()) {
reexecute = (BytecodeFrame::duringCall(frame) == JNI_FALSE);
}
}
@ -834,15 +916,22 @@ void CodeInstaller::record_scope(jint pc_offset, oop position, GrowableArray<Sco
DebugToken* monitors_token = NULL;
bool throw_exception = false;
if (frame != NULL) {
if (frame.not_null()) {
jint local_count = BytecodeFrame::numLocals(frame);
jint expression_count = BytecodeFrame::numStack(frame);
jint monitor_count = BytecodeFrame::numLocks(frame);
objArrayOop values = BytecodeFrame::values(frame);
objArrayOop slotKinds = BytecodeFrame::slotKinds(frame);
objArrayHandle values = BytecodeFrame::values(frame);
objArrayHandle slotKinds = BytecodeFrame::slotKinds(frame);
assert(local_count + expression_count + monitor_count == values->length(), "unexpected values length");
assert(local_count + expression_count == slotKinds->length(), "unexpected slotKinds length");
if (values.is_null() || slotKinds.is_null()) {
THROW(vmSymbols::java_lang_NullPointerException());
}
if (local_count + expression_count + monitor_count != values->length()) {
JVMCI_ERROR("unexpected values length %d in scope (%d locals, %d expressions, %d monitors)", values->length(), local_count, expression_count, monitor_count);
}
if (local_count + expression_count != slotKinds->length()) {
JVMCI_ERROR("unexpected slotKinds length %d in scope (%d locals, %d expressions)", slotKinds->length(), local_count, expression_count);
}
GrowableArray<ScopeValue*>* locals = local_count > 0 ? new GrowableArray<ScopeValue*> (local_count) : NULL;
GrowableArray<ScopeValue*>* expressions = expression_count > 0 ? new GrowableArray<ScopeValue*> (expression_count) : NULL;
@ -853,30 +942,30 @@ void CodeInstaller::record_scope(jint pc_offset, oop position, GrowableArray<Sco
for (jint i = 0; i < values->length(); i++) {
ScopeValue* second = NULL;
oop value = values->obj_at(i);
Handle value = values->obj_at(i);
if (i < local_count) {
oop kind = slotKinds->obj_at(i);
BasicType type = JVMCIRuntime::kindToBasicType(JavaKind::typeChar(kind));
ScopeValue* first = get_scope_value(value, type, objects, second);
BasicType type = JVMCIRuntime::kindToBasicType(slotKinds->obj_at(i), CHECK);
ScopeValue* first = get_scope_value(value, type, objects, second, CHECK);
if (second != NULL) {
locals->append(second);
}
locals->append(first);
} else if (i < local_count + expression_count) {
oop kind = slotKinds->obj_at(i);
BasicType type = JVMCIRuntime::kindToBasicType(JavaKind::typeChar(kind));
ScopeValue* first = get_scope_value(value, type, objects, second);
BasicType type = JVMCIRuntime::kindToBasicType(slotKinds->obj_at(i), CHECK);
ScopeValue* first = get_scope_value(value, type, objects, second, CHECK);
if (second != NULL) {
expressions->append(second);
}
expressions->append(first);
} else {
monitors->append(get_monitor_value(value, objects));
MonitorValue *monitor = get_monitor_value(value, objects, CHECK);
monitors->append(monitor);
}
if (second != NULL) {
i++;
assert(i < values->length(), "double-slot value not followed by Value.ILLEGAL");
assert(values->obj_at(i) == Value::ILLEGAL(), "double-slot value not followed by Value.ILLEGAL");
if (i >= values->length() || values->obj_at(i) != Value::ILLEGAL()) {
JVMCI_ERROR("double-slot value not followed by Value.ILLEGAL");
}
}
}
@ -891,32 +980,37 @@ void CodeInstaller::record_scope(jint pc_offset, oop position, GrowableArray<Sco
locals_token, expressions_token, monitors_token);
}
void CodeInstaller::site_Safepoint(CodeBuffer& buffer, jint pc_offset, oop site) {
oop debug_info = CompilationResult_Infopoint::debugInfo(site);
assert(debug_info != NULL, "debug info expected");
void CodeInstaller::site_Safepoint(CodeBuffer& buffer, jint pc_offset, Handle site, TRAPS) {
Handle debug_info = CompilationResult_Infopoint::debugInfo(site);
if (debug_info.is_null()) {
JVMCI_ERROR("debug info expected at safepoint at %i", pc_offset);
}
// address instruction = _instructions->start() + pc_offset;
// jint next_pc_offset = Assembler::locate_next_instruction(instruction) - _instructions->start();
_debug_recorder->add_safepoint(pc_offset, create_oop_map(debug_info));
record_scope(pc_offset, debug_info);
OopMap *map = create_oop_map(debug_info, CHECK);
_debug_recorder->add_safepoint(pc_offset, map);
record_scope(pc_offset, debug_info, CHECK);
_debug_recorder->end_safepoint(pc_offset);
}
void CodeInstaller::site_Infopoint(CodeBuffer& buffer, jint pc_offset, oop site) {
oop debug_info = CompilationResult_Infopoint::debugInfo(site);
assert(debug_info != NULL, "debug info expected");
void CodeInstaller::site_Infopoint(CodeBuffer& buffer, jint pc_offset, Handle site, TRAPS) {
Handle debug_info = CompilationResult_Infopoint::debugInfo(site);
if (debug_info.is_null()) {
JVMCI_ERROR("debug info expected at infopoint at %i", pc_offset);
}
_debug_recorder->add_non_safepoint(pc_offset);
record_scope(pc_offset, debug_info);
record_scope(pc_offset, debug_info, CHECK);
_debug_recorder->end_non_safepoint(pc_offset);
}
void CodeInstaller::site_Call(CodeBuffer& buffer, jint pc_offset, oop site) {
oop target = CompilationResult_Call::target(site);
void CodeInstaller::site_Call(CodeBuffer& buffer, jint pc_offset, Handle site, TRAPS) {
Handle target = CompilationResult_Call::target(site);
InstanceKlass* target_klass = InstanceKlass::cast(target->klass());
oop hotspot_method = NULL; // JavaMethod
oop foreign_call = NULL;
Handle hotspot_method; // JavaMethod
Handle foreign_call;
if (target_klass->is_subclass_of(SystemDictionary::HotSpotForeignCallTarget_klass())) {
foreign_call = target;
@ -924,27 +1018,29 @@ void CodeInstaller::site_Call(CodeBuffer& buffer, jint pc_offset, oop site) {
hotspot_method = target;
}
oop debug_info = CompilationResult_Call::debugInfo(site);
Handle debug_info = CompilationResult_Call::debugInfo(site);
assert(!!hotspot_method ^ !!foreign_call, "Call site needs exactly one type");
assert(hotspot_method.not_null() ^ foreign_call.not_null(), "Call site needs exactly one type");
NativeInstruction* inst = nativeInstruction_at(_instructions->start() + pc_offset);
jint next_pc_offset = CodeInstaller::pd_next_offset(inst, pc_offset, hotspot_method);
jint next_pc_offset = CodeInstaller::pd_next_offset(inst, pc_offset, hotspot_method, CHECK);
if (debug_info != NULL) {
_debug_recorder->add_safepoint(next_pc_offset, create_oop_map(debug_info));
record_scope(next_pc_offset, debug_info);
if (debug_info.not_null()) {
OopMap *map = create_oop_map(debug_info, CHECK);
_debug_recorder->add_safepoint(next_pc_offset, map);
record_scope(next_pc_offset, debug_info, CHECK);
}
if (foreign_call != NULL) {
if (foreign_call.not_null()) {
jlong foreign_call_destination = HotSpotForeignCallTarget::address(foreign_call);
CodeInstaller::pd_relocate_ForeignCall(inst, foreign_call_destination);
CodeInstaller::pd_relocate_ForeignCall(inst, foreign_call_destination, CHECK);
} else { // method != NULL
assert(hotspot_method != NULL, "unexpected JavaMethod");
assert(debug_info != NULL, "debug info expected");
if (debug_info.is_null()) {
JVMCI_ERROR("debug info expected at call at %i", pc_offset);
}
TRACE_jvmci_3("method call");
CodeInstaller::pd_relocate_JavaMethod(hotspot_method, pc_offset);
CodeInstaller::pd_relocate_JavaMethod(hotspot_method, pc_offset, CHECK);
if (_next_call_type == INVOKESTATIC || _next_call_type == INVOKESPECIAL) {
// Need a static call stub for transitions from compiled to interpreted.
CompiledStaticCall::emit_to_interp_stub(buffer, _instructions->start() + pc_offset);
@ -953,38 +1049,45 @@ void CodeInstaller::site_Call(CodeBuffer& buffer, jint pc_offset, oop site) {
_next_call_type = INVOKE_INVALID;
if (debug_info != NULL) {
if (debug_info.not_null()) {
_debug_recorder->end_safepoint(next_pc_offset);
}
}
void CodeInstaller::site_DataPatch(CodeBuffer& buffer, jint pc_offset, oop site) {
oop reference = CompilationResult_DataPatch::reference(site);
if (reference->is_a(CompilationResult_ConstantReference::klass())) {
void CodeInstaller::site_DataPatch(CodeBuffer& buffer, jint pc_offset, Handle site, TRAPS) {
Handle reference = CompilationResult_DataPatch::reference(site);
if (reference.is_null()) {
THROW(vmSymbols::java_lang_NullPointerException());
} else if (reference->is_a(CompilationResult_ConstantReference::klass())) {
Handle constant = CompilationResult_ConstantReference::constant(reference);
if (constant->is_a(HotSpotObjectConstantImpl::klass())) {
pd_patch_OopConstant(pc_offset, constant);
if (constant.is_null()) {
THROW(vmSymbols::java_lang_NullPointerException());
} else if (constant->is_a(HotSpotObjectConstantImpl::klass())) {
pd_patch_OopConstant(pc_offset, constant, CHECK);
} else if (constant->is_a(HotSpotMetaspaceConstantImpl::klass())) {
pd_patch_MetaspaceConstant(pc_offset, constant);
} else if (constant->is_a(HotSpotSentinelConstant::klass())) {
fatal("sentinel constant unsupported");
pd_patch_MetaspaceConstant(pc_offset, constant, CHECK);
} else {
fatal("unknown constant type in data patch");
JVMCI_ERROR("unknown constant type in data patch: %s", constant->klass()->signature_name());
}
} else if (reference->is_a(CompilationResult_DataSectionReference::klass())) {
int data_offset = CompilationResult_DataSectionReference::offset(reference);
assert(0 <= data_offset && data_offset < _constants_size, "data offset 0x%X points outside data section (size 0x%X)", data_offset, _constants_size);
pd_patch_DataSectionReference(pc_offset, data_offset);
if (0 <= data_offset && data_offset < _constants_size) {
pd_patch_DataSectionReference(pc_offset, data_offset);
} else {
JVMCI_ERROR("data offset 0x%X points outside data section (size 0x%X)", data_offset, _constants_size);
}
} else {
fatal("unknown data patch type");
JVMCI_ERROR("unknown data patch type: %s", reference->klass()->signature_name());
}
}
void CodeInstaller::site_Mark(CodeBuffer& buffer, jint pc_offset, oop site) {
oop id_obj = CompilationResult_Mark::id(site);
void CodeInstaller::site_Mark(CodeBuffer& buffer, jint pc_offset, Handle site, TRAPS) {
Handle id_obj = CompilationResult_Mark::id(site);
if (id_obj != NULL) {
assert(java_lang_boxing_object::is_instance(id_obj, T_INT), "Integer id expected");
if (id_obj.not_null()) {
if (!java_lang_boxing_object::is_instance(id_obj(), T_INT)) {
JVMCI_ERROR("expected Integer id, got %s", id_obj->klass()->signature_name());
}
jint id = id_obj->int_field(java_lang_boxing_object::value_offset_in_bytes(T_INT));
address pc = _instructions->start() + pc_offset;
@ -1017,7 +1120,7 @@ void CodeInstaller::site_Mark(CodeBuffer& buffer, jint pc_offset, oop site) {
case POLL_FAR:
case POLL_RETURN_NEAR:
case POLL_RETURN_FAR:
pd_relocate_poll(pc, id);
pd_relocate_poll(pc, id, CHECK);
break;
case CARD_TABLE_SHIFT:
case CARD_TABLE_ADDRESS:
@ -1027,7 +1130,7 @@ void CodeInstaller::site_Mark(CodeBuffer& buffer, jint pc_offset, oop site) {
case CRC_TABLE_ADDRESS:
break;
default:
ShouldNotReachHere();
JVMCI_ERROR("invalid mark id: %d", id);
break;
}
}
@ -154,13 +154,13 @@ private:
static ConstantIntValue* _int_2_scope_value;
static LocationValue* _illegal_value;
jint pd_next_offset(NativeInstruction* inst, jint pc_offset, oop method);
void pd_patch_OopConstant(int pc_offset, Handle& constant);
void pd_patch_MetaspaceConstant(int pc_offset, Handle& constant);
jint pd_next_offset(NativeInstruction* inst, jint pc_offset, Handle method, TRAPS);
void pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS);
void pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS);
void pd_patch_DataSectionReference(int pc_offset, int data_offset);
void pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination);
void pd_relocate_JavaMethod(oop method, jint pc_offset);
void pd_relocate_poll(address pc, jint mark);
void pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination, TRAPS);
void pd_relocate_JavaMethod(Handle method, jint pc_offset, TRAPS);
void pd_relocate_poll(address pc, jint mark, TRAPS);
objArrayOop sites() { return (objArrayOop) JNIHandles::resolve(_sites_handle); }
arrayOop code() { return (arrayOop) JNIHandles::resolve(_code_handle); }
@ -177,33 +177,33 @@ public:
CodeInstaller() : _arena(mtCompiler) {}
JVMCIEnv::CodeInstallResult gather_metadata(Handle target, Handle& compiled_code, CodeMetadata& metadata);
JVMCIEnv::CodeInstallResult install(JVMCICompiler* compiler, Handle target, Handle& compiled_code, CodeBlob*& cb, Handle installed_code, Handle speculation_log);
JVMCIEnv::CodeInstallResult gather_metadata(Handle target, Handle compiled_code, CodeMetadata& metadata, TRAPS);
JVMCIEnv::CodeInstallResult install(JVMCICompiler* compiler, Handle target, Handle compiled_code, CodeBlob*& cb, Handle installed_code, Handle speculation_log, TRAPS);
static address runtime_call_target_address(oop runtime_call);
static VMReg get_hotspot_reg(jint jvmciRegisterNumber);
static VMReg get_hotspot_reg(jint jvmciRegisterNumber, TRAPS);
static bool is_general_purpose_reg(VMReg hotspotRegister);
const OopMapSet* oopMapSet() const { return _debug_recorder->_oopmaps; }
protected:
Location::Type get_oop_type(oop value);
ScopeValue* get_scope_value(oop value, BasicType type, GrowableArray<ScopeValue*>* objects, ScopeValue* &second);
MonitorValue* get_monitor_value(oop value, GrowableArray<ScopeValue*>* objects);
Location::Type get_oop_type(Handle value);
ScopeValue* get_scope_value(Handle value, BasicType type, GrowableArray<ScopeValue*>* objects, ScopeValue* &second, TRAPS);
MonitorValue* get_monitor_value(Handle value, GrowableArray<ScopeValue*>* objects, TRAPS);
Metadata* record_metadata_reference(Handle& constant);
Metadata* record_metadata_reference(Handle constant, TRAPS);
#ifdef _LP64
narrowKlass record_narrow_metadata_reference(Handle& constant);
narrowKlass record_narrow_metadata_reference(Handle constant, TRAPS);
#endif
// extract the fields of the CompilationResult
void initialize_fields(oop target, oop target_method);
void initialize_dependencies(oop target_method, OopRecorder* oop_recorder);
void initialize_fields(oop target, oop target_method, TRAPS);
void initialize_dependencies(oop target_method, OopRecorder* oop_recorder, TRAPS);
int estimate_stubs_size();
int estimate_stubs_size(TRAPS);
// perform data and call relocation on the CodeBuffer
JVMCIEnv::CodeInstallResult initialize_buffer(CodeBuffer& buffer);
JVMCIEnv::CodeInstallResult initialize_buffer(CodeBuffer& buffer, TRAPS);
void assumption_NoFinalizableSubclass(Handle assumption);
void assumption_ConcreteSubtype(Handle assumption);
@ -211,19 +211,19 @@ protected:
void assumption_ConcreteMethod(Handle assumption);
void assumption_CallSiteTargetValue(Handle assumption);
void site_Safepoint(CodeBuffer& buffer, jint pc_offset, oop site);
void site_Infopoint(CodeBuffer& buffer, jint pc_offset, oop site);
void site_Call(CodeBuffer& buffer, jint pc_offset, oop site);
void site_DataPatch(CodeBuffer& buffer, jint pc_offset, oop site);
void site_Mark(CodeBuffer& buffer, jint pc_offset, oop site);
void site_Safepoint(CodeBuffer& buffer, jint pc_offset, Handle site, TRAPS);
void site_Infopoint(CodeBuffer& buffer, jint pc_offset, Handle site, TRAPS);
void site_Call(CodeBuffer& buffer, jint pc_offset, Handle site, TRAPS);
void site_DataPatch(CodeBuffer& buffer, jint pc_offset, Handle site, TRAPS);
void site_Mark(CodeBuffer& buffer, jint pc_offset, Handle site, TRAPS);
OopMap* create_oop_map(oop debug_info);
OopMap* create_oop_map(Handle debug_info, TRAPS);
void record_scope(jint pc_offset, oop debug_info);
void record_scope(jint pc_offset, oop code_pos, GrowableArray<ScopeValue*>* objects);
void record_object_value(ObjectValue* sv, oop value, GrowableArray<ScopeValue*>* objects);
void record_scope(jint pc_offset, Handle debug_info, TRAPS);
void record_scope(jint pc_offset, Handle code_pos, GrowableArray<ScopeValue*>* objects, TRAPS);
void record_object_value(ObjectValue* sv, Handle value, GrowableArray<ScopeValue*>* objects, TRAPS);
GrowableArray<ScopeValue*>* record_virtual_objects(oop debug_info);
GrowableArray<ScopeValue*>* record_virtual_objects(Handle debug_info, TRAPS);
void process_exception_handlers();
int estimateStubSpace(int static_call_stubs);
@ -112,7 +112,7 @@ void JVMCICompiler::bootstrap() {
_bootstrapping = false;
}
void JVMCICompiler::compile_method(methodHandle method, int entry_bci, JVMCIEnv* env) {
void JVMCICompiler::compile_method(const methodHandle& method, int entry_bci, JVMCIEnv* env) {
JVMCI_EXCEPTION_CONTEXT
bool is_osr = entry_bci != InvocationEntryBci;
@ -71,7 +71,7 @@ public:
// Compilation entry point for methods
virtual void compile_method(ciEnv* env, ciMethod* target, int entry_bci, DirectiveSet* directive);
void compile_method(methodHandle target, int entry_bci, JVMCIEnv* env);
void compile_method(const methodHandle& target, int entry_bci, JVMCIEnv* env);
virtual bool is_trivial(Method* method);
@ -670,7 +670,7 @@ C2V_VMENTRY(jint, installCode, (JNIEnv *jniEnv, jobject, jobject target, jobject
TraceTime install_time("installCode", JVMCICompiler::codeInstallTimer());
CodeInstaller installer;
JVMCIEnv::CodeInstallResult result = installer.install(compiler, target_handle, compiled_code_handle, cb, installed_code_handle, speculation_log_handle);
JVMCIEnv::CodeInstallResult result = installer.install(compiler, target_handle, compiled_code_handle, cb, installed_code_handle, speculation_log_handle, CHECK_0);
if (PrintCodeCacheOnCompilation) {
stringStream s;
@ -690,6 +690,7 @@ C2V_VMENTRY(jint, installCode, (JNIEnv *jniEnv, jobject, jobject target, jobject
assert(installed_code_handle->is_a(InstalledCode::klass()), "wrong type");
CompilerToVM::invalidate_installed_code(installed_code_handle, CHECK_0);
InstalledCode::set_address(installed_code_handle, (jlong) cb);
InstalledCode::set_version(installed_code_handle, InstalledCode::version(installed_code_handle) + 1);
if (cb->is_nmethod()) {
InstalledCode::set_entryPoint(installed_code_handle, (jlong) cb->as_nmethod_or_null()->verified_entry_point());
} else {
@ -726,7 +727,7 @@ C2V_VMENTRY(jint, getMetadata, (JNIEnv *jniEnv, jobject, jobject target, jobject
CodeBlob *cb = NULL;
CodeInstaller installer;
JVMCIEnv::CodeInstallResult result = installer.gather_metadata(target_handle, compiled_code_handle, code_metadata); //cb, pc_descs, nr_pc_descs, scopes_descs, scopes_size, reloc_buffer);
JVMCIEnv::CodeInstallResult result = installer.gather_metadata(target_handle, compiled_code_handle, code_metadata, CHECK_0); //cb, pc_descs, nr_pc_descs, scopes_descs, scopes_size, reloc_buffer);
if (result != JVMCIEnv::ok) {
return result;
}
@ -161,7 +161,7 @@ KlassHandle JVMCIEnv::get_klass_by_name_impl(KlassHandle& accessing_klass,
}
// ------------------------------------------------------------------
KlassHandle JVMCIEnv::get_klass_by_name(KlassHandle& accessing_klass,
KlassHandle JVMCIEnv::get_klass_by_name(KlassHandle accessing_klass,
Symbol* klass_name,
bool require_local) {
ResourceMark rm;
@ -177,7 +177,7 @@ KlassHandle JVMCIEnv::get_klass_by_name(KlassHandle& accessing_klass,
KlassHandle JVMCIEnv::get_klass_by_index_impl(const constantPoolHandle& cpool,
int index,
bool& is_accessible,
KlassHandle& accessor) {
KlassHandle accessor) {
JVMCI_EXCEPTION_CONTEXT;
KlassHandle klass (THREAD, ConstantPool::klass_at_if_loaded(cpool, index));
Symbol* klass_name = NULL;
@ -218,7 +218,7 @@ KlassHandle JVMCIEnv::get_klass_by_index_impl(const constantPoolHandle& cpool,
KlassHandle JVMCIEnv::get_klass_by_index(const constantPoolHandle& cpool,
int index,
bool& is_accessible,
KlassHandle& accessor) {
KlassHandle accessor) {
ResourceMark rm;
KlassHandle result = get_klass_by_index_impl(cpool, index, is_accessible, accessor);
return result;
@ -229,7 +229,7 @@ KlassHandle JVMCIEnv::get_klass_by_index(const constantPoolHandle& cpool,
//
// Implementation note: the results of field lookups are cached
// in the accessor klass.
void JVMCIEnv::get_field_by_index_impl(instanceKlassHandle& klass, fieldDescriptor& field_desc,
void JVMCIEnv::get_field_by_index_impl(instanceKlassHandle klass, fieldDescriptor& field_desc,
int index) {
JVMCI_EXCEPTION_CONTEXT;
@ -270,7 +270,7 @@ void JVMCIEnv::get_field_by_index_impl(instanceKlassHandle& klass, fieldDescript
// ------------------------------------------------------------------
// Get a field by index from a klass's constant pool.
void JVMCIEnv::get_field_by_index(instanceKlassHandle& accessor, fieldDescriptor& fd, int index) {
void JVMCIEnv::get_field_by_index(instanceKlassHandle accessor, fieldDescriptor& fd, int index) {
ResourceMark rm;
return get_field_by_index_impl(accessor, fd, index);
}
@ -278,8 +278,8 @@ void JVMCIEnv::get_field_by_index(instanceKlassHandle& accessor, fieldDescriptor
// ------------------------------------------------------------------
// Perform an appropriate method lookup based on accessor, holder,
// name, signature, and bytecode.
methodHandle JVMCIEnv::lookup_method(instanceKlassHandle& h_accessor,
instanceKlassHandle& h_holder,
methodHandle JVMCIEnv::lookup_method(instanceKlassHandle h_accessor,
instanceKlassHandle h_holder,
Symbol* name,
Symbol* sig,
Bytecodes::Code bc) {
@ -314,7 +314,7 @@ methodHandle JVMCIEnv::lookup_method(instanceKlassHandle& h_accessor,
// ------------------------------------------------------------------
methodHandle JVMCIEnv::get_method_by_index_impl(const constantPoolHandle& cpool,
int index, Bytecodes::Code bc,
instanceKlassHandle& accessor) {
instanceKlassHandle accessor) {
if (bc == Bytecodes::_invokedynamic) {
ConstantPoolCacheEntry* cpce = cpool->invokedynamic_cp_cache_entry_at(index);
bool is_resolved = !cpce->is_f1_null();
@ -379,7 +379,7 @@ methodHandle JVMCIEnv::get_method_by_index_impl(const constantPoolHandle& cpool,
}
// ------------------------------------------------------------------
instanceKlassHandle JVMCIEnv::get_instance_klass_for_declared_method_holder(KlassHandle& method_holder) {
instanceKlassHandle JVMCIEnv::get_instance_klass_for_declared_method_holder(KlassHandle method_holder) {
// For the case of <array>.clone(), the method holder can be an ArrayKlass*
// instead of an InstanceKlass*. For that case simply pretend that the
// declared holder is Object.clone since that's where the call will bottom out.
@ -397,7 +397,7 @@ instanceKlassHandle JVMCIEnv::get_instance_klass_for_declared_method_holder(Klas
// ------------------------------------------------------------------
methodHandle JVMCIEnv::get_method_by_index(const constantPoolHandle& cpool,
int index, Bytecodes::Code bc,
instanceKlassHandle& accessor) {
instanceKlassHandle accessor) {
ResourceMark rm;
return get_method_by_index_impl(cpool, index, bc, accessor);
}
@ -452,7 +452,7 @@ JVMCIEnv::CodeInstallResult JVMCIEnv::check_for_system_dictionary_modification(D
// ------------------------------------------------------------------
JVMCIEnv::CodeInstallResult JVMCIEnv::register_method(
methodHandle& method,
const methodHandle& method,
nmethod*& nm,
int entry_bci,
CodeOffsets* offsets,
@ -78,7 +78,7 @@ public:
// The CI treats a klass as loaded if it is consistently defined in
// another loader, even if it hasn't yet been loaded in all loaders
// that could potentially see it via delegation.
static KlassHandle get_klass_by_name(KlassHandle& accessing_klass,
static KlassHandle get_klass_by_name(KlassHandle accessing_klass,
Symbol* klass_name,
bool require_local);
@ -86,12 +86,12 @@ public:
static KlassHandle get_klass_by_index(const constantPoolHandle& cpool,
int klass_index,
bool& is_accessible,
KlassHandle& loading_klass);
static void get_field_by_index(instanceKlassHandle& loading_klass, fieldDescriptor& fd,
KlassHandle loading_klass);
static void get_field_by_index(instanceKlassHandle loading_klass, fieldDescriptor& fd,
int field_index);
static methodHandle get_method_by_index(const constantPoolHandle& cpool,
int method_index, Bytecodes::Code bc,
instanceKlassHandle& loading_klass);
instanceKlassHandle loading_klass);
JVMCIEnv(CompileTask* task, int system_dictionary_modification_counter);
@ -112,17 +112,17 @@ private:
static KlassHandle get_klass_by_index_impl(const constantPoolHandle& cpool,
int klass_index,
bool& is_accessible,
KlassHandle& loading_klass);
static void get_field_by_index_impl(instanceKlassHandle& loading_klass, fieldDescriptor& fd,
KlassHandle loading_klass);
static void get_field_by_index_impl(instanceKlassHandle loading_klass, fieldDescriptor& fd,
int field_index);
static methodHandle get_method_by_index_impl(const constantPoolHandle& cpool,
int method_index, Bytecodes::Code bc,
instanceKlassHandle& loading_klass);
instanceKlassHandle loading_klass);
// Helper methods
static bool check_klass_accessibility(KlassHandle accessing_klass, KlassHandle resolved_klass);
static methodHandle lookup_method(instanceKlassHandle& accessor,
instanceKlassHandle& holder,
static methodHandle lookup_method(instanceKlassHandle accessor,
instanceKlassHandle holder,
Symbol* name,
Symbol* sig,
Bytecodes::Code bc);
@ -142,7 +142,7 @@ public:
// Register the result of a compilation.
static JVMCIEnv::CodeInstallResult register_method(
methodHandle& target,
const methodHandle& target,
nmethod*& nm,
int entry_bci,
CodeOffsets* offsets,
@ -166,7 +166,7 @@ public:
// InstanceKlass*. This is needed since the holder of a method in
// the bytecodes could be an array type. Basically this converts
// array types into java/lang/Object and other types stay as they are.
static instanceKlassHandle get_instance_klass_for_declared_method_holder(KlassHandle& klass);
static instanceKlassHandle get_instance_klass_for_declared_method_holder(KlassHandle klass);
};
#endif // SHARE_VM_JVMCI_JVMCIENV_HPP
@ -30,7 +30,7 @@
// This function is similar to javaClasses.cpp, it computes the field offset of a (static or instance) field.
// It looks up the name and signature symbols without creating new ones, all the symbols of these classes need to be already loaded.
void compute_offset(int &dest_offset, Klass* klass, const char* name, const char* signature, bool static_field) {
void compute_offset(int &dest_offset, Klass* klass, const char* name, const char* signature, bool static_field, TRAPS) {
InstanceKlass* ik = InstanceKlass::cast(klass);
Symbol* name_symbol = SymbolTable::probe(name, (int)strlen(name));
Symbol* signature_symbol = SymbolTable::probe(signature, (int)strlen(signature));
@ -49,6 +49,11 @@ void compute_offset(int &dest_offset, Klass* klass, const char* name, const char
guarantee(fd.is_static() == static_field, "static/instance mismatch");
dest_offset = fd.offset();
assert(dest_offset != 0, "must be valid offset");
if (static_field) {
// Must ensure classes for static fields are initialized as the
// accessor itself does not include a class initialization check.
ik->initialize(CHECK);
}
}
// This piece of macro magic creates the contents of the jvmci_compute_offsets method that initializes the field indices of all the access classes.
@ -57,7 +62,7 @@ void compute_offset(int &dest_offset, Klass* klass, const char* name, const char
#define END_CLASS }
#define FIELD(klass, name, signature, static_field) compute_offset(klass::_##name##_offset, k, #name, signature, static_field);
#define FIELD(klass, name, signature, static_field) compute_offset(klass::_##name##_offset, k, #name, signature, static_field, CHECK);
#define CHAR_FIELD(klass, name) FIELD(klass, name, "C", false)
#define INT_FIELD(klass, name) FIELD(klass, name, "I", false)
#define BOOLEAN_FIELD(klass, name) FIELD(klass, name, "Z", false)
@ -69,7 +74,7 @@ void compute_offset(int &dest_offset, Klass* klass, const char* name, const char
#define STATIC_BOOLEAN_FIELD(klass, name) FIELD(klass, name, "Z", true)
void JVMCIJavaClasses::compute_offsets() {
void JVMCIJavaClasses::compute_offsets(TRAPS) {
COMPILER_CLASSES_DO(START_CLASS, END_CLASS, CHAR_FIELD, INT_FIELD, BOOLEAN_FIELD, LONG_FIELD, FLOAT_FIELD, OOP_FIELD, OOP_FIELD, OOP_FIELD, STATIC_OOP_FIELD, STATIC_OOP_FIELD, STATIC_INT_FIELD, STATIC_BOOLEAN_FIELD)
}
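As a rough illustration of the macro plumbing above, assuming BytecodeFrame declares its numLocals slot via INT_FIELD(BytecodeFrame, numLocals), the redefined FIELD macro now expands that entry into an exception-checked offset lookup:
// Hypothetical expansion inside JVMCIJavaClasses::compute_offsets(TRAPS):
//   INT_FIELD(BytecodeFrame, numLocals)
//     -> FIELD(BytecodeFrame, numLocals, "I", false)
//     -> compute_offset(BytecodeFrame::_numLocals_offset, k, "numLocals", "I", false, CHECK);
// so an exception raised while initializing the holder class (see ik->initialize(CHECK) above)
// now propagates out of compute_offsets instead of being lost.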
@ -29,7 +29,7 @@
class JVMCIJavaClasses : AllStatic {
public:
static void compute_offsets();
static void compute_offsets(TRAPS);
};
/* This macro defines the structure of the CompilationResult - classes.
@ -306,7 +306,7 @@ class name : AllStatic {
assert(obj->is_a(SystemDictionary::name##_klass()), "wrong class, " #name " expected, found %s", obj->klass()->external_name()); \
assert(offset != 0, "must be valid offset"); \
} \
static void compute_offsets(); \
static void compute_offsets(TRAPS); \
public: \
static InstanceKlass* klass() { return SystemDictionary::name##_klass(); }
@ -315,10 +315,10 @@ class name : AllStatic {
#define FIELD(name, type, accessor, cast) \
static int _##name##_offset; \
static type name(oop obj) { check(obj, #name, _##name##_offset); return cast obj->accessor(_##name##_offset); } \
static type name(Handle& obj) { check(obj(), #name, _##name##_offset); return cast obj->accessor(_##name##_offset); } \
static type name(Handle obj) { check(obj(), #name, _##name##_offset); return cast obj->accessor(_##name##_offset); } \
static type name(jobject obj) { check(JNIHandles::resolve(obj), #name, _##name##_offset); return cast JNIHandles::resolve(obj)->accessor(_##name##_offset); } \
static void set_##name(oop obj, type x) { check(obj, #name, _##name##_offset); obj->accessor##_put(_##name##_offset, x); } \
static void set_##name(Handle& obj, type x) { check(obj(), #name, _##name##_offset); obj->accessor##_put(_##name##_offset, x); } \
static void set_##name(Handle obj, type x) { check(obj(), #name, _##name##_offset); obj->accessor##_put(_##name##_offset, x); } \
static void set_##name(jobject obj, type x) { check(JNIHandles::resolve(obj), #name, _##name##_offset); JNIHandles::resolve(obj)->accessor##_put(_##name##_offset, x); }
#define EMPTY_CAST
@ -392,6 +392,6 @@ COMPILER_CLASSES_DO(START_CLASS, END_CLASS, CHAR_FIELD, INT_FIELD, BOOLEAN_FIELD
#undef STATIC_BOOLEAN_FIELD
#undef EMPTY_CAST
void compute_offset(int &dest_offset, Klass* klass, const char* name, const char* signature, bool static_field);
void compute_offset(int &dest_offset, Klass* klass, const char* name, const char* signature, bool static_field, TRAPS);
#endif // SHARE_VM_JVMCI_JVMCIJAVACLASSES_HPP
@ -59,7 +59,11 @@ bool JVMCIRuntime::_shutdown_called = false;
static const char* OPTION_PREFIX = "jvmci.option.";
static const size_t OPTION_PREFIX_LEN = strlen(OPTION_PREFIX);
BasicType JVMCIRuntime::kindToBasicType(jchar ch) {
BasicType JVMCIRuntime::kindToBasicType(Handle kind, TRAPS) {
if (kind.is_null()) {
THROW_(vmSymbols::java_lang_NullPointerException(), T_ILLEGAL);
}
jchar ch = JavaKind::typeChar(kind);
switch(ch) {
case 'z': return T_BOOLEAN;
case 'b': return T_BYTE;
@ -72,10 +76,8 @@ BasicType JVMCIRuntime::kindToBasicType(jchar ch) {
case 'a': return T_OBJECT;
case '-': return T_ILLEGAL;
default:
fatal("unexpected Kind: %c", ch);
break;
JVMCI_ERROR_(T_ILLEGAL, "unexpected Kind: %c", ch);
}
return T_ILLEGAL;
}
// Simple helper to see if the caller of a runtime stub which
@ -718,7 +720,7 @@ void JVMCIRuntime::initialize_well_known_classes(TRAPS) {
if (JVMCIRuntime::_well_known_classes_initialized == false) {
SystemDictionary::WKID scan = SystemDictionary::FIRST_JVMCI_WKID;
SystemDictionary::initialize_wk_klasses_through(SystemDictionary::LAST_JVMCI_WKID, scan, CHECK);
JVMCIJavaClasses::compute_offsets();
JVMCIJavaClasses::compute_offsets(CHECK);
JVMCIRuntime::_well_known_classes_initialized = true;
}
}
@ -29,6 +29,17 @@
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#define JVMCI_ERROR(...) \
{ Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::jdk_vm_ci_common_JVMCIError(), __VA_ARGS__); return; }
#define JVMCI_ERROR_(ret, ...) \
{ Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::jdk_vm_ci_common_JVMCIError(), __VA_ARGS__); return ret; }
#define JVMCI_ERROR_0(...) JVMCI_ERROR_(0, __VA_ARGS__)
#define JVMCI_ERROR_NULL(...) JVMCI_ERROR_(NULL, __VA_ARGS__)
#define JVMCI_ERROR_OK(...) JVMCI_ERROR_(JVMCIEnv::ok, __VA_ARGS__)
#define CHECK_OK CHECK_(JVMCIEnv::ok)
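A minimal sketch of how these macros are meant to be used in a VM entry point (the function itself is hypothetical, not part of this change): JVMCI_ERROR_0 raises jdk.vm.ci.common.JVMCIError on the Java side and returns 0 to the caller, while the standard CHECK_0 macro bails out if the callee left a pending exception.
// Hypothetical example only; illustrates the error/propagation pattern:
jint example_entry(Handle arg, TRAPS) {
  if (arg.is_null()) {
    JVMCI_ERROR_0("argument must not be null");           // throw JVMCIError, return 0
  }
  JVMCIRuntime::initialize_well_known_classes(CHECK_0);   // propagate any pending exception
  return 1;
}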
class ParseClosure : public StackObj {
int _lineNo;
char* _filename;
@ -171,7 +182,7 @@ class JVMCIRuntime: public AllStatic {
} \
(void)(0
static BasicType kindToBasicType(jchar ch);
static BasicType kindToBasicType(Handle kind, TRAPS);
// The following routines are all called from compiled JVMCI code
@ -86,6 +86,7 @@
template(jdk_vm_ci_code_VirtualObject, "jdk/vm/ci/code/VirtualObject") \
template(jdk_vm_ci_code_RegisterSaveLayout, "jdk/vm/ci/code/RegisterSaveLayout") \
template(jdk_vm_ci_code_InvalidInstalledCodeException, "jdk/vm/ci/code/InvalidInstalledCodeException") \
template(jdk_vm_ci_common_JVMCIError, "jdk/vm/ci/common/JVMCIError") \
template(compileMethod_name, "compileMethod") \
template(compileMethod_signature, "(Ljdk/vm/ci/hotspot/HotSpotResolvedJavaMethod;IJI)V") \
template(fromMetaspace_name, "fromMetaspace") \
@ -47,7 +47,10 @@ CodeHeap::CodeHeap(const char* name, const int code_blob_type)
_freelist_segments = 0;
_freelist_length = 0;
_max_allocated_capacity = 0;
_was_full = false;
_blob_count = 0;
_nmethod_count = 0;
_adapter_count = 0;
_full_count = 0;
}
@ -185,6 +188,7 @@ void* CodeHeap::allocate(size_t instance_size) {
assert(!block->free(), "must be marked free");
DEBUG_ONLY(memset((void*)block->allocated_space(), badCodeHeapNewVal, instance_size));
_max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
_blob_count++;
return block->allocated_space();
}
@ -198,6 +202,7 @@ void* CodeHeap::allocate(size_t instance_size) {
_next_segment += number_of_segments;
DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size));
_max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
_blob_count++;
return b->allocated_space();
} else {
return NULL;
@ -100,7 +100,11 @@ class CodeHeap : public CHeapObj<mtCode> {
const char* _name; // Name of the CodeHeap
const int _code_blob_type; // CodeBlobType it contains
bool _was_full; // True if the code heap was full
int _blob_count; // Number of CodeBlobs
int _nmethod_count; // Number of nmethods
int _adapter_count; // Number of adapters
int _full_count; // Number of times the code heap was full
enum { free_sentinel = 0xFF };
@ -179,8 +183,13 @@ class CodeHeap : public CHeapObj<mtCode> {
// Debugging / Profiling
const char* name() const { return _name; }
bool was_full() { return _was_full; }
void report_full() { _was_full = true; }
int blob_count() { return _blob_count; }
int nmethod_count() { return _nmethod_count; }
void set_nmethod_count(int count) { _nmethod_count = count; }
int adapter_count() { return _adapter_count; }
void set_adapter_count(int count) { _adapter_count = count; }
int full_count() { return _full_count; }
void report_full() { _full_count++; }
private:
size_t heap_unallocated_capacity() const;
@ -27,6 +27,7 @@
#include "classfile/systemDictionary.hpp"
#include "classfile/verifier.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/dependencyContext.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/specialized_oop_closures.hpp"
@ -203,7 +204,6 @@ InstanceKlass::InstanceKlass(int vtable_len,
int iksize = InstanceKlass::size(vtable_len, itable_len, nonstatic_oop_map_size,
access_flags.is_interface(), is_anonymous);
set_vtable_length(vtable_len);
set_itable_length(itable_len);
set_static_field_size(static_field_size);
@ -232,7 +232,7 @@ InstanceKlass::InstanceKlass(int vtable_len,
set_static_oop_field_count(0);
set_nonstatic_field_size(0);
set_is_marked_dependent(false);
set_has_unloaded_dependent(false);
_dep_context = DependencyContext::EMPTY;
set_init_state(InstanceKlass::allocated);
set_init_thread(NULL);
set_reference_type(rt);
@ -246,7 +246,6 @@ InstanceKlass::InstanceKlass(int vtable_len,
set_annotations(NULL);
set_jvmti_cached_class_field_map(NULL);
set_initial_method_idnum(0);
_dependencies = NULL;
set_jvmti_cached_class_field_map(NULL);
set_cached_class_file(NULL);
set_initial_method_idnum(0);
@ -1854,200 +1853,30 @@ jmethodID InstanceKlass::jmethod_id_or_null(Method* method) {
return id;
}
int nmethodBucket::decrement() {
return Atomic::add(-1, (volatile int *)&_count);
inline DependencyContext InstanceKlass::dependencies() {
DependencyContext dep_context(&_dep_context);
return dep_context;
}
//
// Walk the list of dependent nmethods searching for nmethods which
// are dependent on the changes that were passed in and mark them for
// deoptimization. Returns the number of nmethods found.
//
int nmethodBucket::mark_dependent_nmethods(nmethodBucket* deps, DepChange& changes) {
assert_locked_or_safepoint(CodeCache_lock);
int found = 0;
for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
nmethod* nm = b->get_nmethod();
// since dependencies aren't removed until an nmethod becomes a zombie,
// the dependency list may contain nmethods which aren't alive.
if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
if (TraceDependencies) {
ResourceMark rm;
tty->print_cr("Marked for deoptimization");
changes.print();
nm->print();
nm->print_dependencies();
}
nm->mark_for_deoptimization();
found++;
}
}
return found;
}
//
// Add an nmethodBucket to the list of dependencies for this nmethod.
// It's possible that an nmethod has multiple dependencies on this klass
// so a count is kept for each bucket to guarantee that creation and
// deletion of dependencies is consistent. Returns new head of the list.
//
nmethodBucket* nmethodBucket::add_dependent_nmethod(nmethodBucket* deps, nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
if (nm == b->get_nmethod()) {
b->increment();
return deps;
}
}
return new nmethodBucket(nm, deps);
}
//
// Decrement count of the nmethod in the dependency list and remove
// the bucket completely when the count goes to 0. This method must
// find a corresponding bucket otherwise there's a bug in the
// recording of dependencies. Returns true if the bucket was deleted,
// or marked ready for reclaimation.
bool nmethodBucket::remove_dependent_nmethod(nmethodBucket** deps, nmethod* nm, bool delete_immediately) {
assert_locked_or_safepoint(CodeCache_lock);
nmethodBucket* first = *deps;
nmethodBucket* last = NULL;
for (nmethodBucket* b = first; b != NULL; b = b->next()) {
if (nm == b->get_nmethod()) {
int val = b->decrement();
guarantee(val >= 0, "Underflow: %d", val);
if (val == 0) {
if (delete_immediately) {
if (last == NULL) {
*deps = b->next();
} else {
last->set_next(b->next());
}
delete b;
}
}
return true;
}
last = b;
}
#ifdef ASSERT
tty->print_raw_cr("### can't find dependent nmethod");
nm->print();
#endif // ASSERT
ShouldNotReachHere();
return false;
}
// Convenience overload, for callers that don't want to delete the nmethodBucket entry.
bool nmethodBucket::remove_dependent_nmethod(nmethodBucket* deps, nmethod* nm) {
nmethodBucket** deps_addr = &deps;
return remove_dependent_nmethod(deps_addr, nm, false /* Don't delete */);
}
//
// Reclaim all unused buckets. Returns new head of the list.
//
nmethodBucket* nmethodBucket::clean_dependent_nmethods(nmethodBucket* deps) {
nmethodBucket* first = deps;
nmethodBucket* last = NULL;
nmethodBucket* b = first;
while (b != NULL) {
assert(b->count() >= 0, "bucket count: %d", b->count());
nmethodBucket* next = b->next();
if (b->count() == 0) {
if (last == NULL) {
first = next;
} else {
last->set_next(next);
}
delete b;
// last stays the same.
} else {
last = b;
}
b = next;
}
return first;
}
#ifndef PRODUCT
void nmethodBucket::print_dependent_nmethods(nmethodBucket* deps, bool verbose) {
int idx = 0;
for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
nmethod* nm = b->get_nmethod();
tty->print("[%d] count=%d { ", idx++, b->count());
if (!verbose) {
nm->print_on(tty, "nmethod");
tty->print_cr(" } ");
} else {
nm->print();
nm->print_dependencies();
tty->print_cr("--- } ");
}
}
}
bool nmethodBucket::is_dependent_nmethod(nmethodBucket* deps, nmethod* nm) {
for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
if (nm == b->get_nmethod()) {
#ifdef ASSERT
int count = b->count();
assert(count >= 0, "count shouldn't be negative: %d", count);
#endif
return true;
}
}
return false;
}
#endif //PRODUCT
int InstanceKlass::mark_dependent_nmethods(DepChange& changes) {
assert_locked_or_safepoint(CodeCache_lock);
return nmethodBucket::mark_dependent_nmethods(_dependencies, changes);
}
void InstanceKlass::clean_dependent_nmethods() {
assert_locked_or_safepoint(CodeCache_lock);
if (has_unloaded_dependent()) {
_dependencies = nmethodBucket::clean_dependent_nmethods(_dependencies);
set_has_unloaded_dependent(false);
}
#ifdef ASSERT
else {
// Verification
for (nmethodBucket* b = _dependencies; b != NULL; b = b->next()) {
assert(b->count() >= 0, "bucket count: %d", b->count());
assert(b->count() != 0, "empty buckets need to be cleaned");
}
}
#endif
return dependencies().mark_dependent_nmethods(changes);
}
void InstanceKlass::add_dependent_nmethod(nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
_dependencies = nmethodBucket::add_dependent_nmethod(_dependencies, nm);
dependencies().add_dependent_nmethod(nm);
}
void InstanceKlass::remove_dependent_nmethod(nmethod* nm, bool delete_immediately) {
assert_locked_or_safepoint(CodeCache_lock);
if (nmethodBucket::remove_dependent_nmethod(&_dependencies, nm, delete_immediately)) {
set_has_unloaded_dependent(true);
}
dependencies().remove_dependent_nmethod(nm, delete_immediately);
}
#ifndef PRODUCT
void InstanceKlass::print_dependent_nmethods(bool verbose) {
nmethodBucket::print_dependent_nmethods(_dependencies, verbose);
dependencies().print_dependent_nmethods(verbose);
}
bool InstanceKlass::is_dependent_nmethod(nmethod* nm) {
return nmethodBucket::is_dependent_nmethod(_dependencies, nm);
return dependencies().is_dependent_nmethod(nm);
}
#endif //PRODUCT
@ -2055,7 +1884,9 @@ void InstanceKlass::clean_weak_instanceklass_links(BoolObjectClosure* is_alive)
clean_implementors_list(is_alive);
clean_method_data(is_alive);
clean_dependent_nmethods();
// Since GC iterates InstanceKlasses sequentially, it is safe to remove stale entries here.
DependencyContext dep_context(&_dep_context);
dep_context.expunge_stale_entries();
}
void InstanceKlass::clean_implementors_list(BoolObjectClosure* is_alive) {
@ -2102,6 +1933,8 @@ void InstanceKlass::remove_unshareable_info() {
constants()->remove_unshareable_info();
assert(_dep_context == DependencyContext::EMPTY, "dependency context is not shareable");
for (int i = 0; i < methods()->length(); i++) {
Method* m = methods()->at(i);
m->remove_unshareable_info();
@ -2231,12 +2064,10 @@ void InstanceKlass::release_C_heap_structures() {
}
// release dependencies
nmethodBucket* b = _dependencies;
_dependencies = NULL;
while (b != NULL) {
nmethodBucket* next = b->next();
delete b;
b = next;
{
DependencyContext ctx(&_dep_context);
int marked = ctx.remove_all_dependents();
assert(marked == 0, "all dependencies should be already invalidated");
}
// Deallocate breakpoint records
@ -3558,199 +3389,3 @@ jint InstanceKlass::get_cached_class_file_len() {
unsigned char * InstanceKlass::get_cached_class_file_bytes() {
return VM_RedefineClasses::get_cached_class_file_bytes(_cached_class_file);
}
/////////////// Unit tests ///////////////
#ifndef PRODUCT
class TestNmethodBucketContext {
public:
nmethod* _nmethodLast;
nmethod* _nmethodMiddle;
nmethod* _nmethodFirst;
nmethodBucket* _bucketLast;
nmethodBucket* _bucketMiddle;
nmethodBucket* _bucketFirst;
nmethodBucket* _bucketList;
TestNmethodBucketContext() {
CodeCache_lock->lock_without_safepoint_check();
_nmethodLast = reinterpret_cast<nmethod*>(0x8 * 0);
_nmethodMiddle = reinterpret_cast<nmethod*>(0x8 * 1);
_nmethodFirst = reinterpret_cast<nmethod*>(0x8 * 2);
_bucketLast = new nmethodBucket(_nmethodLast, NULL);
_bucketMiddle = new nmethodBucket(_nmethodMiddle, _bucketLast);
_bucketFirst = new nmethodBucket(_nmethodFirst, _bucketMiddle);
_bucketList = _bucketFirst;
}
~TestNmethodBucketContext() {
delete _bucketLast;
delete _bucketMiddle;
delete _bucketFirst;
CodeCache_lock->unlock();
}
};
class TestNmethodBucket {
public:
static void testRemoveDependentNmethodFirstDeleteImmediately() {
TestNmethodBucketContext c;
nmethodBucket::remove_dependent_nmethod(&c._bucketList, c._nmethodFirst, true /* delete */);
assert(c._bucketList == c._bucketMiddle, "check");
assert(c._bucketList->next() == c._bucketLast, "check");
assert(c._bucketList->next()->next() == NULL, "check");
// Cleanup before context is deleted.
c._bucketFirst = NULL;
}
static void testRemoveDependentNmethodMiddleDeleteImmediately() {
TestNmethodBucketContext c;
nmethodBucket::remove_dependent_nmethod(&c._bucketList, c._nmethodMiddle, true /* delete */);
assert(c._bucketList == c._bucketFirst, "check");
assert(c._bucketList->next() == c._bucketLast, "check");
assert(c._bucketList->next()->next() == NULL, "check");
// Cleanup before context is deleted.
c._bucketMiddle = NULL;
}
static void testRemoveDependentNmethodLastDeleteImmediately() {
TestNmethodBucketContext c;
nmethodBucket::remove_dependent_nmethod(&c._bucketList, c._nmethodLast, true /* delete */);
assert(c._bucketList == c._bucketFirst, "check");
assert(c._bucketList->next() == c._bucketMiddle, "check");
assert(c._bucketList->next()->next() == NULL, "check");
// Cleanup before context is deleted.
c._bucketLast = NULL;
}
static void testRemoveDependentNmethodFirstDeleteDeferred() {
TestNmethodBucketContext c;
nmethodBucket::remove_dependent_nmethod(&c._bucketList, c._nmethodFirst, false /* delete */);
assert(c._bucketList == c._bucketFirst, "check");
assert(c._bucketList->next() == c._bucketMiddle, "check");
assert(c._bucketList->next()->next() == c._bucketLast, "check");
assert(c._bucketList->next()->next()->next() == NULL, "check");
assert(c._bucketFirst->count() == 0, "check");
assert(c._bucketMiddle->count() == 1, "check");
assert(c._bucketLast->count() == 1, "check");
}
static void testRemoveDependentNmethodMiddleDeleteDeferred() {
TestNmethodBucketContext c;
nmethodBucket::remove_dependent_nmethod(&c._bucketList, c._nmethodMiddle, false /* delete */);
assert(c._bucketList == c._bucketFirst, "check");
assert(c._bucketList->next() == c._bucketMiddle, "check");
assert(c._bucketList->next()->next() == c._bucketLast, "check");
assert(c._bucketList->next()->next()->next() == NULL, "check");
assert(c._bucketFirst->count() == 1, "check");
assert(c._bucketMiddle->count() == 0, "check");
assert(c._bucketLast->count() == 1, "check");
}
static void testRemoveDependentNmethodLastDeleteDeferred() {
TestNmethodBucketContext c;
nmethodBucket::remove_dependent_nmethod(&c._bucketList, c._nmethodLast, false /* delete */);
assert(c._bucketList == c._bucketFirst, "check");
assert(c._bucketList->next() == c._bucketMiddle, "check");
assert(c._bucketList->next()->next() == c._bucketLast, "check");
assert(c._bucketList->next()->next()->next() == NULL, "check");
assert(c._bucketFirst->count() == 1, "check");
assert(c._bucketMiddle->count() == 1, "check");
assert(c._bucketLast->count() == 0, "check");
}
static void testRemoveDependentNmethodConvenienceFirst() {
TestNmethodBucketContext c;
nmethodBucket::remove_dependent_nmethod(c._bucketList, c._nmethodFirst);
assert(c._bucketList == c._bucketFirst, "check");
assert(c._bucketList->next() == c._bucketMiddle, "check");
assert(c._bucketList->next()->next() == c._bucketLast, "check");
assert(c._bucketList->next()->next()->next() == NULL, "check");
assert(c._bucketFirst->count() == 0, "check");
assert(c._bucketMiddle->count() == 1, "check");
assert(c._bucketLast->count() == 1, "check");
}
static void testRemoveDependentNmethodConvenienceMiddle() {
TestNmethodBucketContext c;
nmethodBucket::remove_dependent_nmethod(c._bucketList, c._nmethodMiddle);
assert(c._bucketList == c._bucketFirst, "check");
assert(c._bucketList->next() == c._bucketMiddle, "check");
assert(c._bucketList->next()->next() == c._bucketLast, "check");
assert(c._bucketList->next()->next()->next() == NULL, "check");
assert(c._bucketFirst->count() == 1, "check");
assert(c._bucketMiddle->count() == 0, "check");
assert(c._bucketLast->count() == 1, "check");
}
static void testRemoveDependentNmethodConvenienceLast() {
TestNmethodBucketContext c;
nmethodBucket::remove_dependent_nmethod(c._bucketList, c._nmethodLast);
assert(c._bucketList == c._bucketFirst, "check");
assert(c._bucketList->next() == c._bucketMiddle, "check");
assert(c._bucketList->next()->next() == c._bucketLast, "check");
assert(c._bucketList->next()->next()->next() == NULL, "check");
assert(c._bucketFirst->count() == 1, "check");
assert(c._bucketMiddle->count() == 1, "check");
assert(c._bucketLast->count() == 0, "check");
}
static void testRemoveDependentNmethod() {
testRemoveDependentNmethodFirstDeleteImmediately();
testRemoveDependentNmethodMiddleDeleteImmediately();
testRemoveDependentNmethodLastDeleteImmediately();
testRemoveDependentNmethodFirstDeleteDeferred();
testRemoveDependentNmethodMiddleDeleteDeferred();
testRemoveDependentNmethodLastDeleteDeferred();
testRemoveDependentNmethodConvenienceFirst();
testRemoveDependentNmethodConvenienceMiddle();
testRemoveDependentNmethodConvenienceLast();
}
static void test() {
testRemoveDependentNmethod();
}
};
void TestNmethodBucket_test() {
TestNmethodBucket::test();
}
#endif
@ -53,15 +53,15 @@
// forward declaration for class -- see below for definition
class SuperTypeClosure;
class JNIid;
class jniIdMapBase;
class BreakpointInfo;
class fieldDescriptor;
class DepChange;
class nmethodBucket;
class DependencyContext;
class fieldDescriptor;
class jniIdMapBase;
class JNIid;
class JvmtiCachedClassFieldMap;
class MemberNameTable;
class SuperTypeClosure;
// This is used in iterators below.
class FieldClosure: public StackObj {
@ -198,7 +198,6 @@ class InstanceKlass: public Klass {
// _is_marked_dependent can be set concurrently, thus cannot be part of the
// _misc_flags.
bool _is_marked_dependent; // used for marking during flushing and deoptimization
bool _has_unloaded_dependent;
// The low two bits of _misc_flags contains the kind field.
// This can be used to quickly discriminate among the four kinds of
@ -235,7 +234,7 @@ class InstanceKlass: public Klass {
MemberNameTable* _member_names; // Member names
JNIid* _jni_ids; // First JNI identifier for static fields in this class
jmethodID* _methods_jmethod_ids; // jmethodIDs corresponding to method_idnum, or NULL if none
nmethodBucket* _dependencies; // list of dependent nmethods
intptr_t _dep_context; // packed DependencyContext structure
nmethod* _osr_nmethods_head; // Head of list of on-stack replacement nmethods for this class
BreakpointInfo* _breakpoints; // bpt lists, managed by Method*
// Linked instanceKlasses of previous versions
@ -468,9 +467,6 @@ class InstanceKlass: public Klass {
bool is_marked_dependent() const { return _is_marked_dependent; }
void set_is_marked_dependent(bool value) { _is_marked_dependent = value; }
bool has_unloaded_dependent() const { return _has_unloaded_dependent; }
void set_has_unloaded_dependent(bool value) { _has_unloaded_dependent = value; }
// initialization (virtuals from Klass)
bool should_be_initialized() const; // means that initialize should be called
void initialize(TRAPS);
@ -835,7 +831,8 @@ public:
JNIid* jni_id_for(int offset);
// maintenance of deoptimization dependencies
int mark_dependent_nmethods(DepChange& changes);
inline DependencyContext dependencies();
int mark_dependent_nmethods(DepChange& changes);
void add_dependent_nmethod(nmethod* nm);
void remove_dependent_nmethod(nmethod* nm, bool delete_immediately);
@ -1027,7 +1024,6 @@ public:
void clean_weak_instanceklass_links(BoolObjectClosure* is_alive);
void clean_implementors_list(BoolObjectClosure* is_alive);
void clean_method_data(BoolObjectClosure* is_alive);
void clean_dependent_nmethods();
// Explicit metaspace deallocation of fields
// For RedefineClasses and class file parsing errors, we need to deallocate
@ -1320,48 +1316,6 @@ class JNIid: public CHeapObj<mtClass> {
void verify(Klass* holder);
};
//
// nmethodBucket is used to record dependent nmethods for
// deoptimization. nmethod dependencies are actually <klass, method>
// pairs but we really only care about the klass part for purposes of
// finding nmethods which might need to be deoptimized. Instead of
// recording the method, a count of how many times a particular nmethod
// was recorded is kept. This ensures that any recording errors are
// noticed since an nmethod should be removed as many times are it's
// added.
//
class nmethodBucket: public CHeapObj<mtClass> {
friend class VMStructs;
private:
nmethod* _nmethod;
int _count;
nmethodBucket* _next;
public:
nmethodBucket(nmethod* nmethod, nmethodBucket* next) {
_nmethod = nmethod;
_next = next;
_count = 1;
}
int count() { return _count; }
int increment() { _count += 1; return _count; }
int decrement();
nmethodBucket* next() { return _next; }
void set_next(nmethodBucket* b) { _next = b; }
nmethod* get_nmethod() { return _nmethod; }
static int mark_dependent_nmethods(nmethodBucket* deps, DepChange& changes);
static nmethodBucket* add_dependent_nmethod(nmethodBucket* deps, nmethod* nm);
static bool remove_dependent_nmethod(nmethodBucket** deps, nmethod* nm, bool delete_immediately);
static bool remove_dependent_nmethod(nmethodBucket* deps, nmethod* nm);
static nmethodBucket* clean_dependent_nmethods(nmethodBucket* deps);
#ifndef PRODUCT
static void print_dependent_nmethods(nmethodBucket* deps, bool verbose);
static bool is_dependent_nmethod(nmethodBucket* deps, nmethod* nm);
#endif //PRODUCT
};
// An iterator that's used to access the inner classes indices in the
// InstanceKlass::_inner_classes array.
class InnerClassesIterator : public StackObj {
@ -579,12 +579,45 @@ bool Method::can_be_statically_bound() const {
}
bool Method::is_accessor() const {
return is_getter() || is_setter();
}
bool Method::is_getter() const {
if (code_size() != 5) return false;
if (size_of_parameters() != 1) return false;
if (java_code_at(0) != Bytecodes::_aload_0 ) return false;
if (java_code_at(0) != Bytecodes::_aload_0) return false;
if (java_code_at(1) != Bytecodes::_getfield) return false;
if (java_code_at(4) != Bytecodes::_areturn &&
java_code_at(4) != Bytecodes::_ireturn ) return false;
switch (java_code_at(4)) {
case Bytecodes::_ireturn:
case Bytecodes::_lreturn:
case Bytecodes::_freturn:
case Bytecodes::_dreturn:
case Bytecodes::_areturn:
break;
default:
return false;
}
return true;
}
bool Method::is_setter() const {
if (code_size() != 6) return false;
if (java_code_at(0) != Bytecodes::_aload_0) return false;
switch (java_code_at(1)) {
case Bytecodes::_iload_1:
case Bytecodes::_aload_1:
case Bytecodes::_fload_1:
if (size_of_parameters() != 2) return false;
break;
case Bytecodes::_dload_1:
case Bytecodes::_lload_1:
if (size_of_parameters() != 3) return false;
break;
default:
return false;
}
if (java_code_at(2) != Bytecodes::_putfield) return false;
if (java_code_at(5) != Bytecodes::_return) return false;
return true;
}
Some files were not shown because too many files have changed in this diff.