commit e77f4f9c55
Merge
@@ -59,6 +59,7 @@ JVM_CFLAGS_INCLUDES += \
    -I$(TOPDIR)/src/hotspot/share/precompiled \
    -I$(TOPDIR)/src/java.base/share/native/include \
    -I$(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS_TYPE)/native/include \
    -I$(TOPDIR)/src/java.management/share/native/include \
    -I$(TOPDIR)/src/java.base/share/native/libjimage \
    #
@@ -79,6 +79,7 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC += \
    $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/ModuleAwareAgents/ClassLoadPrepare \
    $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/ModuleAwareAgents/ThreadStart \
    $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/StartPhase/AllowedFunctions \
+   $(TOPDIR)/test/hotspot/jtreg/serviceability/dcmd/jvmti/AttachFailed \
    #

# Add conditional directories here when needed.
@@ -110,6 +111,8 @@ ifeq ($(TOOLCHAIN_TYPE), solstudio)
    BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libAllowedFunctions := -lc
    BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libRedefineDoubleDelete := -lc
    BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libHandshakeTransitionTest := -lc
+   BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libHasNoEntryPoint := -lc
+   BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libReturnError := -lc
endif

ifeq ($(OPENJDK_TARGET_OS), linux)
@@ -985,12 +985,33 @@ public:
  }

  void hint(int imm) {
-    system(0b00, 0b011, 0b0010, imm, 0b000);
+    system(0b00, 0b011, 0b0010, 0b0000, imm);
  }

  void nop() {
    hint(0);
  }

+  void yield() {
+    hint(1);
+  }
+
+  void wfe() {
+    hint(2);
+  }
+
+  void wfi() {
+    hint(3);
+  }
+
+  void sev() {
+    hint(4);
+  }
+
+  void sevl() {
+    hint(5);
+  }
+
  // we only provide mrs and msr for the special purpose system
  // registers where op1 (instr[20:19]) == 11 and, (currently) only
  // use it for FPSR n.b msr has L (instr[21]) == 0 mrs has L == 1
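Note on the hint() fix: HINT #imm is the SYSTEM encoding op0=00, op1=011, CRn=0010, with the hint number carried in op2 and CRm left at 0b0000. The old argument order put imm into CRm, which only happened to produce the right word for nop() because its imm is 0. A minimal standalone sketch of the corrected encoding (hypothetical helper, not HotSpot code):

    #include <cstdint>
    #include <cassert>

    // HINT #imm, for imm in [0, 7]: base word is NOP (0xD503201F),
    // and the hint number occupies op2, bits [7:5].
    uint32_t encode_hint(unsigned imm) {
      assert(imm < 8);
      return 0xD503201F | (imm << 5);
    }
    // encode_hint(1) == 0xD503203F (YIELD), encode_hint(2) == 0xD503205F (WFE).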
@@ -494,42 +494,6 @@ void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
  }
}

-// Rather than take a segfault when the polling page is protected,
-// explicitly check for a safepoint in progress and if there is one,
-// fake a call to the handler as if a segfault had been caught.
-void LIR_Assembler::poll_for_safepoint(relocInfo::relocType rtype, CodeEmitInfo* info) {
-  __ mov(rscratch1, SafepointSynchronize::address_of_state());
-  __ ldrb(rscratch1, Address(rscratch1));
-  Label nope, poll;
-  __ cbz(rscratch1, nope);
-  __ block_comment("safepoint");
-  __ enter();
-  __ push(0x3, sp);                 // r0 & r1
-  __ push(0x3ffffffc, sp);          // integer registers except lr & sp & r0 & r1
-  __ adr(r0, poll);
-  __ str(r0, Address(rthread, JavaThread::saved_exception_pc_offset()));
-  __ mov(rscratch1, CAST_FROM_FN_PTR(address, SharedRuntime::get_poll_stub));
-  __ blrt(rscratch1, 1, 0, 1);
-  __ maybe_isb();
-  __ pop(0x3ffffffc, sp);           // integer registers except lr & sp & r0 & r1
-  __ mov(rscratch1, r0);
-  __ pop(0x3, sp);                  // r0 & r1
-  __ leave();
-  __ br(rscratch1);
-  address polling_page(os::get_polling_page());
-  assert(os::is_poll_address(polling_page), "should be");
-  unsigned long off;
-  __ adrp(rscratch1, Address(polling_page, rtype), off);
-  __ bind(poll);
-  if (info)
-    add_debug_info_for_branch(info);  // This isn't just debug info:
-                                      // it's the oop map
-  else
-    __ code_section()->relocate(pc(), rtype);
-  __ ldrw(zr, Address(rscratch1, off));
-  __ bind(nope);
-}
-
void LIR_Assembler::return_op(LIR_Opr result) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");
@@ -549,11 +513,9 @@ int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  address polling_page(os::get_polling_page());
  guarantee(info != NULL, "Shouldn't be NULL");
  assert(os::is_poll_address(polling_page), "should be");
-  unsigned long off;
-  __ adrp(rscratch1, Address(polling_page, relocInfo::poll_type), off);
-  assert(off == 0, "must be");
+  __ get_polling_page(rscratch1, polling_page, relocInfo::poll_type);
  add_debug_info_for_branch(info);  // This isn't just debug info:
                                    // it's the oop map
  __ read_polling_page(rscratch1, relocInfo::poll_type);
  return __ offset();
}
@@ -51,4 +51,6 @@ const bool CCallingConventionRequiresIntsAsLongs = false;

#define SUPPORT_RESERVED_STACK_AREA

+#define THREAD_LOCAL_POLL
+
#endif // CPU_AARCH64_VM_GLOBALDEFINITIONS_AARCH64_HPP
@@ -79,7 +79,7 @@ define_pd_global(bool, CompactStrings, true);
// Clear short arrays bigger than one word in an arch-specific way
define_pd_global(intx, InitArrayShortSize, BytesPerLong);

-define_pd_global(bool, ThreadLocalHandshakes, false);
+define_pd_global(bool, ThreadLocalHandshakes, true);

#if defined(COMPILER1) || defined(COMPILER2)
define_pd_global(intx, InlineSmallCode, 1000);
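Flipping ThreadLocalHandshakes to true is what activates the per-thread poll throughout the rest of this change: the armed state becomes a bit in a per-thread word rather than a protected global page. A rough model of the test the generated code performs (field and constant names are illustrative, not the real Thread layout):

    #include <cstdint>

    struct ThreadModel {
      uintptr_t polling_word;  // what Address(rthread, Thread::polling_page_offset()) loads
    };

    const uintptr_t POLL_BIT = 1;  // stands in for SafepointMechanism::poll_bit()

    // True when the thread must branch to the safepoint/handshake slow path;
    // this is the ldr + tbnz pair emitted throughout this commit.
    bool should_poll(const ThreadModel* t) {
      return (t->polling_word & POLL_BIT) != 0;
    }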
@@ -30,12 +30,13 @@
#include "logging/log.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
-#include "oops/methodData.hpp"
#include "oops/method.hpp"
+#include "oops/methodData.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
+#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"
@@ -438,13 +439,26 @@ void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {

void InterpreterMacroAssembler::dispatch_base(TosState state,
                                              address* table,
-                                              bool verifyoop) {
+                                              bool verifyoop,
+                                              bool generate_poll) {
  if (VerifyActivationFrameSize) {
    Unimplemented();
  }
  if (verifyoop) {
    verify_oop(r0, state);
  }

+  Label safepoint;
+  address* const safepoint_table = Interpreter::safept_table(state);
+  bool needs_thread_local_poll = generate_poll &&
+    SafepointMechanism::uses_thread_local_poll() && table != safepoint_table;
+
+  if (needs_thread_local_poll) {
+    NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
+    ldr(rscratch2, Address(rthread, Thread::polling_page_offset()));
+    tbnz(rscratch2, exact_log2(SafepointMechanism::poll_bit()), safepoint);
+  }
+
  if (table == Interpreter::dispatch_table(state)) {
    addw(rscratch2, rscratch1, Interpreter::distance_from_dispatch_table(state));
    ldr(rscratch2, Address(rdispatch, rscratch2, Address::uxtw(3)));
@@ -453,10 +467,17 @@ void InterpreterMacroAssembler::dispatch_base(TosState state,
    ldr(rscratch2, Address(rscratch2, rscratch1, Address::uxtw(3)));
  }
  br(rscratch2);
+
+  if (needs_thread_local_poll) {
+    bind(safepoint);
+    lea(rscratch2, ExternalAddress((address)safepoint_table));
+    ldr(rscratch2, Address(rscratch2, rscratch1, Address::uxtw(3)));
+    br(rscratch2);
+  }
}

-void InterpreterMacroAssembler::dispatch_only(TosState state) {
-  dispatch_base(state, Interpreter::dispatch_table(state));
+void InterpreterMacroAssembler::dispatch_only(TosState state, bool generate_poll) {
+  dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll);
}

void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
@@ -468,10 +489,10 @@ void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) {
}


-void InterpreterMacroAssembler::dispatch_next(TosState state, int step) {
+void InterpreterMacroAssembler::dispatch_next(TosState state, int step, bool generate_poll) {
  // load next bytecode
  ldrb(rscratch1, Address(pre(rbcp, step)));
-  dispatch_base(state, Interpreter::dispatch_table(state));
+  dispatch_base(state, Interpreter::dispatch_table(state), generate_poll);
}

void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
@@ -1585,6 +1606,7 @@ void InterpreterMacroAssembler::call_VM_base(Register oop_result,
}

void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
+  assert_different_registers(obj, rscratch1);
  Label update, next, none;

  verify_oop(obj);
@@ -1745,6 +1767,7 @@ void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret,
}

void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
+  assert_different_registers(rscratch1, rscratch2, mdp, tmp1, tmp2);
  if (ProfileInterpreter && MethodData::profile_parameters()) {
    Label profile_continue, done;

@@ -1752,8 +1775,8 @@ void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register t

    // Load the offset of the area within the MDO used for
    // parameters. If it's negative we're not profiling any parameters
-    ldr(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
-    tbnz(tmp1, 63, profile_continue);  // i.e. sign bit set
+    ldrw(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
+    tbnz(tmp1, 31, profile_continue);  // i.e. sign bit set

    // Compute a pointer to the area for parameters from the offset
    // and move the pointer to the slot for the last
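The fix above narrows the load: the parameters' di offset is stored in a 32-bit cell, so the 64-bit ldr plus a test of bit 63 examined bits that do not belong to the field. Loading 32 bits and testing bit 31 checks the real sign bit. In C terms (a sketch, assuming a signed 32-bit cell as the fix implies):

    #include <cstdint>

    // What "ldrw tmp1, [mdp, #off]; tbnz tmp1, 31, profile_continue" decides:
    bool not_profiling_parameters(int32_t di_offset) {
      return di_offset < 0;  // sign bit (bit 31) set
    }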
@@ -55,7 +55,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
                    bool check_exceptions);

  // base routine for all dispatches
-  void dispatch_base(TosState state, address* table, bool verifyoop = true);
+  void dispatch_base(TosState state, address* table,
+                     bool verifyoop = true, bool generate_poll = false);

 public:
  InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code) {}
@@ -165,12 +166,12 @@ class InterpreterMacroAssembler: public MacroAssembler {
  void dispatch_prolog(TosState state, int step = 0);
  void dispatch_epilog(TosState state, int step = 0);
  // dispatch via rscratch1
-  void dispatch_only(TosState state);
+  void dispatch_only(TosState state, bool generate_poll = false);
  // dispatch normal table via rscratch1 (assume rscratch1 is loaded already)
  void dispatch_only_normal(TosState state);
  void dispatch_only_noverify(TosState state);
  // load rscratch1 from [rbcp + step] and dispatch via rscratch1
-  void dispatch_next(TosState state, int step = 0);
+  void dispatch_next(TosState state, int step = 0, bool generate_poll = false);
  // load rscratch1 from [esi] and dispatch via rscratch1 and table
  void dispatch_via (TosState state, address* table);

@@ -287,6 +287,40 @@ void MacroAssembler::serialize_memory(Register thread, Register tmp) {
  dsb(Assembler::SY);
}

+void MacroAssembler::safepoint_poll(Label& slow_path) {
+  if (SafepointMechanism::uses_thread_local_poll()) {
+    ldr(rscratch1, Address(rthread, Thread::polling_page_offset()));
+    tbnz(rscratch1, exact_log2(SafepointMechanism::poll_bit()), slow_path);
+  } else {
+    unsigned long offset;
+    adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
+    ldrw(rscratch1, Address(rscratch1, offset));
+    assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
+    cbnz(rscratch1, slow_path);
+  }
+}
+
+// Just like safepoint_poll, but use an acquiring load for thread-
+// local polling.
+//
+// We need an acquire here to ensure that any subsequent load of the
+// global SafepointSynchronize::_state flag is ordered after this load
+// of the local Thread::_polling page.  We don't want this poll to
+// return false (i.e. not safepointing) and a later poll of the global
+// SafepointSynchronize::_state spuriously to return true.
+//
+// This is to avoid a race when we're in a native->Java transition
+// racing the code which wakes up from a safepoint.
+//
+void MacroAssembler::safepoint_poll_acquire(Label& slow_path) {
+  if (SafepointMechanism::uses_thread_local_poll()) {
+    lea(rscratch1, Address(rthread, Thread::polling_page_offset()));
+    ldar(rscratch1, rscratch1);
+    tbnz(rscratch1, exact_log2(SafepointMechanism::poll_bit()), slow_path);
+  } else {
+    safepoint_poll(slow_path);
+  }
+}
+
void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
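On the acquire variant: ldar gives the poll load acquire semantics so that a later read of the global SafepointSynchronize::_state cannot be reordered ahead of it. The same constraint expressed in C++ memory-model terms (a sketch of the ordering argument, not VM code):

    #include <atomic>
    #include <cstdint>

    std::atomic<uintptr_t> polling_word;  // model of the per-thread poll word
    const uintptr_t POLL_BIT = 1;

    bool poll_acquire() {
      // Acquire ordering: no subsequent load (e.g. of the global safepoint
      // state) may be hoisted above this one, so we cannot observe "poll
      // clear" here and then act on a stale global state.
      return (polling_word.load(std::memory_order_acquire) & POLL_BIT) != 0;
    }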
@@ -4336,15 +4370,26 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
}


-address MacroAssembler::read_polling_page(Register r, address page, relocInfo::relocType rtype) {
-  unsigned long off;
-  adrp(r, Address(page, rtype), off);
-  InstructionMark im(this);
-  code_section()->relocate(inst_mark(), rtype);
-  ldrw(zr, Address(r, off));
-  return inst_mark();
-}
+// Move the address of the polling page into dest.
+void MacroAssembler::get_polling_page(Register dest, address page, relocInfo::relocType rtype) {
+  if (SafepointMechanism::uses_thread_local_poll()) {
+    ldr(dest, Address(rthread, Thread::polling_page_offset()));
+  } else {
+    unsigned long off;
+    adrp(dest, Address(page, rtype), off);
+    assert(off == 0, "polling page must be page aligned");
+  }
+}
+
+// Move the address of the polling page into r, then read the polling
+// page.
+address MacroAssembler::read_polling_page(Register r, address page, relocInfo::relocType rtype) {
+  get_polling_page(r, page, rtype);
+  return read_polling_page(r, rtype);
+}
+
+// Read the polling page.  The address of the polling page must
+// already be in r.
address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) {
  InstructionMark im(this);
  code_section()->relocate(inst_mark(), rtype);
@@ -97,6 +97,9 @@ class MacroAssembler: public Assembler {
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

+  void safepoint_poll(Label& slow_path);
+  void safepoint_poll_acquire(Label& slow_path);
+
  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg is killed.
@@ -995,12 +998,12 @@ public:
  void atomic_xchgalw(Register prev, Register newv, Register addr);

  void orptr(Address adr, RegisterOrConstant src) {
-    ldr(rscratch2, adr);
+    ldr(rscratch1, adr);
    if (src.is_register())
-      orr(rscratch2, rscratch2, src.as_register());
+      orr(rscratch1, rscratch1, src.as_register());
    else
-      orr(rscratch2, rscratch2, src.as_constant());
-    str(rscratch2, adr);
+      orr(rscratch1, rscratch1, src.as_constant());
+    str(rscratch1, adr);
  }

  // A generic CAS; success or failure is in the EQ flag.
@@ -1199,6 +1202,7 @@ public:

  address read_polling_page(Register r, address page, relocInfo::relocType rtype);
  address read_polling_page(Register r, relocInfo::relocType rtype);
+  void get_polling_page(Register dest, address page, relocInfo::relocType rtype);

  // CRC32 code for java.util.zip.CRC32::updateBytes() instrinsic.
  void update_byte_crc32(Register crc, Register val, Register table);
@@ -245,6 +245,11 @@ bool NativeInstruction::is_safepoint_poll() {
  //      mov(reg, polling_page);
  //      ldr(zr, [reg, #offset]);
  //
+  // or
+  //
+  //      ldr(reg, [rthread, #offset]);
+  //      ldr(zr, [reg, #offset]);
+  //
  // however, we cannot rely on the polling page address load always
  // directly preceding the read from the page. C1 does that but C2
  // has to do the load and read as two independent instruction
@@ -1664,7 +1664,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
    // critical natives they are offset down.
    GrowableArray<int> arg_order(2 * total_in_args);
    VMRegPair tmp_vmreg;
-    tmp_vmreg.set1(r19->as_VMReg());
+    tmp_vmreg.set2(r19->as_VMReg());

    if (!is_critical_native) {
      for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
@@ -1952,7 +1952,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
    __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));

    // Force this write out before the read below
-    __ dmb(Assembler::SY);
+    __ dmb(Assembler::ISH);
  } else {
    __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
    __ stlrw(rscratch1, rscratch2);
@@ -1970,13 +1970,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
  // check for safepoint operation in progress and/or pending suspend requests
  Label safepoint_in_progress, safepoint_in_progress_done;
  {
-    assert(SafepointSynchronize::_not_synchronized == 0, "fix this code");
-    unsigned long offset;
-    __ adrp(rscratch1,
-            ExternalAddress((address)SafepointSynchronize::address_of_state()),
-            offset);
-    __ ldrw(rscratch1, Address(rscratch1, offset));
-    __ cbnzw(rscratch1, safepoint_in_progress);
+    __ safepoint_poll_acquire(safepoint_in_progress);
    __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
    __ cbnzw(rscratch1, safepoint_in_progress);
    __ bind(safepoint_in_progress_done);
@@ -2932,8 +2926,11 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t

  if (!cause_return) {
    // overwrite the return address pushed by save_live_registers
-    __ ldr(c_rarg0, Address(rthread, JavaThread::saved_exception_pc_offset()));
-    __ str(c_rarg0, Address(rfp, wordSize));
+    // Additionally, r20 is a callee-saved register so we can look at
+    // it later to determine if someone changed the return address for
+    // us!
+    __ ldr(r20, Address(rthread, JavaThread::saved_exception_pc_offset()));
+    __ str(r20, Address(rfp, wordSize));
  }

  // Do the call
@@ -2968,11 +2965,40 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
  // No exception case
  __ bind(noException);

+  Label no_adjust, bail;
+  if (SafepointMechanism::uses_thread_local_poll() && !cause_return) {
+    // If our stashed return pc was modified by the runtime we avoid touching it
+    __ ldr(rscratch1, Address(rfp, wordSize));
+    __ cmp(r20, rscratch1);
+    __ br(Assembler::NE, no_adjust);
+
+#ifdef ASSERT
+    // Verify the correct encoding of the poll we're about to skip.
+    // See NativeInstruction::is_ldrw_to_zr()
+    __ ldrw(rscratch1, Address(r20));
+    __ ubfx(rscratch2, rscratch1, 22, 10);
+    __ cmpw(rscratch2, 0b1011100101);
+    __ br(Assembler::NE, bail);
+    __ ubfx(rscratch2, rscratch1, 0, 5);
+    __ cmpw(rscratch2, 0b11111);
+    __ br(Assembler::NE, bail);
+#endif
+    // Adjust return pc forward to step over the safepoint poll instruction
+    __ add(r20, r20, NativeInstruction::instruction_size);
+    __ str(r20, Address(rfp, wordSize));
+  }
+
+  __ bind(no_adjust);
  // Normal exit, restore registers and exit.
  RegisterSaver::restore_live_registers(masm, save_vectors);

  __ ret(lr);

+#ifdef ASSERT
+  __ bind(bail);
+  __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
+#endif
+
  // Make sure all code is generated
  masm->flush();
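The ASSERT block verifies that the word at the stashed pc really is the poll instruction, ldr wzr, [reg]: bits [31:22] must be 0b1011100101, the LDR (32-bit, unsigned immediate) opcode, and the Rt field in bits [4:0] must be 0b11111 (zr). A standalone sketch of the same decode:

    #include <cstdint>

    // True if insn encodes "ldr wzr, [Xn, #imm]", the AArch64 safepoint poll.
    bool is_ldrw_to_zr(uint32_t insn) {
      return ((insn >> 22) & 0x3FF) == 0x2E5   // 0b1011100101
          && (insn & 0x1F) == 0x1F;            // Rt == zr
    }
    // Example: 0xB940011F (ldr wzr, [x8]) passes; adding
    // NativeInstruction::instruction_size (4 bytes) then skips it.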
@@ -414,6 +414,14 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
  __ restore_constant_pool_cache();
  __ get_method(rmethod);

+  if (state == atos) {
+    Register obj = r0;
+    Register mdp = r1;
+    Register tmp = r2;
+    __ ldr(mdp, Address(rmethod, Method::method_data_offset()));
+    __ profile_return_type(mdp, obj, tmp);
+  }
+
  // Pop N words from the stack
  __ get_cache_and_index_at_bcp(r1, r2, 1, index_size);
  __ ldr(r1, Address(r1, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
@@ -967,12 +975,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
-    ExternalAddress state(SafepointSynchronize::address_of_state());
-    unsigned long offset;
-    __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
-    __ ldrw(rscratch1, Address(rscratch1, offset));
-    assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
-    __ cbnz(rscratch1, slow_path);
+    __ safepoint_poll(slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.
@@ -986,6 +989,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
    __ ldrw(val, Address(esp, 0));              // byte value
    __ ldrw(crc, Address(esp, wordSize));       // Initial CRC

+    unsigned long offset;
    __ adrp(tbl, ExternalAddress(StubRoutines::crc_table_addr()), offset);
    __ add(tbl, tbl, offset);

@@ -1020,12 +1024,7 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
-    ExternalAddress state(SafepointSynchronize::address_of_state());
-    unsigned long offset;
-    __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
-    __ ldrw(rscratch1, Address(rscratch1, offset));
-    assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
-    __ cbnz(rscratch1, slow_path);
+    __ safepoint_poll(slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.
@@ -1375,7 +1374,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
    if (os::is_MP()) {
      if (UseMembar) {
        // Force this write out before the read below
-        __ dsb(Assembler::SY);
+        __ dmb(Assembler::ISH);
      } else {
        // Write serialization page so VM thread can do a pseudo remote membar.
        // We use the current thread pointer to calculate a thread specific
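Replacing the full-system dsb SY with dmb ISH keeps the required store-load ordering while dropping dsb's wait-for-completion semantics, which are not needed for a memory fence between threads. For reference, a sequentially consistent C++ fence compiles to exactly dmb ish on AArch64:

    #include <atomic>

    void full_fence() {
      std::atomic_thread_fence(std::memory_order_seq_cst);  // "dmb ish" on AArch64
    }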
@@ -1387,16 +1386,8 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {

  // check for safepoint operation in progress and/or pending suspend requests
  {
-    Label Continue;
-    {
-      unsigned long offset;
-      __ adrp(rscratch2, SafepointSynchronize::address_of_state(), offset);
-      __ ldrw(rscratch2, Address(rscratch2, offset));
-    }
-    assert(SafepointSynchronize::_not_synchronized == 0,
-           "SafepointSynchronize::_not_synchronized");
-    Label L;
-    __ cbnz(rscratch2, L);
+    Label L, Continue;
+    __ safepoint_poll_acquire(L);
    __ ldrw(rscratch2, Address(rthread, JavaThread::suspend_flags_offset()));
    __ cbz(rscratch2, Continue);
    __ bind(L);
@@ -1671,6 +1662,14 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  __ mov(rscratch2, true);
  __ strb(rscratch2, do_not_unlock_if_synchronized);

+  Label no_mdp;
+  Register mdp = r3;
+  __ ldr(mdp, Address(rmethod, Method::method_data_offset()));
+  __ cbz(mdp, no_mdp);
+  __ add(mdp, mdp, in_bytes(MethodData::data_offset()));
+  __ profile_parameters_type(mdp, r1, r2);
+  __ bind(no_mdp);
+
  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
@@ -1717,7 +1717,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide)
    __ push_i(r1);
    // Adjust the bcp by the 16-bit displacement in r2
    __ add(rbcp, rbcp, r2);
-    __ dispatch_only(vtos);
+    __ dispatch_only(vtos, /*generate_poll*/true);
    return;
  }

@@ -1833,7 +1833,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide)
  // continue with the bytecode @ target
  // rscratch1: target bytecode
  // rbcp: target bcp
-  __ dispatch_only(vtos);
+  __ dispatch_only(vtos, /*generate_poll*/true);

  if (UseLoopCounter) {
    if (ProfileInterpreter) {
@@ -1973,7 +1973,7 @@ void TemplateTable::ret() {
  __ ldr(rbcp, Address(rmethod, Method::const_offset()));
  __ lea(rbcp, Address(rbcp, r1));
  __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
-  __ dispatch_next(vtos);
+  __ dispatch_next(vtos, 0, /*generate_poll*/true);
}

void TemplateTable::wide_ret() {
@@ -1984,7 +1984,7 @@ void TemplateTable::wide_ret() {
  __ ldr(rbcp, Address(rmethod, Method::const_offset()));
  __ lea(rbcp, Address(rbcp, r1));
  __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
-  __ dispatch_next(vtos);
+  __ dispatch_next(vtos, 0, /*generate_poll*/true);
}

@@ -2014,7 +2014,7 @@ void TemplateTable::tableswitch() {
  __ rev32(r3, r3);
  __ load_unsigned_byte(rscratch1, Address(rbcp, r3, Address::sxtw(0)));
  __ add(rbcp, rbcp, r3, ext::sxtw);
-  __ dispatch_only(vtos);
+  __ dispatch_only(vtos, /*generate_poll*/true);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(r0);
@@ -2064,7 +2064,7 @@ void TemplateTable::fast_linearswitch() {
  __ rev32(r3, r3);
  __ add(rbcp, rbcp, r3, ext::sxtw);
  __ ldrb(rscratch1, Address(rbcp, 0));
-  __ dispatch_only(vtos);
+  __ dispatch_only(vtos, /*generate_poll*/true);
}

void TemplateTable::fast_binaryswitch() {
@@ -2162,7 +2162,7 @@ void TemplateTable::fast_binaryswitch() {
  __ rev32(j, j);
  __ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
  __ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
-  __ dispatch_only(vtos);
+  __ dispatch_only(vtos, /*generate_poll*/true);

  // default case -> j = default offset
  __ bind(default_case);
@@ -2171,7 +2171,7 @@ void TemplateTable::fast_binaryswitch() {
  __ rev32(j, j);
  __ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
  __ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
-  __ dispatch_only(vtos);
+  __ dispatch_only(vtos, /*generate_poll*/true);
}

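Every taken-branch template (branch, ret, wide_ret, and the three switch dispatchers) now passes generate_poll, since a bytecode loop that never calls out would otherwise never reach a poll and could stall a handshake indefinitely. In outline, the dispatch these sites emit reduces to a table select (an illustrative model, not VM source):

    #include <cstdint>

    typedef void (*Handler)();
    const uintptr_t POLL_BIT = 1;

    // One tbnz plus an indirect branch in the generated code.
    Handler select_handler(uintptr_t polling_word,
                           Handler* dispatch_table,   // normal bytecode table
                           Handler* safepoint_table,  // Interpreter::safept_table(state)
                           uint8_t bytecode) {
      Handler* table =
          (polling_word & POLL_BIT) ? safepoint_table : dispatch_table;
      return table[bytecode];
    }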
@@ -394,4 +394,6 @@ void VM_Version::initialize() {
                         g.generate_getPsrInfo());

  get_processor_features();
+
+  UNSUPPORTED_OPTION(CriticalJNINatives);
}
@@ -2968,7 +2968,9 @@ class StubGenerator: public StubCodeGenerator {
      CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
      assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

-      Label L_cardtable_loop;
+      Label L_cardtable_loop, L_done;
+
+      __ cbz_32(count, L_done); // zero count - nothing to do

      __ add_ptr_scaled_int32(count, addr, count, LogBytesPerHeapOop);
      __ sub(count, count, BytesPerHeapOop); // last addr
@@ -2987,6 +2989,7 @@ class StubGenerator: public StubCodeGenerator {
      __ strb(zero, Address(addr, 1, post_indexed));
      __ subs(count, count, 1);
      __ b(L_cardtable_loop, ge);
+      __ BIND(L_done);
    }
    break;
  case BarrierSet::ModRef:
@@ -41,20 +41,25 @@

void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  const Register temp_reg = R12_scratch2;
+  Label Lmiss;
+
  verify_oop(receiver);
+  MacroAssembler::null_check(receiver, oopDesc::klass_offset_in_bytes(), &Lmiss);
  load_klass(temp_reg, receiver);
-  if (TrapBasedICMissChecks) {
+
+  if (TrapBasedICMissChecks && TrapBasedNullChecks) {
    trap_ic_miss_check(temp_reg, iCache);
  } else {
-    Label L;
+    Label Lok;
    cmpd(CCR0, temp_reg, iCache);
-    beq(CCR0, L);
+    beq(CCR0, Lok);
+    bind(Lmiss);
    //load_const_optimized(temp_reg, SharedRuntime::get_ic_miss_stub(), R0);
    calculate_address_from_global_toc(temp_reg, SharedRuntime::get_ic_miss_stub(), true, true, false);
    mtctr(temp_reg);
    bctr();
    align(32, 12);
-    bind(L);
+    bind(Lok);
  }
}

@@ -3371,7 +3371,7 @@ void TemplateTable::invokevirtual(int byte_no) {
  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
  __ bfalse(CCR0, LnotFinal);

-  if (RewriteBytecodes && !UseSharedSpaces) {
+  if (RewriteBytecodes && !UseSharedSpaces && !DumpSharedSpaces) {
    patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
  }
  invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2);
@@ -582,7 +582,11 @@ class Assembler : public AbstractAssembler {
#define LOC_ZOPC    (unsigned long)(0xebL << 40 | 0xf2L)  // z196
#define LOCG_ZOPC   (unsigned long)(0xebL << 40 | 0xe2L)  // z196

-#define LMG_ZOPC    (unsigned long)(235L << 40 | 4L)
+// LOAD multiple registers at once
+#define LM_ZOPC     (unsigned int)(0x98 << 24)
+#define LMY_ZOPC    (unsigned long)(0xebL << 40 | 0x98L)
+#define LMG_ZOPC    (unsigned long)(0xebL << 40 | 0x04L)

#define LE_ZOPC     (unsigned int)(0x78 << 24)
#define LEY_ZOPC    (unsigned long)(237L << 40 | 100L)
@@ -613,7 +617,10 @@ class Assembler : public AbstractAssembler {
#define STOC_ZOPC   (unsigned long)(0xebL << 40 | 0xf3L)  // z196
#define STOCG_ZOPC  (unsigned long)(0xebL << 40 | 0xe3L)  // z196

-#define STMG_ZOPC   (unsigned long)(235L << 40 | 36L)
+// STORE multiple registers at once
+#define STM_ZOPC    (unsigned int)(0x90 << 24)
+#define STMY_ZOPC   (unsigned long)(0xebL << 40 | 0x90L)
+#define STMG_ZOPC   (unsigned long)(0xebL << 40 | 0x24L)

#define STE_ZOPC    (unsigned int)(0x70 << 24)
#define STEY_ZOPC   (unsigned long)(237L << 40 | 102L)
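The opcode rewrite is value-preserving: the constants only switch from decimal to hexadecimal, which makes the RSY-format layout (major opcode 0xEB in bits 47..40, minor opcode in bits 7..0) readable at a glance. A quick check of the arithmetic:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t STMG_NEW = 0xEBULL << 40 | 0x24ULL;
      const uint64_t STMG_OLD = 235ULL << 40 | 36ULL;  // previous spelling
      assert(STMG_NEW == STMG_OLD);                    // same instruction template
      assert(STMG_NEW == 0xEB0000000024ULL);
      return 0;
    }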
@@ -874,15 +881,19 @@ class Assembler : public AbstractAssembler {

// Shift
// arithmetic
-#define SLA_ZOPC    (unsigned int)(139 << 24)
-#define SLAG_ZOPC   (unsigned long)(235L << 40 | 11L)
-#define SRA_ZOPC    (unsigned int)(138 << 24)
-#define SRAG_ZOPC   (unsigned long)(235L << 40 | 10L)
+#define SLA_ZOPC    (unsigned int)(0x8b << 24)
+#define SLAK_ZOPC   (unsigned long)(0xebL << 40 | 0xddL)
+#define SLAG_ZOPC   (unsigned long)(0xebL << 40 | 0x0bL)
+#define SRA_ZOPC    (unsigned int)(0x8a << 24)
+#define SRAK_ZOPC   (unsigned long)(0xebL << 40 | 0xdcL)
+#define SRAG_ZOPC   (unsigned long)(0xebL << 40 | 0x0aL)
// logical
-#define SLL_ZOPC    (unsigned int)(137 << 24)
-#define SLLG_ZOPC   (unsigned long)(235L << 40 | 13L)
-#define SRL_ZOPC    (unsigned int)(136 << 24)
-#define SRLG_ZOPC   (unsigned long)(235L << 40 | 12L)
+#define SLL_ZOPC    (unsigned int)(0x89 << 24)
+#define SLLK_ZOPC   (unsigned long)(0xebL << 40 | 0xdfL)
+#define SLLG_ZOPC   (unsigned long)(0xebL << 40 | 0x0dL)
+#define SRL_ZOPC    (unsigned int)(0x88 << 24)
+#define SRLK_ZOPC   (unsigned long)(0xebL << 40 | 0xdeL)
+#define SRLG_ZOPC   (unsigned long)(0xebL << 40 | 0x0cL)

// Rotate, then AND/XOR/OR/insert
// rotate
@@ -2262,12 +2273,16 @@ class Assembler : public AbstractAssembler {

  // shift
  inline void z_sla( Register r1,              int64_t d2, Register b2=Z_R0); // shift left  r1 = r1 << ((d2+b2)&0x3f) ; int32, only 31 bits shifted, sign preserved!
+ inline void z_slak(Register r1, Register r3, int64_t d2, Register b2=Z_R0); // shift left  r1 = r3 << ((d2+b2)&0x3f) ; int32, only 31 bits shifted, sign preserved!
  inline void z_slag(Register r1, Register r3, int64_t d2, Register b2=Z_R0); // shift left  r1 = r3 << ((d2+b2)&0x3f) ; int64, only 63 bits shifted, sign preserved!
  inline void z_sra( Register r1,              int64_t d2, Register b2=Z_R0); // shift right r1 = r1 >> ((d2+b2)&0x3f) ; int32, sign extended
+ inline void z_srak(Register r1, Register r3, int64_t d2, Register b2=Z_R0); // shift right r1 = r3 >> ((d2+b2)&0x3f) ; int32, sign extended
  inline void z_srag(Register r1, Register r3, int64_t d2, Register b2=Z_R0); // shift right r1 = r3 >> ((d2+b2)&0x3f) ; int64, sign extended
  inline void z_sll( Register r1,              int64_t d2, Register b2=Z_R0); // shift left  r1 = r1 << ((d2+b2)&0x3f) ; int32, zeros added
+ inline void z_sllk(Register r1, Register r3, int64_t d2, Register b2=Z_R0); // shift left  r1 = r3 << ((d2+b2)&0x3f) ; int32, zeros added
  inline void z_sllg(Register r1, Register r3, int64_t d2, Register b2=Z_R0); // shift left  r1 = r3 << ((d2+b2)&0x3f) ; int64, zeros added
  inline void z_srl( Register r1,              int64_t d2, Register b2=Z_R0); // shift right r1 = r1 >> ((d2+b2)&0x3f) ; int32, zero extended
+ inline void z_srlk(Register r1, Register r3, int64_t d2, Register b2=Z_R0); // shift right r1 = r3 >> ((d2+b2)&0x3f) ; int32, zero extended
  inline void z_srlg(Register r1, Register r3, int64_t d2, Register b2=Z_R0); // shift right r1 = r3 >> ((d2+b2)&0x3f) ; int64, zero extended

  // rotate
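The new K declarations are the z196 "distinct operands" forms: they take a separate source register, so the input survives the shift, whereas the classic two-operand forms shift in place. A scalar model of the difference:

    #include <cstdint>

    uint32_t sll_in_place(uint32_t r1, unsigned sh) { return r1 << sh; }  // z_sll:  r1 = r1 << sh
    uint32_t sllk(uint32_t r3, unsigned sh)         { return r3 << sh; }  // z_sllk: r1 = r3 << sh, r3 preserved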
@@ -3035,7 +3050,11 @@ class Assembler : public AbstractAssembler {

  inline void z_tam();
  inline void z_stckf(int64_t d2, Register b2);
+ inline void z_stm( Register r1, Register r3, int64_t d2, Register b2);
+ inline void z_stmy(Register r1, Register r3, int64_t d2, Register b2);
  inline void z_stmg(Register r1, Register r3, int64_t d2, Register b2);
+ inline void z_lm(  Register r1, Register r3, int64_t d2, Register b2);
+ inline void z_lmy( Register r1, Register r3, int64_t d2, Register b2);
  inline void z_lmg( Register r1, Register r3, int64_t d2, Register b2);

  inline void z_cs( Register r1, Register r3, int64_t d2, Register b2);
@@ -334,12 +334,16 @@ inline void Assembler::z_stfle(int64_t d2, Register b2) { emit_32(STFLE_ZOPC | u
// SHIFT/ROTATE OPERATIONS
//-----------------------------------
inline void Assembler::z_sla( Register r1,              int64_t d2, Register b2) { emit_32( SLA_ZOPC  | regt(r1, 8, 32) | uimm12(d2, 20, 32) | reg(b2, 16, 32)); }
+inline void Assembler::z_slak(Register r1, Register r3, int64_t d2, Register b2) { emit_48( SLAK_ZOPC | regt(r1, 8, 48) | simm20(d2) | reg(b2, 16, 48) | reg(r3, 12, 48)); }
inline void Assembler::z_slag(Register r1, Register r3, int64_t d2, Register b2) { emit_48( SLAG_ZOPC | regt(r1, 8, 48) | simm20(d2) | reg(b2, 16, 48) | reg(r3, 12, 48)); }
inline void Assembler::z_sra( Register r1,              int64_t d2, Register b2) { emit_32( SRA_ZOPC  | regt(r1, 8, 32) | uimm12(d2, 20, 32) | reg(b2, 16, 32)); }
+inline void Assembler::z_srak(Register r1, Register r3, int64_t d2, Register b2) { emit_48( SRAK_ZOPC | regt(r1, 8, 48) | simm20(d2) | reg(b2, 16, 48) | reg(r3, 12, 48)); }
inline void Assembler::z_srag(Register r1, Register r3, int64_t d2, Register b2) { emit_48( SRAG_ZOPC | regt(r1, 8, 48) | simm20(d2) | reg(b2, 16, 48) | reg(r3, 12, 48)); }
inline void Assembler::z_sll( Register r1,              int64_t d2, Register b2) { emit_32( SLL_ZOPC  | regt(r1, 8, 32) | uimm12(d2, 20, 32) | reg(b2, 16, 32)); }
+inline void Assembler::z_sllk(Register r1, Register r3, int64_t d2, Register b2) { emit_48( SLLK_ZOPC | regt(r1, 8, 48) | simm20(d2) | reg(b2, 16, 48) | reg(r3, 12, 48)); }
inline void Assembler::z_sllg(Register r1, Register r3, int64_t d2, Register b2) { emit_48( SLLG_ZOPC | regt(r1, 8, 48) | simm20(d2) | reg(b2, 16, 48) | reg(r3, 12, 48)); }
inline void Assembler::z_srl( Register r1,              int64_t d2, Register b2) { emit_32( SRL_ZOPC  | regt(r1, 8, 32) | uimm12(d2, 20, 32) | reg(b2, 16, 32)); }
+inline void Assembler::z_srlk(Register r1, Register r3, int64_t d2, Register b2) { emit_48( SRLK_ZOPC | regt(r1, 8, 48) | simm20(d2) | reg(b2, 16, 48) | reg(r3, 12, 48)); }
inline void Assembler::z_srlg(Register r1, Register r3, int64_t d2, Register b2) { emit_48( SRLG_ZOPC | regt(r1, 8, 48) | simm20(d2) | reg(b2, 16, 48) | reg(r3, 12, 48)); }

// rotate left
@@ -690,10 +694,14 @@ inline void Assembler::z_ahhlr(Register r1, Register r2, Register r3) { emit_32(

inline void Assembler::z_tam() { emit_16( TAM_ZOPC); }
inline void Assembler::z_stckf(int64_t d2, Register b2) { emit_32( STCKF_ZOPC | uimm12(d2, 20, 32) | regz(b2, 16, 32)); }
-inline void Assembler::z_stmg(Register r1, Register r3, int64_t d2, Register b2) { emit_48( STMG_ZOPC | simm20(d2) | reg(r1, 8, 48) | reg(r3,12,48)| reg(b2,16,48) ); }
-inline void Assembler::z_lmg(Register r1, Register r3, int64_t d2, Register b2) { emit_48( LMG_ZOPC | simm20(d2) | reg(r1, 8, 48) | reg(r3,12,48)| reg(b2,16,48) ); }
+inline void Assembler::z_stm( Register r1, Register r3, int64_t d2, Register b2) { emit_32( STM_ZOPC  | reg(r1, 8, 32) | reg(r3,12,32)| reg(b2,16,32) | uimm12(d2, 20,32)); }
+inline void Assembler::z_stmy(Register r1, Register r3, int64_t d2, Register b2) { emit_48( STMY_ZOPC | reg(r1, 8, 48) | reg(r3,12,48)| reg(b2,16,48) | simm20(d2) ); }
+inline void Assembler::z_stmg(Register r1, Register r3, int64_t d2, Register b2) { emit_48( STMG_ZOPC | reg(r1, 8, 48) | reg(r3,12,48)| reg(b2,16,48) | simm20(d2) ); }
+inline void Assembler::z_lm(  Register r1, Register r3, int64_t d2, Register b2) { emit_32( LM_ZOPC   | reg(r1, 8, 32) | reg(r3,12,32)| reg(b2,16,32) | uimm12(d2, 20,32)); }
+inline void Assembler::z_lmy( Register r1, Register r3, int64_t d2, Register b2) { emit_48( LMY_ZOPC  | reg(r1, 8, 48) | reg(r3,12,48)| reg(b2,16,48) | simm20(d2) ); }
+inline void Assembler::z_lmg( Register r1, Register r3, int64_t d2, Register b2) { emit_48( LMG_ZOPC  | reg(r1, 8, 48) | reg(r3,12,48)| reg(b2,16,48) | simm20(d2) ); }

-inline void Assembler::z_cs(Register r1, Register r3, int64_t d2, Register b2) { emit_32( CS_ZOPC | regt(r1, 8, 32) | reg(r3, 12, 32) | reg(b2, 16, 32) | uimm12(d2, 20, 32)); }
+inline void Assembler::z_cs( Register r1, Register r3, int64_t d2, Register b2) { emit_32( CS_ZOPC | regt(r1, 8, 32) | reg(r3, 12, 32) | reg(b2, 16, 32) | uimm12(d2, 20, 32)); }
inline void Assembler::z_csy(Register r1, Register r3, int64_t d2, Register b2) { emit_48( CSY_ZOPC | regt(r1, 8, 48) | reg(r3, 12, 48) | reg(b2, 16, 48) | simm20(d2)); }
inline void Assembler::z_csg(Register r1, Register r3, int64_t d2, Register b2) { emit_48( CSG_ZOPC | regt(r1, 8, 48) | reg(r3, 12, 48) | reg(b2, 16, 48) | simm20(d2)); }
inline void Assembler::z_cs( Register r1, Register r3, const Address& a) { assert(!a.has_index(), "Cannot encode index"); z_cs( r1, r3, a.disp(), a.baseOrR0()); }
@@ -936,7 +936,7 @@ void MacroAssembler::load_long_pcrelative(Register Rdst, address dataLocation) {

  // Some extra safety net.
  if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
-    guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "too far away");
+    guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_long_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);
  }

  (this)->relocate(rspec, relocInfo::pcrel_addr_format);
@@ -956,7 +956,7 @@ void MacroAssembler::load_addr_pcrelative(Register Rdst, address addrLocation) {

  // Some extra safety net.
  if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
-    guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "too far away");
+    guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_long_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);
  }

  (this)->relocate(rspec, relocInfo::pcrel_addr_format);
@@ -1025,6 +1025,13 @@ void MacroAssembler::testbit(Register r, unsigned int bitPos) {
  }
}

+void MacroAssembler::prefetch_read(Address a) {
+  z_pfd(1, a.disp20(), a.indexOrR0(), a.base());
+}
+void MacroAssembler::prefetch_update(Address a) {
+  z_pfd(2, a.disp20(), a.indexOrR0(), a.base());
+}
+
// Clear a register, i.e. load const zero into reg.
// Return len (in bytes) of generated instruction(s).
// whole_reg: Clear 64 bits if true, 32 bits otherwise.
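prefetch_read and prefetch_update wrap PREFETCH DATA with codes 1 (fetch for read access) and 2 (fetch for store access). For comparison, the portable analogue using the GCC/Clang builtin:

    // Same read/write hint expressed with a compiler builtin.
    void prefetch_read(const void* p) { __builtin_prefetch(p, /*rw=*/0); }
    void prefetch_update(void* p)     { __builtin_prefetch(p, /*rw=*/1); }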
@@ -4896,77 +4903,296 @@ unsigned int MacroAssembler::CopyRawMemory_AlignedDisjoint(Register src_reg, Reg

// Intrinsics for CompactStrings

// Compress char[] to byte[]. odd_reg contains cnt. Kills dst. Early clobber: result
// Compress char[] to byte[].
//   Restores: src, dst
//   Uses: cnt
//   Kills: tmp, Z_R0, Z_R1.
//   Early clobber: result.
// Note:
//   cnt is signed int. Do not rely on high word!
//   counts # characters, not bytes.
// The result is the number of characters copied before the first incompatible character was found.
// If tmp2 is provided and the compression fails, the compression stops exactly at this point and the result is precise.
// If precise is true, the processing stops exactly at this point. Otherwise, the result may be off
// by a few bytes. The result always indicates the number of copied characters.
//
// Note: Does not behave exactly like package private StringUTF16 compress java implementation in case of failure:
// - Different number of characters may have been written to dead array (if tmp2 not provided).
// - Different number of characters may have been written to dead array (if precise is false).
// - Returns a number <cnt instead of 0. (Result gets compared with cnt.)
unsigned int MacroAssembler::string_compress(Register result, Register src, Register dst, Register odd_reg,
                                             Register even_reg, Register tmp, Register tmp2) {
  int block_start = offset();
  Label Lloop1, Lloop2, Lslow, Ldone;
  const Register addr2 = dst, ind1 = result, mask = tmp;
  const bool precise = (tmp2 != noreg);
unsigned int MacroAssembler::string_compress(Register result, Register src, Register dst, Register cnt,
                                             Register tmp, bool precise) {
  assert_different_registers(Z_R0, Z_R1, src, dst, cnt, tmp);

  BLOCK_COMMENT("string_compress {");

  z_sll(odd_reg, 1);       // Number of bytes to read. (Must be a positive simm32.)
  clear_reg(ind1);         // Index to read.
  z_llilf(mask, 0xFF00FF00);
  z_ahi(odd_reg, -16);     // Last possible index for fast loop.
  z_brl(Lslow);

  // ind1: index, even_reg: index increment, odd_reg: index limit
  z_iihf(mask, 0xFF00FF00);
  z_lhi(even_reg, 16);

  bind(Lloop1); // 8 Characters per iteration.
  z_lg(Z_R0, Address(src, ind1));
  z_lg(Z_R1, Address(src, ind1, 8));
  if (precise) {
    BLOCK_COMMENT("encode_iso_array {");
  } else {
    BLOCK_COMMENT("string_compress {");
  }
  int block_start = offset();

  Register Rsrc  = src;
  Register Rdst  = dst;
  Register Rix   = tmp;
  Register Rcnt  = cnt;
  Register Rmask = result;  // holds incompatibility check mask until result value is stored.
  Label ScalarShortcut, AllDone;

  z_iilf(Rmask, 0xFF00FF00);
  z_iihf(Rmask, 0xFF00FF00);

#if 0  // Sacrifice shortcuts for code compactness
  {
    //---< shortcuts for short strings (very frequent) >---
    // Strings with 4 and 8 characters were fond to occur very frequently.
    // Therefore, we handle them right away with minimal overhead.
    Label skipShortcut, skip4Shortcut, skip8Shortcut;
    Register Rout = Z_R0;
    z_chi(Rcnt, 4);
    z_brne(skip4Shortcut);                 // 4 characters are very frequent
    z_lg(Z_R0, 0, Rsrc);                   // Treat exactly 4 characters specially.
    if (VM_Version::has_DistinctOpnds()) {
      Rout = Z_R0;
      z_ngrk(Rix, Z_R0, Rmask);
    } else {
      Rout = Rix;
      z_lgr(Rix, Z_R0);
      z_ngr(Z_R0, Rmask);
    }
    z_brnz(skipShortcut);
    z_stcmh(Rout, 5, 0, Rdst);
    z_stcm(Rout, 5, 2, Rdst);
    z_lgfr(result, Rcnt);
    z_bru(AllDone);
    bind(skip4Shortcut);

    z_chi(Rcnt, 8);
    z_brne(skip8Shortcut);                 // There's more to do...
    z_lmg(Z_R0, Z_R1, 0, Rsrc);            // Treat exactly 8 characters specially.
    if (VM_Version::has_DistinctOpnds()) {
      Rout = Z_R0;
      z_ogrk(Rix, Z_R0, Z_R1);
      z_ngr(Rix, Rmask);
    } else {
      Rout = Rix;
      z_lgr(Rix, Z_R0);
      z_ogr(Z_R0, Z_R1);
      z_ngr(Z_R0, Rmask);
    }
    z_brnz(skipShortcut);
    z_stcmh(Rout, 5, 0, Rdst);
    z_stcm(Rout, 5, 2, Rdst);
    z_stcmh(Z_R1, 5, 4, Rdst);
    z_stcm(Z_R1, 5, 6, Rdst);
    z_lgfr(result, Rcnt);
    z_bru(AllDone);

    bind(skip8Shortcut);
    clear_reg(Z_R0, true, false);          // #characters already processed (none). Precond for scalar loop.
    z_brl(ScalarShortcut);                 // Just a few characters

    bind(skipShortcut);
  }
#endif
  clear_reg(Z_R0);                         // make sure register is properly initialized.

  if (VM_Version::has_VectorFacility()) {
    const int  min_vcnt     = 32;          // Minimum #characters required to use vector instructions.
                                           // Otherwise just do nothing in vector mode.
                                           // Must be multiple of 2*(vector register length in chars (8 HW = 128 bits)).
    const int  log_min_vcnt = exact_log2(min_vcnt);
    Label      VectorLoop, VectorDone, VectorBreak;

    VectorRegister Vtmp1      = Z_V16;
    VectorRegister Vtmp2      = Z_V17;
    VectorRegister Vmask      = Z_V18;
    VectorRegister Vzero      = Z_V19;
    VectorRegister Vsrc_first = Z_V20;
    VectorRegister Vsrc_last  = Z_V23;

    assert((Vsrc_last->encoding() - Vsrc_first->encoding() + 1) == min_vcnt/8, "logic error");
    assert(VM_Version::has_DistinctOpnds(), "Assumption when has_VectorFacility()");
    z_srak(Rix, Rcnt, log_min_vcnt);       // # vector loop iterations
    z_brz(VectorDone);                     // not enough data for vector loop

    z_vzero(Vzero);                        // all zeroes
    z_vgmh(Vmask, 0, 7);                   // generate 0xff00 mask for all 2-byte elements
    z_sllg(Z_R0, Rix, log_min_vcnt);       // remember #chars that will be processed by vector loop

    bind(VectorLoop);
    z_vlm(Vsrc_first, Vsrc_last, 0, Rsrc);
    add2reg(Rsrc, min_vcnt*2);

    //---< check for incompatible character >---
    z_vo(Vtmp1, Z_V20, Z_V21);
    z_vo(Vtmp2, Z_V22, Z_V23);
    z_vo(Vtmp1, Vtmp1, Vtmp2);
    z_vn(Vtmp1, Vtmp1, Vmask);
    z_vceqhs(Vtmp1, Vtmp1, Vzero);         // high half of all chars must be zero for successful compress.
    z_brne(VectorBreak);                   // break vector loop, incompatible character found.
                                           // re-process data from current iteration in break handler.

    //---< pack & store characters >---
    z_vpkh(Vtmp1, Z_V20, Z_V21);           // pack (src1, src2) -> tmp1
    z_vpkh(Vtmp2, Z_V22, Z_V23);           // pack (src3, src4) -> tmp2
    z_vstm(Vtmp1, Vtmp2, 0, Rdst);         // store packed string
    add2reg(Rdst, min_vcnt);

    z_brct(Rix, VectorLoop);

    z_bru(VectorDone);

    bind(VectorBreak);
    add2reg(Rsrc, -min_vcnt*2);            // Fix Rsrc. Rsrc was already updated, but Rdst and Rix are not.
    z_sll(Rix, log_min_vcnt);              // # chars processed so far in VectorLoop, excl. current iteration.
    z_sr(Z_R0, Rix);                       // correct # chars processed in total.

    bind(VectorDone);
  }

  {
    const int  min_cnt     = 8;            // Minimum #characters required to use unrolled loop.
                                           // Otherwise just do nothing in unrolled loop.
                                           // Must be multiple of 8.
    const int  log_min_cnt = exact_log2(min_cnt);
    Label      UnrolledLoop, UnrolledDone, UnrolledBreak;

    if (VM_Version::has_DistinctOpnds()) {
      z_ogrk(tmp2, Z_R0, Z_R1);
      z_srk(Rix, Rcnt, Z_R0);              // remaining # chars to compress in unrolled loop
    } else {
      z_lgr(tmp2, Z_R0);
      z_ogr(tmp2, Z_R1);
      z_lr(Rix, Rcnt);
      z_sr(Rix, Z_R0);
    }
    z_ngr(tmp2, mask);
    z_brne(Lslow);                         // Failed fast case, retry slowly.
    z_sra(Rix, log_min_cnt);               // unrolled loop count
    z_brz(UnrolledDone);

    bind(UnrolledLoop);
    z_lmg(Z_R0, Z_R1, 0, Rsrc);
    if (precise) {
      z_ogr(Z_R1, Z_R0);                   // check all 8 chars for incompatibility
      z_ngr(Z_R1, Rmask);
      z_brnz(UnrolledBreak);

      z_lg(Z_R1, 8, Rsrc);                 // reload destroyed register
      z_stcmh(Z_R0, 5, 0, Rdst);
      z_stcm(Z_R0, 5, 2, Rdst);
    } else {
      z_stcmh(Z_R0, 5, 0, Rdst);
      z_stcm(Z_R0, 5, 2, Rdst);

      z_ogr(Z_R0, Z_R1);
      z_ngr(Z_R0, Rmask);
      z_brnz(UnrolledBreak);
    }
    z_stcmh(Z_R1, 5, 4, Rdst);
    z_stcm(Z_R1, 5, 6, Rdst);

    add2reg(Rsrc, min_cnt*2);
    add2reg(Rdst, min_cnt);
    z_brct(Rix, UnrolledLoop);

    z_lgfr(Z_R0, Rcnt);                    // # chars processed in total after unrolled loop.
    z_nilf(Z_R0, ~(min_cnt-1));
    z_tmll(Rcnt, min_cnt-1);
    z_brnaz(ScalarShortcut);               // if all bits zero, there is nothing left to do for scalar loop.
                                           // Rix == 0 in all cases.
    z_lgfr(result, Rcnt);                  // all characters processed.
    z_sgfr(Rdst, Rcnt);                    // restore ptr
    z_sgfr(Rsrc, Rcnt);                    // restore ptr, double the element count for Rsrc restore
    z_sgfr(Rsrc, Rcnt);
    z_bru(AllDone);

    bind(UnrolledBreak);
    z_lgfr(Z_R0, Rcnt);                    // # chars processed in total after unrolled loop
    z_nilf(Z_R0, ~(min_cnt-1));
    z_sll(Rix, log_min_cnt);               // # chars processed so far in UnrolledLoop, excl. current iteration.
    z_sr(Z_R0, Rix);                       // correct # chars processed in total.
    if (!precise) {
      z_lgfr(result, Z_R0);
      z_aghi(result, min_cnt/2);           // min_cnt/2 characters have already been written
                                           // but ptrs were not updated yet.
      z_sgfr(Rdst, Z_R0);                  // restore ptr
      z_sgfr(Rsrc, Z_R0);                  // restore ptr, double the element count for Rsrc restore
      z_sgfr(Rsrc, Z_R0);
      z_bru(AllDone);
    }
    bind(UnrolledDone);
  }
  z_stcmh(Z_R0, 5, 0, addr2);
  z_stcm(Z_R0, 5, 2, addr2);
  if (!precise) { z_ogr(Z_R0, Z_R1); }
  z_stcmh(Z_R1, 5, 4, addr2);
  z_stcm(Z_R1, 5, 6, addr2);
  if (!precise) {
    z_ngr(Z_R0, mask);
    z_brne(Ldone);                         // Failed (more than needed was written).

  {
    Label ScalarLoop, ScalarDone, ScalarBreak;

    bind(ScalarShortcut);
    z_ltgfr(result, Rcnt);
    z_brz(AllDone);

#if 0  // Sacrifice shortcuts for code compactness
    {
      //---< Special treatment for very short strings (one or two characters) >---
      // For these strings, we are sure that the above code was skipped.
      // Thus, no registers were modified, register restore is not required.
      Label ScalarDoit, Scalar2Char;
      z_chi(Rcnt, 2);
      z_brh(ScalarDoit);
      z_llh(Z_R1, 0, Z_R0, Rsrc);
      z_bre(Scalar2Char);
      z_tmll(Z_R1, 0xff00);
      z_lghi(result, 0);                   // cnt == 1, first char invalid, no chars successfully processed
      z_brnaz(AllDone);
      z_stc(Z_R1, 0, Z_R0, Rdst);
      z_lghi(result, 1);
      z_bru(AllDone);

      bind(Scalar2Char);
      z_llh(Z_R0, 2, Z_R0, Rsrc);
      z_tmll(Z_R1, 0xff00);
      z_lghi(result, 0);                   // cnt == 2, first char invalid, no chars successfully processed
      z_brnaz(AllDone);
      z_stc(Z_R1, 0, Z_R0, Rdst);
      z_tmll(Z_R0, 0xff00);
      z_lghi(result, 1);                   // cnt == 2, second char invalid, one char successfully processed
      z_brnaz(AllDone);
      z_stc(Z_R0, 1, Z_R0, Rdst);
      z_lghi(result, 2);
      z_bru(AllDone);

      bind(ScalarDoit);
    }
#endif

    if (VM_Version::has_DistinctOpnds()) {
      z_srk(Rix, Rcnt, Z_R0);              // remaining # chars to compress in unrolled loop
    } else {
      z_lr(Rix, Rcnt);
      z_sr(Rix, Z_R0);
    }
    z_lgfr(result, Rcnt);                  // # processed characters (if all runs ok).
    z_brz(ScalarDone);

    bind(ScalarLoop);
    z_llh(Z_R1, 0, Z_R0, Rsrc);
    z_tmll(Z_R1, 0xff00);
    z_brnaz(ScalarBreak);
    z_stc(Z_R1, 0, Z_R0, Rdst);
    add2reg(Rsrc, 2);
    add2reg(Rdst, 1);
    z_brct(Rix, ScalarLoop);

    z_bru(ScalarDone);

    bind(ScalarBreak);
    z_sr(result, Rix);

    bind(ScalarDone);
    z_sgfr(Rdst, result);                  // restore ptr
    z_sgfr(Rsrc, result);                  // restore ptr, double the element count for Rsrc restore
    z_sgfr(Rsrc, result);
  }
  z_aghi(addr2, 8);
  z_brxle(ind1, even_reg, Lloop1);

  bind(Lslow);
  // Compute index limit and skip if negative.
  z_ahi(odd_reg, 16-2);    // Last possible index for slow loop.
  z_lhi(even_reg, 2);
  z_cr(ind1, odd_reg);
  z_brh(Ldone);

  bind(Lloop2); // 1 Character per iteration.
  z_llh(Z_R0, Address(src, ind1));
  z_tmll(Z_R0, 0xFF00);
  z_brnaz(Ldone);          // Failed slow case: Return number of written characters.
  z_stc(Z_R0, Address(addr2));
  z_aghi(addr2, 1);
  z_brxle(ind1, even_reg, Lloop2);

  bind(Ldone);             // result = ind1 = 2*cnt
  z_srl(ind1, 1);

  BLOCK_COMMENT("} string_compress");
  bind(AllDone);

  if (precise) {
    BLOCK_COMMENT("} encode_iso_array");
  } else {
    BLOCK_COMMENT("} string_compress");
  }
  return offset() - block_start;
}
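The 0xFF00FF00 mask, replicated into both register halves via z_iilf/z_iihf, tests four UTF-16 code units per 64-bit register: a char compresses to a Latin-1 byte exactly when its high byte is zero. A host-side sketch of the predicate the fast paths apply (illustrative only, not VM code):

    #include <cstdint>

    // True iff all four 16-bit chars packed in x fit in one byte each,
    // i.e. this 8-byte group can be compressed.
    bool compressible4(uint64_t x) {
      return (x & 0xFF00FF00FF00FF00ULL) == 0;
    }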
@@ -4997,53 +5223,432 @@ unsigned int MacroAssembler::string_inflate_trot(Register src, Register dst, Reg
  return offset() - block_start;
}

// Inflate byte[] to char[]. odd_reg contains cnt. Kills src.
unsigned int MacroAssembler::string_inflate(Register src, Register dst, Register odd_reg,
                                            Register even_reg, Register tmp) {
  int block_start = offset();
// Inflate byte[] to char[].
//   Restores: src, dst
//   Uses: cnt
//   Kills: tmp, Z_R0, Z_R1.
// Note:
//   cnt is signed int. Do not rely on high word!
//   counts # characters, not bytes.
unsigned int MacroAssembler::string_inflate(Register src, Register dst, Register cnt, Register tmp) {
  assert_different_registers(Z_R0, Z_R1, src, dst, cnt, tmp);

  BLOCK_COMMENT("string_inflate {");
  int block_start = offset();

  Label Lloop1, Lloop2, Lslow, Ldone;
  const Register addr1 = src, ind2 = tmp;
  Register Rcnt = cnt;   // # characters (src: bytes, dst: char (2-byte)), remaining after current loop.
  Register Rix  = tmp;   // loop index
  Register Rsrc = src;   // addr(src array)
  Register Rdst = dst;   // addr(dst array)
  Label ScalarShortcut, AllDone;

  z_sll(odd_reg, 1);       // Number of bytes to write. (Must be a positive simm32.)
  clear_reg(ind2);         // Index to write.
  z_ahi(odd_reg, -16);     // Last possible index for fast loop.
  z_brl(Lslow);
#if 0  // Sacrifice shortcuts for code compactness
  {
    //---< shortcuts for short strings (very frequent) >---
    Label skipShortcut, skip4Shortcut;
    z_ltr(Rcnt, Rcnt);                     // absolutely nothing to do for strings of len == 0.
    z_brz(AllDone);
    clear_reg(Z_R0);                       // make sure registers are properly initialized.
    clear_reg(Z_R1);
    z_chi(Rcnt, 4);
    z_brne(skip4Shortcut);                 // 4 characters are very frequent
    z_icm(Z_R0, 5, 0, Rsrc);               // Treat exactly 4 characters specially.
    z_icm(Z_R1, 5, 2, Rsrc);
    z_stm(Z_R0, Z_R1, 0, Rdst);
    z_bru(AllDone);
    bind(skip4Shortcut);

  // ind2: index, even_reg: index increment, odd_reg: index limit
  clear_reg(Z_R0);
  clear_reg(Z_R1);
  z_lhi(even_reg, 16);
    z_chi(Rcnt, 8);
    z_brh(skipShortcut);                   // There's a lot to do...
    z_lgfr(Z_R0, Rcnt);                    // remaining #characters (<= 8). Precond for scalar loop.
                                           // This does not destroy the "register cleared" state of Z_R0.
    z_brl(ScalarShortcut);                 // Just a few characters
    z_icmh(Z_R0, 5, 0, Rsrc);              // Treat exactly 8 characters specially.
    z_icmh(Z_R1, 5, 4, Rsrc);
    z_icm(Z_R0, 5, 2, Rsrc);
    z_icm(Z_R1, 5, 6, Rsrc);
    z_stmg(Z_R0, Z_R1, 0, Rdst);
    z_bru(AllDone);
    bind(skipShortcut);
  }
#endif
  clear_reg(Z_R0);                         // make sure register is properly initialized.

  bind(Lloop1); // 8 Characters per iteration.
  z_icmh(Z_R0, 5, 0, addr1);
  z_icmh(Z_R1, 5, 4, addr1);
  z_icm(Z_R0, 5, 2, addr1);
  z_icm(Z_R1, 5, 6, addr1);
  z_aghi(addr1, 8);
  z_stg(Z_R0, Address(dst, ind2));
  z_stg(Z_R1, Address(dst, ind2, 8));
  z_brxle(ind2, even_reg, Lloop1);
  if (VM_Version::has_VectorFacility()) {
    const int  min_vcnt     = 32;          // Minimum #characters required to use vector instructions.
                                           // Otherwise just do nothing in vector mode.
                                           // Must be multiple of vector register length (16 bytes = 128 bits).
    const int  log_min_vcnt = exact_log2(min_vcnt);
    Label      VectorLoop, VectorDone;

  bind(Lslow);
  // Compute index limit and skip if negative.
  z_ahi(odd_reg, 16-2);    // Last possible index for slow loop.
  z_lhi(even_reg, 2);
  z_cr(ind2, odd_reg);
  z_brh(Ldone);
    assert(VM_Version::has_DistinctOpnds(), "Assumption when has_VectorFacility()");
    z_srak(Rix, Rcnt, log_min_vcnt);       // calculate # vector loop iterations
    z_brz(VectorDone);                     // skip if none

  bind(Lloop2); // 1 Character per iteration.
  z_llc(Z_R0, Address(addr1));
  z_sth(Z_R0, Address(dst, ind2));
  z_aghi(addr1, 1);
  z_brxle(ind2, even_reg, Lloop2);
    z_sllg(Z_R0, Rix, log_min_vcnt);       // remember #chars that will be processed by vector loop

  bind(Ldone);
    bind(VectorLoop);
    z_vlm(Z_V20, Z_V21, 0, Rsrc);          // get next 32 characters (single-byte)
    add2reg(Rsrc, min_vcnt);

    z_vuplhb(Z_V22, Z_V20);                // V2 <- (expand) V0(high)
    z_vupllb(Z_V23, Z_V20);                // V3 <- (expand) V0(low)
    z_vuplhb(Z_V24, Z_V21);                // V4 <- (expand) V1(high)
    z_vupllb(Z_V25, Z_V21);                // V5 <- (expand) V1(low)
    z_vstm(Z_V22, Z_V25, 0, Rdst);         // store next 32 bytes
    add2reg(Rdst, min_vcnt*2);

    z_brct(Rix, VectorLoop);

    bind(VectorDone);
  }

  const int  min_cnt = 8;                  // Minimum #characters required to use unrolled scalar loop.
                                           // Otherwise just do nothing in unrolled scalar mode.
                                           // Must be multiple of 8.
|
||||
{
|
||||
const int log_min_cnt = exact_log2(min_cnt);
|
||||
Label UnrolledLoop, UnrolledDone;
|
||||
|
||||
|
||||
if (VM_Version::has_DistinctOpnds()) {
|
||||
z_srk(Rix, Rcnt, Z_R0); // remaining # chars to process in unrolled loop
|
||||
} else {
|
||||
z_lr(Rix, Rcnt);
|
||||
z_sr(Rix, Z_R0);
|
||||
}
|
||||
z_sra(Rix, log_min_cnt); // unrolled loop count
|
||||
z_brz(UnrolledDone);
|
||||
|
||||
clear_reg(Z_R0);
|
||||
clear_reg(Z_R1);
|
||||
|
||||
bind(UnrolledLoop);
|
||||
z_icmh(Z_R0, 5, 0, Rsrc);
|
||||
z_icmh(Z_R1, 5, 4, Rsrc);
|
||||
z_icm(Z_R0, 5, 2, Rsrc);
|
||||
z_icm(Z_R1, 5, 6, Rsrc);
|
||||
add2reg(Rsrc, min_cnt);
|
||||
|
||||
z_stmg(Z_R0, Z_R1, 0, Rdst);
|
||||
|
||||
add2reg(Rdst, min_cnt*2);
|
||||
z_brct(Rix, UnrolledLoop);
|
||||
|
||||
bind(UnrolledDone);
|
||||
z_lgfr(Z_R0, Rcnt); // # chars left over after unrolled loop.
|
||||
z_nilf(Z_R0, min_cnt-1);
|
||||
z_brnz(ScalarShortcut); // if zero, there is nothing left to do for scalar loop.
|
||||
// Rix == 0 in all cases.
|
||||
z_sgfr(Z_R0, Rcnt); // negative # characters the ptrs have been advanced previously.
|
||||
z_agr(Rdst, Z_R0); // restore ptr, double the element count for Rdst restore.
|
||||
z_agr(Rdst, Z_R0);
|
||||
z_agr(Rsrc, Z_R0); // restore ptr.
|
||||
z_bru(AllDone);
|
||||
}
|
||||
|
||||
{
|
||||
bind(ScalarShortcut);
|
||||
// Z_R0 must contain remaining # characters as 64-bit signed int here.
|
||||
// register contents is preserved over scalar processing (for register fixup).
|
||||
|
||||
#if 0 // Sacrifice shortcuts for code compactness
|
||||
{
|
||||
Label ScalarDefault;
|
||||
z_chi(Rcnt, 2);
|
||||
z_brh(ScalarDefault);
|
||||
z_llc(Z_R0, 0, Z_R0, Rsrc); // 6 bytes
|
||||
z_sth(Z_R0, 0, Z_R0, Rdst); // 4 bytes
|
||||
z_brl(AllDone);
|
||||
z_llc(Z_R0, 1, Z_R0, Rsrc); // 6 bytes
|
||||
z_sth(Z_R0, 2, Z_R0, Rdst); // 4 bytes
|
||||
z_bru(AllDone);
|
||||
bind(ScalarDefault);
|
||||
}
|
||||
#endif
|
||||
|
||||
Label CodeTable;
|
||||
// Some comments on Rix calculation:
|
||||
// - Rcnt is small, therefore no bits shifted out of low word (sll(g) instructions).
|
||||
// - high word of both Rix and Rcnt may contain garbage
|
||||
// - the final lngfr takes care of that garbage, extending the sign to high word
|
||||
z_sllg(Rix, Z_R0, 2); // calculate 10*Rix = (4*Rix + Rix)*2
|
||||
z_ar(Rix, Z_R0);
|
||||
z_larl(Z_R1, CodeTable);
|
||||
z_sll(Rix, 1);
|
||||
    z_lngfr(Rix, Rix);          // ix range: [0..7], after inversion & mult: [-(7*10)..(0*10)].
    z_bc(Assembler::bcondAlways, 0, Rix, Z_R1);

    z_llc(Z_R1, 6, Z_R0, Rsrc);   // 6 bytes
    z_sth(Z_R1, 12, Z_R0, Rdst);  // 4 bytes

    z_llc(Z_R1, 5, Z_R0, Rsrc);
    z_sth(Z_R1, 10, Z_R0, Rdst);

    z_llc(Z_R1, 4, Z_R0, Rsrc);
    z_sth(Z_R1, 8, Z_R0, Rdst);

    z_llc(Z_R1, 3, Z_R0, Rsrc);
    z_sth(Z_R1, 6, Z_R0, Rdst);

    z_llc(Z_R1, 2, Z_R0, Rsrc);
    z_sth(Z_R1, 4, Z_R0, Rdst);

    z_llc(Z_R1, 1, Z_R0, Rsrc);
    z_sth(Z_R1, 2, Z_R0, Rdst);

    z_llc(Z_R1, 0, Z_R0, Rsrc);
    z_sth(Z_R1, 0, Z_R0, Rdst);
    bind(CodeTable);

    z_chi(Rcnt, 8);             // no fixup for small strings. Rdst, Rsrc were not modified.
    z_brl(AllDone);

    z_sgfr(Z_R0, Rcnt);         // # characters the ptrs have been advanced previously.
    z_agr(Rdst, Z_R0);          // restore ptr, double the element count for Rdst restore.
    z_agr(Rdst, Z_R0);
    z_agr(Rsrc, Z_R0);          // restore ptr.
  }
  bind(AllDone);

  BLOCK_COMMENT("} string_inflate");
  return offset() - block_start;
}
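
A worked check of the CodeTable dispatch above (my arithmetic, plain C++, not HotSpot code): each table entry is 10 bytes, a 6-byte z_llc plus a 4-byte z_sth as the size comments note, and the computed branch lands 10*count bytes before bind(CodeTable), so exactly count entries execute, storing characters count-1 down to 0.

#include <cassert>

// Mirrors z_sllg(Rix, Z_R0, 2); z_ar(Rix, Z_R0); z_sll(Rix, 1); z_lngfr(Rix, Rix).
long code_table_offset(long count) {  // count in [0..7]
  long ix = (count << 2) + count;     // 5 * count
  ix <<= 1;                           // 10 * count
  return -ix;                         // lngfr forces the non-positive value
}

int main() {
  assert(code_table_offset(0) ==   0);  // nothing left: fall through at the label
  assert(code_table_offset(7) == -70);  // all seven entries execute
  return 0;
}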

// Inflate byte[] to char[], length known at compile time.
// Restores: src, dst
// Kills: tmp, Z_R0, Z_R1.
// Note:
// len is signed int. Counts # characters, not bytes.
unsigned int MacroAssembler::string_inflate_const(Register src, Register dst, Register tmp, int len) {
  assert_different_registers(Z_R0, Z_R1, src, dst, tmp);

  BLOCK_COMMENT("string_inflate_const {");
  int block_start = offset();

  Register Rix  = tmp;  // loop index
  Register Rsrc = src;  // addr(src array)
  Register Rdst = dst;  // addr(dst array)
  Label ScalarShortcut, AllDone;
  int nprocessed = 0;
  int src_off = 0;      // compensate for saved (optimized away) ptr advancement.
  int dst_off = 0;      // compensate for saved (optimized away) ptr advancement.
  bool restore_inputs = false;
  bool workreg_clear = false;

  if ((len >= 32) && VM_Version::has_VectorFacility()) {
    const int min_vcnt = 32;    // Minimum #characters required to use vector instructions.
                                // Otherwise just do nothing in vector mode.
                                // Must be multiple of vector register length (16 bytes = 128 bits).
    const int log_min_vcnt = exact_log2(min_vcnt);
    const int iterations = (len - nprocessed) >> log_min_vcnt;
    nprocessed += iterations << log_min_vcnt;
    Label VectorLoop;

    if (iterations == 1) {
      z_vlm(Z_V20, Z_V21, 0+src_off, Rsrc);   // get next 32 characters (single-byte)
      z_vuplhb(Z_V22, Z_V20);                 // V2 <- (expand) V0(high)
      z_vupllb(Z_V23, Z_V20);                 // V3 <- (expand) V0(low)
      z_vuplhb(Z_V24, Z_V21);                 // V4 <- (expand) V1(high)
      z_vupllb(Z_V25, Z_V21);                 // V5 <- (expand) V1(low)
      z_vstm(Z_V22, Z_V25, 0+dst_off, Rdst);  // store next 32 bytes

      src_off += min_vcnt;
      dst_off += min_vcnt*2;
    } else {
      restore_inputs = true;

      z_lgfi(Rix, len>>log_min_vcnt);
      bind(VectorLoop);
      z_vlm(Z_V20, Z_V21, 0, Rsrc);           // get next 32 characters (single-byte)
      add2reg(Rsrc, min_vcnt);

      z_vuplhb(Z_V22, Z_V20);                 // V2 <- (expand) V0(high)
      z_vupllb(Z_V23, Z_V20);                 // V3 <- (expand) V0(low)
      z_vuplhb(Z_V24, Z_V21);                 // V4 <- (expand) V1(high)
      z_vupllb(Z_V25, Z_V21);                 // V5 <- (expand) V1(low)
      z_vstm(Z_V22, Z_V25, 0, Rdst);          // store next 32 bytes
      add2reg(Rdst, min_vcnt*2);

      z_brct(Rix, VectorLoop);
    }
  }

  if (((len-nprocessed) >= 16) && VM_Version::has_VectorFacility()) {
    const int min_vcnt = 16;    // Minimum #characters required to use vector instructions.
                                // Otherwise just do nothing in vector mode.
                                // Must be multiple of vector register length (16 bytes = 128 bits).
    const int log_min_vcnt = exact_log2(min_vcnt);
    const int iterations = (len - nprocessed) >> log_min_vcnt;
    nprocessed += iterations << log_min_vcnt;
    assert(iterations == 1, "must be!");

    z_vl(Z_V20, 0+src_off, Z_R0, Rsrc);       // get next 16 characters (single-byte)
    z_vuplhb(Z_V22, Z_V20);                   // V2 <- (expand) V0(high)
    z_vupllb(Z_V23, Z_V20);                   // V3 <- (expand) V0(low)
    z_vstm(Z_V22, Z_V23, 0+dst_off, Rdst);    // store next 32 bytes

    src_off += min_vcnt;
    dst_off += min_vcnt*2;
  }

  if ((len-nprocessed) > 8) {
    const int min_cnt = 8;      // Minimum #characters required to use unrolled scalar loop.
                                // Otherwise just do nothing in unrolled scalar mode.
                                // Must be multiple of 8.
    const int log_min_cnt = exact_log2(min_cnt);
    const int iterations = (len - nprocessed) >> log_min_cnt;
    nprocessed += iterations << log_min_cnt;

    //---< avoid loop overhead/ptr increment for small # iterations >---
    if (iterations <= 2) {
      clear_reg(Z_R0);
      clear_reg(Z_R1);
      workreg_clear = true;

      z_icmh(Z_R0, 5, 0+src_off, Rsrc);
      z_icmh(Z_R1, 5, 4+src_off, Rsrc);
      z_icm(Z_R0, 5, 2+src_off, Rsrc);
      z_icm(Z_R1, 5, 6+src_off, Rsrc);
      z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst);

      src_off += min_cnt;
      dst_off += min_cnt*2;
    }

    if (iterations == 2) {
      z_icmh(Z_R0, 5, 0+src_off, Rsrc);
      z_icmh(Z_R1, 5, 4+src_off, Rsrc);
      z_icm(Z_R0, 5, 2+src_off, Rsrc);
      z_icm(Z_R1, 5, 6+src_off, Rsrc);
      z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst);

      src_off += min_cnt;
      dst_off += min_cnt*2;
    }

    if (iterations > 2) {
      Label UnrolledLoop;
      restore_inputs = true;

      clear_reg(Z_R0);
      clear_reg(Z_R1);
      workreg_clear = true;

      z_lgfi(Rix, iterations);
      bind(UnrolledLoop);
      z_icmh(Z_R0, 5, 0, Rsrc);
      z_icmh(Z_R1, 5, 4, Rsrc);
      z_icm(Z_R0, 5, 2, Rsrc);
      z_icm(Z_R1, 5, 6, Rsrc);
      add2reg(Rsrc, min_cnt);

      z_stmg(Z_R0, Z_R1, 0, Rdst);
      add2reg(Rdst, min_cnt*2);

      z_brct(Rix, UnrolledLoop);
    }
  }

  if ((len-nprocessed) > 0) {
    switch (len-nprocessed) {
      case 8:
        if (!workreg_clear) {
          clear_reg(Z_R0);
          clear_reg(Z_R1);
        }
        z_icmh(Z_R0, 5, 0+src_off, Rsrc);
        z_icmh(Z_R1, 5, 4+src_off, Rsrc);
        z_icm(Z_R0, 5, 2+src_off, Rsrc);
        z_icm(Z_R1, 5, 6+src_off, Rsrc);
        z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst);
        break;
      case 7:
        if (!workreg_clear) {
          clear_reg(Z_R0);
          clear_reg(Z_R1);
        }
        clear_reg(Rix);
        z_icm(Z_R0, 5, 0+src_off, Rsrc);
        z_icm(Z_R1, 5, 2+src_off, Rsrc);
        z_icm(Rix, 5, 4+src_off, Rsrc);
        z_stm(Z_R0, Z_R1, 0+dst_off, Rdst);
        z_llc(Z_R0, 6+src_off, Z_R0, Rsrc);
        z_st(Rix, 8+dst_off, Z_R0, Rdst);
        z_sth(Z_R0, 12+dst_off, Z_R0, Rdst);
        break;
      case 6:
        if (!workreg_clear) {
          clear_reg(Z_R0);
          clear_reg(Z_R1);
        }
        clear_reg(Rix);
        z_icm(Z_R0, 5, 0+src_off, Rsrc);
        z_icm(Z_R1, 5, 2+src_off, Rsrc);
        z_icm(Rix, 5, 4+src_off, Rsrc);
        z_stm(Z_R0, Z_R1, 0+dst_off, Rdst);
        z_st(Rix, 8+dst_off, Z_R0, Rdst);
        break;
      case 5:
        if (!workreg_clear) {
          clear_reg(Z_R0);
          clear_reg(Z_R1);
        }
        z_icm(Z_R0, 5, 0+src_off, Rsrc);
        z_icm(Z_R1, 5, 2+src_off, Rsrc);
        z_llc(Rix, 4+src_off, Z_R0, Rsrc);
        z_stm(Z_R0, Z_R1, 0+dst_off, Rdst);
        z_sth(Rix, 8+dst_off, Z_R0, Rdst);
        break;
      case 4:
        if (!workreg_clear) {
          clear_reg(Z_R0);
          clear_reg(Z_R1);
        }
        z_icm(Z_R0, 5, 0+src_off, Rsrc);
        z_icm(Z_R1, 5, 2+src_off, Rsrc);
        z_stm(Z_R0, Z_R1, 0+dst_off, Rdst);
        break;
      case 3:
        if (!workreg_clear) {
          clear_reg(Z_R0);
        }
        z_llc(Z_R1, 2+src_off, Z_R0, Rsrc);
        z_icm(Z_R0, 5, 0+src_off, Rsrc);
        z_sth(Z_R1, 4+dst_off, Z_R0, Rdst);
        z_st(Z_R0, 0+dst_off, Rdst);
        break;
      case 2:
        z_llc(Z_R0, 0+src_off, Z_R0, Rsrc);
        z_llc(Z_R1, 1+src_off, Z_R0, Rsrc);
        z_sth(Z_R0, 0+dst_off, Z_R0, Rdst);
        z_sth(Z_R1, 2+dst_off, Z_R0, Rdst);
        break;
      case 1:
        z_llc(Z_R0, 0+src_off, Z_R0, Rsrc);
        z_sth(Z_R0, 0+dst_off, Z_R0, Rdst);
        break;
      default:
        guarantee(false, "Impossible");
        break;
    }
    src_off += len-nprocessed;
    dst_off += (len-nprocessed)*2;
    nprocessed = len;
  }

  //---< restore modified input registers >---
  if ((nprocessed > 0) && restore_inputs) {
    z_agfi(Rsrc, -(nprocessed-src_off));
    if (nprocessed < 1000000000) { // avoid int overflow
      z_agfi(Rdst, -(nprocessed*2-dst_off));
    } else {
      z_agfi(Rdst, -(nprocessed-dst_off));
      z_agfi(Rdst, -nprocessed);
    }
  }

  BLOCK_COMMENT("} string_inflate_const");
  return offset() - block_start;
}
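
Because len is a compile-time constant here, the whole split is decided while generating code; a hedged sketch of that planning with the same thresholds as above (plain C++, not HotSpot code):

#include <cstdio>

int main() {
  int len = 57, nprocessed = 0;       // example length
  int v32 = len >> 5;                 // 32-char vector iterations (vector facility assumed)
  nprocessed += v32 << 5;
  int v16 = (len - nprocessed) >> 4;  // at most one 16-char vector block
  nprocessed += v16 << 4;
  if ((len - nprocessed) > 8) {       // 8-char unrolled scalar blocks
    nprocessed += ((len - nprocessed) >> 3) << 3;
  }
  printf("tail of %d chars goes to the 1..8 switch\n", len - nprocessed);
  return 0;
}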

@ -198,6 +198,9 @@ class MacroAssembler: public Assembler {
  // Test a bit in a register. Result is reflected in CC.
  void testbit(Register r, unsigned int bitPos);

  void prefetch_read(Address a);
  void prefetch_update(Address a);

  // Clear a register, i.e. load const zero into reg. Return len (in bytes) of
  // generated instruction(s).
  // whole_reg: Clear 64 bits if true, 32 bits otherwise.
@ -836,7 +839,7 @@ class MacroAssembler: public Assembler {
  void load_mirror(Register mirror, Register method);

  //--------------------------
  //--- perations on arrays.
  //--- Operations on arrays.
  //--------------------------
  unsigned int Clear_Array(Register cnt_arg, Register base_pointer_arg, Register src_addr, Register src_len);
  unsigned int Clear_Array_Const(long cnt, Register base);
@ -849,20 +852,34 @@ class MacroAssembler: public Assembler {
  // Special String Intrinsics Implementation.
  //-------------------------------------------
  // Intrinsics for CompactStrings
  // Compress char[] to byte[]. odd_reg contains cnt. tmp3 is only needed for precise behavior in failure case. Kills dst.
  unsigned int string_compress(Register result, Register src, Register dst, Register odd_reg,
                               Register even_reg, Register tmp, Register tmp2 = noreg);
  // Restores: src, dst
  // Uses: cnt
  // Kills: tmp, Z_R0, Z_R1.
  // Early clobber: result.
  // Boolean precise controls accuracy of result value.
  unsigned int string_compress(Register result, Register src, Register dst, Register cnt,
                               Register tmp, bool precise);

  // Inflate byte[] to char[].
  unsigned int string_inflate_trot(Register src, Register dst, Register cnt, Register tmp);

  // Inflate byte[] to char[].
  // Restores: src, dst
  // Uses: cnt
  // Kills: tmp, Z_R0, Z_R1.
  unsigned int string_inflate(Register src, Register dst, Register cnt, Register tmp);

  // Inflate byte[] to char[], length known at compile time.
  // Restores: src, dst
  // Kills: tmp, Z_R0, Z_R1.
  // Note:
  // len is signed int. Counts # characters, not bytes.
  unsigned int string_inflate_const(Register src, Register dst, Register tmp, int len);

  // Kills src.
  unsigned int has_negatives(Register result, Register src, Register cnt,
                             Register odd_reg, Register even_reg, Register tmp);

  // Inflate byte[] to char[].
  unsigned int string_inflate_trot(Register src, Register dst, Register cnt, Register tmp);
  // Odd_reg contains cnt. Kills src.
  unsigned int string_inflate(Register src, Register dst, Register odd_reg,
                              Register even_reg, Register tmp);

  unsigned int string_compare(Register str1, Register str2, Register cnt1, Register cnt2,
                              Register odd_reg, Register even_reg, Register result, int ae);

@ -10267,14 +10267,14 @@ instruct indexOf_UL(iRegP haystack, rarg2RegI haycnt, iRegP needle, rarg5RegI ne
%}

// char[] to byte[] compression
instruct string_compress(iRegP src, rarg5RegP dst, iRegI result, roddRegI len, revenRegI evenReg, iRegI tmp, flagsReg cr) %{
instruct string_compress(iRegP src, iRegP dst, iRegI result, iRegI len, iRegI tmp, flagsReg cr) %{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP_DEF result, USE_KILL dst, USE_KILL len, TEMP evenReg, TEMP tmp, KILL cr); // R0, R1 are killed, too.
  effect(TEMP_DEF result, TEMP tmp, KILL cr); // R0, R1 are killed, too.
  ins_cost(300);
  format %{ "String Compress $src->$dst($len) -> $result" %}
  ins_encode %{
    __ string_compress($result$$Register, $src$$Register, $dst$$Register, $len$$Register,
                       $evenReg$$Register, $tmp$$Register);
                       $tmp$$Register, false);
  %}
  ins_pipe(pipe_class_dummy);
%}
@ -10293,13 +10293,25 @@ instruct string_compress(iRegP src, rarg5RegP dst, iRegI result, roddRegI len, r
//%}

// byte[] to char[] inflation
instruct string_inflate(Universe dummy, rarg5RegP src, iRegP dst, roddRegI len, revenRegI evenReg, iRegI tmp, flagsReg cr) %{
instruct string_inflate(Universe dummy, iRegP src, iRegP dst, iRegI len, iRegI tmp, flagsReg cr) %{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(USE_KILL src, USE_KILL len, TEMP evenReg, TEMP tmp, KILL cr); // R0, R1 are killed, too.
  effect(TEMP tmp, KILL cr); // R0, R1 are killed, too.
  ins_cost(300);
  format %{ "String Inflate $src->$dst($len)" %}
  ins_encode %{
    __ string_inflate($src$$Register, $dst$$Register, $len$$Register, $evenReg$$Register, $tmp$$Register);
    __ string_inflate($src$$Register, $dst$$Register, $len$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_class_dummy);
%}

// byte[] to char[] inflation
instruct string_inflate_const(Universe dummy, iRegP src, iRegP dst, iRegI tmp, immI len, flagsReg cr) %{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp, KILL cr); // R0, R1 are killed, too.
  ins_cost(300);
  format %{ "String Inflate (constLen) $src->$dst($len)" %}
  ins_encode %{
    __ string_inflate_const($src$$Register, $dst$$Register, $tmp$$Register, $len$$constant);
  %}
  ins_pipe(pipe_class_dummy);
%}
@ -10318,14 +10330,14 @@ instruct has_negatives(rarg5RegP ary1, iRegI len, iRegI result, roddRegI oddReg,
%}

// encode char[] to byte[] in ISO_8859_1
instruct encode_iso_array(rarg5RegP src, iRegP dst, iRegI result, roddRegI len, revenRegI evenReg, iRegI tmp, iRegI tmp2, flagsReg cr) %{
instruct encode_iso_array(iRegP src, iRegP dst, iRegI result, iRegI len, iRegI tmp, flagsReg cr) %{
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(TEMP_DEF result, USE_KILL src, USE_KILL len, TEMP evenReg, TEMP tmp, TEMP tmp2, KILL cr); // R0, R1 are killed, too.
  effect(TEMP_DEF result, TEMP tmp, KILL cr); // R0, R1 are killed, too.
  ins_cost(300);
  format %{ "Encode array $src->$dst($len) -> $result" %}
  ins_encode %{
    __ string_compress($result$$Register, $src$$Register, $dst$$Register, $len$$Register,
                       $evenReg$$Register, $tmp$$Register, $tmp2$$Register);
                       $tmp$$Register, true);
  %}
  ins_pipe(pipe_class_dummy);
%}

@ -2884,12 +2884,12 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
  // ztos
  BTB_BEGIN(is_Bool, bsize, "putfield_or_static:is_Bool");
  __ pop(ztos);
  if (do_rewrite) {
  if (!is_static) {
    pop_and_check_object(obj);
  }
  __ z_nilf(Z_tos, 0x1);
  __ z_stc(Z_tos, field);
  if (!is_static) {
  if (do_rewrite) {
    patch_bytecode(Bytecodes::_fast_zputfield, bc, Z_ARG5, true, byte_no);
  }
  __ z_bru(Done);
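
The ztos path above masks before it stores; a minimal illustration of why (plain C++, not HotSpot code): a Java boolean field keeps only bit 0 of the operand-stack value, which is exactly what z_nilf(Z_tos, 0x1) followed by the one-byte z_stc guarantees.

#include <cstdint>

int main() {
  int32_t tos   = 0x7f02;                // junk above bit 0 on the operand stack
  uint8_t field = (uint8_t)(tos & 0x1);  // normalize: only bit 0 is significant
  return field;                          // 0 -- a well-formed boolean
}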

@ -398,8 +398,13 @@ void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ set(NULL_WORD, reg);
  } else {
#ifdef ASSERT
    {
      ThreadInVMfromNative tiv(JavaThread::current());
      assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(o)), "should be real oop");
    }
#endif
    int oop_index = __ oop_recorder()->find_index(o);
    assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(o)), "should be real oop");
    RelocationHolder rspec = oop_Relocation::spec(oop_index);
    __ set(NULL_WORD, reg, rspec); // Will be set when the nmethod is created
  }

@ -898,7 +898,9 @@ class StubGenerator: public StubCodeGenerator {
    assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
    assert_different_registers(addr, count, tmp);

    Label L_loop;
    Label L_loop, L_done;

    __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_done); // zero count - nothing to do

    __ sll_ptr(count, LogBytesPerHeapOop, count);
    __ sub(count, BytesPerHeapOop, count);
@ -914,6 +916,7 @@ class StubGenerator: public StubCodeGenerator {
    __ subcc(count, 1, count);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
    __ delayed()->add(addr, 1, addr);
    __ BIND(L_done);
  }
  break;
case BarrierSet::ModRef:
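
A minimal illustration of the corner case the new L_done guard removes (plain C++, not HotSpot code; 8-byte heap oops assumed): with count == 0 the computed last-byte offset goes negative before the do-while-shaped loop runs, so without the early exit the loop body would still dirty one card for memory the (empty) copy never touched.

#include <cstdio>

int main() {
  long count = 0;                       // elements copied
  long bytes_per_heap_oop = 8;          // assumption: 64-bit, uncompressed oops
  long last = count * bytes_per_heap_oop - bytes_per_heap_oop;
  printf("last byte offset = %ld\n", last);  // -8: the loop must not run
  return 0;
}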

@ -1256,7 +1256,7 @@ void Assembler::addr_nop_8() {

void Assembler::addsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
@ -1266,7 +1266,7 @@ void Assembler::addsd(XMMRegister dst, XMMRegister src) {
void Assembler::addsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
@ -1276,7 +1276,7 @@ void Assembler::addsd(XMMRegister dst, Address src) {

void Assembler::addss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
@ -1285,7 +1285,7 @@ void Assembler::addss(XMMRegister dst, XMMRegister src) {
void Assembler::addss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
@ -1295,7 +1295,7 @@ void Assembler::addss(XMMRegister dst, Address src) {
void Assembler::aesdec(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDE);
  emit_operand(dst, src);
@ -1303,7 +1303,7 @@ void Assembler::aesdec(XMMRegister dst, Address src) {

void Assembler::aesdec(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDE);
  emit_int8(0xC0 | encode);
@ -1312,7 +1312,7 @@ void Assembler::aesdec(XMMRegister dst, XMMRegister src) {
void Assembler::aesdeclast(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDF);
  emit_operand(dst, src);
@ -1320,7 +1320,7 @@ void Assembler::aesdeclast(XMMRegister dst, Address src) {

void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDF);
  emit_int8((unsigned char)(0xC0 | encode));
@ -1329,7 +1329,7 @@ void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) {
void Assembler::aesenc(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDC);
  emit_operand(dst, src);
@ -1337,7 +1337,7 @@ void Assembler::aesenc(XMMRegister dst, Address src) {

void Assembler::aesenc(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDC);
  emit_int8(0xC0 | encode);
@ -1346,7 +1346,7 @@ void Assembler::aesenc(XMMRegister dst, XMMRegister src) {
void Assembler::aesenclast(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDD);
  emit_operand(dst, src);
@ -1354,7 +1354,7 @@ void Assembler::aesenclast(XMMRegister dst, Address src) {

void Assembler::aesenclast(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDD);
  emit_int8((unsigned char)(0xC0 | encode));
@ -1387,7 +1387,7 @@ void Assembler::andl(Register dst, Register src) {

void Assembler::andnl(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)(0xC0 | encode));
@ -1396,7 +1396,7 @@ void Assembler::andnl(Register dst, Register src1, Register src2) {
void Assembler::andnl(Register dst, Register src1, Address src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF2);
  emit_operand(dst, src2);
@ -1424,7 +1424,7 @@ void Assembler::bswapl(Register reg) { // bswap

void Assembler::blsil(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
@ -1433,7 +1433,7 @@ void Assembler::blsil(Register dst, Register src) {
void Assembler::blsil(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rbx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rbx, src);
@ -1441,7 +1441,7 @@ void Assembler::blsil(Register dst, Address src) {

void Assembler::blsmskl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
@ -1450,7 +1450,7 @@ void Assembler::blsmskl(Register dst, Register src) {
void Assembler::blsmskl(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rdx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rdx, src);
@ -1458,7 +1458,7 @@ void Assembler::blsmskl(Register dst, Address src) {

void Assembler::blsrl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
@ -1467,7 +1467,7 @@ void Assembler::blsrl(Register dst, Register src) {
void Assembler::blsrl(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rcx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rcx, src);
@ -1753,7 +1753,7 @@ void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {

void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5A);
@ -1763,7 +1763,7 @@ void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
void Assembler::cvtsd2ss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
@ -1817,7 +1817,7 @@ void Assembler::cvtsi2ssq(XMMRegister dst, Register src) {

void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5A);
  emit_int8((unsigned char)(0xC0 | encode));
@ -1826,7 +1826,7 @@ void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
void Assembler::cvtss2sd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5A);
@ -1870,7 +1870,7 @@ void Assembler::decl(Address dst) {
void Assembler::divsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
@ -1880,7 +1880,7 @@ void Assembler::divsd(XMMRegister dst, Address src) {

void Assembler::divsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
@ -1890,7 +1890,7 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) {
void Assembler::divss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
@ -1899,7 +1899,7 @@ void Assembler::divss(XMMRegister dst, Address src) {

void Assembler::divss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_int8((unsigned char)(0xC0 | encode));
@ -2105,7 +2105,7 @@ void Assembler::jmpb(Label& L) {
void Assembler::ldmxcsr( Address src) {
  if (UseAVX > 0 ) {
    InstructionMark im(this);
    InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
    InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
    vex_prefix(src, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
    emit_int8((unsigned char)0xAE);
    emit_operand(as_Register(2), src);
@ -2784,7 +2784,7 @@ void Assembler::movsbl(Register dst, Register src) { // movsxb

void Assembler::movsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x10);
@ -2794,7 +2794,7 @@ void Assembler::movsd(XMMRegister dst, XMMRegister src) {
void Assembler::movsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
@ -2805,7 +2805,7 @@ void Assembler::movsd(XMMRegister dst, Address src) {
void Assembler::movsd(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.reset_is_clear_context();
  attributes.set_rex_vex_w_reverted();
@ -2816,7 +2816,7 @@ void Assembler::movsd(Address dst, XMMRegister src) {

void Assembler::movss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x10);
  emit_int8((unsigned char)(0xC0 | encode));
@ -2825,7 +2825,7 @@ void Assembler::movss(XMMRegister dst, XMMRegister src) {
void Assembler::movss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x10);
@ -2835,7 +2835,7 @@ void Assembler::movss(XMMRegister dst, Address src) {
void Assembler::movss(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  simd_prefix(src, xnoreg, dst, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
@ -2931,7 +2931,7 @@ void Assembler::mull(Register src) {
void Assembler::mulsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
@ -2941,7 +2941,7 @@ void Assembler::mulsd(XMMRegister dst, Address src) {

void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
@ -2951,7 +2951,7 @@ void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
void Assembler::mulss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
@ -2960,7 +2960,7 @@ void Assembler::mulss(XMMRegister dst, Address src) {

void Assembler::mulss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
@ -4289,7 +4289,7 @@ void Assembler::vpalignr(XMMRegister dst, XMMRegister nds, XMMRegister src, int

void Assembler::pblendw(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x0E);
  emit_int8((unsigned char)(0xC0 | encode));
@ -4388,7 +4388,7 @@ void Assembler::smovl() {

void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
@ -4398,7 +4398,7 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
void Assembler::sqrtsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
@ -4408,7 +4408,7 @@ void Assembler::sqrtsd(XMMRegister dst, Address src) {

void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_int8((unsigned char)(0xC0 | encode));
@ -4421,7 +4421,7 @@ void Assembler::std() {
void Assembler::sqrtss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
@ -4484,7 +4484,7 @@ void Assembler::subl(Register dst, Register src) {

void Assembler::subsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
@ -4494,7 +4494,7 @@ void Assembler::subsd(XMMRegister dst, XMMRegister src) {
void Assembler::subsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
@ -4504,7 +4504,7 @@ void Assembler::subsd(XMMRegister dst, Address src) {

void Assembler::subss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false , /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true , /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
@ -4513,7 +4513,7 @@ void Assembler::subss(XMMRegister dst, XMMRegister src) {
void Assembler::subss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
@ -4735,7 +4735,7 @@ void Assembler::xorb(Register dst, Address src) {
void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
@ -4745,7 +4745,7 @@ void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {

void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
@ -4755,7 +4755,7 @@ void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
@ -4764,7 +4764,7 @@ void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) {

void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
|
||||
emit_int8(0x58);
|
||||
emit_int8((unsigned char)(0xC0 | encode));
|
||||
@ -4773,7 +4773,7 @@ void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
|
||||
void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) {
|
||||
assert(VM_Version::supports_avx(), "");
|
||||
InstructionMark im(this);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
|
||||
attributes.set_rex_vex_w_reverted();
|
||||
vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
|
||||
@ -4783,7 +4783,7 @@ void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) {
|
||||
|
||||
void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
|
||||
assert(VM_Version::supports_avx(), "");
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
attributes.set_rex_vex_w_reverted();
|
||||
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
|
||||
emit_int8(0x5E);
|
||||
@ -4793,7 +4793,7 @@ void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
|
||||
void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) {
|
||||
assert(VM_Version::supports_avx(), "");
|
||||
InstructionMark im(this);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
|
||||
vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
|
||||
emit_int8(0x5E);
|
||||
@ -4802,7 +4802,7 @@ void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) {
|
||||
|
||||
void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
|
||||
assert(VM_Version::supports_avx(), "");
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
|
||||
emit_int8(0x5E);
|
||||
emit_int8((unsigned char)(0xC0 | encode));
|
||||
@ -4810,7 +4810,7 @@ void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
|
||||
|
||||
void Assembler::vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
|
||||
assert(VM_Version::supports_fma(), "");
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
|
||||
emit_int8((unsigned char)0xB9);
|
||||
emit_int8((unsigned char)(0xC0 | encode));
|
||||
@ -4818,7 +4818,7 @@ void Assembler::vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2)
|
||||
|
||||
void Assembler::vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
|
||||
assert(VM_Version::supports_fma(), "");
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
|
||||
emit_int8((unsigned char)0xB9);
|
||||
emit_int8((unsigned char)(0xC0 | encode));
|
||||
@ -4827,7 +4827,7 @@ void Assembler::vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2)
|
||||
void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) {
|
||||
assert(VM_Version::supports_avx(), "");
|
||||
InstructionMark im(this);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
|
||||
attributes.set_rex_vex_w_reverted();
|
||||
vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
|
||||
@ -4837,7 +4837,7 @@ void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) {
|
||||
|
||||
void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
|
||||
assert(VM_Version::supports_avx(), "");
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
attributes.set_rex_vex_w_reverted();
|
||||
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
|
||||
emit_int8(0x59);
|
||||
@ -4847,7 +4847,7 @@ void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
|
||||
void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) {
|
||||
assert(VM_Version::supports_avx(), "");
|
||||
InstructionMark im(this);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
|
||||
vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
|
||||
emit_int8(0x59);
|
||||
@ -4856,7 +4856,7 @@ void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) {
|
||||
|
||||
void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
|
||||
assert(VM_Version::supports_avx(), "");
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
|
||||
emit_int8(0x59);
|
||||
emit_int8((unsigned char)(0xC0 | encode));
|
||||
@ -4865,7 +4865,7 @@ void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
|
||||
void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) {
|
||||
assert(VM_Version::supports_avx(), "");
|
||||
InstructionMark im(this);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
|
||||
attributes.set_rex_vex_w_reverted();
|
||||
vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
|
||||
@ -4875,7 +4875,7 @@ void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) {
|
||||
|
||||
void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
|
||||
assert(VM_Version::supports_avx(), "");
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
attributes.set_rex_vex_w_reverted();
|
||||
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
|
||||
emit_int8(0x5C);
|
||||
@ -4885,7 +4885,7 @@ void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
|
||||
void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) {
|
||||
assert(VM_Version::supports_avx(), "");
|
||||
InstructionMark im(this);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
|
||||
vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
|
||||
emit_int8(0x5C);
|
||||
@ -4894,7 +4894,7 @@ void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) {
|
||||
|
||||
void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
|
||||
assert(VM_Version::supports_avx(), "");
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
|
||||
emit_int8(0x5C);
|
||||
emit_int8((unsigned char)(0xC0 | encode));
|
||||
@ -5203,6 +5203,24 @@ void Assembler::vsqrtpd(XMMRegister dst, Address src, int vector_len) {
|
||||
emit_operand(dst, src);
|
||||
}
|
||||
|
||||
void Assembler::vsqrtps(XMMRegister dst, XMMRegister src, int vector_len) {
|
||||
assert(VM_Version::supports_avx(), "");
|
||||
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
|
||||
int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
|
||||
emit_int8(0x51);
|
||||
emit_int8((unsigned char)(0xC0 | encode));
|
||||
}
|
||||
|
||||
void Assembler::vsqrtps(XMMRegister dst, Address src, int vector_len) {
|
||||
assert(VM_Version::supports_avx(), "");
|
||||
InstructionMark im(this);
|
||||
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
|
||||
attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
|
||||
vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
|
||||
emit_int8(0x51);
|
||||
emit_operand(dst, src);
|
||||
}
|
||||
|
||||
void Assembler::andpd(XMMRegister dst, XMMRegister src) {
|
||||
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
|
||||
InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
|
||||
@ -5377,7 +5395,7 @@ void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector
|
||||
void Assembler::vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
|
||||
assert(VM_Version::supports_avx() && (vector_len == 0) ||
|
||||
VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
|
||||
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
|
||||
emit_int8(0x01);
|
||||
emit_int8((unsigned char)(0xC0 | encode));
|
||||
@ -5436,7 +5454,7 @@ void Assembler::paddq(XMMRegister dst, XMMRegister src) {
|
||||
|
||||
void Assembler::phaddw(XMMRegister dst, XMMRegister src) {
|
||||
assert(VM_Version::supports_sse3(), "");
|
||||
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
|
||||
emit_int8(0x01);
|
||||
emit_int8((unsigned char)(0xC0 | encode));
|
||||
@ -6679,7 +6697,7 @@ void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, in
|
||||
|
||||
void Assembler::vzeroupper() {
|
||||
if (VM_Version::supports_vzeroupper()) {
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
(void)vex_prefix_and_encode(0, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
|
||||
emit_int8(0x77);
|
||||
}
|
||||
@ -7442,7 +7460,7 @@ void Assembler::vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src, int
|
||||
|
||||
void Assembler::shlxl(Register dst, Register src1, Register src2) {
|
||||
assert(VM_Version::supports_bmi2(), "");
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
|
||||
emit_int8((unsigned char)0xF7);
|
||||
emit_int8((unsigned char)(0xC0 | encode));
|
||||
@ -7450,7 +7468,7 @@ void Assembler::shlxl(Register dst, Register src1, Register src2) {
|
||||
|
||||
void Assembler::shlxq(Register dst, Register src1, Register src2) {
|
||||
assert(VM_Version::supports_bmi2(), "");
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
|
||||
emit_int8((unsigned char)0xF7);
|
||||
emit_int8((unsigned char)(0xC0 | encode));
|
||||
@ -7985,7 +8003,7 @@ void Assembler::andq(Register dst, Register src) {
|
||||
|
||||
void Assembler::andnq(Register dst, Register src1, Register src2) {
|
||||
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
|
||||
emit_int8((unsigned char)0xF2);
|
||||
emit_int8((unsigned char)(0xC0 | encode));
|
||||
@ -7994,7 +8012,7 @@ void Assembler::andnq(Register dst, Register src1, Register src2) {
|
||||
void Assembler::andnq(Register dst, Register src1, Address src2) {
|
||||
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
|
||||
InstructionMark im(this);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
|
||||
emit_int8((unsigned char)0xF2);
|
||||
emit_operand(dst, src2);
|
||||
@ -8022,7 +8040,7 @@ void Assembler::bswapq(Register reg) {
|
||||
|
||||
void Assembler::blsiq(Register dst, Register src) {
|
||||
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
|
||||
emit_int8((unsigned char)0xF3);
|
||||
emit_int8((unsigned char)(0xC0 | encode));
|
||||
@ -8031,7 +8049,7 @@ void Assembler::blsiq(Register dst, Register src) {
|
||||
void Assembler::blsiq(Register dst, Address src) {
|
||||
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
|
||||
InstructionMark im(this);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
vex_prefix(src, dst->encoding(), rbx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
|
||||
emit_int8((unsigned char)0xF3);
|
||||
emit_operand(rbx, src);
|
||||
@ -8039,7 +8057,7 @@ void Assembler::blsiq(Register dst, Address src) {
|
||||
|
||||
void Assembler::blsmskq(Register dst, Register src) {
|
||||
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
|
||||
emit_int8((unsigned char)0xF3);
|
||||
emit_int8((unsigned char)(0xC0 | encode));
|
||||
@ -8048,7 +8066,7 @@ void Assembler::blsmskq(Register dst, Register src) {
|
||||
void Assembler::blsmskq(Register dst, Address src) {
|
||||
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
|
||||
InstructionMark im(this);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
vex_prefix(src, dst->encoding(), rdx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
|
||||
emit_int8((unsigned char)0xF3);
|
||||
emit_operand(rdx, src);
|
||||
@ -8056,7 +8074,7 @@ void Assembler::blsmskq(Register dst, Address src) {
|
||||
|
||||
void Assembler::blsrq(Register dst, Register src) {
|
||||
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
|
||||
emit_int8((unsigned char)0xF3);
|
||||
emit_int8((unsigned char)(0xC0 | encode));
|
||||
@ -8065,7 +8083,7 @@ void Assembler::blsrq(Register dst, Register src) {
|
||||
void Assembler::blsrq(Register dst, Address src) {
|
||||
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
|
||||
InstructionMark im(this);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
vex_prefix(src, dst->encoding(), rcx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
|
||||
emit_int8((unsigned char)0xF3);
|
||||
emit_operand(rcx, src);
|
||||
@ -8504,7 +8522,7 @@ void Assembler::mulq(Register src) {
|
||||
|
||||
void Assembler::mulxq(Register dst1, Register dst2, Register src) {
|
||||
assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
int encode = vex_prefix_and_encode(dst1->encoding(), dst2->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
|
||||
emit_int8((unsigned char)0xF6);
|
||||
emit_int8((unsigned char)(0xC0 | encode));
|
||||
@ -8667,7 +8685,7 @@ void Assembler::rorq(Register dst, int imm8) {
|
||||
|
||||
void Assembler::rorxq(Register dst, Register src, int imm8) {
|
||||
assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes);
|
||||
emit_int8((unsigned char)0xF0);
|
||||
emit_int8((unsigned char)(0xC0 | encode));
|
||||
@ -8676,7 +8694,7 @@ void Assembler::rorxq(Register dst, Register src, int imm8) {
|
||||
|
||||
void Assembler::rorxd(Register dst, Register src, int imm8) {
|
||||
assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
|
||||
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
|
||||
int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes);
|
||||
emit_int8((unsigned char)0xF0);
|
||||
emit_int8((unsigned char)(0xC0 | encode));
|
||||
|
@ -1919,9 +1919,11 @@ private:
void vdivpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
void vdivps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

// Sqrt Packed Floating-Point Values - Double precision only
// Sqrt Packed Floating-Point Values
void vsqrtpd(XMMRegister dst, XMMRegister src, int vector_len);
void vsqrtpd(XMMRegister dst, Address src, int vector_len);
void vsqrtps(XMMRegister dst, XMMRegister src, int vector_len);
void vsqrtps(XMMRegister dst, Address src, int vector_len);

// Bitwise Logical AND of Packed Floating-Point Values
void andpd(XMMRegister dst, XMMRegister src);

@ -6630,6 +6630,13 @@ void MacroAssembler::restore_cpu_control_state_after_jni() {
}
// Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
vzeroupper();
// Reset k1 to 0xffff.
if (VM_Version::supports_evex()) {
push(rcx);
movl(rcx, 0xffff);
kmovwl(k1, rcx);
pop(rcx);
}

#ifndef _LP64
// Either restore the x87 floating point control word after returning

@ -3388,26 +3388,29 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
// No exception case
__ bind(noException);

Label no_adjust, bail;
Label no_adjust, bail, no_prefix;
if (SafepointMechanism::uses_thread_local_poll() && !cause_return) {
// If our stashed return pc was modified by the runtime we avoid touching it
__ cmpptr(rbx, Address(rbp, wordSize));
__ jccb(Assembler::notEqual, no_adjust);

#ifdef ASSERT
// Verify the correct encoding of the poll we're about to skip.
// See NativeInstruction::is_safepoint_poll()
__ cmpb(Address(rbx, 0), NativeTstRegMem::instruction_rex_b_prefix);
__ jcc(Assembler::notEqual, bail);
__ cmpb(Address(rbx, 1), NativeTstRegMem::instruction_code_memXregl);
__ jcc(Assembler::notEqual, no_prefix);
__ addptr(rbx, 1);
__ bind(no_prefix);
#ifdef ASSERT
__ cmpb(Address(rbx, 0), NativeTstRegMem::instruction_code_memXregl);
__ jcc(Assembler::notEqual, bail);
// Mask out the modrm bits
__ testb(Address(rbx, 2), NativeTstRegMem::modrm_mask);
__ testb(Address(rbx, 1), NativeTstRegMem::modrm_mask);
// rax encodes to 0, so if the bits are nonzero it's incorrect
__ jcc(Assembler::notZero, bail);
#endif
// Adjust return pc forward to step over the safepoint poll instruction
__ addptr(Address(rbp, wordSize), 3);
__ addptr(rbx, 2);
__ movptr(Address(rbp, wordSize), rbx);
}

__ bind(no_adjust);

@ -1264,9 +1264,12 @@ class StubGenerator: public StubCodeGenerator {
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

Label L_loop;
Label L_loop, L_done;
const Register end = count;

__ testl(count, count);
__ jcc(Assembler::zero, L_done); // zero count - nothing to do

__ leaq(end, Address(start, count, TIMES_OOP, 0)); // end == start+count*oop_size
__ subptr(end, BytesPerHeapOop); // end - 1 to make inclusive
__ shrptr(start, CardTableModRefBS::card_shift);
@ -1280,6 +1283,7 @@ class StubGenerator: public StubCodeGenerator {
__ movb(Address(start, count, Address::times_1), 0);
__ decrement(count);
__ jcc(Assembler::greaterEqual, L_loop);
__ BIND(L_done);
}
break;
default:

@ -629,18 +629,26 @@ void VM_Version::get_processor_features() {
_features &= ~CPU_SSE;

// first try initial setting and detect what we can support
int use_avx_limit = 0;
if (UseAVX > 0) {
if (UseAVX > 2 && supports_evex()) {
UseAVX = 3;
use_avx_limit = 3;
} else if (UseAVX > 1 && supports_avx2()) {
UseAVX = 2;
use_avx_limit = 2;
} else if (UseAVX > 0 && supports_avx()) {
UseAVX = 1;
use_avx_limit = 1;
} else {
UseAVX = 0;
use_avx_limit = 0;
}
}
if (FLAG_IS_DEFAULT(UseAVX)) {
FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
} else if (UseAVX > use_avx_limit) {
warning("UseAVX=%d is not supported on this CPU, setting it to UseAVX=%d", (int) UseAVX, use_avx_limit);
FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
} else if (UseAVX < 0) {
UseAVX = 0;
warning("UseAVX=%d is not valid, setting it to UseAVX=0", (int) UseAVX);
FLAG_SET_DEFAULT(UseAVX, 0);
}

if (UseAVX < 3) {
@ -710,16 +718,29 @@ void VM_Version::get_processor_features() {
// UseSSE is set to the smaller of what hardware supports and what
// the command line requires. I.e., you cannot set UseSSE to 2 on
// older Pentiums which do not support it.
if (UseSSE > 4) UseSSE=4;
if (UseSSE < 0) UseSSE=0;
if (!supports_sse4_1()) // Drop to 3 if no SSE4 support
UseSSE = MIN2((intx)3,UseSSE);
if (!supports_sse3()) // Drop to 2 if no SSE3 support
UseSSE = MIN2((intx)2,UseSSE);
if (!supports_sse2()) // Drop to 1 if no SSE2 support
UseSSE = MIN2((intx)1,UseSSE);
if (!supports_sse ()) // Drop to 0 if no SSE support
UseSSE = 0;
int use_sse_limit = 0;
if (UseSSE > 0) {
if (UseSSE > 3 && supports_sse4_1()) {
use_sse_limit = 4;
} else if (UseSSE > 2 && supports_sse3()) {
use_sse_limit = 3;
} else if (UseSSE > 1 && supports_sse2()) {
use_sse_limit = 2;
} else if (UseSSE > 0 && supports_sse()) {
use_sse_limit = 1;
} else {
use_sse_limit = 0;
}
}
if (FLAG_IS_DEFAULT(UseSSE)) {
FLAG_SET_DEFAULT(UseSSE, use_sse_limit);
} else if (UseSSE > use_sse_limit) {
warning("UseSSE=%d is not supported on this CPU, setting it to UseSSE=%d", (int) UseSSE, use_sse_limit);
FLAG_SET_DEFAULT(UseSSE, use_sse_limit);
} else if (UseSSE < 0) {
warning("UseSSE=%d is not valid, setting it to UseSSE=0", (int) UseSSE);
FLAG_SET_DEFAULT(UseSSE, 0);
}

// Use AES instructions if available.
if (supports_aes()) {

@ -1252,6 +1252,7 @@ const bool Matcher::match_rule_supported(int opcode) {
ret_value = false;
break;
case Op_SqrtVD:
case Op_SqrtVF:
if (UseAVX < 1) // enabled for AVX only
ret_value = false;
break;
@ -2580,7 +2581,7 @@ instruct negD_reg_reg(regD dst, regD src) %{

instruct sqrtF_reg(regF dst, regF src) %{
predicate(UseSSE>=1);
match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
match(Set dst (SqrtF src));

format %{ "sqrtss $dst, $src" %}
ins_cost(150);
@ -2592,7 +2593,7 @@ instruct sqrtF_reg(regF dst, regF src) %{

instruct sqrtF_mem(regF dst, memory src) %{
predicate(UseSSE>=1);
match(Set dst (ConvD2F (SqrtD (ConvF2D (LoadF src)))));
match(Set dst (SqrtF (LoadF src)));

format %{ "sqrtss $dst, $src" %}
ins_cost(150);
@ -2604,7 +2605,8 @@ instruct sqrtF_mem(regF dst, memory src) %{

instruct sqrtF_imm(regF dst, immF con) %{
predicate(UseSSE>=1);
match(Set dst (ConvD2F (SqrtD (ConvF2D con))));
match(Set dst (SqrtF con));

format %{ "sqrtss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
ins_cost(150);
ins_encode %{
@ -8388,7 +8390,7 @@ instruct vshiftcnt(vecS dst, rRegI cnt) %{

// --------------------------------- Sqrt --------------------------------------

// Floating point vector sqrt - double precision only
// Floating point vector sqrt
instruct vsqrt2D_reg(vecX dst, vecX src) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
match(Set dst (SqrtVD src));
@ -8455,6 +8457,94 @@ instruct vsqrt8D_mem(vecZ dst, memory mem) %{
ins_pipe( pipe_slow );
%}

instruct vsqrt2F_reg(vecD dst, vecD src) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
match(Set dst (SqrtVF src));
format %{ "vsqrtps $dst,$src\t! sqrt packed2F" %}
ins_encode %{
int vector_len = 0;
__ vsqrtps($dst$$XMMRegister, $src$$XMMRegister, vector_len);
%}
ins_pipe( pipe_slow );
%}

instruct vsqrt2F_mem(vecD dst, memory mem) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
match(Set dst (SqrtVF (LoadVector mem)));
format %{ "vsqrtps $dst,$mem\t! sqrt packed2F" %}
ins_encode %{
int vector_len = 0;
__ vsqrtps($dst$$XMMRegister, $mem$$Address, vector_len);
%}
ins_pipe( pipe_slow );
%}

instruct vsqrt4F_reg(vecX dst, vecX src) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
match(Set dst (SqrtVF src));
format %{ "vsqrtps $dst,$src\t! sqrt packed4F" %}
ins_encode %{
int vector_len = 0;
__ vsqrtps($dst$$XMMRegister, $src$$XMMRegister, vector_len);
%}
ins_pipe( pipe_slow );
%}

instruct vsqrt4F_mem(vecX dst, memory mem) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
match(Set dst (SqrtVF (LoadVector mem)));
format %{ "vsqrtps $dst,$mem\t! sqrt packed4F" %}
ins_encode %{
int vector_len = 0;
__ vsqrtps($dst$$XMMRegister, $mem$$Address, vector_len);
%}
ins_pipe( pipe_slow );
%}

instruct vsqrt8F_reg(vecY dst, vecY src) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
match(Set dst (SqrtVF src));
format %{ "vsqrtps $dst,$src\t! sqrt packed8F" %}
ins_encode %{
int vector_len = 1;
__ vsqrtps($dst$$XMMRegister, $src$$XMMRegister, vector_len);
%}
ins_pipe( pipe_slow );
%}

instruct vsqrt8F_mem(vecY dst, memory mem) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
match(Set dst (SqrtVF (LoadVector mem)));
format %{ "vsqrtps $dst,$mem\t! sqrt packed8F" %}
ins_encode %{
int vector_len = 1;
__ vsqrtps($dst$$XMMRegister, $mem$$Address, vector_len);
%}
ins_pipe( pipe_slow );
%}

instruct vsqrt16F_reg(vecZ dst, vecZ src) %{
predicate(UseAVX > 2 && n->as_Vector()->length() == 16);
match(Set dst (SqrtVF src));
format %{ "vsqrtps $dst,$src\t! sqrt packed16F" %}
ins_encode %{
int vector_len = 2;
__ vsqrtps($dst$$XMMRegister, $src$$XMMRegister, vector_len);
%}
ins_pipe( pipe_slow );
%}

instruct vsqrt16F_mem(vecZ dst, memory mem) %{
predicate(UseAVX > 2 && n->as_Vector()->length() == 16);
match(Set dst (SqrtVF (LoadVector mem)));
format %{ "vsqrtps $dst,$mem\t! sqrt packed16F" %}
ins_encode %{
int vector_len = 2;
__ vsqrtps($dst$$XMMRegister, $mem$$Address, vector_len);
%}
ins_pipe( pipe_slow );
%}

// ------------------------------ LeftShift -----------------------------------

// Shorts/Chars vector left shift

@ -25,6 +25,7 @@

// no precompiled headers

#include "memory/allocation.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"

@ -2490,6 +2490,22 @@ bool os::can_execute_large_page_memory() {
return false;
}

char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
assert(file_desc >= 0, "file_desc is not valid");
char* result = NULL;

// Always round to os::vm_page_size(), which may be larger than 4K.
bytes = align_up(bytes, os::vm_page_size());
result = reserve_mmaped_memory(bytes, requested_addr, 0);

if (result != NULL) {
if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {
vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
}
}
return result;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {

@ -23,6 +23,7 @@
*/

// no precompiled headers
#include "memory/allocation.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"

@ -2350,6 +2350,17 @@ bool os::can_execute_large_page_memory() {
return UseHugeTLBFS;
}

char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
assert(file_desc >= 0, "file_desc is not valid");
char* result = pd_attempt_reserve_memory_at(bytes, requested_addr);
if (result != NULL) {
if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {
vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
}
}
return result;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).

@ -323,7 +323,12 @@ void OSContainer::init() {
}
}

if (mntinfo != NULL) fclose(mntinfo);
fclose(mntinfo);

if (memory == NULL || cpuset == NULL || cpu == NULL || cpuacct == NULL) {
log_debug(os, container)("Required cgroup subsystems not found");
return;
}

/*
* Read /proc/self/cgroup and map host mount point to
@ -383,12 +388,7 @@ void OSContainer::init() {
}
}

if (cgroup != NULL) fclose(cgroup);

if (memory == NULL || cpuset == NULL || cpu == NULL) {
log_debug(os, container)("Required cgroup subsystems not found");
return;
}
fclose(cgroup);

// We need to update the amount of physical memory now that
// command line arguments have been processed.

@ -23,6 +23,7 @@
*/

// no precompiled headers
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/osThread.hpp"

@ -59,6 +59,7 @@
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/timer.hpp"
#include "semaphore_posix.hpp"
#include "services/attachListener.hpp"
@ -129,6 +130,7 @@
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)

#define LARGEPAGES_BIT (1 << 6)
#define DAX_SHARED_BIT (1 << 8)
////////////////////////////////////////////////////////////////////////////////
// global variables
julong os::Linux::_physical_memory = 0;
@ -1646,7 +1648,10 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
//
// Dynamic loader will make all stacks executable after
// this function returns, and will not do that again.
assert(Threads::first() == NULL, "no Java threads should exist yet.");
#ifdef ASSERT
ThreadsListHandle tlh;
assert(tlh.length() == 0, "no Java threads should exist yet.");
#endif
} else {
warning("You have loaded library %s which might have disabled stack guard. "
"The VM will try to fix the stack guard now.\n"
@ -1874,16 +1879,13 @@ void * os::Linux::dll_load_in_vmthread(const char *filename, char *ebuf,
// may have been queued at the same time.

if (!_stack_is_executable) {
JavaThread *jt = Threads::first();

while (jt) {
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
if (!jt->stack_guard_zone_unused() && // Stack not yet fully initialized
jt->stack_guards_enabled()) { // No pending stack overflow exceptions
if (!os::guard_memory((char *)jt->stack_end(), jt->stack_guard_zone_size())) {
warning("Attempt to reguard stack yellow zone failed.");
}
}
jt = jt->next();
}
}

@ -3369,10 +3371,13 @@ bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
// effective only if the bit 2 is cleared)
// - (bit 5) hugetlb private memory
// - (bit 6) hugetlb shared memory
// - (bit 7) dax private memory
// - (bit 8) dax shared memory
//
static void set_coredump_filter(void) {
static void set_coredump_filter(bool largepages, bool dax_shared) {
FILE *f;
long cdm;
bool filter_changed = false;

if ((f = fopen("/proc/self/coredump_filter", "r+")) == NULL) {
return;
@ -3385,8 +3390,15 @@ static void set_coredump_filter(void) {

rewind(f);

if ((cdm & LARGEPAGES_BIT) == 0) {
if (largepages && (cdm & LARGEPAGES_BIT) == 0) {
cdm |= LARGEPAGES_BIT;
filter_changed = true;
}
if (dax_shared && (cdm & DAX_SHARED_BIT) == 0) {
cdm |= DAX_SHARED_BIT;
filter_changed = true;
}
if (filter_changed) {
fprintf(f, "%#lx", cdm);
}

@ -3525,7 +3537,7 @@ void os::large_page_init() {
size_t large_page_size = Linux::setup_large_page_size();
UseLargePages = Linux::setup_large_page_type(large_page_size);

set_coredump_filter();
set_coredump_filter(true /*largepages*/, false /*dax_shared*/);
}

#ifndef SHM_HUGETLB
@ -3896,6 +3908,17 @@ bool os::can_execute_large_page_memory() {
return UseTransparentHugePages || UseHugeTLBFS;
}

char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
assert(file_desc >= 0, "file_desc is not valid");
char* result = pd_attempt_reserve_memory_at(bytes, requested_addr);
if (result != NULL) {
if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {
vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
}
}
return result;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).

@ -4947,25 +4970,20 @@ jint os::init_2(void) {
UseNUMA = false;
}
}
// With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
// we can make the adaptive lgrp chunk resizing work. If the user specified
// both UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn and
// disable adaptive resizing.
if (UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
if (FLAG_IS_DEFAULT(UseNUMA)) {
UseNUMA = false;
} else {
if (FLAG_IS_DEFAULT(UseLargePages) &&
FLAG_IS_DEFAULT(UseSHM) &&
FLAG_IS_DEFAULT(UseHugeTLBFS)) {
UseLargePages = false;
} else if (UseAdaptiveSizePolicy || UseAdaptiveNUMAChunkSizing) {
warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, disabling adaptive resizing (-XX:-UseAdaptiveSizePolicy -XX:-UseAdaptiveNUMAChunkSizing)");
UseAdaptiveSizePolicy = false;
UseAdaptiveNUMAChunkSizing = false;
}

if (UseParallelGC && UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
// With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
// we can make the adaptive lgrp chunk resizing work. If the user specified both
// UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn
// and disable adaptive resizing.
if (UseAdaptiveSizePolicy || UseAdaptiveNUMAChunkSizing) {
warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, "
"disabling adaptive resizing (-XX:-UseAdaptiveSizePolicy -XX:-UseAdaptiveNUMAChunkSizing)");
UseAdaptiveSizePolicy = false;
UseAdaptiveNUMAChunkSizing = false;
}
}

if (!UseNUMA && ForceNUMA) {
UseNUMA = true;
}
@ -5012,6 +5030,9 @@ jint os::init_2(void) {
// initialize thread priority policy
prio_init();

if (!FLAG_IS_DEFAULT(AllocateHeapAt)) {
set_coredump_filter(false /*largepages*/, true /*dax_shared*/);
}
return JNI_OK;
}

@ -40,6 +40,7 @@
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/utsname.h>
#include <time.h>
@ -52,6 +53,20 @@
#endif
#define IS_VALID_PID(p) (p > 0 && p < MAX_PID)

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#define check_with_errno(check_type, cond, msg) \
do { \
int err = errno; \
check_type(cond, "%s; error='%s' (errno=%s)", msg, os::strerror(err), \
os::errno_name(err)); \
} while (false)

#define assert_with_errno(cond, msg) check_with_errno(assert, cond, msg)
#define guarantee_with_errno(cond, msg) check_with_errno(guarantee, cond, msg)

// Check core dump limit and report possible place where core can be found
void os::check_dump_limit(char* buffer, size_t bufferSize) {
if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
@ -145,10 +160,124 @@ void os::wait_for_keypress_at_exit(void) {
return;
}

int os::create_file_for_heap(const char* dir) {

const char name_template[] = "/jvmheap.XXXXXX";

char *fullname = (char*)os::malloc((strlen(dir) + strlen(name_template) + 1), mtInternal);
if (fullname == NULL) {
vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
return -1;
}
(void)strncpy(fullname, dir, strlen(dir)+1);
(void)strncat(fullname, name_template, strlen(name_template));

os::native_path(fullname);

sigset_t set, oldset;
int ret = sigfillset(&set);
assert_with_errno(ret == 0, "sigfillset returned error");

// set the file creation mask.
mode_t file_mode = S_IRUSR | S_IWUSR;

// create a new file.
int fd = mkstemp(fullname);

if (fd < 0) {
warning("Could not create file for heap with template %s", fullname);
os::free(fullname);
return -1;
}

// delete the name from the filesystem. When 'fd' is closed, the file (and space) will be deleted.
ret = unlink(fullname);
assert_with_errno(ret == 0, "unlink returned error");

os::free(fullname);
return fd;
}

static char* reserve_mmapped_memory(size_t bytes, char* requested_addr) {
char * addr;
int flags = MAP_PRIVATE NOT_AIX( | MAP_NORESERVE ) | MAP_ANONYMOUS;
if (requested_addr != NULL) {
assert((uintptr_t)requested_addr % os::vm_page_size() == 0, "Requested address should be aligned to OS page size");
flags |= MAP_FIXED;
}

// Map reserved/uncommitted pages PROT_NONE so we fail early if we
// touch an uncommitted page. Otherwise, the read/write might
// succeed if we have enough swap space to back the physical page.
addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
flags, -1, 0);

if (addr != MAP_FAILED) {
MemTracker::record_virtual_memory_reserve((address)addr, bytes, CALLER_PC);
return addr;
}
return NULL;
}

static int util_posix_fallocate(int fd, off_t offset, off_t len) {
#ifdef __APPLE__
fstore_t store = { F_ALLOCATECONTIG, F_PEOFPOSMODE, 0, len };
// First we try to get a contiguous chunk of disk space
int ret = fcntl(fd, F_PREALLOCATE, &store);
if (ret == -1) {
// Maybe we are too fragmented, try to allocate a non-contiguous range
store.fst_flags = F_ALLOCATEALL;
ret = fcntl(fd, F_PREALLOCATE, &store);
}
if (ret != -1) {
return ftruncate(fd, len);
}
return -1;
#else
return posix_fallocate(fd, offset, len);
#endif
}

// Map the given address range to the provided file descriptor.
char* os::map_memory_to_file(char* base, size_t size, int fd) {
assert(fd != -1, "File descriptor is not valid");

// allocate space for the file
if (util_posix_fallocate(fd, 0, (off_t)size) != 0) {
vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory."));
return NULL;
}

int prot = PROT_READ | PROT_WRITE;
int flags = MAP_SHARED;
if (base != NULL) {
flags |= MAP_FIXED;
}
char* addr = (char*)mmap(base, size, prot, flags, fd, 0);

if (addr == MAP_FAILED) {
return NULL;
}
if (base != NULL && addr != base) {
if (!os::release_memory(addr, size)) {
warning("Could not release memory on unsuccessful file mapping");
}
return NULL;
}
return addr;
}

char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
assert(fd != -1, "File descriptor is not valid");
assert(base != NULL, "Base cannot be NULL");

return map_memory_to_file(base, size, fd);
}

// Multiple threads can race in this code, and can remap over each other with MAP_FIXED,
// so on posix, unmap the section at the start and at the end of the chunk that we mapped
// rather than unmapping and remapping the whole chunk to get requested alignment.
char* os::reserve_memory_aligned(size_t size, size_t alignment) {
char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
"Alignment must be a multiple of allocation granularity (page size)");
assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
@ -156,7 +285,20 @@ char* os::reserve_memory_aligned(size_t size, size_t alignment) {
size_t extra_size = size + alignment;
assert(extra_size >= size, "overflow, size is too large to allow alignment");

char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
char* extra_base;
if (file_desc != -1) {
// For file mapping, we do not call os::reserve_memory(extra_size, NULL, alignment, file_desc) because
// we need to deal with shrinking of the file space later when we release extra memory after alignment.
// We also cannot call os::reserve_memory() with file_desc set to -1 because on AIX we might get SHM memory.
// So here we call a helper function to reserve memory for us. After we have an aligned base,
// we will replace the anonymous mapping with a file mapping.
extra_base = reserve_mmapped_memory(extra_size, NULL);
|
||||
if (extra_base != NULL) {
|
||||
MemTracker::record_virtual_memory_reserve((address)extra_base, extra_size, CALLER_PC);
|
||||
}
|
||||
} else {
|
||||
extra_base = os::reserve_memory(extra_size, NULL, alignment);
|
||||
}
|
||||
|
||||
if (extra_base == NULL) {
|
||||
return NULL;
|
||||
@ -183,6 +325,13 @@ char* os::reserve_memory_aligned(size_t size, size_t alignment) {
|
||||
os::release_memory(extra_base + begin_offset + size, end_offset);
|
||||
}
|
||||
|
||||
if (file_desc != -1) {
|
||||
// After we have an aligned address, we can replace anonymous mapping with file mapping
|
||||
if (replace_existing_mapping_with_file_mapping(aligned_base, size, file_desc) == NULL) {
|
||||
vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
|
||||
}
|
||||
MemTracker::record_virtual_memory_commit((address)aligned_base, size, CALLER_PC);
|
||||
}
|
||||
return aligned_base;
|
||||
}

@ -478,8 +627,7 @@ int os::sleep(Thread* thread, jlong millis, bool interruptible) {
// interrupt support

void os::interrupt(Thread* thread) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");
  debug_only(Thread::check_for_dangling_thread_pointer(thread);)

  OSThread* osthread = thread->osthread();

@ -499,12 +647,10 @@ void os::interrupt(Thread* thread) {

  ParkEvent * ev = thread->_ParkEvent ;
  if (ev != NULL) ev->unpark() ;

}

bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");
  debug_only(Thread::check_for_dangling_thread_pointer(thread);)

  OSThread* osthread = thread->osthread();

@ -1351,16 +1497,6 @@ void os::ThreadCrashProtection::check_crash_protection(int sig,
  }
}

#define check_with_errno(check_type, cond, msg) \
  do { \
    int err = errno; \
    check_type(cond, "%s; error='%s' (errno=%s)", msg, os::strerror(err), \
               os::errno_name(err)); \
  } while (false)

#define assert_with_errno(cond, msg)    check_with_errno(assert, cond, msg)
#define guarantee_with_errno(cond, msg) check_with_errno(guarantee, cond, msg)
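
Hypothetical call sites for these macros (assuming HotSpot's assert/guarantee are in scope); the macro captures errno before formatting, so the message reports the failure that actually occurred:

#include <fcntl.h>
#include <unistd.h>

static void errno_macro_demo() {
  int fd = ::open("/tmp/example", O_RDONLY);       // path is illustrative
  assert_with_errno(fd >= 0, "open failed");
  guarantee_with_errno(::close(fd) == 0, "close failed");
}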

// POSIX unnamed semaphores are not supported on OS X.
#ifndef __APPLE__

@ -2585,6 +2585,17 @@ char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
  return addr;
}

char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
  assert(file_desc >= 0, "file_desc is not valid");
  char* result = pd_attempt_reserve_memory_at(bytes, requested_addr);
  if (result != NULL) {
    if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {
      vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
    }
  }
  return result;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).

@ -2904,6 +2904,75 @@ void os::large_page_init() {
  UseLargePages = success;
}
int os::create_file_for_heap(const char* dir) {

  const char name_template[] = "/jvmheap.XXXXXX";
  char *fullname = (char*)os::malloc((strlen(dir) + strlen(name_template) + 1), mtInternal);
  if (fullname == NULL) {
    vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
    return -1;
  }

  (void)strncpy(fullname, dir, strlen(dir)+1);
  (void)strncat(fullname, name_template, strlen(name_template));

  os::native_path(fullname);

  char *path = _mktemp(fullname);
  if (path == NULL) {
    warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno));
    os::free(fullname);
    return -1;
  }

  int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);

  os::free(fullname);
  if (fd < 0) {
    warning("Problem opening file for heap (%s)", os::strerror(errno));
    return -1;
  }
  return fd;
}
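
Taken together with os::map_memory_to_file below, the intended call sequence is roughly the following sketch (the directory path is illustrative and error handling is abbreviated; this is not the actual call site):

// Sketch: back the Java heap with a temporary file in a user-supplied directory.
static char* reserve_heap_on_device_sketch(size_t heap_size) {
  int fd = os::create_file_for_heap("/mnt/pmem");  // directory is illustrative
  if (fd == -1) return NULL;
  // NULL base lets the OS pick an address; a non-NULL base requests it exactly.
  return os::map_memory_to_file(NULL, heap_size, fd);
}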

// If 'base' is not NULL, the function will return NULL if it cannot get 'base'.
char* os::map_memory_to_file(char* base, size_t size, int fd) {
  assert(fd != -1, "File descriptor is not valid");

  HANDLE fh = (HANDLE)_get_osfhandle(fd);
#ifdef _LP64
  HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
    (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
#else
  HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
    0, (DWORD)size, NULL);
#endif
  if (fileMapping == NULL) {
    if (GetLastError() == ERROR_DISK_FULL) {
      vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for Java heap"));
    }
    else {
      vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
    }

    return NULL;
  }

  LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);

  CloseHandle(fileMapping);

  return (char*)addr;
}
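
The #ifdef _LP64 branch exists because CreateFileMapping takes the 64-bit mapping size as two 32-bit halves. A minimal illustration of that split (the size value is an example):

#include <windows.h>
#include <stdint.h>

static void split_size_demo() {
  uint64_t size = 0x180000000ULL;             // 6 GB, for example
  DWORD hi = (DWORD)(size >> 32);             // 0x00000001
  DWORD lo = (DWORD)(size & 0xFFFFFFFF);      // 0x80000000
  // CreateFileMapping(fh, NULL, PAGE_READWRITE, hi, lo, NULL) then
  // describes the full 6 GB mapping on a 64-bit build.
  (void)hi; (void)lo;
}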

char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
  assert(fd != -1, "File descriptor is not valid");
  assert(base != NULL, "Base address cannot be NULL");

  release_memory(base, size);
  return map_memory_to_file(base, size, fd);
}

// On win32, one cannot release just a part of reserved memory, it's an
// all or nothing deal.  When we split a reservation, we must break the
// reservation into two reservations.
@ -2923,7 +2992,7 @@ void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
// Multiple threads can race in this code but it's not possible to unmap small sections of
// virtual space to get requested alignment, as on posix-like OSes.
// Windows prevents multiple threads from remapping over each other so this loop is thread-safe.
char* os::reserve_memory_aligned(size_t size, size_t alignment) {
char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
@ -2934,16 +3003,20 @@ char* os::reserve_memory_aligned(size_t size, size_t alignment) {
  char* aligned_base = NULL;

  do {
    char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
    char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = align_up(extra_base, alignment);

    os::release_memory(extra_base, extra_size);
    if (file_desc != -1) {
      os::unmap_memory(extra_base, extra_size);
    } else {
      os::release_memory(extra_base, extra_size);
    }

    aligned_base = os::reserve_memory(size, aligned_base);
    aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);

  } while (aligned_base == NULL);
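
In other words: since win32 reservations are all-or-nothing, the loop releases the whole over-sized block and immediately re-reserves exactly the aligned range; if another thread grabs that range in the window between the two calls, reserve_memory returns NULL and the loop retries with a fresh candidate. A condensed restatement of the control flow above (anonymous-memory case only; file mapping and NMT elided):

// Condensed restatement of the loop above; 'size', 'extra_size' and
// 'alignment' are the surrounding function's variables.
char* aligned_base = NULL;
do {
  char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
  if (extra_base == NULL) return NULL;            // out of address space
  aligned_base = align_up(extra_base, alignment);
  os::release_memory(extra_base, extra_size);     // all-or-nothing on win32
  aligned_base = os::reserve_memory(size, aligned_base); // may lose the race
} while (aligned_base == NULL);                   // raced: try a new candidate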

@ -2989,6 +3062,11 @@ char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  return reserve_memory(bytes, requested_addr);
}

char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
  assert(file_desc >= 0, "file_desc is not valid");
  return map_memory_to_file(requested_addr, bytes, file_desc);
}

size_t os::large_page_size() {
  return _large_page_size;
}
@ -3490,9 +3568,7 @@ OSReturn os::get_native_priority(const Thread* const thread,
void os::hint_no_preempt() {}

void os::interrupt(Thread* thread) {
  assert(!thread->is_Java_thread() || Thread::current() == thread ||
         Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");
  debug_only(Thread::check_for_dangling_thread_pointer(thread);)

  OSThread* osthread = thread->osthread();
  osthread->set_interrupted(true);
@ -3513,8 +3589,7 @@ void os::interrupt(Thread* thread) {


bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");
  debug_only(Thread::check_for_dangling_thread_pointer(thread);)

  OSThread* osthread = thread->osthread();
  // There is no synchronization between the setting of the interrupt
@ -30,74 +30,6 @@

// Implementation of class atomic

#ifdef M68K

/*
 * __m68k_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Returns newval on success and oldval if no exchange happened.
 * This implementation is processor specific and works on
 * 68020 68030 68040 and 68060.
 *
 * It will not work on ColdFire, 68000 and 68010 since they lack the CAS
 * instruction.
 * Using a kernelhelper would be better for arch complete implementation.
 *
 */

static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
  int ret;
  __asm __volatile ("cas%.l %0,%2,%1"
                    : "=d" (ret), "+m" (*(ptr))
                    : "d" (newval), "0" (oldval));
  return ret;
}

/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
   `*PTR' before the operation.*/
static inline int m68k_compare_and_swap(int newval,
                                        volatile int *ptr,
                                        int oldval) {
  for (;;) {
    int prev = *ptr;
    if (prev != oldval)
      return prev;

    if (__m68k_cmpxchg (prev, newval, ptr) == newval)
      // Success.
      return prev;

    // We failed even though prev == oldval.  Try again.
  }
}

/* Atomically add an int to memory. */
static inline int m68k_add_and_fetch(int add_value, volatile int *ptr) {
  for (;;) {
    // Loop until success.

    int prev = *ptr;

    if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
      return prev + add_value;
  }
}

/* Atomically write VALUE into `*PTR' and returns the previous
   contents of `*PTR'. */
static inline int m68k_lock_test_and_set(int newval, volatile int *ptr) {
  for (;;) {
    // Loop until success.
    int prev = *ptr;

    if (__m68k_cmpxchg (prev, newval, ptr) == prev)
      return prev;
  }
}
#endif // M68K

#ifdef ARM

/*
@ -175,12 +107,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) co

#ifdef ARM
  return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
#else
#ifdef M68K
  return add_using_helper<int>(m68k_add_and_fetch, add_value, dest);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

@ -200,9 +128,6 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
  STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM
  return xchg_using_helper<int>(arm_lock_test_and_set, exchange_value, dest);
#else
#ifdef M68K
  return xchg_using_helper<int>(m68k_lock_test_and_set, exchange_value, dest);
#else
  // __sync_lock_test_and_set is a bizarrely named atomic exchange
  // operation.  Note that some platforms only support this with the
@ -215,7 +140,6 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
  // barrier.
  __sync_synchronize();
  return result;
#endif // M68K
#endif // ARM
}

@ -242,12 +166,8 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
  STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM
  return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
#else
#ifdef M68K
  return cmpxchg_using_helper<int>(m68k_compare_and_swap, exchange_value, dest, compare_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}

@ -36,12 +36,18 @@

// Atomically copy 64 bits of data
static void atomic_copy64(const volatile void *src, volatile void *dst) {
#if defined(PPC32)
#if defined(PPC32) && !defined(__SPE__)
  double tmp;
  asm volatile ("lfd  %0, %2\n"
                "stfd %0, %1\n"
                : "=&f"(tmp), "=Q"(*(volatile double*)dst)
                : "Q"(*(volatile double*)src));
#elif defined(PPC32) && defined(__SPE__)
  long tmp;
  asm volatile ("evldd  %0, %2\n"
                "evstdd %0, %1\n"
                : "=&r"(tmp), "=Q"(*(volatile long*)dst)
                : "Q"(*(volatile long*)src));
#elif defined(S390) && !defined(_LP64)
  double tmp;
  asm volatile ("ld  %0, 0(%1)\n"

@ -4034,6 +4034,7 @@ int MatchRule::is_expensive() const {
        strcmp(opType,"ModF")==0 ||
        strcmp(opType,"ModI")==0 ||
        strcmp(opType,"SqrtD")==0 ||
        strcmp(opType,"SqrtF")==0 ||
        strcmp(opType,"TanD")==0 ||
        strcmp(opType,"ConvD2F")==0 ||
        strcmp(opType,"ConvD2I")==0 ||
@ -4167,7 +4168,7 @@ bool MatchRule::is_vector() const {
    "DivVF","DivVD",
    "AbsVF","AbsVD",
    "NegVF","NegVD",
    "SqrtVD",
    "SqrtVD","SqrtVF",
    "AndV" ,"XorV" ,"OrV",
    "AddReductionVI", "AddReductionVL",
    "AddReductionVF", "AddReductionVD",

@ -167,6 +167,7 @@ void AOTLib::verify_config() {
  verify_flag(_config->_compactFields, CompactFields, "CompactFields");
  verify_flag(_config->_enableContended, EnableContended, "EnableContended");
  verify_flag(_config->_restrictContended, RestrictContended, "RestrictContended");
  verify_flag(_config->_threadLocalHandshakes, ThreadLocalHandshakes, "ThreadLocalHandshakes");

  if (!TieredCompilation && _config->_tieredAOT) {
    handle_config_error("Shared file %s error: Expected to run with tiered compilation on", _name);
@ -92,7 +92,7 @@ typedef struct {
} AOTHeader;

typedef struct {
  enum { CONFIG_SIZE = 7 * jintSize + 11 };
  enum { CONFIG_SIZE = 7 * jintSize + 12 };
  // 7 int values
  int _config_size;
  int _narrowOopShift;
@ -101,7 +101,7 @@ typedef struct {
  int _fieldsAllocationStyle;
  int _objectAlignment;
  int _codeSegmentSize;
  // byte[11] array map to boolean values here
  // byte[12] array map to boolean values here
  bool _debug_VM;
  bool _useCompressedOops;
  bool _useCompressedClassPointers;
@ -113,6 +113,7 @@ typedef struct {
  bool _enableContended;
  bool _restrictContended;
  bool _omitAssertions;
  bool _threadLocalHandshakes;
} AOTConfiguration;

class AOTLib : public CHeapObj<mtCode> {
@ -146,15 +146,6 @@ void AOTLoader::initialize() {
    return;
  }

  const char* home = Arguments::get_java_home();
  const char* file_separator = os::file_separator();

  for (int i = 0; i < (int) (sizeof(modules) / sizeof(const char*)); i++) {
    char library[JVM_MAXPATHLEN];
    jio_snprintf(library, sizeof(library), "%s%slib%slib%s%s%s%s", home, file_separator, file_separator, modules[i], UseCompressedOops ? "-coop" : "", UseG1GC ? "" : "-nong1", os::dll_file_extension());
    load_library(library, false);
  }

  // Scan the AOTLibrary option.
  if (AOTLibrary != NULL) {
    const int len = (int)strlen(AOTLibrary);
@ -172,6 +163,16 @@ void AOTLoader::initialize() {
      }
    }
  }

    // Load well-known AOT libraries from the Java installation directory.
    const char* home = Arguments::get_java_home();
    const char* file_separator = os::file_separator();

    for (int i = 0; i < (int) (sizeof(modules) / sizeof(const char*)); i++) {
      char library[JVM_MAXPATHLEN];
      jio_snprintf(library, sizeof(library), "%s%slib%slib%s%s%s%s", home, file_separator, file_separator, modules[i], UseCompressedOops ? "-coop" : "", UseG1GC ? "" : "-nong1", os::dll_file_extension());
      load_library(library, false);
    }
  }
}
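
The jio_snprintf format composes the library path from the module name plus suffixes for the oops/GC configuration; for example (hypothetical paths on Linux):

// Hypothetical results of the jio_snprintf above, for modules[i] == "java.base":
//   UseCompressedOops && UseG1GC    -> <java-home>/lib/libjava.base-coop.so
//   !UseCompressedOops && !UseG1GC  -> <java-home>/lib/libjava.base-nong1.so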

@ -239,6 +240,21 @@ void AOTLoader::set_narrow_klass_shift() {
}

void AOTLoader::load_library(const char* name, bool exit_on_error) {
  // Skip library if a library with the same name is already loaded.
  const int file_separator = *os::file_separator();
  const char* start = strrchr(name, file_separator);
  const char* new_name = (start == NULL) ? name : (start + 1);
  FOR_ALL_AOT_LIBRARIES(lib) {
    const char* lib_name = (*lib)->name();
    start = strrchr(lib_name, file_separator);
    const char* old_name = (start == NULL) ? lib_name : (start + 1);
    if (strcmp(old_name, new_name) == 0) {
      if (PrintAOT) {
        warning("AOT library %s is already loaded as %s.", name, lib_name);
      }
      return;
    }
  }
  char ebuf[1024];
  void* handle = os::dll_load(name, ebuf, sizeof ebuf);
  if (handle == NULL) {

@ -196,8 +196,8 @@ class LIR_OprDesc: public CompilationResourceObj {
  //          data       opr-type opr-kind
  // +--------------+-------+-------+
  // [max...........|7 6 5 4|3 2 1 0]
  //                               ^
  //                         is_pointer bit
  //                                 ^
  //                           is_pointer bit
  //
  // lowest bit cleared, means it is a structure pointer
  // we need  4 bits to represent types
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -23,13 +23,32 @@
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "jimage.hpp"
#include "classfile/classListParser.hpp"
#include "runtime/os.hpp"
#include "runtime/java.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/sharedClassUtil.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/fieldType.hpp"
#include "runtime/javaCalls.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/hashtable.inline.hpp"
#include "utilities/macros.hpp"

ClassListParser* ClassListParser::_instance = NULL;

|
||||
assert(_instance == NULL, "must be singleton");
|
||||
_instance = this;
|
||||
_classlist_file = file;
|
||||
_file = fopen(file, "r");
|
||||
_line_no = 0;
|
||||
_interfaces = new (ResourceObj::C_HEAP, mtClass) GrowableArray<int>(10, true);
|
||||
|
||||
if (_file == NULL) {
|
||||
char errmsg[JVM_MAXPATHLEN];
|
||||
os::lasterror(errmsg, JVM_MAXPATHLEN);
|
||||
@ -41,6 +60,7 @@ ClassListParser::~ClassListParser() {
|
||||
if (_file) {
|
||||
fclose(_file);
|
||||
}
|
||||
_instance = NULL;
|
||||
}
|
||||
|
||||
bool ClassListParser::parse_one_line() {
|
||||
@ -48,10 +68,10 @@ bool ClassListParser::parse_one_line() {
|
||||
if (fgets(_line, sizeof(_line), _file) == NULL) {
|
||||
return false;
|
||||
}
|
||||
int line_len = (int)strlen(_line);
|
||||
if (line_len > _max_allowed_line_len) {
|
||||
tty->print_cr("input line too long (must be no longer than %d chars)", _max_allowed_line_len);
|
||||
vm_exit_during_initialization("Loading classlist failed");
|
||||
++ _line_no;
|
||||
_line_len = (int)strlen(_line);
|
||||
if (_line_len > _max_allowed_line_len) {
|
||||
error("input line too long (must be no longer than %d chars)", _max_allowed_line_len);
|
||||
}
|
||||
if (*_line == '#') { // comment
|
||||
continue;
|
||||
@ -59,8 +79,380 @@ bool ClassListParser::parse_one_line() {
|
||||
break;
|
||||
}
|
||||
|
||||
// Remove trailing \r\n
|
||||
_line[strcspn(_line, "\r\n")] = 0;
|
||||
_id = _unspecified;
|
||||
_super = _unspecified;
|
||||
_interfaces->clear();
|
||||
_source = NULL;
|
||||
_interfaces_specified = false;
|
||||
|
||||
{
|
||||
int len = (int)strlen(_line);
|
||||
int i;
|
||||
// Replace \t\r\n with ' '
|
||||
for (i=0; i<len; i++) {
|
||||
if (_line[i] == '\t' || _line[i] == '\r' || _line[i] == '\n') {
|
||||
_line[i] = ' ';
|
||||
}
|
||||
}
|
||||
|
||||
// Remove trailing newline/space
|
||||
while (len > 0) {
|
||||
if (_line[len-1] == ' ') {
|
||||
_line[len-1] = '\0';
|
||||
len --;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
_line_len = len;
|
||||
_class_name = _line;
|
||||
}
|
||||
|
||||
if ((_token = strchr(_line, ' ')) == NULL) {
|
||||
// No optional arguments are specified.
|
||||
return true;
|
||||
}
|
||||
|
||||
// Mark the end of the name, and go to the next input char
|
||||
*_token++ = '\0';
|
||||
|
||||
while (*_token) {
|
||||
skip_whitespaces();
|
||||
|
||||
if (parse_int_option("id:", &_id)) {
|
||||
continue;
|
||||
} else if (parse_int_option("super:", &_super)) {
|
||||
check_already_loaded("Super class", _super);
|
||||
continue;
|
||||
} else if (skip_token("interfaces:")) {
|
||||
int i;
|
||||
while (try_parse_int(&i)) {
|
||||
check_already_loaded("Interface", i);
|
||||
_interfaces->append(i);
|
||||
}
|
||||
} else if (skip_token("source:")) {
|
||||
skip_whitespaces();
|
||||
_source = _token;
|
||||
char* s = strchr(_token, ' ');
|
||||
if (s == NULL) {
|
||||
break; // end of input line
|
||||
} else {
|
||||
*s = '\0'; // mark the end of _source
|
||||
_token = s+1;
|
||||
}
|
||||
} else {
|
||||
error("Unknown input");
|
||||
}
|
||||
}
|
||||
|
||||
// if src is specified
|
||||
// id super interfaces must all be specified
|
||||
// loader may be specified
|
||||
// else
|
||||
// # the class is loaded from classpath
|
||||
// id may be specified
|
||||
// super, interfaces, loader must not be specified
|
||||
return true;
|
||||
}
|
||||
|
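
To make the format comment above concrete, here are two hypothetical classlist lines this parser accepts; the names, ids and paths are invented for illustration:

# loaded from the classpath; only an optional id is allowed
java/lang/Object id: 0
# loaded from a custom source; id and super are mandatory, interfaces as needed
com/example/Foo id: 42 super: 0 interfaces: 7 12 source: /tmp/app.jar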

void ClassListParser::skip_whitespaces() {
  while (*_token == ' ' || *_token == '\t') {
    _token ++;
  }
}

void ClassListParser::skip_non_whitespaces() {
  while (*_token && *_token != ' ' && *_token != '\t') {
    _token ++;
  }
}

void ClassListParser::parse_int(int* value) {
  skip_whitespaces();
  if (sscanf(_token, "%i", value) == 1) {
    skip_non_whitespaces();
    if (*value < 0) {
      error("Error: negative integers not allowed (%d)", *value);
    }
  } else {
    error("Error: expected integer");
  }
}

bool ClassListParser::try_parse_int(int* value) {
  skip_whitespaces();
  if (sscanf(_token, "%i", value) == 1) {
    skip_non_whitespaces();
    return true;
  }
  return false;
}

bool ClassListParser::skip_token(const char* option_name) {
  size_t len = strlen(option_name);
  if (strncmp(_token, option_name, len) == 0) {
    _token += len;
    return true;
  } else {
    return false;
  }
}

bool ClassListParser::parse_int_option(const char* option_name, int* value) {
  if (skip_token(option_name)) {
    if (*value != _unspecified) {
      error("%s specified twice", option_name);
    } else {
      parse_int(value);
      return true;
    }
  }
  return false;
}

void ClassListParser::print_specified_interfaces() {
  const int n = _interfaces->length();
  jio_fprintf(defaultStream::error_stream(), "Currently specified interfaces[%d] = {\n", n);
  for (int i=0; i<n; i++) {
    InstanceKlass* k = lookup_class_by_id(_interfaces->at(i));
    jio_fprintf(defaultStream::error_stream(), "  %4d = %s\n", _interfaces->at(i), k->name()->as_klass_external_name());
  }
  jio_fprintf(defaultStream::error_stream(), "}\n");
}

void ClassListParser::print_actual_interfaces(InstanceKlass *ik) {
  int n = ik->local_interfaces()->length();
  jio_fprintf(defaultStream::error_stream(), "Actual interfaces[%d] = {\n", n);
  for (int i = 0; i < n; i++) {
    InstanceKlass* e = InstanceKlass::cast(ik->local_interfaces()->at(i));
    jio_fprintf(defaultStream::error_stream(), "  %s\n", e->name()->as_klass_external_name());
  }
  jio_fprintf(defaultStream::error_stream(), "}\n");
}

void ClassListParser::error(const char *msg, ...) {
  va_list ap;
  va_start(ap, msg);
  int error_index = _token - _line;
  if (error_index >= _line_len) {
    error_index = _line_len - 1;
  }
  if (error_index < 0) {
    error_index = 0;
  }

  jio_fprintf(defaultStream::error_stream(),
              "An error has occurred while processing class list file %s %d:%d.\n",
              _classlist_file, _line_no, (error_index + 1));
  jio_vfprintf(defaultStream::error_stream(), msg, ap);

  if (_line_len <= 0) {
    jio_fprintf(defaultStream::error_stream(), "\n");
  } else {
    jio_fprintf(defaultStream::error_stream(), ":\n");
    for (int i=0; i<_line_len; i++) {
      char c = _line[i];
      if (c == '\0') {
        jio_fprintf(defaultStream::error_stream(), "%s", " ");
      } else {
        jio_fprintf(defaultStream::error_stream(), "%c", c);
      }
    }
    jio_fprintf(defaultStream::error_stream(), "\n");
    for (int i=0; i<error_index; i++) {
      jio_fprintf(defaultStream::error_stream(), "%s", " ");
    }
    jio_fprintf(defaultStream::error_stream(), "^\n");
  }

  vm_exit_during_initialization("class list format error.", NULL);
  va_end(ap);
}
// This function is used for loading classes for customized class loaders
// during archive dumping.
InstanceKlass* ClassListParser::load_class_from_source(Symbol* class_name, TRAPS) {
#if !(defined(_LP64) && (defined(LINUX)|| defined(SOLARIS) || defined(AIX)))
  // The only supported platforms are: (1) Linux/64-bit; (2) Solaris/64-bit; (3) AIX/64-bit
  //
  // This #if condition should be in sync with the areCustomLoadersSupportedForCDS
  // method in test/lib/jdk/test/lib/Platform.java.
  error("AppCDS custom class loaders not supported on this platform");
#endif

  assert(UseAppCDS, "must be");
  if (!is_super_specified()) {
    error("If source location is specified, super class must be also specified");
  }
  if (!is_id_specified()) {
    error("If source location is specified, id must be also specified");
  }
  InstanceKlass* k = ClassLoaderExt::load_class(class_name, _source, THREAD);

  if (strncmp(_class_name, "java/", 5) == 0) {
    log_info(cds)("Prohibited package for non-bootstrap classes: %s.class from %s",
                  _class_name, _source);
    return NULL;
  }

  if (k != NULL) {
    if (k->local_interfaces()->length() != _interfaces->length()) {
      print_specified_interfaces();
      print_actual_interfaces(k);
      error("The number of interfaces (%d) specified in class list does not match the class file (%d)",
            _interfaces->length(), k->local_interfaces()->length());
    }

    if (!SystemDictionaryShared::add_non_builtin_klass(class_name, ClassLoaderData::the_null_class_loader_data(),
                                                       k, THREAD)) {
      error("Duplicated class %s", _class_name);
    }

    // This tells JVM_FindLoadedClass to not find this class.
    k->set_shared_classpath_index(UNREGISTERED_INDEX);
  }

  return k;
}

InstanceKlass* ClassListParser::load_current_class(TRAPS) {
  TempNewSymbol class_name_symbol = SymbolTable::new_symbol(_class_name, THREAD);
  guarantee(!HAS_PENDING_EXCEPTION, "Exception creating a symbol.");

  InstanceKlass *klass = NULL;
  if (!is_loading_from_source()) {
    if (is_super_specified()) {
      error("If source location is not specified, super class must not be specified");
    }
    if (are_interfaces_specified()) {
      error("If source location is not specified, interface(s) must not be specified");
    }

    bool non_array = !FieldType::is_array(class_name_symbol);

    Handle s = java_lang_String::create_from_symbol(class_name_symbol, CHECK_0);
    // Translate to external class name format, i.e., convert '/' chars to '.'
    Handle string = java_lang_String::externalize_classname(s, CHECK_0);
    JavaValue result(T_OBJECT);
    InstanceKlass* spec_klass = non_array ?
      SystemDictionary::ClassLoader_klass() : SystemDictionary::Class_klass();
    Symbol* method_name = non_array ?
      vmSymbols::loadClass_name() : vmSymbols::forName_name();
    Handle loader = Handle(THREAD, SystemDictionary::java_system_loader());

    if (non_array) {
      JavaCalls::call_virtual(&result,
                              loader, //SystemDictionary::java_system_loader(),
                              spec_klass,
                              method_name, //vmSymbols::loadClass_name(),
                              vmSymbols::string_class_signature(),
                              string,
                              THREAD);
    } else {
      JavaCalls::call_static(&result,
                             spec_klass,
                             method_name,
                             vmSymbols::string_class_signature(),
                             string,
                             CHECK_NULL);
    }
    assert(result.get_type() == T_OBJECT, "just checking");
    oop obj = (oop) result.get_jobject();
    if (!HAS_PENDING_EXCEPTION && (obj != NULL)) {
      if (non_array) {
        klass = InstanceKlass::cast(java_lang_Class::as_Klass(obj));
      } else {
        klass = static_cast<InstanceKlass*>(java_lang_Class::array_klass_acquire(obj));
      }
    } else { // load classes in bootclasspath/a
      if (HAS_PENDING_EXCEPTION) {
        CLEAR_PENDING_EXCEPTION;
      }

      if (non_array) {
        Klass* k = SystemDictionary::resolve_or_null(class_name_symbol, CHECK_NULL);
        if (k != NULL) {
          klass = InstanceKlass::cast(k);
        } else {
          if (!HAS_PENDING_EXCEPTION) {
            THROW_NULL(vmSymbols::java_lang_ClassNotFoundException());
          }
        }
      }
    }
  } else {
    // If "source:" tag is specified, all super class and super interfaces must be specified in the
    // class list file.
    if (UseAppCDS) {
      klass = load_class_from_source(class_name_symbol, CHECK_NULL);
    }
  }

  if (klass != NULL && is_id_specified()) {
    int id = this->id();
    SystemDictionaryShared::update_shared_entry(klass, id);
    InstanceKlass* old = table()->lookup(id);
    if (old != NULL && old != klass) {
      error("Duplicated ID %d for class %s", id, _class_name);
    }
    table()->add(id, klass);
  }

  return klass;
}

bool ClassListParser::is_loading_from_source() {
  return (_source != NULL);
}

InstanceKlass* ClassListParser::lookup_class_by_id(int id) {
  InstanceKlass* klass = table()->lookup(id);
  if (klass == NULL) {
    error("Class ID %d has not been defined", id);
  }
  return klass;
}


InstanceKlass* ClassListParser::lookup_super_for_current_class(Symbol* super_name) {
  if (!is_loading_from_source()) {
    return NULL;
  }

  InstanceKlass* k = lookup_class_by_id(super());
  if (super_name != k->name()) {
    error("The specified super class %s (id %d) does not match actual super class %s",
          k->name()->as_klass_external_name(), super(),
          super_name->as_klass_external_name());
  }
  return k;
}

InstanceKlass* ClassListParser::lookup_interface_for_current_class(Symbol* interface_name) {
  if (!is_loading_from_source()) {
    return NULL;
  }

  const int n = _interfaces->length();
  if (n == 0) {
    error("Class %s implements the interface %s, but no interface has been specified in the input line",
          _class_name, interface_name->as_klass_external_name());
    ShouldNotReachHere();
  }

  int i;
  for (i=0; i<n; i++) {
    InstanceKlass* k = lookup_class_by_id(_interfaces->at(i));
    if (interface_name == k->name()) {
      return k;
    }
  }

  // interface_name is not specified by the "interfaces:" keyword.
  print_specified_interfaces();
  error("The interface %s implemented by class %s does not match any of the specified interface IDs",
        interface_name->as_klass_external_name(), _class_name);
  ShouldNotReachHere();
  return NULL;
}

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -27,30 +27,122 @@

#include "utilities/exceptions.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/hashtable.hpp"

class CDSClassInfo;

// Look up from ID -> InstanceKlass*
class ID2KlassTable : public Hashtable<InstanceKlass*, mtClass> {
public:
  ID2KlassTable() : Hashtable<InstanceKlass*, mtClass>(1987, sizeof(HashtableEntry<InstanceKlass*, mtClass>)) { }
  void add(int id, InstanceKlass* klass) {
    unsigned int hash = (unsigned int)id;
    HashtableEntry<InstanceKlass*, mtClass>* entry = new_entry(hash, klass);
    add_entry(hash_to_index(hash), entry);
  }

  InstanceKlass* lookup(int id) {
    unsigned int hash = (unsigned int)id;
    int index = hash_to_index(id);
    for (HashtableEntry<InstanceKlass*, mtClass>* e = bucket(index); e != NULL; e = e->next()) {
      if (e->hash() == hash) {
        return e->literal();
      }
    }
    return NULL;
  }
};
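
A small usage sketch for this table (hypothetical values; in the parser it is driven by the id:/super:/interfaces: options, and assert is HotSpot's two-argument variant):

// Hypothetical usage; some_klass stands in for a parsed class.
static void id2klass_demo(InstanceKlass* some_klass) {
  ID2KlassTable table;
  table.add(42, some_klass);                // register the classlist id
  InstanceKlass* k = table.lookup(42);      // returns some_klass
  InstanceKlass* missing = table.lookup(7); // returns NULL: id never added
  assert(k == some_klass && missing == NULL, "sanity");
}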

class ClassListParser : public StackObj {
  enum {
    _unspecified      = -999,

    // Max number of bytes allowed per line in the classlist.
    // Theoretically Java class names could be 65535 bytes in length. In reality,
    // Theoretically Java class names could be 65535 bytes in length. Also, an input line
    // could have a very long path name up to JVM_MAXPATHLEN bytes in length. In reality,
    // 4K bytes is more than enough.
    _max_allowed_line_len = 4096,
    _line_buf_extra       = 10, // for detecting input too long
    _line_buf_size        = _max_allowed_line_len + _line_buf_extra
  };

  static ClassListParser* _instance; // the singleton.
  const char* _classlist_file;
  FILE* _file;
  char  _line[_line_buf_size]; // The buffer that holds the current line.

  ID2KlassTable _id2klass_table;

  // The following field contains information from the *current* line being
  // parsed.
  char  _line[_line_buf_size]; // The buffer that holds the current line. Some characters in
                               // the buffer may be overwritten by '\0' during parsing.
  int   _line_len;             // Original length of the input line.
  int   _line_no;              // Line number for current line being parsed
  const char* _class_name;
  int   _id;
  int   _super;
  GrowableArray<int>* _interfaces;
  bool  _interfaces_specified;
  const char* _source;

  bool parse_int_option(const char* option_name, int* value);
  InstanceKlass* load_class_from_source(Symbol* class_name, TRAPS);
  ID2KlassTable *table() {
    return &_id2klass_table;
  }
  InstanceKlass* lookup_class_by_id(int id);
  void print_specified_interfaces();
  void print_actual_interfaces(InstanceKlass *ik);
public:
  ClassListParser(const char* file);
  ~ClassListParser();

  static ClassListParser* instance() {
    return _instance;
  }
  bool parse_one_line();
  char* _token;
  void error(const char* msg, ...);
  void parse_int(int* value);
  bool try_parse_int(int* value);
  bool skip_token(const char* option_name);
  void skip_whitespaces();
  void skip_non_whitespaces();

  bool is_id_specified() {
    return _id != _unspecified;
  }
  bool is_super_specified() {
    return _super != _unspecified;
  }
  bool are_interfaces_specified() {
    return _interfaces->length() > 0;
  }
  int id() {
    assert(is_id_specified(), "do not query unspecified id");
    return _id;
  }
  int super() {
    assert(is_super_specified(), "do not query unspecified super");
    return _super;
  }
  void check_already_loaded(const char* which, int id) {
    if (_id2klass_table.lookup(id) == NULL) {
      error("%s id %d is not yet loaded", which, id);
    }
  }

  const char* current_class_name() {
    return _line;
    return _class_name;
  }

  InstanceKlass* load_current_class(TRAPS);

  bool is_loading_from_source();

  // Look up the super or interface of the current class being loaded
  // (in this->load_current_class()).
  InstanceKlass* lookup_super_for_current_class(Symbol* super_name);
  InstanceKlass* lookup_interface_for_current_class(Symbol* interface_name);
};


#endif // SHARE_VM_MEMORY_CLASSLISTPARSER_HPP
#endif

@ -26,6 +26,7 @@
#define SHARE_VM_CLASSFILE_CLASSLOADER_HPP

#include "jimage.hpp"
#include "runtime/handles.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/perfData.hpp"
#include "utilities/exceptions.hpp"
@ -42,6 +43,7 @@
class JImageFile;
class ClassFileStream;
class PackageEntry;
template <typename T> class GrowableArray;

class ClassPathEntry : public CHeapObj<mtClass> {
 private:
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -23,14 +23,329 @@
 */

#include "precompiled.hpp"
#include "classfile/classFileParser.hpp"
#include "classfile/classFileStream.hpp"
#include "classfile/classListParser.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/klassFactory.hpp"
#include "classfile/sharedClassUtil.hpp"
#include "classfile/sharedPathsMiscInfo.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmSymbols.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/os.hpp"
#include "services/threadService.hpp"
#include "utilities/stringUtils.hpp"

jshort ClassLoaderExt::_app_paths_start_index = ClassLoaderExt::max_classpath_index;
bool ClassLoaderExt::_has_app_classes = false;
bool ClassLoaderExt::_has_platform_classes = false;

void ClassLoaderExt::setup_app_search_path() {
  assert(DumpSharedSpaces, "this function is only used with -Xshare:dump and -XX:+UseAppCDS");
  _app_paths_start_index = ClassLoader::num_boot_classpath_entries();
  char* app_class_path = os::strdup(Arguments::get_appclasspath());

  if (strcmp(app_class_path, ".") == 0) {
    // This doesn't make any sense, even for AppCDS, so let's skip it. We
    // don't want to throw an error here because -cp "." is usually assigned
    // by the launcher when classpath is not specified.
    trace_class_path("app loader class path (skipped)=", app_class_path);
  } else {
    trace_class_path("app loader class path=", app_class_path);
    shared_paths_misc_info()->add_app_classpath(app_class_path);
    ClassLoader::setup_app_search_path(app_class_path);
  }
}

char* ClassLoaderExt::read_manifest(ClassPathEntry* entry, jint *manifest_size, bool clean_text, TRAPS) {
  const char* name = "META-INF/MANIFEST.MF";
  char* manifest;
  jint size;

  assert(entry->is_jar_file(), "must be");
  manifest = (char*) ((ClassPathZipEntry*)entry )->open_entry(name, &size, true, CHECK_NULL);

  if (manifest == NULL) { // No Manifest
    *manifest_size = 0;
    return NULL;
  }


  if (clean_text) {
    // See http://docs.oracle.com/javase/6/docs/technotes/guides/jar/jar.html#JAR%20Manifest
    // (1): replace all CR/LF and CR with LF
    StringUtils::replace_no_expand(manifest, "\r\n", "\n");

    // (2) remove all new-line continuation (remove all "\n " substrings)
    StringUtils::replace_no_expand(manifest, "\n ", "");
  }

  *manifest_size = (jint)strlen(manifest);
  return manifest;
}
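
The effect of the two replace_no_expand passes is easiest to see on a small hypothetical manifest fragment:

Before cleaning (CRLF line ends; the leading space marks a 72-column continuation):
  Class-Path: lib/very-long-<CR><LF>
   name.jar lib/other.jar<CR><LF>
After pass (1) CR/LF -> LF and pass (2) removing "\n ":
  Class-Path: lib/very-long-name.jar lib/other.jar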

char* ClassLoaderExt::get_class_path_attr(const char* jar_path, char* manifest, jint manifest_size) {
  const char* tag = "Class-Path: ";
  const int tag_len = (int)strlen(tag);
  char* found = NULL;
  char* line_start = manifest;
  char* end = manifest + manifest_size;

  assert(*end == 0, "must be nul-terminated");

  while (line_start < end) {
    char* line_end = strchr(line_start, '\n');
    if (line_end == NULL) {
      // The JAR spec requires the manifest file to be terminated by a new line.
      break;
    }
    if (strncmp(tag, line_start, tag_len) == 0) {
      if (found != NULL) {
        // Same behavior as jdk/src/share/classes/java/util/jar/Attributes.java
        // If duplicated entries are found, the last one is used.
        tty->print_cr("Warning: Duplicate name in Manifest: %s.\n"
                      "Ensure that the manifest does not have duplicate entries, and\n"
                      "that blank lines separate individual sections in both your\n"
                      "manifest and in the META-INF/MANIFEST.MF entry in the jar file:\n%s\n", tag, jar_path);
      }
      found = line_start + tag_len;
      assert(found <= line_end, "sanity");
      *line_end = '\0';
    }
    line_start = line_end + 1;
  }
  return found;
}
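
For example, given the hypothetical cleaned manifest below, the function returns a pointer to "lib/b.jar lib/c.jar" — the value of the last Class-Path line, matching the behavior of java.util.jar.Attributes:

Manifest-Version: 1.0
Class-Path: lib/a.jar
Class-Path: lib/b.jar lib/c.jar
Main-Class: com.example.Main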
void ClassLoaderExt::process_jar_manifest(ClassPathEntry* entry,
                                          bool check_for_duplicates) {
  Thread* THREAD = Thread::current();
  ResourceMark rm(THREAD);
  jint manifest_size;
  char* manifest = read_manifest(entry, &manifest_size, CHECK);

  if (manifest == NULL) {
    return;
  }

  if (strstr(manifest, "Extension-List:") != NULL) {
    tty->print_cr("-Xshare:dump does not support Extension-List in JAR manifest: %s", entry->name());
    vm_exit(1);
  }

  char* cp_attr = get_class_path_attr(entry->name(), manifest, manifest_size);

  if (cp_attr != NULL && strlen(cp_attr) > 0) {
    trace_class_path("found Class-Path: ", cp_attr);

    char sep = os::file_separator()[0];
    const char* dir_name = entry->name();
    const char* dir_tail = strrchr(dir_name, sep);
    int dir_len;
    if (dir_tail == NULL) {
      dir_len = 0;
    } else {
      dir_len = dir_tail - dir_name + 1;
    }

    // Split the cp_attr by spaces, and add each file
    char* file_start = cp_attr;
    char* end = file_start + strlen(file_start);

    while (file_start < end) {
      char* file_end = strchr(file_start, ' ');
      if (file_end != NULL) {
        *file_end = 0;
        file_end += 1;
      } else {
        file_end = end;
      }

      int name_len = (int)strlen(file_start);
      if (name_len > 0) {
        ResourceMark rm(THREAD);
        char* libname = NEW_RESOURCE_ARRAY(char, dir_len + name_len + 1);
        *libname = 0;
        strncat(libname, dir_name, dir_len);
        strncat(libname, file_start, name_len);
        trace_class_path("library = ", libname);
        ClassLoader::update_class_path_entry_list(libname, true, false);
      }

      file_start = file_end;
    }
  }
}

void ClassLoaderExt::setup_search_paths() {
  if (UseAppCDS) {
    shared_paths_misc_info()->record_app_offset();
    ClassLoaderExt::setup_app_search_path();
  }
}

Thread* ClassLoaderExt::Context::_dump_thread = NULL;

bool ClassLoaderExt::check(ClassLoaderExt::Context *context,
                           const ClassFileStream* stream,
                           const int classpath_index) {
  if (stream != NULL) {
    // Ignore any App classes from signed JAR file during CDS archiving
    // dumping
    if (DumpSharedSpaces &&
        SharedClassUtil::is_classpath_entry_signed(classpath_index) &&
        classpath_index >= _app_paths_start_index) {
      tty->print_cr("Preload Warning: Skipping %s from signed JAR",
                    context->class_name());
      return false;
    }
    if (classpath_index >= _app_paths_start_index) {
      _has_app_classes = true;
      _has_platform_classes = true;
    }
  }

  return true;
}

void ClassLoaderExt::record_result(ClassLoaderExt::Context *context,
                                   Symbol* class_name,
                                   const s2 classpath_index,
                                   InstanceKlass* result,
                                   TRAPS) {
  assert(DumpSharedSpaces, "Sanity");

  // We need to remember where the class comes from during dumping.
  oop loader = result->class_loader();
  s2 classloader_type = ClassLoader::BOOT_LOADER;
  if (SystemDictionary::is_system_class_loader(loader)) {
    classloader_type = ClassLoader::APP_LOADER;
    ClassLoaderExt::set_has_app_classes();
  } else if (SystemDictionary::is_platform_class_loader(loader)) {
    classloader_type = ClassLoader::PLATFORM_LOADER;
    ClassLoaderExt::set_has_platform_classes();
  }
  result->set_shared_classpath_index(classpath_index);
  result->set_class_loader_type(classloader_type);
}

void ClassLoaderExt::finalize_shared_paths_misc_info() {
  if (UseAppCDS) {
    if (!_has_app_classes) {
      shared_paths_misc_info()->pop_app();
    }
  }
}

// Load the class of the given name from the location given by path. The path is specified by
// the "source:" in the class list file (see classListParser.cpp), and can be a directory or
// a JAR file.
InstanceKlass* ClassLoaderExt::load_class(Symbol* name, const char* path, TRAPS) {

  assert(name != NULL, "invariant");
  assert(DumpSharedSpaces && UseAppCDS, "this function is only used with -Xshare:dump and -XX:+UseAppCDS");
  ResourceMark rm(THREAD);
  const char* class_name = name->as_C_string();

  const char* file_name = file_name_for_class_name(class_name,
                                                   name->utf8_length());
  assert(file_name != NULL, "invariant");

  // Lookup stream for parsing .class file
  ClassFileStream* stream = NULL;
  ClassPathEntry* e = find_classpath_entry_from_cache(path, CHECK_NULL);
  if (e == NULL) {
    return NULL;
  }
  {
    PerfClassTraceTime vmtimer(perf_sys_class_lookup_time(),
                               ((JavaThread*) THREAD)->get_thread_stat()->perf_timers_addr(),
                               PerfClassTraceTime::CLASS_LOAD);
    stream = e->open_stream(file_name, CHECK_NULL);
  }

  if (NULL == stream) {
    tty->print_cr("Preload Warning: Cannot find %s", class_name);
    return NULL;
  }

  assert(stream != NULL, "invariant");
  stream->set_verify(true);

  ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
  Handle protection_domain;

  InstanceKlass* result = KlassFactory::create_from_stream(stream,
                                                           name,
                                                           loader_data,
                                                           protection_domain,
                                                           NULL, // host_klass
                                                           NULL, // cp_patches
                                                           THREAD);

  if (HAS_PENDING_EXCEPTION) {
    tty->print_cr("Preload Error: Failed to load %s", class_name);
    return NULL;
  }
  result->set_shared_classpath_index(UNREGISTERED_INDEX);
  SystemDictionaryShared::set_shared_class_misc_info(result, stream);
  return result;
}

struct CachedClassPathEntry {
  const char* _path;
  ClassPathEntry* _entry;
};

static GrowableArray<CachedClassPathEntry>* cached_path_entries = NULL;

ClassPathEntry* ClassLoaderExt::find_classpath_entry_from_cache(const char* path, TRAPS) {
  // This is called from dump time so it's single threaded and there's no need for a lock.
  assert(DumpSharedSpaces && UseAppCDS, "this function is only used with -Xshare:dump and -XX:+UseAppCDS");
  if (cached_path_entries == NULL) {
    cached_path_entries = new (ResourceObj::C_HEAP, mtClass) GrowableArray<CachedClassPathEntry>(20, /*c heap*/ true);
  }
  CachedClassPathEntry ccpe;
  for (int i=0; i<cached_path_entries->length(); i++) {
    ccpe = cached_path_entries->at(i);
    if (strcmp(ccpe._path, path) == 0) {
      if (i != 0) {
        // Put recent entries at the beginning to speed up searches.
        cached_path_entries->remove_at(i);
        cached_path_entries->insert_before(0, ccpe);
      }
      return ccpe._entry;
    }
  }

  struct stat st;
  if (os::stat(path, &st) != 0) {
    // File or directory not found
    return NULL;
  }
  ClassPathEntry* new_entry = NULL;

  new_entry = create_class_path_entry(path, &st, false, false, CHECK_NULL);
  if (new_entry == NULL) {
    return NULL;
  }
  ccpe._path = strdup(path);
  ccpe._entry = new_entry;
  cached_path_entries->insert_before(0, ccpe);
  return new_entry;
}

Klass* ClassLoaderExt::load_one_class(ClassListParser* parser, TRAPS) {
  TempNewSymbol class_name_symbol = SymbolTable::new_symbol(parser->current_class_name(), THREAD);
  guarantee(!HAS_PENDING_EXCEPTION, "Exception creating a symbol.");
  return SystemDictionary::resolve_or_null(class_name_symbol, THREAD);
  return parser->load_current_class(THREAD);
}

@ -26,65 +26,152 @@
#define SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP

#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "oops/instanceKlass.hpp"
#include "runtime/handles.hpp"
#include "utilities/macros.hpp"

class ClassListParser;
CDS_ONLY(class SharedPathsMiscInfoExt;)
CDS_ONLY(class ClassListParser;)

class ClassLoaderExt: public ClassLoader { // AllStatic
public:

  enum SomeConstants {
    max_classpath_index = 0x7fff
  };
  // ClassLoaderExt::Context --
  //
  // This is used by DumpSharedSpaces only - it enforces the same classloader
  // delegation model as would be in run-time. I.e.,
  // + classes defined by the NULL class loader cannot load classes in the PLATFORM or APP paths.
  // + classes defined by the PLATFORM class loader cannot load classes in the APP paths.
  class Context {
    static Thread* _dump_thread;
    const char* _class_name;
    const char* _file_name;
  public:
    const char* class_name() {
      return _class_name;
    }
    const char* file_name() {
      return _file_name;
    }

    Context(const char* class_name, const char* file_name, TRAPS) {
      _class_name = class_name;
      _file_name = file_name;
#if INCLUDE_CDS
      if (!DumpSharedSpaces && !UseSharedSpaces) {
        // Must not modify _app_paths_start_index if we're not using CDS.
        assert(_app_paths_start_index == ClassLoaderExt::max_classpath_index, "must be");
      }
#endif
    }

    bool check(const ClassFileStream* stream, const int classpath_index) {
      return true;
      CDS_ONLY(return ClassLoaderExt::check(this, stream, classpath_index);)
      NOT_CDS(return true;)
    }

    bool should_verify(int classpath_index) {
      return false;
      CDS_ONLY(return (classpath_index >= _app_paths_start_index);)
      NOT_CDS(return false;)
    }

    void record_result(Symbol* class_name,
                       const s2 classpath_index,
                       InstanceKlass* result, TRAPS) {
                       InstanceKlass* result,
                       TRAPS) {
#if INCLUDE_CDS
      assert(DumpSharedSpaces, "Sanity");
      oop loader = result->class_loader();
      s2 classloader_type = ClassLoader::BOOT_LOADER;
      if (SystemDictionary::is_system_class_loader(loader)) {
        classloader_type = ClassLoader::APP_LOADER;
        ClassLoaderExt::set_has_app_classes();
      } else if (SystemDictionary::is_platform_class_loader(loader)) {
        classloader_type = ClassLoader::PLATFORM_LOADER;
        ClassLoaderExt::set_has_platform_classes();
      }
      result->set_shared_classpath_index(classpath_index);
      result->set_class_loader_type(classloader_type);
      ClassLoaderExt::record_result(this, class_name, classpath_index, result, THREAD);
#endif
    }
  };

    ~Context() {
#if INCLUDE_CDS
      if (!DumpSharedSpaces && !UseSharedSpaces) {
        // Must not modify app_paths_start_index if we're not using CDS.
        assert(_app_paths_start_index == ClassLoaderExt::max_classpath_index, "must be");
      }
#endif
    }
  }; // end ClassLoaderExt::Context

private:
#if INCLUDE_CDS
  static char* get_class_path_attr(const char* jar_path, char* manifest, jint manifest_size);
  static void setup_app_search_path(); // Only when -Xshare:dump
  static SharedPathsMiscInfoExt* shared_paths_misc_info() {
    return (SharedPathsMiscInfoExt*)_shared_paths_misc_info;
  }
  static jshort _app_paths_start_index; // index of first app JAR in shared classpath entry table
  static bool _has_app_classes;
  static bool _has_platform_classes;
#endif

public:
  CDS_ONLY(static void process_jar_manifest(ClassPathEntry* entry, bool check_for_duplicates);)

  // Called by JVMTI code to add boot classpath
  static void append_boot_classpath(ClassPathEntry* new_entry) {
#if INCLUDE_CDS
    if (UseAppCDS) {
      warning("UseAppCDS is disabled because bootstrap classpath has been appended");
      UseAppCDS = false;
    }
#endif
    ClassLoader::add_to_boot_append_entries(new_entry);
  }
  static void setup_search_paths() {}
  static bool is_boot_classpath(int classpath_index) {
    return true;
  }
  static Klass* load_one_class(ClassListParser* parser, TRAPS);

  static void setup_search_paths() NOT_CDS_RETURN;

#if INCLUDE_CDS
  static void set_has_app_classes() {}
  static void set_has_platform_classes() {}
private:
  static char* read_manifest(ClassPathEntry* entry, jint *manifest_size, bool clean_text, TRAPS);
  static ClassPathEntry* find_classpath_entry_from_cache(const char* path, TRAPS);

public:
  static char* read_manifest(ClassPathEntry* entry, jint *manifest_size, TRAPS) {
    return NULL;
    // Remove all the new-line continuations (which wrap long lines at 72 characters, see
    // http://docs.oracle.com/javase/6/docs/technotes/guides/jar/jar.html#JAR%20Manifest), so
    // that the manifest is easier to parse.
    return read_manifest(entry, manifest_size, true, THREAD);
  }
  static char* read_raw_manifest(ClassPathEntry* entry, jint *manifest_size, TRAPS) {
    // Do not remove new-line continuations, so we can easily pass it as an argument to
    // java.util.jar.Manifest.getManifest() at run-time.
    return read_manifest(entry, manifest_size, false, THREAD);
  }

  static void finalize_shared_paths_misc_info();

  static jshort app_paths_start_index() { return _app_paths_start_index; }

  static void init_paths_start_index(jshort app_start) {
    _app_paths_start_index = app_start;
  }

  static bool is_boot_classpath(int classpath_index) {
    return classpath_index < _app_paths_start_index;
  }

  static bool has_platform_or_app_classes() {
    return _has_app_classes || _has_platform_classes;
  }

  static bool check(class ClassLoaderExt::Context *context,
                    const ClassFileStream* stream,
                    const int classpath_index);

  static void record_result(class ClassLoaderExt::Context *context,
                            Symbol* class_name,
                            const s2 classpath_index,
                            InstanceKlass* result, TRAPS);
  static InstanceKlass* load_class(Symbol* h_name, const char* path, TRAPS);
  static Klass* load_one_class(ClassListParser* parser, TRAPS);
  static void set_has_app_classes() {
    _has_app_classes = true;
  }
  static void set_has_platform_classes() {
    _has_platform_classes = true;
  }
  static void process_jar_manifest(ClassPathEntry* entry, bool check_for_duplicates) {}
#endif
};

@ -25,7 +25,7 @@
|
||||
#ifndef SHARE_VM_CLASSFILE_KLASSFACTORY_HPP
|
||||
#define SHARE_VM_CLASSFILE_KLASSFACTORY_HPP
|
||||
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "runtime/handles.hpp"
|
||||
|
||||
class ClassFileStream;
|
||||
|
251
src/hotspot/share/classfile/sharedClassUtil.cpp
Normal file
251
src/hotspot/share/classfile/sharedClassUtil.cpp
Normal file
@ -0,0 +1,251 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "classfile/classLoader.hpp"
|
||||
#include "classfile/classLoaderExt.hpp"
|
||||
#include "classfile/dictionary.hpp"
|
||||
#include "classfile/javaClasses.hpp"
|
||||
#include "classfile/sharedClassUtil.hpp"
|
||||
#include "classfile/stringTable.hpp"
|
||||
#include "classfile/symbolTable.hpp"
|
||||
#include "classfile/systemDictionary.hpp"
|
||||
#include "classfile/systemDictionaryShared.hpp"
|
||||
#include "memory/filemap.hpp"
|
||||
#include "memory/metadataFactory.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "oops/instanceKlass.hpp"
|
||||
#include "runtime/arguments.hpp"
|
||||
#include "runtime/java.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
|
||||
class ManifestStream: public ResourceObj {
|
||||
private:
|
||||
u1* _buffer_start; // Buffer bottom
|
||||
u1* _buffer_end; // Buffer top (one past last element)
|
||||
u1* _current; // Current buffer position
|
||||
|
||||
public:
|
||||
// Constructor
|
||||
ManifestStream(u1* buffer, int length) : _buffer_start(buffer),
|
||||
_current(buffer) {
|
||||
_buffer_end = buffer + length;
|
||||
}
|
||||
|
||||
static bool is_attr(u1* attr, const char* name) {
|
||||
return strncmp((const char*)attr, name, strlen(name)) == 0;
|
||||
}
|
||||
|
||||
static char* copy_attr(u1* value, size_t len) {
|
||||
char* buf = NEW_RESOURCE_ARRAY(char, len + 1);
|
||||
strncpy(buf, (char*)value, len);
|
||||
buf[len] = 0;
|
||||
return buf;
|
||||
}
|
||||
|
||||
// The return value indicates if the JAR is signed or not
|
||||
bool check_is_signed() {
|
||||
u1* attr = _current;
|
||||
bool isSigned = false;
|
||||
while (_current < _buffer_end) {
|
||||
if (*_current == '\n') {
|
||||
*_current = '\0';
|
||||
u1* value = (u1*)strchr((char*)attr, ':');
|
||||
if (value != NULL) {
|
||||
assert(*(value+1) == ' ', "Unrecognized format" );
|
||||
if (strstr((char*)attr, "-Digest") != NULL) {
|
||||
isSigned = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
*_current = '\n'; // restore
|
||||
attr = _current + 1;
|
||||
}
|
||||
_current ++;
|
||||
}
|
||||
return isSigned;
|
||||
}
|
||||
};
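
A minimal usage sketch for check_is_signed() above (illustrative only, not part of the patch; is_jar_signed is a hypothetical helper, and the manifest buffer is assumed to come from ClassLoaderExt::read_manifest()):

// Hypothetical helper: returns true if the JAR's manifest carries
// "*-Digest" attributes, i.e. the JAR is signed, as detected by
// ManifestStream::check_is_signed() above.
static bool is_jar_signed(char* manifest, jint manifest_size) {
  ManifestStream ms((u1*)manifest, (int)manifest_size);
  return ms.check_is_signed();
}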

void SharedPathsMiscInfoExt::print_path(outputStream* out, int type, const char* path) {
switch(type) {
case APP:
ClassLoader::trace_class_path("Expecting -Djava.class.path=", path);
break;
default:
SharedPathsMiscInfo::print_path(out, type, path);
}
}

bool SharedPathsMiscInfoExt::check(jint type, const char* path) {

switch (type) {
case APP:
{
// Prefix is OK: E.g., dump with -cp foo.jar, but run with -cp foo.jar:bar.jar
size_t len = strlen(path);
const char *appcp = Arguments::get_appclasspath();
assert(appcp != NULL, "NULL app classpath");
size_t appcp_len = strlen(appcp);
if (appcp_len < len) {
return fail("Run time APP classpath is shorter than the one at dump time: ", appcp);
}
ResourceMark rm;
char* tmp_path;
if (len == appcp_len) {
tmp_path = (char*)appcp;
} else {
tmp_path = NEW_RESOURCE_ARRAY(char, len + 1);
strncpy(tmp_path, appcp, len);
tmp_path[len] = 0;
}
if (os::file_name_strcmp(path, tmp_path) != 0) {
return fail("[APP classpath mismatch, actual: -Djava.class.path=", appcp);
}
if (appcp[len] != '\0' && appcp[len] != os::path_separator()[0]) {
return fail("Dump time APP classpath is not a proper prefix of run time APP classpath: ", appcp);
}
}
break;
default:
return SharedPathsMiscInfo::check(type, path);
}

return true;
}
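
The APP case above enforces a prefix rule; a standalone sketch of that rule (illustrative only; classpath_prefix_ok is a hypothetical helper, and the real code compares with os::file_name_strcmp() rather than strncmp()):

#include <cstring>

// The dump-time app classpath must equal the run-time classpath, or be a
// proper prefix of it that ends exactly at a path-separator boundary.
static bool classpath_prefix_ok(const char* dumped, const char* runtime, char sep) {
  size_t len = strlen(dumped);
  if (strlen(runtime) < len) return false;              // run-time classpath too short
  if (strncmp(runtime, dumped, len) != 0) return false; // prefix mismatch
  return runtime[len] == '\0' || runtime[len] == sep;   // must end on a boundary
}
// classpath_prefix_ok("foo.jar", "foo.jar:bar.jar", ':') -> true
// classpath_prefix_ok("foo.jar", "foo.jarx", ':')        -> false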

void SharedClassUtil::update_shared_classpath(ClassPathEntry *cpe, SharedClassPathEntry* e, TRAPS) {
ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
SharedClassPathEntryExt* ent = (SharedClassPathEntryExt*)e;
ResourceMark rm(THREAD);
jint manifest_size;
bool isSigned;

if (cpe->is_jar_file()) {
char* manifest = ClassLoaderExt::read_manifest(cpe, &manifest_size, CHECK);
if (manifest != NULL) {
ManifestStream* stream = new ManifestStream((u1*)manifest,
manifest_size);
isSigned = stream->check_is_signed();
if (isSigned) {
ent->_is_signed = true;
} else {
// Copy the manifest into the shared archive
manifest = ClassLoaderExt::read_raw_manifest(cpe, &manifest_size, CHECK);
Array<u1>* buf = MetadataFactory::new_array<u1>(loader_data,
manifest_size,
THREAD);
char* p = (char*)(buf->data());
memcpy(p, manifest, manifest_size);
ent->set_manifest(buf);
ent->_is_signed = false;
}
}
}
}

void SharedClassUtil::initialize(TRAPS) {
if (UseSharedSpaces) {
int size = FileMapInfo::get_number_of_share_classpaths();
if (size > 0) {
SystemDictionaryShared::allocate_shared_data_arrays(size, THREAD);
if (!DumpSharedSpaces) {
FileMapHeaderExt* header = (FileMapHeaderExt*)FileMapInfo::current_info()->header();
ClassLoaderExt::init_paths_start_index(header->_app_paths_start_index);
}
}
}

if (DumpSharedSpaces) {
if (SharedArchiveConfigFile) {
read_extra_data(SharedArchiveConfigFile, THREAD);
}
}
}

void SharedClassUtil::read_extra_data(const char* filename, TRAPS) {
HashtableTextDump reader(filename);
reader.check_version("VERSION: 1.0");

while (reader.remain() > 0) {
int utf8_length;
int prefix_type = reader.scan_prefix(&utf8_length);
ResourceMark rm(THREAD);
char* utf8_buffer = NEW_RESOURCE_ARRAY(char, utf8_length);
reader.get_utf8(utf8_buffer, utf8_length);

if (prefix_type == HashtableTextDump::SymbolPrefix) {
SymbolTable::new_symbol(utf8_buffer, utf8_length, THREAD);
} else {
assert(prefix_type == HashtableTextDump::StringPrefix, "Sanity");
utf8_buffer[utf8_length] = '\0';
oop s = StringTable::intern(utf8_buffer, THREAD);
}
}
}

bool SharedClassUtil::is_classpath_entry_signed(int classpath_index) {
assert(classpath_index >= 0, "Sanity");
SharedClassPathEntryExt* ent = (SharedClassPathEntryExt*)
FileMapInfo::shared_classpath(classpath_index);
return ent->_is_signed;
}

void FileMapHeaderExt::populate(FileMapInfo* mapinfo, size_t alignment) {
FileMapInfo::FileMapHeader::populate(mapinfo, alignment);

ClassLoaderExt::finalize_shared_paths_misc_info();
_app_paths_start_index = ClassLoaderExt::app_paths_start_index();

_verify_local = BytecodeVerificationLocal;
_verify_remote = BytecodeVerificationRemote;
_has_platform_or_app_classes = ClassLoaderExt::has_platform_or_app_classes();
}

bool FileMapHeaderExt::validate() {
if (UseAppCDS) {
const char* prop = Arguments::get_property("java.system.class.loader");
if (prop != NULL) {
warning("UseAppCDS is disabled because the java.system.class.loader property is specified (value = \"%s\"). "
"To enable UseAppCDS, this property must not be set", prop);
UseAppCDS = false;
}
}

if (!FileMapInfo::FileMapHeader::validate()) {
return false;
}

// For backwards compatibility, we don't check the verification setting
// if the archive only contains system classes.
if (_has_platform_or_app_classes &&
((!_verify_local && BytecodeVerificationLocal) ||
(!_verify_remote && BytecodeVerificationRemote))) {
FileMapInfo::fail_continue("The shared archive file was created with a less restrictive "
"verification setting than the current setting.");
return false;
}

return true;
}
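
The verification check above boils down to a per-flag compatibility predicate; a sketch (illustrative only; verification_setting_ok is a hypothetical helper, applied to _verify_local and _verify_remote in turn):

// An archive dumped with verification disabled must not be used in a run
// that requires verification; every other combination is compatible.
static bool verification_setting_ok(bool dumped_with_verify, bool run_requires_verify) {
  return dumped_with_verify || !run_requires_verify;
}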

@ -27,37 +27,108 @@

#include "classfile/sharedPathsMiscInfo.hpp"
#include "memory/filemap.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "oops/klass.hpp"

class SharedClassUtil : AllStatic {
class FileMapHeaderExt: public FileMapInfo::FileMapHeader {
public:
jshort _app_paths_start_index; // Index of first app classpath entry
bool _verify_local; // BytecodeVerificationLocal setting
bool _verify_remote; // BytecodeVerificationRemote setting
bool _has_platform_or_app_classes; // Archive contains app classes

static SharedPathsMiscInfo* allocate_shared_paths_misc_info() {
return new SharedPathsMiscInfo();
FileMapHeaderExt() {
_has_platform_or_app_classes = true;
}
virtual void populate(FileMapInfo* mapinfo, size_t alignment);
virtual bool validate();
};

// In addition to SharedPathsMiscInfo, the following information is also stored
//
//
// + The value of Arguments::get_appclasspath() used during dumping.
//
class SharedPathsMiscInfoExt : public SharedPathsMiscInfo {
private:
int _app_offset;
public:
enum {
APP = 5
};

virtual const char* type_name(int type) {
switch (type) {
case APP: return "APP";
default: return SharedPathsMiscInfo::type_name(type);
}
}

static SharedPathsMiscInfo* allocate_shared_paths_misc_info(char* buf, int size) {
return new SharedPathsMiscInfo(buf, size);
virtual void print_path(outputStream* out, int type, const char* path);

SharedPathsMiscInfoExt() : SharedPathsMiscInfo() {
_app_offset = 0;
}
SharedPathsMiscInfoExt(char* buf, int size) : SharedPathsMiscInfo(buf, size) {
_app_offset = 0;
}

static FileMapInfo::FileMapHeader* allocate_file_map_header() {
return new FileMapInfo::FileMapHeader();
virtual bool check(jint type, const char* path);

void add_app_classpath(const char* path) {
add_path(path, APP);
}

static size_t file_map_header_size() {
return sizeof(FileMapInfo::FileMapHeader);
void record_app_offset() {
_app_offset = get_used_bytes();
}

static size_t shared_class_path_entry_size() {
return sizeof(SharedClassPathEntry);
}

static void update_shared_classpath(ClassPathEntry *cpe,
SharedClassPathEntry* ent, TRAPS) {}
static void initialize(TRAPS) {}

inline static bool is_shared_boot_class(Klass* klass) {
return (klass->_shared_class_path_index >= 0);
void pop_app() {
_cur_ptr = _buf_start + _app_offset;
write_jint(0);
}
};

class SharedClassPathEntryExt: public SharedClassPathEntry {
public:
// Manifest attributes
bool _is_signed;
void set_manifest(Array<u1>* manifest) {
_manifest = manifest;
}
};

class SharedClassUtil : AllStatic {
public:
static SharedPathsMiscInfo* allocate_shared_paths_misc_info() {
return new SharedPathsMiscInfoExt();
}

static SharedPathsMiscInfo* allocate_shared_paths_misc_info(char* buf, int size) {
return new SharedPathsMiscInfoExt(buf, size);
}

static FileMapInfo::FileMapHeader* allocate_file_map_header() {
return new FileMapHeaderExt();
}

static size_t file_map_header_size() {
return sizeof(FileMapHeaderExt);
}

static size_t shared_class_path_entry_size() {
return sizeof(SharedClassPathEntryExt);
}

static void update_shared_classpath(ClassPathEntry *cpe, SharedClassPathEntry* ent, TRAPS);
static void initialize(TRAPS);

private:
static void read_extra_data(const char* filename, TRAPS);

public:
static bool is_classpath_entry_signed(int classpath_index);
};

#endif // SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP

@ -34,6 +34,18 @@
#include "runtime/arguments.hpp"
#include "utilities/ostream.hpp"

SharedPathsMiscInfo::SharedPathsMiscInfo() {
_buf_size = INITIAL_BUF_SIZE;
_cur_ptr = _buf_start = NEW_C_HEAP_ARRAY(char, _buf_size, mtClass);
_allocated = true;
}

SharedPathsMiscInfo::~SharedPathsMiscInfo() {
if (_allocated) {
FREE_C_HEAP_ARRAY(char, _buf_start);
}
}

void SharedPathsMiscInfo::add_path(const char* path, int type) {
log_info(class, path)("type=%s ", type_name(type));
ClassLoader::trace_class_path("add misc shared path ", path);

@ -127,7 +139,8 @@ bool SharedPathsMiscInfo::check() {
bool SharedPathsMiscInfo::check(jint type, const char* path) {
switch (type) {
case BOOT:
if (os::file_name_strcmp(path, Arguments::get_sysclasspath()) != 0) {
// In the future we should perform the check based on the content of the mapped archive.
if (UseAppCDS && os::file_name_strcmp(path, Arguments::get_sysclasspath()) != 0) {
return fail("[BOOT classpath mismatch, actual =", Arguments::get_sysclasspath());
}
break;

@ -74,11 +74,7 @@ public:
INITIAL_BUF_SIZE = 128
};
// This constructor is used when creating the misc information (during dump)
SharedPathsMiscInfo() {
_buf_size = INITIAL_BUF_SIZE;
_cur_ptr = _buf_start = NEW_C_HEAP_ARRAY(char, _buf_size, mtClass);
_allocated = true;
}
SharedPathsMiscInfo();
// This constructor is used when validating the misc info (during run time)
SharedPathsMiscInfo(char *buff, int size) {
_cur_ptr = _buf_start = buff;

@ -86,11 +82,8 @@ public:
_buf_size = size;
_allocated = false;
}
~SharedPathsMiscInfo() {
if (_allocated) {
FREE_C_HEAP_ARRAY(char, _buf_start);
}
}
~SharedPathsMiscInfo();

int get_used_bytes() {
return _cur_ptr - _buf_start;
}

@ -25,7 +25,7 @@
#ifndef SHARE_VM_CLASSFILE_STRINGTABLE_HPP
#define SHARE_VM_CLASSFILE_STRINGTABLE_HPP

#include "memory/allocation.inline.hpp"
#include "memory/allocation.hpp"
#include "utilities/hashtable.hpp"

template <class T, class N> class CompactHashtable;

@ -25,7 +25,7 @@
#ifndef SHARE_VM_CLASSFILE_SYMBOLTABLE_HPP
#define SHARE_VM_CLASSFILE_SYMBOLTABLE_HPP

#include "memory/allocation.inline.hpp"
#include "memory/allocation.hpp"
#include "oops/symbol.hpp"
#include "utilities/hashtable.hpp"

@ -1087,7 +1087,7 @@ InstanceKlass* SystemDictionary::resolve_from_stream(Symbol* class_name,
#if INCLUDE_CDS
ResourceMark rm(THREAD);
if (DumpSharedSpaces && !class_loader.is_null() &&
!ArgumentsExt::using_AppCDS() && strcmp(class_name->as_C_string(), "Unnamed") != 0) {
!UseAppCDS && strcmp(class_name->as_C_string(), "Unnamed") != 0) {
// If AppCDS is not enabled, don't define the class at dump time (except for the "Unnamed"
// class, which is used by MethodHandles).
THROW_MSG_NULL(vmSymbols::java_lang_ClassNotFoundException(), class_name->as_C_string());

src/hotspot/share/classfile/systemDictionaryShared.cpp (new file, 1086 lines)
File diff suppressed because it is too large

@ -25,75 +25,362 @@
#ifndef SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
#define SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP

#include "classfile/systemDictionary.hpp"
#include "oops/klass.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/systemDictionary.hpp"
#include "memory/filemap.hpp"

/*===============================================================================

Handling of the classes in the AppCDS archive

To ensure safety and to simplify the implementation, archived classes are
"segregated" into several types. The following rules describe how they
are stored and looked up.

[1] Category of archived classes

There are 2 disjoint groups of classes stored in the AppCDS archive. They are
categorized by their SharedDictionaryEntry::loader_type():

BUILTIN: These classes may be defined ONLY by the BOOT/PLATFORM/APP
loaders.

UNREGISTERED: These classes may be defined ONLY by a ClassLoader
instance that's not listed above (using fingerprint matching)

[2] How classes from different categories are specified in the classlist:

Starting from JDK9, each class in the classlist may be specified with
these keywords: "id", "super", "interfaces", "loader" and "source".

BUILTIN: Only the "id" keyword may be (optionally) specified. All other
keywords are forbidden.

The named class is looked up from the jimage and from
Xbootclasspath/a and CLASSPATH.

UNREGISTERED: The "id", "super", and "source" keywords must all be
specified.

The "interfaces" keyword must be specified if the class implements
one or more local interfaces. The "interfaces" keyword must not be
specified if the class does not implement local interfaces.

The named class is looked up from the location specified in the
"source" keyword.

Example classlist:

# BUILTIN
java/lang/Object id: 0
java/lang/Cloneable id: 1
java/lang/String

# UNREGISTERED
Bar id: 3 super: 0 interfaces: 1 source: /foo.jar

[3] Identifying the loader_type of archived classes in the shared dictionary

Each archived Klass* C is associated with a SharedDictionaryEntry* E

BUILTIN: (C->shared_classpath_index() >= 0)
UNREGISTERED: (C->shared_classpath_index() < 0)

[4] Lookup of archived classes at run time:

(a) BUILTIN loaders:

Search the shared dictionary for a BUILTIN class with a matching name.

(b) UNREGISTERED loaders:

The search originates with SystemDictionaryShared::lookup_from_stream().

Search the shared dictionary for a UNREGISTERED class with a matching
(name, clsfile_len, clsfile_crc32) tuple.

===============================================================================*/
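
Rules [1] and [3] can be restated as a one-line classification (illustrative only; is_builtin_index is a hypothetical helper mirroring SharedDictionaryEntry::loader_type() below):

// BUILTIN classes record a real classpath index; UNREGISTERED classes are
// tagged with the negative sentinel UNREGISTERED_INDEX defined below.
static bool is_builtin_index(int shared_classpath_index) {
  return shared_classpath_index >= 0;
}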
#define UNREGISTERED_INDEX -9999

class ClassFileStream;

class SystemDictionaryShared: public SystemDictionary {
// Archived classes need extra information not needed by traditionally loaded classes.
// To keep footprint small, we add these in the dictionary entry instead of the InstanceKlass.
class SharedDictionaryEntry : public DictionaryEntry {

public:
static void initialize(TRAPS) {}
static InstanceKlass* find_or_load_shared_class(Symbol* class_name,
Handle class_loader,
TRAPS) {
return NULL;
}
static void roots_oops_do(OopClosure* blk) {}
static void oops_do(OopClosure* f) {}
static bool is_sharing_possible(ClassLoaderData* loader_data) {
oop class_loader = loader_data->class_loader();
return (class_loader == NULL);
}
static bool is_shared_class_visible_for_classloader(
InstanceKlass* ik,
Handle class_loader,
const char* pkg_string,
Symbol* pkg_name,
PackageEntry* pkg_entry,
ModuleEntry* mod_entry,
TRAPS) {
return false;
enum LoaderType {
LT_BUILTIN,
LT_UNREGISTERED
};

enum {
FROM_FIELD_IS_PROTECTED = 1 << 0,
FROM_IS_ARRAY = 1 << 1,
FROM_IS_OBJECT = 1 << 2
};

int _id;
int _clsfile_size;
int _clsfile_crc32;
void* _verifier_constraints; // FIXME - use a union here to avoid type casting??
void* _verifier_constraint_flags;

// See "Identifying the loader_type of archived classes" comments above.
LoaderType loader_type() const {
Klass* k = (Klass*)literal();

if ((k->shared_classpath_index() != UNREGISTERED_INDEX)) {
return LT_BUILTIN;
} else {
return LT_UNREGISTERED;
}
}

SharedDictionaryEntry* next() {
return (SharedDictionaryEntry*)(DictionaryEntry::next());
}

bool is_builtin() const {
return loader_type() == LT_BUILTIN;
}
bool is_unregistered() const {
return loader_type() == LT_UNREGISTERED;
}

void add_verification_constraint(Symbol* name,
Symbol* from_name, bool from_field_is_protected, bool from_is_array, bool from_is_object);
int finalize_verification_constraints();
void check_verification_constraints(InstanceKlass* klass, TRAPS);
void metaspace_pointers_do(MetaspaceClosure* it) NOT_CDS_RETURN;
};

class SharedDictionary : public Dictionary {
SharedDictionaryEntry* get_entry_for_builtin_loader(const Symbol* name) const;
SharedDictionaryEntry* get_entry_for_unregistered_loader(const Symbol* name,
int clsfile_size,
int clsfile_crc32) const;

// Convenience functions
SharedDictionaryEntry* bucket(int index) const {
return (SharedDictionaryEntry*)(Dictionary::bucket(index));
}

public:
SharedDictionaryEntry* find_entry_for(Klass* klass);
void finalize_verification_constraints();

bool add_non_builtin_klass(const Symbol* class_name,
ClassLoaderData* loader_data,
InstanceKlass* obj);

void update_entry(Klass* klass, int id);

Klass* find_class_for_builtin_loader(const Symbol* name) const;
Klass* find_class_for_unregistered_loader(const Symbol* name,
int clsfile_size,
int clsfile_crc32) const;
bool class_exists_for_unregistered_loader(const Symbol* name) {
return (get_entry_for_unregistered_loader(name, -1, -1) != NULL);
}
};

class SystemDictionaryShared: public SystemDictionary {
private:
// These _shared_xxxs arrays are used to initialize the java.lang.Package and
// java.security.ProtectionDomain objects associated with each shared class.
//
// See SystemDictionaryShared::init_security_info for more info.
static objArrayOop _shared_protection_domains;
static objArrayOop _shared_jar_urls;
static objArrayOop _shared_jar_manifests;

static InstanceKlass* load_shared_class_for_builtin_loader(
Symbol* class_name,
Handle class_loader,
TRAPS);
static Handle get_package_name(Symbol* class_name, TRAPS);

// Package handling:
//
// 1. For named modules in the runtime image
// BOOT classes: Reuses the existing JVM_GetSystemPackage(s) interfaces
// to get packages in named modules for shared classes.
// Package for non-shared classes in named module is also
// handled using JVM_GetSystemPackage(s).
//
// APP classes: VM calls ClassLoaders.AppClassLoader::definePackage(String, Module)
// to define package for shared app classes from named
// modules.
//
// PLATFORM classes: VM calls ClassLoaders.PlatformClassLoader::definePackage(String, Module)
// to define package for shared platform classes from named
// modules.
//
// 2. For unnamed modules
// BOOT classes: Reuses the existing JVM_GetSystemPackage(s) interfaces to
// get packages for shared boot classes in unnamed modules.
//
// APP classes: VM calls ClassLoaders.AppClassLoader::defineOrCheckPackage()
// with the manifest and url from archived data.
//
// PLATFORM classes: No package is defined.
//
// The following two define_shared_package() functions are used to define
// package for shared APP and PLATFORM classes.
static void define_shared_package(Symbol* class_name,
Handle class_loader,
Handle manifest,
Handle url,
TRAPS);
static void define_shared_package(Symbol* class_name,
Handle class_loader,
ModuleEntry* mod_entry,
TRAPS);

static Handle get_shared_jar_manifest(int shared_path_index, TRAPS);
static Handle get_shared_jar_url(int shared_path_index, TRAPS);
static Handle get_protection_domain_from_classloader(Handle class_loader,
Handle url, TRAPS);
static Handle get_shared_protection_domain(Handle class_loader,
int shared_path_index,
Handle url,
TRAPS);
static Handle get_shared_protection_domain(Handle class_loader,
ModuleEntry* mod, TRAPS);
static Handle init_security_info(Handle class_loader, InstanceKlass* ik, TRAPS);

static void atomic_set_array_index(objArrayOop array, int index, oop o) {
// Benign race condition: array.obj_at(index) may already be filled in.
// The important thing here is that all threads pick up the same result.
// It doesn't matter which racing thread wins, as long as only one
// result is used by all threads, and all future queries.
array->atomic_compare_exchange_oop(index, o, NULL);
}
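
The same publish-once idiom in portable C++, for comparison (illustrative only; HotSpot uses its own oop-aware compare-exchange above, and publish_once is a hypothetical name):

#include <atomic>

// Racing threads may each compute a candidate, but only the first successful
// compare-exchange installs one; all threads then reuse the installed value.
static const char* publish_once(std::atomic<const char*>& slot, const char* candidate) {
  const char* expected = nullptr;
  slot.compare_exchange_strong(expected, candidate);
  return slot.load();   // same value for all threads, now and later
}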

static oop shared_protection_domain(int index);
static void atomic_set_shared_protection_domain(int index, oop pd) {
atomic_set_array_index(_shared_protection_domains, index, pd);
}
static void allocate_shared_protection_domain_array(int size, TRAPS);
static oop shared_jar_url(int index);
static void atomic_set_shared_jar_url(int index, oop url) {
atomic_set_array_index(_shared_jar_urls, index, url);
}
static void allocate_shared_jar_url_array(int size, TRAPS);
static oop shared_jar_manifest(int index);
static void atomic_set_shared_jar_manifest(int index, oop man) {
atomic_set_array_index(_shared_jar_manifests, index, man);
}
static void allocate_shared_jar_manifest_array(int size, TRAPS);
static InstanceKlass* acquire_class_for_current_thread(
InstanceKlass *ik,
Handle class_loader,
Handle protection_domain,
TRAPS);

public:
static void initialize(TRAPS);

// Called by PLATFORM/APP loader only
static InstanceKlass* find_or_load_shared_class(Symbol* class_name,
Handle class_loader,
TRAPS);

static void allocate_shared_data_arrays(int size, TRAPS);
static void oops_do(OopClosure* f);
static void roots_oops_do(OopClosure* f) {
oops_do(f);
}

// Check if sharing is supported for the class loader.
static bool is_sharing_possible(ClassLoaderData* loader_data) {
oop class_loader = loader_data->class_loader();
return (class_loader == NULL ||
(UseAppCDS && (SystemDictionary::is_system_class_loader(class_loader) ||
SystemDictionary::is_platform_class_loader(class_loader)))
);
}
static bool is_shared_class_visible_for_classloader(InstanceKlass* ik,
Handle class_loader,
const char* pkg_string,
Symbol* pkg_name,
PackageEntry* pkg_entry,
ModuleEntry* mod_entry,
TRAPS);
static PackageEntry* get_package_entry(Symbol* pkg,
ClassLoaderData *loader_data) {
if (loader_data != NULL) {
PackageEntryTable* pkgEntryTable = loader_data->packages();
return pkgEntryTable->lookup_only(pkg);
}
return NULL;
}

static bool add_non_builtin_klass(Symbol* class_name, ClassLoaderData* loader_data,
InstanceKlass* k, TRAPS);
static Klass* dump_time_resolve_super_or_fail(Symbol* child_name,
Symbol* class_name,
Handle class_loader,
Handle protection_domain,
bool is_superclass,
TRAPS) {
return NULL;
}
TRAPS);

static size_t dictionary_entry_size() {
return sizeof(DictionaryEntry);
return (DumpSharedSpaces) ? sizeof(SharedDictionaryEntry) : sizeof(DictionaryEntry);
}
static void init_shared_dictionary_entry(Klass* k, DictionaryEntry* entry) NOT_CDS_RETURN;
static bool is_builtin(DictionaryEntry* ent) {
// Can't use virtual function is_builtin because DictionaryEntry doesn't initialize
// vtable because it's not constructed properly.
SharedDictionaryEntry* entry = (SharedDictionaryEntry*)ent;
return entry->is_builtin();
}

static void init_shared_dictionary_entry(Klass* k, DictionaryEntry* entry) {}
static bool is_builtin(DictionaryEntry* entry) { return true; }
// For convenient access to the SharedDictionaryEntry's of the archived classes.
static SharedDictionary* shared_dictionary() {
assert(!DumpSharedSpaces, "not for dumping");
return (SharedDictionary*)SystemDictionary::shared_dictionary();
}

static InstanceKlass* lookup_from_stream(Symbol* class_name,
static SharedDictionary* boot_loader_dictionary() {
return (SharedDictionary*)ClassLoaderData::the_null_class_loader_data()->dictionary();
}

static void update_shared_entry(Klass* klass, int id) {
assert(DumpSharedSpaces, "sanity");
assert((SharedDictionary*)(klass->class_loader_data()->dictionary()) != NULL, "sanity");
((SharedDictionary*)(klass->class_loader_data()->dictionary()))->update_entry(klass, id);
}

static void set_shared_class_misc_info(Klass* k, ClassFileStream* cfs);

static InstanceKlass* lookup_from_stream(const Symbol* class_name,
Handle class_loader,
Handle protection_domain,
const ClassFileStream* st,
TRAPS) {
return NULL;
}

// The (non-application) CDS implementation supports only classes in the boot
// class loader, which ensures that the verification constraints are the same
// during archive creation time and runtime. Thus we can do the constraint checks
// entirely during archive creation time.
TRAPS);
// "verification_constraints" are a set of checks performed by
// VerificationType::is_reference_assignable_from when verifying a shared class during
// dump time.
//
// With AppCDS, it is possible to override archived classes by calling
// ClassLoader.defineClass() directly. SystemDictionary::load_shared_class() already
// ensures that you cannot load a shared class if its super type(s) are changed. However,
// we need an additional check to ensure that the verification_constraints did not change
// between dump time and runtime.
static bool add_verification_constraint(Klass* k, Symbol* name,
Symbol* from_name, bool from_field_is_protected,
bool from_is_array, bool from_is_object) {return false;}
static void finalize_verification_constraints() {}
bool from_is_array, bool from_is_object) NOT_CDS_RETURN_(false);
static void finalize_verification_constraints() NOT_CDS_RETURN;
static void check_verification_constraints(InstanceKlass* klass,
TRAPS) {}
};

class SharedDictionaryEntry : public DictionaryEntry {
public:
void metaspace_pointers_do(MetaspaceClosure* it) {}
TRAPS) NOT_CDS_RETURN;
};

#endif // SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2017 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -25,6 +25,17 @@
#ifndef SHARE_VM_CLASSFILE_SYSTEMDICTIONARY_EXT_HPP
#define SHARE_VM_CLASSFILE_SYSTEMDICTIONARY_EXT_HPP

#if INCLUDE_CDS

#define WK_KLASSES_DO_EXT(do_klass) \
/* well-known classes */ \
do_klass(jdk_internal_loader_ClassLoaders_klass, jdk_internal_loader_ClassLoaders, Pre ) \
/*end*/

#else

#define WK_KLASSES_DO_EXT(do_klass)

#endif // INCLUDE_CDS

#endif // SHARE_VM_CLASSFILE_SYSTEMDICTIONARY_EXT_HPP

@ -26,7 +26,6 @@
#define SHARE_VM_CLASSFILE_VMSYMBOLS_HPP

#include "classfile/moduleEntry.hpp"
#include "classfile/vmSymbols_ext.hpp"
#include "oops/symbol.hpp"
#include "memory/iterator.hpp"
#include "trace/traceMacros.hpp"

@ -673,8 +672,12 @@
/* trace signatures */ \
TRACE_TEMPLATES(template) \
\
/* extensions */ \
VM_SYMBOLS_DO_EXT(template, do_alias) \
/* cds */ \
template(jdk_internal_loader_ClassLoaders, "jdk/internal/loader/ClassLoaders") \
template(jdk_vm_cds_SharedClassInfo, "jdk/vm/cds/SharedClassInfo") \
template(url_void_signature, "(Ljava/net/URL;)V") \
template(toFileURL_name, "toFileURL") \
template(toFileURL_signature, "(Ljava/lang/String;)Ljava/net/URL;") \
\
/*end*/

@ -259,12 +259,12 @@ void CodeCache::initialize_heaps() {
}

// We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
if(!heap_available(CodeBlobType::MethodProfiled)) {
if (!heap_available(CodeBlobType::MethodProfiled)) {
non_profiled_size += profiled_size;
profiled_size = 0;
}
// We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
if(!heap_available(CodeBlobType::MethodNonProfiled)) {
if (!heap_available(CodeBlobType::MethodNonProfiled)) {
non_nmethod_size += non_profiled_size;
non_profiled_size = 0;
}

@ -282,10 +282,11 @@ void CodeCache::initialize_heaps() {
FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);

// Align CodeHeaps
size_t alignment = heap_alignment();
// If large page support is enabled, align code heaps according to large
// page size to make sure that code cache is covered by large pages.
const size_t alignment = MAX2(page_size(false), (size_t) os::vm_allocation_granularity());
non_nmethod_size = align_up(non_nmethod_size, alignment);
profiled_size = align_down(profiled_size, alignment);
profiled_size = align_down(profiled_size, alignment);

// Reserve one continuous chunk of memory for CodeHeaps and split it into
// parts for the individual heaps. The memory layout looks like this:

@ -308,37 +309,29 @@ void CodeCache::initialize_heaps() {
add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}

size_t CodeCache::heap_alignment() {
// If large page support is enabled, align code heaps according to large
// page size to make sure that code cache is covered by large pages.
const size_t page_size = os::can_execute_large_page_memory() ?
os::page_size_for_region_unaligned(ReservedCodeCacheSize, 8) :
os::vm_page_size();
return MAX2(page_size, (size_t) os::vm_allocation_granularity());
size_t CodeCache::page_size(bool aligned) {
if (os::can_execute_large_page_memory()) {
return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, 8) :
os::page_size_for_region_unaligned(ReservedCodeCacheSize, 8);
} else {
return os::vm_page_size();
}
}
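
The align_up()/align_down() arithmetic used with these page sizes in initialize_heaps() is the usual power-of-two masking; a sketch (illustrative only, and valid only when alignment is a power of two, as page sizes are):

#include <cstddef>

// Round size up (or down) to the nearest multiple of a power-of-two alignment.
static size_t align_up_pow2(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}
static size_t align_down_pow2(size_t size, size_t alignment) {
  return size & ~(alignment - 1);
}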

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
// Determine alignment
const size_t page_size = os::can_execute_large_page_memory() ?
MIN2(os::page_size_for_region_aligned(InitialCodeCacheSize, 8),
os::page_size_for_region_aligned(size, 8)) :
os::vm_page_size();
const size_t granularity = os::vm_allocation_granularity();
const size_t r_align = MAX2(page_size, granularity);
const size_t r_size = align_up(size, r_align);
const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
MAX2(page_size, granularity);

ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);

// Align and reserve space for code cache
const size_t rs_ps = page_size();
const size_t rs_align = MAX2(rs_ps, (size_t) os::vm_allocation_granularity());
const size_t rs_size = align_up(size, rs_align);
ReservedCodeSpace rs(rs_size, rs_align, rs_ps > (size_t) os::vm_page_size());
if (!rs.is_reserved()) {
vm_exit_during_initialization("Could not reserve enough space for code cache");
vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
rs_size/K));
}

// Initialize bounds
_low_bound = (address)rs.base();
_high_bound = _low_bound + rs.size();

return rs;
}

@ -415,7 +408,8 @@ void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type)
size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
size_initial = align_up(size_initial, os::vm_page_size());
if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
vm_exit_during_initialization("Could not reserve enough space for code cache");
vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)",
heap->name(), size_initial/K));
}

// Register the CodeHeap

@ -107,7 +107,7 @@ class CodeCache : AllStatic {
static CodeHeap* get_code_heap(int code_blob_type); // Returns the CodeHeap for the given CodeBlobType
// Returns the name of the VM option to set the size of the corresponding CodeHeap
static const char* get_code_heap_flag_name(int code_blob_type);
static size_t heap_alignment(); // Returns the alignment of the CodeHeaps in bytes
static size_t page_size(bool aligned = true); // Returns the page size used by the CodeCache
static ReservedCodeSpace reserve_heap_memory(size_t size); // Reserves one continuous chunk of memory for the CodeHeaps

// Iteration

@ -28,6 +28,8 @@
#include "code/nmethod.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/thread.hpp"

// Constructors

@ -209,14 +211,24 @@ void ConstantDoubleValue::print_on(outputStream* st) const {
// ConstantOopWriteValue

void ConstantOopWriteValue::write_on(DebugInfoWriteStream* stream) {
assert(JNIHandles::resolve(value()) == NULL ||
Universe::heap()->is_in_reserved(JNIHandles::resolve(value())),
"Should be in heap");
#ifdef ASSERT
{
// cannot use ThreadInVMfromNative here since in case of JVMCI compiler,
// thread is already in VM state.
ThreadInVMfromUnknown tiv;
assert(JNIHandles::resolve(value()) == NULL ||
Universe::heap()->is_in_reserved(JNIHandles::resolve(value())),
"Should be in heap");
}
#endif
stream->write_int(CONSTANT_OOP_CODE);
stream->write_handle(value());
}

void ConstantOopWriteValue::print_on(outputStream* st) const {
// using ThreadInVMfromUnknown here since in case of JVMCI compiler,
// thread is already in VM state.
ThreadInVMfromUnknown tiv;
JNIHandles::resolve(value())->print_value_on(st);
}

@ -438,14 +438,14 @@ nmethod* nmethod::new_native_nmethod(const methodHandle& method,
basic_lock_sp_offset, oop_maps);
NOT_PRODUCT(if (nm != NULL) native_nmethod_stats.note_native_nmethod(nm));
}
// verify nmethod
debug_only(if (nm) nm->verify();) // might block

if (nm != NULL) {
nm->log_new_nmethod();
}
// verify nmethod
debug_only(nm->verify();) // might block

nm->make_in_use();
nm->log_new_nmethod();
nm->make_in_use();
}
return nm;
}

@ -124,7 +124,7 @@ class nmethod : public CompiledMethod {
bool _unload_reported;

// Protected by Patching_lock
volatile char _state; // {not_installed, in_use, not_entrant, zombie, unloaded}
volatile signed char _state; // {not_installed, in_use, not_entrant, zombie, unloaded}

#ifdef ASSERT
bool _oops_are_stale; // indicates that it's no longer safe to access oops section

@ -78,7 +78,6 @@ StubQueue::StubQueue(StubInterface* stub_interface, int buffer_size,
_queue_begin = 0;
_queue_end = 0;
_number_of_stubs = 0;
register_queue(this);
}

@ -205,36 +204,6 @@ void StubQueue::remove_all(){
}

enum { StubQueueLimit = 10 }; // there are only a few in the world
static StubQueue* registered_stub_queues[StubQueueLimit];

void StubQueue::register_queue(StubQueue* sq) {
for (int i = 0; i < StubQueueLimit; i++) {
if (registered_stub_queues[i] == NULL) {
registered_stub_queues[i] = sq;
return;
}
}
ShouldNotReachHere();
}

void StubQueue::queues_do(void f(StubQueue* sq)) {
for (int i = 0; i < StubQueueLimit; i++) {
if (registered_stub_queues[i] != NULL) {
f(registered_stub_queues[i]);
}
}
}

void StubQueue::stubs_do(void f(Stub* s)) {
debug_only(verify();)
MutexLockerEx lock(_mutex);
for (Stub* s = first(); s != NULL; s = next(s)) f(s);
}

void StubQueue::verify() {
// verify only if initialized
if (_stub_buffer == NULL) return;

@ -172,8 +172,6 @@ class StubQueue: public CHeapObj<mtCode> {
void stub_verify(Stub* s) { _stub_interface->verify(s); }
void stub_print(Stub* s) { _stub_interface->print(s); }

static void register_queue(StubQueue*);

public:
StubQueue(StubInterface* stub_interface, int buffer_size, Mutex* lock,
const char* name);

@ -204,8 +202,6 @@ class StubQueue: public CHeapObj<mtCode> {
void deallocate_unused_tail(); // deallocate the unused tail of the underlying CodeBlob
// only used from TemplateInterpreter::initialize()
// Iteration
static void queues_do(void f(StubQueue* s)); // call f with each StubQueue
void stubs_do(void f(Stub* s)); // call f with all stubs
Stub* first() const { return number_of_stubs() > 0 ? stub_at(_queue_begin) : NULL; }
Stub* next(Stub* s) const { int i = index_of(s) + stub_size(s);
// Only wrap around in the non-contiguous case (see stubss.cpp)

@ -213,9 +209,6 @@ class StubQueue: public CHeapObj<mtCode> {
return (i == _queue_end) ? NULL : stub_at(i);
}

address stub_code_begin(Stub* s) const { return _stub_interface->code_begin(s); }
address stub_code_end(Stub* s) const { return _stub_interface->code_end(s); }

// Debugging/printing
void verify(); // verifies the stub queue
void print(); // prints information about the stub queue

@ -25,10 +25,10 @@
#ifndef SHARE_VM_COMPILER_COMPILETASK_HPP
#define SHARE_VM_COMPILER_COMPILETASK_HPP

#include "code/nmethod.hpp"
#include "ci/ciMethod.hpp"
#include "code/nmethod.hpp"
#include "compiler/compileLog.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/allocation.hpp"
#include "utilities/xmlstream.hpp"

// CompileTask

@ -25,7 +25,7 @@
#ifndef SHARE_VM_COMPILER_METHODMATCHER_HPP
#define SHARE_VM_COMPILER_METHODMATCHER_HPP

#include "memory/allocation.inline.hpp"
#include "memory/allocation.hpp"
#include "runtime/handles.inline.hpp"
#include "memory/resourceArea.hpp"

@ -28,6 +28,7 @@
#include "code/compressedStream.hpp"
#include "code/vmreg.hpp"
#include "memory/allocation.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/growableArray.hpp"

// Interface for generating the frame map for compiled code. A frame map

@ -42,6 +43,7 @@
class frame;
class RegisterMap;
class DerivedPointerEntry;
class OopClosure;

class OopMapValue: public StackObj {
friend class VMStructs;

@ -30,3 +30,20 @@
// Technically this should be derived from machine speed, and
// ideally it would be dynamically adjusted.
float AllocationStats::_threshold = ((float)CMS_SweepTimerThresholdMillis)/1000;

void AllocationStats::initialize(bool split_birth) {
AdaptivePaddedAverage* dummy =
new (&_demand_rate_estimate) AdaptivePaddedAverage(CMS_FLSWeight,
CMS_FLSPadding);
_desired = 0;
_coal_desired = 0;
_surplus = 0;
_bfr_surp = 0;
_prev_sweep = 0;
_before_sweep = 0;
_coal_births = 0;
_coal_deaths = 0;
_split_births = (split_birth ? 1 : 0);
_split_deaths = 0;
_returned_bytes = 0;
}
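
The 'new (&_demand_rate_estimate) ...' line above re-runs a member's constructor in its existing storage; a standalone sketch of that placement-new idiom (illustrative only; the struct names are hypothetical):

#include <new>

struct PaddedAverage {
  double _weight;
  explicit PaddedAverage(double w) : _weight(w) {}
};

struct Stats {
  PaddedAverage _avg;
  Stats() : _avg(0.0) {}
  void reinitialize(double w) {
    // Re-construct _avg in place; the returned pointer is deliberately
    // unused, just like 'dummy' in AllocationStats::initialize() above.
    PaddedAverage* dummy = new (&_avg) PaddedAverage(w);
    (void)dummy;
  }
};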
|
||||
|
@ -64,22 +64,7 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
|
||||
ssize_t _split_deaths; // loss from splitting
|
||||
size_t _returned_bytes; // number of bytes returned to list.
|
||||
public:
|
||||
void initialize(bool split_birth = false) {
|
||||
AdaptivePaddedAverage* dummy =
|
||||
new (&_demand_rate_estimate) AdaptivePaddedAverage(CMS_FLSWeight,
|
||||
CMS_FLSPadding);
|
||||
_desired = 0;
|
||||
_coal_desired = 0;
|
||||
_surplus = 0;
|
||||
_bfr_surp = 0;
|
||||
_prev_sweep = 0;
|
||||
_before_sweep = 0;
|
||||
_coal_births = 0;
|
||||
_coal_deaths = 0;
|
||||
_split_births = (split_birth ? 1 : 0);
|
||||
_split_deaths = 0;
|
||||
_returned_bytes = 0;
|
||||
}
|
||||
void initialize(bool split_birth = false);
|
||||
|
||||
AllocationStats() {
|
||||
initialize();
|
||||
|
@ -71,6 +71,6 @@ void ConcurrentMarkSweepPolicy::initialize_size_policy(size_t init_eden_size,
|
||||
}
|
||||
|
||||
void ConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {
|
||||
// initialize the policy counters - 2 collectors, 3 generations
|
||||
_gc_policy_counters = new GCPolicyCounters("ParNew:CMS", 2, 3);
|
||||
// initialize the policy counters - 2 collectors, 2 generations
|
||||
_gc_policy_counters = new GCPolicyCounters("ParNew:CMS", 2, 2);
|
||||
}
|
||||
|
@ -23,17 +23,48 @@
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/cms/compactibleFreeListSpace.hpp"
|
||||
#include "gc/cms/concurrentMarkSweepGeneration.hpp"
|
||||
#include "gc/cms/concurrentMarkSweepThread.hpp"
|
||||
#include "gc/cms/cmsHeap.hpp"
|
||||
#include "gc/cms/parNewGeneration.hpp"
|
||||
#include "gc/cms/vmCMSOperations.hpp"
|
||||
#include "gc/shared/genMemoryPools.hpp"
|
||||
#include "gc/shared/genOopClosures.inline.hpp"
|
||||
#include "gc/shared/strongRootsScope.hpp"
|
||||
#include "gc/shared/workgroup.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/vmThread.hpp"
|
||||
#include "services/memoryManager.hpp"
|
||||
#include "utilities/stack.inline.hpp"
|
||||
|
||||
CMSHeap::CMSHeap(GenCollectorPolicy *policy) : GenCollectedHeap(policy) {
|
||||
class CompactibleFreeListSpacePool : public CollectedMemoryPool {
|
||||
private:
|
||||
CompactibleFreeListSpace* _space;
|
||||
public:
|
||||
CompactibleFreeListSpacePool(CompactibleFreeListSpace* space,
|
||||
const char* name,
|
||||
size_t max_size,
|
||||
bool support_usage_threshold) :
|
||||
CollectedMemoryPool(name, space->capacity(), max_size, support_usage_threshold),
|
||||
_space(space) {
|
||||
}
|
||||
|
||||
MemoryUsage get_memory_usage() {
|
||||
size_t max_heap_size = (available_for_allocation() ? max_size() : 0);
|
||||
size_t used = used_in_bytes();
|
||||
size_t committed = _space->capacity();
|
||||
|
||||
return MemoryUsage(initial_size(), used, committed, max_heap_size);
|
||||
}
|
||||
|
||||
size_t used_in_bytes() {
|
||||
return _space->used();
|
||||
}
|
||||
};
|
||||
|
||||
CMSHeap::CMSHeap(GenCollectorPolicy *policy) :
|
||||
GenCollectedHeap(policy), _eden_pool(NULL), _survivor_pool(NULL), _old_pool(NULL) {
|
||||
_workers = new WorkGang("GC Thread", ParallelGCThreads,
|
||||
/* are_GC_task_threads */true,
|
||||
/* are_ConcurrentGC_threads */false);
|
||||
@ -54,6 +85,38 @@ jint CMSHeap::initialize() {
|
||||
return JNI_OK;
|
||||
}
|
||||
|
||||
void CMSHeap::initialize_serviceability() {
|
||||
_young_manager = new GCMemoryManager("ParNew", "end of minor GC");
|
||||
_old_manager = new GCMemoryManager("ConcurrentMarkSweep", "end of major GC");
|
||||
|
||||
ParNewGeneration* young = (ParNewGeneration*) young_gen();
|
||||
_eden_pool = new ContiguousSpacePool(young->eden(),
|
||||
"Par Eden Space",
|
||||
young->max_eden_size(),
|
||||
false);
|
||||
|
||||
_survivor_pool = new SurvivorContiguousSpacePool(young,
|
||||
"Par Survivor Space",
|
||||
young->max_survivor_size(),
|
||||
false);
|
||||
|
||||
ConcurrentMarkSweepGeneration* old = (ConcurrentMarkSweepGeneration*) old_gen();
|
||||
_old_pool = new CompactibleFreeListSpacePool(old->cmsSpace(),
|
||||
"CMS Old Gen",
|
||||
old->reserved().byte_size(),
|
||||
true);
|
||||
|
||||
_young_manager->add_pool(_eden_pool);
|
||||
_young_manager->add_pool(_survivor_pool);
|
||||
young->set_gc_manager(_young_manager);
|
||||
|
||||
_old_manager->add_pool(_eden_pool);
|
||||
_old_manager->add_pool(_survivor_pool);
|
||||
_old_manager->add_pool(_old_pool);
|
||||
old ->set_gc_manager(_old_manager);
|
||||
|
||||
}
|
||||
|
||||
void CMSHeap::check_gen_kinds() {
|
||||
assert(young_gen()->kind() == Generation::ParNew,
|
||||
"Wrong youngest generation type");
|
||||
@ -183,3 +246,18 @@ void CMSHeap::gc_epilogue(bool full) {
|
||||
GenCollectedHeap::gc_epilogue(full);
|
||||
always_do_update_barrier = true;
|
||||
};
|
||||
|
||||
GrowableArray<GCMemoryManager*> CMSHeap::memory_managers() {
|
||||
GrowableArray<GCMemoryManager*> memory_managers(2);
|
||||
memory_managers.append(_young_manager);
|
||||
memory_managers.append(_old_manager);
|
||||
return memory_managers;
|
||||
}
|
||||
|
||||
GrowableArray<MemoryPool*> CMSHeap::memory_pools() {
|
||||
GrowableArray<MemoryPool*> memory_pools(3);
|
||||
memory_pools.append(_eden_pool);
|
||||
memory_pools.append(_survivor_pool);
|
||||
memory_pools.append(_old_pool);
|
||||
return memory_pools;
|
||||
}
|
||||
@ -29,9 +29,12 @@
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "utilities/growableArray.hpp"

class CLDClosure;
class GenCollectorPolicy;
class GCMemoryManager;
class MemoryPool;
class OopsInGenClosure;
class outputStream;
class StrongRootsScope;
@ -80,6 +83,9 @@ public:
void safepoint_synchronize_begin();
void safepoint_synchronize_end();

virtual GrowableArray<GCMemoryManager*> memory_managers();
virtual GrowableArray<MemoryPool*> memory_pools();

// If "young_gen_as_roots" is false, younger generations are
// not scanned as roots; in this case, the caller must be arranging to
// scan the younger generations itself. (For example, a generation might
@ -92,12 +98,19 @@ public:
OopsInGenClosure* root_closure,
CLDClosure* cld_closure);

GCMemoryManager* old_manager() const { return _old_manager; }

private:
WorkGang* _workers;
MemoryPool* _eden_pool;
MemoryPool* _survivor_pool;
MemoryPool* _old_pool;

virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full);

virtual void initialize_serviceability();

// Accessor for memory state verification support
NOT_PRODUCT(
virtual size_t skip_header_HeapWords() { return CMSCollector::skip_header_HeapWords(); }

@ -8116,42 +8116,42 @@ size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
}

TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {

GCMemoryManager* manager = CMSHeap::heap()->old_manager();
switch (phase) {
case CMSCollector::InitialMarking:
initialize(true /* fullGC */ ,
cause /* cause of the GC */,
true /* recordGCBeginTime */,
true /* recordPreGCUsage */,
false /* recordPeakUsage */,
false /* recordPostGCusage */,
true /* recordAccumulatedGCTime */,
false /* recordGCEndTime */,
false /* countCollection */ );
initialize(manager /* GC manager */ ,
cause /* cause of the GC */,
true /* recordGCBeginTime */,
true /* recordPreGCUsage */,
false /* recordPeakUsage */,
false /* recordPostGCusage */,
true /* recordAccumulatedGCTime */,
false /* recordGCEndTime */,
false /* countCollection */ );
break;

case CMSCollector::FinalMarking:
initialize(true /* fullGC */ ,
cause /* cause of the GC */,
false /* recordGCBeginTime */,
false /* recordPreGCUsage */,
false /* recordPeakUsage */,
false /* recordPostGCusage */,
true /* recordAccumulatedGCTime */,
false /* recordGCEndTime */,
false /* countCollection */ );
initialize(manager /* GC manager */ ,
cause /* cause of the GC */,
false /* recordGCBeginTime */,
false /* recordPreGCUsage */,
false /* recordPeakUsage */,
false /* recordPostGCusage */,
true /* recordAccumulatedGCTime */,
false /* recordGCEndTime */,
false /* countCollection */ );
break;

case CMSCollector::Sweeping:
initialize(true /* fullGC */ ,
cause /* cause of the GC */,
false /* recordGCBeginTime */,
false /* recordPreGCUsage */,
true /* recordPeakUsage */,
true /* recordPostGCusage */,
false /* recordAccumulatedGCTime */,
true /* recordGCEndTime */,
true /* countCollection */ );
initialize(manager /* GC manager */ ,
cause /* cause of the GC */,
false /* recordGCBeginTime */,
false /* recordPreGCUsage */,
true /* recordPeakUsage */,
true /* recordPostGCusage */,
false /* recordAccumulatedGCTime */,
true /* recordGCEndTime */,
true /* countCollection */ );
break;

default:

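The constructor above now passes the old-generation manager instead of a bare fullGC flag, while the boolean record* arguments still select which statistics each CMS phase contributes. A rough standalone sketch of the underlying RAII idea (hypothetical types, not the real TraceMemoryManagerStats):

#include <chrono>
#include <iostream>

// Hypothetical sketch of an RAII stats scope: construction marks the start
// of a GC phase, destruction marks its end. Flags chosen at construction
// decide what gets recorded, mirroring the initialize() calls above.
class TraceStatsScope {
  bool _record_end;
  std::chrono::steady_clock::time_point _start;
public:
  TraceStatsScope(bool record_begin, bool record_end) : _record_end(record_end) {
    _start = std::chrono::steady_clock::now();
    if (record_begin) std::cout << "phase begin recorded\n";
  }
  ~TraceStatsScope() {
    if (_record_end) {
      auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(
          std::chrono::steady_clock::now() - _start).count();
      std::cout << "phase end recorded after " << ms << " ms\n";
    }
  }
};

int main() {
  TraceStatsScope initial_mark(/*record_begin=*/true, /*record_end=*/false);
  TraceStatsScope sweep(/*record_begin=*/false, /*record_end=*/true);
  return 0;  // destructors fire here, closing both scopes
}
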
@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc/cms/gSpaceCounters.hpp"
#include "gc/shared/generation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "utilities/macros.hpp"

@ -71,3 +72,7 @@ GSpaceCounters::GSpaceCounters(const char* name, int ordinal, size_t max_size,
_gen->capacity(), CHECK);
}
}

GSpaceCounters::~GSpaceCounters() {
if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
}

@ -52,9 +52,7 @@ class GSpaceCounters: public CHeapObj<mtGC> {
GSpaceCounters(const char* name, int ordinal, size_t max_size, Generation* g,
GenerationCounters* gc, bool sampled=true);

~GSpaceCounters() {
if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
}
~GSpaceCounters();

inline void update_capacity() {
_capacity->set_value(_gen->capacity());

@ -32,6 +32,7 @@
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"

// Closure used for updating remembered sets and recording references that
// point into the collection set while the mutator is running.
@ -319,7 +320,7 @@ void DirtyCardQueueSet::abandon_logs() {
clear();
// Since abandon is done only at safepoints, we can safely manipulate
// these queues.
for (JavaThread* t = Threads::first(); t; t = t->next()) {
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
t->dirty_card_queue().reset();
}
shared_dirty_card_queue()->reset();
@ -338,7 +339,7 @@ void DirtyCardQueueSet::concatenate_logs() {
int save_max_completed_queue = _max_completed_queue;
_max_completed_queue = max_jint;
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
for (JavaThread* t = Threads::first(); t; t = t->next()) {
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
concatenate_log(t->dirty_card_queue());
}
concatenate_log(_shared_dirty_card_queue);

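Both loops above move from the bare Threads::first()/next() list walk to JavaThreadIteratorWithHandle, which keeps the thread list alive for the duration of the iteration (Thread-SMR). A simplified standalone sketch of that handle-scoped iterator shape, with hypothetical stand-in types:

#include <iostream>
#include <memory>
#include <vector>

struct JavaThreadStub { int id; };

// Hypothetical sketch: the iterator holds a snapshot (a shared_ptr serves as
// the "handle" here) so the list cannot go away while we walk it, which is
// the property JavaThreadIteratorWithHandle provides via Thread-SMR.
class ThreadListIterator {
  std::shared_ptr<const std::vector<JavaThreadStub>> _snapshot;  // keeps list alive
  size_t _index = 0;
public:
  explicit ThreadListIterator(std::shared_ptr<const std::vector<JavaThreadStub>> list)
      : _snapshot(std::move(list)) {}
  const JavaThreadStub* next() {
    return _index < _snapshot->size() ? &(*_snapshot)[_index++] : nullptr;
  }
};

int main() {
  std::shared_ptr<const std::vector<JavaThreadStub>> threads =
      std::make_shared<std::vector<JavaThreadStub>>(
          std::vector<JavaThreadStub>{{1}, {2}, {3}});
  // Same shape as: for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); )
  for (ThreadListIterator it(threads); const JavaThreadStub* t = it.next(); ) {
    std::cout << "visit thread " << t->id << '\n';
  }
  return 0;
}
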
@ -26,6 +26,7 @@
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/gcArguments.inline.hpp"
#include "runtime/globals.hpp"
@ -92,6 +93,22 @@ void G1Arguments::initialize_flags() {
}

log_trace(gc)("MarkStackSize: %uk MarkStackSizeMax: %uk", (unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));

#ifdef COMPILER2
// Enable loop strip mining to offer better pause time guarantees
if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
FLAG_SET_DEFAULT(UseCountedLoopSafepoints, true);
}
if (UseCountedLoopSafepoints && FLAG_IS_DEFAULT(LoopStripMiningIter)) {
FLAG_SET_DEFAULT(LoopStripMiningIter, 1000);
}
#endif
}

bool G1Arguments::parse_verification_type(const char* type) {
G1CollectedHeap::heap()->verifier()->parse_verification_type(type);
// Always return true because we want to parse all values.
return true;
}

CollectedHeap* G1Arguments::create_heap() {

@ -32,6 +32,7 @@ class CollectedHeap;
class G1Arguments : public GCArguments {
public:
virtual void initialize_flags();
virtual bool parse_verification_type(const char* type);
virtual size_t conservative_max_heap_alignment();
virtual CollectedHeap* create_heap();
};

@ -39,12 +39,12 @@
#include "gc/g1/g1ConcurrentRefineThread.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1FullCollector.hpp"
#include "gc/g1/g1FullGCScope.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HeapSizingPolicy.hpp"
#include "gc/g1/g1HeapTransition.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1MemoryPool.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1Policy.hpp"
@ -81,6 +81,7 @@
#include "runtime/atomic.hpp"
#include "runtime/init.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
@ -1083,7 +1084,6 @@ void G1CollectedHeap::print_hrm_post_compaction() {
PostCompactionPrinterClosure cl(hr_printer());
heap_region_iterate(&cl);
}

}

void G1CollectedHeap::abort_concurrent_cycle() {
@ -1132,7 +1132,7 @@ void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
assert(used() == recalculate_used(), "Should be equal");
_verifier->verify_region_sets_optional();
_verifier->verify_before_gc();
_verifier->verify_before_gc(G1HeapVerifier::G1VerifyFull);
_verifier->check_bitmaps("Full GC Start");
}

@ -1173,7 +1173,7 @@ void G1CollectedHeap::verify_after_full_collection() {
check_gc_time_stamps();
_hrm.verify_optional();
_verifier->verify_region_sets_optional();
_verifier->verify_after_gc();
_verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);
// Clear the previous marking bitmap, if needed for bitmap verification.
// Note we cannot do this when we clear the next marking bitmap in
// G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
@ -1217,34 +1217,6 @@ void G1CollectedHeap::print_heap_after_full_collection(G1HeapTransition* heap_tr
#endif
}

void G1CollectedHeap::do_full_collection_inner(G1FullGCScope* scope) {
GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
g1_policy()->record_full_collection_start();

print_heap_before_gc();
print_heap_regions();

abort_concurrent_cycle();
verify_before_full_collection(scope->is_explicit_gc());

gc_prologue(true);
prepare_heap_for_full_collection();

G1FullCollector collector(scope, ref_processor_stw(), concurrent_mark()->next_mark_bitmap(), workers()->active_workers());
collector.prepare_collection();
collector.collect();
collector.complete_collection();

prepare_heap_for_mutators();

g1_policy()->record_full_collection_end();
gc_epilogue(true);

verify_after_full_collection();

print_heap_after_full_collection(scope->heap_transition());
}

bool G1CollectedHeap::do_full_collection(bool explicit_gc,
bool clear_all_soft_refs) {
assert_at_safepoint(true /* should_be_vm_thread */);
@ -1257,8 +1229,12 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
const bool do_clear_all_soft_refs = clear_all_soft_refs ||
collector_policy()->should_clear_all_soft_refs();

G1FullGCScope scope(explicit_gc, do_clear_all_soft_refs);
do_full_collection_inner(&scope);
G1FullCollector collector(this, &_full_gc_memory_manager, explicit_gc, do_clear_all_soft_refs);
GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);

collector.prepare_collection();
collector.collect();
collector.complete_collection();

// Full collection was successfully completed.
return true;

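do_full_collection now builds the G1FullCollector directly and drives the same three-step lifecycle that do_full_collection_inner used to wrap: prepare, collect, complete. A minimal sketch of that phase-object pattern, using a hypothetical class rather than the real G1FullCollector (which also takes the heap, a memory manager and the soft-reference policy):

#include <iostream>

// Hypothetical sketch of the three-phase collector lifecycle visible above.
class FullCollector {
public:
  void prepare_collection()  { std::cout << "prologue, verification, heap setup\n"; }
  void collect()             { std::cout << "mark live objects, then compact\n"; }
  void complete_collection() { std::cout << "epilogue, verification, reporting\n"; }
};

int main() {
  FullCollector collector;          // scoped to the safepoint operation
  collector.prepare_collection();
  collector.collect();
  collector.complete_collection();  // heap is usable by mutators again
  return 0;
}
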
@ -1550,6 +1526,11 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
CollectedHeap(),
_young_gen_sampling_thread(NULL),
_collector_policy(collector_policy),
_memory_manager("G1 Young Generation", "end of minor GC"),
_full_gc_memory_manager("G1 Old Generation", "end of major GC"),
_eden_pool(NULL),
_survivor_pool(NULL),
_old_pool(NULL),
_gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
_gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
_g1_policy(create_g1_policy(_gc_timer_stw)),
@ -1854,6 +1835,20 @@ jint G1CollectedHeap::initialize() {
return JNI_OK;
}

void G1CollectedHeap::initialize_serviceability() {
_eden_pool = new G1EdenPool(this);
_survivor_pool = new G1SurvivorPool(this);
_old_pool = new G1OldGenPool(this);

_full_gc_memory_manager.add_pool(_eden_pool);
_full_gc_memory_manager.add_pool(_survivor_pool);
_full_gc_memory_manager.add_pool(_old_pool);

_memory_manager.add_pool(_eden_pool);
_memory_manager.add_pool(_survivor_pool);

}

void G1CollectedHeap::stop() {
// Stop all concurrent threads. We do this to make sure these threads
// do not continue to execute and access resources (e.g. logging)
@ -1879,6 +1874,7 @@ size_t G1CollectedHeap::conservative_max_heap_alignment() {
}

void G1CollectedHeap::post_initialize() {
CollectedHeap::post_initialize();
ref_processing_init();
}

@ -2653,11 +2649,9 @@ G1CollectedHeap::doConcurrentMark() {

size_t G1CollectedHeap::pending_card_num() {
size_t extra_cards = 0;
JavaThread *curr = Threads::first();
while (curr != NULL) {
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *curr = jtiwh.next(); ) {
DirtyCardQueue& dcq = curr->dirty_card_queue();
extra_cards += dcq.size();
curr = curr->next();
}
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
size_t buffer_size = dcqs.buffer_size();
@ -2963,13 +2957,17 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {

GCTraceCPUTime tcpu;

G1HeapVerifier::G1VerifyType verify_type;
FormatBuffer<> gc_string("Pause ");
if (collector_state()->during_initial_mark_pause()) {
gc_string.append("Initial Mark");
verify_type = G1HeapVerifier::G1VerifyInitialMark;
} else if (collector_state()->gcs_are_young()) {
gc_string.append("Young");
verify_type = G1HeapVerifier::G1VerifyYoungOnly;
} else {
gc_string.append("Mixed");
verify_type = G1HeapVerifier::G1VerifyMixed;
}
GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);

@ -2980,7 +2978,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());

TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
TraceMemoryManagerStats tms(&_memory_manager, gc_cause());

// If the secondary_free_list is not empty, append it to the
// free_list. No need to wait for the cleanup operation to finish;
@ -3010,7 +3008,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
heap_region_iterate(&v_cl);
}

_verifier->verify_before_gc();
_verifier->verify_before_gc(verify_type);

_verifier->check_bitmaps("GC Start");

@ -3170,7 +3168,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
heap_region_iterate(&v_cl);
}

_verifier->verify_after_gc();
_verifier->verify_after_gc(verify_type);
_verifier->check_bitmaps("GC End");

assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
@ -5394,3 +5392,18 @@ void G1CollectedHeap::rebuild_strong_code_roots() {
RebuildStrongCodeRootClosure blob_cl(this);
CodeCache::blobs_do(&blob_cl);
}

GrowableArray<GCMemoryManager*> G1CollectedHeap::memory_managers() {
GrowableArray<GCMemoryManager*> memory_managers(2);
memory_managers.append(&_memory_manager);
memory_managers.append(&_full_gc_memory_manager);
return memory_managers;
}

GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() {
GrowableArray<MemoryPool*> memory_pools(3);
memory_pools.append(_eden_pool);
memory_pools.append(_survivor_pool);
memory_pools.append(_old_pool);
return memory_pools;
}

@ -42,14 +42,15 @@
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/g1SurvivorRegions.hpp"
#include "gc/g1/g1YCTypes.hpp"
#include "gc/g1/hSpaceCounters.hpp"
#include "gc/g1/heapRegionManager.hpp"
#include "gc/g1/heapRegionSet.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/plab.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "memory/memRegion.hpp"
#include "services/memoryManager.hpp"
#include "utilities/stack.hpp"

// A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
@ -64,6 +65,7 @@ class GenerationSpec;
class G1ParScanThreadState;
class G1ParScanThreadStateSet;
class G1ParScanThreadState;
class MemoryPool;
class ObjectClosure;
class SpaceClosure;
class CompactibleSpaceClosure;
@ -126,6 +128,7 @@ class G1CollectedHeap : public CollectedHeap {
friend class VM_G1IncCollectionPause;
friend class VMStructs;
friend class MutatorAllocRegion;
friend class G1FullCollector;
friend class G1GCAllocRegion;
friend class G1HeapVerifier;

@ -148,6 +151,13 @@ private:
WorkGang* _workers;
G1CollectorPolicy* _collector_policy;

GCMemoryManager _memory_manager;
GCMemoryManager _full_gc_memory_manager;

MemoryPool* _eden_pool;
MemoryPool* _survivor_pool;
MemoryPool* _old_pool;

static size_t _humongous_object_threshold_in_words;

// The secondary free list which contains regions that have been
@ -161,6 +171,8 @@ private:
// It keeps track of the humongous regions.
HeapRegionSet _humongous_set;

virtual void initialize_serviceability();

void eagerly_reclaim_humongous_regions();
// Start a new incremental collection set for the next pause.
void start_new_collection_set();
@ -517,7 +529,6 @@ protected:
private:
// Internal helpers used during full GC to split it up to
// increase readability.
void do_full_collection_inner(G1FullGCScope* scope);
void abort_concurrent_cycle();
void verify_before_full_collection(bool explicit_gc);
void prepare_heap_for_full_collection();
@ -1006,6 +1017,9 @@ public:
// Adaptive size policy. No such thing for g1.
virtual AdaptiveSizePolicy* size_policy() { return NULL; }

virtual GrowableArray<GCMemoryManager*> memory_managers();
virtual GrowableArray<MemoryPool*> memory_pools();

// The rem set and barrier set.
G1RemSet* g1_rem_set() const { return _g1_rem_set; }

@ -1015,9 +1015,7 @@ void G1ConcurrentMark::checkpoint_roots_final(bool clear_all_soft_refs) {
SvcGCMarker sgcm(SvcGCMarker::OTHER);

if (VerifyDuringGC) {
HandleMark hm; // handle scope
g1h->prepare_for_verify();
Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
g1h->verifier()->verify(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "During GC (before)");
}
g1h->verifier()->check_bitmaps("Remark Start");

@ -1038,9 +1036,7 @@ void G1ConcurrentMark::checkpoint_roots_final(bool clear_all_soft_refs) {

// Verify the heap w.r.t. the previous marking bitmap.
if (VerifyDuringGC) {
HandleMark hm; // handle scope
g1h->prepare_for_verify();
Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
g1h->verifier()->verify(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "During GC (overflow)");
}

// Clear the marking state because we will be restarting
@ -1055,9 +1051,7 @@ void G1ConcurrentMark::checkpoint_roots_final(bool clear_all_soft_refs) {
true /* expected_active */);

if (VerifyDuringGC) {
HandleMark hm; // handle scope
g1h->prepare_for_verify();
Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
g1h->verifier()->verify(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UseNextMarking, "During GC (after)");
}
g1h->verifier()->check_bitmaps("Remark End");
assert(!restart_for_overflow(), "sanity");
@ -1189,9 +1183,7 @@ void G1ConcurrentMark::cleanup() {
g1h->verifier()->verify_region_sets_optional();

if (VerifyDuringGC) {
HandleMark hm; // handle scope
g1h->prepare_for_verify();
Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
g1h->verifier()->verify(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "During GC (before)");
}
g1h->verifier()->check_bitmaps("Cleanup Start");

@ -1263,9 +1255,7 @@ void G1ConcurrentMark::cleanup() {
Universe::update_heap_info_at_gc();

if (VerifyDuringGC) {
HandleMark hm; // handle scope
g1h->prepare_for_verify();
Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)");
g1h->verifier()->verify(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "During GC (after)");
}

g1h->verifier()->check_bitmaps("Cleanup End");
@ -1756,28 +1746,24 @@ private:
G1ConcurrentMark* _cm;
public:
void work(uint worker_id) {
// Since all available tasks are actually started, we should
// only proceed if we're supposed to be active.
if (worker_id < _cm->active_tasks()) {
G1CMTask* task = _cm->task(worker_id);
task->record_start_time();
{
ResourceMark rm;
HandleMark hm;
G1CMTask* task = _cm->task(worker_id);
task->record_start_time();
{
ResourceMark rm;
HandleMark hm;

G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
Threads::threads_do(&threads_f);
}

do {
task->do_marking_step(1000000000.0 /* something very large */,
true /* do_termination */,
false /* is_serial */);
} while (task->has_aborted() && !_cm->has_overflown());
// If we overflow, then we do not want to restart. We instead
// want to abort remark and do concurrent marking again.
task->record_end_time();
G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
Threads::threads_do(&threads_f);
}

do {
task->do_marking_step(1000000000.0 /* something very large */,
true /* do_termination */,
false /* is_serial */);
} while (task->has_aborted() && !_cm->has_overflown());
// If we overflow, then we do not want to restart. We instead
// want to abort remark and do concurrent marking again.
task->record_end_time();
}

G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :

@ -26,6 +26,7 @@
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.hpp"
#include "utilities/debug.hpp"
@ -33,6 +34,107 @@
#include "utilities/pair.hpp"
#include <math.h>

G1ConcurrentRefineThread* G1ConcurrentRefineThreadControl::create_refinement_thread(uint worker_id, bool initializing) {
G1ConcurrentRefineThread* result = NULL;
if (initializing || !InjectGCWorkerCreationFailure) {
result = new G1ConcurrentRefineThread(_cr, worker_id);
}
if (result == NULL || result->osthread() == NULL) {
log_warning(gc)("Failed to create refinement thread %u, no more %s",
worker_id,
result == NULL ? "memory" : "OS threads");
}
return result;
}

G1ConcurrentRefineThreadControl::G1ConcurrentRefineThreadControl() :
_cr(NULL),
_threads(NULL),
_num_max_threads(0)
{
}

G1ConcurrentRefineThreadControl::~G1ConcurrentRefineThreadControl() {
for (uint i = 0; i < _num_max_threads; i++) {
G1ConcurrentRefineThread* t = _threads[i];
if (t != NULL) {
delete t;
}
}
FREE_C_HEAP_ARRAY(G1ConcurrentRefineThread*, _threads);
}

jint G1ConcurrentRefineThreadControl::initialize(G1ConcurrentRefine* cr, uint num_max_threads) {
assert(cr != NULL, "G1ConcurrentRefine must not be NULL");
_cr = cr;
_num_max_threads = num_max_threads;

_threads = NEW_C_HEAP_ARRAY_RETURN_NULL(G1ConcurrentRefineThread*, num_max_threads, mtGC);
if (_threads == NULL) {
vm_shutdown_during_initialization("Could not allocate thread holder array.");
return JNI_ENOMEM;
}

for (uint i = 0; i < num_max_threads; i++) {
if (UseDynamicNumberOfGCThreads && i != 0 /* Always start first thread. */) {
_threads[i] = NULL;
} else {
_threads[i] = create_refinement_thread(i, true);
if (_threads[i] == NULL) {
vm_shutdown_during_initialization("Could not allocate refinement threads.");
return JNI_ENOMEM;
}
}
}
return JNI_OK;
}

void G1ConcurrentRefineThreadControl::maybe_activate_next(uint cur_worker_id) {
assert(cur_worker_id < _num_max_threads,
"Activating another thread from %u not allowed since there can be at most %u",
cur_worker_id, _num_max_threads);
if (cur_worker_id == (_num_max_threads - 1)) {
// Already the last thread, there is no more thread to activate.
return;
}

uint worker_id = cur_worker_id + 1;
G1ConcurrentRefineThread* thread_to_activate = _threads[worker_id];
if (thread_to_activate == NULL) {
// Still need to create the thread...
_threads[worker_id] = create_refinement_thread(worker_id, false);
thread_to_activate = _threads[worker_id];
}
if (thread_to_activate != NULL && !thread_to_activate->is_active()) {
thread_to_activate->activate();
}
}

void G1ConcurrentRefineThreadControl::print_on(outputStream* st) const {
for (uint i = 0; i < _num_max_threads; ++i) {
if (_threads[i] != NULL) {
_threads[i]->print_on(st);
st->cr();
}
}
}

void G1ConcurrentRefineThreadControl::worker_threads_do(ThreadClosure* tc) {
for (uint i = 0; i < _num_max_threads; i++) {
if (_threads[i] != NULL) {
tc->do_thread(_threads[i]);
}
}
}

void G1ConcurrentRefineThreadControl::stop() {
for (uint i = 0; i < _num_max_threads; i++) {
if (_threads[i] != NULL) {
_threads[i]->stop();
}
}
}

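G1ConcurrentRefineThreadControl above allocates the full holder array up front but, with UseDynamicNumberOfGCThreads, creates only thread 0 and fills the remaining slots lazily from maybe_activate_next(). A compact standalone sketch of that create-on-demand cascade, with a hypothetical worker type:

#include <iostream>
#include <memory>
#include <vector>

// Hypothetical sketch: slots start empty; a worker is created the first
// time the backlog is large enough to want its successor.
struct Worker {
  unsigned id;
  bool active;
  explicit Worker(unsigned i) : id(i), active(false) {}
  void activate() { active = true; std::cout << "worker " << id << " activated\n"; }
};

class WorkerControl {
  std::vector<std::unique_ptr<Worker>> _slots;
public:
  explicit WorkerControl(unsigned max_workers) : _slots(max_workers) {
    _slots[0] = std::make_unique<Worker>(0);  // always start the first worker
    _slots[0]->activate();
  }
  void maybe_activate_next(unsigned cur) {
    if (cur + 1 >= _slots.size()) return;      // already the last slot
    std::unique_ptr<Worker>& next = _slots[cur + 1];
    if (!next) next = std::make_unique<Worker>(cur + 1);  // lazy creation
    if (next && !next->active) next->activate();
  }
};

int main() {
  WorkerControl control(3);
  control.maybe_activate_next(0);  // backlog grew: bring worker 1 online
  control.maybe_activate_next(1);  // and worker 2
  control.maybe_activate_next(2);  // no-op: no successor exists
  return 0;
}
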
// Arbitrary but large limits, to simplify some of the zone calculations.
// The general idea is to allow expressions like
// MIN2(x OP y, max_XXX_zone)
@ -96,7 +198,7 @@ static Thresholds calc_thresholds(size_t green_zone,
size_t yellow_zone,
uint worker_i) {
double yellow_size = yellow_zone - green_zone;
double step = yellow_size / G1ConcurrentRefine::thread_num();
double step = yellow_size / G1ConcurrentRefine::max_num_threads();
if (worker_i == 0) {
// Potentially activate worker 0 more aggressively, to keep
// available buffers near green_zone value. When yellow_size is

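The rename to max_num_threads() leaves the threshold spacing unchanged: the yellow range is divided into one step per possible refinement thread, so each successive worker wakes up deeper into the range. For example, with an assumed green_zone of 256 buffers, yellow_zone of 1024 and 8 threads, step = (1024 - 256) / 8 = 96 buffers per worker. A tiny check of that arithmetic (values are illustrative, not G1 defaults):

#include <cassert>

int main() {
  // Worked example for the step computation above.
  const double green_zone = 256.0;
  const double yellow_zone = 1024.0;
  const double max_num_threads = 8.0;
  const double yellow_size = yellow_zone - green_zone;  // 768 buffers
  const double step = yellow_size / max_num_threads;    // 96 buffers per worker
  assert(step == 96.0);
  return 0;
}
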
@ -115,8 +217,7 @@ G1ConcurrentRefine::G1ConcurrentRefine(size_t green_zone,
size_t yellow_zone,
size_t red_zone,
size_t min_yellow_zone_size) :
_threads(NULL),
_n_worker_threads(thread_num()),
_thread_control(),
_green_zone(green_zone),
_yellow_zone(yellow_zone),
_red_zone(red_zone),
@ -125,9 +226,13 @@ G1ConcurrentRefine::G1ConcurrentRefine(size_t green_zone,
assert_zone_constraints_gyr(green_zone, yellow_zone, red_zone);
}

jint G1ConcurrentRefine::initialize() {
return _thread_control.initialize(this, max_num_threads());
}

static size_t calc_min_yellow_zone_size() {
size_t step = G1ConcRefinementThresholdStep;
uint n_workers = G1ConcurrentRefine::thread_num();
uint n_workers = G1ConcurrentRefine::max_num_threads();
if ((max_yellow_zone / step) < n_workers) {
return max_yellow_zone;
} else {
@ -191,77 +296,27 @@ G1ConcurrentRefine* G1ConcurrentRefine::create(jint* ecode) {
return NULL;
}

cr->_threads = NEW_C_HEAP_ARRAY_RETURN_NULL(G1ConcurrentRefineThread*, cr->_n_worker_threads, mtGC);
if (cr->_threads == NULL) {
*ecode = JNI_ENOMEM;
vm_shutdown_during_initialization("Could not allocate an array for G1ConcurrentRefineThread");
return NULL;
}

uint worker_id_offset = DirtyCardQueueSet::num_par_ids();

G1ConcurrentRefineThread *next = NULL;
for (uint i = cr->_n_worker_threads - 1; i != UINT_MAX; i--) {
Thresholds thresholds = calc_thresholds(green_zone, yellow_zone, i);
G1ConcurrentRefineThread* t =
new G1ConcurrentRefineThread(cr,
next,
worker_id_offset,
i,
activation_level(thresholds),
deactivation_level(thresholds));
assert(t != NULL, "Conc refine should have been created");
if (t->osthread() == NULL) {
*ecode = JNI_ENOMEM;
vm_shutdown_during_initialization("Could not create G1ConcurrentRefineThread");
return NULL;
}

assert(t->cr() == cr, "Conc refine thread should refer to this");
cr->_threads[i] = t;
next = t;
}

*ecode = JNI_OK;
*ecode = cr->initialize();
return cr;
}

void G1ConcurrentRefine::stop() {
for (uint i = 0; i < _n_worker_threads; i++) {
_threads[i]->stop();
}
}

void G1ConcurrentRefine::update_thread_thresholds() {
for (uint i = 0; i < _n_worker_threads; i++) {
Thresholds thresholds = calc_thresholds(_green_zone, _yellow_zone, i);
_threads[i]->update_thresholds(activation_level(thresholds),
deactivation_level(thresholds));
}
_thread_control.stop();
}

G1ConcurrentRefine::~G1ConcurrentRefine() {
for (uint i = 0; i < _n_worker_threads; i++) {
delete _threads[i];
}
FREE_C_HEAP_ARRAY(G1ConcurrentRefineThread*, _threads);
}

void G1ConcurrentRefine::threads_do(ThreadClosure *tc) {
for (uint i = 0; i < _n_worker_threads; i++) {
tc->do_thread(_threads[i]);
}
_thread_control.worker_threads_do(tc);
}

uint G1ConcurrentRefine::thread_num() {
uint G1ConcurrentRefine::max_num_threads() {
return G1ConcRefinementThreads;
}

void G1ConcurrentRefine::print_threads_on(outputStream* st) const {
for (uint i = 0; i < _n_worker_threads; ++i) {
_threads[i]->print_on(st);
st->cr();
}
_thread_control.print_on(st);
}

static size_t calc_new_green_zone(size_t green,
@ -326,16 +381,15 @@ void G1ConcurrentRefine::adjust(double update_rs_time,

if (G1UseAdaptiveConcRefinement) {
update_zones(update_rs_time, update_rs_processed_buffers, goal_ms);
update_thread_thresholds();

// Change the barrier params
if (_n_worker_threads == 0) {
if (max_num_threads() == 0) {
// Disable dcqs notification when there are no threads to notify.
dcqs.set_process_completed_threshold(INT_MAX);
} else {
// Worker 0 is the primary; wakeup is via dcqs notification.
STATIC_ASSERT(max_yellow_zone <= INT_MAX);
size_t activate = _threads[0]->activation_threshold();
size_t activate = activation_threshold(0);
dcqs.set_process_completed_threshold((int)activate);
}
dcqs.set_max_completed_queue((int)red_zone());
@ -349,3 +403,42 @@ void G1ConcurrentRefine::adjust(double update_rs_time,
}
dcqs.notify_if_necessary();
}

size_t G1ConcurrentRefine::activation_threshold(uint worker_id) const {
Thresholds thresholds = calc_thresholds(_green_zone, _yellow_zone, worker_id);
return activation_level(thresholds);
}

size_t G1ConcurrentRefine::deactivation_threshold(uint worker_id) const {
Thresholds thresholds = calc_thresholds(_green_zone, _yellow_zone, worker_id);
return deactivation_level(thresholds);
}

uint G1ConcurrentRefine::worker_id_offset() {
return DirtyCardQueueSet::num_par_ids();
}

void G1ConcurrentRefine::maybe_activate_more_threads(uint worker_id, size_t num_cur_buffers) {
if (num_cur_buffers > activation_threshold(worker_id + 1)) {
_thread_control.maybe_activate_next(worker_id);
}
}

bool G1ConcurrentRefine::do_refinement_step(uint worker_id) {
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();

size_t curr_buffer_num = dcqs.completed_buffers_num();
// If the number of the buffers falls down into the yellow zone,
// that means that the transition period after the evacuation pause has ended.
// Since the value written to the DCQS is the same for all threads, there is no
// need to synchronize.
if (dcqs.completed_queue_padding() > 0 && curr_buffer_num <= yellow_zone()) {
dcqs.set_completed_queue_padding(0);
}

maybe_activate_more_threads(worker_id, curr_buffer_num);

// Process the next buffer, if there are enough left.
return dcqs.refine_completed_buffer_concurrently(worker_id + worker_id_offset(),
deactivation_threshold(worker_id));
}

@ -30,30 +30,63 @@

// Forward decl
class CardTableEntryClosure;
class G1ConcurrentRefine;
class G1ConcurrentRefineThread;
class outputStream;
class ThreadClosure;

class G1ConcurrentRefine : public CHeapObj<mtGC> {
// Helper class for refinement thread management. Used to start, stop and
// iterate over them.
class G1ConcurrentRefineThreadControl VALUE_OBJ_CLASS_SPEC {
G1ConcurrentRefine* _cr;

G1ConcurrentRefineThread** _threads;
uint _n_worker_threads;
/*
* The value of the update buffer queue length falls into one of 3 zones:
* green, yellow, red. If the value is in [0, green) nothing is
* done, the buffers are left unprocessed to enable the caching effect of the
* dirtied cards. In the yellow zone [green, yellow) the concurrent refinement
* threads are gradually activated. In [yellow, red) all threads are
* running. If the length becomes red (max queue length) the mutators start
* processing the buffers.
*
* There are some interesting cases (when G1UseAdaptiveConcRefinement
* is turned off):
* 1) green = yellow = red = 0. In this case the mutator will process all
* buffers. Except for those that are created by the deferred updates
* machinery during a collection.
* 2) green = 0. Means no caching. Can be a good way to minimize the
* amount of time spent updating rsets during a collection.
*/
uint _num_max_threads;

// Create the refinement thread for the given worker id.
// If initializing is true, ignore InjectGCWorkerCreationFailure.
G1ConcurrentRefineThread* create_refinement_thread(uint worker_id, bool initializing);
public:
G1ConcurrentRefineThreadControl();
~G1ConcurrentRefineThreadControl();

jint initialize(G1ConcurrentRefine* cr, uint num_max_threads);

// If there is a "successor" thread that can be activated given the current id,
// activate it.
void maybe_activate_next(uint cur_worker_id);

void print_on(outputStream* st) const;
void worker_threads_do(ThreadClosure* tc);
void stop();
};

// Controls refinement threads and their activation based on the number of completed
// buffers currently available in the global dirty card queue.
// Refinement threads pick work from the queue based on these thresholds. They are activated
// gradually based on the amount of work to do.
// Refinement thread n activates thread n+1 if the instance of this class determines there
// is enough work available. Threads deactivate themselves if the current amount of
// completed buffers falls below their individual threshold.
class G1ConcurrentRefine : public CHeapObj<mtGC> {
G1ConcurrentRefineThreadControl _thread_control;
/*
* The value of the completed dirty card queue length falls into one of 3 zones:
* green, yellow, red. If the value is in [0, green) nothing is
* done, the buffers are left unprocessed to enable the caching effect of the
* dirtied cards. In the yellow zone [green, yellow) the concurrent refinement
* threads are gradually activated. In [yellow, red) all threads are
* running. If the length becomes red (max queue length) the mutators start
* processing the buffers.
*
* There are some interesting cases (when G1UseAdaptiveConcRefinement
* is turned off):
* 1) green = yellow = red = 0. In this case the mutator will process all
* buffers. Except for those that are created by the deferred updates
* machinery during a collection.
* 2) green = 0. Means no caching. Can be a good way to minimize the
* amount of time spent updating remembered sets during a collection.
*/
size_t _green_zone;
size_t _yellow_zone;
size_t _red_zone;
@ -69,24 +102,32 @@ class G1ConcurrentRefine : public CHeapObj<mtGC> {
size_t update_rs_processed_buffers,
double goal_ms);

// Update thread thresholds to account for updated zone values.
void update_thread_thresholds();
static uint worker_id_offset();
void maybe_activate_more_threads(uint worker_id, size_t num_cur_buffers);

public:
jint initialize();
public:
~G1ConcurrentRefine();

// Returns a G1ConcurrentRefine instance if succeeded to create/initialize G1ConcurrentRefine and G1ConcurrentRefineThreads.
// Otherwise, returns NULL with error code.
// Returns a G1ConcurrentRefine instance if succeeded to create/initialize the
// G1ConcurrentRefine instance. Otherwise, returns NULL with error code.
static G1ConcurrentRefine* create(jint* ecode);

void stop();

// Adjust refinement thresholds based on work done during the pause and the goal time.
void adjust(double update_rs_time, size_t update_rs_processed_buffers, double goal_ms);

size_t activation_threshold(uint worker_id) const;
size_t deactivation_threshold(uint worker_id) const;
// Perform a single refinement step. Called by the refinement threads when woken up.
bool do_refinement_step(uint worker_id);

// Iterate over all concurrent refinement threads applying the given closure.
void threads_do(ThreadClosure *tc);

static uint thread_num();
// Maximum number of refinement threads.
static uint max_num_threads();

void print_threads_on(outputStream* st) const;

@ -25,32 +25,20 @@
#include "precompiled.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"

G1ConcurrentRefineThread::G1ConcurrentRefineThread(G1ConcurrentRefine* cr,
G1ConcurrentRefineThread *next,
uint worker_id_offset,
uint worker_id,
size_t activate,
size_t deactivate) :
G1ConcurrentRefineThread::G1ConcurrentRefineThread(G1ConcurrentRefine* cr, uint worker_id) :
ConcurrentGCThread(),
_worker_id_offset(worker_id_offset),
_worker_id(worker_id),
_active(false),
_next(next),
_monitor(NULL),
_cr(cr),
_vtime_accum(0.0),
_activation_threshold(activate),
_deactivation_threshold(deactivate)
_vtime_accum(0.0)
{

// Each thread has its own monitor. The i-th thread is responsible for signaling
// to thread i+1 if the number of buffers in the queue exceeds a threshold for this
// thread. Monitors are also used to wake up the threads during termination.
@ -67,13 +55,6 @@ G1ConcurrentRefineThread::G1ConcurrentRefineThread(G1ConcurrentRefine* cr,
create_and_start();
}

void G1ConcurrentRefineThread::update_thresholds(size_t activate,
size_t deactivate) {
assert(deactivate < activate, "precondition");
_activation_threshold = activate;
_deactivation_threshold = deactivate;
}

void G1ConcurrentRefineThread::wait_for_completed_buffers() {
MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
while (!should_terminate() && !is_active()) {
@ -118,9 +99,9 @@ void G1ConcurrentRefineThread::run_service() {
}

size_t buffers_processed = 0;
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
log_debug(gc, refine)("Activated %d, on threshold: " SIZE_FORMAT ", current: " SIZE_FORMAT,
_worker_id, _activation_threshold, dcqs.completed_buffers_num());
log_debug(gc, refine)("Activated worker %d, on threshold: " SIZE_FORMAT ", current: " SIZE_FORMAT,
_worker_id, _cr->activation_threshold(_worker_id),
JavaThread::dirty_card_queue_set().completed_buffers_num());

{
SuspendibleThreadSetJoiner sts_join;
@ -131,33 +112,18 @@ void G1ConcurrentRefineThread::run_service() {
continue; // Re-check for termination after yield delay.
}

size_t curr_buffer_num = dcqs.completed_buffers_num();
// If the number of the buffers falls down into the yellow zone,
// that means that the transition period after the evacuation pause has ended.
if (dcqs.completed_queue_padding() > 0 && curr_buffer_num <= cr()->yellow_zone()) {
dcqs.set_completed_queue_padding(0);
}

// Check if we need to activate the next thread.
if ((_next != NULL) &&
!_next->is_active() &&
(curr_buffer_num > _next->_activation_threshold)) {
_next->activate();
}

// Process the next buffer, if there are enough left.
if (!dcqs.refine_completed_buffer_concurrently(_worker_id + _worker_id_offset, _deactivation_threshold)) {
break; // Deactivate, number of buffers fell below threshold.
if (!_cr->do_refinement_step(_worker_id)) {
break;
}
++buffers_processed;
}
}

deactivate();
log_debug(gc, refine)("Deactivated %d, off threshold: " SIZE_FORMAT
log_debug(gc, refine)("Deactivated worker %d, off threshold: " SIZE_FORMAT
", current: " SIZE_FORMAT ", processed: " SIZE_FORMAT,
_worker_id, _deactivation_threshold,
dcqs.completed_buffers_num(),
_worker_id, _cr->deactivation_threshold(_worker_id),
JavaThread::dirty_card_queue_set().completed_buffers_num(),
buffers_processed);

if (os::supports_vtime()) {

@ -43,43 +43,29 @@ class G1ConcurrentRefineThread: public ConcurrentGCThread {
uint _worker_id;
uint _worker_id_offset;

// The refinement threads collection is linked list. A predecessor can activate a successor
// when the number of the rset update buffer crosses a certain threshold. A successor
// would self-deactivate when the number of the buffers falls below the threshold.
bool _active;
G1ConcurrentRefineThread* _next;
Monitor* _monitor;
G1ConcurrentRefine* _cr;

// This thread's activation/deactivation thresholds
size_t _activation_threshold;
size_t _deactivation_threshold;

void wait_for_completed_buffers();

void set_active(bool x) { _active = x; }
bool is_active();
void activate();
// Deactivate this thread.
void deactivate();

bool is_primary() { return (_worker_id == 0); }

void run_service();
void stop_service();

public:
// Constructor
G1ConcurrentRefineThread(G1ConcurrentRefine* cr, G1ConcurrentRefineThread* next,
uint worker_id_offset, uint worker_id,
size_t activate, size_t deactivate);
G1ConcurrentRefineThread(G1ConcurrentRefine* cg1r, uint worker_id);

void update_thresholds(size_t activate, size_t deactivate);
size_t activation_threshold() const { return _activation_threshold; }
bool is_active();
// Activate this thread.
void activate();

// Total virtual time so far.
double vtime_accum() { return _vtime_accum; }

G1ConcurrentRefine* cr() { return _cr; }
};

#endif // SHARE_VM_GC_G1_G1CONCURRENTREFINETHREAD_HPP

@ -52,7 +52,7 @@ G1DefaultPolicy::G1DefaultPolicy(STWGCTimer* gc_timer) :
_analytics(new G1Analytics(&_predictor)),
_mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
_ihop_control(create_ihop_control(&_predictor)),
_policy_counters(new GCPolicyCounters("GarbageFirst", 1, 3)),
_policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
_young_list_fixed_length(0),
_short_lived_surv_rate_group(new SurvRateGroup()),
_survivor_surv_rate_group(new SurvRateGroup()),

@ -35,6 +35,7 @@
#include "gc/g1/g1FullGCReferenceProcessorExecutor.hpp"
#include "gc/g1/g1FullGCScope.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.hpp"
@ -62,20 +63,24 @@ static void update_derived_pointers() {
#endif
}

G1FullCollector::G1FullCollector(G1FullGCScope* scope,
ReferenceProcessor* reference_processor,
G1CMBitMap* bitmap,
uint workers) :
_scope(scope),
_num_workers(workers),
_mark_bitmap(bitmap),
G1CMBitMap* G1FullCollector::mark_bitmap() {
return _heap->concurrent_mark()->next_mark_bitmap();
}

ReferenceProcessor* G1FullCollector::reference_processor() {
return _heap->ref_processor_stw();
}

G1FullCollector::G1FullCollector(G1CollectedHeap* heap, GCMemoryManager* memory_manager, bool explicit_gc, bool clear_soft_refs) :
_heap(heap),
_scope(memory_manager, explicit_gc, clear_soft_refs),
_num_workers(heap->workers()->active_workers()),
_oop_queue_set(_num_workers),
_array_queue_set(_num_workers),
_preserved_marks_set(true),
_reference_processor(reference_processor),
_serial_compaction_point(),
_is_alive(_mark_bitmap),
_is_alive_mutator(_reference_processor, &_is_alive) {
_is_alive(heap->concurrent_mark()->next_mark_bitmap()),
_is_alive_mutator(heap->ref_processor_stw(), &_is_alive) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

_preserved_marks_set.init(_num_workers);
@ -99,8 +104,19 @@ G1FullCollector::~G1FullCollector() {
}

void G1FullCollector::prepare_collection() {
_reference_processor->enable_discovery();
_reference_processor->setup_policy(scope()->should_clear_soft_refs());
_heap->g1_policy()->record_full_collection_start();

_heap->print_heap_before_gc();
_heap->print_heap_regions();

_heap->abort_concurrent_cycle();
_heap->verify_before_full_collection(scope()->is_explicit_gc());

_heap->gc_prologue(true);
_heap->prepare_heap_for_full_collection();

reference_processor()->enable_discovery();
reference_processor()->setup_policy(scope()->should_clear_soft_refs());

// When collecting the permanent generation Method*s may be moving,
// so we either have to flush all bcp data or convert it into bci.
@ -139,6 +155,15 @@ void G1FullCollector::complete_collection() {
BiasedLocking::restore_marks();
CodeCache::gc_epilogue();
JvmtiExport::gc_epilogue();

_heap->prepare_heap_for_mutators();

_heap->g1_policy()->record_full_collection_end();
_heap->gc_epilogue(true);

_heap->verify_after_full_collection();

_heap->print_heap_after_full_collection(scope()->heap_transition());
}

void G1FullCollector::phase1_mark_live_objects() {
@ -164,11 +189,11 @@ void G1FullCollector::phase1_mark_live_objects() {
GCTraceTime(Debug, gc, phases) debug("Phase 1: Class Unloading and Cleanup", scope()->timer());
// Unload classes and purge the SystemDictionary.
bool purged_class = SystemDictionary::do_unloading(&_is_alive, scope()->timer());
G1CollectedHeap::heap()->complete_cleaning(&_is_alive, purged_class);
_heap->complete_cleaning(&_is_alive, purged_class);
} else {
GCTraceTime(Debug, gc, phases) debug("Phase 1: String and Symbol Tables Cleanup", scope()->timer());
// If no class unloading just clean out strings and symbols.
G1CollectedHeap::heap()->partial_cleaning(&_is_alive, true, true, G1StringDedup::is_enabled());
_heap->partial_cleaning(&_is_alive, true, true, G1StringDedup::is_enabled());
}

scope()->tracer()->report_object_count_after_gc(&_is_alive);
@ -210,18 +235,18 @@ void G1FullCollector::phase4_do_compaction() {
}

void G1FullCollector::restore_marks() {
SharedRestorePreservedMarksTaskExecutor task_executor(G1CollectedHeap::heap()->workers());
SharedRestorePreservedMarksTaskExecutor task_executor(_heap->workers());
_preserved_marks_set.restore(&task_executor);
_preserved_marks_set.reclaim();
}

void G1FullCollector::run_task(AbstractGangTask* task) {
G1CollectedHeap::heap()->workers()->run_task(task, _num_workers);
_heap->workers()->run_task(task, _num_workers);
}

void G1FullCollector::verify_after_marking() {
if (!VerifyDuringGC) {
//Only do verification if VerifyDuringGC is set.
if (!VerifyDuringGC || !_heap->verifier()->should_verify(G1HeapVerifier::G1VerifyFull)) {
// Only do verification if VerifyDuringGC and G1VerifyFull is set.
return;
}

@ -229,7 +254,7 @@ void G1FullCollector::verify_after_marking() {
#if COMPILER2_OR_JVMCI
DerivedPointerTableDeactivate dpt_deact;
#endif
G1CollectedHeap::heap()->prepare_for_verify();
_heap->prepare_for_verify();
// Note: we can verify only the heap here. When an object is
// marked, the previous value of the mark word (including
// identity hash values, ages, etc) is preserved, and the mark
@ -240,6 +265,6 @@ void G1FullCollector::verify_after_marking() {
// fail. At the end of the GC, the original mark word values
// (including hash values) are restored to the appropriate
// objects.
GCTraceTime(Info, gc, verify)("During GC (full)");
G1CollectedHeap::heap()->verify(VerifyOption_G1UseFullMarking);
GCTraceTime(Info, gc, verify)("Verifying During GC (full)");
_heap->verify(VerifyOption_G1UseFullMarking);
}

@ -28,6 +28,7 @@
#include "gc/g1/g1FullGCCompactionPoint.hpp"
#include "gc/g1/g1FullGCMarker.hpp"
#include "gc/g1/g1FullGCOopClosures.hpp"
#include "gc/g1/g1FullGCScope.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/taskqueue.hpp"
@ -38,45 +39,41 @@ class G1CMBitMap;
class G1FullGCMarker;
class G1FullGCScope;
class G1FullGCCompactionPoint;
class GCMemoryManager;
class ReferenceProcessor;

// The G1FullCollector holds data associated with the current Full GC.
class G1FullCollector : StackObj {
G1FullGCScope* _scope;
G1CollectedHeap* _heap;
G1FullGCScope _scope;
uint _num_workers;
G1FullGCMarker** _markers;
G1FullGCCompactionPoint** _compaction_points;
G1CMBitMap* _mark_bitmap;
OopQueueSet _oop_queue_set;
ObjArrayTaskQueueSet _array_queue_set;
PreservedMarksSet _preserved_marks_set;
ReferenceProcessor* _reference_processor;
G1FullGCCompactionPoint _serial_compaction_point;

G1IsAliveClosure _is_alive;
ReferenceProcessorIsAliveMutator _is_alive_mutator;

public:
G1FullCollector(G1FullGCScope* scope,
ReferenceProcessor* reference_processor,
G1CMBitMap* mark_bitmap,
uint workers);
G1FullCollector(G1CollectedHeap* heap, GCMemoryManager* memory_manager, bool explicit_gc, bool clear_soft_refs);
~G1FullCollector();

void prepare_collection();
void collect();
void complete_collection();

G1FullGCScope* scope() { return _scope; }
G1FullGCScope* scope() { return &_scope; }
uint workers() { return _num_workers; }
G1FullGCMarker* marker(uint id) { return _markers[id]; }
G1FullGCCompactionPoint* compaction_point(uint id) { return _compaction_points[id]; }
G1CMBitMap* mark_bitmap() { return _mark_bitmap; }
OopQueueSet* oop_queue_set() { return &_oop_queue_set; }
ObjArrayTaskQueueSet* array_queue_set() { return &_array_queue_set; }
PreservedMarksSet* preserved_mark_set() { return &_preserved_marks_set; }
ReferenceProcessor* reference_processor() { return _reference_processor; }
G1FullGCCompactionPoint* serial_compaction_point() { return &_serial_compaction_point; }
G1CMBitMap* mark_bitmap();
ReferenceProcessor* reference_processor();

private:
void phase1_mark_live_objects();

@ -26,6 +26,7 @@
#define SHARE_GC_G1_G1FULLGCCOMPACTIONPOINT_HPP

#include "memory/allocation.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/growableArray.hpp"

class HeapRegion;

@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "gc/g1/g1FullGCScope.hpp"

G1FullGCScope::G1FullGCScope(bool explicit_gc, bool clear_soft) :
G1FullGCScope::G1FullGCScope(GCMemoryManager* memory_manager, bool explicit_gc, bool clear_soft) :
_rm(),
_explicit_gc(explicit_gc),
_g1h(G1CollectedHeap::heap()),
@ -36,7 +36,7 @@ G1FullGCScope::G1FullGCScope(bool explicit_gc, bool clear_soft) :
_active(),
_cpu_time(),
_soft_refs(clear_soft, _g1h->collector_policy()),
_memory_stats(true, _g1h->gc_cause()),
_memory_stats(memory_manager, _g1h->gc_cause()),
_collector_stats(_g1h->g1mm()->full_collection_counters()),
_heap_transition(_g1h) {
_timer.register_gc_start();